/* This file is part of Mailfromd.
Copyright (C) 2008-2020 Sergey Poznyakoff
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>. */
#ifdef HAVE_CONFIG_H
# include <config.h>
#endif
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <pthread.h>
#include <sysexits.h>
#include <syslog.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <mailutils/mailutils.h>
#include <mailutils/syslog.h>
#include <gacopyz.h>
#include "libmf.h"
#include "sm/error.h"
#include "sm/smreplycodes.h"
#include "sm/pmilter.h"
#include "sm/pmfdef.h"
#include "sm/pmfapi.h"
#ifndef PM_MACROS_MAX /* MeTA 1.0.0.0 has this constant */
# ifdef PM_MAX_MACROS /* This was used in older versions */
# define PM_MACROS_MAX PM_MAX_MACROS
# else
# define PM_MACROS_MAX 16 /* Provide a reasonable default anyway */
# endif
#endif
const char *program_version = "pmult (" PACKAGE_STRING ")";
int log_to_stderr = -1;
const char *log_stream;
char *portspec; /* Communication socket */
unsigned pmilter_debug_level;
char *pidfile; /* pidfile name */
int no_sig_handler; /* Disable signal handler */
int want_auth_macros; /* Pass auth_ macros to MAIL handler */
static pthread_mutex_t pmult_debug_mutex = PTHREAD_MUTEX_INITIALIZER;
size_t pmult_debug;
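/* PMU_DEBUG serializes debug output: the handlers below run in
   multiple per-connection threads, so each mu_debug call is wrapped
   in pmult_debug_mutex to keep messages from interleaving. */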
#define PMU_DEBUG(cat, lev, s) \
do \
{ \
pthread_mutex_lock (&pmult_debug_mutex); \
mu_debug (cat, lev, s); \
pthread_mutex_unlock (&pmult_debug_mutex); \
} \
while (0)
unsigned int max_threads_soft;
unsigned int max_threads_hard;
unsigned int max_pmilter_fd;
struct pmult_client
{
int type; /* Type, unused so far */
char *name;
struct timeval timeout[GACOPYZ_TO_COUNT];
char *url;
int logmask;
};
/* List of configured clients: */
mu_list_t /* of struct pmult_client */ client_list;
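/* States of the message scanner used by collect_headers below.  The
   message is delivered in raw chunks; the scanner walks them byte by
   byte looking for the empty line (CRLF CRLF) that ends the headers
   and for the CRLF "." CRLF marker that ends the message. */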
enum pmult_msg_state
{
pmult_msg_state_initial,
pmult_msg_state_headers,
pmult_msg_state_cr1,
pmult_msg_state_crlf1,
pmult_msg_state_cr2,
pmult_msg_state_body,
pmult_msg_state_dot,
pmult_msg_state_dot_cr,
pmult_msg_state_eom
};
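/* Per-session private data, attached to the pmilter context with
   sm_pmfi_set_ctx_se.  It holds the list of gacopyz milter connections
   opened for this session, the session/transaction identifiers used in
   diagnostics, recipient counters, and the buffers that accumulate the
   collected headers and any replacement body chunks. */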
struct pmult_priv_data
{
mu_list_t /* of gacopyz_srv_t */ srvlist;
pthread_mutex_t mutex;
char *seid;
char *seid_c;
char *taid;
unsigned nrcpt;
unsigned nbadrcpts;
enum pmult_msg_state state;
mu_stream_t hdrstream;
mu_header_t hdr;
unsigned char *hdrbuf;
mu_opool_t body_chunks;
mu_iterator_t body_itr;
size_t argc;
char **argv;
};
#define PRIV_SEID(p) ((p)->seid ? (p)->seid : "")
#define PRIV_SEID_C(p) ((p)->seid_c ? (p)->seid_c : pmult_seid_c (p))
#define PRIV_LOCK(p) pthread_mutex_lock (&(p)->mutex)
#define PRIV_UNLOCK(p) pthread_mutex_unlock (&(p)->mutex)
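/* Return the session identifier followed by a colon, for use as a
   prefix in diagnostic messages.  The value is computed lazily and
   cached in p->seid_c; an empty string is returned if the identifier
   is unknown or memory is short. */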
static char *
pmult_seid_c (struct pmult_priv_data *p)
{
if (p->seid)
{
p->seid_c = malloc (strlen (p->seid) + 2);
if (p->seid_c)
{
strcpy (p->seid_c, p->seid);
strcat (p->seid_c, ":");
return p->seid_c;
}
}
return "";
}
static int
pmult_append_body_chunk (struct pmult_priv_data *p, char *buf, size_t size)
{
int rc;
if (!p->body_chunks)
{
rc = mu_opool_create (&p->body_chunks, MU_OPOOL_DEFAULT);
if (rc)
{
mu_error (_("cannot create opool: %s"), mu_strerror (rc));
return rc;
}
mu_opool_set_bucket_size (p->body_chunks, PMILTER_CHUNK_SIZE);
}
rc = mu_opool_append (p->body_chunks, buf, size);
if (rc)
mu_error (_("cannot append body chunk: %s"), mu_strerror (rc));
return rc;
}
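/* Translation between Sendmail-style macro names, as used in milter
   configurations, and the corresponding MeTA1 PMM_* macro codes. */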
struct mu_kwd macro_trans[] = {
{ "hostname", PMM_SRVHOSTNAME, }, /* hostname of server */
{ "client_resolve", PMM_CLIENT_RESOLVE }, /* result of client lookups */
{ "tls_version", PMM_TLS_VERSION },
{ "tls_cipher_suite", PMM_TLS_CIPHER_SUITE },
{ "tls_cipher_bits", PMM_TLS_CIPHER_BITS },
{ "tls_cert_subject", PMM_TLS_CERT_SUBJECT },
{ "tls_cert_issuer", PMM_TLS_CERT_ISSUER },
{ "tls_alg_bits", PMM_TLS_ALG_BITS },
{ "tls_vrfy", PMM_TLS_VRFY },
{ "tls_cn_subject", PMM_TLS_CN_SUBJECT },
{ "tls_cn_issuer", PMM_TLS_CN_ISSUER },
{ "cn_issuer", PMM_TLS_CN_ISSUER },
{ "cn_subject", PMM_TLS_CN_SUBJECT },
{ "auth_type", PMM_AUTH_TYPE },
{ "auth_authen", PMM_AUTH_AUTHEN },
{ "auth_author", PMM_AUTH_AUTHOR },
{ "taid", PMM_MAIL_TAID },
{ "msgid", PMM_DOT_MSGID },
{ "c", PMM_DOT_HOPS },
{ NULL }
};
struct mu_kwd meta_stage_trans[] = {
{ "connect", PM_SMST_CONNECT },
{ "helo", PM_SMST_EHLO },
{ "envfrom", PM_SMST_MAIL },
{ "mail", PM_SMST_MAIL },
{ "envrcpt", PM_SMST_RCPT },
{ "rcpt", PM_SMST_RCPT },
{ "data", PM_SMST_DATA },
{ "header", PM_SMST_DATA },
{ "body", PM_SMST_DATA },
{ "eom", PM_SMST_DOT },
{ "dot", PM_SMST_DOT },
{ NULL }
};
static uint32_t macrotab[PM_SMST_MAX][PM_MACROS_MAX+1] = {
{ PMM_SEID, PMM_END },
{ PMM_END },
{ PMM_END },
{ PMM_END },
{ PMM_END },
{ PMM_END }
};
static char *macronames[PM_SMST_MAX][PM_MACROS_MAX+1] = {
{ "seid" },
};
static int
macro_defined(uint32_t *tab, uint count, uint32_t macro)
{
uint i;
for (i = 0; i < PM_MACROS_MAX && tab[i] != PMM_END; i++)
if (tab[i] == macro)
return 1;
return 0;
}
static char *builtin_macros[] = {
"multiplexer", "pmult",
"mult_version", PACKAGE_VERSION,
NULL
};
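/* Build a NULL-terminated key/value array of macro assignments for the
   stage WHERE.  The first RESERVE_COUNT pairs are left for the caller
   to fill in (e.g. "s" for HELO); they are followed by the macros
   requested in macrotab[] and by the built-in macros above.  The array
   is kept in P->argv and reallocated as needed, so the caller must not
   free it. */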
static char **
translate_macros0 (pmse_ctx_P pmse_ctx, struct pmult_priv_data *p,
uint where, size_t reserve_count)
{
char **kv;
size_t count, i;
for (count = 0; count < PM_MACROS_MAX && macrotab[where][count] != PMM_END;
count++)
;
count += reserve_count;
count *= 2;
count += MU_ARRAY_SIZE (builtin_macros) - 1;
count++;
if (count > p->argc)
{
p->argc = count;
p->argv = mu_realloc (p->argv, count * sizeof (p->argv[0]));
}
kv = p->argv;
for (i = 0, count = 2*reserve_count; macrotab[where][i] != PMM_END; i++)
{
char *val;
sm_ret_T ret = sm_pmfi_getmac (pmse_ctx, macrotab[where][i], &val);
if (ret != SM_SUCCESS)
mu_error (_("cannot get the value of {%s}: %s"),
macronames[where][i], smerr2txt (ret));
else if (val)
{
kv[count++] = macronames[where][i];
kv[count++] = val;
}
}
for (i = 0; builtin_macros[i]; i++)
kv[count++] = builtin_macros[i];
kv[count] = NULL;
return kv;
}
static char **
translate_macros (pmse_ctx_P pmse_ctx, uint where, size_t reserve_count)
{
return translate_macros0 (pmse_ctx, sm_pmfi_get_ctx_se (pmse_ctx),
where, reserve_count);
}
static pthread_mutex_t pmult_mutex = PTHREAD_MUTEX_INITIALIZER;
#define protect() pthread_mutex_lock (&pmult_mutex)
#define unprotect() pthread_mutex_unlock (&pmult_mutex)
static int
_cb_client_type (void *data, mu_config_value_t *arg)
{
if (mu_cfg_assert_value_type (arg, MU_CFG_STRING))
return 1;
if (strcmp (arg->v.string, "milter") == 0)
    /* OK */;
else if (strcmp (arg->v.string, "pmilter") == 0)
mu_error (_("client type %s is not supported yet"),
arg->v.string);
else
mu_error (_("unknown client type %s"),
arg->v.string);
return 0;
}
static int
_cb_write_timeout (void *data, mu_config_value_t *arg)
{
struct pmult_client *clt = data;
return config_cb_timeout (&clt->timeout[GACOPYZ_TO_WRITE], arg);
}
static int
_cb_read_timeout (void *data, mu_config_value_t *arg)
{
struct pmult_client *clt = data;
return config_cb_timeout (&clt->timeout[GACOPYZ_TO_READ], arg);
}
static int
_cb_eom_timeout (void *data, mu_config_value_t *arg)
{
struct pmult_client *clt = data;
return config_cb_timeout (&clt->timeout[GACOPYZ_TO_EOM], arg);
}
static int
_cb_connect_timeout (void *data, mu_config_value_t *arg)
{
struct pmult_client *clt = data;
return config_cb_timeout (&clt->timeout[GACOPYZ_TO_CONNECT], arg);
}
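/* The log-level statement of a client block takes a list of level
   names, each optionally prefixed with `!' (exclude this level), `<'
   (all levels up to and including it) or `>' (this level and above).
   The PMULT_LF_* flags below record these prefixes during parsing, and
   _cb_log_level accumulates the result in clt->logmask using the
   gacopyz SMI_LOG_* masks. */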
#define PMULT_LF_REVERT 0x1
#define PMULT_LF_UPTO 0x2
#define PMULT_LF_FROM 0x4
#define PMULT_LOG_LEVEL(flags, lev) \
((flags & PMULT_LF_UPTO) ? SMI_LOG_UPTO (lev) : \
(flags & PMULT_LF_FROM) ? SMI_LOG_FROM (lev) : \
SMI_LOG_MASK (lev))
static int
_cb_log_level (void *data, mu_config_value_t *arg)
{
struct pmult_client *clt = data;
int i;
struct mu_wordsplit ws;
if (mu_cfg_assert_value_type (arg, MU_CFG_STRING))
return 1;
ws.ws_delim = ",";
if (mu_wordsplit (arg->v.string, &ws,
MU_WRDSF_NOVAR | MU_WRDSF_NOCMD | MU_WRDSF_DELIM))
{
mu_error ("cannot split line: %s", mu_wordsplit_strerror (&ws));
return 1;
}
for (i = 0; i < ws.ws_wordc; i++)
{
char *p = ws.ws_wordv[i];
int lev;
int flags = 0;
if (*p == '!')
{
p++;
flags |= PMULT_LF_REVERT;
}
if (*p == '<')
{
p++;
flags |= PMULT_LF_UPTO;
}
else if (*p == '>')
{
p++;
flags |= PMULT_LF_FROM;
}
lev = gacopyz_string_to_log_level (p);
if (lev == -1)
{
mu_error (_("invalid log level: %s"), p);
return 1;
}
if (flags & PMULT_LF_REVERT)
clt->logmask &= ~PMULT_LOG_LEVEL (flags, lev);
else
clt->logmask |= PMULT_LOG_LEVEL (flags, lev);
}
mu_wordsplit_free (&ws);
return 0;
}
static int
cb_debug(void *data, mu_config_value_t *arg)
{
if (mu_cfg_assert_value_type(arg, MU_CFG_STRING))
return 1;
mu_debug_parse_spec(arg->v.string);
return 0;
}
static int
cb_ignore(void *data, mu_config_value_t *arg)
{
/* nothing */
return 0;
}
struct mu_cfg_param client_cfg_param[] = {
{ "type", mu_cfg_callback, NULL, 0, _cb_client_type,
N_("Set remote milter type. Only `milter' is understood so far."),
/* TRANSLATORS: 'milter' and 'pmilter' are keywords, do not translate
them. */
N_("arg: milter [version: number]|pmilter") },
{ "url", mu_c_string, NULL, mu_offsetof(struct pmult_client, url), NULL,
N_("Set remote client URL.") },
{ "write-timeout", mu_cfg_callback, NULL, 0, _cb_write_timeout,
N_("Set write timeout."),
N_("arg: interval") },
{ "read-timeout", mu_cfg_callback, NULL, 0, _cb_read_timeout,
N_("Set read timeout."),
N_("arg: interval") },
{ "eom-timeout", mu_cfg_callback, NULL, 0, _cb_eom_timeout,
N_("Set timeout for EOM."),
N_("arg: interval") },
{ "connect-timeout", mu_cfg_callback, NULL, 0, _cb_connect_timeout,
N_("Set connect timeout."),
N_("arg: interval") },
{ "log-level", mu_cfg_callback, NULL, 0, _cb_log_level,
N_("Set log verbosity level. Arg is a list of items separated by commas "
"or whitespace. Each item is a log level optionally prefixed with `!' "
"to indicate `any level except this', or '<', meaning `all levels up "
"to and including this'. Log levels in order of increasing priority "
"are: proto, debug, info, warn, err, fatal."),
N_("arg: list") },
{ NULL }
};
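/* Normalize the connection specification ARG, as accepted by
   gacopyz_parse_connection, into the canonical "proto:port@path" form
   (or a bare path if no protocol is given).  Returns a newly allocated
   string, or NULL if ARG cannot be parsed or a port is given without a
   protocol. */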
static char *
convert_url (char const *arg)
{
char *ptr;
char *proto = NULL;
char *port = NULL;
char *path = NULL;
size_t len;
if (gacopyz_parse_connection (arg, &proto, &port, &path))
return NULL;
if (!proto)
{
if (port)
return NULL;
ptr = mu_strdup (path);
}
else
{
len = strlen (proto) + 1
+ (port ? strlen (port) + 1 : 0) + strlen (path) + 1;
ptr = mu_alloc (len);
strcpy (ptr, proto);
strcat (ptr, ":");
if (port)
{
strcat (ptr, port);
strcat (ptr, "@");
}
strcat (ptr, path);
}
free (path);
free (port);
free (proto);
return ptr;
}
static int
_cb_portspec (void *data, mu_config_value_t *arg)
{
char **pptr = data, *ptr;
if (mu_cfg_assert_value_type (arg, MU_CFG_STRING))
return 1;
ptr = convert_url (arg->v.string);
if (!ptr)
return 1;
*pptr = ptr;
return 0;
}
struct macro_slot
{
uint stage;
uint num;
};
int
define_single_macro (const char *string, void *data)
{
struct macro_slot *slot = data;
int macro;
if (slot->num == PM_MACROS_MAX)
{
mu_error (_("macro table %d is full, symbol {%s} ignored"),
slot->stage, string);
return 0;
}
  if (mu_kwd_xlat_name (macro_trans, string, &macro))
{
mu_error (_("unknown MeTA1 macro %s"),
string);
return 1;
}
switch (macro)
{
case PMM_MAIL_TAID:
if (slot->stage < PM_SMST_MAIL)
{
mu_error (_("macro \"%s\" cannot be requested at this stage"),
"taid");
return 1;
}
break;
case PMM_DOT_MSGID:
if (slot->stage != PM_SMST_DOT)
{
mu_error (_("macro \"%s\" cannot be requested at this stage"),
"msgid");
return 1;
}
}
if (macro_defined (macrotab[slot->stage], slot->num, macro))
{
mu_diag_output (MU_LOG_WARNING, _("macro %s already defined"), string);
return 1;
}
macrotab[slot->stage][slot->num] = macro;
if (mu_kwd_xlat_tok (macro_trans, macro,
                       (const char**) &macronames[slot->stage][slot->num]))
{
mu_error (_("INTERNAL ERROR at %s:%d"),
__FILE__, __LINE__);
exit (EX_SOFTWARE);
}
slot->num++;
return 0;
}
static int
define_macros (const char *stage_name, mu_config_value_t *arg)
{
int rc;
uint i;
struct macro_slot slot;
if (mu_kwd_xlat_name (meta_stage_trans, stage_name, (int*) &slot.stage))
{
mu_error (_("unknown SMTP stage"));
return 1;
}
for (i = 0; i < PM_MACROS_MAX && macrotab[slot.stage][i] != PMM_END; i++)
;
slot.num = i;
rc = mu_cfg_string_value_cb (arg, define_single_macro, &slot);
macrotab[slot.stage][slot.num] = PMM_END;
macronames[slot.stage][slot.num] = NULL;
return rc;
}
struct define_closure
{
int retcode;
mu_config_value_t *arg;
};
static int
define_macros_iter (void *item, void *data)
{
mu_config_value_t *arg = item;
struct define_closure *clos = data;
if (mu_cfg_assert_value_type (arg, MU_CFG_STRING))
clos->retcode = 1;
else
clos->retcode |= define_macros (arg->v.string, clos->arg);
return 0;
}
static int
_cb_define (void *data, mu_config_value_t *arg)
{
  int rc = 0;
struct define_closure dc;
if (mu_cfg_assert_value_type (arg, MU_CFG_ARRAY))
return 1;
if (arg->v.arg.c < 2)
{
mu_error (_("not enough arguments to define-macros"));
return 1;
}
else if (arg->v.arg.c > 2)
{
mu_error (_("too many arguments to define-macros"));
return 1;
}
switch (arg->v.arg.v[0].type)
{
case MU_CFG_STRING:
rc = define_macros (arg->v.arg.v[0].v.string, &arg->v.arg.v[1]);
break;
case MU_CFG_LIST:
dc.retcode = 0;
dc.arg = &arg->v.arg.v[1];
mu_list_foreach (arg->v.arg.v[0].v.list, define_macros_iter, &dc);
rc = dc.retcode;
break;
}
  return rc;
}
static void
request_auth_macros ()
{
uint32_t authmac[] = { PMM_AUTH_TYPE, PMM_AUTH_AUTHEN, PMM_AUTH_AUTHOR,
PMM_END };
uint i, j;
for (i = 0; i < PM_MACROS_MAX && macrotab[PM_SMST_MAIL][i] != PMM_END; i++)
;
for (j = 0; authmac[j] != PMM_END; j++)
{
if (i == PM_MACROS_MAX)
{
mu_error (_("too many macros defined for stage \"mail\", while "
"processing \"auth-macros\" statement"));
exit (EX_CONFIG);
}
if (macro_defined (macrotab[PM_SMST_MAIL], i, authmac[j]))
continue;
macrotab[PM_SMST_MAIL][i] = authmac[j];
if (mu_kwd_xlat_tok (macro_trans, authmac[j],
                           (const char**) &macronames[PM_SMST_MAIL][i]))
{
mu_error (_("INTERNAL ERROR at %s:%d"), __FILE__, __LINE__);
exit (EX_SOFTWARE);
}
i++;
}
macrotab[PM_SMST_MAIL][i] = PMM_END;
macronames[PM_SMST_MAIL][i] = NULL;
}
struct mu_cfg_param pmult_cfg_param[] = {
{ "listen", mu_cfg_callback, &portspec, 0, _cb_portspec,
N_("Listen for milter requests on the given URL."),
N_("url: string") },
{ "max-threads-soft", mu_c_uint, &max_threads_soft, 0, NULL,
N_("Maximum number of threads (soft limit).") },
{ "max-threads-hard", mu_c_uint, &max_threads_hard, 0, NULL,
N_("Maximum number of threads (hard limit).") },
{ "max-pmilter-fd", mu_c_uint, &max_pmilter_fd, 0, NULL,
N_("Maximum number of file descriptors pmilter is allowed to open") },
{ "client", mu_cfg_section },
{ "pmilter-debug", mu_c_uint, &pmilter_debug_level, 0, NULL,
N_("Set pmilter debug verbosity level.") },
{ "pidfile", mu_c_string, &pidfile, 0, NULL,
N_("Write PID to this file.") },
{ "auth-macros", mu_c_bool, &want_auth_macros, 0, NULL,
N_("Pass auth macros to MAIL handler.") },
{ "define-macros", mu_cfg_callback, NULL, 0, _cb_define,
  N_("Define macros for the given SMTP stage."),
  N_("arg: <stage> <macro-list>") },
{ "debug", mu_cfg_callback, NULL, 0, cb_debug,
  N_("Set debug verbosity level.  Arg is a list of <module>[=<level>] items, "
     "where <module> is the name of a Mailfromd module, and <level> "
     "is the desired verbosity level for that module."),
  N_("spec: list") },
{ "database-type", mu_cfg_callback, NULL, 0, cb_ignore,
N_("Ignored for compatibility with mailfromd"),
N_("type: string") },
{ "database-mode", mu_cfg_callback, NULL, 0, cb_ignore,
N_("Ignored for compatibility with mailfromd"),
N_("mode: octal") },
{ "state-directory", mu_cfg_callback, NULL, 0, cb_ignore,
N_("Ignored for compatibility with mailfromd"),
N_("dir: string") },
{ "lock-retry-count", mu_cfg_callback, NULL, 0, cb_ignore,
N_("Ignored for compatibility with mailfromd"),
N_("arg: number")},
{ "lock-retry-timeout", mu_cfg_callback, NULL, 0, cb_ignore,
N_("Ignored for compatibility with mailfromd"),
N_("time: interval") },
{ NULL }
};
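/* An illustrative configuration sketch using the statements defined
   above.  The addresses, file names and timeouts are made-up example
   values, not defaults:

     listen inet:3333@127.0.0.1;
     pidfile /var/run/pmult.pid;
     auth-macros yes;

     client smtp-filter {
       url unix:/var/run/mailfromd.sock;
       connect-timeout 10;
       read-timeout 30;
       log-level "!debug,<info";
     }
*/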
static void
pmult_client_free (struct pmult_client *clt)
{
free (clt->name);
free (clt);
}
static int
client_block_begin (const char *name, void **section_data)
{
extern struct timeval default_gacopyz_timeout[GACOPYZ_TO_COUNT];
struct pmult_client *clt = mu_calloc (1, sizeof *clt);
  clt->name = mu_strdup (name ? name : "");
memcpy (clt->timeout, default_gacopyz_timeout, sizeof clt->timeout);
clt->logmask = SMI_LOG_FROM(SMI_LOG_INFO);
*section_data = clt;
return 0;
}
static int
client_block_end (struct pmult_client *clt)
{
gacopyz_srv_t gsrv;
int rc = gacopyz_srv_create (&gsrv, clt->name, clt->url, clt->logmask);
if (rc)
{
pmult_client_free (clt);
return 1; /* FIXME: error message */
}
gacopyz_srv_destroy (&gsrv);
if (!client_list)
MU_ASSERT (mu_list_create (&client_list));
mu_list_append (client_list, clt);
return 0;
}
static int
client_section_parser (enum mu_cfg_section_stage stage,
const mu_cfg_node_t *node,
const char *section_label, void **section_data,
void *call_data,
mu_cfg_tree_t *tree)
{
const char *s;
switch (stage)
{
case mu_cfg_section_start:
if (!node->label)
s = NULL;
else
{
if (mu_cfg_assert_value_type (node->label, MU_CFG_STRING))
return 1;
s = node->label->v.string;
}
return client_block_begin (s, section_data);
case mu_cfg_section_end:
return client_block_end ((struct pmult_client *)*section_data);
}
return 0;
}
static void
pmult_cfg_init ()
{
struct mu_cfg_section *section;
  if (mu_create_canned_section ("client", &section))
exit (EX_SOFTWARE);
section->parser = client_section_parser;
section->label = N_("ident");
mu_cfg_section_add_params (section, client_cfg_param);
}
/* Command line parsing */
static char prog_doc[] = N_("pmilter multiplexer");
static void
opt_url (struct mu_parseopt *po, struct mu_option *opt, char const *arg)
{
portspec = convert_url (arg);
if (!portspec)
exit (po->po_exit_error);
}
static void
opt_debug (struct mu_parseopt *po, struct mu_option *opt, char const *arg)
{
mu_debug_parse_spec (arg);
}
static struct mu_option pmult_options[] = {
{ "url", 0, N_("URL"), MU_OPTION_DEFAULT,
N_("listen on the given URL"),
mu_c_string, NULL, opt_url },
{ "syslog", 0, NULL, MU_OPTION_DEFAULT,
N_("log to syslog (default)"),
mu_c_int, &log_to_stderr, NULL, "0" },
{ "stderr", 's', NULL, MU_OPTION_DEFAULT,
N_("log to stderr"),
mu_c_int, &log_to_stderr, NULL, "1" },
{ "log-tag", 0, N_("STRING"), MU_OPTION_DEFAULT,
N_("set the identifier used in syslog messages to STRING"),
mu_c_string, &mu_log_tag },
{ "debug", 'x', N_("LEVEL"), MU_OPTION_DEFAULT,
N_("set debug verbosity level"),
mu_c_string, NULL, opt_debug },
//FIXME:
{ "no-signal-handler", 0, NULL, MU_OPTION_DEFAULT,
N_("disable signal handling in the main thread (use for debugging)."),
mu_c_int, &no_sig_handler, NULL, "1" },
MU_OPTION_END
}, *options[] = { pmult_options, NULL };
static char *capa[] = {
"debug",
"logging",
NULL
};
#define SM_ONERROR_ACTION(expr,onerr) do \
{ \
sm_ret_T ret = expr; \
if (sm_is_err (ret)) \
{ \
char const *cp = smerr2txt (ret); \
if (cp) \
mu_error ("%s:%d: " #expr " failed: %s", \
__FILE__, __LINE__, cp); \
else \
mu_error ("%s:%d: " #expr " failed: %#x", \
__FILE__, __LINE__, ret); \
onerr; \
} \
} \
while (0)
#define SM_VERBOSE(expr) SM_ONERROR_ACTION(expr,)
#define SM_CHECK(expr) SM_ONERROR_ACTION(expr,return ret)
#define SM_ASSERT(expr) SM_ONERROR_ACTION(expr, exit (EX_CONFIG))
#define MSG_TEMPFAIL "451 4.3.2 Please try again later\r\n"
#define MSG_REJECT "550 5.7.1 Command rejected\r\n"
#define MSG_SHUTDOWN "421 4.7.0 pmult closing connection\r\n"
const char *
hdrcommname (int rc)
{
switch (rc)
{
case SMFIR_CHGHEADER:
return "chgheader";
case SMFIR_ADDHEADER:
return "addheader";
case SMFIR_INSHEADER:
return "insheader";
}
return "unknown";
}
static int
get_mod_index (struct pmult_priv_data *p, const char *header_name,
size_t pos, size_t *pidx)
{
size_t i, count;
const char *name;
if (mu_header_get_field_count (p->hdr, &count))
return 1;
for (i = 1; i <= count; i++)
{
if (mu_header_sget_field_name (p->hdr, i, &name))
continue;
if (strcmp (name, header_name) == 0 && --pos == 0)
{
*pidx = i - 1;
return 0;
}
}
return 1;
}
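/* Callback invoked by gacopyz when a remote milter requests a message
   modification that cannot be expressed as a handler return value:
   header additions, changes and insertions are forwarded to MeTA1 via
   sm_pmfi_hdr_mod, and replacement body chunks are collected in the
   opool for later replay by pmult_msg_rplc. */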
static int
cb_reply (gacopyz_srv_t gsrv, int cmd, int rcmd, void *data)
{
char *buf;
size_t size;
char *cp;
char *header;
uint type;
uint32_t idx = 0;
pmse_ctx_P pmse_ctx = data;
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
switch (rcmd)
{
case SMFIR_CHGHEADER:
case SMFIR_ADDHEADER:
case SMFIR_INSHEADER:
gacopyz_srv_reply (gsrv, &buf, &size);
if (rcmd != SMFIR_ADDHEADER)
{
idx = ntohl (*(uint32_t*)buf);
buf += sizeof (uint32_t);
}
cp = memchr (buf, 0, size);
if (!cp)
{
mu_diag_output (MU_DIAG_NOTICE, "Malformed CHGHEADER command");
break;
}
cp++;
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%u '%s','%s'",
hdrcommname (rcmd), idx, buf, cp));
header = malloc (size + 5);
if (!header)
{
mu_error ("%s", mu_strerror (ENOMEM));
return SMTP_R_ACCEPT;
}
strcpy (header, buf);
strcat (header, ": ");
strcat (header, cp);
strcat (header, "\r\n");
switch (rcmd)
{
case SMFIR_CHGHEADER:
{
/* Emulate milter semantics */
size_t index;
type = (*cp == 0) ? SM_HDRMOD_T_REMOVE : SM_HDRMOD_T_REPLACE;
if (get_mod_index (p, buf, idx, &index))
	    {
	      PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
			 ("no such header: %s", buf));
	      free (header);
	      return 0;
	    }
idx = index;
}
break;
case SMFIR_ADDHEADER:
type = SM_HDRMOD_T_APPEND;
break;
case SMFIR_INSHEADER:
type = SM_HDRMOD_T_INSERT;
}
SM_VERBOSE (sm_pmfi_hdr_mod (pmse_ctx, type, idx,
(unsigned char*) header));
free (header);
break;
case SMFIR_REPLBODY:
gacopyz_srv_reply (gsrv, &buf, &size);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("replbody, len=%lu",
(unsigned long) size));
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE7, ("bodyp=%.*s", (int) size, buf));
pmult_append_body_chunk (p, buf, size);
break;
}
return 0;
}
#define CRLF "\r\n"
static char *
trailcrlf (char **pbuf)
{
char *buf = *pbuf;
char *retp = NULL;
int len;
if (!buf)
*pbuf = CRLF;
else if ((len = strlen (buf)) < 2 || memcmp (buf + len - 2, CRLF, 2))
{
retp = malloc (len + 3);
      if (!retp)
	{
	  mu_error ("%s", mu_strerror (ENOMEM));
	  *pbuf = MSG_TEMPFAIL;
	}
      else
	{
	  strcat (strcpy (retp, buf), CRLF);
	  *pbuf = retp;
	}
    }
return retp;
}
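/* Translate the milter reply code RC received from a gacopyz server
   into the SMTP_R_* status expected by MeTA1, registering reply text
   (sm_pmfi_setreply) and recipient additions or deletions as a side
   effect.  IDENT and ARG name the command being processed and are used
   for diagnostics only. */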
static int
pmult_std_reply (struct pmult_priv_data *p, pmse_ctx_P pmse_ctx,
gacopyz_srv_t gsrv, int rc,
const char *ident, const char *arg)
{
char *buf, *tmp;
size_t size;
int status;
if (!arg)
arg = "";
switch (rc)
{
case SMFIR_CONTINUE:
break;
case SMFIR_REPLYCODE:
gacopyz_srv_reply (gsrv, &buf, &size);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, reply=%s",
ident, arg, buf));
tmp = trailcrlf (&buf);
      SM_VERBOSE (sm_pmfi_setreply (pmse_ctx, buf));
status = (buf[0] == '4') ? SMTP_R_TEMP : SMTP_R_PERM;
free (tmp);
return status;
case SMFIR_ACCEPT:
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, accept", ident, arg));
return SMTP_R_ACCEPT;
case SMFIR_REJECT:
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, reply=%s", ident, arg, MSG_REJECT));
SM_VERBOSE (sm_pmfi_setreply (pmse_ctx, MSG_REJECT));
return SMTP_R_PERM;
case SMFIR_DISCARD:
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, discard", ident, arg));
return SMTP_R_DISCARD;
case SMFIR_TEMPFAIL:
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, reply=%s", ident, arg, MSG_TEMPFAIL));
SM_VERBOSE (sm_pmfi_setreply (pmse_ctx, MSG_TEMPFAIL));
return SMTP_R_TEMP;
case SMFIR_SHUTDOWN:
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, reply=%s", ident, arg, MSG_SHUTDOWN));
SM_VERBOSE (sm_pmfi_setreply (pmse_ctx, MSG_SHUTDOWN));
return SMTP_R_SSD;
case SMFIR_ADDRCPT:
gacopyz_srv_reply (gsrv, &buf, &size);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, addrcpt=%s", ident, arg, buf));
SM_VERBOSE (sm_pmfi_rcpt_add (pmse_ctx, buf, NULL));
break;
case SMFIR_DELRCPT:
gacopyz_srv_reply (gsrv, &buf, &size);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE2,
("%s=%s, delrcpt=%s", ident, arg, buf));
/* FIXME: Index is always 0. Should it be 1? */
SM_VERBOSE (sm_pmfi_rcpt_del (pmse_ctx, buf, 0));
break;
case SMFIR_CHGHEADER:
case SMFIR_ADDHEADER:
case SMFIR_INSHEADER:
case SMFIR_REPLBODY:
break;
default:
if (mu_isprint (rc))
mu_diag_output (MU_DIAG_WARNING, _("unsupported reply code: %c (%d)"),
rc, rc);
else
mu_diag_output (MU_DIAG_WARNING, _("unsupported reply code: (%d)"),
rc);
}
return SMTP_R_CONT;
}
typedef sfsistat_T (*pmult_runfun_t) (pmse_ctx_P, gacopyz_srv_t, void *);
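/* Run RUNFUN for every gacopyz server attached to the session, after
   defining the macros from the MACROS key/value array on it.  The
   iteration stops at the first server whose result is neither
   SMTP_R_CONT nor SMTP_R_OK, and that result is returned.
   pmult_runlist is the mutex-protected wrapper; pmult_runlist0 is for
   callers that already hold the lock. */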
static sfsistat_T
pmult_runlist0 (struct pmult_priv_data *p, pmult_runfun_t runfun,
pmse_ctx_P pmse_ctx, void *data, char **macros)
{
sfsistat_T rc;
mu_iterator_t itr = NULL;
mu_list_get_iterator (p->srvlist, &itr);
for (mu_iterator_first (itr); !mu_iterator_is_done (itr);
mu_iterator_next (itr))
{
int i;
gacopyz_srv_t gsrv;
mu_iterator_current (itr, (void**)&gsrv);
if (macros)
for (i = 0; macros[i]; i += 2)
gacopyz_srv_define_macro (gsrv, macros[i], macros[i+1]);
rc = runfun (pmse_ctx, gsrv, data);
if (rc != SMTP_R_CONT && rc != SMTP_R_OK)
break;
}
mu_iterator_destroy (&itr);
return rc;
}
static sfsistat_T
pmult_runlist (struct pmult_priv_data *p, pmult_runfun_t runfun,
pmse_ctx_P pmse_ctx, void *data, char **macros)
{
sfsistat_T rc;
PRIV_LOCK (p);
rc = pmult_runlist0 (p, runfun, pmse_ctx, data, macros);
PRIV_UNLOCK (p);
return rc;
}
void
pmult_clear (struct pmult_priv_data *p)
{
PRIV_LOCK (p);
mu_stream_destroy (&p->hdrstream);
mu_header_destroy (&p->hdr);
mu_opool_destroy (&p->body_chunks);
mu_iterator_destroy (&p->body_itr);
if (p->hdrbuf)
{
free (p->hdrbuf);
p->hdrbuf = NULL;
}
p->state = pmult_msg_state_initial;
p->nrcpt = 0;
p->nbadrcpts = 0;
PRIV_UNLOCK (p);
}
void
pmult_free (struct pmult_priv_data *p)
{
if (!p)
return;
pmult_clear (p);
mu_list_destroy (&p->srvlist);
free (p->taid);
free (p->seid);
free (p->seid_c);
free (p->argv);
pthread_mutex_destroy (&p->mutex);
free (p);
}
void
pmult_shutdown (pmse_ctx_P pmse_ctx, struct pmult_priv_data *p)
{
mu_iterator_t itr;
if (!p)
return;
PRIV_LOCK (p);
mu_list_get_iterator (p->srvlist, &itr);
for (mu_iterator_first (itr); !mu_iterator_is_done (itr);
mu_iterator_next (itr))
{
gacopyz_srv_t gsrv;
mu_iterator_current (itr, (void**)&gsrv);
gacopyz_srv_quit (gsrv);
gacopyz_srv_close (gsrv);
gacopyz_srv_destroy (&gsrv);
}
mu_iterator_destroy (&itr);
sm_pmfi_set_ctx_se (pmse_ctx, NULL);
PRIV_UNLOCK (p);
pmult_free (p);
}
/* Translation table gacopyz_state -> PM state */
static int gacopyz_to_smst[] = {
/* gacopyz_stage_conn -> */ PM_SMST_CONNECT,
/* gacopyz_stage_helo -> */ PM_SMST_EHLO,
/* gacopyz_stage_mail -> */ PM_SMST_MAIL,
/* gacopyz_stage_rcpt -> */ PM_SMST_RCPT,
/* gacopyz_stage_data -> */ PM_SMST_DATA,
/* gacopyz_stage_eom -> */ PM_SMST_DOT,
/* gacopyz_stage_eoh -> */ PM_SMST_DATA
};
/* Tables of macros defined internally by pmult */
/* NOTE: Update them wherever you add or remove macros in the handlers
below.
FIXME: Find a way to automate this. */
static const char *conn_imt[] = { "r", "i", "client_addr", "client_name",
"client_port", NULL };
static const char *helo_imt[] = { "s", NULL };
static const char *mail_imt[] = { "f", "ntries", "nrcpts", "nbadrcpts", NULL };
static const char *rcpt_imt[] = { "nrcpts", "nbadrcpts", "rcpt_host",
"rcpt_addr", NULL };
#define data_imt NULL
#define eom_imt NULL
#define eoh_imt NULL
static const char **internal_macro_tab[gacopyz_stage_max] = {
conn_imt,
helo_imt,
mail_imt,
rcpt_imt,
data_imt,
eom_imt,
eoh_imt,
};
static int
internal_macro_p (enum gacopyz_stage gstage, const char *macro)
{
int i;
for (i = 0; i <= gstage; i++)
if (internal_macro_tab[i])
{
const char **p;
for (p = internal_macro_tab[i]; *p; p++)
if (strcmp (macro, *p) == 0)
return 1;
}
return 0;
}
/* For each Sendmail macro name from SYMV, find the corresponding
MeTA1 macro and register it for use at a given GSTAGE.
Produce verbose warnings if there are no more free slots in the
macro table. */
void
collect_stage_symbols (enum gacopyz_stage gstage, const char **symv)
{
int i;
int smst = gacopyz_to_smst[gstage];
for (i = 0; i < PM_MACROS_MAX && macrotab[smst][i] != PMM_END; i++)
;
for (; *symv; ++symv)
{
int macro;
if (internal_macro_p (gstage, *symv))
continue;
if (i == PM_MACROS_MAX)
{
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
(_("macro table %d is full, symbol {%s} ignored"),
smst, *symv));
continue;
}
      if (mu_kwd_xlat_name (macro_trans, *symv, &macro))
{
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
(_("Sendmail macro {%s} does not translate to a "
"MeTA1 one"),
*symv));
continue;
}
if (macro_defined (macrotab[smst], i, macro))
continue;
if (mu_kwd_xlat_tok (macro_trans, macro,
                           (const char**) &macronames[smst][i]))
{
mu_error (_("INTERNAL ERROR at %s:%d"), __FILE__, __LINE__);
exit (EX_SOFTWARE);
}
macrotab[smst][i++] = macro;
}
}
/* Run initial negotiation with each registered Gacopyz server. If the
negotiation returns requested macro names, register them for further
use. */
void
collect_symlists ()
{
mu_iterator_t itr = NULL;
protect ();
mu_list_get_iterator (client_list, &itr);
unprotect ();
for (mu_iterator_first (itr); !mu_iterator_is_done (itr);
mu_iterator_next (itr))
{
struct pmult_client *clt;
gacopyz_srv_t gsrv;
int i;
mu_iterator_current (itr, (void**)&clt);
gacopyz_srv_create (&gsrv, clt->name, clt->url, clt->logmask);
gacopyz_srv_set_all_timeouts (gsrv, clt->timeout);
if (gacopyz_srv_open (gsrv) != MI_SUCCESS)
{
if (clt->name[0])
mu_error (_("failed to connect to %s (milter %s)"),
clt->url, clt->name);
else
mu_error (_("failed to connect to %s"), clt->url);
gacopyz_srv_destroy (&gsrv);
continue;
}
gacopyz_srv_negotiate (gsrv);
for (i = 0; i < gacopyz_stage_max; i++)
{
const char **symv = gacopyz_srv_get_required_macros (gsrv, i);
if (symv)
collect_stage_symbols (i, symv);
}
gacopyz_srv_quit (gsrv);
gacopyz_srv_close (gsrv);
gacopyz_srv_destroy (&gsrv);
}
mu_iterator_destroy (&itr);
}
static sm_ret_T
pmult_negotiate (pmss_ctx_P pmss_ctx,
uint32_t srv_cap, uint32_t srv_fct, uint32_t srv_feat,
uint32_t srv_misc, uint32_t *pm_cap, uint32_t *pm_fct,
uint32_t *pm_feat, uint32_t *pm_misc)
{
#if HAVE_SM_PMFI_SETMACS
uint i;
collect_symlists ();
for (i = 0; i < PM_SMST_MAX; i++)
SM_CHECK (sm_pmfi_setmacs (pmss_ctx, i, macrotab[i]));
#else
SM_CHECK (sm_pmfi_setmaclist (pmss_ctx, PM_SMST_CONNECT, PMM_SEID, PMM_END));
if (want_auth_macros)
SM_CHECK (sm_pmfi_setmaclist (pmss_ctx, PM_SMST_MAIL, PMM_MAIL_TAID,
PMM_AUTH_TYPE, PMM_AUTH_AUTHEN, PMM_AUTH_AUTHOR,
PMM_END));
else
SM_CHECK (sm_pmfi_setmaclist (pmss_ctx, PM_SMST_MAIL, PMM_MAIL_TAID,
PMM_END));
#endif
return SM_SUCCESS;
}
static sfsistat_T
pmult_connect (pmse_ctx_P pmse_ctx, const char *hostname,
sm_sockaddr_T *hostaddr)
{
mu_iterator_t itr;
struct pmult_priv_data *p;
int status = SMTP_R_CONT;
int rc;
char *tmp = NULL;
char *client_addr = NULL, *client_port = NULL;
char **kv;
char buf[INT_BUFSIZE_BOUND (uintmax_t)];
if (mu_debug_level_p (pmult_debug, MU_DEBUG_TRACE1))
{
char *p = mu_sockaddr_to_astr (&hostaddr->sa, sizeof *hostaddr);
pthread_mutex_lock (&pmult_debug_mutex);
mu_debug_log_begin ("Connect from: %s, address %s\n", hostname, p);
pthread_mutex_unlock (&pmult_debug_mutex);
free (p);
}
p = calloc (1, sizeof *p);
if (!p)
{
mu_error ("%s: accept", mu_strerror (ENOMEM));
return SMTP_R_ACCEPT;
}
pthread_mutex_init (&p->mutex, NULL);
kv = translate_macros0 (pmse_ctx, p, PM_SMST_CONNECT, 0);
sm_pmfi_getmac (pmse_ctx, PMM_SEID, &tmp);
p->seid = strdup (tmp);
rc = mu_list_create (&p->srvlist);
if (rc)
{
mu_error ("%smu_list_create: %s", PRIV_SEID_C (p), mu_strerror (rc));
free (p);
return SMTP_R_ACCEPT;
}
protect ();
mu_list_get_iterator (client_list, &itr);
client_addr = strdup (inet_ntoa (hostaddr->sin.sin_addr));
snprintf (buf, sizeof buf, "%hu", ntohs (hostaddr->sin.sin_port));
client_port = strdup (buf);
unprotect ();
PRIV_LOCK (p);
for (mu_iterator_first (itr); !mu_iterator_is_done (itr);
mu_iterator_next (itr))
{
struct pmult_client *clt;
gacopyz_srv_t gsrv;
int i;
mu_iterator_current (itr, (void**)&clt);
gacopyz_srv_create (&gsrv, clt->name, clt->url, clt->logmask);
gacopyz_srv_set_callback (gsrv, cb_reply);
gacopyz_srv_set_callback_data (gsrv, pmse_ctx);
gacopyz_srv_set_all_timeouts (gsrv, clt->timeout);
if (gacopyz_srv_open (gsrv) != MI_SUCCESS)
{
mu_error (_("%sfailed to connect to %s (milter %s)"),
PRIV_SEID_C (p), clt->url, clt->name);
gacopyz_srv_destroy (&gsrv);
continue;
}
gacopyz_srv_negotiate (gsrv);
gacopyz_srv_define_macro (gsrv, "r", "SMTP");
gacopyz_srv_define_macro (gsrv, "i", PRIV_SEID (p));
if (client_addr)
gacopyz_srv_define_macro (gsrv, "client_addr", client_addr);
gacopyz_srv_define_macro (gsrv, "client_name", hostname);
if (client_port)
gacopyz_srv_define_macro (gsrv, "client_port", client_port);
for (i = 0; kv[i]; i += 2)
gacopyz_srv_define_macro (gsrv, kv[i], kv[i+1]);
/* FIXME: client_ptr, */
rc = gacopyz_srv_connect (gsrv, hostname, &hostaddr->sa);
status = pmult_std_reply (p, pmse_ctx, gsrv, rc, "connect", hostname);
if (status != SMTP_R_CONT)
{
gacopyz_srv_quit (gsrv);
gacopyz_srv_close (gsrv);
gacopyz_srv_destroy (&gsrv);
break;
}
mu_list_append (p->srvlist, gsrv);
}
free (client_addr);
free (client_port);
protect ();
mu_iterator_destroy (&itr);
unprotect ();
PRIV_UNLOCK (p);
sm_pmfi_set_ctx_se (pmse_ctx, p);
return status;
}
static sm_ret_T
pmult_close (pmse_ctx_P pmse_ctx)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
if (p)
{
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
("%sClosing connection", PRIV_SEID_C (p)));
pmult_shutdown (pmse_ctx, p);
}
return SM_SUCCESS;
}
static sfsistat_T
rf_helo (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
const char *helohost = data;
int rc = gacopyz_srv_helo (gsrv, helohost);
return pmult_std_reply (p, pmse_ctx, gsrv, rc, "helo", helohost);
}
static sfsistat_T
pmult_helo (pmse_ctx_P pmse_ctx, const char *helohost, bool ehlo)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
char **kv = translate_macros (pmse_ctx, PM_SMST_EHLO, 1);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
("%sHELO %s", PRIV_SEID_C (p), helohost));
kv[0] = "s";
kv[1] = (char*) helohost;
kv[2] = NULL;
return pmult_runlist (p, rf_helo, pmse_ctx, (void*) helohost, kv);
}
struct env_arg
{
char *ident;
int (*srvfun) (gacopyz_srv_t, char **);
char **argv;
};
/* Common for envmail and envrcpt */
static sfsistat_T
rf_envfun (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
struct env_arg *x = data;
int rc = x->srvfun (gsrv, x->argv);
return pmult_std_reply (p, pmse_ctx, gsrv, rc, x->ident, x->argv[0]);
}
static sfsistat_T
pmult_mail (pmse_ctx_P pmse_ctx, const char *mail, char **argv)
{
int rc, i, n;
mu_address_t addr;
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
char **kv = translate_macros (pmse_ctx, PM_SMST_MAIL, 4);
struct env_arg x;
sfsistat_T status;
if (mu_debug_level_p (pmult_debug, MU_DEBUG_TRACE1))
{
int i;
pthread_mutex_lock (&pmult_debug_mutex);
mu_debug_log_begin ("%sMAIL FROM: %s", PRIV_SEID_C (p), mail);
if (argv)
{
for (i = 0; argv[i]; i++)
mu_debug_log_cont (" %s", argv[i]);
}
mu_debug_log_nl ();
pthread_mutex_unlock (&pmult_debug_mutex);
}
/* Fill in the macro array */
kv[0] = "f";
rc = mu_address_create (&addr, mail);
if (rc)
kv[1] = strdup (mail);
else
{
mu_address_aget_email (addr, 1, &kv[1]);
mu_address_destroy (&addr);
}
kv[2] = "ntries";
kv[3] = "1";
kv[4] = "nrcpts";
kv[5] = "0";
kv[6] = "nbadrcpts";
kv[7] = "0";
/* Count arguments and allocate env array */
if (argv)
{
for (n = 0; argv[n]; n++)
;
}
else
n = 0;
x.argv = calloc (n + 2, sizeof (x.argv[0]));
if (!x.argv)
{
mu_error ("%spmult_mail: %s", PRIV_SEID_C (p), mu_strerror (ENOMEM));
free (kv[1]);
return SMTP_R_CONT;
}
x.argv[0] = (char *) mail;
for (i = 0; i < n; i++)
x.argv[i+1] = argv[i];
x.argv[i+1] = NULL;
x.ident = "envfrom";
x.srvfun = gacopyz_srv_envfrom;
status = pmult_runlist (p, rf_envfun, pmse_ctx, &x, kv);
free (kv[1]);
free (x.argv);
return status;
}
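/* Split the email address ARG (with or without the surrounding angle
   brackets) into three newly allocated strings: the address proper
   (*PSENDER), its local part (*ADDR) and its domain (*HOST, defaulting
   to "localhost" if ARG contains no `@').  Returns 0 on success, 1 on
   parse error or memory shortage, in which case nothing needs to be
   freed. */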
int
parse_email_addr (const char *arg, char **psender, char **addr, char **host)
{
size_t len;
const char *p = arg, *q;
if (*p == '<')
{
len = strlen (p);
if (p[len-1] != '>')
return 1;
p++;
*psender = malloc (len - 1);
if (*psender)
{
memcpy (*psender, p, len - 2);
(*psender)[len - 2] = 0;
}
}
else
*psender = strdup (arg);
if (!*psender)
return 1;
p = *psender;
q = strchr (p, '@');
if (q)
len = q - p;
else
len = strlen (p);
*addr = malloc (len + 1);
if (!*addr)
{
free (*psender);
return 1;
}
memcpy (*addr, p, len);
(*addr)[len] = 0;
if (q)
q++;
else
q = "localhost";
*host = strdup (q);
if (!*host)
{
free (*psender);
free (*addr);
return 1;
}
return 0;
}
static sfsistat_T
pmult_rcpt (pmse_ctx_P pmse_ctx, const char *rcpt, char **argv)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
char *sender, *addr, *host;
char **kv = translate_macros (pmse_ctx, PM_SMST_RCPT, 4);
int i, n;
struct env_arg x;
sfsistat_T status;
char buf1[INT_BUFSIZE_BOUND (uintmax_t)];
char buf2[INT_BUFSIZE_BOUND (uintmax_t)];
if (mu_debug_level_p (pmult_debug, MU_DEBUG_TRACE1))
{
int i;
pthread_mutex_lock (&pmult_debug_mutex);
mu_debug_log_begin ("%sRCPT TO: %s", PRIV_SEID_C (p), rcpt);
if (argv)
{
for (i = 0; argv[i]; i++)
mu_debug_log_cont (" %s", argv[i]);
}
mu_debug_log_nl ();
pthread_mutex_unlock (&pmult_debug_mutex);
}
p->nrcpt++;
if (parse_email_addr (rcpt, &sender, &addr, &host))
{
PMU_DEBUG (pmult_debug, MU_DEBUG_ERROR,
("%sbad recipient address %s", PRIV_SEID_C (p), rcpt));
p->nbadrcpts++;
return SMTP_R_CONT; /* eh? */
}
/* Populate macros array */
kv[0] = "nrcpts";
  snprintf (buf1, sizeof buf1, "%u", p->nrcpt);
  kv[1] = buf1;
  kv[2] = "nbadrcpts";
  snprintf (buf2, sizeof buf2, "%u", p->nbadrcpts);
kv[3] = buf2;
kv[4] = "rcpt_host";
kv[5] = host;
kv[6] = "rcpt_addr";
kv[7] = addr;
kv[8] = NULL;
/* Count arguments and allocate env array */
if (argv)
{
for (n = 0; argv[n]; n++)
;
}
else
n = 0;
x.argv = calloc (n + 2, sizeof (x.argv[0]));
if (!x.argv)
{
mu_error ("%spmult_rcpt: %s", PRIV_SEID_C (p), mu_strerror (ENOMEM));
return SMTP_R_CONT;
}
x.argv[0] = (char *) rcpt;
for (i = 0; i < n; i++)
x.argv[i+1] = argv[i];
x.argv[i+1] = NULL;
x.ident = "envrcpt";
x.srvfun = gacopyz_srv_envrcpt;
status = pmult_runlist (p, rf_envfun, pmse_ctx, &x, kv);
free (x.argv);
free (sender);
free (addr);
free (host);
return status;
}
static sfsistat_T
rf_data (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
int rc = gacopyz_srv_data (gsrv);
return pmult_std_reply (p, pmse_ctx, gsrv, rc, "data", "");
}
static sfsistat_T
pmult_data (pmse_ctx_P pmse_ctx)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
char **kv = translate_macros (pmse_ctx, PM_SMST_DATA, 4);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1, ("%sDATA", PRIV_SEID_C (p)));
return pmult_runlist (p, rf_data, pmse_ctx, NULL, kv);
}
static sfsistat_T
pmult_unknown (pmse_ctx_P pmse_ctx, const char *cmd)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
("%sUnknown command %s", PRIV_SEID_C (p), cmd));
return SMTP_R_CONT;
}
static int
str_in_array (const char *str, void *data)
{
char **ar = data;
for (; *ar; ar++)
if (strcmp (*ar, str) == 0)
return 1;
return 0;
}
/* The macros in this array are not cleared by Milter abort requests */
static char *immutable_macros[] = {
"r",
"s",
"client_addr",
"client_name",
"client_port",
"client_ptr",
NULL
};
static sfsistat_T
rf_abort (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
gacopyz_srv_clear_macros_pred (gsrv, str_in_array, immutable_macros);
gacopyz_srv_abort (gsrv);
return SMTP_R_CONT;
}
static sm_ret_T
pmult_abort (pmse_ctx_P pmse_ctx)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
if (p)
{
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1, ("%sABORT", PRIV_SEID_C (p)));
pmult_runlist (p, rf_abort, pmse_ctx, NULL, NULL);
pmult_clear (p);
}
return SM_SUCCESS;
}
struct body_chunk
{
const char *ident;
int (*srvfun) (gacopyz_srv_t, unsigned char *, size_t);
size_t size;
unsigned char *buf;
};
static sfsistat_T
rf_body (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
struct body_chunk *bc = data;
int rc = bc->srvfun (gsrv, bc->buf, bc->size);
sfsistat_T status = pmult_std_reply (p, pmse_ctx, gsrv, rc, bc->ident, NULL);
  /* MeTA1 won't call EOM if the _msg handler returns SMTP_R_CONT. Why?
     I neither know nor care. */
return status == SMTP_R_CONT ? SMTP_R_OK : status;
}
static int
flush_buf (struct pmult_priv_data *p, unsigned char *buf, size_t start,
size_t end)
{
if (end > start)
{
int rc = mu_stream_write (p->hdrstream, (char*) (buf + start),
end - start, NULL);
if (rc)
{
mu_error ("%smu_stream_sequential_write: %s",
PRIV_SEID_C (p), mu_strerror (rc));
return 1;
}
}
return 0;
}
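/* Feed LEN bytes from BUF to the header scanner (see the
   pmult_msg_state comment above).  Header material is accumulated in
   p->hdrstream; on return *POFF is the offset of the first byte not
   consumed by the scanner, i.e. the beginning of the body once the
   blank line has been seen.  Returns non-zero if writing to the header
   stream fails. */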
static int
collect_headers (struct pmult_priv_data *p, unsigned char *buf, size_t len,
size_t *poff)
{
size_t start = 0, off = 0;
if (p->state == pmult_msg_state_initial)
{
mu_memory_stream_create (&p->hdrstream, MU_STREAM_RDWR);
p->state = pmult_msg_state_headers;
}
while (off < len
&& p->state != pmult_msg_state_body
&& p->state != pmult_msg_state_eom)
{
switch (p->state)
{
case pmult_msg_state_headers:
for (; off < len; off++)
if (buf[off] == '\r')
{
if (flush_buf (p, buf, start, off))
return 1;
off++;
start = off;
p->state = pmult_msg_state_cr1;
break;
}
break;
case pmult_msg_state_cr1:
switch (buf[off++])
{
case '\n':
p->state = pmult_msg_state_crlf1;
break;
default:
p->state = pmult_msg_state_headers;
break;
}
break;
case pmult_msg_state_crlf1:
switch (buf[off++])
{
case '.':
p->state = pmult_msg_state_dot;
break;
case '\r':
if (flush_buf (p, buf, start, off))
return 1;
start = off + 1;
p->state = pmult_msg_state_cr2;
break;
default:
p->state = pmult_msg_state_headers;
}
break;
case pmult_msg_state_cr2:
if (buf[off++] == '\n')
p->state = pmult_msg_state_body;
else
p->state = pmult_msg_state_crlf1;
break;
case pmult_msg_state_dot:
if (buf[off] == '\r')
{
if (flush_buf (p, buf, start, off))
return 1;
off++;
start = off;
p->state = pmult_msg_state_dot_cr;
}
else
p->state = pmult_msg_state_headers;
break;
case pmult_msg_state_dot_cr:
if (buf[off] == '\n')
{
p->state = pmult_msg_state_eom;
off++;
}
else
p->state = pmult_msg_state_headers;
break;
default:
abort ();
}
}
if (flush_buf (p, buf, start, off))
return 1;
*poff = off;
return 0;
}
struct header
{
const char *name;
const char *value;
};
static sfsistat_T
rf_header (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
struct header *hp = data;
int rc = gacopyz_srv_header (gsrv, (char*)hp->name, (char*)hp->value);
pmult_std_reply (p, pmse_ctx, gsrv, rc, "header", NULL);
/* FIXME: do I need to analyze its return? */
return SMTP_R_OK;
}
static sfsistat_T
rf_eoh (pmse_ctx_P pmse_ctx, gacopyz_srv_t gsrv, void *data)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
int rc = gacopyz_srv_eoh (gsrv);
pmult_std_reply (p, pmse_ctx, gsrv, rc, "eoh", NULL);
/* FIXME: do I need to analyze its return? */
return SMTP_R_OK;
}
#define KV_ARRAY(p) (((p)->argv && (p)->argv[0]) ? (p)->argv : NULL)
static int
process_headers (pmse_ctx_P pmse_ctx, struct pmult_priv_data *p)
{
mu_off_t size;
size_t count, i;
int rc;
mu_transport_t tbuf[2];
/* FIXME-MU: This could be largely simplified if mu_header_t had
a _set_stream method. */
rc = mu_stream_size (p->hdrstream, &size);
if (rc)
{
mu_error (_("%scannot get the size of the header stream: %s"),
PRIV_SEID_C (p), mu_strerror (rc));
return 1;
}
mu_stream_ioctl (p->hdrstream, MU_IOCTL_TRANSPORT, MU_IOCTL_OP_GET, tbuf);
rc = mu_header_create (&p->hdr, (char*) tbuf[0], size);
if (rc)
{
mu_error (_("%scannot create header: %s"),
PRIV_SEID_C (p), mu_strerror (rc));
return 1;
}
/* FIXME: mu_header_get_iterator would be in place here. */
mu_header_get_field_count (p->hdr, &count);
for (i = 1; i <= count; i++)
{
struct header h;
if (mu_header_sget_field_name (p->hdr, i, &h.name)
|| mu_header_sget_field_value (p->hdr, i, &h.value))
continue;
pmult_runlist0 (p, rf_header, pmse_ctx, &h, KV_ARRAY (p));
}
pmult_runlist0 (p, rf_eoh, pmse_ctx, NULL, KV_ARRAY (p));
return 0;
}
#define EOM_MARK "\r\n.\r\n"
#define EOM_MARK_LEN (sizeof (EOM_MARK) - 1)
static sfsistat_T
pmult_msg_handler (pmse_ctx_P pmse_ctx, unsigned char *buf, size_t len)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
struct body_chunk bc;
if (p)
{
if (mu_debug_level_p (pmult_debug, MU_DEBUG_TRACE7))
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
("%sBODY %lu %.*s",
PRIV_SEID_C (p), (unsigned long) len, (int) len, buf));
else
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1,
("%sBODY %lu",
PRIV_SEID_C (p), (unsigned long) len));
}
else
return SMTP_R_OK;
if (p->state != pmult_msg_state_body)
{
size_t off;
if (collect_headers (p, buf, len, &off))
return SMTP_R_ACCEPT; /* Better safe than sorry. */
if (p->state == pmult_msg_state_eom)
process_headers (pmse_ctx, p);
else
{
if (p->state != pmult_msg_state_body)
return SMTP_R_OK; /* See comment to rf_body */
if (process_headers (pmse_ctx, p))
return SMTP_R_ACCEPT;
}
len -= off;
buf += off;
}
/* FIXME: Can the marker be split between two successive calls? */
if (len >= EOM_MARK_LEN
&& memcmp (buf + len - EOM_MARK_LEN, EOM_MARK, EOM_MARK_LEN) == 0)
len -= 3;
bc.ident = "body";
bc.srvfun = gacopyz_srv_body;
bc.size = len;
bc.buf = buf;
return pmult_runlist0 (p, rf_body, pmse_ctx, &bc, NULL);
}
static sfsistat_T
pmult_msg (pmse_ctx_P pmse_ctx, unsigned char *buf, size_t len)
{
sfsistat_T rc;
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
PRIV_LOCK (p);
rc = pmult_msg_handler (pmse_ctx, buf, len);
PRIV_UNLOCK (p);
return rc;
}
static sfsistat_T
pmult_eom (pmse_ctx_P pmse_ctx)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
char **kv = translate_macros (pmse_ctx, PM_SMST_DOT, 0);
struct body_chunk bc;
sfsistat_T rc;
PMU_DEBUG (pmult_debug, MU_DEBUG_TRACE1, ("%sEOM", PRIV_SEID_C (p)));
bc.ident = "eom";
bc.srvfun = gacopyz_srv_eom;
bc.size = 0;
bc.buf = (unsigned char*) "";
rc = pmult_runlist (p, rf_body, pmse_ctx, &bc, kv);
if (p->body_chunks)
{
p->state = pmult_msg_state_initial;
p->hdrbuf = malloc (PMILTER_CHUNK_SIZE);
if (!p->hdrbuf)
return SMTP_R_ACCEPT; /* FIXME: or better rc? */
return SMTP_R_RPLCMSG;
}
pmult_clear (p);
return rc;
}
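/* Message replacement handler, invoked after pmult_eom has returned
   SMTP_R_RPLCMSG: hand the replacement message back to MeTA1 one chunk
   at a time.  The collected headers are replayed first from
   p->hdrstream in PMILTER_CHUNK_SIZE pieces, followed by the body
   chunks stored in the opool by cb_reply.  SMTP_R_CONT means more data
   follows; SMTP_R_OK ends the replacement. */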
sfsistat_T
pmult_msg_rplc (pmse_ctx_P pmse_ctx,
const unsigned char **pmsgchunk, size_t *pmsglen)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
size_t rest;
int rc;
switch (p->state)
{
case pmult_msg_state_initial:
mu_stream_seek (p->hdrstream, 0, SEEK_SET, NULL);
p->state = pmult_msg_state_headers;
/* fall through */
case pmult_msg_state_headers:
rc = mu_stream_read (p->hdrstream, (char*) p->hdrbuf,
PMILTER_CHUNK_SIZE, &rest);
if (rc)
{
mu_error (_("reading from header stream failed: %s"),
mu_strerror (rc));
return SMTP_R_TEMP;
}
if (rest)
{
*pmsgchunk = p->hdrbuf;
*pmsglen = rest;
return SMTP_R_CONT;
}
free (p->hdrbuf);
p->hdrbuf = NULL;
rc = mu_opool_get_iterator (p->body_chunks, &p->body_itr);
if (rc)
{
mu_error (_("%s failed: %s"),
"mu_opool_get_iterator", mu_strerror (rc));
return SMTP_R_TEMP;
}
/* The \r\n delimiter was included in hdrstream. */
p->state = pmult_msg_state_body;
/* fall through */
case pmult_msg_state_body:
if (mu_iterator_is_done (p->body_itr))
{
mu_iterator_destroy (&p->body_itr);
return SMTP_R_OK;
}
mu_iterator_current_kv (p->body_itr,
(const void**)pmsglen, (void **)pmsgchunk);
mu_iterator_next (p->body_itr);
return SMTP_R_CONT;
default:
abort ();
}
}
sfsistat_T
pmult_msg_rplc_stat (pmse_ctx_P pmse_ctx, sm_ret_T status)
{
struct pmult_priv_data *p = sm_pmfi_get_ctx_se (pmse_ctx);
pmult_clear (p);
return SMTP_R_OK;
}
static sm_ret_T
pmult_signal (pmg_ctx_P pmg_ctx, int sig)
{
mu_diag_output (MU_DIAG_INFO, _("got signal %d"), sig);
return SM_SUCCESS;
}
static pmilter_T pmilter = {
"pmult",
LPMILTER_VERSION,
SM_SCAP_PM_ALL,
0,
0,
0,
pmult_negotiate,
pmult_connect,
pmult_helo,
pmult_mail,
pmult_rcpt,
pmult_data,
pmult_msg,
pmult_eom,
pmult_abort,
pmult_close,
pmult_unknown,
pmult_signal,
NULL, /* pmfi_starttls */
NULL, /* pmfi_auth */
pmult_msg_rplc,
pmult_msg_rplc_stat
};
void *
main_thread (void *p)
{
pmg_ctx_P pmg_ctx = p;
mu_diag_output (MU_DIAG_INFO, _("%s starting"), program_version);
SM_ASSERT (sm_pmfi_start (pmg_ctx, &pmilter));
mu_diag_output (MU_DIAG_INFO, _("%s terminated"), program_version);
exit (EX_OK);
}
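/* Block the termination signals in the calling thread and wait for one
   of them to arrive.  Used by main: the pmilter loop runs in a
   separate thread (main_thread above), while the initial thread sleeps
   in sigwait until the program is asked to terminate. */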
int
wait_for_signal ()
{
sigset_t set;
int rc;
sigemptyset (&set);
sigaddset (&set, SIGHUP);
sigaddset (&set, SIGTERM);
sigaddset (&set, SIGSEGV);
sigaddset (&set, SIGABRT);
/* sigaddset (&set, SIGINT); */
rc = pthread_sigmask (SIG_BLOCK, &set, NULL);
if (rc)
{
mu_error (_("failed to set up signals: %s"),
mu_strerror (errno));
return EX_SOFTWARE;
}
sigwait (&set, &rc);
return EX_OK;
}
static void
log_setup (const char *stream)
{
if (logger_select (stream))
{
mu_error (_("unsupported logger stream: %s"), stream);
exit (EX_USAGE);
}
logger_open ();
}
void
alloc_die_func ()
{
mu_error ("not enough memory");
abort ();
}
struct mu_cli_setup cli = {
.optv = options,
.cfg = pmult_cfg_param,
.prog_doc = prog_doc,
};
int
main (int argc, char **argv)
{
int rc;
size_t count;
pmg_ctx_P pmg_ctx;
uint32_t major, minor, patchlevel;
pthread_t tid;
mf_init_nls ();
mu_alloc_die_hook = alloc_die_func;
mu_set_program_name (argv[0]);
/* Set up debugging */
pmult_debug = mu_debug_register_category ("pmult");
/* Set default logging */
log_stream = stderr_closed_p() ? "syslog" : "stderr";
log_setup (log_stream);
pmult_cfg_init ();
mf_getopt (&cli, &argc, &argv, capa, MF_GETOPT_DEFAULT);
if (want_auth_macros)
request_auth_macros ();
if (log_to_stderr >= 0)
log_stream = log_to_stderr ? "stderr" : "syslog";
log_setup (log_stream);
if (!portspec)
{
mu_error (_("URL to listen on was not specified"));
exit (EX_CONFIG);
}
if (!client_list || mu_list_count (client_list, &count) || count == 0)
{
mu_error (_("no clients configured"));
exit (EX_CONFIG);
}
SM_ASSERT (sm_pmfi_init (&pmg_ctx));
SM_ASSERT (sm_pmfi_version (pmg_ctx, &major, &minor, &patchlevel));
if (major != LPMILTER_VERSION_MAJOR)
{
mu_error (_("version mismatch: compile_time=%d, run_time=%d"),
LPMILTER_VERSION_MAJOR, major);
exit (EX_CONFIG);
}
sm_pmfi_setdbg (pmg_ctx, pmilter_debug_level);
SM_ASSERT (sm_pmfi_setconn (pmg_ctx, portspec));
SM_ASSERT (sm_pmfi_set_ctx_g (pmg_ctx, NULL));
if (max_threads_soft)
{
#if defined(HAVE_STRUCT_PMILTER_S_PMFI_THRMAX_S)
pmilter.pmfi_thrmax_s = max_threads_soft;
#else
mu_diag_output (MU_DIAG_WARNING, "Cannot set soft thread limit");
#endif
}
if (max_threads_hard)
{
#if defined(HAVE_STRUCT_PMILTER_S_PMFI_THRMAX_H)
pmilter.pmfi_thrmax_h = max_threads_hard;
#else
mu_diag_output (MU_DIAG_WARNING, "Cannot set hard thread limit");
#endif
}
if (max_pmilter_fd)
{
#if defined(HAVE_STRUCT_PMILTER_S_PMFI_FDMAX)
pmilter.pmfi_fdmax = max_pmilter_fd;
#else
mu_diag_output (MU_DIAG_WARNING, "Cannot set fd limit");
#endif
}
if (pidfile)
{
rc = mu_daemon_create_pidfile (pidfile);
if (rc)
mu_error (_("cannot create PID file `%s': %s"),
pidfile, mu_strerror (rc));
}
if (no_sig_handler)
main_thread (pmg_ctx);
else
{
rc = pthread_create (&tid, NULL, main_thread, pmg_ctx);
if (rc)
{
mu_error (_("cannot create main thread: %s"),
mu_strerror (errno));
exit (EX_SOFTWARE);
}
return wait_for_signal ();
}
}
/*
Local Variables:
c-file-style: "gnu"
End:
*/
/* EOF */