redmine

Bug fixes for process splitting

... ... @@ -68,8 +68,7 @@
#define FANOTIFY_MARKMASK (FAN_OPEN|FAN_MODIFY|FAN_CLOSE|FAN_ONDIR|FAN_EVENT_ON_CHILD)
#define INOTIFY_FLAGS 0
//(FD_CLOEXEC)
#define INOTIFY_FLAGS (IN_CLOEXEC)
#define INOTIFY_MARKMASK (IN_ATTRIB|IN_CLOSE_WRITE|IN_CREATE|IN_DELETE|IN_DELETE_SELF|IN_MOVE_SELF|IN_MOVED_FROM|IN_MOVED_TO|IN_MODIFY|IN_DONT_FOLLOW)
... ... @@ -197,3 +196,6 @@ filesz:1M\n\
#define DEFAULT_CG_GROUPNAME "clsync/%PID%"
// In nanoseconds
#define OUTPUT_LOCK_TIMEOUT (100*1000*1000)
#define WAITPID_TIMED_GRANULARITY (30*1000*1000)
... ...
... ... @@ -170,7 +170,11 @@ void _critical(const char *const function_name, const char *fmt, ...) {
if (*quiet)
return;
pthread_mutex_lock(error_mutex_p);
struct timespec abs_time;
clock_gettime(CLOCK_REALTIME , &abs_time);
abs_time.tv_sec += 1;
pthread_mutex_timedlock(error_mutex_p, &abs_time);
outputmethod_t method = *outputmethod;
... ... @@ -224,7 +228,7 @@ void _error(const char *const function_name, const char *fmt, ...) {
if (*verbose < 1)
return;
pthread_mutex_lock(error_mutex_p);
pthread_mutex_reltimedlock(error_mutex_p, 0, OUTPUT_LOCK_TIMEOUT);
pthread_t thread = pthread_self();
pid_t pid = getpid();
... ... @@ -251,7 +255,7 @@ void _info(const char *const function_name, const char *fmt, ...) {
if (*verbose < 3)
return;
pthread_mutex_lock(error_mutex_p);
pthread_mutex_reltimedlock(error_mutex_p, 0, OUTPUT_LOCK_TIMEOUT);
pthread_t thread = pthread_self();
pid_t pid = getpid();
... ... @@ -276,7 +280,7 @@ void _warning(const char *const function_name, const char *fmt, ...) {
if (*verbose < 2)
return;
pthread_mutex_lock(error_mutex_p);
pthread_mutex_reltimedlock(error_mutex_p, 0, OUTPUT_LOCK_TIMEOUT);
pthread_t thread = pthread_self();
pid_t pid = getpid();
... ... @@ -302,7 +306,7 @@ void _debug(int debug_level, const char *const function_name, const char *fmt, .
if (debug_level > *debug)
return;
pthread_mutex_lock(error_mutex_p);
pthread_mutex_reltimedlock(error_mutex_p, 0, OUTPUT_LOCK_TIMEOUT);
pthread_t thread = pthread_self();
pid_t pid = getpid();
... ...
... ... @@ -292,6 +292,32 @@ int syntax() {
int ncpus;
pid_t parent_pid;
#ifndef WAITPID_TIMED_GRANULARITY
// Fallback for standalone compilation; normally defined in the project header
# define WAITPID_TIMED_GRANULARITY (30*1000*1000)
#endif
/* Waits for "child_pid" to terminate, but no longer than "sec" seconds
 * plus "nsec" nanoseconds, polling with WNOHANG once per
 * WAITPID_TIMED_GRANULARITY nanoseconds.
 *
 * Returns:
 *   child_pid - the child was reaped (or was already reaped: ECHILD);
 *   0         - the timeout expired while the child was still alive;
 *   -1        - waitpid() failed with anything other than ECHILD.
 *
 * If status_p != NULL, the child's wait status is stored there on a
 * successful reap (it is left untouched on timeout or error).
 */
pid_t waitpid_timed(pid_t child_pid, int *status_p, __time_t sec, __syscall_slong_t nsec) {
	struct timespec ts;
	int status;

	ts.tv_sec  = sec;
	ts.tv_nsec = nsec;

	while (ts.tv_sec >= 0) {
		pid_t rc = waitpid(child_pid, &status, WNOHANG);

		if (rc < 0) {
			if (errno == ECHILD)
				// Already reaped elsewhere: treat as success
				return child_pid;
			return -1;
		}

		if (rc == child_pid) {
			// The child terminated: report its status and stop waiting.
			// (Copy status only here: with WNOHANG and rc == 0 the
			// "status" variable is left uninitialized by waitpid().)
			if (status_p != NULL)
				*status_p = status;
			return child_pid;
		}

		// Child still running: actually sleep one granule before
		// charging it against the remaining budget (otherwise the
		// loop spins and the "timeout" expires with no wall time
		// having passed).
		{
			struct timespec granule;
			granule.tv_sec  = 0;
			granule.tv_nsec = WAITPID_TIMED_GRANULARITY;
			nanosleep(&granule, NULL);
		}

		ts.tv_nsec -= WAITPID_TIMED_GRANULARITY;
		if (ts.tv_nsec < 0) {
			ts.tv_nsec += 1000*1000*1000;
			ts.tv_sec--;
		}
	}

	return 0;
}
int parent_isalive() {
int rc;
debug(12, "parent_pid == %u", parent_pid);
... ... @@ -320,16 +346,37 @@ int sethandler_sigchld(void (*handler)()) {
return 0;
}
pid_t myfork() {
# ifndef __linux__
/* Watchdog thread body for systems without prctl(PR_SET_PDEATHSIG):
 * once a second it checks whether this process has been re-parented to
 * init (getppid() == 1, i.e. the original parent died) and, if so,
 * invokes child_sigchld(). The argument is unused; the loop never
 * terminates in practice. */
void *watchforparent(void *parent_pid_p) {
	(void)parent_pid_p;	// unused; kept for the pthread start-routine signature

	for (;;) {
		if (getppid() == 1)
			child_sigchld();

		sleep(1);
	}

	return NULL;	// unreachable; silences missing-return warnings
}
# endif
pthread_t pthread_watchforparent;
pid_t fork_helper() {
pid_t pid = fork();
if (!pid) {
if (!pid) { // is child?
parent_pid = getppid();
sethandler_sigchld(child_sigchld);
// Anti-zombie:
# ifdef __linux__
// Linux have support of "prctl(PR_SET_PDEATHSIG, signal);"
sethandler_sigchld(child_sigchld);
prctl(PR_SET_PDEATHSIG, SIGCHLD);
# else
pthread_create(&pthread_watchforparent, NULL, watchforparent, &parent_pid);
# endif
debug(20, "parent_pid == %u", parent_pid);
return 0;
}
return pid;
... ...
... ... @@ -34,7 +34,8 @@ extern char *parameter_expand(
const char *(*parameter_get)(const char *variable_name, void *arg),
void *parameter_get_arg
);
extern pid_t myfork();
extern pid_t fork_helper();
extern int parent_isalive();
extern int sethandler_sigchld(void (*handler)());
extern pid_t waitpid_timed(pid_t child_pid, int *status_p, __time_t sec, __syscall_slong_t nsec);
... ...
... ... @@ -61,7 +61,7 @@ int inotify_add_watch_dir(ctx_t *ctx_p, indexes_t *indexes_p, const char *const
int inotify_wait(ctx_t *ctx_p, struct indexes *indexes_p, struct timeval *tv_p) {
int inotify_d = (int)(long)ctx_p->fsmondata;
debug(3, "select with timeout %li secs.", tv_p->tv_sec);
debug(3, "select with timeout %li secs (fd == %u).", tv_p->tv_sec, inotify_d);
fd_set rfds;
FD_ZERO(&rfds);
FD_SET(inotify_d, &rfds);
... ...
... ... @@ -420,7 +420,7 @@ int __privileged_kill_child_itself(pid_t child_pid, int signal) {
return errno;
}
sleep(1); // TODO: replace this sleep() with something to do not sleep if process already died
waitpid_timed(child_pid, NULL, SLEEP_SECONDS, 0);
} else
return ENOENT;
... ... @@ -782,7 +782,9 @@ int privileged_handler(ctx_t *ctx_p)
sigaddset(&sigset, SIGQUIT);
sigaddset(&sigset, SIGTERM);
sigaddset(&sigset, SIGINT);
# ifdef __linux__
sigaddset(&sigset, SIGCHLD);
# endif
critical_on(pthread_sigmask(SIG_UNBLOCK, &sigset, NULL));
# ifndef __linux__
... ... @@ -889,12 +891,10 @@ int privileged_handler(ctx_t *ctx_p)
debug(20, "PA_INOTIFY_INIT");
cmd_p->ret = (void *)(long)inotify_init();
break;
# ifndef INOTIFY_OLD
case PA_INOTIFY_INIT1:
debug(20, "PA_INOTIFY_INIT1");
cmd_p->ret = (void *)(long)inotify_init1((long)cmd_p->arg.ctx_p);
cmd_p->ret = (void *)(long)inotify_init1(cmd_p->arg.uint32_v);
break;
# endif
case PA_INOTIFY_ADD_WATCH: {
struct pa_inotify_add_watch_arg *arg_p = (void *)&cmd_p->arg.inotify_add_watch;
debug(20, "PA_INOTIFY_ADD_WATCH(%u, <%s>, 0x%o)", arg_p->fd, arg_p->pathname, arg_p->mask);
... ... @@ -1482,7 +1482,7 @@ int privileged_init(ctx_t *ctx_p)
cmd_init(cmd_p);
// Running the privileged helper
SAFE ( (helper_pid = myfork()) == -1, return errno);
SAFE ( (helper_pid = fork_helper()) == -1, return errno);
if (!helper_pid)
exit(privileged_handler(ctx_p));
critical_on(!helper_isalive());
... ... @@ -1557,6 +1557,7 @@ int privileged_deinit(ctx_t *ctx_p)
{
int status;
__privileged_kill_child_itself(helper_pid, SIGKILL);
debug(9, "waitpid(%u, ...)", helper_pid);
waitpid(helper_pid, &status, 0);
}
/*
... ...
... ... @@ -57,3 +57,21 @@ int pthread_cond_destroy_shared(pthread_cond_t *cond_p) {
shm_free(cond_p);
return rc;
}
/* Locks *mutex_p with a RELATIVE timeout of tv_sec seconds plus tv_nsec
 * nanoseconds, by converting it to the absolute CLOCK_REALTIME deadline
 * that pthread_mutex_timedlock() requires.
 *
 * Returns 0 on success, -1 if clock_gettime() fails (errno is set by
 * it), otherwise the pthread_mutex_timedlock() error code (e.g.
 * ETIMEDOUT).
 */
int pthread_mutex_reltimedlock(pthread_mutex_t *mutex_p, __time_t tv_sec, __syscall_slong_t tv_nsec) {
	struct timespec abs_time;

	if (clock_gettime(CLOCK_REALTIME, &abs_time))
		return -1;

	abs_time.tv_sec  += tv_sec;
	abs_time.tv_nsec += tv_nsec;

	// Normalize: tv_nsec must end up in [0; 10^9), otherwise
	// pthread_mutex_timedlock() fails with EINVAL. A loop (and ">=",
	// not ">") is required: the caller may pass a whole second or
	// more in tv_nsec.
	while (abs_time.tv_nsec >= 1000*1000*1000) {
		abs_time.tv_sec++;
		abs_time.tv_nsec -= 1000*1000*1000;
	}

	return pthread_mutex_timedlock(mutex_p, &abs_time);
}
... ...
... ... @@ -23,4 +23,5 @@ extern int pthread_mutex_init_shared(pthread_mutex_t **mutex_p);
extern int pthread_mutex_destroy_shared(pthread_mutex_t *mutex_p);
extern int pthread_cond_init_shared(pthread_cond_t **cond_p);
extern int pthread_cond_destroy_shared(pthread_cond_t *cond_p);
extern int pthread_mutex_reltimedlock(pthread_mutex_t *mutex_p, __time_t tv_sec, __syscall_slong_t tv_nsec);
... ...
... ... @@ -961,12 +961,12 @@ char *sync_path_abs2rel(ctx_t *ctx_p, const char *path_abs, size_t path_abs_len,
debug(3, "\"%s\" (len: %i) --%i--> \"%s\" (len: %i) + ",
path_abs, path_abs_len, path_rel[path_rel_len - 1] == '/',
ctx_p->watchdirwslash, watchdirlen+1);
if(path_rel[path_rel_len - 1] == '/')
if (path_rel[path_rel_len - 1] == '/')
path_rel[--path_rel_len] = 0x00;
debug(3, "\"%s\" (len: %i)", path_rel, path_rel_len);
#endif
if(path_rel_len_p != NULL)
if (path_rel_len_p != NULL)
*path_rel_len_p = path_rel_len;
return path_rel;
... ... @@ -978,15 +978,15 @@ pid_t clsyncapi_fork(ctx_t *ctx_p) {
// Cleaning stale pids. TODO: Optimize this. Remove this GC.
int i=0;
while(i < ctx_p->children) {
if(waitpid(ctx_p->child_pid[i], NULL, WNOHANG)<0)
while (i < ctx_p->children) {
if (waitpid(ctx_p->child_pid[i], NULL, WNOHANG)<0)
if(errno==ECHILD)
ctx_p->child_pid[i] = ctx_p->child_pid[--ctx_p->children];
i++;
}
// Too many children
if(ctx_p->children >= MAXCHILDREN) {
if (ctx_p->children >= MAXCHILDREN) {
errno = ECANCELED;
return -1;
}
... ... @@ -1795,13 +1795,16 @@ int sync_notify_init(ctx_t *ctx_p) {
#endif
#ifdef INOTIFY_SUPPORT
case NE_INOTIFY: {
#if INOTIFY_OLD
ctx_p->fsmondata = (void *)(long)privileged_inotify_init();
#else
ctx_p->fsmondata = (void *)(long)privileged_inotify_init1(INOTIFY_FLAGS);
#endif
# ifdef INOTIFY_OLD
ctx_p->fsmondata = (void *)(long)inotify_init();
# if INOTIFY_FLAGS != 0
# warning Do not know how to set inotify flags (too old system)
# endif
# else
ctx_p->fsmondata = (void *)(long)inotify_init1(INOTIFY_FLAGS);
# endif
if ((long)ctx_p->fsmondata == -1) {
error("cannot inotify_init(%i).", INOTIFY_FLAGS);
error("cannot inotify_init1(%i).", INOTIFY_FLAGS);
return -1;
}
... ... @@ -3180,6 +3183,7 @@ int sync_tryforcecycle(pthread_t pthread_parent) {
if (pthread_cond_timedwait(pthread_cond_state, pthread_mutex_state, &time_timeout) != ETIMEDOUT)
return 0;
#else
debug(9, "sleep("TOSTR(SLEEP_SECONDS)")");
sleep(SLEEP_SECONDS); // TODO: replace this with pthread_cond_timedwait()
#endif
... ... @@ -3705,9 +3709,6 @@ int sync_run(ctx_t *ctx_p) {
}
}
if ((ret=privileged_init(ctx_p)))
return ret;
#ifdef CLUSTER_SUPPORT
// Initializing cluster subsystem
... ... @@ -3766,21 +3767,24 @@ int sync_run(ctx_t *ctx_p) {
#ifdef ENABLE_SOCKET
// Creating control socket
if(ctx_p->socketpath != NULL)
if (ctx_p->socketpath != NULL)
ret = control_run(ctx_p);
#endif
if(!ctx_p->flags[ONLYINITSYNC]) {
if (!ctx_p->flags[ONLYINITSYNC]) {
// Initializing FS monitor kernel subsystem in this userspace application
if(sync_notify_init(ctx_p))
if (sync_notify_init(ctx_p))
return errno;
}
if ((ret=privileged_init(ctx_p)))
return ret;
if (!ctx_p->flags[ONLYINITSYNC]) {
// Marking file tree for FS monitor
debug(30, "Running recursive notify marking function");
ret = sync_mark_walk(ctx_p, ctx_p->watchdir, &indexes);
if(ret) return ret;
if (ret) return ret;
}
// "Infinite" loop of processling the events
... ... @@ -3887,6 +3891,7 @@ int sync_run(ctx_t *ctx_p) {
#ifdef VERYPARANOID
// One second for another threads
debug(9, "sleep("TOSTR(SLEEP_SECONDS)")");
sleep(SLEEP_SECONDS);
#endif
... ...