From a9ca8236dce63f9b7fe6494cd6df31ca3a417f33 Mon Sep 17 00:00:00 2001
From: Martijn Vermaat <martijn@vermaat.name>
Date: Thu, 12 Dec 2013 13:10:16 +0100
Subject: [PATCH] Refactor batch daemon to not be a full UNIX daemon
 implementation

Instead of implementing a full UNIX daemon suitable for init.d in the
batch daemon itself, keep it simple and rely on something like
supervisord for process control.

We now also correctly handle SIGINT (Ctrl+C) and SIGTERM signals.
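
For reference only, a minimal supervisord program section for running the
batch daemon could look like the following; the paths, user, and log file
location are assumptions and not part of this change:

    [program:mutalyzer-batchd]
    command=/usr/local/bin/mutalyzer-batchd   ; assumed install location
    directory=/opt/mutalyzer                  ; assumed working directory
    user=www-data                             ; assumed service user
    autostart=true
    autorestart=true
    stopsignal=TERM                           ; handled gracefully by the daemon
    stopwaitsecs=30                           ; give a running job time to finish
    redirect_stderr=true
    stdout_logfile=/var/log/mutalyzer/batchd.log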
---
 bin/mutalyzer-batchd | 116 ++++++++++++++-----------------------------
 requirements.txt     |   3 --
 2 files changed, 38 insertions(+), 81 deletions(-)

diff --git a/bin/mutalyzer-batchd b/bin/mutalyzer-batchd
index 3d03aad2..84f7869f 100755
--- a/bin/mutalyzer-batchd
+++ b/bin/mutalyzer-batchd
@@ -2,24 +2,16 @@
 """
 Daemon for processing scheduled batch jobs.
 
-We use python-daemon [1] for daemonizing the job processing. This file
-should be run with the mutalyzer directory as working directory.
+The process can be shut down gracefully by sending a SIGINT (Ctrl+C) or SIGTERM
+signal.
 
-@todo: Check if PID dir is writable.
 @todo: Get rid of ugly exception logging.
 @todo: Reload configuration without restarting (for example, on SIGHUP).
-@todo: Use [2] to set process name (and use that in init script).
-
-[1] http://pypi.python.org/pypi/python-daemon/
-[2] http://code.google.com/p/py-setproctitle/
 """
 
 
-import os
-import sys
-from daemon import pidfile, DaemonContext
-from lockfile import LockTimeout
 import signal
+import sys
 import time
 import traceback
 
@@ -28,76 +20,44 @@ from mutalyzer.Db import Batch, Counter
 from mutalyzer.Scheduler import Scheduler
 
 
-def cleanup(signum, stack_frame):
-    """
-    Generate a normal exit signal.
-    """
-    sys.exit(1)
-
-
 def daemonize():
     """
-    Write PID file when it is not locked and daemonize a loop processing
-    scheduled batch jobs.
+    Run forever in a loop processing scheduled batch jobs.
     """
-    pidfile_path = os.path.realpath(config.get('PIDfile'))
-
-    lockfile = pidfile.TimeoutPIDLockFile(pidfile_path, acquire_timeout=1,
-                                          threaded=False)
-
-    context = DaemonContext(working_directory=os.getcwd(),
-                            pidfile=lockfile)
-
-    # To preserve stderr and stdout, add these arguments.
-    #stdin=sys.stdin,
-    #stdout=sys.stdout,
-    #files_preserve=[sys.stdin, sys.stdout]
-
-    # Writing the PID file as root before changing user/group does not seem
-    # to work.
-    #uid=pwd.getpwnam('www-data').pw_uid
-    #gid=grp.getgrnam('www-data').gr_gid
-
-    context.signal_map = {
-        signal.SIGTERM: cleanup,
-        signal.SIGHUP:  'terminate'
-    }
-
-    with context:
-        # Note that any opened files are now closed. This is not a problem for
-        # the config module, since it does not read its file again after
-        # initialisation.
-        database = Batch()
-        counter = Counter()
-
-        scheduler = Scheduler(database)
-
-        def stop_scheduler(signum, stack_frame):
-            scheduler.stop()
-        signal.signal(signal.SIGTERM, stop_scheduler)
-
-        while not scheduler.stopped():
-            # Process batch jobs. This process() method runs while there
-            # exist jobs to run.
-            try:
-                scheduler.process(counter)
-            except Exception as e:
-                f = open('/tmp/batcherror.log', 'a+')
-                f.write('Error (%s): %s\n' % (type(e), str(e)))
-                f.write('%s\n\n' % repr(traceback.format_exc()))
-                f.flush()
-                f.close()
-            if scheduler.stopped():
-                break
-            # Wait a bit and process any possible new jobs.
-            time.sleep(5)
+    database = Batch()
+    counter = Counter()
+    scheduler = Scheduler(database)
+
+    def handle_exit(signum, stack_frame):
+        if scheduler.stopped():
+            sys.stderr.write('mutalyzer-batchd: Terminated\n')
+            sys.exit(1)
+        if signum == signal.SIGINT:
+            sys.stderr.write('mutalyzer-batchd: Hitting Ctrl+C again will terminate any running job!\n')
+        scheduler.stop()
+
+    signal.signal(signal.SIGTERM, handle_exit)
+    signal.signal(signal.SIGINT, handle_exit)
+
+    while not scheduler.stopped():
+        # Process batch jobs. This process() method runs while there
+        # exist jobs to run.
+        try:
+            scheduler.process(counter)
+        except Exception as e:
+            f = open('/tmp/batcherror.log', 'a+')
+            f.write('Error (%s): %s\n' % (type(e), str(e)))
+            f.write('%s\n\n' % repr(traceback.format_exc()))
+            f.flush()
+            f.close()
+        if scheduler.stopped():
+            break
+        # Wait a bit and process any possible new jobs.
+        time.sleep(1)
+
+    sys.stderr.write('mutalyzer-batchd: Graceful shutdown\n')
+    sys.exit(0)
 
 
 if __name__ == '__main__':
-    try:
-        daemonize()
-    except LockTimeout:
-        # If we want to see something on stdout, we have to add it to the
-        # {files_preserve} argument of the DaemonContext.
-        #print 'Mutalyzer batch daemon is already running.'
-        sys.exit(1)
+    daemonize()
diff --git a/requirements.txt b/requirements.txt
index da91f3e3..f4a36d34 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,12 +1,9 @@
 MySQL-python==1.2.4
-argparse==1.2.1
 biopython==1.63b
 configobj==4.7.2
-lockfile==0.9.1
 lxml==3.2.4
 nose==1.3.0
 pyparsing==2.0.1
-python-daemon==1.6
 pytz==2013.8
 requests==2.0.1
 simplejson==3.3.1
-- 
GitLab