I have a couple of tasks running as a service. To kick off the worker I use:
def SvcDoRun(self):
    """Run the Celery worker as a child process of this Windows service.

    Spawns the worker via subprocess, then blocks on the service stop
    event (self.hWaitStop), polling every self.timeout milliseconds.
    When the stop event fires, the worker process is terminated.
    """
    logging.info('Starting {name} service ...'.format(name=self._svc_name_))
    os.chdir(INSTDIR)  # so that proj worker can be found
    logging.info('cwd: ' + os.getcwd())
    self.ReportServiceStatus(win32service.SERVICE_RUNNING)
    command = '{celery_path} -A {proj_dir} worker -f "{log_path}" -l info --hostname=theService@%h'.format(
        celery_path='celery',
        proj_dir=PROJECTDIR,
        log_path=os.path.join(INSTDIR, 'celery.log'))
    logging.info('command: ' + command)
    proc = subprocess.Popen(shlex.split(command))
    logging.info('pid: {pid}'.format(pid=proc.pid))
    self.timeout = 5000  # stop-event poll interval, in milliseconds
    while True:
        # Wait for the service stop signal; wakes every self.timeout ms.
        rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
        if rc == win32event.WAIT_OBJECT_0:
            # Stop signal encountered.
            # FIX: a service has no console, so `print 'Terminate'` was
            # lost (or could raise); log to the service log instead.
            logging.info('Terminate')
            # FIX: Popen.terminate() calls TerminateProcess on Windows,
            # replacing the manual OpenProcess/TerminateProcess/
            # CloseHandle sequence (which leaked the handle if
            # TerminateProcess raised before CloseHandle ran).
            proc.terminate()
            proc.wait()  # reap the child so it does not linger
            break
To kick off the python tasks as a service I use:
def SvcDoRun(self):
    """Periodically (re-)execute the Celery bootstrap script as a service.

    Loops until the service stop event (self.hWaitStop) is signalled;
    on every timeout the bootstrap script is executed again.
    """
    import servicemanager
    servicemanager.LogMsg(servicemanager.EVENTLOG_INFORMATION_TYPE,
                          servicemanager.PYS_SERVICE_STARTED,
                          (self._svc_name_, ''))
    self.timeout = 640000  # 640 seconds / ~10 minutes (value is in milliseconds)
    # This is how long the service will wait to run / refresh itself (see script below)
    while 1:
        try:
            # Wait for service stop signal; if I timeout, loop again.
            rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
            # Check to see if self.hWaitStop happened.
            if rc == win32event.WAIT_OBJECT_0:
                # Stop signal encountered
                servicemanager.LogInfoMsg(self._svc_name_ + " - STOPPED!")  # For Event Log
                break
            else:
                # [actual service code between rests]
                try:
                    # BUG FIX: the original plain string literal
                    # "C:\framework\celery\celery.py" contains the escape
                    # sequence \f (form feed, \x0c), so the path passed to
                    # execfile was "C:<FF>ramework\..." and the script was
                    # never found. A raw string keeps backslashes literal.
                    file_path = r"C:\framework\celery\celery.py"
                    execfile(file_path)  # Execute the script (Python 2 only)
                except Exception:
                    # Best-effort: the service must keep looping even if
                    # one execution of the script fails.
                    traceback.print_exc(file=sys.stdout)
                # [actual service code between rests]
        except Exception:
            traceback.print_exc(file=sys.stdout)
The file 'celery.py' contains the Celery application that registers those tasks, as follows:
# Celery application bootstrap: builds the app and registers the
# framework task modules, pulling settings from the 'config' module.
from __future__ import absolute_import, unicode_literals

from celery import Celery

# Task modules the worker imports at start-up.
_TASK_MODULES = [
    'framework.task1.tasks',
    'framework.task2.tasks',
    'framework.task3.tasks',
    'framework.task4.tasks',
]

app = Celery(include=_TASK_MODULES)
app.config_from_object('config')

if __name__ == '__main__':
    app.start()
The problem: I invoke a task with 'python runTask1.py param1 param2 param3', but sometimes the task never executes. If I then send it again (once or twice), it works. Looking at the RabbitMQ log file, I saw:
=INFO REPORT==== 24-Jun-2017::00:57:37 ===
accepting AMQP connection <0.3846.0> (127.0.0.1:57753 -> 127.0.0.1:5672)
=INFO REPORT==== 24-Jun-2017::00:57:37 ===
connection <0.3846.0> (127.0.0.1:57753 -> 127.0.0.1:5672): user 'guest' authenticated and granted access to vhost '/'
=INFO REPORT==== 24-Jun-2017::00:58:08 ===
accepting AMQP connection <0.3871.0> (127.0.0.1:57821 -> 127.0.0.1:5672)
=INFO REPORT==== 24-Jun-2017::00:58:08 ===
connection <0.3871.0> (127.0.0.1:57821 -> 127.0.0.1:5672): user 'guest' authenticated and granted access to vhost '/'
=INFO REPORT==== 24-Jun-2017::01:10:49 ===
accepting AMQP connection <0.3940.0> (127.0.0.1:57861 -> 127.0.0.1:5672)
=INFO REPORT==== 24-Jun-2017::01:10:49 ===
connection <0.3940.0> (127.0.0.1:57861 -> 127.0.0.1:5672): user 'guest' authenticated and granted access to vhost '/'
=WARNING REPORT==== 24-Jun-2017::01:10:49 ===
closing AMQP connection <0.3940.0> (127.0.0.1:57861 -> 127.0.0.1:5672):
client unexpectedly closed TCP connection
Why does it sometimes work and sometimes not?