If I understood what you are trying to do, subprocess.Popen() is the way to go. Here's a simple class which I think can provide all functionality you want:
from time import sleep
import subprocess
import datetime
import os
class Worker:
    """Run a command in a background subprocess and collect its output.

    The command is started immediately on construction.  Callers can poll
    still_running(), block in wait_to_finish() (with an optional timeout,
    counted from the moment the process was started), or kill() the process
    explicitly.  Captured stdout/stderr plus status markers accumulate in
    ``self.log``.

    NOTE(review): stdout/stderr are only drained after the process ends
    (via communicate()), so a child that writes more than the OS pipe
    buffer while we poll-sleep can block forever; drain incrementally or
    always pass a timeout if large output is possible.
    """

    # Seconds between poll() checks; also bounds the timeout precision.
    _POLL_INTERVAL = 1.0

    def __init__(self, cmd):
        """Start `cmd` (list of program + arguments) in the background."""
        print("%s :: starting subprocess :: %s" % (datetime.datetime.now(), cmd))
        self.cmd = cmd
        self.log = "[running :: %s]\n" % cmd
        # universal_newlines=True makes communicate() return text on both
        # Python 2 and 3 (on 3 it would return bytes, which would break the
        # string concatenation in get_process_output()).
        self.subp = subprocess.Popen(cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     universal_newlines=True)
        self.start_time = datetime.datetime.now()
        # communicate() may only be called once per process; this flag makes
        # get_process_output() safe to reach twice (e.g. wait then kill).
        self._output_collected = False

    def wait_to_finish(self, timeout_seconds=None):
        """Block until the subprocess exits.

        If `timeout_seconds` is given and the process is still alive that
        many seconds after it was started, it is killed.
        """
        while True:
            retcode = self.subp.poll()
            if retcode is not None:
                self.get_process_output()
                self.log += "\n[subprocess finished, return code: %d]\n" % retcode
                print("%s :: subprocess %s exited, retcode=%d"
                      % (datetime.datetime.now(), self.cmd, retcode))
                return
            # Process hasn't finished yet.
            sleep(self._POLL_INTERVAL)
            if timeout_seconds is not None:
                # BUGFIX: the original used timedelta.seconds, which discards
                # whole days, so a timeout spanning >= 1 day never fired.
                elapsed = (datetime.datetime.now() - self.start_time).total_seconds()
                if elapsed > timeout_seconds:
                    print("%s :: subprocess %s :: killing after %d seconds"
                          % (datetime.datetime.now(), self.cmd, timeout_seconds))
                    self.kill()
                    return

    def still_running(self):
        """Return True while the subprocess has not exited yet."""
        return self.subp.poll() is None

    def kill(self):
        """Terminate the subprocess (if still alive) and collect its output."""
        if self.subp.poll() is None:
            # terminate() can raise if the process already exited.
            self.subp.terminate()
        self.get_process_output()
        self.log += "\n[subprocess killed by explicit request]\n"

    def get_process_output(self):
        """Append the process's stdout and stderr to self.log (at most once)."""
        if self._output_collected:
            return
        out, err = self.subp.communicate()
        self.log += out
        self.log += err
        self._output_collected = True
You give the command and the class starts it in the background. You can then wait until it finishes, with an optional timeout (counted from the time the process was started). You can get the process output, and if needed explicitly kill the process.
Here's just a quick example showing its functionality:
# Demo driver: run three child scripts through Worker and print their logs.
# (Python 2 syntax; interpreter path is Windows-specific — adjust as needed.)
# Start two subprocesses in the background
worker1 = Worker([r'c:\python26\python.exe', 'sub1.py'])
worker2 = Worker([r'c:\python26\python.exe', 'sub2.py'])
# Wait for both to finish, kill after 10 seconds timeout
worker1.wait_to_finish(timeout_seconds = 10)
worker2.wait_to_finish(timeout_seconds = 10)
# Start another subprocess giving it 5 seconds to finish
# (sub3.py sleeps for 15 s, so this one is expected to be killed)
worker3 = Worker([r'c:\python26\python.exe', 'sub3.py'])
worker3.wait_to_finish(timeout_seconds = 5)
# Dump the accumulated stdout/stderr + status markers of each worker
print "----LOG1----\n" + worker1.log
print "----LOG2----\n" + worker2.log
print "----LOG3----\n" + worker3.log
sub1.py:
# Demo child script: prints, sleeps 5 s, prints again; exits with code 0.
from time import sleep
print "sub1 output: start"
sleep(5)
print "sub1 output: finish"
sub2.py:
# Demo child script: deliberately raises NameError so it exits with code 1,
# demonstrating that the traceback is captured from stderr into the log.
print "sub2 output: start"
erroneous_command()
sub3.py:
# Demo child script: sleeps 15 s, longer than the 5 s timeout in the demo
# driver, so it gets killed.  The flush ensures the first line reaches the
# pipe before the process is terminated.
from time import sleep
import sys
print "sub3 output: start, sleeping 15 sec"
sys.stdout.flush()
sleep(15)
print "sub3 output: finish"
Here's the output:
2013-11-06 15:31:17.296000 :: starting subprocess :: ['c:\\python26\\python.exe', 'sub1.py']
2013-11-06 15:31:17.300000 :: starting subprocess :: ['c:\\python26\\python.exe', 'sub2.py']
2013-11-06 15:31:23.306000 :: subprocess ['c:\\python26\\python.exe', 'sub1.py'] exited, retcode=0
2013-11-06 15:31:23.309000 :: subprocess ['c:\\python26\\python.exe', 'sub2.py'] exited, retcode=1
2013-11-06 15:31:23.310000 :: starting subprocess :: ['c:\\python26\\python.exe', 'sub3.py']
2013-11-06 15:31:29.314000 :: subprocess ['c:\\python26\\python.exe', 'sub3.py'] :: killing after 5 seconds
----LOG1----
[running :: ['c:\\python26\\python.exe', 'sub1.py']]
sub1 output: start
sub1 output: finish
[subprocess finished, return code: 0]
----LOG2----
[running :: ['c:\\python26\\python.exe', 'sub2.py']]
sub2 output: start
Traceback (most recent call last):
File "sub2.py", line 2, in <module>
erroneous_command()
NameError: name 'erroneous_command' is not defined
[subprocess finished, return code: 1]
----LOG3----
[running :: ['c:\\python26\\python.exe', 'sub3.py']]
sub3 output: start, sleeping 15 sec
[subprocess killed by explicit request]
As far as implementing the scheduling goes, I can suggest a couple of options, but the choice really depends on what your task is:
1) If you can specify the precise scheduling at any point in time, then you can implement a fully synchronous scheduler:
# Skeleton of a synchronous polling scheduler built on Worker (pseudocode).
while True:
    # check time
    # check currently running processes :: workerX.still_running()
    # -> if some are past their timeout, kill them workerX.kill()
    # start new subprocesses according to your scheduling logic
    sleep(1)
2) If you have several well-defined sequences of scripts which you just want to "fire and forget" every 10 seconds, then put each sequence in its own .py script (with 'import Worker'), start all sequences every 10 seconds, and periodically check which sequences have exited in order to collect their logs.
3) If your sequences are defined dynamically and you prefer the "fire-and-forget" approach, then threads would be the best approach.