import io
import os
import threading
import uuid


class InputStreamChunker(threading.Thread):
    '''
    Threaded object / code that mediates reading output from a stream,
    detects "separation markers" in the stream and spits out chunks
    of original stream, split when ends of chunk are encountered.

    Results are made available as a list of filled file-like objects
    (your choice). Results are accessible either "asynchronously"
    (you can poll at will for results in a non-blocking way) or
    "synchronously" by exposing a "subscribe and wait" system based
    on threading.Event flags.

    Usage:
    - instantiate this object
    - give our input pipe as "stdout" to other subprocess and start it:
        Popen(..., stdout=th.input, ...)
    - (optional) subscribe to data_available event
    - pull resulting file-like objects off .data
      (if you are "messing" with .data from outside of the thread,
      be courteous and wrap the thread-unsafe manipulations between:
          obj.data_unoccupied.clear()
          ... mess with .data
          obj.data_unoccupied.set()
      The thread will not touch obj.data for the duration and
      will block reading.)

    License: Public domain
    Absolutely no warranty provided
    www.accentsolution.com/ddotsenko
    '''

    def __init__(self, delimiter=None, outputObjConstructor=None):
        '''
        delimiter - str or bytes considered a delimiter for the stream.
            Defaults to a random UUID string, so chunks are then only
            produced via .flush() / .stop().
        outputObjConstructor - (callable, args, kwargs) triple; an
            instance of callable(*args, **kwargs) collects each chunk
            and is attached to self.data. Defaults to io.BytesIO.
        '''
        super(InputStreamChunker, self).__init__()

        self._data_available = threading.Event()
        self._data_available.clear()  # parent will .wait() on this for results
        self._data = []
        self._data_unoccupied = threading.Event()
        self._data_unoccupied.set()  # parent clears this while changing .data from outside
        self._r, self._w = os.pipe()  # takes all inputs; self.input = public pipe in
        self._stop = False
        if not delimiter:
            delimiter = str(uuid.uuid1())
        if isinstance(delimiter, str):
            # the pipe carries bytes; normalize a str delimiter up front
            delimiter = delimiter.encode()
        # one single-byte bytes object per delimiter byte, so the rolling
        # window in run() can be compared against it element-wise
        self._stream_delimiter = [delimiter[i:i + 1] for i in range(len(delimiter))]
        # negative seek offset used to chop the already-written delimiter
        # prefix off the tail of a finished chunk (0 for 1-byte delimiters)
        self._stream_roll_back_len = (len(delimiter) - 1) * -1
        if not outputObjConstructor:
            self._obj = (io.BytesIO, (), {})
        else:
            self._obj = outputObjConstructor

    @property
    def data_available(self):
        '''threading.Event that becomes set (non-blocking to .wait())
        whenever a new IO obj was attached to the .data array.
        Code consuming the array may .clear() it once done with all
        chunks so it can block on .wait() again.'''
        return self._data_available

    @property
    def data_unoccupied(self):
        '''threading.Event that is normally set (non-blocking to .wait()).
        .clear() it before non-thread-safe manipulation (changing) of the
        .data array from outside, and .set() it back when you are done.'''
        return self._data_unoccupied

    @property
    def data(self):
        '''List ("stack") of input chunks captured so far, as file-like
        objects (io.BytesIO by default). Code consuming the chunks is
        responsible for disposing of them.'''
        return self._data

    @property
    def input(self):
        '''File descriptor (not a file-like) of the write end of our
        pipe; give it to another process to use as its stdout.'''
        return self._w

    def flush(self):
        '''Inject our chunk delimiter into self.input.

        A read on a pipe is normally blocking; this releases the internal
        pipe reader when the primary subprocess is not writing anything,
        so things can move on.'''
        os.write(self._w, b''.join(self._stream_delimiter))

    def stop(self):
        '''Ask the reader thread to finish: unblock it with a flush,
        then close the write end of the pipe.'''
        self._stop = True
        self.flush()  # reader has its teeth on the pipe; this makes it let go for a sec
        os.close(self._w)
        self._data_available.set()

    def __del__(self):
        # Best-effort cleanup; stop() may legitimately fail if the fds
        # were already closed.
        try:
            self.stop()
        except Exception:
            pass
        try:
            del self._w
            del self._r
            del self._data  # original said "self.results" — attribute never existed
        except Exception:
            pass

    def run(self):
        '''
        Plan:
        - read into a fresh instance of IO obj until the delimiter is
          encountered
        - when the delimiter is detected, attach that IO obj to the
          .data array and signal the calling code (through the
          data_available threading.Event) that results are available
        - repeat until .stop() was called on the thread or EOF is hit
        '''
        # Rolling window, same length as the delimiter; b'' placeholders
        # on purpose so the window cannot match before enough bytes arrive.
        marker = [b'' for _ in self._stream_delimiter]
        tf = self._obj[0](*self._obj[1], **self._obj[2])
        while not self._stop:
            byte = os.read(self._r, 1)
            if not byte:
                # EOF: every write end of the pipe has been closed
                break
            marker.pop(0)
            marker.append(byte)
            if marker != self._stream_delimiter:
                tf.write(byte)
            else:
                # chop the already-written delimiter prefix off the chunk
                tf.seek(self._stream_roll_back_len, 2)
                tf.truncate()
                tf.seek(0)
                self._data_unoccupied.wait(5)  # honor outside .data manipulation
                self._data.append(tf)
                self._data_available.set()
                tf = self._obj[0](*self._obj[1], **self._obj[2])
        os.close(self._r)
        tf.close()
        del tf


def testth():
    '''
    Demo / self-test:
    - set up a chunker and hand its input pipe to `cat` as stdout
    - push delimited and undelimited data into cat's stdin, then close it
    - verify the chunker splits the echoed stream on the delimiter
    '''
    import subprocess

    ch = InputStreamChunker('\n')  # str delimiter still accepted
    ch.daemon = True
    ch.start()

    p = subprocess.Popen(['cat'], stdin=subprocess.PIPE,
                         stdout=ch.input, stderr=subprocess.PIPE)
    feed = p.stdin
    feed.write(b'line1 asdf\n')  # complete first chunk
    feed.write(b'line2 asdf')    # no delimiter; merges with the next write
    feed.write(b'line3 qwer\n')  # completes the second chunk
    feed.write(b'line4 zxcv')    # trailing, undelimited data
    feed.flush()
    feed.close()                 # EOF: cat echoes the rest and exits
    p.wait()

    # wait for the two delimited chunks
    while len(ch.data) < 2:
        ch.data_available.wait(0.2)
        ch.data_available.clear()

    # the trailing write has no delimiter; inject one to release it
    ch.flush()
    while len(ch.data) < 3:
        ch.data_available.wait(0.2)
        ch.data_available.clear()

    ch.stop()

    got = [f.read() for f in ch.data[:3]]
    should_be = [b'line1 asdf', b'line2 asdfline3 qwer', b'line4 zxcv']
    assert got == should_be, got
    print('testth OK, %s chunk(s): %s' % (len(ch.data), got))


if __name__ == '__main__':
    testth()