Out-of-core classification of text documents in scikit-learn

This is an example showing how scikit-learn can be used for classification with an out-of-core approach: learning from data that does not fit into main memory. We make use of an online classifier, i.e. one that supports the partial_fit method, which is fed with mini-batches of examples. To guarantee that the feature space remains the same over time, we use a HashingVectorizer that projects each example into the same feature space. This is especially useful in the case of text classification, where new features (words) may appear in each batch.
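
The pattern itself is only a few lines. The sketch below is a minimal illustration on made-up toy batches (the data and labels are hypothetical, not part of the Reuters example); the key point is that the stateless HashingVectorizer maps every batch into the same feature space, so partial_fit can be called repeatedly.

from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier

vectorizer = HashingVectorizer(n_features=2 ** 18)
clf = SGDClassifier()
all_classes = [0, 1]  # every class must be declared on the first partial_fit call

# Toy mini-batches of (texts, labels), purely illustrative.
toy_batches = [
    (["free money now", "meeting at noon"], [1, 0]),
    (["cheap pills !!!", "quarterly report attached"], [1, 0]),
]
for texts, labels in toy_batches:
    X = vectorizer.transform(texts)  # no fitting needed: hashing is stateless
    clf.partial_fit(X, labels, classes=all_classes)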

The dataset used in this example is Reuters-21578 as provided by the UCI ML repository. It will be automatically downloaded and uncompressed on first run.

The plot represents the learning curve of the classifier: the evolution of classification accuracy over the course of the mini-batches. Accuracy is measured on the first 1000 samples, held out as a validation set.
To limit the memory consumption, we queue examples up to a fixed amount before feeding them to the learner.

Reuters dataset related routines: a parser for the SGML archive files and a generator that streams the documents one at a time.

import os
import re
import tarfile
import urllib.request
from glob import glob
from html import parser as html_parser

from sklearn.datasets import get_data_home


def _not_in_sphinx():
    # Hack to detect whether we are running by the sphinx builder
    return '__sphinx_build__' not in globals()


class ReutersParser(html_parser.HTMLParser):
    """Utility class to parse a SGML file and yield documents one at a time."""

    def __init__(self, encoding='latin-1'):
        html_parser.HTMLParser.__init__(self)
        self._reset()
        self.encoding = encoding

    def handle_starttag(self, tag, attrs):
        method = 'start_' + tag
        getattr(self, method, lambda x: None)(attrs)

    def handle_endtag(self, tag):
        method = 'end_' + tag
        getattr(self, method, lambda: None)()

    def _reset(self):
        self.in_title = 0
        self.in_body = 0
        self.in_topics = 0
        self.in_topic_d = 0
        self.title = ""
        self.body = ""
        self.topics = []
        self.topic_d = ""

    def parse(self, fd):
        self.docs = []
        for chunk in fd:
            self.feed(chunk.decode(self.encoding))
            for doc in self.docs:
                yield doc
            self.docs = []
        self.close()

    def handle_data(self, data):
        if self.in_body:
            self.body += data
        elif self.in_title:
            self.title += data
        elif self.in_topic_d:
            self.topic_d += data

    def start_reuters(self, attributes):
        pass

    def end_reuters(self):
        self.body = re.sub(r'\s+', r' ', self.body)
        self.docs.append({'title': self.title,
                          'body': self.body,
                          'topics': self.topics})
        self._reset()

    def start_title(self, attributes):
        self.in_title = 1

    def end_title(self):
        self.in_title = 0

    def start_body(self, attributes):
        self.in_body = 1

    def end_body(self):
        self.in_body = 0

    def start_topics(self, attributes):
        self.in_topics = 1

    def end_topics(self):
        self.in_topics = 0

    def start_d(self, attributes):
        self.in_topic_d = 1

    def end_d(self):
        self.in_topic_d = 0
        self.topics.append(self.topic_d)
        self.topic_d = ""


def stream_reuters_documents(data_path=None):
    """Iterate over documents of the Reuters dataset.

    The Reuters archive will automatically be downloaded and uncompressed if
    the `data_path` directory does not exist.

    Documents are represented as dictionaries with 'body' (str),
    'title' (str) and 'topics' (list(str)) keys.
    """
    DOWNLOAD_URL = ('http://archive.ics.uci.edu/ml/machine-learning-databases/'
                    'reuters21578-mld/reuters21578.tar.gz')
    ARCHIVE_FILENAME = 'reuters21578.tar.gz'

    if data_path is None:
        data_path = os.path.join(get_data_home(), "reuters")
    if not os.path.exists(data_path):
        # Download the dataset.
        print("downloading dataset (once and for all) into %s" % data_path)
        os.mkdir(data_path)

        def progress(blocknum, bs, size):
            total_sz_mb = '%.2f MB' % (size / 1e6)
            current_sz_mb = '%.2f MB' % ((blocknum * bs) / 1e6)
            if _not_in_sphinx():
                print('\rdownloaded %s / %s' % (current_sz_mb, total_sz_mb),
                      end='')

        archive_path = os.path.join(data_path, ARCHIVE_FILENAME)
        urllib.request.urlretrieve(DOWNLOAD_URL, filename=archive_path,
                                   reporthook=progress)
        if _not_in_sphinx():
            print('\r', end='')
        print("untarring Reuters dataset...")
        tarfile.open(archive_path, 'r:gz').extractall(data_path)
        print("done.")

    parser = ReutersParser()
    for filename in glob(os.path.join(data_path, "*.sgm")):
        for doc in parser.parse(open(filename, 'rb')):
            yield doc
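
As a quick sanity check, the stream can be consumed like any other generator. The snippet below is illustrative only (not part of the original script) and prints the title and topics of the first parsed document.

# Peek at the first parsed document; triggers the download on first run.
doc = next(stream_reuters_documents())
print(doc['title'])
print(doc['topics'])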

Create the vectorizer and limit the number of features to a reasonable maximum

In [5]:

import itertools
import time

import numpy as np

from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.linear_model import SGDClassifier, Perceptron, PassiveAggressiveClassifier
from sklearn.naive_bayes import MultinomialNB

vectorizer = HashingVectorizer(decode_error='ignore', n_features=2 ** 18,
                               non_negative=True)
# NB: recent scikit-learn releases removed `non_negative`; use
# `alternate_sign=False` there to keep the features non-negative for
# MultinomialNB.

# Iterator over parsed Reuters SGML files.
data_stream = stream_reuters_documents()

# We learn a binary classification between the "acq" class and all the others.
# "acq" was chosen as it is more or less evenly distributed in the Reuters
# files. For other datasets, one should take care of creating a test set with
# a realistic portion of positive instances.
all_classes = np.array([0, 1])
positive_class = 'acq'

# Here are some classifiers that support the `partial_fit` method
partial_fit_classifiers = {
    'SGD': SGDClassifier(),
    'Perceptron': Perceptron(),
    'NB Multinomial': MultinomialNB(alpha=0.01),
    'Passive-Aggressive': PassiveAggressiveClassifier(),
}


def get_minibatch(doc_iter, size, pos_class=positive_class):
    """Extract a minibatch of examples, return a tuple X_text, y.

    Note: size is before excluding invalid docs with no topics assigned.
    """
    data = [('{title}\n\n{body}'.format(**doc), pos_class in doc['topics'])
            for doc in itertools.islice(doc_iter, size)
            if doc['topics']]
    if not len(data):
        return np.asarray([], dtype=int), np.asarray([], dtype=int)
    X_text, y = zip(*data)
    return X_text, np.asarray(y, dtype=int)


def iter_minibatches(doc_iter, minibatch_size):
    """Generator of minibatches."""
    X_text, y = get_minibatch(doc_iter, minibatch_size)
    while len(X_text):
        yield X_text, y
        X_text, y = get_minibatch(doc_iter, minibatch_size)


# test data statistics
test_stats = {'n_test': 0, 'n_test_pos': 0}

# First we hold out a number of examples to estimate accuracy
n_test_documents = 1000
tick = time.time()
X_test_text, y_test = get_minibatch(data_stream, 1000)
parsing_time = time.time() - tick
tick = time.time()
X_test = vectorizer.transform(X_test_text)
vectorizing_time = time.time() - tick
test_stats['n_test'] += len(y_test)
test_stats['n_test_pos'] += sum(y_test)
print("Test set is %d documents (%d positive)" % (len(y_test), sum(y_test)))


def progress(cls_name, stats):
    """Report progress information, return a string."""
    duration = time.time() - stats['t0']
    s = "%20s classifier : \t" % cls_name
    s += "%(n_train)6d train docs (%(n_train_pos)6d positive) " % stats
    s += "%(n_test)6d test docs (%(n_test_pos)6d positive) " % test_stats
    s += "accuracy: %(accuracy).3f " % stats
    s += "in %.2fs (%5d docs/s)" % (duration, stats['n_train'] / duration)
    return s


cls_stats = {}

for cls_name in partial_fit_classifiers:
    stats = {'n_train': 0, 'n_train_pos': 0,
             'accuracy': 0.0, 'accuracy_history': [(0, 0)],
             't0': time.time(), 'runtime_history': [(0, 0)],
             'total_fit_time': 0.0}
    cls_stats[cls_name] = stats

get_minibatch(data_stream, n_test_documents)
# Discard test set

# We will feed the classifier with mini-batches of 1000 documents; this means
# we have at most 1000 docs in memory at any time.  The smaller the document
# batch, the bigger the relative overhead of the partial fit methods.
minibatch_size = 1000

# Create the data_stream that parses Reuters SGML files and iterates on
# documents as a stream.
minibatch_iterators = iter_minibatches(data_stream, minibatch_size)
total_vect_time = 0.0

# Main loop : iterate on mini-batches of examples
for i, (X_train_text, y_train) in enumerate(minibatch_iterators):

    tick = time.time()
    X_train = vectorizer.transform(X_train_text)
    total_vect_time += time.time() - tick

    for cls_name, cls in partial_fit_classifiers.items():
        tick = time.time()
        # update estimator with examples in the current mini-batch
        cls.partial_fit(X_train, y_train, classes=all_classes)

        # accumulate test accuracy stats
        cls_stats[cls_name]['total_fit_time'] += time.time() - tick
        cls_stats[cls_name]['n_train'] += X_train.shape[0]
        cls_stats[cls_name]['n_train_pos'] += sum(y_train)
        tick = time.time()
        cls_stats[cls_name]['accuracy'] = cls.score(X_test, y_test)
        cls_stats[cls_name]['prediction_time'] = time.time() - tick
        acc_history = (cls_stats[cls_name]['accuracy'],
                       cls_stats[cls_name]['n_train'])
        cls_stats[cls_name]['accuracy_history'].append(acc_history)
        run_history = (cls_stats[cls_name]['accuracy'],
                       total_vect_time + cls_stats[cls_name]['total_fit_time'])
        cls_stats[cls_name]['runtime_history'].append(run_history)

        if i % 3 == 0:
            print(progress(cls_name, cls_stats[cls_name]))
    if i % 3 == 0:
        print('\n')
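
The accuracy_history accumulated above is what the learning curve is built from. A minimal plotting sketch with matplotlib follows; the full example uses a more elaborate plotting helper, and the labels here are illustrative.

import matplotlib.pyplot as plt

plt.figure()
for cls_name, stats in sorted(cls_stats.items()):
    # each history entry is a (accuracy, n_train) tuple
    accuracies, n_examples = zip(*stats['accuracy_history'])
    plt.plot(n_examples, accuracies, label=cls_name)
plt.xlabel('training examples (#)')
plt.ylabel('accuracy')
plt.legend(loc='best')
plt.title('Classification accuracy as a function of examples seen')
plt.show()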