#!/usr/bin/env python

version = "1.7"
version_info = (1, 7, 0, "rc-2")
__revision__ = "$Rev: 72 $"

"""Python-Markdown
===============

Converts Markdown to HTML.  Basic usage as a module:

    import markdown
    md = Markdown()
    html = md.convert(your_text_string)

See http://www.freewisdom.org/projects/python-markdown/ for more
information and instructions on how to extend the functionality of the
script.  (You might want to read that before you try modifying this
file.)

Started by [Manfred Stienstra](http://www.dwerg.net/).  Continued and
maintained by [Yuri Takhteyev](http://www.freewisdom.org) and [Waylan
Limberg](http://achinghead.com/).

Contact: yuri [at] freewisdom.org
         waylan [at] gmail.com

License: GPL 2 (http://www.gnu.org/copyleft/gpl.html) or BSD

"""

import re, sys, codecs

from logging import getLogger, StreamHandler, Formatter, \
                    DEBUG, INFO, WARN, ERROR, CRITICAL

MESSAGE_THRESHOLD = CRITICAL

# Configure debug message logger (the hard way - to support python 2.3)
logger = getLogger('MARKDOWN')
logger.setLevel(DEBUG)  # This is restricted by handlers later
console_hndlr = StreamHandler()
formatter = Formatter('%(name)s-%(levelname)s: "%(message)s"')
console_hndlr.setFormatter(formatter)
console_hndlr.setLevel(MESSAGE_THRESHOLD)
logger.addHandler(console_hndlr)


def message(level, text):
    ''' A wrapper method for logging debug messages. '''
    logger.log(level, text)


# --------------- CONSTANTS YOU MIGHT WANT TO MODIFY -----------------

TAB_LENGTH = 4               # expand tabs to this many spaces
ENABLE_ATTRIBUTES = True     # @id = xyz -> <... id="xyz">
SMART_EMPHASIS = 1           # this_or_that does not become this<i>or</i>that
HTML_REMOVED_TEXT = "[HTML_REMOVED]"  # text used instead of HTML in safe mode

RTL_BIDI_RANGES = ( (u'\u0590', u'\u07FF'),
                    # from Hebrew to Nko (includes Arabic, Syriac and Thaana)
                    (u'\u2D30', u'\u2D7F'),
                    # Tifinagh
                  )

# Unicode Reference Table:
# 0590-05FF - Hebrew
# 0600-06FF - Arabic
# 0700-074F - Syriac
# 0750-077F - Arabic Supplement
# 0780-07BF - Thaana
# 07C0-07FF - Nko

BOMS = { 'utf-8': (codecs.BOM_UTF8, ),
         'utf-16': (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE),
         #'utf-32': (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)
       }

def removeBOM(text, encoding):
    """Strip a leading byte-order mark from *text*, if present.

    @param text: a str or unicode string (a unicode BOM is matched when
                 *text* is unicode, a byte BOM otherwise)
    @param encoding: a key of BOMS ('utf-8' or 'utf-16')
    @returns: *text* without its BOM, or *text* unchanged
    """
    convert = isinstance(text, unicode)
    for bom in BOMS[encoding]:
        bom = convert and bom.decode(encoding) or bom
        if text.startswith(bom):
            # BUGFIX: the original used text.lstrip(bom).  str.lstrip()
            # treats its argument as a character *set* and strips any run
            # of those characters, so it could also eat legitimate leading
            # characters that happen to appear in the BOM.  Slice off
            # exactly the BOM prefix instead.
            return text[len(bom):]
    return text
# The following constant specifies the name used in the usage
# statement displayed for python versions lower than 2.3.  (With
# python2.3 and higher the usage statement is generated by optparse
# and uses the actual name of the executable called.)

EXECUTABLE_NAME_FOR_USAGE = "python markdown.py"

# --------------- CONSTANTS YOU _SHOULD NOT_ HAVE TO CHANGE ----------

# a template for html placeholders
HTML_PLACEHOLDER_PREFIX = "qaodmasdkwaspemas"
HTML_PLACEHOLDER = HTML_PLACEHOLDER_PREFIX + "%dajkqlsmdqpakldnzsdfls"

BLOCK_LEVEL_ELEMENTS = ['p', 'div', 'blockquote', 'pre', 'table',
                        'dl', 'ol', 'ul', 'script', 'noscript',
                        'form', 'fieldset', 'iframe', 'math', 'ins',
                        'del', 'hr', 'hr/', 'style']

def isBlockLevel(tag):
    """Return true if *tag* names a block-level HTML element.

    A tag qualifies if it is listed in BLOCK_LEVEL_ELEMENTS or looks
    like a numbered header ('h1' ... 'h9').
    """
    # BUGFIX: guard len(tag) > 1 before indexing tag[1]; the original
    # raised IndexError for a bare one-character tag such as "h".
    return ((tag in BLOCK_LEVEL_ELEMENTS)
            or (len(tag) > 1 and tag[0] == 'h' and tag[1] in "0123456789"))

"""
======================================================================
========================== NANODOM ===================================
======================================================================

The three classes below implement some of the most basic DOM
methods.  I use this instead of minidom because I need a simpler
functionality and do not want to require additional libraries.

Importantly, NanoDom does not do normalization, which is what we
want.

It also adds extra white space when converting DOM to string
"""
ENTITY_NORMALIZATION_EXPRESSIONS = [(re.compile("&"), "&amp;"),
                                    (re.compile("<"), "&lt;"),
                                    (re.compile(">"), "&gt;")]

ENTITY_NORMALIZATION_EXPRESSIONS_SOFT = [(re.compile("&(?!\#)"), "&amp;"),
                                         (re.compile("<"), "&lt;"),
                                         (re.compile(">"), "&gt;"),
                                         (re.compile("\""), "&quot;")]


def getBidiType(text):
    """Classify *text* as "rtl" or "ltr" from its first character.

    Returns None for empty text or when the first character is not an
    alphabetic unicode character.
    """
    if not text:
        return None
    ch = text[0]
    if not isinstance(ch, unicode) or not ch.isalpha():
        return None
    else:
        for lo, hi in RTL_BIDI_RANGES:
            if lo <= ch <= hi:
                return "rtl"
        else:
            return "ltr"


class Document:
    """Minimal document node: holds one root element and acts as the
    factory for every NanoDom node type."""

    def __init__(self):
        self.bidi = "ltr"

    def appendChild(self, child):
        # A document has exactly one root; appending installs it and
        # (re)initializes the per-document entity cache.
        self.documentElement = child
        child.isDocumentElement = True
        child.parent = self
        self.entities = {}

    def setBidi(self, bidi):
        if bidi:
            self.bidi = bidi

    def createElement(self, tag, textNode=None):
        el = Element(tag)
        el.doc = self
        if textNode:
            el.appendChild(self.createTextNode(textNode))
        return el

    def createTextNode(self, text):
        node = TextNode(text)
        node.doc = self
        return node

    def createEntityReference(self, entity):
        # Entity references are interned: one shared node per entity name.
        if entity not in self.entities:
            self.entities[entity] = EntityReference(entity)
        return self.entities[entity]

    def createCDATA(self, text):
        node = CDATA(text)
        node.doc = self
        return node

    def toxml(self):
        return self.documentElement.toxml()

    def normalizeEntities(self, text, avoidDoubleNormalizing=False):
        """Escape &, < and > (and, in "soft" mode, quotes) in *text*.

        Soft mode leaves numeric entities (&#...;) alone so text that
        was already escaped is not escaped a second time.
        """
        if avoidDoubleNormalizing:
            regexps = ENTITY_NORMALIZATION_EXPRESSIONS_SOFT
        else:
            regexps = ENTITY_NORMALIZATION_EXPRESSIONS
        for regexp, substitution in regexps:
            text = regexp.sub(substitution, text)
        return text

    def find(self, test):
        return self.documentElement.find(test)

    def unlink(self):
        self.documentElement.unlink()
        self.documentElement = None


class CDATA:
    """A CDATA section; its text is emitted verbatim."""

    type = "cdata"

    def __init__(self, text):
        self.text = text

    def handleAttributes(self):
        pass

    def toxml(self):
        return "<![CDATA[" + self.text + "]]>"


class Element:
    """An element node: a tag name, ordered attributes and children."""

    type = "element"

    def __init__(self, tag):
        self.nodeName = tag
        self.attributes = []        # attribute names, in insertion order
        self.attribute_values = {}  # name -> value
        self.childNodes = []
        self.bidi = None
        self.isDocumentElement = False

    def setBidi(self, bidi):
        if bidi:
            orig_bidi = self.bidi
            if not self.bidi or self.isDocumentElement:
                # Once the bidi is set don't change it (except for doc
                # element)
                self.bidi = bidi
                self.parent.setBidi(bidi)

    def unlink(self):
        for child in self.childNodes:
            if child.type == "element":
                child.unlink()
        self.childNodes = None

    def setAttribute(self, attr, value):
        if attr not in self.attributes:
            self.attributes.append(attr)
        self.attribute_values[attr] = value

    def insertChild(self, position, child):
        self.childNodes.insert(position, child)
        child.parent = self

    def removeChild(self, child):
        self.childNodes.remove(child)

    def replaceChild(self, oldChild, newChild):
        position = self.childNodes.index(oldChild)
        self.removeChild(oldChild)
        self.insertChild(position, newChild)

    def appendChild(self, child):
        self.childNodes.append(child)
        child.parent = self

    def handleAttributes(self):
        pass

    def find(self, test, depth=0):
        """ Returns a list of descendants that pass the test function """
        matched_nodes = []
        for child in self.childNodes:
            if test(child):
                matched_nodes.append(child)
            if child.type == "element":
                matched_nodes += child.find(test, depth + 1)
        return matched_nodes

    def toxml(self):
        # Let children turn {@attr=value} text into real attributes first.
        if ENABLE_ATTRIBUTES:
            for child in self.childNodes:
                child.handleAttributes()
        buffer = ""
        # NOTE(review): the single-space indents below come straight from
        # the (whitespace-mangled) source; the original may have used a
        # wider indent -- cosmetic only, affects pretty-printing.
        if self.nodeName in ['h1', 'h2', 'h3', 'h4']:
            buffer += "\n"
        elif self.nodeName in ['li']:
            buffer += "\n "
        # Process children FIRST, then do the attributes
        childBuffer = ""
        if self.childNodes or self.nodeName in ['blockquote']:
            childBuffer += ">"
            for child in self.childNodes:
                childBuffer += child.toxml()
            if self.nodeName == 'p':
                childBuffer += "\n"
            elif self.nodeName == 'li':
                childBuffer += "\n "
            childBuffer += "</%s>" % self.nodeName
        else:
            childBuffer += "/>"
        buffer += "<" + self.nodeName
        if self.nodeName in ['p', 'li', 'ul', 'ol',
                             'h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
            if "dir" not in self.attribute_values:
                if self.bidi:
                    bidi = self.bidi
                else:
                    bidi = self.doc.bidi
                if bidi == "rtl":
                    self.setAttribute("dir", "rtl")
        for attr in self.attributes:
            value = self.attribute_values[attr]
            value = self.doc.normalizeEntities(value,
                                               avoidDoubleNormalizing=True)
            buffer += ' %s="%s"' % (attr, value)
        # Now let's actually append the children
        buffer += childBuffer
        if self.nodeName in ['p', 'br ', 'li', 'ul', 'ol',
                             'h1', 'h2', 'h3', 'h4']:
            buffer += "\n"
        return buffer


class TextNode:
    """A text node; also carries the {@attr=value} attribute syntax."""

    type = "text"
    attrRegExp = re.compile(r'\{@([^\}]*)=([^\}]*)}')  # {@id=123}

    def __init__(self, text):
        self.value = text

    def attributeCallback(self, match):
        self.parent.setAttribute(match.group(1), match.group(2))

    def handleAttributes(self):
        self.value = self.attrRegExp.sub(self.attributeCallback, self.value)

    def toxml(self):
        text = self.value
        self.parent.setBidi(getBidiType(text))
        # Stashed-HTML placeholders must pass through untouched.
        if not text.startswith(HTML_PLACEHOLDER_PREFIX):
            if self.parent.nodeName == "p":
                text = text.replace("\n", "\n ")
            elif (self.parent.nodeName == "li"
                  and self.parent.childNodes[0] == self):
                text = "\n " + text.replace("\n", "\n ")
            text = self.doc.normalizeEntities(text)
        return text


class EntityReference:
    """A character entity reference, e.g. &amp;."""

    type = "entity_ref"

    def __init__(self, entity):
        self.entity = entity

    def handleAttributes(self):
        pass

    def toxml(self):
        return "&" + self.entity + ";"


"""
======================================================================
========================== PRE-PROCESSORS ============================
======================================================================

Preprocessors munge source text before we start doing anything too
complicated.

There are two types of preprocessors: TextPreprocessor and
Preprocessor.
"""
class TextPreprocessor:
    '''
    TextPreprocessors are run before the text is broken into lines.

    Each TextPreprocessor implements a "run" method that takes a pointer
    to a text string of the document, modifies it as necessary and
    returns either the same pointer or a pointer to a new string.

    TextPreprocessors must extend markdown.TextPreprocessor.
    '''

    def run(self, text):
        pass


class Preprocessor:
    '''
    Preprocessors are run after the text is broken into lines.

    Each preprocessor implements a "run" method that takes a pointer to
    a list of lines of the document, modifies it as necessary and
    returns either the same pointer or a pointer to a new list.

    Preprocessors must extend markdown.Preprocessor.
    '''

    def run(self, lines):
        pass


class HtmlBlockPreprocessor(TextPreprocessor):
    """Removes html blocks from the source text and stores it."""

    def _get_left_tag(self, block):
        # Tag name of the opening "<tag ...>" at the start of block.
        return block[1:].replace(">", " ", 1).split()[0].lower()

    def _get_right_tag(self, left_tag, block):
        # The candidate closing tag name at the end of block.
        return block.rstrip()[-len(left_tag) - 2:-1].lower()

    def _equal_tags(self, left_tag, right_tag):
        if left_tag == 'div' or left_tag[0] in ['?', '@', '%']:
            # handle PHP, etc.
            return True
        if ("/" + left_tag) == right_tag:
            return True
        if (right_tag == "--" and left_tag == "--"):
            return True
        elif left_tag == right_tag[1:] \
            and right_tag[0] != "<":
            return True
        else:
            return False

    def _is_oneliner(self, tag):
        return (tag in ['hr', 'hr/'])

    def run(self, text):
        """Replace raw html blocks with placeholders from self.stash
        (the stash is attached externally by Markdown.reset())."""
        new_blocks = []
        text = text.split("\n\n")
        items = []
        left_tag = ''
        right_tag = ''
        in_tag = False  # flag
        for block in text:
            if block.startswith("\n"):
                block = block[1:]
            if not in_tag:
                if block.startswith("<"):
                    left_tag = self._get_left_tag(block)
                    right_tag = self._get_right_tag(left_tag, block)
                    if not (isBlockLevel(left_tag)
                            or block[1] in ["!", "?", "@", "%"]):
                        new_blocks.append(block)
                        continue
                    if self._is_oneliner(left_tag):
                        new_blocks.append(block.strip())
                        continue
                    if block[1] == "!":
                        # is a comment block
                        left_tag = "--"
                        right_tag = self._get_right_tag(left_tag, block)
                        # keep checking conditions below and maybe just append
                    if block.rstrip().endswith(">") \
                        and self._equal_tags(left_tag, right_tag):
                        new_blocks.append(self.stash.store(block.strip()))
                        continue
                    else:
                        # if is block level tag and is not complete
                        items.append(block.strip())
                        in_tag = True
                        continue
                new_blocks.append(block)
            else:
                items.append(block.strip())
                right_tag = self._get_right_tag(left_tag, block)
                if self._equal_tags(left_tag, right_tag):
                    # if find closing tag
                    in_tag = False
                    new_blocks.append(self.stash.store('\n\n'.join(items)))
                    items = []
        if items:
            new_blocks.append(self.stash.store('\n\n'.join(items)))
            new_blocks.append('\n')
        return "\n\n".join(new_blocks)

HTML_BLOCK_PREPROCESSOR = HtmlBlockPreprocessor()


class HeaderPreprocessor(Preprocessor):
    """
    Replaces underlined headers with hashed headers to avoid
    the need for lookahead later.
    """

    def run(self, lines):
        i = -1
        while i + 1 < len(lines):
            i = i + 1
            if not lines[i].strip():
                continue
            if lines[i].startswith("#"):
                lines.insert(i + 1, "\n")
            # BUGFIX: the original tested `i+1 <= len(lines)` before
            # indexing lines[i+1], which raised IndexError when the last
            # line was non-blank (e.g. run(["just one line"])).
            if (i + 1 < len(lines)
                and lines[i + 1]
                and lines[i + 1][0] in ['-', '=']):
                underline = lines[i + 1].strip()
                if underline == "=" * len(underline):
                    lines[i] = "# " + lines[i].strip()
                    lines[i + 1] = ""
                elif underline == "-" * len(underline):
                    lines[i] = "## " + lines[i].strip()
                    lines[i + 1] = ""
        return lines

HEADER_PREPROCESSOR = HeaderPreprocessor()


class LinePreprocessor(Preprocessor):
    """Deals with HR lines (needs to be done before processing lists)"""

    blockquote_re = re.compile(r'^(> )+')

    def run(self, lines):
        for i in range(len(lines)):
            prefix = ''
            m = self.blockquote_re.search(lines[i])
            if m:
                prefix = m.group(0)
            if self._isLine(lines[i][len(prefix):]):
                lines[i] = prefix + self.stash.store("<hr />", safe=True)
        return lines

    def _isLine(self, block):
        """Determines if a block should be replaced with an <HR>"""
        # BUGFIX: restored the four-space code-block indent; the mangled
        # source showed a single space (whitespace runs were collapsed),
        # but an indented code block is TAB_LENGTH (4) spaces wide.
        if block.startswith("    "):
            return 0  # a code block
        text = "".join([x for x in block if not x.isspace()])
        if len(text) <= 2:
            return 0
        for pattern in ['isline1', 'isline2', 'isline3']:
            m = RE.regExp[pattern].match(text)
            if (m and m.group(1)):
                return 1
        else:
            return 0

LINE_PREPROCESSOR = LinePreprocessor()
class ReferencePreprocessor(Preprocessor):
    '''
    Removes reference definitions from the text and stores them for
    later use.
    '''

    def run(self, lines):
        """Strip "[id]: url "title"" definition lines out of *lines*,
        recording them in self.references (attached externally);
        every other line passes through unchanged."""
        new_text = []
        for line in lines:
            m = RE.regExp['reference-def'].match(line)
            if m:
                id = m.group(2).strip().lower()
                t = m.group(4).strip()  # potential title
                if not t:
                    self.references[id] = (m.group(3), t)
                elif (len(t) >= 2
                      and (t[0] == t[-1] == "\""
                           or t[0] == t[-1] == "\'"
                           or (t[0] == "(" and t[-1] == ")"))):
                    # Quoted/parenthesized title: store without the wrapper.
                    self.references[id] = (m.group(3), t[1:-1])
                else:
                    # Malformed title: treat the line as ordinary text.
                    new_text.append(line)
            else:
                new_text.append(line)
        return new_text  # + "\n"

REFERENCE_PREPROCESSOR = ReferencePreprocessor()

"""
======================================================================
========================== INLINE PATTERNS ===========================
======================================================================

Inline patterns such as *emphasis* are handled by means of auxiliary
objects, one per pattern.  Pattern objects must be instances of classes
that extend markdown.Pattern.  Each pattern object uses a single regular
expression and needs support the following methods:

  pattern.getCompiledRegExp() - returns a regular expression

  pattern.handleMatch(m, doc) - takes a match object and returns
                                a NanoDom node (as a part of the provided
                                doc) or None

All of python markdown's built-in patterns subclass from Patter,
but you can add additional patterns that don't.

Also note that all the regular expressions used by inline must
capture the whole block.  For this reason, they all start with
'^(.*)' and end with '(.*)!'.  In case with built-in expression
Pattern takes care of adding the "^(.*)" and "(.*)!".

Finally, the order in which regular expressions are applied is very
important - e.g. if we first replace http://.../ links with <a> tags
and _then_ try to replace inline html, we would end up with a mess.
So, we apply the expressions in the following order:

  * escape and backticks have to go before everything else, so
    that we can preempt any markdown patterns by escaping them.

  * then we handle auto-links (must be done before inline html)

  * then we handle inline HTML.  At this point we will simply
    replace all inline HTML strings with a placeholder and add
    the actual HTML to a hash.

  * then inline images (must be done before links)

  * then bracketed links, first regular then reference-style

  * finally we apply strong and emphasis
"""
NOBRACKET = r'[^\]\[]*'
BRK = ( r'\[('
        + (NOBRACKET + r'(\[')*6
        + (NOBRACKET + r'\])*')*6
        + NOBRACKET + r')\]' )
NOIMG = r'(?<!\!)'

BACKTICK_RE = r'\`([^\`]*)\`'                    # `e= m*c^2`
DOUBLE_BACKTICK_RE = r'\`\`(.*)\`\`'             # ``e=f("`")``
ESCAPE_RE = r'\\(.)'                             # \<
EMPHASIS_RE = r'\*([^\*]*)\*'                    # *emphasis*
STRONG_RE = r'\*\*(.*)\*\*'                      # **strong**
STRONG_EM_RE = r'\*\*\*([^_]*)\*\*\*'            # ***strong***

if SMART_EMPHASIS:
    EMPHASIS_2_RE = r'(?<!\S)_(\S[^_]*)_'        # _emphasis_
else:
    EMPHASIS_2_RE = r'_([^_]*)_'                 # _emphasis_

STRONG_2_RE = r'__([^_]*)__'                     # __strong__
STRONG_EM_2_RE = r'___([^_]*)___'                # ___strong___

LINK_RE = NOIMG + BRK + r'\s*\(([^\)]*)\)'           # [text](url)
LINK_ANGLED_RE = NOIMG + BRK + r'\s*\(<([^\)]*)>\)'  # [text](<url>)
IMAGE_LINK_RE = r'\!' + BRK + r'\s*\(([^\)]*)\)'     # ![alttxt](http://x.com/)
REFERENCE_RE = NOIMG + BRK + r'\s*\[([^\]]*)\]'      # [Google][3]
IMAGE_REFERENCE_RE = r'\!' + BRK + '\s*\[([^\]]*)\]' # ![alt text][2]
NOT_STRONG_RE = r'( \* )'                        # stand-alone * or _
AUTOLINK_RE = r'<(http://[^>]*)>'                # <http://www.123.com>
AUTOMAIL_RE = r'<([^> \!]*@[^> ]*)>'             # <me@example.com>
#HTML_RE = r'(\<[^\>]*\>)'                       # <...>
HTML_RE = r'(\<[a-zA-Z/][^\>]*\>)'               # <...>
ENTITY_RE = r'(&[\#a-zA-Z0-9]*;)'                # &amp;
# BUGFIX: restored the two-space run in both line-break patterns; the
# mangled source showed a single space, but the comments (and Markdown's
# hard-line-break rule) require exactly two trailing spaces.
LINE_BREAK_RE = r'  \n'                          # two spaces at end of line
LINE_BREAK_2_RE = r'  $'                         # two spaces at end of text


class Pattern:
    """Base class for inline patterns.

    Wraps the given regular expression in "^(.*)...(.*)$" (DOTALL) so a
    match always captures the whole block; subclasses implement
    handleMatch(m, doc) to build a NanoDom node from a match.
    """

    def __init__(self, pattern):
        self.pattern = pattern
        self.compiled_re = re.compile("^(.*)%s(.*)$" % pattern, re.DOTALL)

    def getCompiledRegExp(self):
        return self.compiled_re

BasePattern = Pattern  # for backward compatibility


class SimpleTextPattern(Pattern):
    """Return the matched text as a plain text node."""

    def handleMatch(self, m, doc):
        return doc.createTextNode(m.group(2))


class SimpleTagPattern(Pattern):
    """Wrap the matched text in a single element (e.g. <strong>)."""

    def __init__(self, pattern, tag):
        Pattern.__init__(self, pattern)
        self.tag = tag

    def handleMatch(self, m, doc):
        el = doc.createElement(self.tag)
        el.appendChild(doc.createTextNode(m.group(2)))
        return el


class SubstituteTagPattern(SimpleTagPattern):
    """Replace the match with an empty element (e.g. <br/>)."""

    def handleMatch(self, m, doc):
        return doc.createElement(self.tag)


class BacktickPattern(Pattern):
    """`code` spans -> <code> elements (content is stripped)."""

    def __init__(self, pattern):
        Pattern.__init__(self, pattern)
        self.tag = "code"

    def handleMatch(self, m, doc):
        el = doc.createElement(self.tag)
        text = m.group(2).strip()
        #text = text.replace("&", "&amp;")
        el.appendChild(doc.createTextNode(text))
        return el


class DoubleTagPattern(SimpleTagPattern):
    """Wrap the matched text in two nested elements, e.g.
    'strong,em' -> <strong><em>...</em></strong>."""

    def handleMatch(self, m, doc):
        tag1, tag2 = self.tag.split(",")
        el1 = doc.createElement(tag1)
        el2 = doc.createElement(tag2)
        el1.appendChild(el2)
        el2.appendChild(doc.createTextNode(m.group(2)))
        return el1


class HtmlPattern(Pattern):
    """Stash raw inline html and leave a placeholder text node."""

    def handleMatch(self, m, doc):
        rawhtml = m.group(2)
        inline = True
        place_holder = self.stash.store(rawhtml)
        return doc.createTextNode(place_holder)


class LinkPattern(Pattern):
    """[text](url "title") -> <a href="url" title="title">text</a>."""

    def handleMatch(self, m, doc):
        el = doc.createElement('a')
        el.appendChild(doc.createTextNode(m.group(2)))
        parts = m.group(9).split('"')
        # We should now have [], [href], or [href, title]
        if parts:
            el.setAttribute('href', parts[0].strip())
        else:
            el.setAttribute('href', "")
        if len(parts) > 1:
            # we also got a title
            title = '"' + '"'.join(parts[1:]).strip()
            title = dequote(title)  #.replace('"', "&quot;")
            el.setAttribute('title', title)
        return el


class ImagePattern(Pattern):
    """![alt](src "title") -> <img src=... title=... alt=...>."""

    def handleMatch(self, m, doc):
        el = doc.createElement('img')
        src_parts = m.group(9).split()
        if src_parts:
            el.setAttribute('src', src_parts[0])
        else:
            el.setAttribute('src', "")
        if len(src_parts) > 1:
            el.setAttribute('title', dequote(" ".join(src_parts[1:])))
        if ENABLE_ATTRIBUTES:
            # Resolve {@attr=value} inside the alt text before using it.
            text = doc.createTextNode(m.group(2))
            el.appendChild(text)
            text.handleAttributes()
            truealt = text.value
            el.childNodes.remove(text)
        else:
            truealt = m.group(2)
        el.setAttribute('alt', truealt)
        return el


class ReferencePattern(Pattern):
    """[text][id] -> <a> built from a stored reference definition."""

    def handleMatch(self, m, doc):
        if m.group(9):
            id = m.group(9).lower()
        else:
            # if we got something like "[Google][]"
            # we'll use "google" as the id
            id = m.group(2).lower()
        if id not in self.references:  # ignore undefined refs
            return None
        href, title = self.references[id]
        text = m.group(2)
        return self.makeTag(href, title, text, doc)

    def makeTag(self, href, title, text, doc):
        el = doc.createElement('a')
        el.setAttribute('href', href)
        if title:
            el.setAttribute('title', title)
        el.appendChild(doc.createTextNode(text))
        return el


class ImageReferencePattern(ReferencePattern):
    """![alt][id] -> <img> built from a stored reference definition."""

    def makeTag(self, href, title, text, doc):
        el = doc.createElement('img')
        el.setAttribute('src', href)
        if title:
            el.setAttribute('title', title)
        el.setAttribute('alt', text)
        return el


class AutolinkPattern(Pattern):
    """<http://...> -> a link whose text is the url itself."""

    def handleMatch(self, m, doc):
        el = doc.createElement('a')
        el.setAttribute('href', m.group(2))
        el.appendChild(doc.createTextNode(m.group(2)))
        return el


class AutomailPattern(Pattern):
    """<user@host> -> a mailto: link, entity-encoded character by
    character as a light spam-harvester deterrent."""

    def handleMatch(self, m, doc):
        el = doc.createElement('a')
        email = m.group(2)
        if email.startswith("mailto:"):
            email = email[len("mailto:"):]
        for letter in email:
            entity = doc.createEntityReference("#%d" % ord(letter))
            el.appendChild(entity)
        mailto = "mailto:" + email
        mailto = "".join(['&#%d;' % ord(letter) for letter in mailto])
        el.setAttribute('href', mailto)
        return el


ESCAPE_PATTERN          = SimpleTextPattern(ESCAPE_RE)
NOT_STRONG_PATTERN      = SimpleTextPattern(NOT_STRONG_RE)

BACKTICK_PATTERN        = BacktickPattern(BACKTICK_RE)
DOUBLE_BACKTICK_PATTERN = BacktickPattern(DOUBLE_BACKTICK_RE)
STRONG_PATTERN          = SimpleTagPattern(STRONG_RE, 'strong')
STRONG_PATTERN_2        = SimpleTagPattern(STRONG_2_RE, 'strong')
EMPHASIS_PATTERN        = SimpleTagPattern(EMPHASIS_RE, 'em')
EMPHASIS_PATTERN_2      = SimpleTagPattern(EMPHASIS_2_RE, 'em')

STRONG_EM_PATTERN       = DoubleTagPattern(STRONG_EM_RE, 'strong,em')
STRONG_EM_PATTERN_2     = DoubleTagPattern(STRONG_EM_2_RE, 'strong,em')

LINE_BREAK_PATTERN      = SubstituteTagPattern(LINE_BREAK_RE, 'br ')
LINE_BREAK_PATTERN_2    = SubstituteTagPattern(LINE_BREAK_2_RE, 'br ')

LINK_PATTERN            = LinkPattern(LINK_RE)
LINK_ANGLED_PATTERN     = LinkPattern(LINK_ANGLED_RE)
IMAGE_LINK_PATTERN      = ImagePattern(IMAGE_LINK_RE)
IMAGE_REFERENCE_PATTERN = ImageReferencePattern(IMAGE_REFERENCE_RE)
REFERENCE_PATTERN       = ReferencePattern(REFERENCE_RE)

HTML_PATTERN            = HtmlPattern(HTML_RE)
ENTITY_PATTERN          = HtmlPattern(ENTITY_RE)

AUTOLINK_PATTERN        = AutolinkPattern(AUTOLINK_RE)
AUTOMAIL_PATTERN        = AutomailPattern(AUTOMAIL_RE)

"""
======================================================================
========================== POST-PROCESSORS ===========================
======================================================================

Markdown also allows post-processors, which are similar to
preprocessors in that they need to implement a "run" method.  However,
they are run after core processing.

There are two types of post-processors: Postprocessor and
TextPostprocessor
"""


class Postprocessor:
    '''
    Postprocessors are run before the dom it converted back into text.

    Each Postprocessor implements a "run" method that takes a pointer
    to a NanoDom document, modifies it as necessary and returns a
    NanoDom document.

    Postprocessors must extend markdown.Postprocessor.

    There are currently no standard post-processors, but the footnote
    extension uses one.
    '''

    def run(self, dom):
        pass
class TextPostprocessor:
    '''
    TextPostprocessors are run after the dom it converted back into
    text.

    Each TextPostprocessor implements a "run" method that takes a
    pointer to a text string, modifies it as necessary and returns a
    text string.

    TextPostprocessors must extend markdown.TextPostprocessor.
    '''

    def run(self, text):
        pass


class RawHtmlTextPostprocessor(TextPostprocessor):
    """Puts stashed raw html (or its safe-mode replacement) back in
    place of the placeholders HtmlStash handed out."""

    def __init__(self):
        pass

    def run(self, text):
        for i in range(self.stash.html_counter):
            html, safe = self.stash.rawHtmlBlocks[i]
            if self.safeMode and not safe:
                # Unsafe html in safe mode: escape, drop, or replace
                # with HTML_REMOVED_TEXT depending on the mode string.
                if str(self.safeMode).lower() == 'escape':
                    html = self.escape(html)
                elif str(self.safeMode).lower() == 'remove':
                    html = ''
                else:
                    html = HTML_REMOVED_TEXT
            text = text.replace("<p>%s\n</p>" % (HTML_PLACEHOLDER % i),
                                html + "\n")
            text = text.replace(HTML_PLACEHOLDER % i, html)
        return text

    def escape(self, html):
        ''' Basic html escaping '''
        html = html.replace('&', '&amp;')
        html = html.replace('<', '&lt;')
        html = html.replace('>', '&gt;')
        return html.replace('"', '&quot;')

RAWHTMLTEXTPOSTPROCESSOR = RawHtmlTextPostprocessor()

"""
======================================================================
========================== MISC AUXILIARY CLASSES ====================
======================================================================
"""


class HtmlStash:
    """This class is used for stashing HTML objects that we extract
    in the beginning and replace with place-holders."""

    def __init__(self):
        self.html_counter = 0  # for counting inline html segments
        self.rawHtmlBlocks = []

    def store(self, html, safe=False):
        """Saves an HTML segment for later reinsertion.  Returns a
        placeholder string that needs to be inserted into the
        document.

        @param html: an html segment
        @param safe: label an html segment as safe for safemode
        @param inline: label a segmant as inline html
        @returns : a placeholder string
        """
        self.rawHtmlBlocks.append((html, safe))
        placeholder = HTML_PLACEHOLDER % self.html_counter
        self.html_counter += 1
        return placeholder


class BlockGuru:

    def _findHead(self, lines, fn, allowBlank=0):
        """Functional magic to help determine boundaries of indented
        blocks.

        @param lines: an array of strings
        @param fn: a function that returns a substring of a string
                   if the string matches the necessary criteria
        @param allowBlank: specifies whether it's ok to have blank
                   lines between matching functions
        @returns: a list of post processes items and the unused
                  remainder of the original list
        """
        items = []
        item = -1
        i = 0  # to keep track of where we are
        for line in lines:
            if not line.strip() and not allowBlank:
                return items, lines[i:]
            if not line.strip() and allowBlank:
                # If we see a blank line, this _might_ be the end
                i += 1
                # Find the next non-blank line
                for j in range(i, len(lines)):
                    if lines[j].strip():
                        next = lines[j]
                        break
                else:
                    # There is no more text => this is the end
                    break
                # Check if the next non-blank line is still a part of the list
                part = fn(next)
                if part:
                    items.append("")
                    continue
                else:
                    break  # found end of the list
            part = fn(line)
            if part:
                items.append(part)
                i += 1
                continue
            else:
                return items, lines[i:]
        else:
            i += 1
        return items, lines[i:]

    def detabbed_fn(self, line):
        """ An auxiliary method to be passed to _findHead """
        m = RE.regExp['tabbed'].match(line)
        if m:
            return m.group(4)
        else:
            return None

    def detectTabbed(self, lines):
        """Split *lines* into (detabbed leading indented lines, rest)."""
        return self._findHead(lines, self.detabbed_fn, allowBlank=1)


def print_error(string):
    """Print an error string to stderr"""
    sys.stderr.write(string + '\n')


def dequote(string):
    """ Removes quotes from around a string """
    if ((string.startswith('"') and string.endswith('"'))
        or (string.startswith("'") and string.endswith("'"))):
        return string[1:-1]
    else:
        return string

"""
======================================================================
========================== CORE MARKDOWN =============================
======================================================================

This stuff is ugly, so if you are thinking of extending the syntax,
see first if you can do it via pre-processors, post-processors,
inline patterns or a combination of the three.
"""


class CorePatterns:
    """This class is scheduled for removal as part of a refactoring
    effort."""

    patterns = {
        'header':          r'(#*)([^#]*)(#*)',  # # A title
        'reference-def':   r'(\ ?\ ?\ ?)\[([^\]]*)\]:\s*([^ ]*)(.*)',
                           # [Google]: http://www.google.com/
        'containsline':    r'([-]*)$|^([=]*)',  # -----, =====, etc.
        'ol':              r'[ ]{0,3}[\d]*\.\s+(.*)',  # 1. text
        'ul':              r'[ ]{0,3}[*+-]\s+(.*)',    # "* text"
        'isline1':         r'(\**)',  # ***
        'isline2':         r'(\-*)',  # ---
        'isline3':         r'(\_*)',  # ___
        # BUGFIX: restored the four-space alternative; the mangled source
        # showed a single space (whitespace runs were collapsed), but an
        # indented line is TAB_LENGTH (4) spaces after expandtabs().
        'tabbed':          r'((\t)|(    ))(.*)',  # an indented line
        'quoted':          r'> ?(.*)',  # a quoted block ("> ...")
    }

    def __init__(self):
        self.regExp = {}
        for key in self.patterns.keys():
            self.regExp[key] = re.compile("^%s$" % self.patterns[key],
                                          re.DOTALL)
        # 'containsline' is special: multi-line, anchored per line.
        self.regExp['containsline'] = re.compile(r'^([-]*)$|^([=]*)$', re.M)

RE = CorePatterns()
?)\[([^\]]*)\]:\s*([^ ]*)(.*)',# [Google]: http://www.google.com/'containsline':r'([-]*)$|^([=]*)',# -----, =====, etc.'ol':r'[ ]{0,3}[\d]*\.\s+(.*)',# 1. text'ul':r'[ ]{0,3}[*+-]\s+(.*)',# "* text"'isline1':r'(\**)',# ***'isline2':r'(\-*)',# ---'isline3':r'(\_*)',# ___'tabbed':r'((\t)|( ))(.*)',# an indented line'quoted':r'> ?(.*)',# a quoted block ("> ...")}def__init__(self):self.regExp={}forkeyinself.patterns.keys():self.regExp[key]=re.compile("^%s$"%self.patterns[key],re.DOTALL)self.regExp['containsline']=re.compile(r'^([-]*)$|^([=]*)$',re.M)RE=CorePatterns()classMarkdown:""" Markdown formatter class for creating an html document from Markdown text """def__init__(self,source=None,# depreciatedextensions=[],extension_configs=None,safe_mode=False):"""Creates a new Markdown instance. @param source: The text in Markdown format. Depreciated! @param extensions: A list if extensions. @param extension-configs: Configuration setting for extensions. @param safe_mode: Disallow raw html. """self.source=sourceifsourceisnotNone:message(WARN,"The `source` arg of Markdown.__init__() is depreciated and will be removed in the future. 
Use `instance.convert(source)` instead.")self.safeMode=safe_modeself.blockGuru=BlockGuru()self.registeredExtensions=[]self.stripTopLevelTags=1self.docType=""self.textPreprocessors=[HTML_BLOCK_PREPROCESSOR]self.preprocessors=[HEADER_PREPROCESSOR,LINE_PREPROCESSOR,# A footnote preprocessor will# get inserted hereREFERENCE_PREPROCESSOR]self.postprocessors=[]# a footnote postprocessor will get# inserted laterself.textPostprocessors=[# a footnote postprocessor will get# inserted hereRAWHTMLTEXTPOSTPROCESSOR]self.prePatterns=[]self.inlinePatterns=[DOUBLE_BACKTICK_PATTERN,BACKTICK_PATTERN,ESCAPE_PATTERN,REFERENCE_PATTERN,LINK_ANGLED_PATTERN,LINK_PATTERN,IMAGE_LINK_PATTERN,IMAGE_REFERENCE_PATTERN,AUTOLINK_PATTERN,AUTOMAIL_PATTERN,LINE_BREAK_PATTERN_2,LINE_BREAK_PATTERN,HTML_PATTERN,ENTITY_PATTERN,NOT_STRONG_PATTERN,STRONG_EM_PATTERN,STRONG_EM_PATTERN_2,STRONG_PATTERN,STRONG_PATTERN_2,EMPHASIS_PATTERN,EMPHASIS_PATTERN_2# The order of the handlers matters!!!]self.registerExtensions(extensions=extensions,configs=extension_configs)self.reset()defregisterExtensions(self,extensions,configs):ifnotconfigs:configs={}forextinextensions:extension_module_name="mdx_"+exttry:module=__import__(extension_module_name)except:message(CRITICAL,"couldn't load extension %s (looking for %s module)"%(ext,extension_module_name))else:ifconfigs.has_key(ext):configs_for_ext=configs[ext]else:configs_for_ext=[]extension=module.makeExtension(configs_for_ext)extension.extendMarkdown(self,globals())defregisterExtension(self,extension):""" This gets called by the extension """self.registeredExtensions.append(extension)defreset(self):"""Resets all state variables so that we can start with a new 
text."""self.references={}self.htmlStash=HtmlStash()HTML_BLOCK_PREPROCESSOR.stash=self.htmlStashLINE_PREPROCESSOR.stash=self.htmlStashREFERENCE_PREPROCESSOR.references=self.referencesHTML_PATTERN.stash=self.htmlStashENTITY_PATTERN.stash=self.htmlStashREFERENCE_PATTERN.references=self.referencesIMAGE_REFERENCE_PATTERN.references=self.referencesRAWHTMLTEXTPOSTPROCESSOR.stash=self.htmlStashRAWHTMLTEXTPOSTPROCESSOR.safeMode=self.safeModeforextensioninself.registeredExtensions:extension.reset()def_transform(self):"""Transforms the Markdown text into a XHTML body document @returns: A NanoDom Document """# Setup the documentself.doc=Document()self.top_element=self.doc.createElement("span")self.top_element.appendChild(self.doc.createTextNode('\n'))self.top_element.setAttribute('class','markdown')self.doc.appendChild(self.top_element)# Fixup the source texttext=self.sourcetext=text.replace("\r\n","\n").replace("\r","\n")text+="\n\n"text=text.expandtabs(TAB_LENGTH)# Split into lines and run the preprocessors that will work with# self.linesself.lines=text.split("\n")# Run the pre-processors on the linesforprepinself.preprocessors:self.lines=prep.run(self.lines)# Create a NanoDom tree from the lines and attach it to Documentbuffer=[]forlineinself.lines:ifline.startswith("#"):self._processSection(self.top_element,buffer)buffer=[line]else:buffer.append(line)self._processSection(self.top_element,buffer)#self._processSection(self.top_element, self.lines)# Not sure why I put this in but let's leave it for now.self.top_element.appendChild(self.doc.createTextNode('\n'))# Run the post-processorsforpostprocessorinself.postprocessors:postprocessor.run(self.doc)returnself.docdef_processSection(self,parent_elem,lines,inList=0,looseList=0):"""Process a section of a source document, looking for high level structural elements like lists, block quotes, code segments, html blocks, etc. Some those then get stripped of their high level markup (e.g. 
get unindented) and the lower-level markup is processed recursively. @param parent_elem: A NanoDom element to which the content will be added @param lines: a list of lines @param inList: a level @returns: None"""# Loop through lines until none left.whilelines:# Check if this section starts with a list, a blockquote or# a code blockprocessFn={'ul':self._processUList,'ol':self._processOList,'quoted':self._processQuote,'tabbed':self._processCodeBlock}forregexpin['ul','ol','quoted','tabbed']:m=RE.regExp[regexp].match(lines[0])ifm:processFn[regexp](parent_elem,lines,inList)return# We are NOT looking at one of the high-level structures like# lists or blockquotes. So, it's just a regular paragraph# (though perhaps nested inside a list or something else). If# we are NOT inside a list, we just need to look for a blank# line to find the end of the block. If we ARE inside a# list, however, we need to consider that a sublist does not# need to be separated by a blank line. Rather, the following# markup is legal:## * The top level list item## Another paragraph of the list. 
This is where we are now.# * Underneath we might have a sublist.#ifinList:start,lines=self._linesUntil(lines,(lambdaline:RE.regExp['ul'].match(line)orRE.regExp['ol'].match(line)ornotline.strip()))self._processSection(parent_elem,start,inList-1,looseList=looseList)inList=inList-1else:# Ok, so it's just a simple blockparagraph,lines=self._linesUntil(lines,lambdaline:notline.strip())iflen(paragraph)andparagraph[0].startswith('#'):self._processHeader(parent_elem,paragraph)elifparagraph:self._processParagraph(parent_elem,paragraph,inList,looseList)iflinesandnotlines[0].strip():lines=lines[1:]# skip the first (blank) linedef_processHeader(self,parent_elem,paragraph):m=RE.regExp['header'].match(paragraph[0])ifm:level=len(m.group(1))h=self.doc.createElement("h%d"%level)parent_elem.appendChild(h)foriteminself._handleInline(m.group(2).strip()):h.appendChild(item)else:message(CRITICAL,"We've got a problem header!")def_processParagraph(self,parent_elem,paragraph,inList,looseList):list=self._handleInline("\n".join(paragraph))if(parent_elem.nodeName=='li'andnot(looseListorparent_elem.childNodes)):# If this is the first paragraph inside "li", don't# put <p> around it - append the paragraph bits directly# onto parent_elemel=parent_elemelse:# Otherwise make a "p" elementel=self.doc.createElement("p")parent_elem.appendChild(el)foriteminlist:el.appendChild(item)def_processUList(self,parent_elem,lines,inList):self._processList(parent_elem,lines,inList,listexpr='ul',tag='ul')def_processOList(self,parent_elem,lines,inList):self._processList(parent_elem,lines,inList,listexpr='ol',tag='ol')def_processList(self,parent_elem,lines,inList,listexpr,tag):"""Given a list of document lines starting with a list item, finds the end of the list, breaks it up, and recursively processes each list item and the remainder of the text file. 
@param parent_elem: A dom element to which the content will be added @param lines: a list of lines @param inList: a level @returns: None"""ul=self.doc.createElement(tag)# ul might actually be '<ol>'parent_elem.appendChild(ul)looseList=0# Make a list of list itemsitems=[]item=-1i=0# a counter to keep track of where we areforlineinlines:loose=0ifnotline.strip():# If we see a blank line, this _might_ be the end of the listi+=1loose=1# Find the next non-blank lineforjinrange(i,len(lines)):iflines[j].strip():next=lines[j]breakelse:# There is no more text => end of the listbreak# Check if the next non-blank line is still a part of the listif(RE.regExp['ul'].match(next)orRE.regExp['ol'].match(next)orRE.regExp['tabbed'].match(next)):# get rid of any white space in the lineitems[item].append(line.strip())looseList=looseorlooseListcontinueelse:break# found end of the list# Now we need to detect list items (at the current level)# while also detabing child elements if necessaryforexprin['ul','ol','tabbed']:m=RE.regExp[expr].match(line)ifm:ifexprin['ul','ol']:# We are looking at a new item#if m.group(1) :# Removed the check to allow for a blank line# at the beginning of the list itemitems.append([m.group(1)])item+=1elifexpr=='tabbed':# This line needs to be detabbeditems[item].append(m.group(4))#after the 'tab'i+=1breakelse:items[item].append(line)# Just regular continuationi+=1# added on 2006.02.25else:i+=1# Add the dom elementsforiteminitems:li=self.doc.createElement("li")ul.appendChild(li)self._processSection(li,item,inList+1,looseList=looseList)# Process the remaining part of the sectionself._processSection(parent_elem,lines[i:],inList)def_linesUntil(self,lines,condition):""" A utility function to break a list of lines upon the first line that satisfied a condition. The condition argument should be a predicate function. 
"""i=-1forlineinlines:i+=1ifcondition(line):breakelse:i+=1returnlines[:i],lines[i:]def_processQuote(self,parent_elem,lines,inList):"""Given a list of document lines starting with a quote finds the end of the quote, unindents it and recursively processes the body of the quote and the remainder of the text file. @param parent_elem: DOM element to which the content will be added @param lines: a list of lines @param inList: a level @returns: None """dequoted=[]i=0blank_line=False# allow one blank line between paragraphsforlineinlines:m=RE.regExp['quoted'].match(line)ifm:dequoted.append(m.group(1))i+=1blank_line=Falseelifnotblank_lineandline.strip()!='':dequoted.append(line)i+=1elifnotblank_lineandline.strip()=='':dequoted.append(line)i+=1blank_line=Trueelse:breakblockquote=self.doc.createElement('blockquote')parent_elem.appendChild(blockquote)self._processSection(blockquote,dequoted,inList)self._processSection(parent_elem,lines[i:],inList)def_processCodeBlock(self,parent_elem,lines,inList):"""Given a list of document lines starting with a code block finds the end of the block, puts it into the dom verbatim wrapped in ("<pre><code>") and recursively processes the the remainder of the text file. @param parent_elem: DOM element to which the content will be added @param lines: a list of lines @param inList: a level @returns: None"""detabbed,theRest=self.blockGuru.detectTabbed(lines)pre=self.doc.createElement('pre')code=self.doc.createElement('code')parent_elem.appendChild(pre)pre.appendChild(code)text="\n".join(detabbed).rstrip()+"\n"#text = text.replace("&", "&amp;")code.appendChild(self.doc.createTextNode(text))self._processSection(parent_elem,theRest,inList)def_handleInline(self,line,patternIndex=0):"""Transform a Markdown line with inline elements to an XHTML fragment. This function uses auxiliary objects called inline patterns. See notes on inline patterns above. 
@param line: A line of Markdown text @param patternIndex: The index of the inlinePattern to start with @return: A list of NanoDom nodes """parts=[line]whilepatternIndex<len(self.inlinePatterns):i=0whilei<len(parts):x=parts[i]ifisinstance(x,(str,unicode)):result=self._applyPattern(x, \
self.inlinePatterns[patternIndex], \
patternIndex)ifresult:i-=1parts.remove(x)foryinresult:parts.insert(i+1,y)i+=1patternIndex+=1foriinrange(len(parts)):x=parts[i]ifisinstance(x,(str,unicode)):parts[i]=self.doc.createTextNode(x)returnpartsdef_applyPattern(self,line,pattern,patternIndex):""" Given a pattern name, this function checks if the line fits the pattern, creates the necessary elements, and returns back a list consisting of NanoDom elements and/or strings. @param line: the text to be processed @param pattern: the pattern to be checked @returns: the appropriate newly created NanoDom element if the pattern matches, None otherwise. """# match the line to pattern's pre-compiled reg exp.# if no match, move on.m=pattern.getCompiledRegExp().match(line)ifnotm:returnNone# if we got a match let the pattern make us a NanoDom node# if it doesn't, move onnode=pattern.handleMatch(m,self.doc)# check if any of this nodes have children that need processingifisinstance(node,Element):ifnotnode.nodeNamein["code","pre"]:forchildinnode.childNodes:ifisinstance(child,TextNode):result=self._handleInline(child.value,patternIndex+1)ifresult:ifresult==[child]:continueresult.reverse()#to make insertion easierposition=node.childNodes.index(child)node.removeChild(child)foriteminresult:ifisinstance(item,(str,unicode)):iflen(item)>0:node.insertChild(position,self.doc.createTextNode(item))else:node.insertChild(position,item)ifnode:# Those are in the reverse order!return(m.groups()[-1],# the string to the leftnode,# the new nodem.group(1))# the string to the right of the matchelse:returnNonedefconvert(self,source=None):"""Return the document in XHTML format. 
@returns: A serialized XHTML body."""ifsourceisnotNone:#Allow blank stringself.source=sourceifnotself.source:returnu""try:self.source=unicode(self.source)exceptUnicodeDecodeError:message(CRITICAL,'UnicodeDecodeError: Markdown only accepts unicode or ascii input.')returnu""forppinself.textPreprocessors:self.source=pp.run(self.source)doc=self._transform()xml=doc.toxml()# Return everything but the top level tagifself.stripTopLevelTags:xml=xml.strip()[23:-7]+"\n"forppinself.textPostprocessors:xml=pp.run(xml)return(self.docType+xml).strip()def__str__(self):''' Report info about instance. Markdown always returns unicode. '''ifself.sourceisNone:status='in which no source text has been assinged.'else:status='which contains %d chars and %d line(s) of source.'%\
(len(self.source),self.source.count('\n')+1)return'An instance of "%s" %s'%(self.__class__,status)__unicode__=convert# markdown should always return a unicode string# ====================================================================defmarkdownFromFile(input=None,output=None,extensions=[],encoding=None,message_threshold=CRITICAL,safe=False):globalconsole_hndlrconsole_hndlr.setLevel(message_threshold)message(DEBUG,"input file: %s"%input)ifnotencoding:encoding="utf-8"input_file=codecs.open(input,mode="r",encoding=encoding)text=input_file.read()input_file.close()text=removeBOM(text,encoding)new_text=markdown(text,extensions,safe_mode=safe)ifoutput:output_file=codecs.open(output,"w",encoding=encoding)output_file.write(new_text)output_file.close()else:sys.stdout.write(new_text.encode(encoding))defmarkdown(text,extensions=[],safe_mode=False):message(DEBUG,"in markdown.markdown(), received text:\n%s"%text)extension_names=[]extension_configs={}forextinextensions:pos=ext.find("(")ifpos==-1:extension_names.append(ext)else:name=ext[:pos]extension_names.append(name)pairs=[x.split("=")forxinext[pos+1:-1].split(",")]configs=[(x.strip(),y.strip())for(x,y)inpairs]extension_configs[name]=configsmd=Markdown(extensions=extension_names,extension_configs=extension_configs,safe_mode=safe_mode)returnmd.convert(text)classExtension:def__init__(self,configs={}):self.config=configsdefgetConfig(self,key):ifself.config.has_key(key):returnself.config[key][0]else:return""defgetConfigInfo(self):return[(key,self.config[key][1])forkeyinself.config.keys()]defsetConfig(self,key,value):self.config[key][0]=valueOPTPARSE_WARNING="""Python 2.3 or higher required for advanced command line options.For lower versions of Python use:%s INPUT_FILE > 
OUTPUT_FILE"""%EXECUTABLE_NAME_FOR_USAGEdefparse_options():try:optparse=__import__("optparse")except:iflen(sys.argv)==2:return{'input':sys.argv[1],'output':None,'message_threshold':CRITICAL,'safe':False,'extensions':[],'encoding':None}else:printOPTPARSE_WARNINGreturnNoneparser=optparse.OptionParser(usage="%prog INPUTFILE [options]")parser.add_option("-f","--file",dest="filename",help="write output to OUTPUT_FILE",metavar="OUTPUT_FILE")parser.add_option("-e","--encoding",dest="encoding",help="encoding for input and output files",)parser.add_option("-q","--quiet",default=CRITICAL,action="store_const",const=60,dest="verbose",help="suppress all messages")parser.add_option("-v","--verbose",action="store_const",const=INFO,dest="verbose",help="print info messages")parser.add_option("-s","--safe",dest="safe",default=False,metavar="SAFE_MODE",help="same mode ('replace', 'remove' or 'escape' user's HTML tag)")parser.add_option("--noisy",action="store_const",const=DEBUG,dest="verbose",help="print debug messages")parser.add_option("-x","--extension",action="append",dest="extensions",help="load extension EXTENSION",metavar="EXTENSION")(options,args)=parser.parse_args()ifnotlen(args)==1:parser.print_help()returnNoneelse:input_file=args[0]ifnotoptions.extensions:options.extensions=[]return{'input':input_file,'output':options.filename,'message_threshold':options.verbose,'safe':options.safe,'extensions':options.extensions,'encoding':options.encoding}if__name__=='__main__':""" Run Markdown from the command line. """options=parse_options()#if os.access(inFile, os.R_OK):ifnotoptions:sys.exit(0)markdownFromFile(**options)