Ignore:
Timestamp:
Mar 19, 2014, 11:31:01 PM (11 years ago)
Author:
dmik
Message:

python: Merge vendor 2.7.6 to trunk.

Location:
python/trunk
Files:
2 edited

Legend:

Unmodified
Added
Removed
  • python/trunk

  • python/trunk/Doc/tools/sphinxext/suspicious.py

    r2 r391  
    4242"""
    4343
    44 import os, sys
     44import os
     45import re
    4546import csv
    46 import re
     47import sys
     48
    4749from docutils import nodes
    48 
    49 try:
    50     from sphinx.builders import Builder
    51 except ImportError:
    52     from sphinx.builder import Builder
    53 
     50from sphinx.builders import Builder
    5451
    5552detect_all = re.compile(ur'''
     
    6057    ''', re.UNICODE | re.VERBOSE).finditer
    6158
     59
    6260class Rule:
    6361    def __init__(self, docname, lineno, issue, line):
    64         "A rule for ignoring issues"
     62        """A rule for ignoring issues"""
    6563        self.docname = docname # document to which this rule applies
    6664        self.lineno = lineno   # line number in the original source;
     
    6967        self.issue = issue     # the markup fragment that triggered this rule
    7068        self.line = line       # text of the container element (single line only)
     69        self.used = False
     70
     71    def __repr__(self):
     72        return '{0.docname},,{0.issue},{0.line}'.format(self)
     73
     74
     75
     76class dialect(csv.excel):
     77    """Our dialect: uses only linefeed as newline."""
     78    lineterminator = '\n'
    7179
    7280
    7381class CheckSuspiciousMarkupBuilder(Builder):
    7482    """
    75     Checks for possibly invalid markup that may leak into the output
     83    Checks for possibly invalid markup that may leak into the output.
    7684    """
    7785    name = 'suspicious'
     
    8290        open(self.log_file_name, 'w').close()
    8391        # load database of previously ignored issues
    84         self.load_rules(os.path.join(os.path.dirname(__file__), 'susp-ignored.csv'))
     92        self.load_rules(os.path.join(os.path.dirname(__file__),
     93                                     'susp-ignored.csv'))
    8594
    8695    def get_outdated_docs(self):
     
    91100
    92101    def prepare_writing(self, docnames):
    93         ### PYTHON PROJECT SPECIFIC ###
    94         for name in set(docnames):
    95             if name.split('/', 1)[0] == 'documenting':
    96                 docnames.remove(name)
    97         ### PYTHON PROJECT SPECIFIC ###
     102        pass
    98103
    99104    def write_doc(self, docname, doctree):
    100         self.any_issue = False # set when any issue is encountered in this document
     105        # set when any issue is encountered in this document
     106        self.any_issue = False
    101107        self.docname = docname
    102108        visitor = SuspiciousVisitor(doctree, self)
     
    104110
    105111    def finish(self):
     112        unused_rules = [rule for rule in self.rules if not rule.used]
     113        if unused_rules:
     114            self.warn('Found %s/%s unused rules:' %
     115                      (len(unused_rules), len(self.rules)))
     116            for rule in unused_rules:
     117                self.info(repr(rule))
    106118        return
    107119
     
    111123
    112124    def is_ignored(self, line, lineno, issue):
    113         """Determine whether this issue should be ignored.
    114         """
     125        """Determine whether this issue should be ignored."""
    115126        docname = self.docname
    116127        for rule in self.rules:
     
    129140                abs(rule.lineno - lineno) > 5: continue
    130141            # if it came this far, the rule matched
     142            rule.used = True
    131143            return True
    132144        return False
     
    145157    def write_log_entry(self, lineno, issue, text):
    146158        f = open(self.log_file_name, 'ab')
    147         writer = csv.writer(f)
     159        writer = csv.writer(f, dialect)
    148160        writer.writerow([self.docname.encode('utf-8'),
    149                 lineno,
    150                 issue.encode('utf-8'),
    151                 text.strip().encode('utf-8')])
    152         del writer
     161                         lineno,
     162                         issue.encode('utf-8'),
     163                         text.strip().encode('utf-8')])
    153164        f.close()
    154165
     
    165176        for i, row in enumerate(csv.reader(f)):
    166177            if len(row) != 4:
    167                 raise ValueError("wrong format in %s, line %d: %s" % (filename, i+1, row))
     178                raise ValueError(
     179                    "wrong format in %s, line %d: %s" % (filename, i+1, row))
    168180            docname, lineno, issue, text = row
    169181            docname = docname.decode('utf-8')
     
    179191
    180192def get_lineno(node):
    181     "Obtain line number information for a node"
     193    """Obtain line number information for a node."""
    182194    lineno = None
    183195    while lineno is None and node:
     
    204216    p = text.rfind('\n', 0, index) + 1
    205217    q = text.find('\n', index)
    206     if q<0: q = len(text)
     218    if q < 0:
     219        q = len(text)
    207220    return text[p:q]
    208221
     
    223236            seen = set() # don't report the same issue more than once per line
    224237            for match in detect_all(text):
    225                 #import pdb; pdb.set_trace()
    226238                issue = match.group()
    227239                line = extract_line(text, match.start())
Note: See TracChangeset for help on using the changeset viewer.