Changeset 391 for python/trunk/Lib/logging
- Timestamp:
- Mar 19, 2014, 11:31:01 PM (11 years ago)
- Location:
- python/trunk
- Files:
-
- 4 edited
Legend:
- Unmodified
- Added
- Removed
-
python/trunk
-
Property svn:mergeinfo
set to
/python/vendor/Python-2.7.6 merged eligible /python/vendor/current merged eligible
-
Property svn:mergeinfo
set to
-
python/trunk/Lib/logging/__init__.py
r2 r391 1 # Copyright 2001-20 09by Vinay Sajip. All Rights Reserved.1 # Copyright 2001-2012 by Vinay Sajip. All Rights Reserved. 2 2 # 3 3 # Permission to use, copy, modify, and distribute this software and its … … 17 17 """ 18 18 Logging package for Python. Based on PEP 282 and comments thereto in 19 comp.lang.python , and influenced by Apache's log4j system.20 21 Copyright (C) 2001-20 09Vinay Sajip. All Rights Reserved.19 comp.lang.python. 20 21 Copyright (C) 2001-2012 Vinay Sajip. All Rights Reserved. 22 22 23 23 To use, simply 'import logging' and log away! 24 24 """ 25 25 26 import sys, os, time, cStringIO, traceback, warnings, weakref 27 26 28 __all__ = ['BASIC_FORMAT', 'BufferingFormatter', 'CRITICAL', 'DEBUG', 'ERROR', 27 29 'FATAL', 'FileHandler', 'Filter', 'Formatter', 'Handler', 'INFO', 28 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 30 'LogRecord', 'Logger', 'LoggerAdapter', 'NOTSET', 'NullHandler', 29 31 'StreamHandler', 'WARN', 'WARNING', 'addLevelName', 'basicConfig', 30 'c ritical', 'debug', 'disable', 'error',32 'captureWarnings', 'critical', 'debug', 'disable', 'error', 31 33 'exception', 'fatal', 'getLevelName', 'getLogger', 'getLoggerClass', 32 34 'info', 'log', 'makeLogRecord', 'setLoggerClass', 'warn', 'warning'] 33 34 import sys, os, types, time, string, cStringIO, traceback35 35 36 36 try: … … 47 47 __author__ = "Vinay Sajip <vinay_sajip@red-dove.com>" 48 48 __status__ = "production" 49 __version__ = "0.5. 
0.5"50 __date__ = " 17 February 2009"49 __version__ = "0.5.1.2" 50 __date__ = "07 February 2010" 51 51 52 52 #--------------------------------------------------------------------------- 53 53 # Miscellaneous module data 54 54 #--------------------------------------------------------------------------- 55 try: 56 unicode 57 _unicode = True 58 except NameError: 59 _unicode = False 55 60 56 61 # … … 60 65 if hasattr(sys, 'frozen'): #support for py2exe 61 66 _srcfile = "logging%s__init__%s" % (os.sep, __file__[-4:]) 62 elif string.lower(__file__[-4:]) in ['.pyc', '.pyo']:67 elif __file__[-4:].lower() in ['.pyc', '.pyo']: 63 68 _srcfile = __file__[:-4] + '.py' 64 69 else: … … 72 77 raise Exception 73 78 except: 74 return sys.exc_ traceback.tb_frame.f_back79 return sys.exc_info()[2].tb_frame.f_back 75 80 76 81 if hasattr(sys, '_getframe'): currentframe = lambda: sys._getframe(3) … … 175 180 _releaseLock() 176 181 182 def _checkLevel(level): 183 if isinstance(level, (int, long)): 184 rv = level 185 elif str(level) == level: 186 if level not in _levelNames: 187 raise ValueError("Unknown level: %r" % level) 188 rv = _levelNames[level] 189 else: 190 raise TypeError("Level not an integer or a valid string: %r" % level) 191 return rv 192 177 193 #--------------------------------------------------------------------------- 178 194 # Thread-related stuff … … 181 197 # 182 198 #_lock is used to serialize access to shared data structures in this module. 183 #This needs to be an RLock because fileConfig() creates Handlers and so184 # might arbitrary user threads. Since Handler.__init__() updates the shared185 # dictionary _handlers, it needs to acquire the lock. But if configuring,199 #This needs to be an RLock because fileConfig() creates and configures 200 #Handlers, and so might arbitrary user threads. Since Handler code updates the 201 #shared dictionary _handlers, it needs to acquire the lock. 
But if configuring, 186 202 #the lock would already have been acquired - so we need an RLock. 187 203 #The same argument applies to Loggers and Manager.loggerDict. … … 212 228 #--------------------------------------------------------------------------- 213 229 214 class LogRecord :230 class LogRecord(object): 215 231 """ 216 232 A LogRecord instance represents an event being logged. … … 245 261 # For the use case of passing a dictionary, this should not be a 246 262 # problem. 247 if args and len(args) == 1 and ( 248 type(args[0]) == types.DictType 249 ) and args[0]: 263 if args and len(args) == 1 and isinstance(args[0], dict) and args[0]: 250 264 args = args[0] 251 265 self.args = args … … 274 288 if not logMultiprocessing: 275 289 self.processName = None 276 el if 'multiprocessing' not in sys.modules:290 else: 277 291 self.processName = 'MainProcess' 278 else: 279 self.processName = sys.modules['multiprocessing'].current_process().name 292 mp = sys.modules.get('multiprocessing') 293 if mp is not None: 294 # Errors may occur if multiprocessing has not finished loading 295 # yet - e.g. if a custom import hook causes third-party code 296 # to run when multiprocessing calls import. See issue 8200 297 # for an example 298 try: 299 self.processName = mp.current_process().name 300 except StandardError: 301 pass 280 302 if logProcesses and hasattr(os, 'getpid'): 281 303 self.process = os.getpid() … … 294 316 arguments with the message. 295 317 """ 296 if not hasattr(types, "UnicodeType"): #if no unicode support...318 if not _unicode: #if no unicode support... 
297 319 msg = str(self.msg) 298 320 else: 299 321 msg = self.msg 300 if type(msg) not in (types.UnicodeType, types.StringType):322 if not isinstance(msg, basestring): 301 323 try: 302 324 msg = str(self.msg) … … 322 344 #--------------------------------------------------------------------------- 323 345 324 class Formatter :346 class Formatter(object): 325 347 """ 326 348 Formatter instances are used to convert a LogRecord to text. … … 421 443 return s 422 444 445 def usesTime(self): 446 """ 447 Check if the format uses the creation time of the record. 448 """ 449 return self._fmt.find("%(asctime)") >= 0 450 423 451 def format(self, record): 424 452 """ … … 429 457 Before formatting the dictionary, a couple of preparatory steps 430 458 are carried out. The message attribute of the record is computed 431 using LogRecord.getMessage(). If the formatting string contains432 "%(asctime)", formatTime() is called to format the event time.433 If there is exception information, it is formatted using434 formatException() and appended to the message.459 using LogRecord.getMessage(). If the formatting string uses the 460 time (as determined by a call to usesTime(), formatTime() is 461 called to format the event time. If there is exception information, 462 it is formatted using formatException() and appended to the message. 435 463 """ 436 464 record.message = record.getMessage() 437 if s tring.find(self._fmt,"%(asctime)") >= 0:465 if self.usesTime(): 438 466 record.asctime = self.formatTime(record, self.datefmt) 439 467 s = self._fmt % record.__dict__ … … 446 474 if s[-1:] != "\n": 447 475 s = s + "\n" 448 s = s + record.exc_text 476 try: 477 s = s + record.exc_text 478 except UnicodeError: 479 # Sometimes filenames have non-ASCII chars, which can lead 480 # to errors when s is Unicode and record.exc_text is str 481 # See issue 8924. 482 # We also use replace for when there are multiple 483 # encodings, e.g. UTF-8 for the filesystem and latin-1 484 # for a script. 
See issue 13232. 485 s = s + record.exc_text.decode(sys.getfilesystemencoding(), 486 'replace') 449 487 return s 450 488 … … 454 492 _defaultFormatter = Formatter() 455 493 456 class BufferingFormatter :494 class BufferingFormatter(object): 457 495 """ 458 496 A formatter suitable for formatting a number of records. … … 496 534 #--------------------------------------------------------------------------- 497 535 498 class Filter :536 class Filter(object): 499 537 """ 500 538 Filter instances are used to perform arbitrary filtering of LogRecords. … … 529 567 elif self.name == record.name: 530 568 return 1 531 elif string.find(record.name,self.name, 0, self.nlen) != 0:569 elif record.name.find(self.name, 0, self.nlen) != 0: 532 570 return 0 533 571 return (record.name[self.nlen] == ".") 534 572 535 class Filterer :573 class Filterer(object): 536 574 """ 537 575 A base class for loggers and handlers which allows them to share … … 577 615 #--------------------------------------------------------------------------- 578 616 579 _handlers = {} #repository of handlers (for flushing when shutdown called)617 _handlers = weakref.WeakValueDictionary() #map of handler names to handlers 580 618 _handlerList = [] # added to allow handlers to be removed in reverse of order initialized 619 620 def _removeHandlerRef(wr): 621 """ 622 Remove a handler reference from the internal cleanup list. 623 """ 624 # This function can be called during module teardown, when globals are 625 # set to None. If _acquireLock is None, assume this is the case and do 626 # nothing. 627 if (_acquireLock is not None and _handlerList is not None and 628 _releaseLock is not None): 629 _acquireLock() 630 try: 631 if wr in _handlerList: 632 _handlerList.remove(wr) 633 finally: 634 _releaseLock() 635 636 def _addHandlerRef(handler): 637 """ 638 Add a handler to the internal cleanup list using a weak reference. 
639 """ 640 _acquireLock() 641 try: 642 _handlerList.append(weakref.ref(handler, _removeHandlerRef)) 643 finally: 644 _releaseLock() 581 645 582 646 class Handler(Filterer): … … 595 659 """ 596 660 Filterer.__init__(self) 597 self.level = level 661 self._name = None 662 self.level = _checkLevel(level) 598 663 self.formatter = None 599 #get the module data lock, as we're updating a shared structure. 664 # Add the handler to the global _handlerList (for cleanup on shutdown) 665 _addHandlerRef(self) 666 self.createLock() 667 668 def get_name(self): 669 return self._name 670 671 def set_name(self, name): 600 672 _acquireLock() 601 try: #unlikely to raise an exception, but you never know... 602 _handlers[self] = 1 603 _handlerList.insert(0, self) 673 try: 674 if self._name in _handlers: 675 del _handlers[self._name] 676 self._name = name 677 if name: 678 _handlers[name] = self 604 679 finally: 605 680 _releaseLock() 606 self.createLock() 681 682 name = property(get_name, set_name) 607 683 608 684 def createLock(self): … … 633 709 Set the logging level of this handler. 634 710 """ 635 self.level = level711 self.level = _checkLevel(level) 636 712 637 713 def format(self, record): … … 655 731 raises a NotImplementedError. 656 732 """ 657 raise NotImplementedError , 'emit must be implemented '\658 'by Handler subclasses'733 raise NotImplementedError('emit must be implemented ' 734 'by Handler subclasses') 659 735 660 736 def handle(self, record): … … 695 771 Tidy up any resources used by the handler. 696 772 697 This version does removes the handler from an internal list698 of handlers which is closed when shutdown() is called. Subclasses773 This version removes the handler from an internal map of handlers, 774 _handlers, which is used for handler lookup by name. Subclasses 699 775 should ensure that this gets called from overridden close() 700 776 methods. … … 703 779 _acquireLock() 704 780 try: #unlikely to raise an exception, but you never know... 
705 del _handlers[self]706 _handlerList.remove(self)781 if self._name and self._name in _handlers: 782 del _handlers[self._name] 707 783 finally: 708 784 _releaseLock() … … 720 796 The record which was being processed is passed in to this method. 721 797 """ 722 if raiseExceptions :798 if raiseExceptions and sys.stderr: # see issue 13807 723 799 ei = sys.exc_info() 724 800 try: 725 traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr) 801 traceback.print_exception(ei[0], ei[1], ei[2], 802 None, sys.stderr) 803 sys.stderr.write('Logged from file %s, line %s\n' % ( 804 record.filename, record.lineno)) 726 805 except IOError: 727 806 pass # see issue 5971 … … 736 815 """ 737 816 738 def __init__(self, str m=None):817 def __init__(self, stream=None): 739 818 """ 740 819 Initialize the handler. 741 820 742 If str m is not specified, sys.stderr is used.821 If stream is not specified, sys.stderr is used. 743 822 """ 744 823 Handler.__init__(self) 745 if str m is None:746 str m = sys.stderr747 self.stream = str m824 if stream is None: 825 stream = sys.stderr 826 self.stream = stream 748 827 749 828 def flush(self): … … 751 830 Flushes the stream. 752 831 """ 753 if self.stream and hasattr(self.stream, "flush"): 754 self.stream.flush() 832 self.acquire() 833 try: 834 if self.stream and hasattr(self.stream, "flush"): 835 self.stream.flush() 836 finally: 837 self.release() 755 838 756 839 def emit(self, record): … … 762 845 exception information is present, it is formatted using 763 846 traceback.print_exception and appended to the stream. If the stream 764 has an 'encoding' attribute, it is used to encode the message before847 has an 'encoding' attribute, it is used to determine how to do the 765 848 output to the stream. 766 849 """ … … 769 852 stream = self.stream 770 853 fs = "%s\n" 771 if not hasattr(types, "UnicodeType"): #if no unicode support...854 if not _unicode: #if no unicode support... 
772 855 stream.write(fs % msg) 773 856 else: … … 775 858 if (isinstance(msg, unicode) and 776 859 getattr(stream, 'encoding', None)): 777 fs = fs.decode(stream.encoding)860 ufs = u'%s\n' 778 861 try: 779 stream.write( fs % msg)862 stream.write(ufs % msg) 780 863 except UnicodeEncodeError: 781 864 #Printing to terminals sometimes fails. For example, … … 785 868 #terminal even when the codepage is set to cp1251. 786 869 #An extra encoding step seems to be needed. 787 stream.write(( fs % msg).encode(stream.encoding))870 stream.write((ufs % msg).encode(stream.encoding)) 788 871 else: 789 872 stream.write(fs % msg) … … 811 894 self.mode = mode 812 895 self.encoding = encoding 896 self.delay = delay 813 897 if delay: 814 898 #We don't open the stream, but we still need to call the … … 823 907 Closes the stream. 824 908 """ 825 if self.stream: 826 self.flush() 827 if hasattr(self.stream, "close"): 828 self.stream.close() 829 StreamHandler.close(self) 830 self.stream = None 909 self.acquire() 910 try: 911 if self.stream: 912 self.flush() 913 if hasattr(self.stream, "close"): 914 self.stream.close() 915 StreamHandler.close(self) 916 self.stream = None 917 finally: 918 self.release() 831 919 832 920 def _open(self): … … 856 944 #--------------------------------------------------------------------------- 857 945 858 class PlaceHolder :946 class PlaceHolder(object): 859 947 """ 860 948 PlaceHolder instances are used in the Manager logger hierarchy to take … … 891 979 if klass != Logger: 892 980 if not issubclass(klass, Logger): 893 raise TypeError , "logger not derived from logging.Logger: " + \894 klass.__name__981 raise TypeError("logger not derived from logging.Logger: " 982 + klass.__name__) 895 983 global _loggerClass 896 984 _loggerClass = klass … … 903 991 return _loggerClass 904 992 905 class Manager :993 class Manager(object): 906 994 """ 907 995 There is [under normal circumstances] just one Manager instance, which … … 916 1004 self.emittedNoHandlerWarning = 0 917 
1005 self.loggerDict = {} 1006 self.loggerClass = None 918 1007 919 1008 def getLogger(self, name): … … 929 1018 """ 930 1019 rv = None 1020 if not isinstance(name, basestring): 1021 raise TypeError('A logger name must be string or Unicode') 1022 if isinstance(name, unicode): 1023 name = name.encode('utf-8') 931 1024 _acquireLock() 932 1025 try: … … 935 1028 if isinstance(rv, PlaceHolder): 936 1029 ph = rv 937 rv = _loggerClass(name)1030 rv = (self.loggerClass or _loggerClass)(name) 938 1031 rv.manager = self 939 1032 self.loggerDict[name] = rv … … 941 1034 self._fixupParents(rv) 942 1035 else: 943 rv = _loggerClass(name)1036 rv = (self.loggerClass or _loggerClass)(name) 944 1037 rv.manager = self 945 1038 self.loggerDict[name] = rv … … 949 1042 return rv 950 1043 1044 def setLoggerClass(self, klass): 1045 """ 1046 Set the class to be used when instantiating a logger with this Manager. 1047 """ 1048 if klass != Logger: 1049 if not issubclass(klass, Logger): 1050 raise TypeError("logger not derived from logging.Logger: " 1051 + klass.__name__) 1052 self.loggerClass = klass 1053 951 1054 def _fixupParents(self, alogger): 952 1055 """ … … 955 1058 """ 956 1059 name = alogger.name 957 i = string.rfind(name,".")1060 i = name.rfind(".") 958 1061 rv = None 959 1062 while (i > 0) and not rv: … … 968 1071 assert isinstance(obj, PlaceHolder) 969 1072 obj.append(alogger) 970 i = string.rfind(name,".", 0, i - 1)1073 i = name.rfind(".", 0, i - 1) 971 1074 if not rv: 972 1075 rv = self.root … … 982 1085 for c in ph.loggerMap.keys(): 983 1086 #The if means ... if not c.parent.name.startswith(nm) 984 #if string.find(c.parent.name, nm) <> 0:985 1087 if c.parent.name[:namelen] != name: 986 1088 alogger.parent = c.parent … … 1012 1114 Filterer.__init__(self) 1013 1115 self.name = name 1014 self.level = level1116 self.level = _checkLevel(level) 1015 1117 self.parent = None 1016 1118 self.propagate = 1 … … 1022 1124 Set the logging level of this logger. 
1023 1125 """ 1024 self.level = level1126 self.level = _checkLevel(level) 1025 1127 1026 1128 def debug(self, msg, *args, **kwargs): … … 1074 1176 self._log(ERROR, msg, args, **kwargs) 1075 1177 1076 def exception(self, msg, *args ):1178 def exception(self, msg, *args, **kwargs): 1077 1179 """ 1078 1180 Convenience method for logging an ERROR with exception information. 1079 1181 """ 1080 self.error(*((msg,) + args), **{'exc_info': 1}) 1182 kwargs['exc_info'] = 1 1183 self.error(msg, *args, **kwargs) 1081 1184 1082 1185 def critical(self, msg, *args, **kwargs): … … 1103 1206 logger.log(level, "We have a %s", "mysterious problem", exc_info=1) 1104 1207 """ 1105 if type(level) != types.IntType:1208 if not isinstance(level, int): 1106 1209 if raiseExceptions: 1107 raise TypeError , "level must be an integer"1210 raise TypeError("level must be an integer") 1108 1211 else: 1109 1212 return … … 1128 1231 f = f.f_back 1129 1232 continue 1130 rv = ( filename, f.f_lineno, co.co_name)1233 rv = (co.co_filename, f.f_lineno, co.co_name) 1131 1234 break 1132 1235 return rv … … 1151 1254 """ 1152 1255 if _srcfile: 1153 #IronPython doesn't track Python frames, so findCaller throws an 1154 #exception. We trap it here so that IronPython can use logging. 1256 #IronPython doesn't track Python frames, so findCaller raises an 1257 #exception on some versions of IronPython. We trap it here so that 1258 #IronPython can use logging. 1155 1259 try: 1156 1260 fn, lno, func = self.findCaller() … … 1160 1264 fn, lno, func = "(unknown file)", 0, "(unknown function)" 1161 1265 if exc_info: 1162 if type(exc_info) != types.TupleType:1266 if not isinstance(exc_info, tuple): 1163 1267 exc_info = sys.exc_info() 1164 1268 record = self.makeRecord(self.name, level, fn, lno, msg, args, exc_info, func, extra) … … 1179 1283 Add the specified handler to this logger. 
1180 1284 """ 1181 if not (hdlr in self.handlers): 1182 self.handlers.append(hdlr) 1285 _acquireLock() 1286 try: 1287 if not (hdlr in self.handlers): 1288 self.handlers.append(hdlr) 1289 finally: 1290 _releaseLock() 1183 1291 1184 1292 def removeHandler(self, hdlr): … … 1186 1294 Remove the specified handler from this logger. 1187 1295 """ 1188 if hdlr in self.handlers: 1189 #hdlr.close() 1190 hdlr.acquire() 1191 try: 1296 _acquireLock() 1297 try: 1298 if hdlr in self.handlers: 1192 1299 self.handlers.remove(hdlr) 1193 1194 hdlr.release()1300 finally: 1301 _releaseLock() 1195 1302 1196 1303 def callHandlers(self, record): … … 1242 1349 return level >= self.getEffectiveLevel() 1243 1350 1351 def getChild(self, suffix): 1352 """ 1353 Get a logger which is a descendant to this one. 1354 1355 This is a convenience method, such that 1356 1357 logging.getLogger('abc').getChild('def.ghi') 1358 1359 is the same as 1360 1361 logging.getLogger('abc.def.ghi') 1362 1363 It's useful, for example, when the parent logger is named using 1364 __name__ rather than a literal string. 1365 """ 1366 if self.root is not self: 1367 suffix = '.'.join((self.name, suffix)) 1368 return self.manager.getLogger(suffix) 1369 1244 1370 class RootLogger(Logger): 1245 1371 """ … … 1256 1382 _loggerClass = Logger 1257 1383 1258 class LoggerAdapter :1384 class LoggerAdapter(object): 1259 1385 """ 1260 1386 An adapter for loggers which makes it easier to specify contextual … … 1345 1471 msg, kwargs = self.process(msg, kwargs) 1346 1472 self.logger.log(level, msg, *args, **kwargs) 1473 1474 def isEnabledFor(self, level): 1475 """ 1476 See if the underlying logger is enabled for the specified level. 1477 """ 1478 return self.logger.isEnabledFor(level) 1347 1479 1348 1480 root = RootLogger(WARNING) … … 1388 1520 when the handler is closed. 
1389 1521 """ 1390 if len(root.handlers) == 0: 1391 filename = kwargs.get("filename") 1392 if filename: 1393 mode = kwargs.get("filemode", 'a') 1394 hdlr = FileHandler(filename, mode) 1395 else: 1396 stream = kwargs.get("stream") 1397 hdlr = StreamHandler(stream) 1398 fs = kwargs.get("format", BASIC_FORMAT) 1399 dfs = kwargs.get("datefmt", None) 1400 fmt = Formatter(fs, dfs) 1401 hdlr.setFormatter(fmt) 1402 root.addHandler(hdlr) 1403 level = kwargs.get("level") 1404 if level is not None: 1405 root.setLevel(level) 1522 # Add thread safety in case someone mistakenly calls 1523 # basicConfig() from multiple threads 1524 _acquireLock() 1525 try: 1526 if len(root.handlers) == 0: 1527 filename = kwargs.get("filename") 1528 if filename: 1529 mode = kwargs.get("filemode", 'a') 1530 hdlr = FileHandler(filename, mode) 1531 else: 1532 stream = kwargs.get("stream") 1533 hdlr = StreamHandler(stream) 1534 fs = kwargs.get("format", BASIC_FORMAT) 1535 dfs = kwargs.get("datefmt", None) 1536 fmt = Formatter(fs, dfs) 1537 hdlr.setFormatter(fmt) 1538 root.addHandler(hdlr) 1539 level = kwargs.get("level") 1540 if level is not None: 1541 root.setLevel(level) 1542 finally: 1543 _releaseLock() 1406 1544 1407 1545 #--------------------------------------------------------------------------- … … 1436 1574 if len(root.handlers) == 0: 1437 1575 basicConfig() 1438 root.critical( *((msg,)+args), **kwargs)1576 root.critical(msg, *args, **kwargs) 1439 1577 1440 1578 fatal = critical … … 1446 1584 if len(root.handlers) == 0: 1447 1585 basicConfig() 1448 root.error( *((msg,)+args), **kwargs)1449 1450 def exception(msg, *args ):1586 root.error(msg, *args, **kwargs) 1587 1588 def exception(msg, *args, **kwargs): 1451 1589 """ 1452 1590 Log a message with severity 'ERROR' on the root logger, 1453 1591 with exception information. 
1454 1592 """ 1455 error(*((msg,)+args), **{'exc_info': 1}) 1593 kwargs['exc_info'] = 1 1594 error(msg, *args, **kwargs) 1456 1595 1457 1596 def warning(msg, *args, **kwargs): … … 1461 1600 if len(root.handlers) == 0: 1462 1601 basicConfig() 1463 root.warning( *((msg,)+args), **kwargs)1602 root.warning(msg, *args, **kwargs) 1464 1603 1465 1604 warn = warning … … 1471 1610 if len(root.handlers) == 0: 1472 1611 basicConfig() 1473 root.info( *((msg,)+args), **kwargs)1612 root.info(msg, *args, **kwargs) 1474 1613 1475 1614 def debug(msg, *args, **kwargs): … … 1479 1618 if len(root.handlers) == 0: 1480 1619 basicConfig() 1481 root.debug( *((msg,)+args), **kwargs)1620 root.debug(msg, *args, **kwargs) 1482 1621 1483 1622 def log(level, msg, *args, **kwargs): … … 1487 1626 if len(root.handlers) == 0: 1488 1627 basicConfig() 1489 root.log( *((level, msg)+args), **kwargs)1628 root.log(level, msg, *args, **kwargs) 1490 1629 1491 1630 def disable(level): 1492 1631 """ 1493 Disable all logging calls less severe than 'level'.1632 Disable all logging calls of severity 'level' and below. 1494 1633 """ 1495 1634 root.manager.disable = level … … 1502 1641 Should be called at application exit. 1503 1642 """ 1504 for h in handlerList[:]:1643 for wr in reversed(handlerList[:]): 1505 1644 #errors might occur, for example, if files are locked 1506 1645 #we just ignore them if raiseExceptions is not set 1507 1646 try: 1508 h.flush() 1509 h.close() 1647 h = wr() 1648 if h: 1649 try: 1650 h.acquire() 1651 h.flush() 1652 h.close() 1653 except (IOError, ValueError): 1654 # Ignore errors which might be caused 1655 # because handlers have been closed but 1656 # references to them are still around at 1657 # application exit. 1658 pass 1659 finally: 1660 h.release() 1510 1661 except: 1511 1662 if raiseExceptions: … … 1514 1665 1515 1666 #Let's try and shutdown automatically on application exit... 
1516 try: 1517 import atexit 1518 atexit.register(shutdown) 1519 except ImportError: # for Python versions < 2.0 1520 def exithook(status, old_exit=sys.exit): 1521 try: 1522 shutdown() 1523 finally: 1524 old_exit(status) 1525 1526 sys.exit = exithook 1667 import atexit 1668 atexit.register(shutdown) 1669 1670 # Null handler 1671 1672 class NullHandler(Handler): 1673 """ 1674 This handler does nothing. It's intended to be used to avoid the 1675 "No handlers could be found for logger XXX" one-off warning. This is 1676 important for library code, which may contain code to log events. If a user 1677 of the library does not configure logging, the one-off warning might be 1678 produced; to avoid this, the library developer simply needs to instantiate 1679 a NullHandler and add it to the top-level logger of the library module or 1680 package. 1681 """ 1682 def handle(self, record): 1683 pass 1684 1685 def emit(self, record): 1686 pass 1687 1688 def createLock(self): 1689 self.lock = None 1690 1691 # Warnings integration 1692 1693 _warnings_showwarning = None 1694 1695 def _showwarning(message, category, filename, lineno, file=None, line=None): 1696 """ 1697 Implementation of showwarnings which redirects to logging, which will first 1698 check to see if the file parameter is None. If a file is specified, it will 1699 delegate to the original warnings implementation of showwarning. Otherwise, 1700 it will call warnings.formatwarning and will log the resulting string to a 1701 warnings logger named "py.warnings" with level logging.WARNING. 
1702 """ 1703 if file is not None: 1704 if _warnings_showwarning is not None: 1705 _warnings_showwarning(message, category, filename, lineno, file, line) 1706 else: 1707 s = warnings.formatwarning(message, category, filename, lineno, line) 1708 logger = getLogger("py.warnings") 1709 if not logger.handlers: 1710 logger.addHandler(NullHandler()) 1711 logger.warning("%s", s) 1712 1713 def captureWarnings(capture): 1714 """ 1715 If capture is true, redirect all warnings to the logging package. 1716 If capture is False, ensure that warnings are not redirected to logging 1717 but to their original destinations. 1718 """ 1719 global _warnings_showwarning 1720 if capture: 1721 if _warnings_showwarning is None: 1722 _warnings_showwarning = warnings.showwarning 1723 warnings.showwarning = _showwarning 1724 else: 1725 if _warnings_showwarning is not None: 1726 warnings.showwarning = _warnings_showwarning 1727 _warnings_showwarning = None -
python/trunk/Lib/logging/config.py
r2 r391 1 # Copyright 2001-20 07by Vinay Sajip. All Rights Reserved.1 # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved. 2 2 # 3 3 # Permission to use, copy, modify, and distribute this software and its … … 20 20 by Apache's log4j system. 21 21 22 Should work under Python versions >= 1.5.2, except that source line 23 information is not available unless 'sys._getframe()' is. 24 25 Copyright (C) 2001-2008 Vinay Sajip. All Rights Reserved. 22 Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved. 26 23 27 24 To use, simply 'import logging' and log away! 28 25 """ 29 26 30 import sys, logging, logging.handlers, string, socket, struct, os, traceback, types 27 import sys, logging, logging.handlers, socket, struct, os, traceback, re 28 import types, cStringIO 31 29 32 30 try: … … 53 51 _listener = None 54 52 55 def fileConfig(fname, defaults=None, disable_existing_loggers= 1):53 def fileConfig(fname, defaults=None, disable_existing_loggers=True): 56 54 """ 57 55 Read the logging configuration from a ConfigParser-format file. … … 61 59 developer provides a mechanism to present the choices and load the chosen 62 60 configuration). 
63 In versions of ConfigParser which have the readfp method [typically64 shipped in 2.x versions of Python], you can pass in a file-like object65 rather than a filename, in which case the file-like object will be read66 using readfp.67 61 """ 68 62 import ConfigParser 69 63 70 64 cp = ConfigParser.ConfigParser(defaults) 71 if hasattr( cp, 'readfp') and hasattr(fname, 'readline'):65 if hasattr(fname, 'readline'): 72 66 cp.readfp(fname) 73 67 else: … … 90 84 def _resolve(name): 91 85 """Resolve a dotted name to a global object.""" 92 name = string.split(name,'.')86 name = name.split('.') 93 87 used = name.pop(0) 94 88 found = __import__(used) … … 103 97 104 98 def _strip_spaces(alist): 105 return map(lambda x: string.strip(x), alist) 99 return map(lambda x: x.strip(), alist) 100 101 def _encoded(s): 102 return s if isinstance(s, str) else s.encode('utf-8') 106 103 107 104 def _create_formatters(cp): … … 110 107 if not len(flist): 111 108 return {} 112 flist = string.split(flist,",")109 flist = flist.split(",") 113 110 flist = _strip_spaces(flist) 114 111 formatters = {} … … 139 136 if not len(hlist): 140 137 return {} 141 hlist = string.split(hlist,",")138 hlist = hlist.split(",") 142 139 hlist = _strip_spaces(hlist) 143 140 handlers = {} … … 182 179 # configure the root first 183 180 llist = cp.get("loggers", "keys") 184 llist = string.split(llist,",")185 llist = map(lambda x: string.strip(x), llist)181 llist = llist.split(",") 182 llist = list(map(lambda x: x.strip(), llist)) 186 183 llist.remove("root") 187 184 sectname = "logger_root" … … 196 193 hlist = cp.get(sectname, "handlers") 197 194 if len(hlist): 198 hlist = string.split(hlist,",")195 hlist = hlist.split(",") 199 196 hlist = _strip_spaces(hlist) 200 197 for hand in hlist: … … 210 207 #which were in the previous configuration but 211 208 #which are not in the new configuration. 
212 existing = root.manager.loggerDict.keys()209 existing = list(root.manager.loggerDict.keys()) 213 210 #The list needs to be sorted so that we can 214 211 #avoid disabling child loggers of explicitly … … 230 227 logger = logging.getLogger(qn) 231 228 if qn in existing: 232 i = existing.index(qn) 229 i = existing.index(qn) + 1 # start with the entry after qn 233 230 prefixed = qn + "." 234 231 pflen = len(prefixed) 235 232 num_existing = len(existing) 236 i = i + 1 # look at the entry after qn237 while (i < num_existing) and (existing[i][:pflen] == prefixed):238 child_loggers.append(existing[i])239 i = i +1233 while i < num_existing: 234 if existing[i][:pflen] == prefixed: 235 child_loggers.append(existing[i]) 236 i += 1 240 237 existing.remove(qn) 241 238 if "level" in opts: … … 248 245 hlist = cp.get(sectname, "handlers") 249 246 if len(hlist): 250 hlist = string.split(hlist,",")247 hlist = hlist.split(",") 251 248 hlist = _strip_spaces(hlist) 252 249 for hand in hlist: … … 264 261 logger.handlers = [] 265 262 logger.propagate = 1 266 elif disable_existing_loggers: 267 logger.disabled = 1 263 else: 264 logger.disabled = disable_existing_loggers 265 266 267 268 IDENTIFIER = re.compile('^[a-z_][a-z0-9_]*$', re.I) 269 270 271 def valid_ident(s): 272 m = IDENTIFIER.match(s) 273 if not m: 274 raise ValueError('Not a valid Python identifier: %r' % s) 275 return True 276 277 278 # The ConvertingXXX classes are wrappers around standard Python containers, 279 # and they serve to convert any suitable values in the container. The 280 # conversion converts base dicts, lists and tuples to their wrapped 281 # equivalents, whereas strings which match a conversion format are converted 282 # appropriately. 283 # 284 # Each wrapper should have a configurator attribute holding the actual 285 # configurator to use for conversion. 
class ConvertingDict(dict):
    """A converting dictionary wrapper.

    Values looked up through this dict are passed through the owning
    configurator's convert() method, so embedded dicts/lists/tuples become
    Converting* wrappers and 'ext://' / 'cfg://' strings are resolved
    lazily, on first access.
    """

    def __getitem__(self, key):
        value = dict.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def get(self, key, default=None):
        value = dict.get(self, key, default)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, key, default=None):
        # Note: popped values are converted but (unlike __getitem__/get)
        # not written back, since the key is being removed anyway.
        value = dict.pop(self, key, default)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

class ConvertingList(list):
    """A converting list wrapper (see ConvertingDict)."""
    def __getitem__(self, key):
        value = list.__getitem__(self, key)
        result = self.configurator.convert(value)
        #If the converted value is different, save for next time
        if value is not result:
            self[key] = result
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

    def pop(self, idx=-1):
        value = list.pop(self, idx)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
        return result

class ConvertingTuple(tuple):
    """A converting tuple wrapper (see ConvertingDict).

    Tuples are immutable, so converted values cannot be cached back into
    the container; conversion happens on every access.
    """
    def __getitem__(self, key):
        value = tuple.__getitem__(self, key)
        result = self.configurator.convert(value)
        if value is not result:
            if type(result) in (ConvertingDict, ConvertingList,
                                ConvertingTuple):
                result.parent = self
                result.key = key
        return result

class BaseConfigurator(object):
    """
    The configurator base class which defines some useful defaults.
    """

    # Matches 'prefix://suffix' strings, e.g. 'ext://sys.stderr'.
    CONVERT_PATTERN = re.compile(r'^(?P<prefix>[a-z]+)://(?P<suffix>.*)$')

    # Components of a cfg:// path: words, dotted attributes, [index] access.
    WORD_PATTERN = re.compile(r'^\s*(\w+)\s*')
    DOT_PATTERN = re.compile(r'^\.\s*(\w+)\s*')
    INDEX_PATTERN = re.compile(r'^\[\s*(\w+)\s*\]\s*')
    DIGIT_PATTERN = re.compile(r'^\d+$')

    # Maps a URL-style prefix to the name of the converter method used.
    value_converters = {
        'ext' : 'ext_convert',
        'cfg' : 'cfg_convert',
    }

    # We might want to use a different one, e.g. importlib
    importer = __import__

    def __init__(self, config):
        """Wrap *config* (a dict) so lookups run through convert()."""
        self.config = ConvertingDict(config)
        self.config.configurator = self
        # Issue 12718: winpdb replaces __import__ with a Python function, which
        # ends up being treated as a bound method. To avoid problems, we
        # set the importer on the instance, but leave it defined in the class
        # so existing code doesn't break
        if type(__import__) == types.FunctionType:
            self.importer = __import__

    def resolve(self, s):
        """
        Resolve strings to objects using standard import and attribute
        syntax.

        For 'a.b.c', imports 'a' and then walks attributes, importing
        deeper modules on AttributeError (so 'a.b' as a submodule works).
        Raises ValueError (with __cause__/__traceback__ set) on failure.
        """
        name = s.split('.')
        used = name.pop(0)
        try:
            found = self.importer(used)
            for frag in name:
                used += '.' + frag
                try:
                    found = getattr(found, frag)
                except AttributeError:
                    # frag may be a submodule not yet imported; import the
                    # dotted path and retry the attribute lookup.
                    self.importer(used)
                    found = getattr(found, frag)
            return found
        except ImportError:
            e, tb = sys.exc_info()[1:]
            v = ValueError('Cannot resolve %r: %s' % (s, e))
            v.__cause__, v.__traceback__ = e, tb
            raise v

    def ext_convert(self, value):
        """Default converter for the ext:// protocol."""
        return self.resolve(value)

    def cfg_convert(self, value):
        """Default converter for the cfg:// protocol.

        Walks a path such as 'handlers.email[0]' through self.config,
        supporting dotted access and [index] access (numeric indices are
        tried as integers first).
        """
        rest = value
        m = self.WORD_PATTERN.match(rest)
        if m is None:
            raise ValueError("Unable to convert %r" % value)
        else:
            rest = rest[m.end():]
            d = self.config[m.groups()[0]]
            while rest:
                m = self.DOT_PATTERN.match(rest)
                if m:
                    d = d[m.groups()[0]]
                else:
                    m = self.INDEX_PATTERN.match(rest)
                    if m:
                        idx = m.groups()[0]
                        if not self.DIGIT_PATTERN.match(idx):
                            d = d[idx]
                        else:
                            try:
                                n = int(idx) # try as number first (most likely)
                                d = d[n]
                            except TypeError:
                                d = d[idx]
                if m:
                    rest = rest[m.end():]
                else:
                    raise ValueError('Unable to convert '
                                     '%r at %r' % (value, rest))
        #rest should be empty
        return d

    def convert(self, value):
        """
        Convert values to an appropriate type. dicts, lists and tuples are
        replaced by their converting alternatives. Strings are checked to
        see if they have a conversion format and are converted if they do.
        """
        if not isinstance(value, ConvertingDict) and isinstance(value, dict):
            value = ConvertingDict(value)
            value.configurator = self
        elif not isinstance(value, ConvertingList) and isinstance(value, list):
            value = ConvertingList(value)
            value.configurator = self
        elif not isinstance(value, ConvertingTuple) and\
                 isinstance(value, tuple):
            value = ConvertingTuple(value)
            value.configurator = self
        elif isinstance(value, basestring): # str for py3k
            m = self.CONVERT_PATTERN.match(value)
            if m:
                d = m.groupdict()
                prefix = d['prefix']
                converter = self.value_converters.get(prefix, None)
                if converter:
                    suffix = d['suffix']
                    converter = getattr(self, converter)
                    value = converter(suffix)
        return value

    def configure_custom(self, config):
        """Configure an object with a user-supplied factory.

        config['()'] names (or is) the factory; remaining valid-identifier
        keys become keyword arguments; the optional '.' key is a dict of
        attributes set on the result afterwards.
        """
        c = config.pop('()')
        if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
            c = self.resolve(c)
        props = config.pop('.', None)
        # Check for valid identifiers
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        result = c(**kwargs)
        if props:
            for name, value in props.items():
                setattr(result, name, value)
        return result

    def as_tuple(self, value):
        """Utility function which converts lists to tuples."""
        if isinstance(value, list):
            value = tuple(value)
        return value
class DictConfigurator(BaseConfigurator):
    """
    Configure logging using a dictionary-like object to describe the
    configuration.
    """

    def configure(self):
        """Do the configuration.

        Validates the schema version, then configures either incrementally
        (levels/propagation of existing handlers and loggers only) or from
        scratch (formatters, filters, handlers, loggers, root) under the
        module configuration lock.
        """

        config = self.config
        if 'version' not in config:
            raise ValueError("dictionary doesn't specify a version")
        if config['version'] != 1:
            raise ValueError("Unsupported version: %s" % config['version'])
        incremental = config.pop('incremental', False)
        EMPTY_DICT = {}
        logging._acquireLock()
        try:
            if incremental:
                handlers = config.get('handlers', EMPTY_DICT)
                for name in handlers:
                    if name not in logging._handlers:
                        raise ValueError('No handler found with '
                                         'name %r'  % name)
                    else:
                        try:
                            handler = logging._handlers[name]
                            handler_config = handlers[name]
                            level = handler_config.get('level', None)
                            if level:
                                handler.setLevel(logging._checkLevel(level))
                        except StandardError as e:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    try:
                        self.configure_logger(name, loggers[name], True)
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root, True)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
            else:
                disable_existing = config.pop('disable_existing_loggers', True)

                logging._handlers.clear()
                del logging._handlerList[:]

                # Do formatters first - they don't refer to anything else
                formatters = config.get('formatters', EMPTY_DICT)
                for name in formatters:
                    try:
                        formatters[name] = self.configure_formatter(
                                                            formatters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'formatter %r: %s' % (name, e))
                # Next, do filters - they don't refer to anything else, either
                filters = config.get('filters', EMPTY_DICT)
                for name in filters:
                    try:
                        filters[name] = self.configure_filter(filters[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure '
                                         'filter %r: %s' % (name, e))

                # Next, do handlers - they refer to formatters and filters
                # As handlers can refer to other handlers, sort the keys
                # to allow a deterministic order of configuration
                handlers = config.get('handlers', EMPTY_DICT)
                deferred = []
                for name in sorted(handlers):
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        if 'target not configured yet' in str(e):
                            deferred.append(name)
                        else:
                            raise ValueError('Unable to configure handler '
                                             '%r: %s' % (name, e))

                # Now do any that were deferred
                for name in deferred:
                    try:
                        handler = self.configure_handler(handlers[name])
                        handler.name = name
                        handlers[name] = handler
                    except StandardError as e:
                        raise ValueError('Unable to configure handler '
                                         '%r: %s' % (name, e))

                # Next, do loggers - they refer to handlers and filters

                #we don't want to lose the existing loggers,
                #since other threads may have pointers to them.
                #existing is set to contain all existing loggers,
                #and as we go through the new configuration we
                #remove any which are configured. At the end,
                #what's left in existing is the set of loggers
                #which were in the previous configuration but
                #which are not in the new configuration.
                root = logging.root
                existing = root.manager.loggerDict.keys()
                #The list needs to be sorted so that we can
                #avoid disabling child loggers of explicitly
                #named loggers. With a sorted list it is easier
                #to find the child loggers.
                existing.sort()
                #We'll keep the list of existing loggers
                #which are children of named loggers here...
                child_loggers = []
                #now set up the new ones...
                loggers = config.get('loggers', EMPTY_DICT)
                for name in loggers:
                    name = _encoded(name)
                    if name in existing:
                        i = existing.index(name)
                        prefixed = name + "."
                        pflen = len(prefixed)
                        num_existing = len(existing)
                        i = i + 1 # look at the entry after name
                        while (i < num_existing) and\
                              (existing[i][:pflen] == prefixed):
                            child_loggers.append(existing[i])
                            i = i + 1
                        existing.remove(name)
                    try:
                        self.configure_logger(name, loggers[name])
                    except StandardError as e:
                        raise ValueError('Unable to configure logger '
                                         '%r: %s' % (name, e))

                #Disable any old loggers. There's no point deleting
                #them as other threads may continue to hold references
                #and by disabling them, you stop them doing any logging.
                #However, don't disable children of named loggers, as that's
                #probably not what was intended by the user.
                for log in existing:
                    logger = root.manager.loggerDict[log]
                    if log in child_loggers:
                        logger.level = logging.NOTSET
                        logger.handlers = []
                        logger.propagate = True
                    elif disable_existing:
                        logger.disabled = True

                # And finally, do the root logger
                root = config.get('root', None)
                if root:
                    try:
                        self.configure_root(root)
                    except StandardError as e:
                        raise ValueError('Unable to configure root '
                                         'logger: %s' % e)
        finally:
            logging._releaseLock()

    def configure_formatter(self, config):
        """Configure a formatter from a dictionary."""
        if '()' in config:
            factory = config['()'] # for use in exception handler
            try:
                result = self.configure_custom(config)
            except TypeError as te:
                if "'format'" not in str(te):
                    raise
                #Name of parameter changed from fmt to format.
                #Retry with old name.
                #This is so that code can be used with older Python versions
                #(e.g. by Django)
                config['fmt'] = config.pop('format')
                config['()'] = factory
                result = self.configure_custom(config)
        else:
            fmt = config.get('format', None)
            dfmt = config.get('datefmt', None)
            result = logging.Formatter(fmt, dfmt)
        return result

    def configure_filter(self, config):
        """Configure a filter from a dictionary."""
        if '()' in config:
            result = self.configure_custom(config)
        else:
            name = config.get('name', '')
            result = logging.Filter(name)
        return result

    def add_filters(self, filterer, filters):
        """Add filters to a filterer from a list of names."""
        for f in filters:
            try:
                filterer.addFilter(self.config['filters'][f])
            except StandardError as e:
                raise ValueError('Unable to add filter %r: %s' % (f, e))

    def configure_handler(self, config):
        """Configure a handler from a dictionary.

        Handles the custom-factory ('()') form as well as the 'class' form,
        with special cases for MemoryHandler targets (deferred until the
        target handler exists), and tuple coercion for SMTPHandler.mailhost
        and SysLogHandler.address.
        """
        formatter = config.pop('formatter', None)
        if formatter:
            try:
                formatter = self.config['formatters'][formatter]
            except StandardError as e:
                raise ValueError('Unable to set formatter '
                                 '%r: %s' % (formatter, e))
        level = config.pop('level', None)
        filters = config.pop('filters', None)
        if '()' in config:
            c = config.pop('()')
            if not hasattr(c, '__call__') and hasattr(types, 'ClassType') and type(c) != types.ClassType:
                c = self.resolve(c)
            factory = c
        else:
            cname = config.pop('class')
            klass = self.resolve(cname)
            #Special case for handler which refers to another handler
            if issubclass(klass, logging.handlers.MemoryHandler) and\
                'target' in config:
                try:
                    th = self.config['handlers'][config['target']]
                    if not isinstance(th, logging.Handler):
                        config['class'] = cname # restore for deferred configuration
                        raise StandardError('target not configured yet')
                    config['target'] = th
                except StandardError as e:
                    raise ValueError('Unable to set target handler '
                                     '%r: %s' % (config['target'], e))
            elif issubclass(klass, logging.handlers.SMTPHandler) and\
                'mailhost' in config:
                config['mailhost'] = self.as_tuple(config['mailhost'])
            elif issubclass(klass, logging.handlers.SysLogHandler) and\
                'address' in config:
                config['address'] = self.as_tuple(config['address'])
            factory = klass
        kwargs = dict([(k, config[k]) for k in config if valid_ident(k)])
        try:
            result = factory(**kwargs)
        except TypeError as te:
            if "'stream'" not in str(te):
                raise
            #The argument name changed from strm to stream
            #Retry with old name.
            #This is so that code can be used with older Python versions
            #(e.g. by Django)
            kwargs['strm'] = kwargs.pop('stream')
            result = factory(**kwargs)
        if formatter:
            result.setFormatter(formatter)
        if level is not None:
            result.setLevel(logging._checkLevel(level))
        if filters:
            self.add_filters(result, filters)
        return result

    def add_handlers(self, logger, handlers):
        """Add handlers to a logger from a list of names."""
        for h in handlers:
            try:
                logger.addHandler(self.config['handlers'][h])
            except StandardError as e:
                raise ValueError('Unable to add handler %r: %s' % (h, e))

    def common_logger_config(self, logger, config, incremental=False):
        """
        Perform configuration which is common to root and non-root loggers.
        """
        level = config.get('level', None)
        if level is not None:
            logger.setLevel(logging._checkLevel(level))
        if not incremental:
            #Remove any existing handlers
            for h in logger.handlers[:]:
                logger.removeHandler(h)
            handlers = config.get('handlers', None)
            if handlers:
                self.add_handlers(logger, handlers)
            filters = config.get('filters', None)
            if filters:
                self.add_filters(logger, filters)

    def configure_logger(self, name, config, incremental=False):
        """Configure a non-root logger from a dictionary."""
        logger = logging.getLogger(name)
        self.common_logger_config(logger, config, incremental)
        propagate = config.get('propagate', None)
        if propagate is not None:
            logger.propagate = propagate

    def configure_root(self, config, incremental=False):
        """Configure a root logger from a dictionary."""
        root = logging.getLogger()
        self.common_logger_config(root, config, incremental)

# Hook allowing a replacement configurator class to be substituted.
dictConfigClass = DictConfigurator

def dictConfig(config):
    """Configure logging using a dictionary."""
    dictConfigClass(config).configure()


def listen(port=DEFAULT_LOGGING_CONFIG_PORT):
    """
    Start up a socket server on the specified port, and listen for new
    configurations.

    These will be sent as a file suitable for processing by fileConfig().
    Returns a Thread object on which you can call start() to start the
    server, and which you can join() when appropriate. To stop the server,
    call stopListening().
    """
    # NOTE(review): the signature/docstring above were elided in the diff
    # view and reconstructed from the surrounding hunks - confirm upstream.
    if not thread:
        raise NotImplementedError("listen() needs threading to work")

    class ConfigStreamHandler(StreamRequestHandler):
        """
        Handler for a logging configuration request.

        It expects a completely new logging configuration and uses fileConfig
        to install it.
        """
        def handle(self):
            """
            Handle a request.

            Each request is expected to be a 4-byte length, packed using
            struct.pack(">L", n), followed by the config file.
            Uses fileConfig() to do the grunt work.
            """
            try:
                conn = self.connection
                chunk = conn.recv(4)
                if len(chunk) == 4:
                    slen = struct.unpack(">L", chunk)[0]
                    chunk = self.connection.recv(slen)
                    while len(chunk) < slen:
                        chunk = chunk + conn.recv(slen - len(chunk))
                    # Prefer JSON payloads (handled by dictConfig); fall
                    # back to the classic ini-style payload for fileConfig.
                    try:
                        import json
                        d = json.loads(chunk)
                        assert isinstance(d, dict)
                        dictConfig(d)
                    except:
                        #Apply new configuration.
                        file = cStringIO.StringIO(chunk)
                        try:
                            fileConfig(file)
                        except (KeyboardInterrupt, SystemExit):
                            raise
                        except:
                            traceback.print_exc()
                    if self.server.ready:
                        self.server.ready.set()
            except socket.error as e:
                if not isinstance(e.args, tuple):
                    raise
                else:
                    errcode = e.args[0]
                    if errcode != RESET_ERROR:
                        raise

    class ConfigSocketReceiver(ThreadingTCPServer):
        """
        A simple TCP socket-based logging config receiver.
        """

        allow_reuse_address = 1

        def __init__(self, host='localhost', port=DEFAULT_LOGGING_CONFIG_PORT,
                     handler=None, ready=None):
            ThreadingTCPServer.__init__(self, (host, port), handler)
            logging._acquireLock()
            self.abort = 0
            logging._releaseLock()
            self.timeout = 1
            self.ready = ready

        def serve_until_stopped(self):
            # Poll with a timeout so self.abort (set by stopListening) is
            # noticed promptly.
            import select
            abort = 0
            while not abort:
                rd, wr, ex = select.select([self.socket.fileno()],
                                           [], [],
                                           self.timeout)
                if rd:
                    self.handle_request()
                logging._acquireLock()
                abort = self.abort
                logging._releaseLock()
            self.socket.close()

    class Server(threading.Thread):
        """Thread that runs the config receiver and signals readiness."""

        def __init__(self, rcvr, hdlr, port):
            super(Server, self).__init__()
            self.rcvr = rcvr
            self.hdlr = hdlr
            self.port = port
            self.ready = threading.Event()

        def run(self):
            server = self.rcvr(port=self.port, handler=self.hdlr,
                               ready=self.ready)
            if self.port == 0:
                # An ephemeral port was requested; publish the actual one.
                self.port = server.server_address[1]
            self.ready.set()
            global _listener
            logging._acquireLock()
            _listener = server
            logging._releaseLock()
            server.serve_until_stopped()

    return Server(ConfigSocketReceiver, ConfigStreamHandler, port)

def stopListening():
    """
    Stop the listening server which was created with a call to listen().
    """
    global _listener
    logging._acquireLock()
    try:
        if _listener:
            _listener.abort = 1
            _listener = None
    finally:
        logging._releaseLock()
python/trunk/Lib/logging/handlers.py
r2 r391 1 # Copyright 2001-20 07by Vinay Sajip. All Rights Reserved.1 # Copyright 2001-2013 by Vinay Sajip. All Rights Reserved. 2 2 # 3 3 # Permission to use, copy, modify, and distribute this software and its … … 17 17 """ 18 18 Additional handlers for the logging package for Python. The core package is 19 based on PEP 282 and comments thereto in comp.lang.python, and influenced by 20 Apache's log4j system. 21 22 Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved. 19 based on PEP 282 and comments thereto in comp.lang.python. 20 21 Copyright (C) 2001-2013 Vinay Sajip. All Rights Reserved. 23 22 24 23 To use, simply 'import logging.handlers' and log away! 25 24 """ 26 25 27 import logging, socket, types, os, string, cPickle, struct, time, re28 from stat import ST_DEV, ST_INO 26 import errno, logging, socket, os, cPickle, struct, time, re 27 from stat import ST_DEV, ST_INO, ST_MTIME 29 28 30 29 try: … … 32 31 except ImportError: 33 32 codecs = None 33 try: 34 unicode 35 _unicode = True 36 except NameError: 37 _unicode = False 34 38 35 39 # … … 42 46 DEFAULT_SOAP_LOGGING_PORT = 9023 43 47 SYSLOG_UDP_PORT = 514 48 SYSLOG_TCP_PORT = 514 44 49 45 50 _MIDNIGHT = 24 * 60 * 60 # number of seconds in a day … … 103 108 If maxBytes is zero, rollover never occurs. 104 109 """ 110 # If rotation/rollover is wanted, it doesn't make sense to use another 111 # mode. If for example 'w' were specified, then if there were multiple 112 # runs of the calling application, the logs from previous runs would be 113 # lost if the 'w' is respected, because the log file would be truncated 114 # on each run. 105 115 if maxBytes > 0: 106 mode = 'a' # doesn't make sense otherwise!116 mode = 'a' 107 117 BaseRotatingHandler.__init__(self, filename, mode, encoding, delay) 108 118 self.maxBytes = maxBytes … … 113 123 Do a rollover, as described in __init__(). 
114 124 """ 115 116 self.stream.close() 125 if self.stream: 126 self.stream.close() 127 self.stream = None 117 128 if self.backupCount > 0: 118 129 for i in range(self.backupCount - 1, 0, -1): … … 127 138 if os.path.exists(dfn): 128 139 os.remove(dfn) 129 os.rename(self.baseFilename, dfn) 130 #print "%s -> %s" % (self.baseFilename, dfn) 131 self.mode = 'w' 132 self.stream = self._open() 140 # Issue 18940: A file may not have been created if delay is True. 141 if os.path.exists(self.baseFilename): 142 os.rename(self.baseFilename, dfn) 143 if not self.delay: 144 self.stream = self._open() 133 145 134 146 def shouldRollover(self, record): … … 156 168 files are kept - the oldest ones are deleted. 157 169 """ 158 def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay= 0, utc=0):170 def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=False, utc=False): 159 171 BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay) 160 self.when = string.upper(when)172 self.when = when.upper() 161 173 self.backupCount = backupCount 162 174 self.utc = utc … … 173 185 # Case of the 'when' specifier is not important; lower or upper case 174 186 # will work. 
175 currentTime = int(time.time())176 187 if self.when == 'S': 177 188 self.interval = 1 # one second … … 204 215 self.extMatch = re.compile(self.extMatch) 205 216 self.interval = self.interval * interval # multiply by units requested 206 self.rolloverAt = self.computeRollover(int(time.time())) 207 208 #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime) 217 if os.path.exists(filename): 218 t = os.stat(filename)[ST_MTIME] 219 else: 220 t = int(time.time()) 221 self.rolloverAt = self.computeRollover(t) 209 222 210 223 def computeRollover(self, currentTime): … … 261 274 if dstNow != dstAtRollover: 262 275 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour 263 newRolloverAt = newRolloverAt -3600276 addend = -3600 264 277 else: # DST bows out before next rollover, so we need to add an hour 265 newRolloverAt = newRolloverAt + 3600 278 addend = 3600 279 newRolloverAt += addend 266 280 result = newRolloverAt 267 281 return result … … 313 327 if self.stream: 314 328 self.stream.close() 329 self.stream = None 315 330 # get the time that this sequence started at and make it a TimeTuple 331 currentTime = int(time.time()) 332 dstNow = time.localtime(currentTime)[-1] 316 333 t = self.rolloverAt - self.interval 317 334 if self.utc: … … 319 336 else: 320 337 timeTuple = time.localtime(t) 338 dstThen = timeTuple[-1] 339 if dstNow != dstThen: 340 if dstNow: 341 addend = 3600 342 else: 343 addend = -3600 344 timeTuple = time.localtime(t + addend) 321 345 dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple) 322 346 if os.path.exists(dfn): 323 347 os.remove(dfn) 324 os.rename(self.baseFilename, dfn) 348 # Issue 18940: A file may not have been created if delay is True. 
349 if os.path.exists(self.baseFilename): 350 os.rename(self.baseFilename, dfn) 325 351 if self.backupCount > 0: 326 # find the oldest log file and delete it327 #s = glob.glob(self.baseFilename + ".20*")328 #if len(s) > self.backupCount:329 # s.sort()330 # os.remove(s[0])331 352 for s in self.getFilesToDelete(): 332 353 os.remove(s) 333 #print "%s -> %s" % (self.baseFilename, dfn) 334 self.mode = 'w' 335 self.stream = self._open() 336 currentTime = int(time.time()) 354 if not self.delay: 355 self.stream = self._open() 337 356 newRolloverAt = self.computeRollover(currentTime) 338 357 while newRolloverAt <= currentTime: … … 340 359 #If DST changes and midnight or weekly rollover, adjust for this. 341 360 if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc: 342 dstNow = time.localtime(currentTime)[-1]343 361 dstAtRollover = time.localtime(newRolloverAt)[-1] 344 362 if dstNow != dstAtRollover: 345 363 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour 346 newRolloverAt = newRolloverAt -3600364 addend = -3600 347 365 else: # DST bows out before next rollover, so we need to add an hour 348 newRolloverAt = newRolloverAt + 3600 366 addend = 3600 367 newRolloverAt += addend 349 368 self.rolloverAt = newRolloverAt 350 369 … … 371 390 def __init__(self, filename, mode='a', encoding=None, delay=0): 372 391 logging.FileHandler.__init__(self, filename, mode, encoding, delay) 373 if not os.path.exists(self.baseFilename): 374 self.dev, self.ino = -1, -1 375 else: 376 stat = os.stat(self.baseFilename) 377 self.dev, self.ino = stat[ST_DEV], stat[ST_INO] 392 self.dev, self.ino = -1, -1 393 self._statstream() 394 395 def _statstream(self): 396 if self.stream: 397 sres = os.fstat(self.stream.fileno()) 398 self.dev, self.ino = sres[ST_DEV], sres[ST_INO] 378 399 379 400 def emit(self, record): … … 385 406 current stream. 
386 407 """ 387 if not os.path.exists(self.baseFilename): 388 stat = None 389 changed = 1 390 else: 391 stat = os.stat(self.baseFilename) 392 changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino) 393 if changed and self.stream is not None: 394 self.stream.flush() 395 self.stream.close() 396 self.stream = self._open() 397 if stat is None: 398 stat = os.stat(self.baseFilename) 399 self.dev, self.ino = stat[ST_DEV], stat[ST_INO] 408 # Reduce the chance of race conditions by stat'ing by path only 409 # once and then fstat'ing our new fd if we opened a new log stream. 410 # See issue #14632: Thanks to John Mulligan for the problem report 411 # and patch. 412 try: 413 # stat the file by path, checking for existence 414 sres = os.stat(self.baseFilename) 415 except OSError as err: 416 if err.errno == errno.ENOENT: 417 sres = None 418 else: 419 raise 420 # compare file system stat with that of our stream file handle 421 if not sres or sres[ST_DEV] != self.dev or sres[ST_INO] != self.ino: 422 if self.stream is not None: 423 # we have an open file handle, clean it up 424 self.stream.flush() 425 self.stream.close() 426 # open a new file handle and get new stat info from that fd 427 self.stream = self._open() 428 self._statstream() 400 429 logging.FileHandler.emit(self, record) 401 430 … … 507 536 ei = record.exc_info 508 537 if ei: 509 dummy = self.format(record) # just to get traceback text into record.exc_text 538 # just to get traceback text into record.exc_text ... 539 dummy = self.format(record) 510 540 record.exc_info = None # to avoid Unpickleable error 511 s = cPickle.dumps(record.__dict__, 1) 541 # See issue #14436: If msg or args are objects, they may not be 542 # available on the receiving end. So we convert the msg % args 543 # to a string, save it as msg and zap the args. 
544 d = dict(record.__dict__) 545 d['msg'] = record.getMessage() 546 d['args'] = None 547 s = cPickle.dumps(d, 1) 512 548 if ei: 513 549 record.exc_info = ei # for next handler … … 550 586 Closes the socket. 551 587 """ 552 if self.sock: 553 self.sock.close() 554 self.sock = None 588 self.acquire() 589 try: 590 if self.sock: 591 self.sock.close() 592 self.sock = None 593 finally: 594 self.release() 555 595 logging.Handler.close(self) 556 596 … … 632 672 LOG_UUCP = 8 # UUCP subsystem 633 673 LOG_CRON = 9 # clock daemon 634 LOG_AUTHPRIV = 10 # security/authorization messages (private) 674 LOG_AUTHPRIV = 10 # security/authorization messages (private) 675 LOG_FTP = 11 # FTP daemon 635 676 636 677 # other codes through 15 reserved for system use … … 664 705 "cron": LOG_CRON, 665 706 "daemon": LOG_DAEMON, 707 "ftp": LOG_FTP, 666 708 "kern": LOG_KERN, 667 709 "lpr": LOG_LPR, … … 694 736 } 695 737 696 def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER): 738 def __init__(self, address=('localhost', SYSLOG_UDP_PORT), 739 facility=LOG_USER, socktype=None): 697 740 """ 698 741 Initialize a handler. … … 700 743 If address is specified as a string, a UNIX socket is used. To log to a 701 744 local syslogd, "SysLogHandler(address="/dev/log")" can be used. 702 If facility is not specified, LOG_USER is used. 745 If facility is not specified, LOG_USER is used. If socktype is 746 specified as socket.SOCK_DGRAM or socket.SOCK_STREAM, that specific 747 socket type will be used. For Unix sockets, you can also specify a 748 socktype of None, in which case socket.SOCK_DGRAM will be used, falling 749 back to socket.SOCK_STREAM. 
703 750 """ 704 751 logging.Handler.__init__(self) … … 706 753 self.address = address 707 754 self.facility = facility 708 if type(address) == types.StringType: 755 self.socktype = socktype 756 757 if isinstance(address, basestring): 709 758 self.unixsocket = 1 710 759 self._connect_unixsocket(address) 711 760 else: 712 761 self.unixsocket = 0 713 self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) 714 762 if socktype is None: 763 socktype = socket.SOCK_DGRAM 764 self.socket = socket.socket(socket.AF_INET, socktype) 765 if socktype == socket.SOCK_STREAM: 766 self.socket.connect(address) 767 self.socktype = socktype 715 768 self.formatter = None 716 769 717 770 def _connect_unixsocket(self, address): 718 self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM) 719 # syslog may require either DGRAM or STREAM sockets 771 use_socktype = self.socktype 772 if use_socktype is None: 773 use_socktype = socket.SOCK_DGRAM 774 self.socket = socket.socket(socket.AF_UNIX, use_socktype) 720 775 try: 721 776 self.socket.connect(address) 777 # it worked, so set self.socktype to the used type 778 self.socktype = use_socktype 722 779 except socket.error: 723 780 self.socket.close() 724 self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) 725 self.socket.connect(address) 781 if self.socktype is not None: 782 # user didn't specify falling back, so fail 783 raise 784 use_socktype = socket.SOCK_STREAM 785 self.socket = socket.socket(socket.AF_UNIX, use_socktype) 786 try: 787 self.socket.connect(address) 788 # it worked, so set self.socktype to the used type 789 self.socktype = use_socktype 790 except socket.error: 791 self.socket.close() 792 raise 726 793 727 794 # curious: when talking to the unix-domain '/dev/log' socket, a … … 738 805 integers. 
739 806 """ 740 if type(facility) == types.StringType:807 if isinstance(facility, basestring): 741 808 facility = self.facility_names[facility] 742 if type(priority) == types.StringType:809 if isinstance(priority, basestring): 743 810 priority = self.priority_names[priority] 744 811 return (facility << 3) | priority … … 748 815 Closes the socket. 749 816 """ 750 if self.unixsocket: 751 self.socket.close() 817 self.acquire() 818 try: 819 if self.unixsocket: 820 self.socket.close() 821 finally: 822 self.release() 752 823 logging.Handler.close(self) 753 824 … … 769 840 exception information is present, it is NOT sent to the server. 770 841 """ 771 msg = self.format(record) 842 msg = self.format(record) + '\000' 772 843 """ 773 844 We need to convert record level to lowercase, maybe this will 774 845 change in the future. 775 846 """ 776 msg = self.log_format_string % ( 777 self.encodePriority(self.facility, 778 self.mapPriority(record.levelname)), 779 msg) 847 prio = '<%d>' % self.encodePriority(self.facility, 848 self.mapPriority(record.levelname)) 849 # Message is a string. Convert to bytes as required by RFC 5424 850 if type(msg) is unicode: 851 msg = msg.encode('utf-8') 852 msg = prio + msg 780 853 try: 781 854 if self.unixsocket: … … 783 856 self.socket.send(msg) 784 857 except socket.error: 858 self.socket.close() # See issue 17981 785 859 self._connect_unixsocket(self.address) 786 860 self.socket.send(msg) 861 elif self.socktype == socket.SOCK_DGRAM: 862 self.socket.sendto(msg, self.address) 787 863 else: 788 self.socket.send to(msg, self.address)864 self.socket.sendall(msg) 789 865 except (KeyboardInterrupt, SystemExit): 790 866 raise … … 796 872 A handler class which sends an SMTP email for each logging event. 797 873 """ 798 def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None): 874 def __init__(self, mailhost, fromaddr, toaddrs, subject, 875 credentials=None, secure=None): 799 876 """ 800 877 Initialize the handler. 
… … 804 881 (host, port) tuple format for the mailhost argument. To specify 805 882 authentication credentials, supply a (username, password) tuple 806 for the credentials argument. 883 for the credentials argument. To specify the use of a secure 884 protocol (TLS), pass in a tuple for the secure argument. This will 885 only be used when authentication credentials are supplied. The tuple 886 will be either an empty tuple, or a single-value tuple with the name 887 of a keyfile, or a 2-value tuple with the names of the keyfile and 888 certificate file. (This tuple is passed to the `starttls` method). 807 889 """ 808 890 logging.Handler.__init__(self) 809 if type(mailhost) == types.TupleType:891 if isinstance(mailhost, tuple): 810 892 self.mailhost, self.mailport = mailhost 811 893 else: 812 894 self.mailhost, self.mailport = mailhost, None 813 if type(credentials) == types.TupleType:895 if isinstance(credentials, tuple): 814 896 self.username, self.password = credentials 815 897 else: 816 898 self.username = None 817 899 self.fromaddr = fromaddr 818 if type(toaddrs) == types.StringType:900 if isinstance(toaddrs, basestring): 819 901 toaddrs = [toaddrs] 820 902 self.toaddrs = toaddrs 821 903 self.subject = subject 904 self.secure = secure 905 self._timeout = 5.0 822 906 823 907 def getSubject(self, record): … … 830 914 return self.subject 831 915 832 weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']833 834 monthname = [None,835 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',836 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']837 838 def date_time(self):839 """840 Return the current date and time formatted for a MIME header.841 Needed for Python 1.5.2 (no email package available)842 """843 year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())844 s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (845 self.weekdayname[wd],846 day, self.monthname[month], year,847 hh, mm, ss)848 return s849 850 916 def emit(self, record): 851 917 """ … … 856 922 try: 857 923 import 
smtplib 858 try: 859 from email.utils import formatdate 860 except ImportError: 861 formatdate = self.date_time 924 from email.utils import formatdate 862 925 port = self.mailport 863 926 if not port: 864 927 port = smtplib.SMTP_PORT 865 smtp = smtplib.SMTP(self.mailhost, port )928 smtp = smtplib.SMTP(self.mailhost, port, timeout=self._timeout) 866 929 msg = self.format(record) 867 930 msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % ( 868 931 self.fromaddr, 869 string.join(self.toaddrs, ","),932 ",".join(self.toaddrs), 870 933 self.getSubject(record), 871 934 formatdate(), msg) 872 935 if self.username: 936 if self.secure is not None: 937 smtp.ehlo() 938 smtp.starttls(*self.secure) 939 smtp.ehlo() 873 940 smtp.login(self.username, self.password) 874 941 smtp.sendmail(self.fromaddr, self.toaddrs, msg) … … 911 978 } 912 979 except ImportError: 913 print 914 "logging) appear not to be available." 980 print("The Python Win32 extensions for NT (service, event "\ 981 "logging) appear not to be available.") 915 982 self._welu = None 916 983 … … 990 1057 """ 991 1058 logging.Handler.__init__(self) 992 method = string.upper(method)1059 method = method.upper() 993 1060 if method not in ["GET", "POST"]: 994 raise ValueError , "method must be GET or POST"1061 raise ValueError("method must be GET or POST") 995 1062 self.host = host 996 1063 self.url = url … … 1009 1076 Emit a record. 1010 1077 1011 Send the record to the Web server as a n URL-encoded dictionary1078 Send the record to the Web server as a percent-encoded dictionary 1012 1079 """ 1013 1080 try: … … 1018 1085 data = urllib.urlencode(self.mapLogRecord(record)) 1019 1086 if self.method == "GET": 1020 if ( string.find(url,'?') >= 0):1087 if (url.find('?') >= 0): 1021 1088 sep = '&' 1022 1089 else: … … 1026 1093 # support multiple hosts on one IP address... 
1027 1094 # need to strip optional :port from host, if present 1028 i = string.find(host,":")1095 i = host.find(":") 1029 1096 if i >= 0: 1030 1097 host = host[:i] … … 1034 1101 "application/x-www-form-urlencoded") 1035 1102 h.putheader("Content-length", str(len(data))) 1036 h.endheaders() 1037 if self.method == "POST": 1038 h.send(data) 1103 h.endheaders(data if self.method == "POST" else None) 1039 1104 h.getreply() #can't do anything with the result 1040 1105 except (KeyboardInterrupt, SystemExit): … … 1083 1148 This version just zaps the buffer to empty. 1084 1149 """ 1085 self.buffer = [] 1150 self.acquire() 1151 try: 1152 self.buffer = [] 1153 finally: 1154 self.release() 1086 1155 1087 1156 def close(self): … … 1131 1200 different behaviour. 1132 1201 """ 1133 if self.target: 1134 for record in self.buffer: 1135 self.target.handle(record) 1136 self.buffer = [] 1202 self.acquire() 1203 try: 1204 if self.target: 1205 for record in self.buffer: 1206 self.target.handle(record) 1207 self.buffer = [] 1208 finally: 1209 self.release() 1137 1210 1138 1211 def close(self): … … 1141 1214 """ 1142 1215 self.flush() 1143 self.target = None 1144 BufferingHandler.close(self) 1216 self.acquire() 1217 try: 1218 self.target = None 1219 BufferingHandler.close(self) 1220 finally: 1221 self.release()
Note:
See TracChangeset
for help on using the changeset viewer.