source: python/vendor/Python-2.6.5/Lib/logging/handlers.py

Last change on this file was changeset 2, checked in by Yuri Dario, 15 years ago

Initial import for vendor code.

  • Property svn:eol-style set to native
File size: 43.3 KB
1# Copyright 2001-2007 by Vinay Sajip. All Rights Reserved.
2#
3# Permission to use, copy, modify, and distribute this software and its
4# documentation for any purpose and without fee is hereby granted,
5# provided that the above copyright notice appear in all copies and that
6# both that copyright notice and this permission notice appear in
7# supporting documentation, and that the name of Vinay Sajip
8# not be used in advertising or publicity pertaining to distribution
9# of the software without specific, written prior permission.
10# VINAY SAJIP DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
11# ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL
12# VINAY SAJIP BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
13# ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
14# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
15# OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16
17"""
18Additional handlers for the logging package for Python. The core package is
19based on PEP 282 and comments thereto in comp.lang.python, and influenced by
20Apache's log4j system.
21
22Copyright (C) 2001-2009 Vinay Sajip. All Rights Reserved.
23
24To use, simply 'import logging.handlers' and log away!
25"""
26
27import logging, socket, types, os, string, cPickle, struct, time, re
28from stat import ST_DEV, ST_INO
29
30try:
31 import codecs
32except ImportError:
33 codecs = None
34
35#
36# Some constants...
37#
38
39DEFAULT_TCP_LOGGING_PORT = 9020
40DEFAULT_UDP_LOGGING_PORT = 9021
41DEFAULT_HTTP_LOGGING_PORT = 9022
42DEFAULT_SOAP_LOGGING_PORT = 9023
43SYSLOG_UDP_PORT = 514
44
45_MIDNIGHT = 24 * 60 * 60 # number of seconds in a day
46
47class BaseRotatingHandler(logging.FileHandler):
48 """
49 Base class for handlers that rotate log files at a certain point.
50 Not meant to be instantiated directly. Instead, use RotatingFileHandler
51 or TimedRotatingFileHandler.
52 """
53 def __init__(self, filename, mode, encoding=None, delay=0):
54 """
55 Use the specified filename for streamed logging
56 """
57 if codecs is None:
58 encoding = None
59 logging.FileHandler.__init__(self, filename, mode, encoding, delay)
60 self.mode = mode
61 self.encoding = encoding
62
63 def emit(self, record):
64 """
65 Emit a record.
66
67 Output the record to the file, catering for rollover as described
68 in doRollover().
69 """
70 try:
71 if self.shouldRollover(record):
72 self.doRollover()
73 logging.FileHandler.emit(self, record)
74 except (KeyboardInterrupt, SystemExit):
75 raise
76 except:
77 self.handleError(record)
78
79class RotatingFileHandler(BaseRotatingHandler):
80 """
81 Handler for logging to a set of files, which switches from one file
82 to the next when the current file reaches a certain size.
83 """
84 def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None, delay=0):
85 """
86 Open the specified file and use it as the stream for logging.
87
88 By default, the file grows indefinitely. You can specify particular
89 values of maxBytes and backupCount to allow the file to rollover at
90 a predetermined size.
91
92 Rollover occurs whenever the current log file is nearly maxBytes in
93 length. If backupCount is >= 1, the system will successively create
94 new files with the same pathname as the base file, but with extensions
95 ".1", ".2" etc. appended to it. For example, with a backupCount of 5
96 and a base file name of "app.log", you would get "app.log",
97 "app.log.1", "app.log.2", ... through to "app.log.5". The file being
98 written to is always "app.log" - when it gets filled up, it is closed
99 and renamed to "app.log.1", and if files "app.log.1", "app.log.2" etc.
100 exist, then they are renamed to "app.log.2", "app.log.3" etc.
101 respectively.
102
103 If maxBytes is zero, rollover never occurs.
104 """
105 if maxBytes > 0:
106 mode = 'a' # doesn't make sense otherwise!
107 BaseRotatingHandler.__init__(self, filename, mode, encoding, delay)
108 self.maxBytes = maxBytes
109 self.backupCount = backupCount
110
111 def doRollover(self):
112 """
113 Do a rollover, as described in __init__().
114 """
115
116 self.stream.close()
117 if self.backupCount > 0:
118 for i in range(self.backupCount - 1, 0, -1):
119 sfn = "%s.%d" % (self.baseFilename, i)
120 dfn = "%s.%d" % (self.baseFilename, i + 1)
121 if os.path.exists(sfn):
122 #print "%s -> %s" % (sfn, dfn)
123 if os.path.exists(dfn):
124 os.remove(dfn)
125 os.rename(sfn, dfn)
126 dfn = self.baseFilename + ".1"
127 if os.path.exists(dfn):
128 os.remove(dfn)
129 os.rename(self.baseFilename, dfn)
130 #print "%s -> %s" % (self.baseFilename, dfn)
131 self.mode = 'w'
132 self.stream = self._open()
133
134 def shouldRollover(self, record):
135 """
136 Determine if rollover should occur.
137
138 Basically, see if the supplied record would cause the file to exceed
139 the size limit we have.
140 """
141 if self.stream is None: # delay was set...
142 self.stream = self._open()
143 if self.maxBytes > 0: # are we rolling over?
144 msg = "%s\n" % self.format(record)
145 self.stream.seek(0, 2) #due to non-posix-compliant Windows feature
146 if self.stream.tell() + len(msg) >= self.maxBytes:
147 return 1
148 return 0
149
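
A minimal usage sketch of the size-based handler above (the file and logger names are illustrative): rotation at roughly 10 KB, keeping three backups.

    import logging
    import logging.handlers

    logger = logging.getLogger("example.rotating")
    logger.setLevel(logging.DEBUG)
    handler = logging.handlers.RotatingFileHandler("app.log", maxBytes=10 * 1024,
                                                   backupCount=3)
    handler.setFormatter(logging.Formatter("%(asctime)s %(levelname)s %(message)s"))
    logger.addHandler(handler)
    for i in range(1000):
        logger.info("message %d", i)   # older output ends up in app.log.1 .. app.log.3
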
150class TimedRotatingFileHandler(BaseRotatingHandler):
151 """
152 Handler for logging to a file, rotating the log file at certain timed
153 intervals.
154
155 If backupCount is > 0, when rollover is done, no more than backupCount
156 files are kept - the oldest ones are deleted.
157 """
158 def __init__(self, filename, when='h', interval=1, backupCount=0, encoding=None, delay=0, utc=0):
159 BaseRotatingHandler.__init__(self, filename, 'a', encoding, delay)
160 self.when = string.upper(when)
161 self.backupCount = backupCount
162 self.utc = utc
163 # Calculate the real rollover interval, which is just the number of
164 # seconds between rollovers. Also set the filename suffix used when
165 # a rollover occurs. Current 'when' events supported:
166 # S - Seconds
167 # M - Minutes
168 # H - Hours
169 # D - Days
170 # midnight - roll over at midnight
171 # W{0-6} - roll over on a certain day; 0 - Monday
172 #
173 # Case of the 'when' specifier is not important; lower or upper case
174 # will work.
175 currentTime = int(time.time())
176 if self.when == 'S':
177 self.interval = 1 # one second
178 self.suffix = "%Y-%m-%d_%H-%M-%S"
179 self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}-\d{2}$"
180 elif self.when == 'M':
181 self.interval = 60 # one minute
182 self.suffix = "%Y-%m-%d_%H-%M"
183 self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}-\d{2}$"
184 elif self.when == 'H':
185 self.interval = 60 * 60 # one hour
186 self.suffix = "%Y-%m-%d_%H"
187 self.extMatch = r"^\d{4}-\d{2}-\d{2}_\d{2}$"
188 elif self.when == 'D' or self.when == 'MIDNIGHT':
189 self.interval = 60 * 60 * 24 # one day
190 self.suffix = "%Y-%m-%d"
191 self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
192 elif self.when.startswith('W'):
193 self.interval = 60 * 60 * 24 * 7 # one week
194 if len(self.when) != 2:
195 raise ValueError("You must specify a day for weekly rollover from 0 to 6 (0 is Monday): %s" % self.when)
196 if self.when[1] < '0' or self.when[1] > '6':
197 raise ValueError("Invalid day specified for weekly rollover: %s" % self.when)
198 self.dayOfWeek = int(self.when[1])
199 self.suffix = "%Y-%m-%d"
200 self.extMatch = r"^\d{4}-\d{2}-\d{2}$"
201 else:
202 raise ValueError("Invalid rollover interval specified: %s" % self.when)
203
204 self.extMatch = re.compile(self.extMatch)
205 self.interval = self.interval * interval # multiply by units requested
206 self.rolloverAt = self.computeRollover(int(time.time()))
207
208 #print "Will rollover at %d, %d seconds from now" % (self.rolloverAt, self.rolloverAt - currentTime)
209
210 def computeRollover(self, currentTime):
211 """
212 Work out the rollover time based on the specified time.
213 """
214 result = currentTime + self.interval
215 # If we are rolling over at midnight or weekly, then the interval is already known.
216 # What we need to figure out is WHEN the next interval is. In other words,
217 # if you are rolling over at midnight, then your base interval is 1 day,
218 # but you want to start that one day clock at midnight, not now. So, we
219 # have to fudge the rolloverAt value in order to trigger the first rollover
220 # at the right time. After that, the regular interval will take care of
221 # the rest. Note that this code doesn't care about leap seconds. :)
222 if self.when == 'MIDNIGHT' or self.when.startswith('W'):
223 # This could be done with less code, but I wanted it to be clear
224 if self.utc:
225 t = time.gmtime(currentTime)
226 else:
227 t = time.localtime(currentTime)
228 currentHour = t[3]
229 currentMinute = t[4]
230 currentSecond = t[5]
231 # r is the number of seconds left between now and midnight
232 r = _MIDNIGHT - ((currentHour * 60 + currentMinute) * 60 +
233 currentSecond)
234 result = currentTime + r
235 # If we are rolling over on a certain day, add in the number of days until
236 # the next rollover, but offset by 1 since we just calculated the time
237 # until the next day starts. There are three cases:
238 # Case 1) The day to rollover is today; in this case, do nothing
239 # Case 2) The day to rollover is further in the interval (i.e., today is
240 # day 2 (Wednesday) and rollover is on day 6 (Sunday). Days to
241 # next rollover is simply 6 - 2 - 1, or 3.
242 # Case 3) The day to rollover is behind us in the interval (i.e., today
243 # is day 5 (Saturday) and rollover is on day 3 (Thursday).
244 # Days to rollover is 6 - 5 + 3, or 4. In this case, it's the
245 # number of days left in the current week (1) plus the number
246 # of days in the next week until the rollover day (3).
247 # The calculations described in 2) and 3) above need to have a day added.
248 # This is because the above time calculation takes us to midnight on this
249 # day, i.e. the start of the next day.
250 if self.when.startswith('W'):
251 day = t[6] # 0 is Monday
252 if day != self.dayOfWeek:
253 if day < self.dayOfWeek:
254 daysToWait = self.dayOfWeek - day
255 else:
256 daysToWait = 6 - day + self.dayOfWeek + 1
257 newRolloverAt = result + (daysToWait * (60 * 60 * 24))
258 if not self.utc:
259 dstNow = t[-1]
260 dstAtRollover = time.localtime(newRolloverAt)[-1]
261 if dstNow != dstAtRollover:
262 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
263 newRolloverAt = newRolloverAt - 3600
264 else: # DST bows out before next rollover, so we need to add an hour
265 newRolloverAt = newRolloverAt + 3600
266 result = newRolloverAt
267 return result
268
269 def shouldRollover(self, record):
270 """
271 Determine if rollover should occur.
272
273 record is not used, as we are just comparing times, but it is needed so
274 the method signatures are the same
275 """
276 t = int(time.time())
277 if t >= self.rolloverAt:
278 return 1
279 #print "No need to rollover: %d, %d" % (t, self.rolloverAt)
280 return 0
281
282 def getFilesToDelete(self):
283 """
284 Determine the files to delete when rolling over.
285
286 More specific than the earlier method, which just used glob.glob().
287 """
288 dirName, baseName = os.path.split(self.baseFilename)
289 fileNames = os.listdir(dirName)
290 result = []
291 prefix = baseName + "."
292 plen = len(prefix)
293 for fileName in fileNames:
294 if fileName[:plen] == prefix:
295 suffix = fileName[plen:]
296 if self.extMatch.match(suffix):
297 result.append(os.path.join(dirName, fileName))
298 result.sort()
299 if len(result) < self.backupCount:
300 result = []
301 else:
302 result = result[:len(result) - self.backupCount]
303 return result
304
305 def doRollover(self):
306 """
307 do a rollover; in this case, a date/time stamp is appended to the filename
308 when the rollover happens. However, you want the file to be named for the
309 start of the interval, not the current time. If there is a backup count,
310 then we have to get a list of matching filenames, sort them and remove
311 the one with the oldest suffix.
312 """
313 if self.stream:
314 self.stream.close()
315 # get the time that this sequence started at and make it a TimeTuple
316 t = self.rolloverAt - self.interval
317 if self.utc:
318 timeTuple = time.gmtime(t)
319 else:
320 timeTuple = time.localtime(t)
321 dfn = self.baseFilename + "." + time.strftime(self.suffix, timeTuple)
322 if os.path.exists(dfn):
323 os.remove(dfn)
324 os.rename(self.baseFilename, dfn)
325 if self.backupCount > 0:
326 # find the oldest log file and delete it
327 #s = glob.glob(self.baseFilename + ".20*")
328 #if len(s) > self.backupCount:
329 # s.sort()
330 # os.remove(s[0])
331 for s in self.getFilesToDelete():
332 os.remove(s)
333 #print "%s -> %s" % (self.baseFilename, dfn)
334 self.mode = 'w'
335 self.stream = self._open()
336 currentTime = int(time.time())
337 newRolloverAt = self.computeRollover(currentTime)
338 while newRolloverAt <= currentTime:
339 newRolloverAt = newRolloverAt + self.interval
340 #If DST changes and midnight or weekly rollover, adjust for this.
341 if (self.when == 'MIDNIGHT' or self.when.startswith('W')) and not self.utc:
342 dstNow = time.localtime(currentTime)[-1]
343 dstAtRollover = time.localtime(newRolloverAt)[-1]
344 if dstNow != dstAtRollover:
345 if not dstNow: # DST kicks in before next rollover, so we need to deduct an hour
346 newRolloverAt = newRolloverAt - 3600
347 else: # DST bows out before next rollover, so we need to add an hour
348 newRolloverAt = newRolloverAt + 3600
349 self.rolloverAt = newRolloverAt
350
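
A minimal sketch of the time-based variant, rotating at midnight and keeping seven days of backups (the file name is illustrative); rotated files receive the date suffix selected in __init__ above.

    import logging
    import logging.handlers

    handler = logging.handlers.TimedRotatingFileHandler("timed.log", when="midnight",
                                                        interval=1, backupCount=7)
    logger = logging.getLogger("example.timed")
    logger.addHandler(handler)
    logger.warning("rotated files get a date suffix, e.g. timed.log.2010-01-31")
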
351class WatchedFileHandler(logging.FileHandler):
352 """
353 A handler for logging to a file, which watches the file
354 to see if it has changed while in use. This can happen because of
355 usage of programs such as newsyslog and logrotate which perform
356 log file rotation. This handler, intended for use under Unix,
357 watches the file to see if it has changed since the last emit.
358 (A file has changed if its device or inode have changed.)
359 If it has changed, the old file stream is closed, and the file
360 opened to get a new stream.
361
362 This handler is not appropriate for use under Windows, because
363 under Windows open files cannot be moved or renamed - logging
364 opens the files with exclusive locks - and so there is no need
365 for such a handler. Furthermore, ST_INO is not supported under
366 Windows; stat always returns zero for this value.
367
368 This handler is based on a suggestion and patch by Chad J.
369 Schroeder.
370 """
371 def __init__(self, filename, mode='a', encoding=None, delay=0):
372 logging.FileHandler.__init__(self, filename, mode, encoding, delay)
373 if not os.path.exists(self.baseFilename):
374 self.dev, self.ino = -1, -1
375 else:
376 stat = os.stat(self.baseFilename)
377 self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
378
379 def emit(self, record):
380 """
381 Emit a record.
382
383 First check if the underlying file has changed, and if it
384 has, close the old stream and reopen the file to get the
385 current stream.
386 """
387 if not os.path.exists(self.baseFilename):
388 stat = None
389 changed = 1
390 else:
391 stat = os.stat(self.baseFilename)
392 changed = (stat[ST_DEV] != self.dev) or (stat[ST_INO] != self.ino)
393 if changed and self.stream is not None:
394 self.stream.flush()
395 self.stream.close()
396 self.stream = self._open()
397 if stat is None:
398 stat = os.stat(self.baseFilename)
399 self.dev, self.ino = stat[ST_DEV], stat[ST_INO]
400 logging.FileHandler.emit(self, record)
401
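
An illustrative sketch of the watched handler used alongside an external rotator such as logrotate (the path is a placeholder): the stream is reopened whenever the file's device or inode changes.

    import logging
    import logging.handlers

    handler = logging.handlers.WatchedFileHandler("/var/log/myapp.log")
    logger = logging.getLogger("example.watched")
    logger.addHandler(handler)
    logger.error("reopened automatically if logrotate renamed the file")
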
402class SocketHandler(logging.Handler):
403 """
404 A handler class which writes logging records, in pickle format, to
405 a streaming socket. The socket is kept open across logging calls.
406 If the peer resets it, an attempt is made to reconnect on the next call.
407 The pickle which is sent is that of the LogRecord's attribute dictionary
408 (__dict__), so that the receiver does not need to have the logging module
409 installed in order to process the logging event.
410
411 To unpickle the record at the receiving end into a LogRecord, use the
412 makeLogRecord function.
413 """
414
415 def __init__(self, host, port):
416 """
417 Initializes the handler with a specific host address and port.
418
419 When the attribute 'closeOnError' is true, a socket error causes the
420 socket to be silently closed and reopened on the next logging call;
421 it is initialised to 0 here, so errors go to handleError() instead.
422 """
423 logging.Handler.__init__(self)
424 self.host = host
425 self.port = port
426 self.sock = None
427 self.closeOnError = 0
428 self.retryTime = None
429 #
430 # Exponential backoff parameters.
431 #
432 self.retryStart = 1.0
433 self.retryMax = 30.0
434 self.retryFactor = 2.0
435
436 def makeSocket(self, timeout=1):
437 """
438 A factory method which allows subclasses to define the precise
439 type of socket they want.
440 """
441 s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
442 if hasattr(s, 'settimeout'):
443 s.settimeout(timeout)
444 s.connect((self.host, self.port))
445 return s
446
447 def createSocket(self):
448 """
449 Try to create a socket, using an exponential backoff with
450 a max retry time. Thanks to Robert Olson for the original patch
451 (SF #815911) which has been slightly refactored.
452 """
453 now = time.time()
454 # Either retryTime is None, in which case this
455 # is the first time back after a disconnect, or
456 # we've waited long enough.
457 if self.retryTime is None:
458 attempt = 1
459 else:
460 attempt = (now >= self.retryTime)
461 if attempt:
462 try:
463 self.sock = self.makeSocket()
464 self.retryTime = None # next time, no delay before trying
465 except socket.error:
466 #Creation failed, so set the retry time and return.
467 if self.retryTime is None:
468 self.retryPeriod = self.retryStart
469 else:
470 self.retryPeriod = self.retryPeriod * self.retryFactor
471 if self.retryPeriod > self.retryMax:
472 self.retryPeriod = self.retryMax
473 self.retryTime = now + self.retryPeriod
474
475 def send(self, s):
476 """
477 Send a pickled string to the socket.
478
479 This function allows for partial sends which can happen when the
480 network is busy.
481 """
482 if self.sock is None:
483 self.createSocket()
484 #self.sock can be None either because we haven't reached the retry
485 #time yet, or because we have reached the retry time and retried,
486 #but are still unable to connect.
487 if self.sock:
488 try:
489 if hasattr(self.sock, "sendall"):
490 self.sock.sendall(s)
491 else:
492 sentsofar = 0
493 left = len(s)
494 while left > 0:
495 sent = self.sock.send(s[sentsofar:])
496 sentsofar = sentsofar + sent
497 left = left - sent
498 except socket.error:
499 self.sock.close()
500 self.sock = None # so we can call createSocket next time
501
502 def makePickle(self, record):
503 """
504 Pickles the record in binary format with a length prefix, and
505 returns it ready for transmission across the socket.
506 """
507 ei = record.exc_info
508 if ei:
509 dummy = self.format(record) # just to get traceback text into record.exc_text
510 record.exc_info = None # to avoid Unpickleable error
511 s = cPickle.dumps(record.__dict__, 1)
512 if ei:
513 record.exc_info = ei # for next handler
514 slen = struct.pack(">L", len(s))
515 return slen + s
516
517 def handleError(self, record):
518 """
519 Handle an error during logging.
520
521 An error has occurred during logging. Most likely cause -
522 connection lost. Close the socket so that we can retry on the
523 next event.
524 """
525 if self.closeOnError and self.sock:
526 self.sock.close()
527 self.sock = None #try to reconnect next time
528 else:
529 logging.Handler.handleError(self, record)
530
531 def emit(self, record):
532 """
533 Emit a record.
534
535 Pickles the record and writes it to the socket in binary format.
536 If there is an error with the socket, silently drop the packet.
537 If there was a problem with the socket, re-establishes the
538 socket.
539 """
540 try:
541 s = self.makePickle(record)
542 self.send(s)
543 except (KeyboardInterrupt, SystemExit):
544 raise
545 except:
546 self.handleError(record)
547
548 def close(self):
549 """
550 Closes the socket.
551 """
552 if self.sock:
553 self.sock.close()
554 self.sock = None
555 logging.Handler.close(self)
556
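
A sending-side sketch using the default TCP logging port defined above (host and logger names are placeholders); a receiver would read the 4-byte big-endian length prefix, unpickle the dictionary, and rebuild the record with logging.makeLogRecord().

    import logging
    import logging.handlers

    handler = logging.handlers.SocketHandler("localhost",
                                             logging.handlers.DEFAULT_TCP_LOGGING_PORT)
    logger = logging.getLogger("example.tcp")
    logger.setLevel(logging.INFO)
    logger.addHandler(handler)
    logger.info("sent as a length-prefixed pickle of the record's __dict__")
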
557class DatagramHandler(SocketHandler):
558 """
559 A handler class which writes logging records, in pickle format, to
560 a datagram socket. The pickle which is sent is that of the LogRecord's
561 attribute dictionary (__dict__), so that the receiver does not need to
562 have the logging module installed in order to process the logging event.
563
564 To unpickle the record at the receiving end into a LogRecord, use the
565 makeLogRecord function.
566
567 """
568 def __init__(self, host, port):
569 """
570 Initializes the handler with a specific host address and port.
571 """
572 SocketHandler.__init__(self, host, port)
573 self.closeOnError = 0
574
575 def makeSocket(self):
576 """
577 The factory method of SocketHandler is here overridden to create
578 a UDP socket (SOCK_DGRAM).
579 """
580 s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
581 return s
582
583 def send(self, s):
584 """
585 Send a pickled string to a socket.
586
587 This function no longer allows for partial sends which can happen
588 when the network is busy - UDP does not guarantee delivery and
589 can deliver packets out of sequence.
590 """
591 if self.sock is None:
592 self.createSocket()
593 self.sock.sendto(s, (self.host, self.port))
594
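
A minimal receiving-end sketch matching the length-prefixed pickle produced by makePickle() (the port and bind address are illustrative, and error handling is omitted); the dictionary is turned back into a LogRecord with logging.makeLogRecord(), as the class docstring suggests.

    import socket
    import struct
    import cPickle
    import logging
    import logging.handlers

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.bind(("", logging.handlers.DEFAULT_UDP_LOGGING_PORT))
    logging.basicConfig()                    # give re-built records somewhere to go
    data, addr = sock.recvfrom(65535)
    slen = struct.unpack(">L", data[:4])[0]  # length prefix written by makePickle()
    record = logging.makeLogRecord(cPickle.loads(data[4:4 + slen]))
    logging.getLogger(record.name).handle(record)
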
595class SysLogHandler(logging.Handler):
596 """
597 A handler class which sends formatted logging records to a syslog
598 server. Based on Sam Rushing's syslog module:
599 http://www.nightmare.com/squirl/python-ext/misc/syslog.py
600 Contributed by Nicolas Untz (after which minor refactoring changes
601 have been made).
602 """
603
604 # from <linux/sys/syslog.h>:
605 # ======================================================================
606 # priorities/facilities are encoded into a single 32-bit quantity, where
607 # the bottom 3 bits are the priority (0-7) and the top 28 bits are the
608 # facility (0-big number). Both the priorities and the facilities map
609 # roughly one-to-one to strings in the syslogd(8) source code. This
610 # mapping is included in this file.
611 #
612 # priorities (these are ordered)
613
614 LOG_EMERG = 0 # system is unusable
615 LOG_ALERT = 1 # action must be taken immediately
616 LOG_CRIT = 2 # critical conditions
617 LOG_ERR = 3 # error conditions
618 LOG_WARNING = 4 # warning conditions
619 LOG_NOTICE = 5 # normal but significant condition
620 LOG_INFO = 6 # informational
621 LOG_DEBUG = 7 # debug-level messages
622
623 # facility codes
624 LOG_KERN = 0 # kernel messages
625 LOG_USER = 1 # random user-level messages
626 LOG_MAIL = 2 # mail system
627 LOG_DAEMON = 3 # system daemons
628 LOG_AUTH = 4 # security/authorization messages
629 LOG_SYSLOG = 5 # messages generated internally by syslogd
630 LOG_LPR = 6 # line printer subsystem
631 LOG_NEWS = 7 # network news subsystem
632 LOG_UUCP = 8 # UUCP subsystem
633 LOG_CRON = 9 # clock daemon
634 LOG_AUTHPRIV = 10 # security/authorization messages (private)
635
636 # other codes through 15 reserved for system use
637 LOG_LOCAL0 = 16 # reserved for local use
638 LOG_LOCAL1 = 17 # reserved for local use
639 LOG_LOCAL2 = 18 # reserved for local use
640 LOG_LOCAL3 = 19 # reserved for local use
641 LOG_LOCAL4 = 20 # reserved for local use
642 LOG_LOCAL5 = 21 # reserved for local use
643 LOG_LOCAL6 = 22 # reserved for local use
644 LOG_LOCAL7 = 23 # reserved for local use
645
646 priority_names = {
647 "alert": LOG_ALERT,
648 "crit": LOG_CRIT,
649 "critical": LOG_CRIT,
650 "debug": LOG_DEBUG,
651 "emerg": LOG_EMERG,
652 "err": LOG_ERR,
653 "error": LOG_ERR, # DEPRECATED
654 "info": LOG_INFO,
655 "notice": LOG_NOTICE,
656 "panic": LOG_EMERG, # DEPRECATED
657 "warn": LOG_WARNING, # DEPRECATED
658 "warning": LOG_WARNING,
659 }
660
661 facility_names = {
662 "auth": LOG_AUTH,
663 "authpriv": LOG_AUTHPRIV,
664 "cron": LOG_CRON,
665 "daemon": LOG_DAEMON,
666 "kern": LOG_KERN,
667 "lpr": LOG_LPR,
668 "mail": LOG_MAIL,
669 "news": LOG_NEWS,
670 "security": LOG_AUTH, # DEPRECATED
671 "syslog": LOG_SYSLOG,
672 "user": LOG_USER,
673 "uucp": LOG_UUCP,
674 "local0": LOG_LOCAL0,
675 "local1": LOG_LOCAL1,
676 "local2": LOG_LOCAL2,
677 "local3": LOG_LOCAL3,
678 "local4": LOG_LOCAL4,
679 "local5": LOG_LOCAL5,
680 "local6": LOG_LOCAL6,
681 "local7": LOG_LOCAL7,
682 }
683
684 #The map below appears to be trivially lowercasing the key. However,
685 #there's more to it than meets the eye - in some locales, lowercasing
686 #gives unexpected results. See SF #1524081: in the Turkish locale,
687 #"INFO".lower() != "info"
688 priority_map = {
689 "DEBUG" : "debug",
690 "INFO" : "info",
691 "WARNING" : "warning",
692 "ERROR" : "error",
693 "CRITICAL" : "critical"
694 }
695
696 def __init__(self, address=('localhost', SYSLOG_UDP_PORT), facility=LOG_USER):
697 """
698 Initialize a handler.
699
700 If address is specified as a string, a UNIX socket is used. To log to a
701 local syslogd, "SysLogHandler(address="/dev/log")" can be used.
702 If facility is not specified, LOG_USER is used.
703 """
704 logging.Handler.__init__(self)
705
706 self.address = address
707 self.facility = facility
708 if type(address) == types.StringType:
709 self.unixsocket = 1
710 self._connect_unixsocket(address)
711 else:
712 self.unixsocket = 0
713 self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
714
715 self.formatter = None
716
717 def _connect_unixsocket(self, address):
718 self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
719 # syslog may require either DGRAM or STREAM sockets
720 try:
721 self.socket.connect(address)
722 except socket.error:
723 self.socket.close()
724 self.socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
725 self.socket.connect(address)
726
727 # curious: when talking to the unix-domain '/dev/log' socket, a
728 # zero-terminator seems to be required. this string is placed
729 # into a class variable so that it can be overridden if
730 # necessary.
731 log_format_string = '<%d>%s\000'
732
733 def encodePriority(self, facility, priority):
734 """
735 Encode the facility and priority. You can pass in strings or
736 integers - if strings are passed, the facility_names and
737 priority_names mapping dictionaries are used to convert them to
738 integers.
739 """
740 if type(facility) == types.StringType:
741 facility = self.facility_names[facility]
742 if type(priority) == types.StringType:
743 priority = self.priority_names[priority]
744 return (facility << 3) | priority
745
746 def close (self):
747 """
748 Closes the socket.
749 """
750 if self.unixsocket:
751 self.socket.close()
752 logging.Handler.close(self)
753
754 def mapPriority(self, levelName):
755 """
756 Map a logging level name to a key in the priority_names map.
757 This is useful in two scenarios: when custom levels are being
758 used, and in the case where you can't do a straightforward
759 mapping by lowercasing the logging level name because of locale-
760 specific issues (see SF #1524081).
761 """
762 return self.priority_map.get(levelName, "warning")
763
764 def emit(self, record):
765 """
766 Emit a record.
767
768 The record is formatted, and then sent to the syslog server. If
769 exception information is present, it is NOT sent to the server.
770 """
771 msg = self.format(record)
772 # We need to convert record level to lowercase, maybe this will
773 # change in the future.
776 msg = self.log_format_string % (
777 self.encodePriority(self.facility,
778 self.mapPriority(record.levelname)),
779 msg)
780 try:
781 if self.unixsocket:
782 try:
783 self.socket.send(msg)
784 except socket.error:
785 self._connect_unixsocket(self.address)
786 self.socket.send(msg)
787 else:
788 self.socket.sendto(msg, self.address)
789 except (KeyboardInterrupt, SystemExit):
790 raise
791 except:
792 self.handleError(record)
793
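
A minimal sketch talking to the local syslog daemon over its Unix-domain socket (the "/dev/log" path is the usual Linux location and is an assumption here), using one of the LOG_LOCAL facilities defined above.

    import logging
    import logging.handlers

    handler = logging.handlers.SysLogHandler(
        address="/dev/log",
        facility=logging.handlers.SysLogHandler.LOG_LOCAL1)
    logger = logging.getLogger("example.syslog")
    logger.addHandler(handler)
    logger.warning("delivered with facility local1 and priority warning")
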
794class SMTPHandler(logging.Handler):
795 """
796 A handler class which sends an SMTP email for each logging event.
797 """
798 def __init__(self, mailhost, fromaddr, toaddrs, subject, credentials=None):
799 """
800 Initialize the handler.
801
802 Initialize the instance with the from and to addresses and subject
803 line of the email. To specify a non-standard SMTP port, use the
804 (host, port) tuple format for the mailhost argument. To specify
805 authentication credentials, supply a (username, password) tuple
806 for the credentials argument.
807 """
808 logging.Handler.__init__(self)
809 if type(mailhost) == types.TupleType:
810 self.mailhost, self.mailport = mailhost
811 else:
812 self.mailhost, self.mailport = mailhost, None
813 if type(credentials) == types.TupleType:
814 self.username, self.password = credentials
815 else:
816 self.username = None
817 self.fromaddr = fromaddr
818 if type(toaddrs) == types.StringType:
819 toaddrs = [toaddrs]
820 self.toaddrs = toaddrs
821 self.subject = subject
822
823 def getSubject(self, record):
824 """
825 Determine the subject for the email.
826
827 If you want to specify a subject line which is record-dependent,
828 override this method.
829 """
830 return self.subject
831
832 weekdayname = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
833
834 monthname = [None,
835 'Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun',
836 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']
837
838 def date_time(self):
839 """
840 Return the current date and time formatted for a MIME header.
841 Needed for Python 1.5.2 (no email package available)
842 """
843 year, month, day, hh, mm, ss, wd, y, z = time.gmtime(time.time())
844 s = "%s, %02d %3s %4d %02d:%02d:%02d GMT" % (
845 self.weekdayname[wd],
846 day, self.monthname[month], year,
847 hh, mm, ss)
848 return s
849
850 def emit(self, record):
851 """
852 Emit a record.
853
854 Format the record and send it to the specified addressees.
855 """
856 try:
857 import smtplib
858 try:
859 from email.utils import formatdate
860 except ImportError:
861 formatdate = self.date_time
862 port = self.mailport
863 if not port:
864 port = smtplib.SMTP_PORT
865 smtp = smtplib.SMTP(self.mailhost, port)
866 msg = self.format(record)
867 msg = "From: %s\r\nTo: %s\r\nSubject: %s\r\nDate: %s\r\n\r\n%s" % (
868 self.fromaddr,
869 string.join(self.toaddrs, ","),
870 self.getSubject(record),
871 formatdate(), msg)
872 if self.username:
873 smtp.login(self.username, self.password)
874 smtp.sendmail(self.fromaddr, self.toaddrs, msg)
875 smtp.quit()
876 except (KeyboardInterrupt, SystemExit):
877 raise
878 except:
879 self.handleError(record)
880
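
A hedged sketch of the mail handler; every address below is a placeholder, and setting an ERROR threshold on the handler is just one reasonable way to avoid an e-mail per log call.

    import logging
    import logging.handlers

    handler = logging.handlers.SMTPHandler(mailhost=("smtp.example.com", 25),
                                           fromaddr="app@example.com",
                                           toaddrs=["ops@example.com"],
                                           subject="Application problem")
    handler.setLevel(logging.ERROR)        # mail only ERROR and above
    logger = logging.getLogger("example.mail")
    logger.addHandler(handler)
    logger.error("this record goes out as an e-mail")
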
881class NTEventLogHandler(logging.Handler):
882 """
883 A handler class which sends events to the NT Event Log. Adds a
884 registry entry for the specified application name. If no dllname is
885 provided, win32service.pyd (which contains some basic message
886 placeholders) is used. Note that use of these placeholders will make
887 your event logs big, as the entire message source is held in the log.
888 If you want slimmer logs, you have to pass in the name of your own DLL
889 which contains the message definitions you want to use in the event log.
890 """
891 def __init__(self, appname, dllname=None, logtype="Application"):
892 logging.Handler.__init__(self)
893 try:
894 import win32evtlogutil, win32evtlog
895 self.appname = appname
896 self._welu = win32evtlogutil
897 if not dllname:
898 dllname = os.path.split(self._welu.__file__)
899 dllname = os.path.split(dllname[0])
900 dllname = os.path.join(dllname[0], r'win32service.pyd')
901 self.dllname = dllname
902 self.logtype = logtype
903 self._welu.AddSourceToRegistry(appname, dllname, logtype)
904 self.deftype = win32evtlog.EVENTLOG_ERROR_TYPE
905 self.typemap = {
906 logging.DEBUG : win32evtlog.EVENTLOG_INFORMATION_TYPE,
907 logging.INFO : win32evtlog.EVENTLOG_INFORMATION_TYPE,
908 logging.WARNING : win32evtlog.EVENTLOG_WARNING_TYPE,
909 logging.ERROR : win32evtlog.EVENTLOG_ERROR_TYPE,
910 logging.CRITICAL: win32evtlog.EVENTLOG_ERROR_TYPE,
911 }
912 except ImportError:
913 print "The Python Win32 extensions for NT (service, event "\
914 "logging) appear not to be available."
915 self._welu = None
916
917 def getMessageID(self, record):
918 """
919 Return the message ID for the event record. If you are using your
920 own messages, you could do this by having the msg passed to the
921 logger being an ID rather than a formatting string. Then, in here,
922 you could use a dictionary lookup to get the message ID. This
923 version returns 1, which is the base message ID in win32service.pyd.
924 """
925 return 1
926
927 def getEventCategory(self, record):
928 """
929 Return the event category for the record.
930
931 Override this if you want to specify your own categories. This version
932 returns 0.
933 """
934 return 0
935
936 def getEventType(self, record):
937 """
938 Return the event type for the record.
939
940 Override this if you want to specify your own types. This version does
941 a mapping using the handler's typemap attribute, which is set up in
942 __init__() to a dictionary which contains mappings for DEBUG, INFO,
943 WARNING, ERROR and CRITICAL. If you are using your own levels you will
944 either need to override this method or place a suitable dictionary in
945 the handler's typemap attribute.
946 """
947 return self.typemap.get(record.levelno, self.deftype)
948
949 def emit(self, record):
950 """
951 Emit a record.
952
953 Determine the message ID, event category and event type. Then
954 log the message in the NT event log.
955 """
956 if self._welu:
957 try:
958 id = self.getMessageID(record)
959 cat = self.getEventCategory(record)
960 type = self.getEventType(record)
961 msg = self.format(record)
962 self._welu.ReportEvent(self.appname, id, cat, type, [msg])
963 except (KeyboardInterrupt, SystemExit):
964 raise
965 except:
966 self.handleError(record)
967
968 def close(self):
969 """
970 Clean up this handler.
971
972 You can remove the application name from the registry as a
973 source of event log entries. However, if you do this, you will
974 not be able to see the events as you intended in the Event Log
975 Viewer - it needs to be able to access the registry to get the
976 DLL name.
977 """
978 #self._welu.RemoveSourceFromRegistry(self.appname, self.logtype)
979 logging.Handler.close(self)
980
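
An illustrative sketch for Windows hosts with the pywin32 extensions installed (the application name is a placeholder); on other platforms the constructor prints the warning shown above and the handler emits nothing.

    import logging
    import logging.handlers

    handler = logging.handlers.NTEventLogHandler("MyApplication")
    logger = logging.getLogger("example.nt")
    logger.addHandler(handler)
    logger.error("recorded in the Application event log as an error event")
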
981class HTTPHandler(logging.Handler):
982 """
983 A class which sends records to a Web server, using either GET or
984 POST semantics.
985 """
986 def __init__(self, host, url, method="GET"):
987 """
988 Initialize the instance with the host, the request URL, and the method
989 ("GET" or "POST")
990 """
991 logging.Handler.__init__(self)
992 method = string.upper(method)
993 if method not in ["GET", "POST"]:
994 raise ValueError, "method must be GET or POST"
995 self.host = host
996 self.url = url
997 self.method = method
998
999 def mapLogRecord(self, record):
1000 """
1001 Default implementation of mapping the log record into a dict
1002 that is sent as the CGI data. Overwrite in your class.
1003 Contributed by Franz Glasner.
1004 """
1005 return record.__dict__
1006
1007 def emit(self, record):
1008 """
1009 Emit a record.
1010
1011 Send the record to the Web server as a URL-encoded dictionary.
1012 """
1013 try:
1014 import httplib, urllib
1015 host = self.host
1016 h = httplib.HTTP(host)
1017 url = self.url
1018 data = urllib.urlencode(self.mapLogRecord(record))
1019 if self.method == "GET":
1020 if (string.find(url, '?') >= 0):
1021 sep = '&'
1022 else:
1023 sep = '?'
1024 url = url + "%c%s" % (sep, data)
1025 h.putrequest(self.method, url)
1026 # support multiple hosts on one IP address...
1027 # need to strip optional :port from host, if present
1028 i = string.find(host, ":")
1029 if i >= 0:
1030 host = host[:i]
1031 h.putheader("Host", host)
1032 if self.method == "POST":
1033 h.putheader("Content-type",
1034 "application/x-www-form-urlencoded")
1035 h.putheader("Content-length", str(len(data)))
1036 h.endheaders()
1037 if self.method == "POST":
1038 h.send(data)
1039 h.getreply() #can't do anything with the result
1040 except (KeyboardInterrupt, SystemExit):
1041 raise
1042 except:
1043 self.handleError(record)
1044
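
A minimal sketch of the HTTP handler (the host, port and URL are placeholders); the dictionary returned by mapLogRecord() is sent as URL-encoded form data.

    import logging
    import logging.handlers

    handler = logging.handlers.HTTPHandler("www.example.com:8080", "/log", method="POST")
    logger = logging.getLogger("example.http")
    logger.addHandler(handler)
    logger.warning("posted as application/x-www-form-urlencoded form data")
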
1045class BufferingHandler(logging.Handler):
1046 """
1047 A handler class which buffers logging records in memory. Whenever each
1048 record is added to the buffer, a check is made to see if the buffer should
1049 be flushed. If it should, then flush() is expected to do what's needed.
1050 """
1051 def __init__(self, capacity):
1052 """
1053 Initialize the handler with the buffer size.
1054 """
1055 logging.Handler.__init__(self)
1056 self.capacity = capacity
1057 self.buffer = []
1058
1059 def shouldFlush(self, record):
1060 """
1061 Should the handler flush its buffer?
1062
1063 Returns true if the buffer is up to capacity. This method can be
1064 overridden to implement custom flushing strategies.
1065 """
1066 return (len(self.buffer) >= self.capacity)
1067
1068 def emit(self, record):
1069 """
1070 Emit a record.
1071
1072 Append the record. If shouldFlush() tells us to, call flush() to process
1073 the buffer.
1074 """
1075 self.buffer.append(record)
1076 if self.shouldFlush(record):
1077 self.flush()
1078
1079 def flush(self):
1080 """
1081 Override to implement custom flushing behaviour.
1082
1083 This version just zaps the buffer to empty.
1084 """
1085 self.buffer = []
1086
1087 def close(self):
1088 """
1089 Close the handler.
1090
1091 This version just flushes and chains to the parent class' close().
1092 """
1093 self.flush()
1094 logging.Handler.close(self)
1095
1096class MemoryHandler(BufferingHandler):
1097 """
1098 A handler class which buffers logging records in memory, periodically
1099 flushing them to a target handler. Flushing occurs whenever the buffer
1100 is full, or when an event of a certain severity or greater is seen.
1101 """
1102 def __init__(self, capacity, flushLevel=logging.ERROR, target=None):
1103 """
1104 Initialize the handler with the buffer size, the level at which
1105 flushing should occur and an optional target.
1106
1107 Note that without a target being set either here or via setTarget(),
1108 a MemoryHandler is no use to anyone!
1109 """
1110 BufferingHandler.__init__(self, capacity)
1111 self.flushLevel = flushLevel
1112 self.target = target
1113
1114 def shouldFlush(self, record):
1115 """
1116 Check for buffer full or a record at the flushLevel or higher.
1117 """
1118 return (len(self.buffer) >= self.capacity) or \
1119 (record.levelno >= self.flushLevel)
1120
1121 def setTarget(self, target):
1122 """
1123 Set the target handler for this handler.
1124 """
1125 self.target = target
1126
1127 def flush(self):
1128 """
1129 For a MemoryHandler, flushing means just sending the buffered
1130 records to the target, if there is one. Override if you want
1131 different behaviour.
1132 """
1133 if self.target:
1134 for record in self.buffer:
1135 self.target.handle(record)
1136 self.buffer = []
1137
1138 def close(self):
1139 """
1140 Flush, set the target to None and lose the buffer.
1141 """
1142 self.flush()
1143 self.target = None
1144 BufferingHandler.close(self)
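
A closing sketch of the buffering pair above: records are held in a MemoryHandler and pushed to a StreamHandler target either when the buffer fills or when a record at ERROR or above arrives (the capacity and names are illustrative).

    import logging
    import logging.handlers

    target = logging.StreamHandler()
    memory = logging.handlers.MemoryHandler(capacity=100, flushLevel=logging.ERROR,
                                            target=target)
    logger = logging.getLogger("example.buffered")
    logger.setLevel(logging.DEBUG)
    logger.addHandler(memory)
    logger.debug("held in the buffer for now")
    logger.error("reaching flushLevel pushes the whole buffer to the target handler")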