I have a logger with three handlers: a common log file for all processes, the console, and an additional per-process log file. propagate is set to False, yet every message printed inside a process is doubled both on the screen and in the common log file.
Here is the code of the main module:
import multiprocessing
import global_vars
import logging

logPath = 'logs'
fileName = "stdout.log"

def init_logger(secondary_logfile=None):
    logFormatter = logging.Formatter("%(asctime)s [%(processName)-12.12s] [%(threadName)-12.12s] [%(levelname)-5.5s] [%(filename)s:%(lineno)d] %(message)s")
    rootLogger = logging.getLogger(__name__)

    # first handler is general log
    fileHandler = logging.FileHandler("{0}/{1}".format(logPath, fileName))
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

    # second handler is logging to console
    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)

    # third handler is process-specific log
    if secondary_logfile:
        fileHandler1 = logging.FileHandler("{0}/{1}".format(logPath, secondary_logfile))
        fileHandler1.setFormatter(logFormatter)
        rootLogger.addHandler(fileHandler1)

    rootLogger.setLevel("DEBUG")  # log everything
    rootLogger.propagate = False
    return rootLogger

rootLogger = init_logger()
logger = rootLogger.getChild(__name__)

# this class contains process-specific globals, shared among all functions
# of the specific process
class Shared():
    def __init__(self, shared_var, logger):
        self.shared_var = shared_var
        self.logger = logger

    def get(self):
        return self.shared_var

def make_global(process_id, logger):
    g = Shared(process_id, logger)
    global_vars.multiprocess_globals["g"] = g

def process_func(process_id):
    g = global_vars.multiprocess_globals["g"]  # take g from process global scope
    # use g
    g.logger.debug('Message from specific logger {}, mirrored into main log: {}'.format(process_id, g.get()))

def run(process_id):
    # init secondary logger which will mirror into common log
    secondary_logger = init_logger(process_id)
    # create variable in process global scope
    make_global(process_id, secondary_logger)
    # run function which will use it
    process_func(process_id)

if __name__ == '__main__':
    logger.debug('This is log message in the main program')

    # init processes
    processes = []
    for i in range(1,4):
        p = multiprocessing.Process(target=run, args=(i,))
        p.daemon = True  # for tensorflow https://github.com/tensorflow/tensorflow/issues/5448
        processes.append(p)

    # Run processes:
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    logger.debug("This is again log message in main program")
The global_vars module contains just:
multiprocess_globals = {}
The output on the screen and in the main log (logs/stdout.log) is:
$ python3 multiprocess_globals.py
2018-08-31 18:33:41,754 [MainProcess ] [MainThread ] [DEBUG] [multiprocess_globals.py:75] This is log message in the main program
2018-08-31 18:33:41,756 [Process-1 ] [MainThread ] [DEBUG] [multiprocess_globals.py:58] Message from specific logger 1, mirrored into main log: 1
2018-08-31 18:33:41,756 [Process-1 ] [MainThread ] [DEBUG] [multiprocess_globals.py:58] Message from specific logger 1, mirrored into main log: 1
2018-08-31 18:33:41,757 [Process-2 ] [MainThread ] [DEBUG] [multiprocess_globals.py:58] Message from specific logger 2, mirrored into main log: 2
2018-08-31 18:33:41,757 [Process-2 ] [MainThread ] [DEBUG] [multiprocess_globals.py:58] Message from specific logger 2, mirrored into main log: 2
2018-08-31 18:33:41,757 [Process-3 ] [MainThread ] [DEBUG] [multiprocess_globals.py:58] Message from specific logger 3, mirrored into main log: 3
2018-08-31 18:33:41,757 [Process-3 ] [MainThread ] [DEBUG] [multiprocess_globals.py:58] Message from specific logger 3, mirrored into main log: 3
2018-08-31 18:33:41,758 [MainProcess ] [MainThread ] [DEBUG] [multiprocess_globals.py:91] This is again log message in main program
How can I remove the duplicated messages? The per-process log files (1, 2, 3, and so on) are fine.
P.S. Is it actually safe to initialize a new logger that points to the same log file? I just don't know any other way to build such a setup.
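The duplication is easy to reproduce in isolation: logging.getLogger() returns the same logger object for a given name, so calling init_logger a second time in an already-configured process stacks a second set of handlers, and each handler emits the record once. A minimal standalone sketch of that effect (not the original code):

import logging

lg = logging.getLogger("demo")
lg.setLevel(logging.DEBUG)

# simulate init_logger() running twice on the same logger object
for _ in range(2):
    h = logging.StreamHandler()
    h.setFormatter(logging.Formatter("%(levelname)s %(message)s"))
    lg.addHandler(h)

lg.debug("emitted once per attached handler")  # printed twice

Under the default fork start method on Linux, a child process inherits the parent's already-configured logger, so the init_logger(process_id) call in run() is exactly such a second call.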
Solved the problem by initializing rootLogger outside the function and only attaching the new process-specific handler inside the process:
import multiprocessing
import global_vars
import logging

logPath = 'logs'
fileName = "stdout.log"

#def init_logger(secondary_logfile=None):
logFormatter = logging.Formatter("%(asctime)s [%(processName)-12.12s] [%(threadName)-12.12s] [%(levelname)-5.5s] [%(filename)s:%(lineno)d] %(message)s")
rootLogger = logging.getLogger(__name__)

# first handler is general log
fileHandler = logging.FileHandler("{0}/{1}".format(logPath, fileName))
fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)

# second handler is logging to console
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)

rootLogger.setLevel("DEBUG")  # log everything
rootLogger.propagate = False

# third handler is process-specific log
def init_logger2(secondary_logfile, rootLogger):
    fileHandler1 = logging.FileHandler("{0}/{1}".format(logPath, secondary_logfile))
    fileHandler1.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler1)
    return rootLogger

#rootLogger = init_logger()
logger = rootLogger.getChild(__name__)

# this class contains process-specific globals, shared among all functions
# of the specific process
class Shared():
    def __init__(self, shared_var, logger):
        self.shared_var = shared_var
        self.logger = logger

    def get(self):
        return self.shared_var

def make_global(process_id, logger):
    g = Shared(process_id, logger)
    global_vars.multiprocess_globals["g"] = g

def process_func(process_id):
    g = global_vars.multiprocess_globals["g"]  # take g from process global scope
    # use g
    g.logger.debug('Message from specific logger {}, mirrored into main log: {}'.format(process_id, g.get()))

def run(process_id):
    # init secondary logger which will mirror into common log
    secondary_logger = init_logger2(process_id, rootLogger)
    # create variable in process global scope
    make_global(process_id, secondary_logger)
    # run function which will use it
    process_func(process_id)

if __name__ == '__main__':
    logger.debug('This is log message in the main program')

    # init processes
    processes = []
    for i in range(1,4):
        p = multiprocessing.Process(target=run, args=(i,))
        p.daemon = True  # for tensorflow https://github.com/tensorflow/tensorflow/issues/5448
        processes.append(p)

    # Run processes:
    for p in processes:
        p.start()
    for p in processes:
        p.join()

    logger.debug("This is again log message in main program")