File manager - Edit - /home/newsbmcs.com/public_html/static/img/logo/pcp.py.tar
Back
# usr/lib/python3/dist-packages/twisted/protocols/pcp.py
# -*- test-case-name: twisted.test.test_pcp -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.

"""
Producer-Consumer Proxy.
"""

from zope.interface import implementer

from twisted.internet import interfaces


@implementer(interfaces.IProducer, interfaces.IConsumer)
class BasicProducerConsumerProxy:
    """
    I can act as a man in the middle between any Producer and Consumer.

    @ivar producer: the Producer I subscribe to.
    @type producer: L{IProducer<interfaces.IProducer>}
    @ivar consumer: the Consumer I publish to.
    @type consumer: L{IConsumer<interfaces.IConsumer>}
    @ivar paused: As a Producer, am I paused?
    @type paused: bool
    """

    consumer = None
    producer = None
    producerIsStreaming = None
    iAmStreaming = True
    outstandingPull = False
    paused = False
    stopped = False

    def __init__(self, consumer):
        self._buffer = []
        if consumer is not None:
            self.consumer = consumer
            consumer.registerProducer(self, self.iAmStreaming)

    # Producer methods:

    def pauseProducing(self):
        self.paused = True
        if self.producer:
            self.producer.pauseProducing()

    def resumeProducing(self):
        self.paused = False
        if self._buffer:
            # TODO: Check to see if consumer supports writeSeq.
            self.consumer.write("".join(self._buffer))
            self._buffer[:] = []
        else:
            if not self.iAmStreaming:
                self.outstandingPull = True

        if self.producer is not None:
            self.producer.resumeProducing()

    def stopProducing(self):
        if self.producer is not None:
            self.producer.stopProducing()
        if self.consumer is not None:
            del self.consumer

    # Consumer methods:

    def write(self, data):
        if self.paused or (not self.iAmStreaming and not self.outstandingPull):
            # We could use that fifo queue here.
            self._buffer.append(data)
        elif self.consumer is not None:
            self.consumer.write(data)
            self.outstandingPull = False

    def finish(self):
        if self.consumer is not None:
            self.consumer.finish()
        self.unregisterProducer()

    def registerProducer(self, producer, streaming):
        self.producer = producer
        self.producerIsStreaming = streaming

    def unregisterProducer(self):
        if self.producer is not None:
            del self.producer
            del self.producerIsStreaming
        if self.consumer:
            self.consumer.unregisterProducer()

    def __repr__(self) -> str:
        return f"<{self.__class__}@{id(self):x} around {self.consumer}>"


class ProducerConsumerProxy(BasicProducerConsumerProxy):
    """ProducerConsumerProxy with a finite buffer.

    When my buffer fills up, I have my parent Producer pause until my buffer
    has room in it again.
    """

    # Copies much from abstract.FileDescriptor
    bufferSize = 2 ** 2 ** 2 ** 2

    producerPaused = False
    unregistered = False

    def pauseProducing(self):
        # Does *not* call up to ProducerConsumerProxy to relay the pause
        # message through to my parent Producer.
        self.paused = True

    def resumeProducing(self):
        self.paused = False
        if self._buffer:
            data = "".join(self._buffer)
            bytesSent = self._writeSomeData(data)
            if bytesSent < len(data):
                unsent = data[bytesSent:]
                assert (
                    not self.iAmStreaming
                ), "Streaming producer did not write all its data."
                self._buffer[:] = [unsent]
            else:
                self._buffer[:] = []
        else:
            bytesSent = 0

        if (
            self.unregistered
            and bytesSent
            and not self._buffer
            and self.consumer is not None
        ):
            self.consumer.unregisterProducer()

        if not self.iAmStreaming:
            self.outstandingPull = not bytesSent

        if self.producer is not None:
            bytesBuffered = sum(len(s) for s in self._buffer)
            # TODO: You can see here the potential for high and low
            # watermarks, where bufferSize would be the high mark when we
            # ask the upstream producer to pause, and we wouldn't have
            # it resume again until it hit the low mark.  Or if producer
            # is Pull, maybe we'd like to pull from it as much as necessary
            # to keep our buffer full to the low mark, so we're never caught
            # without something to send.
            if self.producerPaused and (bytesBuffered < self.bufferSize):
                # Now that our buffer is empty,
                self.producerPaused = False
                self.producer.resumeProducing()
            elif self.outstandingPull:
                # I did not have any data to write in response to a pull,
                # so I'd better pull some myself.
                self.producer.resumeProducing()

    def write(self, data):
        if self.paused or (not self.iAmStreaming and not self.outstandingPull):
            # We could use that fifo queue here.
            self._buffer.append(data)
        elif self.consumer is not None:
            assert (
                not self._buffer
            ), "Writing fresh data to consumer before my buffer is empty!"
            # I'm going to use _writeSomeData here so that there is only one
            # path to self.consumer.write.  But it doesn't actually make sense,
            # if I am streaming, for some data to not be all data.  But maybe I
            # am not streaming, but I am writing here anyway, because there was
            # an earlier request for data which was not answered.
            bytesSent = self._writeSomeData(data)
            self.outstandingPull = False
            if not bytesSent == len(data):
                assert (
                    not self.iAmStreaming
                ), "Streaming producer did not write all its data."
                self._buffer.append(data[bytesSent:])

        if (self.producer is not None) and self.producerIsStreaming:
            bytesBuffered = sum(len(s) for s in self._buffer)
            if bytesBuffered >= self.bufferSize:
                self.producer.pauseProducing()
                self.producerPaused = True

    def registerProducer(self, producer, streaming):
        self.unregistered = False
        BasicProducerConsumerProxy.registerProducer(self, producer, streaming)
        if not streaming:
            producer.resumeProducing()

    def unregisterProducer(self):
        if self.producer is not None:
            del self.producer
            del self.producerIsStreaming
        self.unregistered = True
        if self.consumer and not self._buffer:
            self.consumer.unregisterProducer()

    def _writeSomeData(self, data):
        """Write as much of this data as possible.

        @returns: The number of bytes written.
        """
        if self.consumer is None:
            return 0
        self.consumer.write(data)
        return len(data)
# usr/lib/python3/dist-packages/sos/report/plugins/pcp.py
# Copyright (C) 2014 Michele Baldessari <michele at acksyn.org>

# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.

from socket import gethostname

from sos.report.plugins import Plugin, RedHatPlugin, DebianPlugin, PluginOpt


class Pcp(Plugin, RedHatPlugin, DebianPlugin):

    short_desc = 'Performance Co-Pilot data'

    plugin_name = 'pcp'
    profiles = ('system', 'performance')
    packages = ('pcp',)
    pcp_conffile = '/etc/pcp.conf'

    # size-limit of PCP logger and manager data collected by default (MB)
    option_list = [
        PluginOpt('pmmgrlogs', default=100,
                  desc='size limit in MB of pmmgr logs'),
        PluginOpt('pmloggerfiles', default=12,
                  desc='number of pmlogger files to collect')
    ]

    pcp_sysconf_dir = None
    pcp_var_dir = None
    pcp_log_dir = None

    pcp_hostname = ''

    def pcp_parse_conffile(self):
        """ Parse PCP configuration """
        try:
            with open(self.pcp_conffile, "r", encoding='UTF-8') as pcpconf:
                lines = pcpconf.readlines()
        except IOError:
            return False
        env_vars = {}
        for line in lines:
            if line.startswith('#'):
                continue
            try:
                (key, value) = line.strip().split('=')
                env_vars[key] = value
            except (ValueError, KeyError):
                # not a line for a key, value pair. Ignore the line.
                pass

        try:
            self.pcp_sysconf_dir = env_vars['PCP_SYSCONF_DIR']
            self.pcp_var_dir = env_vars['PCP_VAR_DIR']
            self.pcp_log_dir = env_vars['PCP_LOG_DIR']
        except Exception:  # pylint: disable=broad-except
            # Fail if all three env variables are not found
            return False

        return True

    def setup(self):
        sizelimit = (None if self.get_option("all_logs")
                     else self.get_option("pmmgrlogs"))
        countlimit = (None if self.get_option("all_logs")
                      else self.get_option("pmloggerfiles"))

        if not self.pcp_parse_conffile():
            self._log_warn(f"could not parse {self.pcp_conffile}")
            return

        # Add PCP_SYSCONF_DIR (/etc/pcp) and PCP_VAR_DIR (/var/lib/pcp/config)
        # unconditionally. Obviously if someone messes up their /etc/pcp.conf
        # in a ridiculous way (i.e. setting PCP_SYSCONF_DIR to '/') this will
        # break badly.
        var_conf_dir = self.path_join(self.pcp_var_dir, 'config')
        self.add_copy_spec([
            self.pcp_sysconf_dir,
            self.pcp_conffile,
            var_conf_dir
        ])

        # We explicitly avoid /var/lib/pcp/config/{pmchart,pmlogconf,pmieconf,
        # pmlogrewrite} as in 99% of the cases they are just copies from the
        # rpms. It does not make up for a lot of size but it contains many
        # files
        self.add_forbidden_path([
            self.path_join(var_conf_dir, 'pmchart'),
            self.path_join(var_conf_dir, 'pmlogconf'),
            self.path_join(var_conf_dir, 'pmieconf'),
            self.path_join(var_conf_dir, 'pmlogrewrite')
        ])

        # Take PCP_LOG_DIR/pmlogger/`hostname` + PCP_LOG_DIR/pmmgr/`hostname`
        # The *default* directory structure for pmlogger is the following:
        # Dir: PCP_LOG_DIR/pmlogger/HOST/ (we only collect the HOST data
        # itself)
        # - YYYYMMDD.HH.MM.{N,N.index,N.meta} N in [0,1,...]
        # - Latest
        # - pmlogger.{log,log.prior}
        #
        # Can be changed via configuration in PCP_SYSCONF_DIR/pmlogger/control
        # As a default strategy, collect up to 100MB data from each dir.
        # Can be overwritten either via pcp.pcplogsize option or all_logs.
        self.pcp_hostname = gethostname()

        # Make sure we only add the two dirs if hostname is set, otherwise
        # we would collect everything
        if self.pcp_hostname != '':
            # collect pmmgr logs up to 'pmmgrlogs' size limit
            path = self.path_join(self.pcp_log_dir, 'pmmgr',
                                  self.pcp_hostname, '*')
            self.add_copy_spec(path, sizelimit=sizelimit, tailit=False)
            # collect newest pmlogger logs up to 'pmloggerfiles' count
            files_collected = 0
            path = self.path_join(self.pcp_log_dir, 'pmlogger',
                                  self.pcp_hostname, '*')
            pmlogger_ls = self.exec_cmd(f"ls -t1 {path}")
            if pmlogger_ls['status'] == 0:
                for line in pmlogger_ls['output'].splitlines():
                    self.add_copy_spec(line, sizelimit=0)
                    files_collected = files_collected + 1
                    if countlimit and files_collected == countlimit:
                        break

        self.add_copy_spec([
            # Collect PCP_LOG_DIR/pmcd and PCP_LOG_DIR/NOTICES
            self.path_join(self.pcp_log_dir, 'pmcd'),
            self.path_join(self.pcp_log_dir, 'NOTICES*'),
            # Collect PCP_VAR_DIR/pmns
            self.path_join(self.pcp_var_dir, 'pmns'),
            # Also collect any other log and config files
            # (as suggested by fche)
            self.path_join(self.pcp_log_dir, '*/*.log*'),
            self.path_join(self.pcp_log_dir, '*/*/*.log*'),
            self.path_join(self.pcp_log_dir, '*/*/config*')
        ])

        # Collect a summary for the current day
        res = self.collect_cmd_output('pcp')
        if res['status'] == 0:
            for line in res['output'].splitlines():
                if line.startswith(' pmlogger:'):
                    arc = line.split()[-1]
                    self.add_cmd_output(
                        f"pmstat -S 00:00 -T 23:59 -t 5m -x -a {arc}",
                        root_symlink="pmstat"
                    )
                    break

# vim: set et ts=4 sw=4 :
| ver. 1.4 |
Github
|
.
| PHP 8.2.28 | Generation time: 0.02 |
proxy
|
phpinfo
|
Settings