ceph.py

# Copyright (C) 2022 Red Hat Inc., Jake Hunsaker <jhunsake@redhat.com>

# This file is part of the sos project: https://github.com/sosreport/sos
#
# This copyrighted material is made available to anyone wishing to use,
# modify, copy, or redistribute it subject to the terms and conditions of
# version 2 of the GNU General Public License.
#
# See the LICENSE file in the source distribution for further information.

import json

from sos.collector.clusters import Cluster


class ceph(Cluster):
    """
    This cluster profile is for Ceph Storage clusters, and is primarily
    built around Red Hat Ceph Storage 5. Nodes are enumerated via `cephadm`;
    if your Ceph deployment uses cephadm but is not RHCS 5, this profile
    may work as intended, but it is not currently guaranteed to do so. If
    you are using such an environment and this profile does not work for
    you, please file a bug report detailing what is failing.

    By default, all nodes in the cluster will be returned for collection.
    This may not be desirable, so users are encouraged to use the `labels`
    option to specify a colon-delimited set of ceph node labels to restrict
    the list of nodes to.

    For example, using `-c ceph.labels=osd:mgr` will return only nodes
    labeled with *either* `osd` or `mgr`.
    """

    cluster_name = 'Ceph Storage Cluster'
    sos_plugins = [
        'ceph_common',
    ]
    sos_options = {'log-size': 50}
    packages = ('cephadm',)
    option_list = [
        ('labels', '', 'Colon delimited list of labels to select nodes with')
    ]

    def get_nodes(self):
        self.nodes = []
        ceph_out = self.exec_primary_cmd(
            'cephadm shell -- ceph orch host ls --format json',
            need_root=True
        )

        if not ceph_out['status'] == 0:
            self.log_error(
                f"Could not enumerate nodes via cephadm: {ceph_out['output']}"
            )
            return self.nodes

        nodes = json.loads(ceph_out['output'].splitlines()[-1])
        _labels = [lab for lab in self.get_option('labels').split(':') if lab]
        for node in nodes:
            if _labels and not any(_l in node['labels'] for _l in _labels):
                self.log_debug(f"{node} filtered from list due to labels")
                continue
            self.nodes.append(node['hostname'])

        return self.nodes

# vim: set et ts=4 sw=4 :
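# Editor's note: a minimal, illustrative sketch of the JSON shape that
# get_nodes() above expects from 'cephadm shell -- ceph orch host ls
# --format json', and of how the 'labels' option filters it. The hostnames
# and label sets below are invented, and real cephadm output carries
# additional per-host fields (addr, status, ...) that the profile ignores.
import json

sample_output = '''[
  {"hostname": "ceph-node1", "labels": ["_admin", "mon"]},
  {"hostname": "ceph-node2", "labels": ["osd"]},
  {"hostname": "ceph-node3", "labels": ["mgr", "osd"]}
]'''

# Equivalent of passing '-c ceph.labels=osd:mgr' on the command line
_labels = [lab for lab in 'osd:mgr'.split(':') if lab]

selected = [
    node['hostname']
    for node in json.loads(sample_output)
    if not _labels or any(_l in node['labels'] for _l in _labels)
]
print(selected)  # ['ceph-node2', 'ceph-node3']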
" f"Could not parse crm_mon output: {err}") if not self.nodes: # fallback to corosync.conf, in case the node we're inspecting # is offline from the cluster self.get_nodes_from_corosync() except Exception as err: self.log_error(f"Could not determine nodes from cluster: {err}") _shorts = [n for n in self.nodes if '.' not in n] if _shorts: self.log_warn( f"WARNING: Node addresses '{','.join(_shorts)}' may not " "resolve locally if you are not running on a node in the " "cluster. Try using option '-c pacemaker.only-corosync' if " "these connections fail." ) return self.nodes def get_nodes_from_crm(self): """ Try to parse crm_mon output for node list and status. """ xmlopt = '--output-as=xml' # older pacemaker had a different option for xml output _ver = self.exec_primary_cmd('crm_mon --version') if _ver['status'] == 0: cver = _ver['output'].split()[1].split('-')[0] if sos_parse_version(cver) <= sos_parse_version('2.0.3'): xmlopt = '--as-xml' else: return _out = self.exec_primary_cmd( f"crm_mon --one-shot --inactive {xmlopt}", need_root=True ) if _out['status'] == 0: self.parse_crm_xml(_out['output']) def parse_crm_xml(self, xmlstring): """ Parse the xml output string provided by crm_mon """ _xml = ElementTree.fromstring(xmlstring) nodes = _xml.find('nodes') for node in nodes: _node = node.attrib if self.get_option('online') and _node['online'] == 'true': self.nodes.append(_node['name']) elif self.get_option('offline') and _node['online'] == 'false': self.nodes.append(_node['name']) def get_nodes_from_corosync(self): """ As a fallback measure, read corosync.conf to get the node list. Note that this prevents us from separating online nodes from offline nodes. """ self.log_warn("WARNING: unable to distinguish online nodes from " "offline nodes when sourcing from corosync.conf") cc = self.primary.read_file('/etc/corosync/corosync.conf') nodes = re.findall(r'((\sring0_addr:)(.*))', cc) for node in nodes: self.nodes.append(node[-1].strip()) # vim: set et ts=4 sw=4 : juju.py 0000644 00000020635 15027746175 0006117 0 ustar 00 # Copyright (c) 2023 Canonical Ltd., Chi Wai Chan <chiwai.chan@canonical.com> # This file is part of the sos project: https://github.com/sosreport/sos # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # version 2 of the GNU General Public License. # # See the LICENSE file in the source distribution for further information. import logging import json import re from sos.collector.clusters import Cluster from sos.utilities import sos_parse_version from sos.utilities import sos_get_command_output def _parse_option_string(strings=None): """Parse comma separated string.""" if not strings: return [] return [string.strip() for string in strings.split(",")] def _get_index(model_name): """Helper function to get Index. The reason why we need Index defined in function is because currently the collector.__init__ will load all the classes in this module and also Index. This will cause bug because it think Index is Cluster type. Also We don't want to provide a customized filter to remove Index class. """ class Index: """Index structure to help parse juju status output. Attributes apps, units and machines are dict which key is the app/unit/machine name and the value is list of targets which format are {model_name}:{machine_id}. 
""" def __init__(self, model_name): self.model_name: str = model_name self.apps = {} self.units = {} self.machines = {} self.ui_log = logging.getLogger("sos") def add_principals(self, juju_status): """Adds principal units to index.""" for app, app_info in juju_status["applications"].items(): nodes = [] units = app_info.get("units", {}) for unit, unit_info in units.items(): machine = unit_info["machine"] node = f"{self.model_name}:{machine}" self.units[unit] = [node] self.machines[machine] = [node] nodes.append(node) self.apps[app] = nodes def add_subordinates(self, juju_status): """Add subordinates to index. Since subordinates does not have units they need to be manually added. """ for app, app_info in juju_status["applications"].items(): subordinate_to = app_info.get("subordinate-to", []) for parent in subordinate_to: # If parent is missing if not self.apps.get(parent): self.ui_log.warning( f"Principal charm {parent} is missing" ) continue self.apps[app].extend(self.apps[parent]) # If parent's units is missing if "units" not in juju_status["applications"][parent]: self.ui_log.warning( f"Principal charm {parent} is missing units" ) continue units = juju_status["applications"][parent]["units"] for _, unit_info in units.items(): node = f"{self.model_name}:{unit_info['machine']}" for sub_key, _ in unit_info.get( "subordinates", {} ).items(): if sub_key.startswith(app + "/"): self.units[sub_key] = [node] def add_machines(self, juju_status): """Add machines to index. If model does not have any applications it needs to be manually added. """ for machine in juju_status["machines"].keys(): node = f"{self.model_name}:{machine}" self.machines[machine] = [node] return Index(model_name) class juju(Cluster): """ The juju cluster profile is intended to be used on juju managed clouds. It"s assumed that `juju` is installed on the machine where `sos` is called, and that the juju user has superuser privilege to the current controller. By default, the sos reports will be collected from all the applications in the current model. If necessary, you can filter the nodes by models / applications / units / machines with cluster options. Example: sos collect --cluster-type juju -c "juju.models=sos" -c "juju.apps=a,b,c" """ cmd = "juju" cluster_name = "Juju Managed Clouds" option_list = [ ("apps", "", "Filter node list by apps (comma separated regex)."), ("units", "", "Filter node list by units (comma separated string)."), ("models", "", "Filter node list by models (comma separated string)."), ( "machines", "", "Filter node list by machines (comma separated string).", ), ] def _cleanup_juju_output(self, output): """Remove leading characters before {.""" return re.sub(r"(^[^{]*)(.*)", "\\2", output, 0, re.MULTILINE) def _get_model_info(self, model_name): """Parse juju status output and return target dict. Here are couple helper functions to parse the juju principals units, subordinate units and machines. 
""" juju_status = self._execute_juju_status(model_name) index = _get_index(model_name=model_name) index.add_principals(juju_status) index.add_subordinates(juju_status) index.add_machines(juju_status) return index def _get_juju_version(self): """Grab the version of juju""" res = sos_get_command_output("juju version") return res['output'] def _execute_juju_status(self, model_name): model_option = f"-m {model_name}" if model_name else "" format_option = "--format json" juju_version = self._get_juju_version() if sos_parse_version(juju_version) > sos_parse_version("3"): format_option += " --no-color" status_cmd = f"{self.cmd} status {model_option} {format_option}" res = self.exec_primary_cmd(status_cmd) if not res["status"] == 0: raise Exception(f"'{status_cmd}' returned error: {res['status']}") juju_json_output = self._cleanup_juju_output((res["output"])) juju_status = None juju_status = json.loads(juju_json_output) return juju_status def _filter_by_pattern(self, key, patterns, model_info): """Filter with regex match.""" nodes = set() for pattern in patterns: for param, value in getattr(model_info, key).items(): if re.match(pattern, param): nodes.update(value or []) return nodes def _filter_by_fixed(self, key, patterns, model_info): """Filter with fixed match.""" nodes = set() for pattern in patterns: for param, value in getattr(model_info, key).items(): if pattern == param: nodes.update(value or []) return nodes def set_transport_type(self): """Dynamically change transport to 'juju'.""" return "juju" def get_nodes(self): """Get the machine numbers from `juju status`.""" models = _parse_option_string(self.get_option("models")) apps = _parse_option_string(self.get_option("apps")) units = _parse_option_string(self.get_option("units")) machines = _parse_option_string(self.get_option("machines")) filters = {"apps": apps, "units": units, "machines": machines} # Return empty nodes if no model and filter provided. if not any(filters.values()) and not models: return [] if not models: models = [""] # use current model by default nodes = set() for model in models: model_info = self._get_model_info(model) for key, resource in filters.items(): # Filter node by different policies if key == "apps": _nodes = self._filter_by_pattern(key, resource, model_info) else: _nodes = self._filter_by_fixed(key, resource, model_info) nodes.update(_nodes) return list(nodes) # vim: set et ts=4 sw=4 : ovirt.py 0000644 00000017764 15027746175 0006316 0 ustar 00 # Copyright Red Hat 2020, Jake Hunsaker <jhunsake@redhat.com> # This file is part of the sos project: https://github.com/sosreport/sos # # This copyrighted material is made available to anyone wishing to use, # modify, copy, or redistribute it subject to the terms and conditions of # version 2 of the GNU General Public License. # # See the LICENSE file in the source distribution for further information. import fnmatch from shlex import quote from sos.collector.clusters import Cluster ENGINE_KEY = '/etc/pki/ovirt-engine/keys/engine_id_rsa' class ovirt(Cluster): """ This cluster profile is for the oVirt/RHV project which provides for a virtualization cluster built ontop of KVM. Nodes enumerated will be hypervisors within the envrionment, not virtual machines running on those hypervisors. By default, ALL hypervisors within the environment are returned. This may be influenced by the 'cluster' and 'datacenter' cluster options, which will limit enumeration to hypervisors within the specific cluster and/or datacenter. 
The spm-only cluster option may also be used to only collect from hypervisors currently holding the SPM role. Optionally, to only collect an archive from manager and the postgresql database, use the no-hypervisors cluster option. By default, a second archive from the manager will be collected that is just the postgresql plugin configured in such a way that a dump of the manager's database that can be explored and restored to other systems will be collected. The ovirt profile focuses on the upstream, community ovirt project. The rhv profile is for Red Hat customers running RHV (formerly RHEV). The rhhi_virt profile is for Red Hat customers running RHV in a hyper-converged setup and enables gluster collections. """ cluster_name = 'Community oVirt' packages = ('ovirt-engine',) db_exec = '/usr/share/ovirt-engine/dbscripts/engine-psql.sh -c' option_list = [ ('no-database', False, 'Do not collect a database dump'), ('cluster', '', 'Only collect from hosts in this cluster'), ('datacenter', '', 'Only collect from hosts in this datacenter'), ('no-hypervisors', False, 'Do not collect from hypervisors'), ('spm-only', False, 'Only collect from SPM host(s)') ] def _run_db_query(self, query): ''' Wrapper for running DB queries on the manager. Any scrubbing of the query should be done _before_ passing the query to this method. ''' cmd = f"{self.db_exec} {quote(query)}" return self.exec_primary_cmd(cmd, need_root=True) def _sql_scrub(self, val): ''' Manually sanitize SQL queries since we can't leave this up to the driver since we do not have an actual DB connection ''' if not val: return '%' invalid_chars = ['\x00', '\\', '\n', '\r', '\032', '"', '\''] if any(x in invalid_chars for x in val): self.log_warn(f"WARNING: Cluster option \'{val}\' contains invalid" " characters. Using '%%' instead.") return '%' return val def _check_for_engine_keys(self): ''' Checks for the presence of the VDSM ssh keys the manager uses for communication with hypervisors. This only runs if we're locally on the RHV-M, *and* if no ssh-keys are called out on the command line, *and* no --password option is given. ''' if self.primary.local: if not any([self.opts.ssh_key, self.opts.password, self.opts.password_per_node]): if self.primary.file_exists(ENGINE_KEY): self.add_default_ssh_key(ENGINE_KEY) self.log_debug("Found engine SSH key. 
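# Editor's note: a small worked example of the SQL that format_db_cmd()
# above builds and the command _run_db_query() would then execute. The
# option values are hypothetical, as if '-c ovirt.cluster=Default
# -c ovirt.spm-only=true' had been passed with no datacenter filter
# (_sql_scrub() maps the empty datacenter to the '%' wildcard).
from shlex import quote

cluster, datacenter, spm_only = 'Default', '%', True

dbquery = ("SELECT host_name from vds where cluster_id in "
           "(select cluster_id FROM cluster WHERE name like "
           f"'{cluster}' and storage_pool_id in (SELECT id FROM "
           f"storage_pool WHERE name like '{datacenter}'))")
if spm_only:
    dbquery += ' AND spm_status = 2'

# The query is then wrapped with the engine-psql helper on the manager:
print(f"/usr/share/ovirt-engine/dbscripts/engine-psql.sh -c {quote(dbquery)}")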
User command line" " does not specify a key or password, using" " engine key.") def setup(self): self.pg_pass = False if not self.get_option('no-database'): self.conf = self.parse_db_conf() self.format_db_cmd() self._check_for_engine_keys() def format_db_cmd(self): cluster = self._sql_scrub(self.get_option('cluster')) datacenter = self._sql_scrub(self.get_option('datacenter')) self.dbquery = ("SELECT host_name from vds where cluster_id in " "(select cluster_id FROM cluster WHERE name like " f"'{cluster}' and storage_pool_id in (SELECT id FROM " f"storage_pool WHERE name like '{datacenter}'))") if self.get_option('spm-only'): # spm_status is an integer with the following meanings # 0 - Normal (not SPM) # 1 - Contending (SPM election in progress, but is not SPM) # 2 - SPM self.dbquery += ' AND spm_status = 2' self.log_debug(f'Query command for ovirt DB set to: {self.dbquery}') def get_nodes(self): if self.get_option('no-hypervisors'): return [] res = self._run_db_query(self.dbquery) if res['status'] == 0: nodes = res['output'].splitlines()[2:-1] return [n.split('(')[0].strip() for n in nodes] raise Exception(f'database query failed, return code: {res["status"]}') def run_extra_cmd(self): if not self.get_option('no-database') and self.conf: return self.collect_database() return False def parse_db_conf(self): conf = {} engconf = '/etc/ovirt-engine/engine.conf.d/10-setup-database.conf' res = self.exec_primary_cmd(f'cat {engconf}', need_root=True) if res['status'] == 0: config = res['output'].splitlines() for line in config: try: k = str(line.split('=')[0]) v = str(line.split('=')[1].replace('"', '')) conf[k] = v except IndexError: # not a valid line to parse config values from, ignore pass return conf return False def collect_database(self): plugin = 'postgresql' sos_opt = ( f"-k {plugin}.dbname={self.conf['ENGINE_DB_DATABASE']} " f"-k {plugin}.dbhost={self.conf['ENGINE_DB_HOST']} " f"-k {plugin}.dbport={self.conf['ENGINE_DB_PORT']} " f"-k {plugin}.dbuser={self.conf['ENGINE_DB_USER']}" ) cmd = ( f"PGPASSWORD={self.conf['ENGINE_DB_PASSWORD']} /usr/sbin/sos " f"report --name=postgresql --batch -o postgresql {sos_opt}" ) db_sos = self.exec_primary_cmd(cmd, need_root=True) for line in db_sos['output'].splitlines(): if fnmatch.fnmatch(line, '*sosreport-*tar*'): _pg_dump = line.strip() self.primary.manifest.add_field('postgresql_dump', _pg_dump.split('/')[-1]) return _pg_dump self.log_error('Failed to gather database dump') return False class rhv(ovirt): cluster_name = 'Red Hat Virtualization' packages = ('rhevm', 'rhvm') sos_preset = 'rhv' def set_node_label(self, node): if node.address == self.primary.address: return 'manager' if node.is_installed('ovirt-node-ng-nodectl'): return 'rhvh' return 'rhelh' class rhhi_virt(rhv): cluster_name = 'Red Hat Hyperconverged Infrastructure - Virtualization' sos_plugins = ('gluster',) sos_plugin_options = {'gluster.dump': 'on'} sos_preset = 'rhv' def check_enabled(self): return (self.primary.is_installed('rhvm') and self._check_for_rhhiv()) def _check_for_rhhiv(self): ret = self._run_db_query('SELECT count(server_id) FROM gluster_server') if ret['status'] == 0: # if there are any entries in this table, RHHI-V is in use return ret['output'].splitlines()[2].strip() != '0' return False # vim: set et ts=4 sw=4 : __pycache__/ocp.cpython-310.pyc 0000644 00000027321 15027746175 0012261 0 ustar 00 o -�_g.@ � @ s@ d dl Z d dlmZ d dlmZ d dlmZ G dd� de�ZdS )� N)�quote)�Cluster)� is_executablec s� e Zd ZdZdZdZdZdZdZdZ dZ g d�Zed d � �Z dd� Zd d� Z� fdd�Zdd� Zdd� Zdd� Zdd� 
Zdd� Zdd� Zdd� Zdd � Zd!d"� Zd#d$� Zd%d&� Z� ZS )'�ocpa� This profile is for use with OpenShift Container Platform (v4) clusters instead of the kubernetes profile. This profile will favor using the `oc` transport type, which means it will leverage a locally installed `oc` binary. This is also how node enumeration is done. To instead use SSH to connect to the nodes, use the '--transport=control_persist' option. Thus, a functional `oc` binary for the user executing sos collect is required. Functional meaning that the user can run `oc` commands with clusterAdmin privileges. If this requires the use of a secondary configuration file, specify that path with the 'kubeconfig' cluster option. This config file will also be used on a single master node to perform API collections if the `with-api` option is enabled (default disabled). If no `kubeconfig` option is given, but `with-api` is enabled, the cluster profile will attempt to use a well-known default kubeconfig file if it is available on the host. Alternatively, provide a clusterAdmin access token either via the 'token' cluster option or, preferably, the SOSOCPTOKEN environment variable. By default, this profile will enumerate only master nodes within the cluster, and this may be changed by overriding the 'role' cluster option. To collect from all nodes in the cluster regardless of role, use the form -c ocp.role=''. Filtering nodes by a label applied to that node is also possible via the label cluster option, though be aware that this is _combined_ with the role option mentioned above. To avoid redundant collections of OCP API information (e.g. 'oc get' commands), this profile will attempt to enable the API collections on only a single master node. If the none of the master nodes have a functional 'oc' binary available, *and* the --no-local option is used, that means that no API data will be collected. zOpenShift Container Platform v4)zopenshift-hyperkubezopenshift-clientsFNzsos-collect-tmp� ))�labelr z3Colon delimited list of labels to select nodes with)�role�masterz*Colon delimited list of roles to filter on)� kubeconfigr zPath to the kubeconfig file)�tokenr z1Service account token to use for oc authorization)�with-apiFz'Collect OCP API data from a master node)�api-urlr z.Alternate API URL of an external control-planec C s� | j sZd| _ | jj�� r?| jjd| jjjd�}|d dkr0tj�| jjj|d � � � d��| _ n| �d� | �d |d � �� | � d �rQ| j d| � d �� �7 _ | �d| j � �� | j S ) N�oczwhich oc)�chroot�statusr �output�/zHUnable to to determine PATH for 'oc' command, node enumeration may fail.zLocating 'oc' failed: r z --kubeconfig zoc base command set to )�_oc_cmd�primary�host�in_container�run_command�sysroot�os�path�join�strip�lstrip�log_warn� log_debug� get_option)�self�_oc_path� r# �</usr/lib/python3/dist-packages/sos/collector/clusters/ocp.py�oc_cmdL s. ���� �z ocp.oc_cmdc C s | j � d|� �S )zcFormat the oc command to optionall include the kubeconfig file if one is specified � )r% )r! �cmdr# r# r$ � fmt_oc_cmdf s zocp.fmt_oc_cmdc C s0 | � | �d| j� d| �d�� ���}|d dkS )zXAttempt to login to the API using the oc command using a provided token z.login --insecure-skip-tls-verify=True --token=r&
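# Editor's note: the profiles above all follow the same contract from the
# sos.collector.clusters.Cluster base class: declare identifying metadata
# (cluster_name, packages, sos_plugins, option_list) and implement
# get_nodes() using helpers such as exec_primary_cmd(), get_option() and the
# log_*() methods. The sketch below is a minimal, hypothetical profile in
# that pattern; the 'mycluster' class and the 'mynodes list' command are
# invented for illustration and do not correspond to any real product.
from sos.collector.clusters import Cluster


class mycluster(Cluster):
    """Enumerate nodes for a hypothetical clustering product."""

    cluster_name = 'Example Cluster Manager'
    packages = ('mycluster-tools',)  # used to auto-detect this cluster type
    sos_plugins = ['logs']           # extra sos report plugins to request
    option_list = [
        ('roles', '', 'Colon delimited list of roles to filter nodes on')
    ]

    def get_nodes(self):
        self.nodes = []
        res = self.exec_primary_cmd('mynodes list', need_root=True)
        if res['status'] != 0:
            self.log_error(f"Could not enumerate nodes: {res['output']}")
            return self.nodes
        roles = [r for r in self.get_option('roles').split(':') if r]
        # assume one 'hostname role' pair per output line
        for line in res['output'].splitlines():
            hostname, _, role = line.partition(' ')
            if roles and role not in roles:
                self.log_debug(f"{hostname} filtered from list due to role")
                continue
            self.nodes.append(hostname)
        return self.nodes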