# coding=utf-8
#
# Copyright © Cloud Linux GmbH & Cloud Linux Software, Inc 2010-2019 All Rights Reserved
#
# Licensed under CLOUD LINUX LICENSE AGREEMENT
# http://cloudlinux.com/docs/LICENSE.TXT
import csv
import logging
import os
import sys
from sqlalchemy.exc import OperationalError, ProgrammingError
import lvestats
import lvestats.lib.commons.decorators
from lvestats.lib import uidconverter
from lvestats.lib.commons import dateutil
from lvestats.lib.commons.func import get_ascii_table
from lvestats.lib.commons.logsetup import setup_logging
from lvestats.lib.config import ConfigError
from lvestats.lib.config import read_config, is_normalized_user_cpu
from lvestats.lib.dbengine import make_db_engine, MakeDbException
from lvestats.lib.jsonhandler import prepare_data_json
from lvestats.lib.lveinfolib import OutputFormatter, HistoryShowUnion, \
servers_info, get_supported_columns
from lvestats.lib.lveinfolib_gov import HistoryShowDBGov
from lvestats.lib.parsers.lveinfoargparse import lveinfo_parser, dbgov_parser, DEF_BLANK_VALUE
log = setup_logging({}, caller_name='lveinfo', file_level=logging.ERROR, console_level=logging.ERROR)
NAME = 'lveinfo'
def _convert_to_dict(rows, fields):
"""
    Convert rows and their field names to a list of dictionaries
:param list|tuple|generator rows:
:param list|tuple fields:
:return list:
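    Illustrative example (the column names are arbitrary sample fields):
        >>> _convert_to_dict([(501, 12.5)], ['ID', 'aCPU'])
        [{'ID': 501, 'aCPU': 12.5}]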
"""
return [dict(zip(fields, row_)) for row_ in rows]
def save_csv(rows, fields, stream):
"""
    Write rows and their header fields to stream in CSV format
    :param list|tuple|generator rows:
    :param list|tuple fields: header written as the first CSV line
    :param file stream: stream to write the data to
:return:
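    Illustrative example, using an in-memory io.StringIO as the stream:
        >>> import io
        >>> buf = io.StringIO()
        >>> save_csv([(501, 12.5)], ['ID', 'aCPU'], buf)
        >>> buf.getvalue().splitlines()
        ['ID,aCPU', '501,12.5']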
"""
csv_writer = csv.writer(stream)
csv_writer.writerow(fields)
csv_writer.writerows(rows)
def is_cpu_output_normalizable(namespace):
"""
    Normalization is done only if namespace.style == 'user', a user id is specified, and
    either CSV or JSON output is selected
    :param namespace: command line options/settings
    :return: True if CPU normalization should be done
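    Illustrative example (an argparse.Namespace with only the relevant
    attributes stands in for the parsed command line):
        >>> from argparse import Namespace
        >>> ns = Namespace(style='user', csv=None, json=True, id=1000)
        >>> bool(is_cpu_output_normalizable(ns))
        True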
"""
return namespace.style == "user" and (namespace.csv or namespace.json) and namespace.id
def normalize_cpu(rows, fields):
"""
    Normalize CPU data so that the limit is always 100%. This is used to show
    end-user usage relative to their limit
    :param list rows: rows with data
    :param list|tuple fields: column names
:return list: normalized rows
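    Worked example, assuming a 200% CPU limit with 150% average and 180%
    maximum usage:
        >>> normalize_cpu([[150.0, 180.0, 200.0]], ['aCPU', 'mCPU', 'lCPU'])
        [[75.0, 90.0, 100.0]]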
"""
    if 'lCPU' not in fields:
        return rows
    lcpu_index = fields.index('lCPU')
    acpu_index = fields.index('aCPU') if 'aCPU' in fields else -1
    mcpu_index = fields.index('mCPU') if 'mCPU' in fields else -1
new_rows = []
    for _prow in rows:
        _row = list(_prow)
        new_rows.append(_row)
        # remember the configured limit, then rescale the limit itself to 100%
        lcpu = float(_row[lcpu_index])
        _row[lcpu_index] = 100.
        if acpu_index != -1:
            _row[acpu_index] = float(_row[acpu_index]) / lcpu * 100
        if mcpu_index != -1:
            _row[mcpu_index] = float(_row[mcpu_index]) / lcpu * 100
return new_rows
def main_(config, dbengine=None, argv_=None):
if dbengine is None:
try:
dbengine = make_db_engine(cfg=config)
except MakeDbException as e:
log.error(str(e))
return
if argv_ is None:
_args = sys.argv[1:]
else:
_args = list(argv_)
if '--dbgov' in _args:
namespace, output_formatted, rows = dbgov_main(config, _args, dbengine)
else:
parser = lveinfo_parser(name=NAME, ver=lvestats.__version__, config=config, dbengine=dbengine)
namespace = parser.parse_args(_args) # parsing command line
if namespace.servers_info:
output_formatted = OutputFormatter(('Server', 'LVE version'))
rows = servers_info(dbengine)
else:
output_formatted, rows = lvestats_main(dbengine, namespace, parser)
output_formatted.set_rows(rows)
    # for json and csv output, convert IO values from bytes to KB
if namespace.csv or namespace.json:
output_formatted.add_order(fields=['aIO', 'mIO', 'lIO'], order=lambda x_: round(x_ / 1024., 3))
if namespace.csv:
save_csv(output_formatted, output_formatted.get_fields(), stream=namespace.csv)
elif namespace.json:
return prepare_data_json(_convert_to_dict(output_formatted, output_formatted.fields))
else:
# make ascii-table output human readable
output_formatted.add_order(fields=['aVMem', 'mVMem', 'lVMem', 'aPMem', 'mPMem', 'lPMem', 'aIO', 'mIO', 'lIO'],
order='bytes')
output_formatted.add_order(
fields=['aEP', 'mEP', 'lEP', 'EPf', 'VMemF', 'CPUf', 'aNproc', 'mNproc', 'lNproc',
'PMemF', 'NprocF', 'IOf', 'aIOPS', 'mIOPS', 'lIOPS', 'IOPSf'], order='powers_of_1000')
output_formatted.add_order(['uCPU', 'uEP', 'uVMem', 'uPMem', 'uIO', 'uNproc', 'uIOPS'], 'percentage')
return get_ascii_table(output_formatted, fields=output_formatted.fields)
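# A hedged invocation sketch (not part of the module API). The flags below are
# assumptions based on the namespace attributes used above (namespace.json,
# namespace.csv) and the usual lveinfo --period option:
#     result = main_(read_config(), argv_=['--period', '1d', '--json'])
# With --json the call returns a JSON-ready structure, with --csv it writes
# CSV to the given stream and returns None, and otherwise it returns an
# ASCII-table string ready for printing.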
def lvestats_main(dbengine, namespace, parser):
output_formatted = OutputFormatter(
namespace.show_columns,
        # format the 'From'/'To' columns as %m-%d %H:%M (for example "12-29 09:46")
orders=[(['From', 'To'], 'datetime'), (['From', 'To'], 'strftime')])
output_formatted.add_order(fields=['aCPU', 'mCPU', 'lCPU', 'aIO', 'mIO', 'lIO'],
order=lambda x_: int(round(x_)))
utc_from = dateutil.local_to_gm(getattr(namespace, 'from'))
utc_to = dateutil.local_to_gm(namespace.to)
if os.environ.get('LVEINFODEBUG'):
log_ = setup_logging({}, caller_name='lveinfo', console_level=logging.DEBUG)
else:
log_ = None
history_show = HistoryShowUnion(
dbengine=dbengine,
period_from=utc_from,
period_to=utc_to,
uid=namespace.id,
show_columns=output_formatted.get_fields(),
server_id=namespace.server_id,
time_unit=namespace.time_unit,
limit=namespace.limit,
by_usage=namespace.by_usage,
by_usage_percentage=namespace.percentage / 100.,
by_fault=namespace.by_fault,
threshold=namespace.threshold,
order_by=namespace.order_by,
log=log_ or log)
history_show.set_normalised_output()
if namespace.time_unit == -1: # using dynamic time-unit
rows = history_show.proceed_dyn_time_unit()
else:
rows = history_show.proceed()
if ((namespace.user or namespace.reseller_name)
and namespace.display_username):
output_formatted.add_order(
['ID'], lambda x: namespace.user or namespace.reseller_name)
elif namespace.display_username:
        # try to convert user id to user name
output_formatted.add_order(
['ID'], lambda x: uidconverter.uid_to_username(
x, parser.get_default('server_id'),
namespace.server_id, dbengine) or x)
    # replace values of unsupported columns with the blank value
supported_columns_ = get_supported_columns(lve_version=parser.lve_version)
all_columns = get_supported_columns()
unsupported_columns = set(all_columns).difference(set(supported_columns_))
if unsupported_columns:
output_formatted.add_order(
fields=unsupported_columns,
order=lambda _: DEF_BLANK_VALUE if namespace.blank_value is None else namespace.blank_value)
if is_normalized_user_cpu() and is_cpu_output_normalizable(namespace):
rows = normalize_cpu(rows, history_show.show_columns)
return output_formatted, rows
def dbgov_main(config, _args, dbengine):
_args.remove('--dbgov')
parser = dbgov_parser(config=config, dbengine=dbengine)
namespace = parser.parse_args(_args) # parsing command line
output_formatted = OutputFormatter(
fields=namespace.format,
orders=[
(['FROM', 'TO'], 'datetime'),
(['FROM', 'TO'], lambda dt: OutputFormatter.strftime(
dt, '%m-%d %H:%M')),
(['TS'], 'datetime'),
(['TS'], lambda dt: OutputFormatter.strftime(
dt, '%m-%d %H:%M:%S')),
(['CPU'], lambda x: 0 if x < 0.001 else x),
(['READ', 'WRITE'], lambda x: 0 if x < 0.001 else x),
]
)
utc_from = dateutil.local_to_gm(getattr(namespace, 'from'))
utc_to = dateutil.local_to_gm(namespace.to)
history_show = HistoryShowDBGov(
dbengine=dbengine, period_from=utc_from, period_to=utc_to,
uid=namespace.id, show_columns=output_formatted.get_fields(),
server_id=namespace.server_id, order_by=namespace.order_by,
limit=namespace.limit, cfg=config, time_unit=namespace.time_unit,
by_usage=namespace.by_usage, by_usage_percentage=namespace.percentage / 100.
)
if namespace.time_unit == -1:
rows = history_show.history_dbgov_show_dynamic()
else:
rows = history_show.history_dbgov_show()
return namespace, output_formatted, rows
@lvestats.lib.commons.decorators.no_sigpipe
def main(config, *args, **kwargs):
try:
str_ = main_(config, *args, **kwargs)
except (OperationalError, ProgrammingError) as ex:
log.error(str(ex))
else:
if str_:
print(str_)
if __name__ == '__main__':
try:
main(read_config())
except ConfigError as ce:
ce.log_and_exit()