#!/opt/imh-python/bin/python3
"""Check for orphaned databases"""
from argparse import ArgumentParser
import json
import os
import pwd
import platform
from contextlib import closing
from multiprocessing import Queue, Process
import pymysql
from pymysql.cursors import Cursor
from pymysql.optionfile import Parser as PyMySQLParser
import psycopg2
from rads import send_email

DEST_EMAIL = "reclamations@imhadmin.net"
TOOL_URL = 'https://docs.cpanel.net/whm/scripts/the-dbmaptool-script/'
DBINDEX = '/var/cpanel/databases/dbindex.db.json'
ROOT_MYSQL = {
    'leechprotect',
    'cphulkd',
    'modsec',
    'mysql',
    'horde',
    'sys',
    'information_schema',
    'performance_schema',
    'perl5',
}
ROOT_PGSQL = {'nagios_check', 'postgres'}


def read_dbmap() -> tuple[dict[str, str], dict[str, str]]:
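    """Return the (mysql, pgsql) ownership maps from cPanel's dbindex"""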
    # The backup scheduler already runs /usr/local/cpanel/bin/dbindex,
    # so no need to run that again before checking.
    with open(DBINDEX, encoding='utf-8') as file:
        dbindex = {k.lower(): v for k, v in json.load(file).items()}
    return dbindex.get('mysql', {}), dbindex.get('pgsql', {})


def mysql_conn(**kwargs) -> pymysql.Connection:
    """Open and return a MySQL connection"""
    try:
        cfg = PyMySQLParser(strict=False)
        cfg.read('/root/.my.cnf')
        unix_socket = cfg.get('client', 'socket')
    except Exception:
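        # fall back to the default socket path if /root/.my.cnf can't be
        # read or doesn't define one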
        unix_socket = '/var/lib/mysql/mysql.sock'
    return pymysql.connect(
        read_default_file='/root/.my.cnf',
        unix_socket=unix_socket,
        **kwargs,
    )


def list_mysql_dbs() -> set[str]:
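    """Return all MySQL database names except the system ones in ROOT_MYSQL"""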
    with mysql_conn() as conn, conn.cursor() as cur:
        cur: Cursor
        cur.execute('SHOW DATABASES')
        return {x[0] for x in cur.fetchall() if x[0] not in ROOT_MYSQL}


def pgsql_query(*args) -> list[tuple]:
    """Execute a pgsql query"""
    pg_uid = pwd.getpwnam('postgres').pw_uid
    with closing(Queue()) as queue:
        kwargs = {
            'environ': {'PGPASSFILE': '/var/lib/pgsql/.pgpass'},
            'queue': queue,
            'pg_uid': pg_uid,
        }
        proc = Process(
            target=_setuid_pgsql_query,
            args=args,
            kwargs=kwargs,
            daemon=True,
        )
        proc.start()
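        # blocks until the child posts either the result rows or an exception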
        ret = queue.get()
        proc.join(timeout=1)
        if proc.is_alive():
            proc.kill()
    if isinstance(ret, list):
        return ret
    raise ret  # if it's not a list, it's an exception object


def _setuid_pgsql_query(
    *args,
    environ: dict,
    queue: Queue,
    pg_uid: int,
) -> None:
    """Do not call this directly. See ``pgsql_query``"""
    # By dropping privs to the postgres user, this forked process will be able
    # to access pgsql using "peer" auth.
    # See: https://www.postgresql.org/docs/9.1/auth-methods.html
    try:
        # setuid inside the try block so a failure (e.g. not running as root)
        # is reported back to the parent instead of leaving it blocked on
        # queue.get() forever
        os.setuid(pg_uid)
        os.environ.update(environ)
        conn = psycopg2.connect(connect_timeout=3)
        cur = conn.cursor()
        try:
            cur.execute(*args)
            queue.put(cur.fetchall())
        finally:
            cur.close()
            conn.close()
    except Exception as exc:
        queue.put(exc)


def list_pgsql_dbs() -> set[str]:
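    """Return all non-template, non-system PostgreSQL database names"""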
    rows = pgsql_query(
        'SELECT datname FROM pg_database WHERE datistemplate = false'
    )
    return {row[0] for row in rows if row[0] not in ROOT_PGSQL}


def parse_args():
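    """Parse command-line arguments"""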
    parser = ArgumentParser(description=__doc__)
    group = parser.add_mutually_exclusive_group(required=True)
    # fmt: off
    group.add_argument(
        '--cron', action='store_true', help=f"Email results to {DEST_EMAIL}"
    )
    group.add_argument(
        '--mysql', action='store_true',
        help="Print orphaned MySQL dbs to stdout",
    )
    group.add_argument(
        '--pgsql', action='store_true',
        help="Print orphaned PostgreSQL dbs to stdout",
    )
    # fmt: on
    return parser.parse_args()


def main():
    """Check for orphaned databases"""
    args = parse_args()
    mysql_owners, pgsql_owners = read_dbmap()
    if args.cron or args.mysql:
        extra_mysql = list_mysql_dbs().difference(mysql_owners.keys())
    if args.cron or args.pgsql:
        extra_pgsql = list_pgsql_dbs().difference(pgsql_owners.keys())
    if args.cron:
        alert(extra_mysql, 'MySQL')
        alert(extra_pgsql, 'PostgreSQL')
    if args.mysql and extra_mysql:
        print(*sorted(extra_mysql), sep='\n')
    if args.pgsql and extra_pgsql:
        print(*sorted(extra_pgsql), sep='\n')


def alert(extras: set[str], db_type: str):
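    """Email the list of orphaned databases to DEST_EMAIL"""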
    if not extras:
        return
    lines = '\n'.join(sorted(extras))
    host = platform.node().split('.', maxsplit=1)[0]
    send_email(
        to_addr=DEST_EMAIL,
        errs=True,
        subject=f"Orphaned {db_type} databases on {host}",
        body=f"""The following databases were found in {db_type} on {host}, but
aren't associated with a cPanel user in {DBINDEX}.

If the cPanel user still exists, you can associate the database with that user
using cPanel's dbmaptool:
{TOOL_URL}

Then run /usr/local/cpanel/bin/dbindex to update the ownership file.

Otherwise, it might be a database left behind after removeacct failed to delete
it, in which case you can drop the database.

Orphaned {db_type} databases found:
{lines}
""",
    )


if __name__ == '__main__':
    main()
