Esempio n. 1
0
def bootstrap_from(peer_url):
    """Copy the complete database contents from a trusted peer.

    Wipes the local database, checks that the peer reports the same
    schema version, then pulls every host, each host's reports, and a
    fixed set of auxiliary tables over the peering XML-RPC interface.

    NOTE(review): targets Python 2 — relies on str.encode('base64') /
    encode('hex') and raw byte strings.
    """
    box = _peer_boxes[peer_url]
    own_key_hex = _own_key.pk.encode('hex')
    please_base64 = box.encrypt("please").encode('base64')

    server = yield deferToThread(ServerProxy, peer_url)
    remote_schema = yield server.peering.schema_version(
        own_key_hex, please_base64)

    print("Initializing database...")
    yield database.clean_database()

    local_schema = yield database.get_schema_version()

    if remote_schema != local_schema:
        raise Exception(
            "Unable to bootstrap from {}: remote database schema version is {}, local {}"
            .format(peer_url, remote_schema, local_schema))

    logging.debug("Remote database schema: {}; local schema: {}".format(
        remote_schema, local_schema))

    hosts = yield deferToThread(server.peering.all_hosts,
                                own_key_hex, please_base64)

    print("Copying data of {} hosts from peer".format(len(hosts)), end="")

    for index, host in enumerate(hosts):
        # Emit one progress dot per 100 hosts.
        if index % 100 == 0:
            print(".", end="")
            sys.stdout.flush()
        yield database.bootstrap_cracker(host)

        # host[1] is the address field of the host row.
        ip_base64 = box.encrypt(host[1]).encode('base64')
        reports = yield deferToThread(server.peering.all_reports_for_host,
                                      own_key_hex, ip_base64)

        for report in reports:
            database.bootstrap_report(report)
    print(" Done")

    for table in ["info", "legacy", "history", "country_history"]:
        print("Copying {} table from peer...".format(table))
        table_base64 = box.encrypt(table).encode('base64')
        rows = yield deferToThread(server.peering.dump_table,
                                   own_key_hex, table_base64)
        for row in rows:
            database.bootstrap_table(table, row)
Esempio n. 2
0
def bootstrap_from(peer_url):
    """Bootstrap the local database from a trusted peer.

    Cleans the local database, verifies schema-version agreement with
    the peer, then copies hosts, per-host reports and several auxiliary
    tables via the peer's XML-RPC peering API.
    """
    key_hex = _own_key.pk.encode('hex')
    please_base64 = _peer_boxes[peer_url].encrypt("please").encode('base64')

    server = yield deferToThread(ServerProxy, peer_url)
    remote_schema = yield server.peering.schema_version(key_hex, please_base64)

    print("Initializing database...")
    yield database.clean_database()

    local_schema = yield database.get_schema_version()

    if remote_schema != local_schema:
        message = (
            "Unable to bootstrap from {}: remote database schema version is {}, local {}"
            .format(peer_url, remote_schema, local_schema))
        raise Exception(message)

    logging.debug(
        "Remote database schema: {}; local schema: {}"
        .format(remote_schema, local_schema))

    hosts = yield deferToThread(server.peering.all_hosts, key_hex, please_base64)

    print("Copying data of {} hosts from peer".format(len(hosts)), end="")

    processed = 0
    for host in hosts:
        # One progress dot per 100 hosts copied.
        if not processed % 100:
            print(".", end="")
            sys.stdout.flush()
        processed += 1
        yield database.bootstrap_cracker(host)

        # host[1] holds the host's IP address.
        ip_cipher = _peer_boxes[peer_url].encrypt(host[1])
        response = yield deferToThread(
            server.peering.all_reports_for_host, key_hex,
            ip_cipher.encode('base64'))

        for report in response:
            database.bootstrap_report(report)
    print(" Done")

    for table in ("info", "legacy", "history", "country_history"):
        print("Copying {} table from peer...".format(table))
        table_cipher = _peer_boxes[peer_url].encrypt(table)
        rows = yield deferToThread(
            server.peering.dump_table, key_hex,
            table_cipher.encode('base64'))
        for row in rows:
            database.bootstrap_table(table, row)
Esempio n. 3
0
    # Destructive maintenance actions require interactive confirmation
    # unless --force was given.
    # NOTE(review): args.recreate_database appears twice in this
    # condition — the second occurrence is redundant.
    if not args.force and (args.recreate_database or args.evolve_database
                           or args.purge_legacy_addresses
                           or args.purge_reported_addresses
                           or args.recreate_database
                           or args.purge_ip is not None):
        print(
            "WARNING: do not run this method when denyhosts-server is running."
        )
        # raw_input: this codebase targets Python 2.
        reply = raw_input("Are you sure you want to continue (Y/N): ")
        if not reply.upper().startswith('Y'):
            sys.exit()

    # Each one-shot action fires a Deferred and stops the reactor on
    # either success or failure (same callback for both outcomes).
    # single_shot presumably tells the caller to skip server startup —
    # the enclosing function is not visible here.
    if args.recreate_database:
        single_shot = True
        database.clean_database().addCallbacks(stop_reactor, stop_reactor)

    if args.evolve_database:
        single_shot = True
        database.evolve_database().addCallbacks(stop_reactor, stop_reactor)

    if args.purge_legacy_addresses:
        single_shot = True
        controllers.purge_legacy_addresses().addCallbacks(
            stop_reactor, stop_reactor)

    if args.purge_reported_addresses:
        single_shot = True
        controllers.purge_reported_addresses().addCallbacks(
            stop_reactor, stop_reactor)
from pathlib import Path
import json
import numpy as np
from subprocess import run
from database import clean_database, read_database, add_to_database
from info_database import update_info_database
from datetime import datetime

# Root directory containing one result directory per database entry.
output = Path('/data3/bolensadrien/output')

clean_database()

database_entries = read_database()
# Presumably a "apply this answer to everything" flag; the visible
# portion of the script never sets it to True — TODO confirm.
yes_all = False
for entry in database_entries:
    status = entry['status']
    # Default to False when the key is absent, then persist the key
    # back onto the entry so later readers always find it.
    raw_files_exist = entry.get('raw_files_exist', False)
    entry['raw_files_exist'] = raw_files_exist
    name = entry['name']
    result_dir = output / name

    if status == 'processed' or status == 'overtime':
        if raw_files_exist:
            answer = None
            # Re-prompt until the user types exactly "yes" or "no".
            while answer not in ("yes", "no"):
                print(f"{name} still contains the raw results files.")
                answer = input(f"Delete the arrays folder and slurm files? ")
                if answer == "yes":
                    # NOTE(review): script is truncated here in this
                    # view — the deletion logic is not visible.
                    #  prof_file = result_dir / 'array-1' / 'file.prof'
                    #  if prof_file.is_file():
                    #      run(['cp', prof_file, result_dir])
Esempio n. 5
0
        or args.bootstrap_from_peer
        or args.purge_ip is not None):
        # Destructive / one-shot actions require confirmation; the start
        # of this condition (and the --force check) is above this view.
        print("WARNING: do not run this method when denyhosts-server is running.")
        # raw_input: this codebase targets Python 2.
        reply = raw_input("Are you sure you want to continue (Y/N): ")
        if not reply.upper().startswith('Y'):
            sys.exit()

    # Exit status mirrors peer reachability: 0 on success, 1 otherwise.
    if args.check_peers:
        if peering.check_peers():
            sys.exit(0)
        else:
            sys.exit(1)

    # Each maintenance branch runs one Deferred action and stops the
    # reactor on either success or failure (same callback for both).
    if args.recreate_database:
        single_shot = True
        database.clean_database().addCallbacks(stop_reactor, stop_reactor)

    if args.evolve_database:
        single_shot = True
        database.evolve_database().addCallbacks(stop_reactor, stop_reactor)

    if args.bootstrap_from_peer:
        single_shot = True
        peering.bootstrap_from(args.bootstrap_from_peer).addCallbacks(stop_reactor, stop_reactor)

    if args.purge_legacy_addresses:
        single_shot = True
        controllers.purge_legacy_addresses().addCallbacks(stop_reactor, stop_reactor)

    # NOTE(review): truncated in this view — the action call for
    # purge_reported_addresses is not visible below this line.
    if args.purge_reported_addresses:
        single_shot = True
Esempio n. 6
0
def run_main():
    """Entry point for the dh_syncserver command line.

    Parses arguments, loads configuration and logging, wires up the
    twistar database pool, then either performs a single one-shot
    maintenance action or starts the long-running XML-RPC sync server.
    Runs the Twisted reactor until shutdown in both cases.

    Side effects: mutates the module globals `configfile` and
    `main_xmlrpc_handler`, installs a SIGHUP handler in server mode, and
    may call sys.exit() when the user declines the confirmation prompt.
    """
    global configfile
    global maintenance_job, legacy_sync_job
    global main_xmlrpc_handler

    parser = argparse.ArgumentParser(description="DenyHosts sync server")
    parser.add_argument("-c", "--config", default="/etc/dh_syncserver.conf", help="Configuration file")
    parser.add_argument("--recreate-database", action="store_true", help="Wipe and recreate the database")
    parser.add_argument(
        "--evolve-database", action="store_true", help="Evolve the database to the latest schema version"
    )
    parser.add_argument(
        "--purge-legacy-addresses",
        action="store_true",
        help="Purge all hosts downloaded from the legacy server. DO NOT USE WHEN DH_SYNCSERVER IS RUNNING!",
    )
    parser.add_argument(
        "--purge-reported-addresses",
        action="store_true",
        help="Purge all hosts that have been reported by clients. DO NOT USE WHEN DH_SYNCSERVER IS RUNNING!",
    )
    parser.add_argument(
        "--purge-ip",
        action="store",
        help="Purge ip address from both legacy and reported host lists. DO NOT USE WHEN DH_SYNCSERVER IS RUNNING!",
    )
    parser.add_argument(
        "-f", "--force", action="store_true", help="Do not ask for confirmation, execute action immediately"
    )
    args = parser.parse_args()

    configfile = args.config

    config.read_config(args.config)

    configure_logging()

    # Global twistar connection pool shared by all registered models.
    Registry.DBPOOL = adbapi.ConnectionPool(config.dbtype, **config.dbparams)
    Registry.register(models.Cracker, models.Report, models.Legacy)

    single_shot = False

    # Destructive actions require interactive confirmation unless --force.
    # Fix: args.recreate_database was previously tested twice in this
    # condition; the duplicate has been removed (no behavior change).
    if not args.force and (
        args.recreate_database
        or args.evolve_database
        or args.purge_legacy_addresses
        or args.purge_reported_addresses
        or args.purge_ip is not None
    ):
        print("WARNING: do not run this method when dh_syncserver is running.")
        # raw_input: this codebase targets Python 2.
        reply = raw_input("Are you sure you want to continue (Y/N): ")
        if not reply.upper().startswith("Y"):
            sys.exit()

    # One-shot maintenance actions: each schedules a Deferred and stops
    # the reactor on success or failure alike.
    if args.recreate_database:
        single_shot = True
        database.clean_database().addCallbacks(stop_reactor, stop_reactor)

    if args.evolve_database:
        single_shot = True
        database.evolve_database().addCallbacks(stop_reactor, stop_reactor)

    if args.purge_legacy_addresses:
        single_shot = True
        controllers.purge_legacy_addresses().addCallbacks(stop_reactor, stop_reactor)

    if args.purge_reported_addresses:
        single_shot = True
        controllers.purge_reported_addresses().addCallbacks(stop_reactor, stop_reactor)

    if args.purge_ip is not None:
        single_shot = True
        controllers.purge_ip(args.purge_ip).addCallbacks(stop_reactor, stop_reactor)

    if not single_shot:
        # Server mode: install SIGHUP handler (sighup_handler — defined
        # elsewhere, presumably reloads config), verify the database
        # schema right after reactor startup, then serve XML-RPC.
        signal.signal(signal.SIGHUP, sighup_handler)
        reactor.addSystemEventTrigger("after", "startup", database.check_database_version)

        main_xmlrpc_handler = views.Server()
        if config.enable_debug_methods:
            d = debug_views.DebugServer(main_xmlrpc_handler)
            main_xmlrpc_handler.putSubHandler("debug", d)
        start_listening(config.listen_port)

        # Set up maintenance and legacy sync jobs
        schedule_jobs()

    # Start reactor
    logging.info("Starting dh_syncserver version {}".format(__init__.version))
    reactor.run()