Example #1
import asyncio
from datetime import datetime, timedelta

async def health_check(self):
    """Watchdog: deregister any shard whose last check-in is more than 5 seconds old."""
    while True:
        await asyncio.sleep(1)
        for shard, last_checkin in self.last_checkin.items():
            if last_checkin is not None and last_checkin < datetime.now() - timedelta(seconds=5):
                logger.error(f"--- SHARD {shard} MISSED ITS HEARTBEAT, DEREGISTERING... ---")
                self.registered[shard] = False
                self.last_checkin[shard] = None
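For context, the check-in side is not shown. A minimal sketch of what a shard's heartbeat loop might look like, assuming the same last_checkin / registered dicts (the checkin name itself is hypothetical):

async def checkin(self, shard):
    # Hypothetical counterpart (not in the source): refresh this shard's
    # timestamp every second so the watchdog above never sees it as stale.
    while self.registered.get(shard):
        self.last_checkin[shard] = datetime.now()
        await asyncio.sleep(1)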
Example #2
import asyncio
from discord import Forbidden, HTTPException

async def cache_guild(self, guild):
    '''cache interesting information about all the messages in a guild'''
    logger.debug(f"Downloading messages in {len(guild.text_channels)} text channels for '{guild.name}'...")
    for channel in guild.text_channels:
        try:
            await self.cache_channel(channel)
        except Forbidden:
            logger.warning(f"Insufficient permissions to download messages from '{guild.name}.{channel.name}'")
        except HTTPException as e:
            logger.error(f"Caught {e} when downloading '{guild.name}.{channel.name}'")
            logger.error("trying again in 10 seconds...")
            await asyncio.sleep(10)
            try:
                await self.cache_channel(channel)
            except Exception:
                logger.exception("failed to download channel a second time, giving up :(")
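cache_channel itself is not shown. A minimal sketch of its assumed shape, using discord.py's channel.history() (the self.cache structure is hypothetical):

async def cache_channel(self, channel):
    # Assumed sketch: walk the full channel history and record a few fields
    # per message. history() can raise Forbidden or HTTPException, which
    # cache_guild above handles.
    async for message in channel.history(limit=None):
        self.cache.setdefault(channel.id, []).append(
            (message.id, message.author.id, message.created_at)
        )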
Example #3
import traceback

import numpy as np
import torch
import torch.nn.functional as F


def train(model, optimizer, scheduler, ema, bert_config, start_step, steps_num,
          epoch):
    model.train()
    tokenizer, train_dataloader = load_data(bert_config)
    clamped_losses = []
    origin_losses = []
    exact_match_total = 0
    f1_total = 0
    exact_match = 0
    f1 = 0
    logger.info(f"Starting training epoch {epoch} from step {start_step}")
    softmax = torch.nn.Softmax(dim=-1)
    log_softmax = torch.nn.LogSoftmax(dim=-1)
    for step, batch in enumerate(train_dataloader):
        try:
            optimizer.zero_grad()
            batch = tuple(t.to(device) for t in batch)
            input_ids, input_mask, segment_ids, start_positions, end_positions = batch
            # All batch tensors were already moved to the device above;
            # just cast the attention mask to float.
            input_mask = input_mask.float()
            start_embeddings, end_embeddings = model(input_ids, input_mask,
                                                     segment_ids)
            loss1 = F.nll_loss(log_softmax(start_embeddings),
                               start_positions,
                               reduction='mean')
            loss2 = F.nll_loss(log_softmax(end_embeddings),
                               end_positions,
                               reduction='mean')
            loss = (loss1 + loss2) / 2
            logger.info(f"Origin Loss: {loss}, epoch: {epoch}, step: {step}")
            origin_losses.append(loss.item())

            pre_start, pre_end, probabilities = find_max_proper_batch(
                softmax(start_embeddings), softmax(end_embeddings))
            pre_loss = loss  # keep the unclamped loss for visualization below
            cur_res = convert_pre_res(input_ids, pre_start, pre_end,
                                      start_positions, end_positions,
                                      probabilities, tokenizer)

            loss = torch.clamp(loss, min=config.min_loss, max=config.max_loss)
            logger.info(f"Clamped Loss: {loss}, epoch: {epoch}, step: {step}")
            clamped_losses.append(loss.item())
            loss.backward()
            record_info(valid_result=cur_res, epoch=epoch, is_continue=True)
            exact_match_total, f1_total, exact_match, f1 = evaluate_valid_result(
                cur_res, exact_match_total, f1_total,
                (step + 1) * config.n_batch)
            visual_data(model,
                        loss,
                        pre_loss,
                        optimizer,
                        epoch,
                        step,
                        exact_match_total,
                        f1_total,
                        exact_match,
                        f1,
                        label="train")
            optimizer.step()
            scheduler.step()
            if config.use_ema:
                for name, p in model.named_parameters():
                    if p.requires_grad: ema.update_parameter(name, p)

            if step % config.interval_save == 0:
                save_model(model, optimizer, step)
                record_info(origin_losses, r_type="train", epoch=epoch)
                origin_losses = []
        except Exception:
            # Log the full traceback, but keep training on the next batch.
            logger.error(traceback.format_exc())
    loss_avg = np.mean(clamped_losses)
    logger.info("Epoch {:8d} loss {:8f}\n".format(epoch, loss_avg))
Example #4
def main():
    # parser = argparse.ArgumentParser()
    # parser.add_argument("--mode", action="store", dest="mode", default="train",
    #                     help="train/test/debug")
    # pargs = parser.parse_args()
    mode = config.mode
    logger.info("Current device is {}".format(device))
    if mode == "train":
        train_entry()
    elif mode == "debug":
        config.batch_size = 2
        config.num_steps = 32
        config.test_num_batches = 2
        config.val_num_batches = 2
        config.checkpoint = 2
        config.period = 1
        train_entry()
    elif mode == "test":
        test_entry()
    elif mode == "classify":
        classify_data()
    else:
        print("Unknown mode")
        exit(1)  # an unknown mode is an error, so exit non-zero


if __name__ == '__main__':
    try:
        main()
    except Exception:
        logger.error(traceback.format_exc())
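If the commented-out argparse block were restored, the mode could come from the command line instead of config; a minimal sketch based on that commented code:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--mode", action="store", dest="mode", default="train",
                    help="train/test/debug/classify")
mode = parser.parse_args().mode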
Example #5
    logfilepath = os.path.join(os.path.dirname(os.path.realpath(sys.argv[0])),
                               finishlogfile)
    # Open in text mode: strftime() returns a str, which cannot be written
    # to a binary-mode file.
    with open(logfilepath, 'w') as f:
        f.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'))


if __name__ == '__main__':
    # Get the start timestamp
    logger.info("----------------- Program Start ---------------------")
    try:
        paramsfile = "param.json"
        paramsfilepath = os.path.join(work_path, paramsfile)
        with open(paramsfilepath) as f:
            params = json.load(f)
    except Exception as E:
        logger.error("Can not load {0}: {1}".format(paramsfile, E))
        sys.exit(1)

    # Handle the debug flag
    debug_flag = params.get("debug")
    if debug_flag is True:
        fullname = "debug.log"
        sys.stderr = open(fullname, "w+")
    else:
        # Send stderr to the null device rather than None: explicit writes
        # to a None stderr would raise AttributeError.
        sys.stderr = open(os.devnull, "w")

    # Handle the maximum connection count: clamp it to [100, 1000]
    max_socket_count = params.get("maxsocket", 100)
    if max_socket_count <= 100:
        max_socket_count = 100
    elif max_socket_count >= 1000:
        max_socket_count = 1000  # assumed cap, mirroring the lower bound
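The clamp can also be written as a single expression (same assumed 100..1000 bounds):

max_socket_count = max(100, min(int(params.get("maxsocket", 100)), 1000))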
Example #6
def bruteforce_interface(portScan_result_list, timeout, no_default_dict, proto_list, pool):
    password_total = Password_total()
    password_total.init(no_default_dict)
    try:
        import Queue  # Python 2 stdlib ("queue" in Python 3)
        import psycopg2
        tasksQueue = Queue.Queue()
        postgreSQL_login = PostgreSQL_login()
        for one_portscan_result in portScan_result_list:
            service = one_portscan_result.get("service").lower()
            ipaddress = one_portscan_result.get("ipaddress")
            port = one_portscan_result.get("port")
            if "postgresql" in service and "postgresql" in proto_list:
                tasksQueue.put(
                    (postgreSQL_login.login, (ipaddress, port, password_total.PostgreSQL_user_passwd_pair_list)))

        runner = Runer(100)
        runner.taskQueue = tasksQueue
        runner.start()
    except Exception as E:
        logger.warning("Can not import psycopg2, skipping PostgreSQL: {0}".format(E))

    # Coroutine-based scanning
    patch_all()
    try:
        import gevent_openssl
        gevent_openssl.monkey_patch()
        from bruteforce.rdp_check import check_rdp
        SSL_FLAG = True
    except Exception as E:
        logger.error("Can not import gevent_openssl / rdp_check, skipping RDP: {0}".format(E))
        SSL_FLAG = False

    if SSL_FLAG:
        rdp_login = RDP_login(timeout)  # 1.07500004768

    smb_login = SMB_login(timeout)  # 1.08800005913
    ssh_login = SSH_login(timeout)  # 30.617000103
    ftp_login = FTP_login(timeout)  # 9.10599994659
    mysql_login = MySQL_login(timeout)  # 15.7749998569
    mssql_login = MSSQL_login(timeout)  # 1.04799985886
    redis_login = Redis_login(timeout)  # 12.3710000515
    mongo_login = MongoDB_login(timeout)  # 12.9830000401
    memcached_login = Memcached_login(timeout)  # 2.07899999619
    vnc_login = VNC_login(timeout)  # 6.06700015068
    tasks = []
    for one_portscan_result in portScan_result_list:
        service = one_portscan_result.get("service").lower()
        ipaddress = one_portscan_result.get("ipaddress")
        port = one_portscan_result.get("port")

        # Fast scans
        if SSL_FLAG and ("ms-wbt-server" in service or "rdp" in service) and "rdp" in proto_list:
            task = pool.spawn(rdp_login.login, ipaddress, port, password_total.RDP_user_passwd_pair_list)
            tasks.append(task)

        if "ssh" in service and "ssh" in proto_list:  # 原生支持协程,直接扫描
            ssh_login.login_with_pool(ipaddress, port, password_total.SSH_user_passwd_pair_list, pool.size)

        if "mongodb" in service and "mongodb" in proto_list:
            task = pool.spawn(mongo_login.login, ipaddress, port, password_total.MongoDB_user_passwd_pair_list)
            tasks.append(task)
        if "ftp" in service and "ftp" in proto_list:
            task = pool.spawn(ftp_login.login, ipaddress, port, password_total.FTP_user_passwd_pair_list)
            tasks.append(task)
        if "microsoft-ds" in service and "smb" in proto_list:
            task = pool.spawn(smb_login.login, ipaddress, port, password_total.SMB_user_passwd_pair_list)
            tasks.append(task)
        if "mysql" in service and "mysql" in proto_list:
            task = pool.spawn(mysql_login.login, ipaddress, port, password_total.MYSQL_user_passwd_pair_list)
            tasks.append(task)
        if "ms-sql-s" in service and "mssql" in proto_list:
            task = pool.spawn(mssql_login.login, ipaddress, port, password_total.MSSQL_user_passwd_pair_list)
            tasks.append(task)
        if "redis" in service and "redis" in proto_list:
            task = pool.spawn(redis_login.login, ipaddress, port, password_total.Redis_user_passwd_pair_list)
            tasks.append(task)
        if "memcached" in service and "memcached" in proto_list:
            task = pool.spawn(memcached_login.login, ipaddress, port, password_total.Memcached_user_passwd_pair_list)
            tasks.append(task)
        if "vnc" in service and "vnc" in proto_list:
            task = pool.spawn(vnc_login.login, ipaddress, port, password_total.VNC_user_passwd_pair_list)
            tasks.append(task)
    gevent.joinall(tasks)
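The Runer class that drains the PostgreSQL tasksQueue is not shown. A minimal sketch of its assumed shape (hypothetical reconstruction: a fixed-size thread pool consuming (callable, args) tuples, matching how tasksQueue is filled above):

import Queue
import threading

class Runer(object):
    """Hypothetical reconstruction: drain a queue of (callable, args)
    tuples with a fixed number of worker threads."""

    def __init__(self, thread_count):
        self.thread_count = thread_count
        self.taskQueue = None  # assigned by the caller, as above

    def _worker(self):
        while True:
            try:
                func, args = self.taskQueue.get_nowait()
            except Queue.Empty:
                return
            try:
                func(*args)
            except Exception:
                pass  # a failed login attempt should not kill the worker

    def start(self):
        threads = [threading.Thread(target=self._worker)
                   for _ in range(self.thread_count)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()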