Example #1
async def user_blogs(ctx, handle, *args):
    obj = json.loads(requests.get(CF_BLOG_API_BASE + handle).text)
    usr = json.loads(requests.get(CF_USER_INFO_BASE + handle).text)
    lbr, ubr = -3000, 3000
    lbt, ubt = 0, 10**10
    tags = []
    parameter_check = True

    for arg in args:
        
        if len(arg) >= 3 and arg[:3] in ['d<<', 'd>=']:
            arg_type, arg_val = arg[:3], arg[3:]
            timestamp, err = get_time_stamp(arg_val)
            if timestamp is None:
                parameter_check = False
                response = get_error_embed('Invalid parameters', err)
                break
            else:
                if arg_type == 'd>=':
                    lbt = max(lbt, timestamp)
                else:
                    ubt = min(ubt, timestamp)
       
        elif len(arg) >= 3 and arg[:3] in ['r<<', 'r>=']:
            arg_type, arg_val = arg[:3], arg[3:]
            rating, err = get_rating_bound(arg_val)
            if rating is None:
                parameter_check = False
                response = get_error_embed('Invalid parameters', err)
                break
            else:
                if arg_type == 'r>=':
                    lbr = max(lbr, rating)
                else:
                    ubr = min(ubr, rating)
       
        elif arg[:1] == '+':
            arg_val = arg[1:]
            tags.append(arg_val)
       
        else:
            parameter_check = False
            response = get_error_embed('Invalid parameters', 'Please enter valid parameters and try again')
            break
            
    if obj['status'] == "OK" and parameter_check is True:
        blog_list = [blog for blog in obj['result'] 
                if lbr <= blog['rating'] and blog['rating'] < ubr 
                and lbt <= blog['creationTimeSeconds'] and blog['creationTimeSeconds'] < ubt 
                and tag_matches(blog , tags)]
        response = get_blog_embed(title=f'{handle}\'s recent blog list:', blog_list=blog_list)
        user_info = usr['result'][0]    
        user_image_url = 'https:' + user_info['titlePhoto']
        response.set_thumbnail(url=user_image_url)
    elif parameter_check:
        response = get_error_embed(name='CF API Error', value=f"{obj['comment']}")
    await ctx.send(embed=response)
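
For reference, a minimal sketch of the two-value helper this example unpacks — a get_time_stamp(value) that parses a date string into Unix seconds and returns (timestamp, error_message). The accepted 'YYYY-MM-DD' format is an assumption; the real helper is not shown on this page:

import time

def get_time_stamp(value):
    # Hypothetical parser: returns (unix_seconds, None) on success and
    # (None, message) on failure, matching how Example #1 unpacks it.
    try:
        return int(time.mktime(time.strptime(value, '%Y-%m-%d'))), None
    except ValueError:
        return None, f"Could not parse '{value}', expected YYYY-MM-DD"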
Example #2
    @classmethod
    def remout(cls, path: str):  # type: (str) -> Asset
        asset = cls(path)
        try:
            with open(asset.json_path, "r", encoding='utf-8') as json_file:
                asset.info = json.load(json_file)
        except Exception:  # fall back to a fresh info file if the JSON is unreadable
            if os.path.exists(asset.json_path):
                os.rename(asset.json_path,
                          asset.json_path + "@error_" + utils.get_time_stamp())
            asset.info = asset.get_empty_info()
            with open(asset.json_path, 'w', encoding='utf-8') as json_file:
                json.dump(asset.info, json_file, indent=4, ensure_ascii=False)

        asset.standardize_info()

        if not asset.update_system_tags():
            asset.update_search_set()

        return asset
Example #3
    def add_to_library(self, context, objects: typing.List[bpy.types.Object],
                       info: dict):

        id = utils.get_slug(info.get("name", "")).strip('-_')
        if not id:
            id = utils.get_slug(
                utils.get_longest_substring(
                    [object.name for object in objects])).strip('-_')
        if not id:
            id = "untitled_" + utils.get_time_stamp()
        id = self.ensure_unique_id(id)

        asset_folder = os.path.join(self.library, id)

        if not info.get("name"):
            info["name"] = id.replace('_', ' ')

        do_move_images = info.pop('do_move_images')

        asset = Asset.new(asset_folder)
        asset.update_info(info)

        blend_file_path = os.path.join(asset_folder, id + ".blend")
        bpy.data.libraries.write(blend_file_path,
                                 set(objects),
                                 fake_user=True,
                                 path_remap='ABSOLUTE',
                                 compress=True)

        initialize_asset = utils.get_script('initialize_asset.py')
        argv = []
        if do_move_images:
            argv.append('-move_textures')
        bl_utils.run_blender(blend_file_path,
                             initialize_asset,
                             argv,
                             use_atool=True,
                             library_path=self.library)

        self[id] = asset

        threading.Thread(target=self.render_icon, args=(id, context)).start()

        update_search(context.window_manager, context)

        return id, blend_file_path
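
This example (and Example #5 below) calls self.ensure_unique_id(id) before creating the asset folder. A minimal sketch of what such a helper could look like, assuming the library object is dict-like (it supports self[id] = asset) and that a numeric suffix is the de-duplication scheme — both assumptions, since the method body is not shown:

    def ensure_unique_id(self, id):
        # Hypothetical: append _2, _3, ... until the id is unused in the library.
        if id not in self:
            return id
        n = 2
        while f"{id}_{n}" in self:
            n += 1
        return f"{id}_{n}"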
Example #4
    @classmethod
    def default(cls, path: os.DirEntry):  # type: (os.DirEntry) -> Asset
        asset = cls(path)
        try:
            global reading_time
            start = timer()
            with open(asset.json_path, "r", encoding='utf-8') as json_file:
                asset.info = json.load(json_file)
            reading_time += timer() - start
        except Exception:  # fall back to a fresh info file if the JSON is unreadable
            if os.path.exists(asset.json_path):
                os.rename(asset.json_path,
                          asset.json_path + "@error_" + utils.get_time_stamp())
            asset.info = asset.get_empty_info()
            with open(asset.json_path, 'w', encoding='utf-8') as json_file:
                json.dump(asset.info, json_file, indent=4, ensure_ascii=False)

        asset.standardize_info()

        if not asset.update_system_tags():
            asset.update_search_set()

        return asset
Example #5
    def add_files_to_library(self, context, files, info: dict):

        id = utils.get_slug(info.get("name", "")).strip('-_')
        if not id:
            files = utils.File_Filter.from_files(files)

            blends = files.get_by_extension('.blend')
            if blends:
                blend = max(blends, key=os.path.getmtime)
                id = blend.stem

            if not id:
                images = files.get_by_type("image")
                if images:
                    image_names = [image.stem for image in images]
                    id = utils.get_slug(
                        utils.get_longest_substring(image_names)).strip('-_')

            if not id:
                id = "untitled_" + utils.get_time_stamp()

        id = self.ensure_unique_id(id)
        asset_folder = os.path.join(self.library, id)

        if not info.get("name"):
            info["name"] = id.replace('_', ' ')

        asset = Asset.new(asset_folder)
        asset.update_info(info)

        asset.move_to_folder(files)

        self[id] = asset

        threading.Thread(target=self.render_icon, args=(id, context)).start()

        update_search(context.window_manager, context)

        return id
Example #6
    def make_screen_shot(self, context, asset):
        space_data = context.space_data

        initial_show_overlays = space_data.overlay.show_overlays
        # only needed if the panels are transparent
        initial_show_region_toolbar = space_data.show_region_toolbar
        initial_show_region_ui = space_data.show_region_ui

        space_data.overlay.show_overlays = False
        space_data.show_region_toolbar = False
        space_data.show_region_ui = False

        # bad: force a redraw so the hidden UI actually disappears before the screenshot
        bpy.ops.wm.redraw_timer(type='DRAW_WIN', iterations=1)

        time_stamp = utils.get_time_stamp()
        file_basename = "".join(("screen_shot_", time_stamp, ".png"))
        screen_shot_path = os.path.join(asset.gallery, file_basename)
        bpy.ops.screen.screenshot(filepath=screen_shot_path, full=False)

        space_data.overlay.show_overlays = initial_show_overlays
        space_data.show_region_toolbar = initial_show_region_toolbar
        space_data.show_region_ui = initial_show_region_ui
Example #7
def training(local_rank, config):

    config["device"] = "cuda" if config["active_gpu_ids"] else "cpu"

    rank = idist.get_rank()
    manual_seed(config["seed"] + rank)

    device = idist.device()

    logger = setup_logger(name="Carbon Black Semantic Segmentation Training",
                          distributed_rank=local_rank)

    log_basic_info(logger, config)

    output_path = config["output_path"]

    if rank == 0:
        if config["stop_iteration"] is None:
            now = utils.get_time_stamp()
        else:
            now = f"stop-on-{config['stop_iteration']}"

        folder_name = (
            f"{config['architecture']}-{config['encoder']}-{config['encoder_weights']}_"
            f"backend-{idist.backend()}-{idist.get_world_size()}_{now}")

        output_path = Path(output_path) / folder_name
        output_path.mkdir(parents=True, exist_ok=True)
        config["output_path"] = output_path.as_posix()
        config["task_name"] = output_path.stem

        logger.info(f"Output path: {output_path}")

        if "cuda" in idist.device().type:
            config["cuda_device_name"] = torch.cuda.get_device_name(local_rank)

        setup_trains_logging(config)

    dataloader_train, dataloader_val = get_dataloaders(config)

    config["num_iterations_per_epoch"] = len(dataloader_train)
    config["num_epochs"] = round(config["num_iterations"] /
                                 config["num_iterations_per_epoch"])
    model = modeling.get_model(config)

    optimizer = get_optimizer(model, config)
    loss = get_loss()

    lr_scheduler = get_lr_scheduler(optimizer, config)

    trainer = create_trainer(model, optimizer, loss, lr_scheduler,
                             dataloader_train.sampler, config, logger)

    metrics = get_metrics(loss)

    # We define two evaluators as they won't have exactly the same roles:
    # - `evaluator` will save the best model based on validation score
    evaluator = create_supervised_evaluator(model,
                                            metrics=metrics,
                                            device=device,
                                            non_blocking=True)
    evaluator_train = create_supervised_evaluator(model,
                                                  metrics=metrics,
                                                  device=device,
                                                  non_blocking=True)

    if rank == 0:
        # Setup TensorBoard logging on trainer and evaluators. Logged values are:
        #  - Training metrics, e.g. running average loss values
        #  - Learning rate
        #  - Evaluation train/test metrics
        evaluators = {"training": evaluator_train, "validation": evaluator}
        tb_logger = common.setup_tb_logging(output_path,
                                            trainer,
                                            optimizer,
                                            evaluators=evaluators)

        example_prediction_logger = ExamplePredictionLogger(
            tb_logger, model, device)

    def run_validation(engine):
        epoch = trainer.state.epoch
        state = evaluator_train.run(dataloader_train)
        data_subset = "Train"
        log_metrics(logger, epoch, state.times["COMPLETED"], data_subset,
                    state.metrics)
        log_confusion_matrix(tb_logger, epoch, data_subset, state.metrics)

        state = evaluator.run(dataloader_val)
        data_subset = "Val"
        log_metrics(logger, epoch, state.times["COMPLETED"], data_subset,
                    state.metrics)
        log_confusion_matrix(tb_logger, epoch, data_subset, state.metrics)
        example_prediction_logger.log_visualization(dataloader_val.dataset,
                                                    epoch)

    trainer.add_event_handler(
        Events.EPOCH_COMPLETED(every=config["validate_every"])
        | Events.COMPLETED, run_validation)

    # Store 3 best models by validation accuracy:
    common.gen_save_best_models_by_val_score(
        save_handler=get_save_handler(config),
        evaluator=evaluator,
        models={"model": model},
        metric_name="accuracy",
        n_saved=3,
        trainer=trainer,
        tag="validation",
    )

    # TODO: Add early stopping

    # In order to check training resuming we can stop training on a given iteration
    if config["stop_iteration"] is not None:

        @trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
        def _():
            logger.info(
                f"Stop training on {trainer.state.iteration} iteration")
            trainer.terminate()

    # noinspection PyBroadException
    try:
        trainer.run(dataloader_train, max_epochs=config["num_epochs"])
    except Exception:
        import traceback

        print(traceback.format_exc())

    if rank == 0:
        # noinspection PyUnboundLocalVariable
        tb_logger.close()
Example #8
    def train(self,
              iterations=100,
              optimizer='Adam',
              learning_rate=1e-4,
              weight_decay=0,
              momentum=0,
              betas=(0.9, 0.999),
              save_name=None,
              save_every=None,
              print_every=10):
        """Trains the network

        Args:
            iterations (int, optional): Number of iterations. Defaults to 100.
            optimizer (str, optional): 'Adam', 'SGD', 'SGD_Nesterov', 'RMSprop'
                                        or 'Adagrad'. Defaults to 'Adam'.
            learning_rate (float, optional): Learning rate. Defaults to 1e-4.
            weight_decay (float, optional): Regularization parameter.
                                            Defaults to 0.
            momentum (float, optional): Momentum of 'SGD', 'SGD_Nesterov'
                                        or 'RMSprop'. Defaults to 0.
            betas (tuple of floats, optional): Betas for Adam.
                                               Defaults to (0.9, 0.999).
            save_name (str, optional): String appended to the time stamp.
                                       Defaults to None.
            save_every (int, optional): Saves every specified iteration.
                                        Defaults to None.
            print_every (int, optional): Prints every specified iteration.
                                         Defaults to 10.
        """

        # Store hyperparameters
        self.optimizer_name = optimizer
        self.learning_rate = learning_rate
        self.weight_decay = weight_decay
        self.momentum = momentum
        self.betas = betas
        self.print_every = print_every
        self.save_every = save_every

        # reset if not loaded
        if self._loaded is False:
            self.train_loader.iteration = 0
            self.loss_best = float('inf')
            self._loss_list = []
            self._loaded = True

        # create new time stamp and folder if necessary
        if save_every is not None and self.time_stamp_path is None:
            time_stamp = utils.get_time_stamp()
            if save_name is not None:
                time_stamp = time_stamp + '_' + save_name
            print("timestamp: " + time_stamp + '\n')
            self.time_stamp_path = os.path.join(self.networks_path, time_stamp)
            os.mkdir(self.time_stamp_path)

        # save hyperparameters
        if save_every is not None:
            self._save_hyper()

        # pass number of iterations to train loader
        self.train_loader.set_iterations(iterations)

        # set optimizer and loss
        self._set_optimizer()
        self.loss = modules.MSELoss()

        # set timers
        time_start_total = time.time()
        self._time_start_print = time.time()
        self._time_start_save = time.time()

        self.net.train(mode=True)
        # training loop
        for _, (iteration, single, pairs, scc, type_data,
                size) in enumerate(self.train_loader):

            self.optimizer.zero_grad()
            pred = self.net(single, pairs, 1, size)
            loss = self.loss(pred, scc)
            loss.backward()
            self.optimizer.step()

            self._loss_list.append(loss.item())

            # print message
            if print_every is not None and (iteration % print_every == 0):
                self._print_loss(iteration)

            # save to file
            if save_every is not None and (iteration % save_every == 0):
                self._save(iteration)

        # print total time
        dt = time.time() - time_start_total
        print("\ntotal time: " + utils.get_formatted_time(dt))
Example #9
    def _ignore_date(self, date):
        return utils.get_time_stamp(date, '%Y-%m-%d') > utils.get_time_stamp(
            self.ignore_before, '%Y-%m-%d')
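
Example #9 (and Example #14 below, which passes a single datetime string) points at a parse-to-seconds variant with an explicit, optionally defaulted format. A minimal sketch under that assumption — the default format and the integer return type are guesses, not the library's confirmed API:

import time

def get_time_stamp(value, fmt='%Y-%m-%d %H:%M:%S'):
    # Hypothetical: parse value with the given strptime format and
    # return Unix seconds, so stamps compare chronologically.
    return int(time.mktime(time.strptime(value, fmt)))

For instance, get_time_stamp('2020-07-10 18:00:00') and get_time_stamp('2020-07-09', '%Y-%m-%d') both yield comparable second counts.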
Example #10
def kafka_to_disk():
    print('Before starting, checking for data files left behind by an unexpected interruption of the previous run......')
    print('Searching for the time directory created by the most recent run of this script......')
    # list of temporary files pending repair
    tmp_list = []
    try:
        for category_dir in os.listdir(local_file_path):
            if len(os.listdir(local_file_path + os.sep + category_dir)) > 0:
                for date_dir in os.listdir(local_file_path + os.sep +
                                           category_dir):
                    if len(
                            os.listdir(local_file_path + os.sep +
                                       category_dir + os.sep + date_dir)) > 0:
                        for file in os.listdir(local_file_path + os.sep +
                                               category_dir + os.sep +
                                               date_dir):
                            if suffix in file:
                                tmp_list.append(local_file_path + os.sep +
                                                category_dir + os.sep +
                                                date_dir + os.sep + file)
    except Exception as e:
        print('The previous run left no temporary files')
    if len(tmp_list) == 0:
        print('No leftover temporary files found')
    else:
        print('Repairing leftover temporary files......')
    tmp_num = 0
    for tmp in tmp_list:
        os.rename(tmp, tmp.split('.')[0] + '.out')
        tmp_num += 1
    print('This startup repaired ★★★★★-----{}-----★★★★★ leftover temporary files'.format(tmp_num))

    category_poor = {
        '1': 'news',
        '2': 'weibo',
        '3': 'weixin',
        '4': 'app',
        '5': 'newspaper',
        '6': 'luntan',
        '7': 'blog',
        '8': 'video',
        '9': 'shangji',
        '10': 'shangjia',
        '11': 'gtzy',
        '12': 'zfztb',
        '13': 'gyfp',
        '14': 'gjz',
        '15': 'zfxx',
        '16': 'ptztb',
        '17': 'company',
        '18': 'house',
        '19': 'hospital',
        '20': 'bank',
        '21': 'zone',
        '22': 'express',
        '23': 'zpgw',
        '24': 'zscq',
        '25': 'hotel',
        '26': 'cpws',
        '27': 'gxqy',
        '28': 'gpjj',
        '29': 'dtyy',
        '30': 'bdbk'
    }

    file_poor = {}  # per-category pool used as a file counter
    time_stamp_poor = {}  # per-category timestamp pool, used to trigger file rotation
    time_stamp = utils.get_time_stamp()  # initialize a millisecond timestamp, e.g. 20180509103015125
    with open('shangjia.out', 'r', encoding='utf-8') as f:
        n_list = f.readlines()

        # the 2nd '|'-separated field selects the category directory
        for n in n_list:
            category = category_poor[n.split('|')[1]]
            category_dir = local_file_path + os.sep + category
            time_dir = category_dir + os.sep + time_stamp[0:8]
            if not os.path.exists(time_dir):
                os.makedirs(time_dir)

            # the 8th '|'-separated field is used to generate the file name
            if n.split('|')[7] in time_stamp_poor:
                shot_file_name = time_stamp_poor[n.split('|')[7]] + '_' + n.split('|')[7]
            else:
                shot_file_name = time_stamp + '_' + n.split('|')[7]
            file_name = time_dir + os.sep + shot_file_name + '.tmp'

            # give each file a counter; when it climbs from 0 to strip_number, a fresh timestamp forms the next file name
            if n.split('|')[7] not in file_poor:
                file_poor[n.split('|')[7]] = 0
            with open(file_name, 'a', encoding='utf-8') as f1:
                f1.write(n)
                file_poor[n.split('|')[7]] += 1

            # trigger the switch to a new file
            if file_poor[n.split('|')[7]] == 15:
                os.rename(file_name, file_name.split('.')[0] + '.out')
                time_stamp_poor[n.split('|')[7]] = utils.get_time_stamp()
                file_poor[n.split('|')[7]] = 0
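
Examples #10 and #13 describe the no-argument call as returning a millisecond-precision stamp such as 20180509103015125. A minimal sketch matching that shape — an assumption about utils.get_time_stamp, not its actual source:

import datetime

def get_time_stamp():
    # Hypothetical: YYYYMMDDHHMMSS plus milliseconds, e.g. '20180509103015125'.
    # %f gives microseconds (6 digits); dropping the last three leaves milliseconds.
    return datetime.datetime.now().strftime('%Y%m%d%H%M%S%f')[:-3]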
Example #11
    def __init__(self, time_str, open_id, gbid):
        self.time_str = time_str
        self.timestamp = utils.get_time_stamp(time_str)
        self.day = utils.get_day(time_str)
        self.account = open_id
        self.gbid = gbid
Example #12
    def test_get_time_stamp(self):
        self.assertEqual(
            utils.get_time_stamp('processed/IDR714.T.201706090812.png'),
            '201706090812')
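
This test expects the helper to pull a 12-digit YYYYMMDDHHMM stamp out of a file name. A minimal sketch that would satisfy it, assuming a search for the first 12-digit run is the intended behaviour:

import re

def get_time_stamp(path):
    # Hypothetical: return the first run of exactly 12 digits, or None.
    match = re.search(r'\d{12}', path)
    return match.group(0) if match else None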
Example #13
def kafka_to_disk():
    print('Before starting, checking for data files left behind by an unexpected interruption of the previous run......')
    print('Searching for the time directory created by the most recent run of this script......')
    # list of temporary files pending repair
    tmp_list = []
    try:
        for category_dir in os.listdir(local_file_path):
            if len(os.listdir(local_file_path + os.sep + category_dir)) > 0:
                for date_dir in os.listdir(local_file_path + os.sep +
                                           category_dir):
                    if len(
                            os.listdir(local_file_path + os.sep +
                                       category_dir + os.sep + date_dir)) > 0:
                        for file in os.listdir(local_file_path + os.sep +
                                               category_dir + os.sep +
                                               date_dir):
                            if suffix in file:
                                tmp_list.append(local_file_path + os.sep +
                                                category_dir + os.sep +
                                                date_dir + os.sep + file)
    except Exception as e:
        pass
    if len(tmp_list) == 0:
        print('No leftover temporary files found')
    else:
        print('Repairing leftover temporary files......')
    tmp_num = 0
    for tmp in tmp_list:
        os.rename(tmp, tmp.split('.')[0] + '.out')
        tmp_num += 1
    print('This startup repaired ★★★★★-----{}-----★★★★★ leftover temporary files'.format(tmp_num))

    category_poor = {
        '1': 'news',
        '2': 'weibo',
        '3': 'weixin',
        '4': 'app',
        '5': 'newspaper',
        '6': 'luntan',
        '7': 'blog',
        '8': 'video',
        '9': 'shangji',
        '10': 'shangjia',
        '11': 'gtzy',
        '12': 'zfztb',
        '13': 'gyfp',
        '14': 'gjz',
        '15': 'zfxx',
        '16': 'ptztb',
        '17': 'company',
        '18': 'house',
        '19': 'hospital',
        '20': 'bank',
        '21': 'zone',
        '22': 'express',
        '23': 'zpgw',
        '24': 'zscq',
        '25': 'hotel',
        '26': 'cpws',
        '27': 'gxqy',
        '28': 'gpjj',
        '29': 'dtyy',
        '30': 'bdbk'
    }

    time_stamp = utils.get_time_stamp()  # initialize a millisecond timestamp, e.g. 20180509103015125
    consumer = KafkaConsumer(topic,
                             group_id=group_id,
                             auto_offset_reset=auto_offset_reset,
                             bootstrap_servers=eval(bootstrap_servers))
    print('Connected to Kafka successfully; filtering data......')
    file_poor = {}  # per-category pool used as a file counter
    time_stamp_poor = {}  # per-category timestamp pool, used to trigger file rotation
    time_stamp = utils.get_time_stamp()  # initialize a millisecond timestamp, e.g. 20180509103015125
    for message in consumer:

        # the 2nd '|'-separated field is matched to a category to create the directory
        if message.value.decode().split('|')[1] in category_poor:
            category = category_poor[message.value.decode().split('|')[1]]
        else:
            print(message.value.decode())
            continue
        category_dir = local_file_path + os.sep + category
        time_dir = category_dir + os.sep + time_stamp[0:8]
        if not os.path.exists(time_dir):
            os.makedirs(time_dir)

        # the 8th '|'-separated field is used to generate the file name
        if message.value.decode().split('|')[7] in time_stamp_poor:
            shot_file_name = time_stamp_poor[message.value.decode().split(
                '|')[7]] + '_' + message.value.decode().split('|')[7]
        else:
            shot_file_name = time_stamp + '_' + message.value.decode().split(
                '|')[7]
        file_name = time_dir + os.sep + shot_file_name + '.tmp'

        # give each file a counter
        if message.value.decode().split('|')[7] not in file_poor:
            file_poor[message.value.decode().split('|')[7]] = 0

        with open(file_name, 'a', encoding='utf-8') as f1:
            f1.write(message.value.decode())
            file_poor[message.value.decode().split('|')[7]] += 1

        # trigger the switch to a new file, using a timestamp for the next file name
        if file_poor[message.value.decode().split('|')[7]] == strip_number:
            os.rename(file_name, file_name.split('.')[0] + '.out')
            time_stamp_poor[message.value.decode().split('|')
                            [7]] = utils.get_time_stamp()
            file_poor[message.value.decode().split('|')[7]] = 0
Example #14
    def is_need_delete(self):
        end_time = utils.get_time_stamp('2020-07-10 18:00:00')
        if self.first_login_time > end_time:
            return True

        return False