def main():
    args = parse_args()
    logger, model_dir, tb_log_dir = create_logger(config, args.cfg, 'train')
    logger.info(pprint.pformat(args))
    logger.info(config)

    # Pick the trainer implementation from the config.
    if config['IMP_TYPE'] == 'predefine':
        trainer = PredefineTrainer(config, logger, model_dir, tb_log_dir)
    elif config['IMP_TYPE'] == 'RE':
        trainer = RETrainer(config, logger, model_dir, tb_log_dir)
    elif config['IMP_TYPE'] == 'SE':
        trainer = SETrainer(config, logger, model_dir, tb_log_dir)
    else:
        raise NotImplementedError("Trainer type error.")

    for epoch in range(config['TRAIN']['NUM_EPOCH']):
        trainer.train()
        trainer.eval()
        # RE trainers recompute importance scores every 10 epochs.
        if config['IMP_TYPE'] == 'RE' and (epoch + 1) % 10 == 0:
            trainer.re_based_get_imp()
        if epoch == config['TRAIN']['NUM_EPOCH'] - 1:
            trainer.save_checkpoint('final.pth')
        trainer.update_lr()

    trainer.writer.close()
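
# --- Hypothetical sketch (not from the original source): the minimal trainer
# interface that main() above relies on. The class name BaseTrainer and the
# method bodies are assumptions; only the called methods/attributes are taken
# from the loop above.
from torch.utils.tensorboard import SummaryWriter


class BaseTrainer:
    """Sketch of the contract shared by PredefineTrainer/RETrainer/SETrainer."""

    def __init__(self, config, logger, model_dir, tb_log_dir):
        self.config = config
        self.logger = logger
        self.model_dir = model_dir
        self.writer = SummaryWriter(tb_log_dir)  # closed by main() at the end

    def train(self):
        raise NotImplementedError

    def eval(self):
        raise NotImplementedError

    def update_lr(self):
        raise NotImplementedError

    def save_checkpoint(self, filename):
        raise NotImplementedError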
def go_get_em_fogdog(event, context):
    Config.check_env()
    logger = create_logger()
    if 'debug' in event.keys():
        logger.info('Running in debug')
        dog = Fogdog(logger, debug=True,
                     debug_data=event['debug']['data'],
                     send_msg=bool(event['debug']['dispatch']))
    else:
        dog = Fogdog(logger)
    dog.fetch()
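
# --- Hypothetical local invocation (not from the original source): the
# debug-event shape mirrors the keys the handler reads ('debug', 'data',
# 'dispatch'); the contents of 'data' are illustrative.
if __name__ == '__main__':
    debug_event = {
        'debug': {
            'data': {'sample': 'payload'},
            'dispatch': 0,  # falsy: construct Fogdog but do not send messages
        }
    }
    go_get_em_fogdog(debug_event, context=None)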
def task(self, task='is_rxcui_ingredient', **kwargs):
    self.logger = create_logger(task)
    self.logger.info(f'start task {task}...')
    if 'timeout' in kwargs:
        self.timeout = kwargs.get('timeout')

    result = None
    if task == 'is_rxcui_ingredient':
        assert 'rxcui' in kwargs, 'rxcui code must be provided for this task'
        result = self.__task_is_rxcui_ingredient(rxcui=kwargs.get('rxcui'))
    elif task == 'rxcui2ndc':
        pass  # not implemented yet
    elif task == 'ndc2rxcui':
        assert 'ndc' in kwargs, 'ndc code must be provided for this task'
        result = self.__task_ndc2rxcui(ndc=kwargs.get('ndc'))

    # Reset timeout to the config default before returning, so a per-call
    # timeout does not leak into later tasks.
    self.timeout = REQUESTS_TIMEOUT
    return result
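
# --- Hypothetical usage (not from the original source): the client class name
# RxNormClient and the concrete rxcui/ndc codes are illustrative.
client = RxNormClient()
is_ingredient = client.task('is_rxcui_ingredient', rxcui='1191', timeout=5)
rxcui = client.task('ndc2rxcui', ndc='00000-0000-00')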
from pathlib import Path
from typing import List
from unittest import TestCase

from cv2 import cv2

import config
from diff import find_diff_random
from diff.DiffSink import DiffSink
from diff.FaceFinder import FaceFinder
from diff.FaceSquare import FaceSquare
from pipelines import first_pipeline
from services import batch_data_loader_service, video_service, image_service, file_service, face_recog_service
from services.RedisService import RedisService
from util.BatchData import BatchData

logger = config.create_logger(__name__)


class TestFindDiffSwatch(TestCase):

    def test_load_metadata(self):
        # Arrange
        files_c = file_service.walk_to_path(Path(config.TRAIN_PARENT_PATH_C), filename_endswith="metadata.json")
        files_d = file_service.walk_to_path(Path(config.TRAIN_PARENT_PATH_D), filename_endswith="metadata.json")
        files = files_c + files_d

        for f in files:
            batch_data: BatchData = batch_data_loader_service.load_batch_from_path(f)
from typing import Dict

from transformers import (
    BertTokenizer,
    PreTrainedTokenizer
)
from transformers.configuration_utils import PretrainedConfig
import tensorflow as tf

from src.schema import (
    InputExample,
    InputFeatures,
    Config
)
from src.data_process import (
    AgNewsDataProcessor,
    THCNewsDataProcessor
)
from config import create_logger

logger = create_logger()


def convert_single_example(
    example_index: int,
    example: InputExample,
    label2id: Dict[str, int],
    max_seq_length: int,
    tokenizer: BertTokenizer
) -> InputFeatures:
    """Converts a single `InputExample` into a single `InputFeatures`.

    example_index: used to display the first few examples in the data
    """
    parameters = {
        "text": example.text_a,
        "add_special_tokens": True,
        "padding": True,
        "max_length": max_seq_length,
        "return_attention_mask": True,
"""
Xiuming Zhang, MIT CSAIL
July 2017
"""

from os import remove, rename
from os.path import abspath, dirname, basename
from time import time

import numpy as np
import bpy
import bmesh
from mathutils import Vector, Matrix, Quaternion
from mathutils.bvhtree import BVHTree

from xiuminglib.blender import object as xb_object
import config

logger, thisfile = config.create_logger(abspath(__file__))


def add_camera(xyz=(0, 0, 0), rot_vec_rad=(0, 0, 0), name=None,
               proj_model='PERSP', f=35, sensor_fit='HORIZONTAL',
               sensor_width=32, sensor_height=18,
               clip_start=0.1, clip_end=100):
    """
    Add camera to current scene
# coding: utf-8

import os

import urllib3

from cardpay.api_client import is_no_proxy_case
from config import create_logger

logger = create_logger(__name__)

proxy = os.getenv('HTTPS_PROXY', os.getenv('HTTP_PROXY'))
if proxy:
    http = urllib3.ProxyManager(proxy)
    no_proxy_http = urllib3.PoolManager()
else:
    http = urllib3.PoolManager()
    no_proxy_http = urllib3.PoolManager()


def do_get(url):
    if is_no_proxy_case(url):
        r = no_proxy_http.request('GET', url)
    else:
        r = http.request('GET', url)
    # Log the requested URL directly rather than reaching into the response's
    # private _request_url attribute.
    logger.info("%s %s", r.status, url)
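
# --- Hypothetical usage (not from the original source): the URLs are
# illustrative; is_no_proxy_case() decides per-URL whether to bypass the
# configured proxy.
do_get('https://api.example.com/status')      # goes through the proxy if one is set
do_get('http://internal.service.local/ping')  # bypasses it when is_no_proxy_case() matches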
# -*- coding: utf-8 -*-

import worktools
import subprocess
import time
import sys
import config

logger = config.create_logger(__name__)


class ConversionController:

    def __init__(self, fileDataList, threads):
        self.fileDataList = fileDataList
        self.threads = threads

    def start(self):
        logger.info('Converting files to .wav and slicing them apart...')
        logger.info('Starting WorkerController() with conversion tasks...')
        controller = worktools.WorkerController(self.fileDataList, conversion_func,
                                                max_workers=self.threads)
        controller.run()


def conversion_func(fileData):
    for part_info in fileData.content:
        # Random suffix keeps output filenames unique across parts.
        uniquifier = config.generate_random_string()
        new_filename = fileData.filename + '_' + str(part_info['part_num']) + '_' + uniquifier + '.wav'
        cmd = 'avconv -i %s -ss %i -t %i -ac %i -ar %i %s%s' % (
            config.INDEX_PATH + fileData.index_filename,
            part_info['start_time'],
            fileData.part_duration,
            config.AUDIO_CHANNELS,
            config.AUDIO_RATE,
            config.AUDIO_PATH,
            new_filename)
        logger.debug(cmd)
        process = subprocess.Popen(cmd, shell=True, stdout=sys.stdout, stderr=subprocess.PIPE)
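
# --- Hypothetical driver (not from the original source): load_file_data() is
# a made-up helper; each item is assumed to carry the attributes
# conversion_func reads (index_filename, filename, part_duration, content).
controller = ConversionController(fileDataList=load_file_data(), threads=4)
controller.start()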
# help='choose a model: CNN, RNN, RCNN, RNN_Att, DPCNN, Transformer')
# argparse's type=bool would treat any non-empty string (even 'False') as
# True, so parse the flag value explicitly.
parser.add_argument('--word', default=True,
                    type=lambda s: s.lower() in ('true', '1'),
                    help='True for word, False for char')
parser.add_argument('--max_length', default=400, type=int,
                    help='max sequence length')
parser.add_argument('--dictionary', default=None, type=str,
                    help='dictionary path')
args = parser.parse_args()

logger = create_logger(root_path + '/logs/main.log')

if __name__ == '__main__':
    model_name = 'bert'
    x = import_module('models.' + model_name)
    # if model_name in ['bert', 'xlnet', 'roberta']:
    #     config.bert_path = config.root_path + '/model/' + model_name + '/'
    # if 'bert' in model_name:
    #     config.tokenizer = BertTokenizer.from_pretrained(config.bert_path)
    # elif 'xlnet' in model_name:
    #     config.tokenizer = XLNetTokenizer.from_pretrained(config.bert_path)
    # elif 'roberta' in model_name:
    #     config.tokenizer = RobertaTokenizer.from_pretrained(config.bert_path)
    # else:
    #     raise NotImplementedError
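
# --- Hypothetical check (not from the original source): exercising the CLI
# surface programmatically by passing an explicit argv list; paths are
# illustrative.
args = parser.parse_args(['--word', 'False', '--max_length', '256',
                          '--dictionary', 'data/vocab.txt'])
assert args.word is False and args.max_length == 256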