    # Map druid column names to the corresponding keys in data_dict, then
    # replace each mapping value with the scraped value itself.
    druid_dict = dict(
        rc='rc', rrg='rrg', qty='quantity', ts='to_sell', byb='byb',
        r5p='r5p', r4p='r4p', r3p='r3p', r2p='r2p', r1p='r1p',
        tpre='ts_min_price', bsr='bsr', qtydt='qtydt', aday='aday',
        # special='',
    )
    for item in druid_dict:
        druid_dict[item] = data_dict[druid_dict[item]]
    # print('druid', druid_dict)
    dataOutput.save_data_to_db(update_sql, insert_sql, the_asin, druid_dict,
                               db_name=db_name)


if __name__ == '__main__':
    # Derive the log name from the script file name.
    log_name = sys.argv[0].split('/')[-1].split('.')[0]
    print(log_name)
    debug_log = Logger(log_name=log_name)
    db_log = Logger(log_level='DB', log_name=log_name)
    myRedis = GetRedis().return_redis(debug_log)
    dataQ = DataQueue(myRedis, debug_log)
    goods_data_save(dataQ, debug_log, db_log)
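
# A minimal, self-contained sketch of the field-mapping pattern used above:
# the mapping dict starts as {druid_column: data_dict_key} and the loop
# replaces each value with the scraped value itself. The names sample_mapping
# and sample_data are invented for this illustration and are not part of the
# original module.
sample_mapping = {'qty': 'quantity', 'tpre': 'ts_min_price'}
sample_data = {'quantity': 35, 'ts_min_price': 9.99}
for key in sample_mapping:
    sample_mapping[key] = sample_data[sample_mapping[key]]
assert sample_mapping == {'qty': 35, 'tpre': 9.99}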
def main():
    # Load training configuration
    args = parser.parse_args()

    net = args.net
    initial_checkpoint = args.ckpt
    out_dir = args.out_dir
    weight_decay = args.weight_decay
    momentum = args.momentum
    optimizer = args.optimizer
    init_lr = args.init_lr
    epochs = args.epochs
    epoch_save = args.epoch_save
    epoch_rcnn = args.epoch_rcnn
    epoch_mask = args.epoch_mask
    batch_size = args.batch_size
    train_set_list = args.train_set_list
    val_set_list = args.val_set_list
    num_workers = args.num_workers
    lr_schedule = train_config['lr_schedule']
    data_dir = args.data_dir
    label_types = config['label_types']

    # Build one reader per training/validation set, choosing the reader class
    # by the label type configured for that set.
    train_dataset_list = []
    val_dataset_list = []
    for i in range(len(train_set_list)):
        set_name = train_set_list[i]
        label_type = label_types[i]
        if label_type == 'bbox':
            dataset = BboxReader(data_dir, set_name, config, mode='train')
        elif label_type == 'mask':
            dataset = MaskReader(data_dir, set_name, config, mode='train')
        train_dataset_list.append(dataset)

    for i in range(len(val_set_list)):
        set_name = val_set_list[i]
        label_type = label_types[i]
        if label_type == 'bbox':
            dataset = BboxReader(data_dir, set_name, config, mode='val')
        elif label_type == 'mask':
            dataset = MaskReader(data_dir, set_name, config, mode='val')
        val_dataset_list.append(dataset)

    train_loader = DataLoader(ConcatDataset(train_dataset_list), batch_size=batch_size, shuffle=True,
                              num_workers=num_workers, pin_memory=True, collate_fn=train_collate)
    val_loader = DataLoader(ConcatDataset(val_dataset_list), batch_size=batch_size, shuffle=False,
                            num_workers=num_workers, pin_memory=True, collate_fn=train_collate)

    # Initialize network
    net = getattr(this_module, net)(net_config)
    net = net.cuda()

    optimizer = getattr(torch.optim, optimizer)
    # optimizer = optimizer(net.parameters(), lr=init_lr, weight_decay=weight_decay)
    # Note: the momentum argument assumes an SGD-style optimizer.
    optimizer = optimizer(net.parameters(), lr=init_lr, weight_decay=weight_decay, momentum=momentum)

    start_epoch = 0
    if initial_checkpoint:
        print('[Loading model from %s]' % initial_checkpoint)
        checkpoint = torch.load(initial_checkpoint)
        start_epoch = checkpoint['epoch']

        state = net.state_dict()
        state.update(checkpoint['state_dict'])
        try:
            net.load_state_dict(state)
            optimizer.load_state_dict(checkpoint['optimizer'])
        except Exception:
            print('Load something failed!')
            traceback.print_exc()
    start_epoch = start_epoch + 1

    model_out_dir = os.path.join(out_dir, 'model')
    tb_out_dir = os.path.join(out_dir, 'runs')
    if not os.path.exists(model_out_dir):
        os.makedirs(model_out_dir)
    logfile = os.path.join(out_dir, 'log_train')
    sys.stdout = Logger(logfile)

    print('[Training configuration]')
    for arg in vars(args):
        print(arg, getattr(args, arg))

    print('[Model configuration]')
    pprint.pprint(net_config)

    print('[start_epoch %d, out_dir %s]' % (start_epoch, out_dir))
    print('[length of train loader %d, length of valid loader %d]' % (len(train_loader), len(val_loader)))

    # Write graph to tensorboard for visualization
    writer = SummaryWriter(tb_out_dir)
    train_writer = SummaryWriter(os.path.join(tb_out_dir, 'train'))
    val_writer = SummaryWriter(os.path.join(tb_out_dir, 'val'))
    # writer.add_graph(net, (torch.zeros((16, 1, 128, 128, 128)).cuda(), [[]], [[]], [[]],
    #                        [torch.zeros((16, 128, 128, 128))]), verbose=False)

    for i in tqdm(range(start_epoch, epochs + 1), desc='Total'):
        # Learning rate schedule: only SGD follows the configured schedule
        if isinstance(optimizer, torch.optim.SGD):
            lr = lr_schedule(i, init_lr=init_lr, total=epochs)
            for param_group in optimizer.param_groups:
                param_group['lr'] = lr
        else:
            lr = np.inf

        # Enable the RCNN and mask heads once their warm-up epochs are reached
        if i >= epoch_rcnn:
            net.use_rcnn = True
        else:
            net.use_rcnn = False
        if i >= epoch_mask:
            net.use_mask = True
        else:
            net.use_mask = False

        print('[epoch %d, lr %f, use_rcnn: %r, use_mask: %r]' % (i, lr, net.use_rcnn, net.use_mask))

        train(net, train_loader, optimizer, i, train_writer)
        validate(net, val_loader, i, val_writer)
        print()

        # Move weights to CPU before checkpointing
        state_dict = net.state_dict()
        for key in state_dict.keys():
            state_dict[key] = state_dict[key].cpu()

        if i % epoch_save == 0:
            torch.save({
                'epoch': i,
                'out_dir': out_dir,
                'state_dict': state_dict,
                'optimizer': optimizer.state_dict()},
                os.path.join(model_out_dir, '%03d.ckpt' % i))

    writer.close()
    train_writer.close()
    val_writer.close()
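
# train_config['lr_schedule'] is defined elsewhere in the project; it is called
# above as lr_schedule(epoch, init_lr=..., total=...). The function below is a
# hypothetical sketch of a compatible step-decay schedule, shown only to
# illustrate the expected signature, not the project's actual schedule.
def example_step_lr_schedule(epoch, init_lr=0.01, total=200):
    # Keep the base rate for the first half of training, then decay by 10x
    # at 50% and again at 80% of the total epochs.
    if epoch <= total * 0.5:
        return init_lr
    elif epoch <= total * 0.8:
        return init_lr * 0.1
    else:
        return init_lr * 0.01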
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import time
import requests
from threading import Thread
from utils.util import Logger, GetRedis, UrlQueue, KeyWordQueue, return_PST

# Current time in US Pacific time and in local (GMT+8) time, as strings.
pst_now = lambda: return_PST().strftime('%Y-%m-%d %H:%M:%S')
date_now = lambda: time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))

debug_log = Logger()
myRedis = GetRedis().return_redis(debug_log)
urlQ = UrlQueue(myRedis, debug_log)

send_api = 'https://sys.selmetrics.com/pyapi/send'
tm_format = 'PT: %s\nGMT+8: %s\n'


def send_email_api(url_in, json_in):
    # Post a JSON payload to the internal mail-sending API.
    return requests.post(url_in, json=json_in, verify=False)


def look_set_len(myRedis, sName, set_type=''):
    # Report and return the number of members in the given Redis set.
    r = myRedis
    the_set = r.smembers(sName)
    the_len = len(the_set)
    print('\n%s len: %s\n' % (set_type, the_len))
    return the_len
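
# Hypothetical usage sketch (not part of the original module): report the size
# of a Redis set through the internal mail API. The set name 'goods_url_set'
# and the payload fields 'subject'/'content' are assumptions for illustration.
if __name__ == '__main__':
    set_len = look_set_len(myRedis, 'goods_url_set', set_type='goods url set')
    payload = {
        'subject': 'redis set length report',
        'content': tm_format % (pst_now(), date_now()) + 'len: %s' % set_len,
    }
    resp = send_email_api(send_api, payload)
    print(resp.status_code, resp.text)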