Example #1
File: l2o_train.py Project: NVlabs/ASG
def prepare_optimizee(args, sgd_in_names, obs_shape, hidden_size, actor_critic,
                      current_optimizee_step, prev_optimizee_step):
    prev_optimizee_step += current_optimizee_step
    current_optimizee_step = 0

    model = resnet101(pretrained=True)
    num_ftrs = model.fc.in_features
    fc_layers = nn.Sequential(
        nn.Linear(num_ftrs, 512),
        nn.ReLU(inplace=True),
        nn.Linear(512, args.num_class),
    )
    model.fc_new = fc_layers

    train_blocks = args.train_blocks.split('.')
    # by default, freeze fc and train only fc_new
    for param in model.fc.parameters():
        param.requires_grad = False
    ##### Freeze several bottom layers (Optional) #####
    non_train_blocks = [
        'conv1', 'bn1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc'
    ]
    for name in train_blocks:
        try:
            non_train_blocks.remove(name)
        except ValueError:
            print(
                "cannot find block name %s\nAvailable blocks are: conv1, bn1, layer1, layer2, layer3, layer4, fc"
                % name)
    for name in non_train_blocks:
        for param in getattr(model, name).parameters():
            param.requires_grad = False

    # Setup optimizer
    sgd_in = []
    for name in train_blocks:
        if name != 'fc':
            sgd_in.append({'params': get_params(model, [name]), 'lr': args.lr})
        else:
            sgd_in.append({
                'params': get_params(model, ["fc_new"]),
                'lr': args.lr
            })
    base_lrs = [group['lr'] for group in sgd_in]
    optimizer = SGD(sgd_in,
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    model = model.cuda()
    model.eval()
    return model, optimizer, current_optimizee_step, prev_optimizee_step
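
Every PyTorch snippet on this page (examples #1, #5, #12, #14, #15, #17) passes a model and a list of block names to get_params when assembling its SGD parameter groups, but the helper itself is never shown. A minimal sketch of what it presumably does, assuming it simply yields the trainable parameters of the named top-level submodules; the body below is a guess for illustration, not the ASG repository's actual code:

def get_params(model, block_names):
    # Assumed reconstruction: walk the named top-level submodules
    # (e.g. "layer1", "fc_new") and yield each parameter that is
    # still trainable, so frozen blocks contribute nothing.
    for name in block_names:
        module = getattr(model, name)
        for param in module.parameters():
            if param.requires_grad:
                yield param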
Example #2
def settings(*args, **kwargs):
    '''
    Update settings
    '''
    username = kwargs.get('username')
    params = utils.get_params(
        ['former_show_col', 'future_show_col', 'currency', 'currency_rate_rmb2ab', 'currency_rate_rmb2dollar'])
    pwd_old = request.params.get('pwd_old')
    pwd_new1 = request.params.get('pwd_new1')
    pwd_new2 = request.params.get('pwd_new2')
    if pwd_old and pwd_new1 and pwd_new2:
        if pwd_new2 != pwd_new1:
            return {'retcode': 0, 'msg': 'The two new passwords do not match'}
        if len(pwd_new1) > 20:
            return {'retcode': 0, 'msg': 'Password must be at most 20 characters'}
        sql = get_s_sql('user', ['*'], {'username': username, 'pwd': hashlib.md5(pwd_old).hexdigest()})
        r = sql_conn.execute(sql)
        if not r:
            return {'retcode': 0, 'msg': 'The old password is incorrect'}
        params['pwd'] = hashlib.md5(pwd_new1).hexdigest()

    sql = get_u_sql('user', params, {'username': username})
    sql_conn.execute(sql)
    sql_conn.commit()
    return {'retcode': 1}
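
The bookkeeping snippets (examples #2, #3, #4, #6, #7, #8, #9, #10) rely on a different utils.get_params, one that pulls named fields out of the current HTTP request. Its definition is also absent from this page; a plausible sketch, assuming the Bottle-style thread-local request object the handlers already use:

from bottle import request

def get_params(keys):
    # Assumed reconstruction: collect the listed query/form fields
    # from the current request into a dict, skipping fields the
    # client did not submit.
    params = {}
    for key in keys:
        value = request.params.get(key)
        if value is not None:
            params[key] = value
    return params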
Example #3
def i_cashier_post(*args, **kwargs):
    '''
    Insert a record
    '''
    params = utils.get_params(cashier_keys)
    type = request.params.get('type')  # pay or get
    params['actual_money'] = abs(float(params['actual_money'])) if type == 'get' else -abs(
        float(params['actual_money']))

    def _insert(date):
        params['date'] = date
        sql = get_i_sql('cashier', params)
        sql_conn.execute(sql)

    days = request.params.get('days')
    future_date = request.params.get('future_date')

    if days and future_date:
        try:
            days = int(days)
            cur_date = datetime.strptime(params['date'], '%Y-%m-%d')
            future_date = datetime.strptime(future_date, '%Y-%m-%d')
        except (ValueError, TypeError):
            return {'retcode': 0}
        while cur_date <= future_date:
            _insert(cur_date.strftime('%Y-%m-%d'))
            cur_date += timedelta(days=days)
    else:
        _insert(params['date'])
    sql_conn.commit()
    return {'retcode': 1}
Example #4
def u_cashier(*args, **kwargs):
    '''
    Update a record
    '''
    param = utils.get_params(cashier_keys)
    sql = get_u_sql('cashier', param, {'cashier_id': request.params.get('cashier_id')})
    sql_conn.execute(sql)
    return {'retcode': 1}
Example #5
def prepare_optimizee(args, sgd_in_names, obs_shape, hidden_size, actor_critic, current_optimizee_step, prev_optimizee_step):
    prev_optimizee_step += current_optimizee_step
    current_optimizee_step = 0

    ##### Vgg16 #####
    vgg = vgg16(pretrained=True)
    model = FCN_Vgg(n_class=args.num_class)
    model.copy_params_from_vgg16(vgg)
    ###################

    # Setup optimizer
    sgd_in = [
        {'params': get_params(model, ["conv1_1", "conv1_2"]), 'lr': args.lr},
        {'params': get_params(model, ["conv2_1", "conv2_2"]), 'lr': args.lr},
        {'params': get_params(model, ["conv3_1", "conv3_2", "conv3_3"]), 'lr': args.lr},
        {'params': get_params(model, ["conv4_1", "conv4_2", "conv4_3"]), 'lr': args.lr},
        {'params': get_params(model, ["conv5_1", "conv5_2", "conv5_3"]), 'lr': args.lr},
        {'params': get_params(model, ["fc6", "fc7"]), 'lr': args.lr},
        {'params': get_params(model, ["score_fr", "score_pool3", "score_pool4", "upscore2", "upscore8", "upscore_pool4"]), 'lr': args.lr},
    ]
    optimizer = torch.optim.SGD(sgd_in, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Specify which GPUs to use
    model = model.cuda()
    model.eval()

    return model, optimizer, current_optimizee_step, prev_optimizee_step
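
Note that example #15 below builds the same seven VGG16 parameter groups, but scales the six backbone groups by args.factor and reserves the full args.lr for the scoring and upsampling layers, whereas here every group starts at args.lr.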
Example #6
def i_user(*args, **kwargs):
    '''
    Insert a user
    '''
    params = utils.get_params(['username', 'pwd', 'group_id'])
    params['pwd'] = hashlib.md5(params['pwd']).hexdigest()
    sql = get_i_sql('user', params)
    sql_conn.execute(sql)
    return {'retcode': 1}
Example #7
def d_cashier(*args, **kwargs):
    '''
    Delete a record
    '''
    param = utils.get_params(['cashier_id'])
    sql = get_d_sql('cashier', param)
    sql_conn.execute(sql)
    sql_conn.commit()
    return {'retcode': 1}
Example #8
def new_account(*args, **kwargs):
    '''
    Create a new account
    '''
    cd = utils.get_params(['account_name', 'account_desc', 'account_currency', 'account_about', 'account_label'])
    sql = get_i_sql('account', cd)
    sql_conn.execute(sql)
    sql_conn.commit()
    item_id = sql_conn.last_id()
    return {'retcode': 1, 'item_id': str(item_id)}
Example #9
def s_cashier_list_post(*args, **kwargs):
    '''
    Return the detail listing
    '''
    param = utils.get_params(['date1', 'date2'])
    is_output_xls = request.params.get('is_output_xls')  # flag: whether to export to xls
    user_cond = '' if 'admin' in kwargs.get('access') else 'pay_user="%s" and ' % kwargs.get('username')
    # filter by expense vs. income
    type = request.params.get('type')
    type_cond = {'get': 'actual_money>=0 and ', 'pay': ' actual_money<=0 and '}[type] if type else ''
    keys = request.params.get('show_cols').split(';')
    cashier_keys_ = cashier_keys + ['client_name', 'account_name', 'proof_name', 'paytype_name']
    keys = filter(lambda x: x in keys, cashier_keys_)
    order_type = 'desc' if request.params.get('ctrl_typ') == 'list' else ''
    def _get_sql(keys, index=None):
        limit_sql = ' limit {0},20'.format(index) if index is not None else ''

        sql = get_s_sql('cashier,client,proof,account,paytype', keys, {}) + \
              " where {user_cond} {type_cond} cashier.client_id=client.client_id and" \
              " cashier.proof_id=proof.proof_id and cashier.account_id=account.account_id and" \
              " cashier.paytype_id=paytype.paytype_id and date>='{date1}' and date<='{date2}'" \
              " order by date {order_type} {limit_sql}".format(
                  limit_sql=limit_sql, order_type=order_type, user_cond=user_cond,
                  type_cond=type_cond, **param)
        return sql

    sql_index = utils.get_sql_index()
    total_num = 0
    if sql_index == 0 and not is_output_xls:
        # count the number of rows
        sql = _get_sql(['count(*)'])
        r = sql_conn.execute(sql, 1)
        total_num = r[0]

    key_ = keys + ['cashier_id', 'account_currency']
    sql = _get_sql(key_, None) if is_output_xls else _get_sql(key_, sql_index)

    r = sql_conn.execute(sql)
    data = utils.sql_result(r, keys + ['cashier_id', 'account_currency'])
    data = cashier_utils.exchage_data(data, kwargs)
    if is_output_xls:
        url = cashier_utils.output_xls(data, cashier_keys_2ch, keys, param['date1'], param['date2'])
        return {'retcode': 1, 'url': url}
    print(keys)
    return {'retcode': 1,
            'data': template('cashier_list_include', data=data, cashier_keys_2ch=cashier_keys_2ch, keys=keys),
            'total_num': total_num}
Example #10
def s_cashier_detail(*args, **kwargs):
    '''
    Get the detail of a single cashier record
    '''
    param = utils.get_params(['cashier_id'])
    if 'admin' not in kwargs.get('access'):
        param['pay_user'] = kwargs.get('username')
    keys = copy(cashier_keys) + ['cashier_id', 'account_currency', 'cashier.account_id']
    keys.remove('account_id')

    sql = get_s_sql('cashier,account', keys, param) + ' and account.account_id=cashier.account_id'
    r = sql_conn.execute(sql)
    if not r:
        return 'System error'
    cashier_data = utils.sql_result(r, keys)[0]
    cashier_data = cashier_utils.exchage_data(cashier_data, kwargs)
    data = cashier_utils.get_data_in_detail(sql_conn, kwargs)
    return template('cashier_detail', cashier_data=cashier_data, data=data, ctrl_type='update')
Example #11
    model = Segception.Efficient(num_classes=n_classes, weights='imagenet', input_shape=(None, None, channels))

    # optimizer
    learning_rate = tfe.Variable(lr)
    optimizer = tf.train.AdamOptimizer(learning_rate)

    # Init models (optional, just for get_params function)
    init_model(model, input_shape=(batch_size, width, height, channels))

    variables_to_restore = model.variables
    variables_to_save = model.variables
    variables_to_optimize = model.variables

    # Init saver. Could also use: ckpt = tfe.Checkpoint(model=model, optimizer=optimizer, learning_rate=learning_rate, global_step=global_step)
    saver_model = tfe.Saver(var_list=variables_to_save)
    restore_model = tfe.Saver(var_list=variables_to_restore)

    # restore the model if a checkpoint exists, and show the number of params
    restore_state(restore_model, name_best_model)
    get_params(model)

    train(loader=loader, optimizer=optimizer, model=model, epochs=epochs, batch_size=batch_size, augmenter='segmentation', lr=learning_rate,
          init_lr=lr, saver=saver_model, variables_to_optimize=variables_to_optimize, name_best_model=name_best_model,
          evaluation=True, aux_loss=False, preprocess_mode=preprocess_mode)

    # Test best model
    print('Testing model')
    test_acc, test_miou = get_metrics(loader, model, loader.n_classes, train=False, flip_inference=True, scales=[0.75,  1, 1.5],
                                      write_images=True, preprocess_mode=preprocess_mode, time_exect=False)
    print('Test accuracy: ' + str(test_acc.numpy()))
    print('Test miou: ' + str(test_miou))
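
In the TensorFlow snippets (this one and example #13), get_params takes only the model and is called right after restoring weights, apparently to report the parameter count. A minimal sketch under that reading; the exact output format is an assumption:

def get_params(model):
    # Assumed reconstruction: print the total number of parameter
    # values held by the model's variables.
    total = sum(v.shape.num_elements() for v in model.variables)
    print('Total parameters:', total)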
Example #12
File: train.py Project: VITA-Group/ASG
def main():
    global args, best_prec1
    PID = os.getpid()
    args = parser.parse_args()
    prepare_seed(args.rand_seed)

    if args.timestamp == 'none':
        args.timestamp = "{:}".format(time.strftime('%h-%d-%C_%H-%M-%s', time.gmtime(time.time())))

    # Log outputs
    if args.evaluate:
        args.save_dir = args.save_dir + "/Visda17-Res101-evaluate" + \
            "%s/%s"%('/'+args.resume if args.resume != 'none' else '', args.timestamp)
    else:
        args.save_dir = args.save_dir + \
            "/Visda17-Res101-%s-train.%s-LR%.2E-epoch%d-batch%d-seed%d"%(
                   "LWF%.2f"%args.lwf if args.lwf > 0 else "XE", args.train_blocks, args.lr, args.epochs, args.batch_size, args.rand_seed) + \
            "%s/%s"%('/'+args.resume if args.resume != 'none' else '', args.timestamp)
    logger = prepare_logger(args)

    data_transforms = {
        'train': transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
        'val': transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
        ]),
    }

    kwargs = {'num_workers': 20, 'pin_memory': True}
    trainset = VisDA17(txt_file=os.path.join(args.data, "train/image_list.txt"), root_dir=os.path.join(args.data, "train"), transform=data_transforms['train'])
    valset = VisDA17(txt_file=os.path.join(args.data, "validation/image_list.txt"), root_dir=os.path.join(args.data, "validation"), transform=data_transforms['val'], label_one_hot=True)
    train_loader = DataLoader(trainset, batch_size=args.batch_size, shuffle=True, **kwargs)
    val_loader = DataLoader(valset, batch_size=args.batch_size, shuffle=False, **kwargs)

    model = resnet101(pretrained=True)
    num_ftrs = model.fc.in_features
    fc_layers = nn.Sequential(
        nn.Linear(num_ftrs, 512),
        nn.ReLU(inplace=True),
        nn.Linear(512, args.num_class),
    )
    model.fc_new = fc_layers

    train_blocks = args.train_blocks.split('.')
    # by default, freeze fc and train only fc_new
    for param in model.fc.parameters():
        param.requires_grad = False
    ##### Freeze several bottom layers (Optional) #####
    non_train_blocks = ['conv1', 'bn1', 'layer1', 'layer2', 'layer3', 'layer4', 'fc']
    for name in train_blocks:
        try:
            non_train_blocks.remove(name)
        except ValueError:
            print("cannot find block name %s\nAvailable blocks are: conv1, bn1, layer1, layer2, layer3, layer4, fc"%name)
    for name in non_train_blocks:
        for param in getattr(model, name).parameters():
            param.requires_grad = False

    # Setup optimizer
    factor = 0.1
    sgd_in = []
    for name in train_blocks:
        if name != 'fc':
            sgd_in.append({'params': get_params(model, [name]), 'lr': factor*args.lr})
        else:
            sgd_in.append({'params': get_params(model, ["fc_new"]), 'lr': args.lr})
    base_lrs = [ group['lr'] for group in sgd_in ]
    optimizer = SGD(sgd_in, lr=args.lr, momentum=args.momentum, weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume != 'none':
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(args.resume, checkpoint['epoch']))
        else:
            print("=ImageClassdata> no checkpoint found at '{}'".format(args.resume))

    model = model.cuda()

    model_old = None
    if args.lwf > 0:
        # create a frozen model copy for LwF (Learning without Forgetting)
        model_old = resnet101(pretrained=True)
        for param in model_old.parameters():
            param.requires_grad = False
        model_old.eval()
        model_old.cuda()

    if args.evaluate:
        prec1 = validate(val_loader, model)
        print(prec1)
        exit(0)

    # Main training loop
    iter_max = args.epochs * len(train_loader)
    iter_stat = IterNums(iter_max)
    for epoch in range(args.start_epoch, args.epochs):
        print("<< ============== JOB (PID = %d) %s ============== >>"%(PID, args.save_dir))
        logger.log("Epoch: %d"%(epoch+1))
        # train for one epoch
        train(train_loader, model, optimizer, base_lrs, iter_stat, epoch, logger.writer, model_old=model_old, adjust_lr=True)

        # evaluate on validation set
        prec1 = validate(val_loader, model)
        logger.writer.add_scalar("prec", prec1, epoch)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(args.save_dir, {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)

    logging.info('Best accuracy: {prec1:.3f}'.format(prec1=best_prec1))
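
Examples #12, #14, #15, and #17 all hand base_lrs and an adjust_lr flag to a train() routine whose body is not reproduced here, and example #15's run name advertises a "Poly" schedule. A sketch of the kind of per-group decay this setup suggests; the polynomial form and the power value are assumptions, not code from the repository:

def adjust_learning_rate(optimizer, base_lrs, cur_iter, iter_max, power=0.9):
    # Hypothetical sketch: polynomial decay applied per parameter
    # group, so groups created with factor * args.lr keep their
    # ratio to the head's learning rate throughout training.
    scale = (1.0 - float(cur_iter) / iter_max) ** power
    for group, base_lr in zip(optimizer.param_groups, base_lrs):
        group['lr'] = base_lr * scale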
Example #13
def train(n_classes=11,
          batch_size=16,
          epochs=100,
          width=960,
          height=720,
          crop_factor_x=2,
          crop_factor_y=1.25,
          init_lr=1e-4,
          median_frequency=.15,
          zoom_augmentation=.2,
          dataset_path='datasets/endoscopy',
          weights_path='weights/endoscopy/model',
          preprocess='imagenet'):

    CONFIG = {}
    CONFIG['n_classes'] = n_classes
    CONFIG['batch_size'] = batch_size
    CONFIG['epochs'] = epochs
    CONFIG['width'] = width
    CONFIG['height'] = height
    CONFIG['crop_factor_x'] = crop_factor_x
    CONFIG['crop_factor_y'] = crop_factor_y
    CONFIG['width_train'] = int(
        CONFIG['width'] /
        CONFIG['crop_factor_x'])  # will be cropped from width_test size
    CONFIG['height_train'] = int(
        CONFIG['height'] /
        CONFIG['crop_factor_y'])  # will be cropped from height_test size
    CONFIG['init_lr'] = init_lr
    CONFIG['median_frequency'] = median_frequency
    CONFIG['zoom_augmentation'] = zoom_augmentation
    CONFIG['dataset_path'] = dataset_path
    CONFIG['weights_path'] = weights_path
    CONFIG['preprocess'] = preprocess

    assert CONFIG['width'] * (
        1 - CONFIG['zoom_augmentation']) >= CONFIG['width_train']
    assert CONFIG['height'] * (
        1 - CONFIG['zoom_augmentation']) >= CONFIG['height_train']

    # GPU to use
    n_gpu = 0
    os.environ["CUDA_VISIBLE_DEVICES"] = str(n_gpu)
    # Loader
    loader = Loader.Loader(dataFolderPath=CONFIG['dataset_path'],
                           n_classes=CONFIG['n_classes'],
                           width=CONFIG['width'],
                           height=CONFIG['height'],
                           median_frequency=CONFIG['median_frequency'])
    print('Dataset loaded...')
    # build model
    #model = MiniNetv2.MiniNetv2p(num_classes=CONFIG['n_classes'])
    model = ResNet50.ResNet50Seg(CONFIG['n_classes'],
                                 input_shape=(None, None, 3),
                                 weights='imagenet')

    # optimizer
    learning_rate = tf.Variable(CONFIG['init_lr'])
    optimizer = tf.keras.optimizers.Adam(learning_rate)
    loss_function = tf.keras.losses.CategoricalCrossentropy()

    # restore if model saved and show number of params
    restore_state(model, CONFIG['weights_path'])

    init_model(model, (1, CONFIG['width'], CONFIG['height'], 3))
    get_params(model)

    # Train
    print('Training...')
    _train(loader=loader,
           optimizer=optimizer,
           loss_function=loss_function,
           model=model,
           config=CONFIG,
           lr=learning_rate,
           name_best_model=CONFIG['weights_path'],
           evaluation=True,
           preprocess_mode=CONFIG['preprocess'])

    print('Testing model')
    test_acc, test_miou = get_metrics(loader,
                                      model,
                                      loader.n_classes,
                                      train=False,
                                      flip_inference=True,
                                      scales=[1, 2, 1.5, 0.5, 0.75],
                                      write_images=True,
                                      preprocess_mode=CONFIG['preprocess'],
                                      time_exect=True)
    print('Test accuracy: ' + str(test_acc.numpy()))
    print('Test miou: ' + str(test_miou.numpy()))
Example #14
def main():
    global args, best_mIoU
    PID = os.getpid()
    args = parser.parse_args()
    prepare_seed(args.rand_seed)
    device = torch.device("cuda:" + str(args.gpus))

    if args.timestamp == 'none':
        args.timestamp = "{:}".format(
            time.strftime('%h-%d-%C_%H-%M-%s', time.gmtime(time.time())))

    switch_model = args.switch_model
    assert switch_model in ["deeplab50", "deeplab101"]

    # Log outputs
    if args.evaluate:
        args.save_dir = args.save_dir + "/GTA5-%s-evaluate"%switch_model + \
            "%s/%s"%('/'+args.resume if args.resume != 'none' else '', args.timestamp)
    else:
        args.save_dir = args.save_dir + \
            "/GTA5_512x512-{model}-LWF.stg{csg_stages}.w{csg_weight}-APool.{apool}-Aug.{augment}-chunk{chunks}-mlp{mlp}.K{csg_k}-LR{lr}.bone{factor}-epoch{epochs}-batch{batch_size}-seed{seed}".format(
                    model=switch_model,
                    csg_stages=args.csg_stages,
                    mlp=args.mlp,
                    csg_weight=args.csg,
                    apool=args.apool,
                    augment=args.augment,
                    chunks=args.chunks,
                    csg_k=args.csg_k,
                    lr="%.2E"%args.lr,
                    factor="%.1f"%args.factor,
                    epochs=args.epochs,
                    batch_size=args.batch_size,
                    seed=args.rand_seed
                    ) + \
            "%s/%s"%('/'+args.resume if args.resume != 'none' else '', args.timestamp)
    logger = prepare_logger(args)

    from config_seg import config as data_setting
    data_setting.batch_size = args.batch_size
    train_loader = get_train_loader(data_setting,
                                    GTA5,
                                    test=False,
                                    augment=args.augment)

    args.stages = [int(stage) for stage in args.csg_stages.split('.')] if len(args.csg_stages) > 0 else []
    chunks = [int(chunk) for chunk in args.chunks.split('.')] if len(args.chunks) > 0 else []
    assert len(chunks) == 1 or len(chunks) == len(args.stages)
    if len(chunks) < len(args.stages):
        chunks = [chunks[0]] * len(args.stages)

    if switch_model == 'deeplab50':
        layers = [3, 4, 6, 3]
    elif switch_model == 'deeplab101':
        layers = [3, 4, 23, 3]
    model = csg_builder.CSG(deeplab,
                            get_head=None,
                            K=args.csg_k,
                            stages=args.stages,
                            chunks=chunks,
                            task='new-seg',
                            apool=args.apool,
                            mlp=args.mlp,
                            base_encoder_kwargs={
                                'num_seg_classes': args.num_classes,
                                'layers': layers
                            })

    threds = 3
    evaluator = SegEvaluator(
        Cityscapes(data_setting, 'val', None),
        args.num_classes,
        np.array([0.485, 0.456, 0.406]),
        np.array([0.229, 0.224, 0.225]),
        model.encoder_q,
        [1],
        False,
        devices=args.gpus,
        config=data_setting,
        threds=threds,
        verbose=False,
        save_path=None,
        show_image=False,
    )  # just calculate mIoU; no prediction file is generated
    # verbose=False, save_path="./prediction_files", show_image=True, show_prediction=True)  # generate prediction files

    # Setup optimizer
    factor = args.factor
    sgd_in = [
        {
            'params': get_params(model.encoder_q, ["conv1"]),
            'lr': factor * args.lr
        },
        {
            'params': get_params(model.encoder_q, ["bn1"]),
            'lr': factor * args.lr
        },
        {
            'params': get_params(model.encoder_q, ["layer1"]),
            'lr': factor * args.lr
        },
        {
            'params': get_params(model.encoder_q, ["layer2"]),
            'lr': factor * args.lr
        },
        {
            'params': get_params(model.encoder_q, ["layer3"]),
            'lr': factor * args.lr
        },
        {
            'params': get_params(model.encoder_q, ["layer4"]),
            'lr': factor * args.lr
        },
        {
            'params': get_params(model.encoder_q, ["fc_new"]),
            'lr': args.lr
        },
    ]
    base_lrs = [group['lr'] for group in sgd_in]
    optimizer = SGD(sgd_in,
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume != 'none':
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume,
                                    map_location=lambda storage, loc: storage)
            args.start_epoch = checkpoint['epoch']
            best_mIoU = checkpoint['best_mIoU']
            msg = model.load_state_dict(checkpoint['state_dict'])
            print("resume weights: ", msg)
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=ImageClassdata> no checkpoint found at '{}'".format(
                args.resume))

    model = model.to(device)

    if args.evaluate:
        mIoU = validate(evaluator, model, -1)
        print(mIoU)
        exit(0)

    # Main training loop
    iter_max = args.epochs * len(train_loader)
    iter_stat = IterNums(iter_max)
    for epoch in range(args.start_epoch, args.epochs):
        print("<< ============== JOB (PID = %d) %s ============== >>" %
              (PID, args.save_dir))
        logger.log("Epoch: %d" % (epoch + 1))
        # train for one epoch
        train(args,
              train_loader,
              model,
              optimizer,
              base_lrs,
              iter_stat,
              epoch,
              logger,
              device,
              adjust_lr=epoch < args.epochs)

        # evaluate on validation set
        torch.cuda.empty_cache()
        mIoU = validate(evaluator, model, epoch)
        logger.writer.add_scalar("mIoU", mIoU, epoch + 1)
        logger.log("mIoU: %f" % mIoU)

        # remember best mIoU and save checkpoint
        is_best = mIoU > best_mIoU
        best_mIoU = max(mIoU, best_mIoU)
        save_checkpoint(
            args.save_dir, {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_mIoU': best_mIoU,
            }, is_best)

    logging.info('Best accuracy: {mIoU:.3f}'.format(mIoU=best_mIoU))
Example #15
def main():
    global args, best_mIoU
    args = parser.parse_args()
    pid = os.getpid()

    # Log outputs
    args.name = "GTA5_Vgg16_batch%d_512x512_Poly_LR%.1e_1to%.1f_all_lwf.%d_epoch%d" % (
        args.batch_size, args.lr, args.factor, args.lwf, args.epochs)
    if args.resume:
        args.name += "_resumed"
    directory = "runs/%s/" % (args.name)
    if not os.path.exists(directory):
        os.makedirs(directory)
    filename = directory + 'train.log'
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    rootLogger = logging.getLogger()
    logFormatter = logging.Formatter(
        "%(asctime)s [%(levelname)-5.5s]  %(message)s")
    fileHandler = logging.FileHandler(filename)
    fileHandler.setFormatter(logFormatter)
    rootLogger.addHandler(fileHandler)

    consoleHandler = logging.StreamHandler()
    consoleHandler.setFormatter(logFormatter)
    rootLogger.addHandler(consoleHandler)
    rootLogger.setLevel(logging.INFO)

    writer = SummaryWriter(directory)

    from config_seg import config as data_setting
    data_setting.batch_size = args.batch_size
    train_loader = get_train_loader(data_setting, GTA5, test=False)

    ##### Vgg16 #####
    vgg = vgg16(pretrained=True)
    model = FCN_Vgg(n_class=args.num_class)
    model.copy_params_from_vgg16(vgg)
    ###################
    threds = 1
    evaluator = SegEvaluator(Cityscapes(data_setting, 'val', None),
                             args.num_class,
                             np.array([0.485, 0.456, 0.406]),
                             np.array([0.229, 0.224, 0.225]),
                             model,
                             [1],
                             False,
                             devices=args.gpus,
                             config=data_setting,
                             threds=threds,
                             verbose=False,
                             save_path=None,
                             show_image=False)

    # Setup optimizer
    ##### Vgg16 #####
    sgd_in = [
        {
            'params': get_params(model, ["conv1_1", "conv1_2"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv2_1", "conv2_2"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv3_1", "conv3_2", "conv3_3"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv4_1", "conv4_2", "conv4_3"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["conv5_1", "conv5_2", "conv5_3"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, ["fc6", "fc7"]),
            'lr': args.factor * args.lr
        },
        {
            'params': get_params(model, [
                "score_fr", "score_pool3", "score_pool4", "upscore2",
                "upscore8", "upscore_pool4"
            ]),
            'lr': args.lr
        },
    ]
    base_lrs = [group['lr'] for group in sgd_in]
    optimizer = torch.optim.SGD(sgd_in,
                                lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            best_mIoU = checkpoint['best_mIoU']
            model.load_state_dict(checkpoint['state_dict'])
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=ImageClassdata> no checkpoint found at '{}'".format(
                args.resume))

    model = model.cuda()
    model_old = None
    if args.lwf > 0:
        # create a frozen model copy for LwF (Learning without Forgetting)
        model_old = vgg16(pretrained=True)
        ###################
        for param in model_old.parameters():
            param.requires_grad = False
        model_old.eval()
        model_old.cuda()

    if args.evaluate:
        mIoU = validate(evaluator, model)
        print(mIoU)

    # Main training loop
    iter_max = args.epochs * math.ceil(len(train_loader) / args.iter_size)
    iter_stat = IterNums(iter_max)
    for epoch in range(args.start_epoch, args.epochs):
        logging.info("============= " + args.name + " ================")
        logging.info("============= PID: " + str(pid) + " ================")
        logging.info("Epoch: %d" % (epoch + 1))
        # train for one epoch
        train(args,
              train_loader,
              model,
              optimizer,
              base_lrs,
              iter_stat,
              epoch,
              writer,
              model_old=model_old,
              adjust_lr=epoch < args.epochs)
        # evaluate on validation set
        torch.cuda.empty_cache()
        mIoU = validate(evaluator, model)
        writer.add_scalar("mIoU", mIoU, epoch)
        # remember best mIoU and save checkpoint
        is_best = mIoU > best_mIoU
        best_mIoU = max(mIoU, best_mIoU)
        save_checkpoint(
            directory, {
                'epoch': epoch + 1,
                'state_dict': model.state_dict(),
                'best_mIoU': best_mIoU,
            }, is_best)

    logging.info('Best accuracy: {mIoU:.3f}'.format(mIoU=best_mIoU))
Example #16
STACK_NAME = os.environ.get('STACK_NAME')


INSTANCE_ID = get_param(f"/ct-manager/{STACK_NAME}/instance-id")
INSTANCE_ALIAS = get_param(f"/ct-manager/{STACK_NAME}/instance-alias")
SOURCE_PHONE = get_param(f"/ct-manager/{STACK_NAME}/source-phone")
REGION = get_param(f"/ct-manager/{STACK_NAME}/region")
BEEP_PROMPT = get_param(f"/ct-manager/{STACK_NAME}/beep-prompt")
DEFAULT_AGENT = get_param(f"/ct-manager/{STACK_NAME}/default-agent")
PRESIGNED_URL_DURATION = get_param(f"/ct-manager/{STACK_NAME}/presigned-url-duration")


TAGS = config['tags']


SSM_PARAMS = get_params('/ct-manager/{}'.format(STACK_NAME))
assert len(SSM_PARAMS), "No previous parameters found; make sure to deploy the base stack first"


client = boto3.client('connect')

if INSTANCE_ID == 'None':
    print(f'Searching for {INSTANCE_ALIAS} in {REGION}')
    INSTANCE_ID = find_instance_id(client, INSTANCE_ALIAS)

if SOURCE_PHONE == 'None':
    print(f'Searching for {INSTANCE_ALIAS} in {REGION}')
    SOURCE_PHONE_LIST = find_source_phone(client, INSTANCE_ID)
    if len(SOURCE_PHONE_LIST):
        SOURCE_PHONE = SOURCE_PHONE_LIST[0]['PhoneNumber']
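
Example #16 reads its configuration from what its variable names suggest is AWS SSM Parameter Store, using get_param for single keys and get_params for everything under the stack's prefix. Neither helper is shown; a minimal boto3 sketch, where the WithDecryption flag and the pagination are assumptions about how the helpers behave:

import boto3

ssm = boto3.client('ssm')

def get_param(name):
    # Assumed reconstruction: fetch one parameter's value by name.
    return ssm.get_parameter(Name=name, WithDecryption=True)['Parameter']['Value']

def get_params(path):
    # Assumed reconstruction: list every parameter stored under the
    # given path prefix, following pagination.
    params = []
    paginator = ssm.get_paginator('get_parameters_by_path')
    for page in paginator.paginate(Path=path, Recursive=True, WithDecryption=True):
        params.extend(page['Parameters'])
    return params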
Example #17
def main():
    global args, best_prec1
    PID = os.getpid()
    args = parser.parse_args()
    prepare_seed(args.rand_seed)

    if args.timestamp == 'none':
        args.timestamp = "{:}".format(
            time.strftime('%h-%d-%C_%H-%M-%s', time.gmtime(time.time())))

    # Log outputs
    if args.evaluate:
        args.save_dir = args.save_dir + "/Visda17-Res101-evaluate" + \
            "%s/%s"%('/'+args.resume.replace('/', '+') if args.resume != 'none' else '', args.timestamp)
    else:
        args.save_dir = args.save_dir + \
            "/VisDA-Res101-CSG.stg{csg_stages}.w{csg_weight}-APool.{apool}-Aug.{augment}-chunk{chunks}-mlp{mlp}.K{csg_k}-LR{lr}.bone{factor}-epoch{epochs}-batch{batch_size}-seed{seed}".format(
                    csg_stages=args.csg_stages,
                    mlp=args.mlp,
                    csg_weight=args.csg,
                    apool=args.apool,
                    augment=args.augment,
                    chunks=args.chunks,
                    csg_k=args.csg_k,
                    lr="%.2E"%args.lr,
                    factor="%.1f"%args.factor,
                    epochs=args.epochs,
                    batch_size=args.batch_size,
                    seed=args.rand_seed
                    ) + \
            "%s/%s"%('/'+args.resume.replace('/', '+') if args.resume != 'none' else '', args.timestamp)
    logger = prepare_logger(args)

    data_transforms = {
        'val':
        transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ]),
    }
    if args.augment:
        data_transforms['train'] = transforms.Compose([
            RandAugment(1, 6., augment_list),
            transforms.Resize(224),
            transforms.RandomCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])
    else:
        data_transforms['train'] = transforms.Compose([
            transforms.Resize(224),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                 std=[0.229, 0.224, 0.225])
        ])

    kwargs = {'num_workers': 20, 'pin_memory': True}
    if args.augment:
        # two source
        trainset = VisDA17(txt_file=os.path.join(args.data,
                                                 "train/image_list.txt"),
                           root_dir=os.path.join(args.data, "train"),
                           transform=TwoCropsTransform(
                               data_transforms['train'],
                               data_transforms['train']))
    else:
        # one source
        trainset = VisDA17(txt_file=os.path.join(args.data,
                                                 "train/image_list.txt"),
                           root_dir=os.path.join(args.data, "train"),
                           transform=data_transforms['train'])
    train_loader = DataLoader(trainset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              drop_last=True,
                              **kwargs)
    valset = VisDA17(txt_file=os.path.join(args.data,
                                           "validation/image_list.txt"),
                     root_dir=os.path.join(args.data, "validation"),
                     transform=data_transforms['val'],
                     label_one_hot=True)
    val_loader = DataLoader(valset,
                            batch_size=args.batch_size,
                            shuffle=False,
                            **kwargs)

    args.stages = [int(stage) for stage in args.csg_stages.split('.')] if len(args.csg_stages) > 0 else []
    chunks = [int(chunk) for chunk in args.chunks.split('.')] if len(args.chunks) > 0 else []
    assert len(chunks) == 1 or len(chunks) == len(args.stages)
    if len(chunks) < len(args.stages):
        chunks = [chunks[0]] * len(args.stages)

    def get_head(num_ftrs, num_classes):
        _dim = 512
        return nn.Sequential(
            nn.Linear(num_ftrs, _dim),
            nn.ReLU(inplace=False),
            nn.Linear(_dim, num_classes),
        )

    model = csg_builder.CSG(
        resnet101,
        get_head=get_head,
        K=args.csg_k,
        stages=args.stages,
        chunks=chunks,
        apool=args.apool,
        mlp=args.mlp,
    )

    train_blocks = "conv1.bn1.layer1.layer2.layer3.layer4.fc"
    train_blocks = train_blocks.split('.')
    # Setup optimizer
    factor = args.factor
    sgd_in = []
    for name in train_blocks:
        if name != 'fc':
            sgd_in.append({
                'params': get_params(model.encoder_q, [name]),
                'lr': factor * args.lr
            })
        else:
            # no update to fc but to fc_new
            sgd_in.append({
                'params': get_params(model.encoder_q, ["fc_new"]),
                'lr': args.lr
            })
            if model.mlp:
                sgd_in.append({
                    'params': get_params(model.encoder_q, ["fc_csg"]),
                    'lr': args.lr
                })
    base_lrs = [group['lr'] for group in sgd_in]
    optimizer = SGD(sgd_in,
                    lr=args.lr,
                    momentum=args.momentum,
                    weight_decay=args.weight_decay)

    # Optionally resume from a checkpoint
    if args.resume != 'none':
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume,
                                    map_location=lambda storage, loc: storage)
            args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            msg = model.load_state_dict(checkpoint['state_dict'], strict=False)
            print("resume weights: ", msg)
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))
        else:
            print("=ImageClassdata> no checkpoint found at '{}'".format(
                args.resume))

    model = model.cuda()

    if args.evaluate:
        prec1 = validate(val_loader, model, args, 0)
        print(prec1)
        exit(0)

    # Main training loop
    iter_max = args.epochs * len(train_loader)
    iter_stat = IterNums(iter_max)
    for epoch in range(args.start_epoch, args.epochs):
        print("<< ============== JOB (PID = %d) %s ============== >>" %
              (PID, args.save_dir))
        logger.log("Epoch: %d" % (epoch + 1))
        train(train_loader,
              model,
              optimizer,
              base_lrs,
              iter_stat,
              epoch,
              logger,
              args,
              adjust_lr=epoch < args.epochs)

        prec1 = validate(val_loader, model, args, epoch)
        logger.writer.add_scalar("prec", prec1, epoch + 1)
        logger.log("prec: %f" % prec1)

        # remember best prec@1 and save checkpoint
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint(args.save_dir, {
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        },
                        is_best,
                        keep_last=1)

    logging.info('Best accuracy: {prec1:.3f}'.format(prec1=best_prec1))