Example #1
def test_validate_fail(faker):
    trafaret_format = t.Dict({
        'check': t.Int()
    })
    data_to_check = {
        'check': faker.word()
    }
    with pytest.raises(app_exceptions.ValidateDataError):
        validate(data_to_check=data_to_check, trafaret_format=trafaret_format)
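This test, like the other examples below, goes through a project-level validate() helper whose source is not part of the listing. A minimal sketch of what such a helper presumably does, assuming the trafaret library and a project-specific app_exceptions.ValidateDataError (the import path below is hypothetical):

import trafaret as t

from project import app_exceptions  # hypothetical import path


def validate(*, data_to_check: dict, trafaret_format: t.Trafaret) -> dict:
    """Run a trafaret check and translate its error into the project exception."""
    try:
        # trafaret's check() returns the (possibly converted) data on success
        return trafaret_format.check(data_to_check)
    except t.DataError as err:
        # raise the application-level error that callers expect
        raise app_exceptions.ValidateDataError(err.as_dict())

Returning the checked data is what lets callers reassign it, as identity_user and create_user do in the examples below.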
Example #2
async def identity_user(*, conn: SAConnection, credentials_data: dict) -> User:
    """
    Check if user exist in database
    :param conn: connection to database
    :param credentials_data: dict user credentials
    :return: dict with user data
    """
    credentials_format = t.Dict({
        t.Key('username'): t.Or(t.String, t.Int),
        t.Key('password'): t.Or(t.String, t.Int)
    })

    try:
        credentials_data = validate(data_to_check=credentials_data,
                                    trafaret_format=credentials_format)
    except app_exceptions.ValidateDataError:
        raise auth_exceptions.AuthenticateNoCredentials

    # look for user in database
    try:
        user = await get_user(conn=conn, username=credentials_data['username'])
    except app_exceptions.DoesNotExist:
        raise auth_exceptions.AuthenticateErrorCredentials

    # check user password
    if not validate_password(password=credentials_data['password'],
                             password_hash=user['password']):
        raise auth_exceptions.AuthenticateErrorCredentials

    return User(user)  # type: ignore
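identity_user is the kind of coroutine an aiohttp login handler would call. A rough usage sketch, assuming identity_user and auth_exceptions from the example above are importable, an aiopg.sa engine is stored under request.app['db'], and a hypothetical issue_token() helper handles the session/JWT part:

from aiohttp import web


async def login(request: web.Request) -> web.Response:
    credentials_data = await request.json()

    # acquire an SQLAlchemy-core connection from the aiopg.sa engine
    async with request.app['db'].acquire() as conn:
        try:
            user = await identity_user(conn=conn,
                                       credentials_data=credentials_data)
        except auth_exceptions.AuthenticateNoCredentials:
            raise web.HTTPBadRequest(text='username and password are required')
        except auth_exceptions.AuthenticateErrorCredentials:
            raise web.HTTPUnauthorized(text='wrong username or password')

    # issue_token() is a placeholder for whatever token logic the project uses
    return web.json_response({'token': issue_token(user)})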
Example #3
def test_validate_success(faker):
    trafaret_format = t.Dict({
        'check': t.Int()
    })
    data_to_check = {
        'check': faker.random_number()
    }
    res_data = validate(data_to_check=data_to_check, trafaret_format=trafaret_format)

    assert res_data == data_to_check
Example #4
async def create_user(*, conn: SAConnection, user_data: dict) -> User:
    """
    Create user object in database
    :param conn: connector to database
    :param user_data: dict with user data
    :return:
    """
    user_format = t.Dict({
        t.Key('username'): t.Or(t.String, t.Int),
        t.Key('password'): t.Or(t.String, t.Int)
    })

    user_data = validate(data_to_check=user_data, trafaret_format=user_format)
    user_data['password'] = generate_password_hash(
        password=user_data['password'])

    user = await create_objects(conn=conn, table=users, data=user_data)
    return User(user[0])  # type: ignore
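Both create_user and identity_user depend on a generate_password_hash / validate_password pair that the listing does not include. One common way to implement such a pair, sketched here with the standard library's hashlib (the project may use passlib, bcrypt, or different parameters):

import hashlib
import os


def generate_password_hash(*, password: str, iterations: int = 100_000) -> str:
    """Derive a salted PBKDF2 hash and store it as 'salt_hex$digest_hex'."""
    salt = os.urandom(16)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(), salt, iterations)
    return '{}${}'.format(salt.hex(), digest.hex())


def validate_password(*, password: str, password_hash: str,
                      iterations: int = 100_000) -> bool:
    """Check a plain-text password against a stored 'salt_hex$digest_hex' value."""
    salt_hex, digest_hex = password_hash.split('$', 1)
    digest = hashlib.pbkdf2_hmac('sha256', password.encode(),
                                 bytes.fromhex(salt_hex), iterations)
    return digest.hex() == digest_hex

The keyword-only signatures match how the pair is called in the examples above.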
Example #5
def signup():
    # Signup page is rendered when method == GET;
    # Form data is processed when method == POST;
    # Signup page with flashed message(s) is rendered if input is invalid.
    if request.method == 'POST':
        name = request.form['name']
        username = request.form['username']
        passwd = request.form['passwd']
        valid = validate(name, username, passwd)
        if valid == 1:
            # checking if username exists or not..
            sql = "SELECT id FROM usercreds WHERE username='******';"
            cursor.execute(sql.format(username))
            match = cursor.fetchone()
            if match is None:
                sql = "INSERT INTO usercreds (username,password,name) VALUES (%s,%s,%s);"
                try:
                    cursor.execute(
                        sql, (username, hashgen(passwd, method='sha1'), name))
                    conn.commit()
                except Exception:
                    return '''
                    Something weird happened. We will soon fix it.<br/>
                    Click <a href="{}">here</a> to retry.
                    '''.format(url_for('signup'))
                session['curr_uid'] = username
                global logged_in
                logged_in = True
                return redirect(url_for('feed'))
            else:
                # when username exists..
                return render_template('signup.html',
                                       title='Sign Up',
                                       errmsg='Username unavailable')
        else:
            return render_template('signup.html',
                                   title='Sign Up',
                                   errmsg='Invalid fields!')
    if request.method == 'GET':
        return render_template('signup.html', title='Sign Up')
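The signup view relies on a three-argument validate(name, username, passwd) helper that returns 1 when the submitted fields are acceptable; its source is not part of the snippet. A minimal sketch under assumed rules (the length limits and username pattern below are guesses, not taken from the source):

import re


def validate(name, username, passwd):
    """Return 1 when all signup fields look acceptable, 0 otherwise."""
    if not name or not name.strip():
        return 0
    # usernames: 3-30 characters, letters, digits and underscores only
    if not re.fullmatch(r'\w{3,30}', username):
        return 0
    # require a minimal password length
    if len(passwd) < 8:
        return 0
    return 1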
Example #6
def test_validate():
    batch_size = 5  # batch size
    vocab_threshold = 5  # minimum word count threshold

    img_folder = COCO_SMALL + '/cocoapi/images/val2014/'
    annotations_file = COCO_SMALL + '/cocoapi/annotations/captions_val2014.json'

    transform_train = transforms.Compose([
        transforms.Resize(256),  # smaller edge of image resized to 256
        transforms.RandomCrop(224),  # get 224x224 crop from random location
        transforms.RandomHorizontalFlip(),  # horizontally flip image with probability=0.5
        transforms.ToTensor(),  # convert the PIL Image to a tensor
        transforms.Normalize(
            (0.485, 0.456, 0.406),  # normalize image for pre-trained model
            (0.229, 0.224, 0.225))
    ])

    val_data_loader = val_get_loader(transform=transform_train,
                                     batch_size=batch_size,
                                     vocab_threshold=vocab_threshold,
                                     annotations_file=annotations_file,
                                     img_folder=img_folder)

    criterion = nn.CrossEntropyLoss()
    embed_size = 200
    hidden_size = 256

    # The size of the vocabulary.
    vocab_size = len(val_data_loader.dataset.vocab)

    # Initialize the encoder and decoder, and set each to inference mode.
    encoder = EncoderCNN(hidden_size)
    decoder = DecoderRNN(embed_size, hidden_size, vocab_size)
    encoder.eval()
    decoder.eval()

    # bleu_score, loss = validate(encoder, decoder, val_data_loader, criterion, vocab_size)
    # print(bleu_score, loss)
    loss = validate(encoder, decoder, val_data_loader, criterion, vocab_size)
    print(loss)
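test_validate expects validate(encoder, decoder, data_loader, criterion, vocab_size) to return an average loss over the validation set (the commented-out call suggests it returned a BLEU score as well at some point). A simplified sketch of such a loop, assuming batches of (images, captions) tensors:

import torch


def validate(encoder, decoder, data_loader, criterion, vocab_size):
    """Compute the average cross-entropy loss over one pass of the loader."""
    encoder.eval()
    decoder.eval()
    total_loss = 0.0
    with torch.no_grad():
        for images, captions in data_loader:
            features = encoder(images)              # image features
            outputs = decoder(features, captions)   # token scores per position
            loss = criterion(outputs.view(-1, vocab_size), captions.view(-1))
            total_loss += loss.item()
    return total_loss / len(data_loader)

The actual implementation in the repository may differ, for example by also decoding sampled captions to compute BLEU.
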
def main():
    # Import settings (note that default debug settings are used)
    parser = argparse.ArgumentParser(description='TGS Challenge Main Script')
    parser.add_argument('--trn_path',
                        type=str,
                        default='./data/debug_train/',
                        help='path to training directory (default: debug)')
    parser.add_argument('--msk_path',
                        type=str,
                        default='./data/debug_masks',
                        help='path to mask directory (default: debug)')
    parser.add_argument('--tst_path',
                        type=str,
                        default='./data/debug_test/',
                        help='path to test directory (default: debug)')
    parser.add_argument('--mod_path',
                        type=str,
                        default='./weights/model_tmp/',
                        help='path to model weights directory (default: tmp)')
    parser.add_argument('--batch_size',
                        type=int,
                        default=3,
                        help='input batch size (default: 3)')
    parser.add_argument('--epochs',
                        type=int,
                        default=10,
                        help='number of epochs to train for (default: 10)')
    parser.add_argument('--starting_epoch',
                        type=int,
                        default=1,
                        help='index of starting epoch (default: 1)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.001,
                        help='learning rate (default: 0.001)')
    parser.add_argument('--lr_patience',
                        type=int,
                        default=10,
                        help='num epochs to wait for LR reduce (default: 10)')
    parser.add_argument('--print_every',
                        type=int,
                        default=1,
                        help='num batches before printing (default: 1)')
    parser.add_argument('--NUM_TRAIN',
                        type=int,
                        default=6,
                        help='num samples in split train set (default: 6)')
    parser.add_argument('--NUM_FULL',
                        type=int,
                        default=9,
                        help='num samples in full train set (default: 9)')
    args = parser.parse_args()

    # Define some variables relative to parser inputs
    trn_path = args.trn_path
    msk_path = args.msk_path
    tst_path = args.tst_path
    mod_path = args.mod_path
    starting_epoch = args.starting_epoch
    NUM_TRAIN = args.NUM_TRAIN
    NUM_FULL = args.NUM_FULL

    record_name = 'best_record.pickle'
    history_name = 'training_history.pickle'

    # Validate specified model path
    restart_token = check_dir(mod_path)  # Returns None if path exists

    # Define model (comment out irrelevant models as necessary)
    # net = ResSeg33(ResidualBlock)
    # net = ResSeg33_Reg(ResBlock_Reg)
    # net = ResSegVar(ResidualBlock, [3, 4, 6, 3]) # 45 layers
    net = ResSegVar(ResBlock_Reg, [6, 8, 12, 6])  # 77 layers

    # Loss function
    criterion = nn.CrossEntropyLoss()
    # Optimizer
    optimizer = optim.Adam(net.parameters(), lr=args.lr)

    # Define or load training history
    def format_epoch_fname(start_num):
        return mod_path + 'epoch_%s.pth' % start_num

    best_record = {}
    training_history = {}
    if restart_token:  # Starting from scratch
        curr_epoch = 1
        best_record['epoch'] = 0
        best_record['val_loss'] = 1e10
        best_record['mean_iou'] = 0
    else:
        print('Resuming training from epoch:', starting_epoch)
        net.load_state_dict(torch.load(format_epoch_fname(starting_epoch)))
        curr_epoch = starting_epoch + 1
        best_record = load_pickle(mod_path + record_name)
        training_history = load_pickle(mod_path + history_name)

    # Define device and dtype
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dtype = torch.float32
    # Parallelization init and set net to CUDA if possible
    if torch.cuda.is_available():
        net.cuda()
        net = torch.nn.DataParallel(net,
                                    device_ids=range(
                                        torch.cuda.device_count()))

    # Load data
    paths = (trn_path, msk_path, tst_path)
    stats = (NUM_TRAIN, NUM_FULL, args.batch_size)
    trn_set, val_set, tst_set = data_formatter(paths, stats)
    # Unpack data
    trn_data, trn_load = trn_set
    val_data, val_load = val_set
    tst_data, tst_load = tst_set

    # Define automatic LR reduction scheduler
    scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer=optimizer,
                                                     mode='min',
                                                     patience=args.lr_patience,
                                                     min_lr=1e-10)
    # Model API parameters
    param_dict = {
        'loader': None,
        'net': net,
        'criterion': criterion,
        'optimizer': optimizer,
        'epoch': 1,
        'args': args,
        'device': device,
        'dtype': dtype
    }
    # Note: epoch starts from 1, not 0
    for epoch in range(curr_epoch, args.epochs + 1):
        # Update epoch
        param_dict['epoch'] = epoch
        # Train
        param_dict['loader'] = trn_load
        trn_log = train(**param_dict)
        # Validate
        param_dict['loader'] = val_load
        val_loss, mean_iou = validate(**param_dict)

        # Update logging files
        training_history['epoch_%s' % epoch] = trn_log
        # Save weights if validation loss improves
        if val_loss < best_record['val_loss']:
            best_record['epoch'] = epoch
            best_record['val_loss'] = val_loss
            best_record['mean_iou'] = mean_iou
            torch.save(net.state_dict(), format_epoch_fname(epoch))

        # Print best record information
        print('--------------------------------------')
        print('best record: [epoch %d], [val_loss %.4f], [mean_iou %.4f]' % (
            best_record['epoch'], best_record['val_loss'],
            best_record['mean_iou']))
        print('--------------------------------------')
        print()

        # Save logging information every epoch
        save_pickle(data=training_history, path=mod_path + history_name)
        save_pickle(data=best_record, path=mod_path + record_name)

        scheduler.step(val_loss)
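
main() drives each epoch through train(**param_dict) and validate(**param_dict), so both functions have to accept exactly the keys placed in param_dict (loader, net, criterion, optimizer, epoch, args, device, dtype). A signature sketch consistent with that calling convention; the per-batch logic and the binary-IoU computation are assumptions, not the repository's actual code:

import torch


def train(*, loader, net, criterion, optimizer, epoch, args, device, dtype):
    """One training epoch over `loader`; returns the per-batch loss log."""
    net.train()
    log = []
    for i, (images, masks) in enumerate(loader):
        images = images.to(device=device, dtype=dtype)
        masks = masks.to(device=device, dtype=torch.long)
        optimizer.zero_grad()
        loss = criterion(net(images), masks)
        loss.backward()
        optimizer.step()
        log.append(loss.item())
        if (i + 1) % args.print_every == 0:
            print('[epoch %d] [batch %d] [loss %.4f]' % (epoch, i + 1, loss.item()))
    return log


def validate(*, loader, net, criterion, optimizer, epoch, args, device, dtype):
    """One validation pass; returns (average loss, mean IoU)."""
    net.eval()
    total_loss, total_iou, n_batches = 0.0, 0.0, 0
    with torch.no_grad():
        for images, masks in loader:
            images = images.to(device=device, dtype=dtype)
            masks = masks.to(device=device, dtype=torch.long)
            scores = net(images)
            total_loss += criterion(scores, masks).item()
            # binary salt/no-salt IoU on the argmax prediction
            preds = scores.argmax(dim=1)
            intersection = ((preds == 1) & (masks == 1)).sum().item()
            union = ((preds == 1) | (masks == 1)).sum().item()
            total_iou += intersection / union if union else 1.0
            n_batches += 1
    return total_loss / n_batches, total_iou / n_batches

Unused parameters (optimizer and epoch in validate, for instance) are kept so that the **param_dict call in main() unpacks cleanly.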