Example no. 1
import os

from flask import redirect, render_template, request
from markupsafe import Markup
from PIL import Image

# AlexNet, valid_request, topk_to_rank_string and UPLOAD_FOLDER are project-local

def alexnet():
    if not valid_request(request, UPLOAD_FOLDER):
        return redirect(request.url)
    # the uploaded image is stored under the hard-coded name orig.png
    model = AlexNet(pretrained=True)
    model.eval()

    image = Image.open(os.path.join(UPLOAD_FOLDER, 'orig.png'))
    image = image.convert('RGB')

    # the custom AlexNet wrapper presumably returns (top-k scores, top-k labels)
    out = topk_to_rank_string(*model(image))

    # out[:-2] strips what is presumably a trailing separator before rendering
    return render_template('/pages/alexnet.html', out=Markup(out[:-2]))
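
The wrapper above accepts a raw PIL image, so resizing and normalization presumably happen inside the custom AlexNet class. A minimal sketch of that step using torchvision's pretrained alexnet (the transform values are the standard ImageNet statistics; classify_topk and k are illustrative names, not from the original):

import torch
from torchvision import models, transforms

# standard ImageNet preprocessing for a 224x224 AlexNet input
preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),
    transforms.Normalize(mean=[0.485, 0.456, 0.406],
                         std=[0.229, 0.224, 0.225]),
])

def classify_topk(image, k=5):
    model = models.alexnet(pretrained=True)
    model.eval()
    batch = preprocess(image).unsqueeze(0)   # add a batch dimension
    with torch.no_grad():
        probs = torch.nn.functional.softmax(model(batch), dim=1)
    return torch.topk(probs, k)              # (values, indices) pair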
Example no. 2
import tensorflow as tf  # TF1-style graph-mode API (tf.Session, tf.train.Saver)

# AlexNet, get_parser and drop_filters are project-local helpers

def main():
    train_images = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_train.txt'
    )
    train_labels = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_train_labels.txt'
    )
    valid_images = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_test.txt'
    )
    valid_labels = tf.data.TextLineDataset(
        '/home/caique/datasets/caltech101/caltech101_test_labels.txt'
    )

    # drop_data feeds the (currently disabled) filter-dropping pass one example at a time
    drop_data = tf.data.Dataset.zip((train_images, train_labels))
    drop_data = drop_data.map(get_parser(False)).batch(1)
    valid_data = tf.data.Dataset.zip((valid_images, valid_labels))
    valid_data = valid_data.map(get_parser(False)).batch(120)
    # get_parser(True) presumably switches on training-time augmentation
    train_data = tf.data.Dataset.zip((train_images, train_labels))
    train_data = train_data.map(get_parser(True)).shuffle(3030).batch(101)

    session = tf.Session()
    model = AlexNet(101)  # Caltech-101 has 101 object classes

    # restore the previously fine-tuned weights
    saver = tf.train.Saver()
    saver.restore(session, './variables/alexnet-caltech101-finetunned-2-2')

    # dropped_filters = drop_filters(
    #     session, model, drop_data, valid_data, drop_total=500,
    #     drop_n=20
    # )

    # model.train(session, train_data, valid_data,
    #             epochs=40,
    #             lr=0.00001,
    #             # dropped_filters=dropped_filters,
    #             # train_layers=['fc8'],
    #             # weights_path='alexnet_weights.npy',
    #             variables_path='./variables/alexnet-caltech101-finetunned-33',
    #             model_name='alexnet-caltech101-finetunned-2')
    # point the model's reinitializable iterator at the validation set, then evaluate
    session.run(model.iterator.make_initializer(valid_data))
    print('final eval: {}'.format(model.eval(session)))
    session.close()
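
get_parser is not shown above. Since each dataset zips a line of image paths with a line of labels, it would return a map function that decodes one (path, label) string pair. A hypothetical TF1-style sketch (the 227x227 input size, JPEG decoding, and flip augmentation are assumptions):

def get_parser(training):
    def parse(image_path, label):
        # decode the image file named on the text line
        image = tf.image.decode_jpeg(tf.read_file(image_path), channels=3)
        image = tf.image.resize_images(image, [227, 227])  # classic AlexNet input size
        if training:
            image = tf.image.random_flip_left_right(image)  # cheap augmentation
        return image, tf.string_to_number(label, out_type=tf.int32)
    return parse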
Example no. 3
        # (excerpt begins inside the training loop over train_loader batches)
        optimizer.zero_grad()

        outputs = net(signals)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        # optional periodic logging, currently disabled:
        # if batch_index % 100 == 0:    # print every 100 mini-batches
        #     print('[%d, %5d] loss: %.3f' %
        #           (epoch + 1, batch_index + 1, running_loss / (batch_index + 1)))
        #     running_loss = 0.0

    print("Avg Loss Train: {}".format(running_loss / len(train_loader)))

    # Validation
    print("Epoch: {} - Val".format(epoch))
    net.eval()
    running_loss = 0.
    with torch.no_grad():
        for batch_index, (signals, labels) in enumerate(tqdm(val_loader)):

            signals = signals.to(device=device)
            labels = labels.to(device=device)

            outputs = net(signals)
            loss = criterion(outputs, labels)
            running_loss += loss.item()

        print("Avg Loss Val: {}".format(running_loss / len(val_loader)))
Example no. 4
        # (excerpt begins inside the loop over train_generator; the forward pass
        # and loss computation happen just above this point)
        accumulated_train_loss.append(loss.item())

        # zero gradients
        optimizer.zero_grad()

        # backward
        loss.backward()

        # Step to update optimizer params
        optimizer.step()

        iteration += 1

    # Validation
    # Set the model to inference mode
    model.eval()
    accumulated_val_loss = []
    with torch.no_grad():  # validation needs no gradient tracking
        for batch_x, batch_y in val_generator:
            # Forward
            preds = model(batch_x)

            # compute loss
            loss = loss_func(preds, batch_y)
            accumulated_val_loss.append(loss.item())

    train_loss = sum(accumulated_train_loss) / len(accumulated_train_loss)
    val_loss = sum(accumulated_val_loss) / len(accumulated_val_loss)
    print("Epoch: {} -- -- train loss: {}, val loss: {}".format(
        epoch, train_loss, val_loss))
    tensorboard.add_scalar("Loss/Train", train_loss, epoch)
    tensorboard.add_scalar("Loss/Validation", val_loss, epoch)