Code example #1
import datetime

import numpy as np
import torch.nn as nn
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms

# EmbeddingNet and Cell_Dataset are defined elsewhere in the project.


def train(args):
    adapt = args.adapt

    train_transforms = torchvision.transforms.Compose([
        transforms.Resize((255, 255)),
        transforms.ToTensor(),
    ])
    cell_dataset = Cell_Dataset(transform=train_transforms)

    train_loader = DataLoader(cell_dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=args.num_workers)

    net = EmbeddingNet()
    if adapt:
        print("Fine-tuning: update only the adapt module for the new dataset")
        # Freeze every parameter that does not belong to the adapt module.
        for name, param in net.named_parameters():
            if 'adapt' not in name:
                param.requires_grad = False
    else:
        print("Train the full network with ILSV2015")

    # Optimizer
    optimizer = optim.SGD(net.parameters(),
                          lr=args.lr,
                          momentum=0.9,
                          weight_decay=args.weight_decay)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer=optimizer,
                                               milestones=args.lr_steps)

    criterion = nn.MSELoss()
    net.cuda()
    for epoch in range(args.epochs):
        # Training phase (this snippet has no validation loop)
        net.train()
        log_loss = []

        for i_batch, (input_im, patch_im,
                      input_label) in enumerate(train_loader):
            # Move the batch to the GPU.
            input_im = input_im.cuda()
            patch_im = patch_im.cuda()
            input_label = input_label.cuda()

            output_heatmap = net(input_im, patch_im)
            #            print(output_heatmap.shape, input_label.shape)
            loss = criterion(output_heatmap, input_label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            log_loss += [loss.item()]

            if i_batch % 10 == 0:
                log = 'Epoch: %3d, Batch: %5d, ' % (epoch + 1, i_batch)
                log += 'Total Loss: %6.3f, ' % (np.mean(log_loss))
                print(log, datetime.datetime.now())

        scheduler.step()
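
For context, here is a minimal, hypothetical driver for the train() function above. Only the attribute names (adapt, batch_size, num_workers, lr, weight_decay, lr_steps, epochs) come from the function body; every flag default is an assumption, not a value from the original project.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    # Flag names mirror the args.* attributes read by train(); defaults are placeholders.
    parser.add_argument('--adapt', action='store_true',
                        help='freeze everything except the adapt module')
    parser.add_argument('--batch_size', type=int, default=8)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.01)
    parser.add_argument('--weight_decay', type=float, default=5e-4)
    parser.add_argument('--lr_steps', type=int, nargs='+', default=[30, 60])
    parser.add_argument('--epochs', type=int, default=90)
    train(parser.parse_args())  # requires a CUDA device, since train() calls .cuda()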
Code example #2
import torch
import torch.nn as nn
import torch.optim as optim
import torch.utils.data as Data
from torch.optim import lr_scheduler

# myDataset, EmbeddingNet, load_model, test and write_scores are defined
# elsewhere in the project.


def main(args):
    args.cuda = not args.no_cuda and torch.cuda.is_available()
    if args.cuda:
        print("##########Running in CUDA GPU##########")
        kwargs = {'num_workers': 4, 'pin_memory': True}
    else:
        print("##########Running in CPU##########")
        kwargs = {}
    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    train_data = myDataset("./data", [1, 2, 3, 4, 5, 6], args.nframes, "train",
                           None)
    train_loader = Data.DataLoader(dataset=train_data,
                                   batch_size=args.batch_size,
                                   shuffle=True,
                                   **kwargs)

    # dev_data = myDataset("./data", None, args.nframes, "dev", "enrol")
    # dev_loader = Data.DataLoader(dataset=dev_data, batch_size=args.test_batch_size, shuffle=False, **kwargs)

    test_data = myDataset("./data", None, args.nframes, "test", "enrol")
    test_loader = Data.DataLoader(dataset=test_data,
                                  batch_size=args.test_batch_size,
                                  shuffle=False,
                                  **kwargs)

    embeddingNet = EmbeddingNet(embedding_size=64,
                                num_classes=train_data.nspeakers)
    # embeddingNet = embeddingNet.double()
    if args.cuda:
        print("##########model is in cuda mode##########")
        gpu_ids = [0, 1, 2, 3, 4, 5, 6, 7]  # only needed if DataParallel below is enabled
        embeddingNet.cuda()
        # embeddingNet = nn.DataParallel(embeddingNet, device_ids=[0])

    criterion = nn.CrossEntropyLoss()
    # optimizer = optim.SGD(embeddingNet.parameters(), lr=args.lr, momentum=args.momentum)
    optimizer = optim.Adam(embeddingNet.parameters(),
                           lr=args.lr,
                           weight_decay=0.001)
    scheduler = lr_scheduler.StepLR(optimizer, 5)
    start_epoch = 0
    if args.resume:
        embeddingNet, optimizer, start_epoch, loss = load_model(
            args.load_epoch, args.load_step, args.load_loss, embeddingNet,
            optimizer, "./weights/")

    # for epoch in range(start_epoch, args.epochs):
    # scheduler.step()
    # train(train_loader, embeddingNet, optimizer, criterion, epoch, args)
    # dev(dev_loader, embeddingNet, epoch)

    scores = test(test_loader, embeddingNet)
    write_scores(scores, "scores.npy")
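
As with the first example, a hypothetical argparse driver for main() is sketched below; the flag names follow the args.* attributes the function accesses, and all defaults are assumptions.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--no_cuda', action='store_true')
    parser.add_argument('--seed', type=int, default=1)
    parser.add_argument('--nframes', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--test_batch_size', type=int, default=64)
    parser.add_argument('--lr', type=float, default=1e-3)
    parser.add_argument('--resume', action='store_true')
    # Checkpoint identifiers, only read when --resume is given.
    parser.add_argument('--load_epoch', type=int, default=0)
    parser.add_argument('--load_step', type=int, default=0)
    parser.add_argument('--load_loss', type=float, default=0.0)
    main(parser.parse_args())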
Code example #3
import torch
import torch.nn

# EmbeddingNet is defined elsewhere in the project.


def create_embedder(embedding_model=''):
    embedder = EmbeddingNet()
    if embedding_model != '':
        embedder.load_state_dict(torch.load(embedding_model))

    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        embedder = torch.nn.DataParallel(embedder)

    embedder.cuda()
    return embedder
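
A usage sketch for create_embedder(); the checkpoint path 'embedder.pth' and the single-image-batch forward signature are assumptions for illustration, since the original snippet does not show them.

import torch

embedder = create_embedder()                  # fresh, randomly initialised weights
# embedder = create_embedder('embedder.pth')  # or restore a saved state_dict
embedder.eval()
with torch.no_grad():
    batch = torch.randn(4, 3, 224, 224).cuda()  # assumed NCHW image batch
    features = embedder(batch)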