import os

import numpy as np
import matplotlib.pyplot as plt

import utils  # project-local helpers (loadCheckpoint, loadDocuments, loadWordsF)


def getModel(ckptdir, convergence):
    ### convergence = index of the first checkpoint after the Gibbs sampler has converged;
    ### the counts are averaged over checkpoints convergence..lastckpt
    lastckpt = max([int(f[:-5]) for f in os.listdir(ckptdir)])
    numCkpts = lastckpt - convergence + 1

    _, occurrences, doc_count, _ = utils.loadCheckpoint(ckptdir / (str(convergence) + ".ckpt"))
    occurrences = occurrences/numCkpts
    doc_count = doc_count/numCkpts

    for i in range(convergence+1, lastckpt + 1):
        _, occur, doc_c, _ = utils.loadCheckpoint(ckptdir/(str(i) + ".ckpt"))
        occurrences += occur/numCkpts
        doc_count += doc_c/numCkpts
    return doc_count, occurrences


def checkConvergence(ckptdir):
    lastckpt = max([int(f[:-5]) for f in os.listdir(ckptdir)])

    _, _, doc_count, _ = utils.loadCheckpoint(ckptdir/"0.ckpt")
    doc_total = sum(doc_count)
    docs = [[d] for d in doc_count/doc_total]

    for i in range(1, lastckpt + 1):
        _, _, doc_count, _ = utils.loadCheckpoint(ckptdir/(str(i) + ".ckpt"))
        for j, d in enumerate(doc_count/doc_total):  # j: topic index (avoid shadowing the checkpoint index i)
            docs[j].append(d)

    x = np.arange(lastckpt+1) + 1
    for k in range(len(docs)):
        plt.plot(x, docs[k])
    plt.title("Distributions of each topics")
    plt.xlabel("Number of Gibbs sweeps")
    plt.ylabel("Probability")
    plt.show()
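
A minimal driver for the two helpers above might look like the following sketch; the checkpoint directory and the convergence index are placeholders, and the checkpoints themselves are the ones written by the Gibbs sampler in Example #6.

from pathlib import Path

ckptdir = Path("checkpoints/gibbs")          # hypothetical checkpoint directory
checkConvergence(ckptdir)                    # inspect the per-topic probability traces
burn_in = 50                                 # picked by eye from the plot (placeholder)
doc_count, occurrences = getModel(ckptdir, burn_in)
topic_probs = doc_count / doc_count.sum()    # averaged posterior class proportions
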
Example #3
    #             param.requires_grad = False
    #             print(name, ' fixed')

    FHAD_criterion = FHADLoss(args).to(device=args.device)
    dataloaders = {
        'HAND3D': {
            'train': dataset_FHAD_loader,
            'valid': dataset_FHAD_loader_valid
        }
    }
    args.n_kps = 21
    loss = FHAD_criterion
    args.model_id = 'pretrain_best'
    _, model, optimizer = loadCheckpoint(args,
                                         model,
                                         optimizer,
                                         best=True,
                                         load_pretrain=False)
    args.model_id += '_FHAD'

    _ = valid(args,
              -1,
              args.max_epochs_train,
              0,
              dataset_FHAD_loader_valid,
              model,
              loss,
              display_2D=False,
              display_3D=False)
    # Train
    train(args, dataloaders, model, loss, optimizer)
Example #4
def continuous_frame_recognition():
    """ Using RNN network to recognize the action. """
    start_epoch = 1

    # -----------------------------------------------------
    # Create Model, optimizer, scheduler, and loss function
    # -----------------------------------------------------
    # extractor = resnet50(pretrained=True).to(DEVICE)
    recurrent = LSTM_Net(2048,
                         opt.hidden_dim,
                         opt.output_dim,
                         num_layers=opt.layers,
                         bias=True,
                         batch_first=False,
                         dropout=opt.dropout,
                         bidirectional=opt.bidirection,
                         seq_predict=False).to(DEVICE)

    # ----------------------------------------------
    # For signal direction LSTM
    #   weight_ih_l0 torch.Size([512, 2048])
    #   weight_hh_l0 torch.Size([512, 128])
    #   bias_ih_l0 torch.Size([512])
    #   bias_hh_l0 torch.Size([512])
    #
    # For bidirectional LSTM, reverse layer is added.
    #   weight_ih_l0_reverse torch.Size([512, 2048])
    #   weight_hh_l0_reverse torch.Size([512, 128])
    #   bias_ih_l0_reverse torch.Size([512])
    #   bias_hh_l0_reverse torch.Size([512])
    # ----------------------------------------------

    # Weight_init
    if "orthogonal" in opt.weight_init:
        for layer, param in recurrent.recurrent.named_parameters():
            print("{} {}".format(layer, param.shape))
            if len(param.shape) >= 2:
                nn.init.orthogonal_(param)

    # Bias_init
    # PyTorch stores the LSTM gate biases in the order [input, forget, cell, output],
    # so the slice [size/4, size/2) below addresses the forget gate.
    if "forget_bias_0" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                size = param.shape[0]
                start = int(size * 0.25)
                end = int(size * 0.5)
                param[start:end].data.fill_(0)

    if "forget_bias_1" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                size = param.shape[0]
                start = int(size * 0.25)
                end = int(size * 0.5)
                param[start:end].data.fill_(1)

    # Set optimizer
    if opt.optimizer == "Adam":
        optimizer = optim.Adam(recurrent.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2),
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "SGD":
        optimizer = optim.SGD(recurrent.parameters(),
                              lr=opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    elif opt.optimizer == "ASGD":
        optimizer = optim.ASGD(recurrent.parameters(),
                               lr=opt.lr,
                               lambd=1e-4,
                               alpha=0.75,
                               t0=1000000.0,
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adadelta":
        optimizer = optim.Adadelta(recurrent.parameters(),
                                   lr=opt.lr,
                                   rho=0.9,
                                   eps=1e-06,
                                   weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adagrad":
        optimizer = optim.Adagrad(recurrent.parameters(),
                                  lr=opt.lr,
                                  lr_decay=0,
                                  weight_decay=opt.weight_decay,
                                  initial_accumulator_value=0)
    elif opt.optimizer == "SparseAdam":
        optimizer = optim.SparseAdam(recurrent.parameters(),
                                     lr=opt.lr,
                                     betas=(opt.b1, opt.b2),
                                     eps=1e-08)
    elif opt.optimizer == "Adamax":
        optimizer = optim.Adamax(recurrent.parameters(),
                                 lr=opt.lr,
                                 betas=(opt.b1, opt.b2),
                                 eps=1e-08,
                                 weight_decay=opt.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: {}".format(opt.optimizer))

    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=opt.milestones,
                                               gamma=opt.gamma)

    # Load parameter
    if opt.pretrain:
        recurrent = utils.loadModel(opt.pretrain, recurrent)
    if opt.resume:
        recurrent, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
            opt.resume, recurrent, optimizer, scheduler)

    # Set criterion
    criterion = nn.CrossEntropyLoss().to(DEVICE)

    # Set dataloader
    transform = transforms.ToTensor()

    trainlabel = os.path.join(opt.train, "label", "gt_train.csv")
    trainfeature = os.path.join(opt.train, "feature", "train")
    vallabel = os.path.join(opt.val, "label", "gt_valid.csv")
    valfeature = os.path.join(opt.val, "feature", "valid")

    train_set = dataset.TrimmedVideos(None,
                                      trainlabel,
                                      trainfeature,
                                      downsample=opt.downsample,
                                      transform=transform)
    train_loader = DataLoader(train_set,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              collate_fn=utils.collate_fn,
                              num_workers=opt.threads)

    # Show the memory used by neural network
    print("The neural network allocated GPU with {:.1f} MB".format(
        torch.cuda.memory_allocated() / 1024 / 1024))

    #------------------
    # Train the models
    #------------------
    trainloss = []
    trainaccs = []
    valloss = []
    valaccs = []
    epochs = []

    for epoch in range(start_epoch, opt.epochs + 1):
        scheduler.step()

        # Save the train loss and train accuracy
        max_trainaccs = max(trainaccs) if len(trainaccs) else 0
        min_trainloss = min(trainloss) if len(trainloss) else 0
        recurrent, loss, acc = train(recurrent, train_loader, optimizer, epoch,
                                     criterion, max_trainaccs, min_trainloss)
        trainloss.append(loss)
        trainaccs.append(acc)

        # Validate the model with several downsample ratios
        loss_list, acc_list, label_list = [], [], []
        for downsample in [1, 2, 4, 6, 12]:
            val_set = dataset.TrimmedVideos(None,
                                            vallabel,
                                            valfeature,
                                            downsample=downsample,
                                            transform=transform)
            val_loader = DataLoader(val_set,
                                    batch_size=1,
                                    shuffle=True,
                                    collate_fn=utils.collate_fn,
                                    num_workers=opt.threads)
            print("[Epoch {}] [Validation] [Downsample: {:2d}]".format(
                epoch, downsample))
            acc, loss = val(recurrent, val_loader, epoch, criterion)

            loss_list.append(loss)
            acc_list.append(acc)
            label_list.append('val_{}'.format(downsample))

        valloss.append(loss_list)
        valaccs.append(acc_list)

        # Save the epochs
        epochs.append(epoch)

        # with open(os.path.join(opt.log, "problem_2", opt.tag, 'statistics.txt'), 'w') as textfile:
        #     textfile.write("\n".join(map(lambda x: str(x), (trainloss, trainaccs, valloss, valaccs, epochs))))

        records = [np.array(x)
                   for x in (trainloss, trainaccs, valloss, valaccs, epochs)]
        for record, name in zip(records,
                                ('trainloss.txt', 'trainaccs.txt',
                                 'valloss.txt', 'valaccs.txt', 'epochs.txt')):
            np.savetxt(os.path.join(opt.log, "problem_2", opt.tag, name),
                       record)

        if epoch % opt.save_interval == 0:
            savepath = os.path.join(opt.checkpoints, "problem_2", opt.tag,
                                    str(epoch) + '.pth')
            utils.saveCheckpoint(savepath, recurrent, optimizer, scheduler,
                                 epoch)

        # Draw the accuracy / loss curve
        draw_graphs(trainloss, valloss, trainaccs, valaccs, epochs,
                    "problem_2", label_list)

    return recurrent
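
The calls above go through a project-local utils module that is not shown. A minimal checkpoint pair compatible with the signatures used in this example (saveCheckpoint(path, model, optimizer, scheduler, epoch) and loadCheckpoint(path, model, optimizer, scheduler) returning the next epoch to run) could be sketched as follows; it is an illustration, not the repository's actual implementation.

import os
import torch

def saveCheckpoint(path, model, optimizer, scheduler, epoch):
    # Persist all training state needed to resume later.
    os.makedirs(os.path.dirname(path), exist_ok=True)
    torch.save({
        "model": model.state_dict(),
        "optimizer": optimizer.state_dict(),
        "scheduler": scheduler.state_dict(),
        "epoch": epoch,
    }, path)

def loadCheckpoint(path, model, optimizer, scheduler):
    # Restore the state written by saveCheckpoint and return the epoch to resume from.
    state = torch.load(path, map_location="cpu")
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])
    scheduler.load_state_dict(state["scheduler"])
    return model, optimizer, state["epoch"] + 1, scheduler
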
Example #5
def main(opt):
    """ 
    Main process of train.py 

    Parameters
    ----------
    opt : namespace
        The option (hyperparameters) of these model
    """
    if opt.fixrandomseed:
        seed = 1334
        torch.manual_seed(seed)
        
        if opt.cuda: 
            torch.cuda.manual_seed(seed)

    print("==========> Loading datasets")
    img_transform = Compose([ToTensor(), Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) if opt.normalize else ToTensor()

    # Dataset 
    train_loader, val_loader = getDataset(opt, img_transform)

    # TODO: Parameters Selection
    # TODO: Mean shift Layer Handling
    # Load Model
    print("==========> Building model")
    model = ImproveNet(opt.rb)
    
    # ----------------------------------------------- #
    # Loss: L1 Norm / L2 Norm                         #
    #   Perceptual Model (Optional)                   # 
    #   TODO Append Layer (Optional)                  #
    # ----------------------------------------------- #
    criterion  = nn.MSELoss(reduction='mean')
    perceptual = None if (opt.perceptual is None) else getPerceptualModel(opt.perceptual).eval()

    # ----------------------------------------------- #
    # Optimizer and learning rate scheduler           #
    # ----------------------------------------------- #
    print("==========> Setting Optimizer: {}".format(opt.optimizer))
    optimizer = getOptimizer(model, opt)
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=opt.milestones, gamma=opt.gamma)

    # ----------------------------------------------- #
    # Option: resume training process from checkpoint #
    # ----------------------------------------------- #
    if opt.resume:
        if os.path.isfile(opt.resume):
            print("=> loading checkpoint '{}'".format(opt.resume))
            model, optimizer, _, _, scheduler = utils.loadCheckpoint(opt.resume, model, optimizer, scheduler)
        else:
            raise Exception("=> no checkpoint found at '{}'".format(opt.resume))

    # ----------------------------------------------- #
    # Option: load weights from a pretrain network    #
    # ----------------------------------------------- #
    if opt.pretrained:
        if os.path.isfile(opt.pretrained):
            print("=> loading pretrained model '{}'".format(opt.pretrained))
            model = utils.loadModel(opt.pretrained, model, True)
        else:
            raise Exception("=> no pretrained model found at '{}'".format(opt.pretrained))

    # Select training device
    if opt.cuda:
        print("==========> Setting GPU")

        model = nn.DataParallel(model, device_ids=[i for i in range(opt.gpus)]).cuda()
        criterion = criterion.cuda()

        if perceptual is not None:
            perceptual = perceptual.cuda()
    else:
        print("==========> Setting CPU")
        
        model = model.cpu()
        criterion = criterion.cpu()

        if perceptual is not None:
            perceptual = perceptual.cpu()

    # Create container
    length     = opt.epochs * len(train_loader) // opt.val_interval
    loss_iter  = np.empty(length, dtype=float)
    perc_iter  = np.empty(length, dtype=float)
    psnr_iter  = np.empty(length, dtype=float)
    ssim_iter  = np.empty(length, dtype=float)
    mse_iter   = np.empty(length, dtype=float)
    lr_iter    = np.empty(length, dtype=float)
    iterations = np.empty(length, dtype=float)

    loss_iter[:]  = np.nan
    perc_iter[:]  = np.nan
    psnr_iter[:]  = np.nan
    ssim_iter[:]  = np.nan
    mse_iter[:]   = np.nan
    lr_iter[:]    = np.nan
    iterations[:] = np.nan

    # Set plotter to plot the loss curves 
    twinx = (opt.perceptual is not None)
    fig, axis = getFigureSpec(len(train_loader), twinx)

    # Set Model Saving Function
    if opt.save_item == "model":
        print("==========> Save Function: saveModel()")
        saveCheckpoint = utils.saveModel
    elif opt.save_item == "checkpoint":
        print("==========> Save Function: saveCheckpoint()")
        saveCheckpoint = utils.saveCheckpoint
    else:
        raise ValueError("Save Checkpoint Function Error")

    # Start Training
    print("==========> Training")
    for epoch in range(opt.starts, opt.epochs + 1):
        loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, _, _ = train(
            model, optimizer, criterion, perceptual, train_loader, val_loader, scheduler, epoch, 
            loss_iter, perc_iter, mse_iter, psnr_iter, ssim_iter, lr_iter, iterations, 
            opt, name, fig, axis, saveCheckpoint
        )

        scheduler.step()

    # Save the last checkpoint for resume training
    utils.saveCheckpoint(os.path.join(opt.checkpoints, name, "final.pth"), model, optimizer, scheduler, epoch, len(train_loader))

    # TODO: Fine tuning

    return
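
getDataset, getPerceptualModel, getFigureSpec and getOptimizer are helpers of this project and are not shown here. As an illustration only, getOptimizer could be a thin wrapper around the same optimizer switch used in the other examples; the option names (opt.lr, opt.b1, opt.b2, opt.momentum, opt.weight_decay) are assumptions carried over from Example #4.

import torch.optim as optim

def getOptimizer(model, opt):
    # Build an optimizer from the parsed options; a sketch, not the project's actual helper.
    if opt.optimizer == "Adam":
        return optim.Adam(model.parameters(), lr=opt.lr,
                          betas=(opt.b1, opt.b2), weight_decay=opt.weight_decay)
    if opt.optimizer == "SGD":
        return optim.SGD(model.parameters(), lr=opt.lr,
                         momentum=opt.momentum, weight_decay=opt.weight_decay)
    raise ValueError("Unsupported optimizer: {}".format(opt.optimizer))
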
Example #6
def gibbs(W, α, γ, K, activation_level, ckptdir, ckpt=None):
    # documents are a list of all document names
    # ckptdir is the directory where checkpoints are stored
    # ckpt is the current checkpoint (int)

    # z[i] = class of document i, where i enumerates the distinct doc_labels
    # doc_count[k] = number of documents of class k
    if ckpt is None:
        documents = utils.loadDocuments(directory=training_dir)
        z = np.random.choice(K, len(documents))

        doc_count = np.zeros(K, dtype=int)
        # occurrences[k,w] = number of occurrences of word_id w in documents of class k
        # word_count[k] = total number of words in documents of class k
        occurrences = np.zeros((K, W))

        for i, d in enumerate(documents):
            if (i + 1) % max(1, len(documents) // 250) == 0:
                print(
                    str((i + 1) * 100 / float(len(documents))) +
                    "% completed!")
                print(doc_count)
            doc_count[z[i]] += 1
            w = utils.loadWordsF(d, activation_level)
            for word in w:
                occurrences[z[i], word] += 1

        with open(str(ckptdir / "0.ckpt"), "w+") as f:
            saveCkpt(z, occurrences, doc_count, documents, f)
        print("Initial Loading completed!")
        ckpt = 0

    else:
        z, occurrences, doc_count, documents = utils.loadCheckpoint(
            ckptdir / (str(ckpt) + ".ckpt"))
        print("Loaded from Checkpoint")

    word_count = np.sum(occurrences, axis=1)

    while True:
        ckpt += 1
        for i in range(len(documents)):
            if (i + 1) % max(1, len(documents) // 100) == 0:
                print(
                    str((i + 1) * 100 / float(len(documents))) +
                    "% completed!")
            # get the words,counts for document i
            # and remove this document from the counts
            w = utils.loadWordsF(documents[i], activation_level)

            for word in w:
                occurrences[z[i], word] -= 1
            word_count[z[i]] -= len(w)
            doc_count[z[i]] -= 1

            # Find the log probability that this document belongs to class k, marginalized over θ and β
            logp = []
            for k in range(K):
                value = 0
                probw = np.log(γ + occurrences[k]) - np.log(γ * W +
                                                            word_count[k])
                for word in w:
                    value += probw[word]
                logp.append(value + np.log(α + doc_count[k]))
            p = np.exp(logp - np.max(logp))
            p = p / sum(p)

            # Assign this document to a new class, chosen randomly, and add back the counts
            k = np.random.choice(K, p=p)
            z[i] = k

            for word in w:
                occurrences[z[i], word] += 1

            word_count[k] += len(w)
            doc_count[k] += 1

        with open(str(ckptdir / (str(ckpt) + ".ckpt")), "w+") as f:
            saveCkpt(z, occurrences, doc_count, documents, f)
        print("Checkpoint", str(ckpt), "completed!")

        yield np.copy(z)
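
gibbs() is a generator that yields a copy of the class assignments after every sweep, so a driver only needs to iterate it and decide when to stop; note that it also reads the global training_dir when starting from scratch. The hyperparameters below are placeholders, and the post-processing helpers are the ones defined at the top of this page.

from pathlib import Path

ckptdir = Path("checkpoints/gibbs")                      # hypothetical directory
sampler = gibbs(W=5000, α=1.0, γ=0.1, K=20,
                activation_level=0.5, ckptdir=ckptdir)
for sweep, z in enumerate(sampler, start=1):
    if sweep == 100:                                     # stop after 100 sweeps
        break

checkConvergence(ckptdir)                                # then pick a burn-in point from the plot
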
Example #7
def main():
    currDateTime = time.strftime('%Y%m%d_%H%M%S')

    # Set up argparser.
    parser = argparse.ArgumentParser()
    parseTrainEval = parser.add_mutually_exclusive_group()
    parseTrainEval.add_argument("-t",
                                "--train",
                                help="Use training mode",
                                action="store_true")
    parseTrainEval.add_argument("-e",
                                "--evaluate",
                                help="Use evaluation mode",
                                action="store_true")
    parser.add_argument(
        "-b",
        "--batch_size",
        type=int,
        default=8,
        help=
        "Batch size to use for training or evaluation depending on what mode you're in"
    )
    parser.add_argument("-s",
                        "--save_frequency",
                        type=int,
                        default=5,
                        help="Save a checkpoint every SAVE_FREQUENCY epochs")
    parser.add_argument("-c",
                        "--checkpoint_directory",
                        type=str,
                        default="./checkpoints",
                        help="Directory to save checkpoints to")
    parser.add_argument("-n",
                        "--num_epochs",
                        type=int,
                        default=50,
                        help="Number of epochs to train for")
    parser.add_argument("-l",
                        "--load_checkpoint",
                        type=str,
                        help="Path of model checkpoint to load and use")
    parser.add_argument(
        "-f",
        "--checkpoint_basename",
        type=str,
        default="checkpoint_" + currDateTime,
        help=
        "Basename to use for saved checkpoints. Gets appended with the epoch no. at saving"
    )
    parser.add_argument("--logfile_path",
                        type=str,
                        default="./logfile_" + currDateTime + ".csv",
                        help="Path to the logfile to use during training")

    args = parser.parse_args()

    print("Initialising...")
    if (args.checkpoint_directory):
        args.checkpoint_directory = os.path.dirname(args.checkpoint_directory)

    if (args.load_checkpoint and not os.path.isfile(args.load_checkpoint)):
        sys.exit(
            "Error: specified checkpoint either doesn't exist or isn't a file."
        )

    # Transforms to put into a tensor and normalise the incoming Pillow images.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    print("	Loading datasets...")
    # Set up datasets, model and loss/optimiser. If there's cuda available then send to the GPU.
    trainset = NYUD2(root='/media/hdd1/Datasets',
                     split='train',
                     transform=transform)
    testset = NYUD2(root='/media/hdd1/Datasets',
                    split='test',
                    transform=transform)

    print("	Initialising model...")
    model = Autoencoder()
    epoch = 0  # This is used when resuming training and is overwritten on load.

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    print("	Using device:	" + str(device))

    if torch.cuda.device_count() > 1:
        print("	Using		%d CUDA-capable devices" % torch.cuda.device_count())
        model = nn.DataParallel(model)

    model.to(device)

    print("	Configuring optimiser...")
    criterion = nn.MSELoss()
    criterion = criterion.to(device)
    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    if (args.evaluate):
        print("\n### Evaluation Mode ###\n")
        if (args.load_checkpoint):
            print("Loading model checkpoint for evaluation from " +
                  args.load_checkpoint)
            model, epoch, optimizer, loss = utils.loadCheckpoint(
                args.load_checkpoint, model)
        print("Evaluating model with batch size %d..." % args.batch_size)
        print(evaluate(model, criterion, testset, batch_size=args.batch_size))
    elif (args.train):
        print("\n### Training Mode ###\n")
        if (args.load_checkpoint):
            print("Training from checkpoint: " + args.load_checkpoint)
            model, epoch, optimizer, loss = utils.loadCheckpoint(
                args.load_checkpoint, model)
        train(model,
              optimizer,
              criterion,
              trainset,
              logfile_path=args.logfile_path,
              batch_size=args.batch_size,
              epoch=epoch,
              num_epochs=args.num_epochs,
              save_freq=args.save_frequency,
              checkpoint_dir=args.checkpoint_directory,
              checkpoint_basename=args.checkpoint_basename)
    else:
        sys.exit(
            "Error: No mode selected. Use `./main.py -h` for usage instructions."
        )
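
evaluate() is not shown in this excerpt; under the assumption that it simply averages the criterion over the test set and that the dataset yields (input, target) tensor pairs, a minimal version compatible with the call above could look like this.

import torch
from torch.utils.data import DataLoader

def evaluate(model, criterion, testset, batch_size=8):
    # Average the loss over the test set; a sketch of the helper, not its actual implementation.
    device = next(model.parameters()).device
    loader = DataLoader(testset, batch_size=batch_size, shuffle=False)
    model.eval()
    total, batches = 0.0, 0
    with torch.no_grad():
        for inputs, targets in loader:
            inputs, targets = inputs.to(device), targets.to(device)
            total += criterion(model(inputs), targets).item()
            batches += 1
    return total / max(batches, 1)
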
Example #8
def temporal_action_segmentation():
    """ Use an RNN to segment the actions in each full-length video. """
    start_epoch = 1

    #------------------------------------------------------
    # Create Model, optimizer, scheduler, and loss function
    #------------------------------------------------------
    recurrent = LSTM_Net(2048,
                         opt.hidden_dim,
                         opt.output_dim,
                         num_layers=opt.layers,
                         bias=True,
                         batch_first=False,
                         dropout=opt.dropout,
                         bidirectional=opt.bidirection,
                         seq_predict=True).to(DEVICE)

    # Weight_init
    if "orthogonal" in opt.weight_init:
        for layer, param in recurrent.recurrent.named_parameters():
            print("{} {}".format(layer, param.shape))
            if len(param.shape) >= 2:
                nn.init.orthogonal_(param)

    # Bias_init
    # As above, the second quarter of each bias vector corresponds to the forget gate.
    if "forget_bias_0" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                start = int(param.shape[0] * 0.25)
                end = int(param.shape[0] * 0.5)
                param[start:end].data.fill_(0)

    if "forget_bias_1" in opt.bias_init:
        for layer, param in recurrent.recurrent.named_parameters():
            if layer.startswith("bias"):
                start = int(param.shape[0] * 0.25)
                end = int(param.shape[0] * 0.5)
                param[start:end].data.fill_(1)

    # Set optimizer
    if opt.optimizer == "Adam":
        optimizer = optim.Adam(recurrent.parameters(),
                               lr=opt.lr,
                               betas=(opt.b1, opt.b2),
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "SGD":
        optimizer = optim.SGD(recurrent.parameters(),
                              lr=opt.lr,
                              momentum=opt.momentum,
                              weight_decay=opt.weight_decay)
    elif opt.optimizer == "ASGD":
        optimizer = optim.ASGD(recurrent.parameters(),
                               lr=opt.lr,
                               lambd=1e-4,
                               alpha=0.75,
                               t0=1000000.0,
                               weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adadelta":
        optimizer = optim.Adadelta(recurrent.parameters(),
                                   lr=opt.lr,
                                   rho=0.9,
                                   eps=1e-06,
                                   weight_decay=opt.weight_decay)
    elif opt.optimizer == "Adagrad":
        optimizer = optim.Adagrad(recurrent.parameters(),
                                  lr=opt.lr,
                                  lr_decay=0,
                                  weight_decay=opt.weight_decay,
                                  initial_accumulator_value=0)
    elif opt.optimizer == "SparseAdam":
        optimizer = optim.SparseAdam(recurrent.parameters(),
                                     lr=opt.lr,
                                     betas=(opt.b1, opt.b2),
                                     eps=1e-08)
    elif opt.optimizer == "Adamax":
        optimizer = optim.Adamax(recurrent.parameters(),
                                 lr=opt.lr,
                                 betas=(opt.b1, opt.b2),
                                 eps=1e-08,
                                 weight_decay=opt.weight_decay)
    else:
        raise ValueError("Unsupported optimizer: {}".format(opt.optimizer))

    scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                               milestones=opt.milestones,
                                               gamma=opt.gamma)

    # Load parameter
    if opt.pretrain:
        recurrent = utils.loadModel(opt.pretrain, recurrent)
        print("Loaded pretrain model: {}".format(opt.pretrain))
    if opt.resume:
        recurrent, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
            opt.resume, recurrent, optimizer, scheduler)
        print("Resume training: {}".format(opt.resume))

    # Set criterion
    criterion = nn.CrossEntropyLoss().to(DEVICE)

    # Set dataloader
    transform = transforms.ToTensor()

    trainlabel = os.path.join(opt.train, "labels", "train")
    trainfeature = os.path.join(opt.train, "feature", "train")
    vallabel = os.path.join(opt.val, "labels", "valid")
    valfeature = os.path.join(opt.val, "feature", "valid")

    train_set = dataset.FullLengthVideos(
        None,
        trainlabel,
        trainfeature,
        downsample=opt.train_downsample,
        transform=transform,
        summarize=opt.summarize,
        sampling=opt.sampling,
    )
    train_loader = DataLoader(train_set,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              collate_fn=utils.collate_fn_seq,
                              num_workers=opt.threads)
    val_set = dataset.FullLengthVideos(
        None,
        vallabel,
        valfeature,
        downsample=opt.val_downsample,
        transform=transform,
        summarize=None,
        sampling=0,
    )
    val_loader = DataLoader(val_set,
                            batch_size=1,
                            shuffle=False,
                            collate_fn=utils.collate_fn_seq,
                            num_workers=opt.threads)
    val_set_2 = dataset.FullLengthVideos(None,
                                         vallabel,
                                         valfeature,
                                         downsample=opt.train_downsample,
                                         transform=transform,
                                         summarize=None,
                                         sampling=0)
    val_loader_2 = DataLoader(val_set_2,
                              batch_size=1,
                              shuffle=False,
                              collate_fn=utils.collate_fn_seq,
                              num_workers=opt.threads)

    # Show the memory used by neural network
    print("The neural network allocated GPU with {:.1f} MB".format(
        torch.cuda.memory_allocated() / 1024 / 1024))

    #------------------
    # Train the models
    #------------------
    trainloss, trainaccs, valloss, valaccs = [], [], [], []
    epochs = []
    categories = [name.split('.')[0] for name in os.listdir(valfeature)]

    # Pre-test of the pretrained model
    acc, loss = val(recurrent, val_loader, 0, criterion)
    valloss.append(loss)
    valaccs.append(acc)
    epochs.append(0)

    for epoch in range(start_epoch, opt.epochs + 1):
        scheduler.step()

        # Save the train loss and train accuracy
        max_trainaccs = max(trainaccs) if len(trainaccs) > 0 else 0
        min_trainloss = min(trainloss) if len(trainloss) > 0 else 0
        recurrent, acc, loss = train(recurrent, train_loader, optimizer, epoch,
                                     criterion, max_trainaccs, min_trainloss)
        trainloss.append(loss)
        trainaccs.append(acc)

        # Validate the model at both downsample ratios
        acc, loss = val(recurrent, val_loader, epoch, criterion)
        valloss.append(loss)
        valaccs.append(acc)

        acc, loss = val(recurrent,
                        val_loader_2,
                        epoch,
                        criterion,
                        visual=False)

        # Save the epochs
        epochs.append(epoch)

        for x, y in ((trainloss, "trainloss.txt"),
                     (trainaccs, "trainaccs.txt"), (valloss, "valloss.txt"),
                     (valaccs, "valaccs.txt"), (epochs, "epochs.txt")):
            np.savetxt(os.path.join(opt.log, "problem_3", opt.tag, y),
                       np.array(x))

        if epoch % opt.save_interval == 0:
            savepath = os.path.join(opt.checkpoints, "problem_3", opt.tag,
                                    str(epoch) + '.pth')
            utils.saveCheckpoint(savepath, recurrent, optimizer, scheduler,
                                 epoch)

        # Draw the accuracy / loss curve
        draw_graphs(trainloss,
                    valloss,
                    trainaccs,
                    valaccs,
                    epochs,
                    label=categories)

    return recurrent
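
collate_fn and collate_fn_seq, passed to the DataLoaders in Examples #4 and #8, also live in the unseen utils module. Since the LSTM is built with batch_first=False, they presumably pad variable-length feature sequences along the time dimension; the sketch below is a guess at that interface (items assumed to be (FloatTensor[T, 2048], LongTensor[T]) pairs), not the repository's actual code.

import torch
from torch.nn.utils.rnn import pad_sequence

def collate_fn_seq(batch):
    # Pad variable-length (features, labels) pairs to a common length.
    features, labels = zip(*batch)
    lengths = torch.tensor([f.shape[0] for f in features])
    features = pad_sequence(features, batch_first=False)   # (T_max, batch, 2048)
    labels = pad_sequence(labels, batch_first=False)       # (T_max, batch)
    return features, labels, lengths
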
Example #9
def main():
    transform = transforms.Compose([
        transforms.Resize((448, 448)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    grid_num = 7 if args.command == "basic" else 14

    trainset = dataset.MyDataset(root="hw2_train_val/train15000",
                                 grid_num=grid_num,
                                 train=args.augment,
                                 transform=transform)

    testset = dataset.MyDataset(grid_num=grid_num,
                                root="hw2_train_val/val1500",
                                train=False,
                                transform=transform)

    trainLoader = DataLoader(trainset,
                             batch_size=args.batchs,
                             shuffle=True,
                             num_workers=args.worker)
    testLoader = DataLoader(testset,
                            batch_size=1,
                            shuffle=False,
                            num_workers=args.worker)
    device = utils.selectDevice(show=True)

    if args.command == "basic":
        model = models.Yolov1_vgg16bn(pretrained=True).to(device)
        criterion = models.YoloLoss(7., 2., 5., 0.5, device).to(device)
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=1e-4)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 45, 55],
                                                   gamma=0.1)
        start_epoch = 0

        if args.load:
            model, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
                args.load, model, optimizer, scheduler)

        model = train(model,
                      criterion,
                      optimizer,
                      scheduler,
                      trainLoader,
                      testLoader,
                      start_epoch,
                      args.epochs,
                      device,
                      lr=args.lr,
                      grid_num=7)

    elif args.command == "improve":
        model_improve = models.Yolov1_vgg16bn_Improve(
            pretrained=True).to(device)
        criterion = models.YoloLoss(14., 2., 5, 0.5, device).to(device)
        optimizer = optim.SGD(model_improve.parameters(),
                              lr=args.lr,
                              weight_decay=1e-4)
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer, [20, 40, 70],
                                                   gamma=0.1)
        start_epoch = 0

        if args.load:
            model_improve, optimizer, start_epoch, scheduler = utils.loadCheckpoint(
                args.load, model_improve, optimizer, scheduler)

        model_improve = train(model_improve,
                              criterion,
                              optimizer,
                              scheduler,
                              trainLoader,
                              testLoader,
                              start_epoch,
                              args.epochs,
                              device,
                              lr=args.lr,
                              grid_num=grid_num,
                              save_name="Yolov1-Improve")