Example no. 1
0
            # Tail of a test() evaluation loop — the function header and the
            # batch loop opening are outside this view.
            # Predicted class = index of the max logit along dim 1.
            pred = output.data.max(1)[1]
            # Count correct predictions in this batch.
            # NOTE(review): .sum() returns a tensor here; confirm `correct`
            # is accumulated as the intended numeric type on this PyTorch version.
            correct += pred.eq(target.data).sum()

            # test loss average
            # NOTE(review): `loss.data[0]` is the pre-PyTorch-0.4 idiom for
            # extracting a scalar; on modern PyTorch this raises and
            # `loss.item()` is the replacement — confirm the target version.
            loss_avg += loss.data[0]

        # Publish per-batch mean loss and dataset-wide accuracy into the
        # shared `state` dict read by the caller.
        state['test_loss'] = loss_avg / len(test_loader)
        state['test_accuracy'] = correct / len(test_loader.dataset)

    # Main loop: train/evaluate once per epoch, checkpoint on improvement,
    # and append one JSON line of state to the log per epoch.
    # (Enclosing function/script header, and the definitions of `args`,
    # `state`, `optimizer`, `net`, `log`, `train`, `test`, are outside
    # this view.)
    best_accuracy = 0.0
    for epoch in range(args.epochs):
        # Step-decay the learning rate at the scheduled epochs and push the
        # new value into every optimizer parameter group.
        if epoch in args.schedule:
            state['learning_rate'] *= args.gamma
            for param_group in optimizer.param_groups:
                param_group['lr'] = state['learning_rate']

        state['epoch'] = epoch
        train()
        # test() is expected to fill state['test_accuracy'] (see the
        # evaluation fragment above this loop in the original file).
        test()
        # Keep only the best-so-far weights on disk.
        if state['test_accuracy'] > best_accuracy:
            best_accuracy = state['test_accuracy']
            torch.save(net.state_dict(),
                       os.path.join(args.save, 'model.pytorch'))
        # One JSON object per line makes the log easy to parse later.
        log.write('%s\n' % json.dumps(state))
        log.flush()
        print(state)
        print("Best accuracy: %f" % best_accuracy)

    log.close()
Example no. 2
0
    # Main loop: same train/test/checkpoint cycle as the variant above, but
    # with per-epoch wall-clock timing written to the log instead of stdout.
    # (`args`, `state`, `optimizer`, `net`, `log`, `train`, `test`,
    # `modeloutput`, `nextline`, and `starttime` are defined outside this
    # view; `nextline` is presumably "\n" and `starttime` the run start —
    # TODO confirm.)
    best_accuracy = 0.0
    for epoch in range(args.epochs):
        epochstarttime = datetime.datetime.now()

        # Step-decay the learning rate at the scheduled epochs and push the
        # new value into every optimizer parameter group.
        if epoch in args.schedule:
            state['learning_rate'] *= args.gamma
            for param_group in optimizer.param_groups:
                param_group['lr'] = state['learning_rate']

        state['epoch'] = epoch
        train()
        # test() is expected to fill state['test_accuracy'].
        test()
        # Keep only the best-so-far weights on disk.
        if state['test_accuracy'] > best_accuracy:
            best_accuracy = state['test_accuracy']
            torch.save(net.state_dict(), os.path.join(args.save, modeloutput))
        # One JSON object per line makes the log easy to parse later.
        log.write('%s\n' % json.dumps(state))
        # print(state)
        log.write("Best accuracy: %f\n" % best_accuracy)

        # Per-epoch timing: end timestamp and elapsed duration.
        epochendtime = datetime.datetime.now()
        log.write(
            f'end: {epochendtime}; len: {epochendtime - epochstarttime}{nextline}'
        )
        log.flush()

    # Whole-run timing relative to `starttime` (defined before this view).
    endtime = datetime.datetime.now()
    log.write(f'end: {endtime}; len: {endtime - starttime}{nextline}')
    log.flush()
    log.close()