Example No. 1
def main(args):
    print(args)
    paths = download_data()
    for i in paths:
        parse_data(i)
    print("=============== Parsing dataset complete ===============")

    forecast_imputation()

    # Load configurations
    configs = load_config("config.yml")
    modelConfig = configs["model"]
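    # The "model" section of config.yml is expected to supply the keys read
    # below: batchsize, hiddenlayers, epochs, lr, step_size, gamma, and
    # weight_decay.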

    # Timestamped log directory for this run (renamed to avoid shadowing the stdlib `time` module)
    timestamp = dt.now().strftime("%d-%m-%Y %H:%M:%S")
    logdir = "runs/" + timestamp

    # Initialize SummaryWriter for TensorBoard
    writer = Logger(logdir)
    write_configs(writer, modelConfig)

    # Preprocess the data
    train_loader, validation_loader, test_loader, data_mean, data_std, forecast_mean, forecast_std = load_dataset(
        difference=0, batch_size=modelConfig["batchsize"])
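    # data_mean/data_std (and the forecast stats) are reused below to scale
    # fresh inputs in load_latest() and to un-normalize the predictions.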

    # Baseline model
    # baseline_model = Persistance(18, writer)
    # Initialize the model
    model = NN_Model(input_dim=train_loader.dataset.tensors[0].size(1),
                     output_dim=1,
                     hidden_layers=modelConfig["hiddenlayers"],
                     writer=writer,
                     device=args.device)

    model.train(train_loader,
                validation_loader,
                epochs=modelConfig["epochs"],
                lr=modelConfig["lr"],
                step_size=modelConfig["step_size"],
                gamma=modelConfig["gamma"],
                weight_decay=modelConfig["weight_decay"])

    try:
        x = load_latest(10, 18, data_mean.item(), data_std.item(),
                        forecast_mean, forecast_std)

        ypred = model.predict(x)
        # Undo the z-score normalization applied during preprocessing
        ypred = (ypred * data_std.item()) + data_mean.item()
        print("Model run succeeded!")

    except Exception as err:
        print("Error message:", err)
        ypred = args.prev
        print("Model run failed; falling back to the previous value")

    # Cache the prediction so the next run can fall back to it on failure
    args.prev = ypred

    # b_rmse, b_ypred, b_ytest = baseline_model.test(test_loader)
    # rmse, ypred, ytest = model.test(test_loader)

    # print("RMSE:  ", rmse)
    # print("BASELINE: ", b_rmse)

    # writer.add_text("RMSE", str(rmse.item()), 0)
    # writer.add_text("RMSE/Baseline", str(b_rmse.item()), 0)

    ####################
    # Lagged Corr      #
    ####################
    # lagged_vals = get_lagged_correlation(ypred = ypred,
    #                                 ytrue = test_loader.dataset.tensors[1],
    #                                 num_delta= 180 )
    # writer.draw_lagged_correlation(lagged_vals)

    # y_test_unnormalized = (ytest * data_std) + data_mean
    # y_pred_unnormalized = (ypred * data_std) + data_mean

    # trade_env = Trader(y_test_unnormalized.tolist(), y_pred_unnormalized.tolist(), writer, 18)
    # trade_env.trade()
    # result = trade_env.pay_back()
    # print ("tota profit", result)

    writer.close()

    print("ypred: ", ypred)
    return ypred
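
A minimal invocation sketch for this example: main() only reads args.device and args.prev, so a plain namespace object suffices; the project helpers it calls (download_data, load_config, NN_Model, and so on) are assumed to be importable from the surrounding codebase.

from types import SimpleNamespace

# Hypothetical driver: device and prev are the only attributes main() reads.
args = SimpleNamespace(device="cpu", prev=0.0)
prediction = main(args)  # on failure, main() falls back to args.prev
print("fallback value for the next run:", args.prev)
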
Example No. 2
def main(args):
	global best_acc

	# create checkpoint dir
	if not isdir(args.checkpoint):
		mkdir_p(args.checkpoint)

	# create model
	print("==> creating model '{}', num_classes={}".format(args.arch, args.num_classes))
	model = models.__dict__[args.arch](length=args.length, num_classes=args.num_classes)

	# define loss function (criterion) and optimizer
	criterion = torch.nn.MSELoss().cuda()

	optimizer = torch.optim.RMSprop(model.parameters(),
									lr=args.lr,
									momentum=args.momentum,
									weight_decay=args.weight_decay)

	# optionally resume from a checkpoint
	title = 'OpenPose-' + args.arch
	if args.resume:
		if isfile(args.resume):
			print("=> loading checkpoint '{}'".format(args.resume))
			checkpoint = torch.load(args.resume)
			args.start_epoch = checkpoint['epoch']
			best_acc = checkpoint['best_acc']
			model.load_state_dict(checkpoint['state_dict'])
			optimizer.load_state_dict(checkpoint['optimizer'])
			print("=> loaded checkpoint '{}' (epoch {})"
				  .format(args.resume, checkpoint['epoch']))
			logger = Logger(join(args.checkpoint, 'log.txt'), title=title, resume=True)
		else:
			print("=> no checkpoint found at '{}'".format(args.resume))
			# Fall back to a fresh logger so `logger` is always defined below
			logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
			logger.set_names(['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])
	else:
		if isfile(args.model_weight):
			weight_dict = torch.load(args.model_weight)
			# print (weight_dict.keys())
			# print (model.state_dict().keys())
			model.load_state_dict(weight_dict)
		logger = Logger(join(args.checkpoint, 'log.txt'), title=title)
		logger.set_names(['Epoch', 'LR', 'Train Loss', 'Val Loss', 'Train Acc', 'Val Acc'])

	model = torch.nn.DataParallel(model).cuda()

	# cudnn.benchmark lets cuDNN auto-tune convolution algorithms for fixed-size inputs
	cudnn.benchmark = True
	print('    Total params: %.2fM' % (sum(p.numel() for p in model.parameters())/1000000.0))

	# Data loading code
	Select_Models  = ['Model0', 'Model1']
	Select_Actions = ['ArmRotate', 'Fist', 'WristRotate', 'Tap']
	if os.path.exists('./data/RenderHand/split.pickle'):
		import pickle
		# Pickle files must be opened in binary mode
		with open('./data/RenderHand/split.pickle', 'rb') as f:
			split = pickle.load(f)
	else:
		split = Split('./data/RenderHand/', Hand_Model=Select_Models, Hand_Action=Select_Actions)

	train_loader = torch.utils.data.DataLoader(
		datasets.Hand(split),
		batch_size=args.train_batch, shuffle=True,
		num_workers=args.workers, pin_memory=True)

	print ("train loader", len(train_loader))

	val_loader = torch.utils.data.DataLoader(
		datasets.Hand(split, isTrain=0),
		batch_size=args.test_batch, shuffle=False,
		num_workers=args.workers, pin_memory=True)

	print ("valid loader", len(val_loader))

	if args.evaluate:
		print('\nEvaluation only')
		loss, acc, predictions = validate(val_loader, model, criterion, args.num_classes, args.debug, args.flip)
		print(loss, acc)
		save_pred(predictions, checkpoint=args.checkpoint)
		return

	lr = args.lr
	for epoch in range(args.start_epoch, args.epochs):
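		# adjust_learning_rate presumably applies step decay: multiply lr by
		# args.gamma at the epochs listed in args.schedule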
		lr = adjust_learning_rate(optimizer, epoch, lr, args.schedule, args.gamma)
		print('\nEpoch: %d | LR: %.8f' % (epoch + 1, lr))

		# train for one epoch
		train_loss, train_acc = train(train_loader, model, criterion, optimizer, args.debug, args.flip)

		# evaluate on validation set
		valid_loss, valid_acc, predictions = validate(val_loader, model, criterion, args.num_classes,
													  args.debug, args.flip)

		# append logger file
		logger.append([epoch + 1, lr, train_loss, valid_loss, train_acc, valid_acc])

		# remember best acc and save checkpoint
		is_best = valid_acc > best_acc
		best_acc = max(valid_acc, best_acc)
		save_checkpoint({
			'epoch': epoch + 1,
			'arch': args.arch,
			'state_dict': model.state_dict(),
			'best_acc': best_acc,
			'optimizer' : optimizer.state_dict(),
		}, predictions, is_best, checkpoint=args.checkpoint)

	logger.close()
	# logger.plot(['Train Loss', 'Val Loss'])
	# savefig(os.path.join(args.checkpoint, 'Loss.eps'))
	logger.plot(['Train Acc', 'Val Acc'])
	savefig(os.path.join(args.checkpoint, 'Acc.eps'))
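
A hypothetical command-line setup for this example, inferred from the attributes main() reads; the flag names and defaults below are illustrative assumptions, not the project's actual CLI.

import argparse

parser = argparse.ArgumentParser(description='Hand-pose training (illustrative CLI)')
parser.add_argument('--arch', default='hourglass', help='key into models.__dict__ (assumed default)')
parser.add_argument('--length', type=int, default=16)
parser.add_argument('--num-classes', type=int, default=21)
parser.add_argument('--checkpoint', default='checkpoint')
parser.add_argument('--resume', default='', help='path of a checkpoint to resume from')
parser.add_argument('--model-weight', default='', help='pretrained weights to load when not resuming')
parser.add_argument('--lr', type=float, default=2.5e-4)
parser.add_argument('--momentum', type=float, default=0.0)
parser.add_argument('--weight-decay', type=float, default=0.0)
parser.add_argument('--start-epoch', type=int, default=0)
parser.add_argument('--epochs', type=int, default=100)
parser.add_argument('--schedule', type=int, nargs='+', default=[60, 90])
parser.add_argument('--gamma', type=float, default=0.1)
parser.add_argument('--train-batch', type=int, default=32)
parser.add_argument('--test-batch', type=int, default=32)
parser.add_argument('--workers', type=int, default=4)
parser.add_argument('--evaluate', action='store_true')
parser.add_argument('--debug', action='store_true')
parser.add_argument('--flip', action='store_true')

best_acc = 0  # main() reads and updates this module-level variable
main(parser.parse_args())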