Example #1
def training_init():
    global count, val_loss, s
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
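        # NOTE: torch.autograd.Variable is a no-op wrapper since PyTorch 0.4
        # (loss.item() below implies >= 0.4); plain tensors would work here too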
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata_real = [
            Variable(sample_real['ydata_bin'].cuda()),
            Variable(sample_real['ydata_res'].cuda())
        ]
        output_real = model(xdata_real, label_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata_render = [
            Variable(sample_render['ydata_bin'].cuda()),
            Variable(sample_render['ydata_res'].cuda())
        ]
        output_render = model(xdata_render, label_render)
        # loss
        ydata_bin = torch.cat((ydata_real[0], ydata_render[0]))
        ydata_res = torch.cat((ydata_real[1], ydata_render[1]))
        output_bin = torch.cat((output_real[0], output_render[0]))
        output_res = torch.cat((output_real[1], output_render[1]))
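        # adaptive two-task loss: cross-entropy on the bin targets plus the
        # residual MSE scaled by exp(-2s), where s is a running log-scale
        # estimate refreshed after each optimizer step (see below)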
        Lc = ce_loss(output_bin, ydata_bin)
        Lr = mse_loss(output_res, ydata_res)
        loss = Lc + 0.5 * math.exp(-2 * s) * Lr + s
        # parameter updates
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        s = 0.5 * math.log(Lr.item())  # refresh s so that exp(-2s) * Lr stays near 1
        # store
        count += 1
        writer.add_scalar('train_loss', loss.item(), count)
        writer.add_scalar('alpha', 0.5 * math.exp(-2 * s), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
        del ydata_bin, ydata_res, output_bin, output_res
        del output_real, output_render, loss, sample_real, sample_render
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
Example #2
def training():
	global count, val_loss, s, num_ensemble
	model.train()
	bar = progressbar.ProgressBar(max_value=max_iterations)
	for i, (sample_real, sample_render) in enumerate(zip(real_loader, render_loader)):
		# forward steps
		# output
		xdata_real = Variable(sample_real['xdata'].cuda())
		label_real = Variable(sample_real['label'].cuda())
		ydata_real = [Variable(sample_real['ydata_bin'].cuda()), Variable(sample_real['ydata'].cuda())]
		output_real = model(xdata_real, label_real)
		xdata_render = Variable(sample_render['xdata'].cuda())
		label_render = Variable(sample_render['label'].cuda())
		ydata_render = [Variable(sample_render['ydata_bin'].cuda()), Variable(sample_render['ydata'].cuda())]
		output_render = model(xdata_render, label_render)
		# loss
		ydata_bin = torch.cat((ydata_real[0], ydata_render[0]))
		ydata = torch.cat((ydata_real[1], ydata_render[1]))
		output_bin = torch.cat((output_real[0], output_render[0]))
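		# decode the pose: take the cluster center of the highest-scoring bin
		# and add the predicted residual on top of it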
		_, ind = torch.max(output_bin, dim=1)
		y = torch.index_select(cluster_centers_, 0, ind)
		output = y + torch.cat((output_real[1], output_render[1]))
		Lc = ce_loss(output_bin, ydata_bin)
		Lr = gve_loss(output, ydata)
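		# adaptive weighting as in Example #1, here with exp(-s) since s is
		# refreshed to log(Lr) after each step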
		loss = Lc + math.exp(-s)*Lr + s
		# parameter updates
		optimizer.zero_grad()
		loss.backward()
		optimizer.step()
		s = math.log(Lr.item())  # refresh s so that exp(-s) * Lr stays near 1
		# store
		writer.add_scalar('train_loss', loss.item(), count)
		writer.add_scalar('alpha', math.exp(-s), count)
		if i % 500 == 0:
			ytest, yhat_test, test_labels = testing()
			tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
			writer.add_scalar('val_loss', tmp_val_loss, count)
			val_loss.append(tmp_val_loss)
		count += 1
		# snapshot for the ensemble, taken halfway through each cycle of the
		# cyclic schedule with period optimizer.c (integer division keeps the
		# comparison exact when the period is odd)
		if count % optimizer.c == optimizer.c // 2:
			ytest, yhat_test, test_labels = testing()
			num_ensemble += 1
			results_file = os.path.join(results_dir, 'num' + str(num_ensemble))
			spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})
		# cleanup
		del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
		del ydata_bin, ydata, output_bin, output
		del output_real, output_render, sample_real, sample_render, loss
		bar.update(i)
		# stop
		if i == max_iterations:
			break
	render_loader.dataset.shuffle_images()
	real_loader.dataset.shuffle_images()
Example #3
def training():
    global count, val_loss
    model.train()
    bar = progressbar.ProgressBar(max_value=max_iterations)
    for i, (sample_real,
            sample_render) in enumerate(zip(real_loader, render_loader)):
        # forward steps
        xdata_real = Variable(sample_real['xdata'].cuda())
        label_real = Variable(sample_real['label'].cuda())
        ydata_real = [
            Variable(sample_real['ydata_bin'].cuda()),
            Variable(sample_real['ydata'].cuda())
        ]
        output_real = model(xdata_real, label_real)
        loss_real = criterion(output_real, ydata_real)
        xdata_render = Variable(sample_render['xdata'].cuda())
        label_render = Variable(sample_render['label'].cuda())
        ydata_render = [
            Variable(sample_render['ydata_bin'].cuda()),
            Variable(sample_render['ydata'].cuda())
        ]
        output_render = model(xdata_render, label_render)
        loss_render = criterion(output_render, ydata_render)
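        # equally weighted sum of the losses on the real and rendered batches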
        loss = loss_real + loss_render
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # store
        writer.add_scalar('train_loss', loss.item(), count)
        if i % 1000 == 0:
            ytest, yhat_test, test_labels = testing()
            spio.savemat(results_file, {
                'ytest': ytest,
                'yhat_test': yhat_test,
                'test_labels': test_labels
            })
            tmp_val_loss = get_error2(ytest, yhat_test, test_labels,
                                      num_classes)
            writer.add_scalar('val_loss', tmp_val_loss, count)
            val_loss.append(tmp_val_loss)
        count += 1
        # cleanup
        del xdata_real, xdata_render, label_real, label_render, ydata_real, ydata_render
        del output_real, output_render, loss_real, loss_render, sample_real, sample_render, loss
        bar.update(i)
        # stop
        if i == max_iterations:
            break
    render_loader.dataset.shuffle_images()
    real_loader.dataset.shuffle_images()
Example #4
def save_checkpoint(filename):
    torch.save(model.state_dict(), filename)


for epoch in range(args.num_epochs):
    tic = time.time()
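    # epoch-level LR update; calling scheduler.step() before training() is the
    # pre-1.1 PyTorch convention (newer releases expect it after optimizer.step())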
    scheduler.step()
    # training step
    training()
    # save model at end of epoch
    save_checkpoint(model_file)
    # validation
    ytest, yhat_test, test_labels = testing()
    print('\nMedErr: {0}'.format(
        get_error2(ytest, yhat_test, test_labels, num_classes)))
    # time and output
    toc = time.time() - tic
    print('Epoch: {0} done in time {1}s'.format(epoch, toc))
    # cleanup
    gc.collect()
writer.close()
val_loss = np.stack(val_loss)
spio.savemat(plots_file, {'val_loss': val_loss})

# evaluate the model
ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(
    get_error2(ytest, yhat_test, test_labels, num_classes)))
spio.savemat(results_file, {
    'ytest': ytest,
    'yhat_test': yhat_test,
    'test_labels': test_labels
})
Example #5
def testing():
	# head reconstructed to mirror the training loops above; assumes a
	# test_loader whose samples carry the same dict keys
	model.eval()
	ypred, ytrue, labels = [], [], []
	for sample in test_loader:
		xdata = Variable(sample['xdata'].cuda())
		label = Variable(sample['label'].cuda())
		output = model(xdata, label)
		ypred_bin = np.argmax(output[0].data.cpu().numpy(), axis=1)
		ypred_res = output[1].data.cpu().numpy()
		y = kmeans_dict[ypred_bin, :] + ypred_res
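		# renormalize to unit length, guarding against division by zero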
		ypred.append(y / np.maximum(np.linalg.norm(y, 2, 1, True), 1e-10))
		ytrue.append(sample['ydata'].numpy())
		labels.append(sample['label'].numpy())
		del xdata, label, output, sample
		gc.collect()
	ypred = np.concatenate(ypred)
	ytrue = np.concatenate(ytrue)
	labels = np.concatenate(labels)
	model.train()
	return ytrue, ypred, labels


ytest, yhat_test, test_labels = testing()
print('\nMedErr: {0}'.format(get_error2(ytest, yhat_test, test_labels, num_classes)))
results_file = os.path.join(results_dir, 'num'+str(num_ensemble))
spio.savemat(results_file, {'ytest': ytest, 'yhat_test': yhat_test, 'test_labels': test_labels})

for epoch in range(args.num_epochs):
	tic = time.time()
	# training step
	training()
	# validation
	ytest, yhat_test, test_labels = testing()
	tmp_val_loss = get_error2(ytest, yhat_test, test_labels, num_classes)
	print('\nMedErr: {0}'.format(tmp_val_loss))
	writer.add_scalar('val_loss', tmp_val_loss, count)
	val_loss.append(tmp_val_loss)
	# time and output
	toc = time.time() - tic
	print('Epoch: {0} done in time {1}s'.format(epoch, toc))