Example 1
def train_AE(X, y, k, i):
    '''
    :param X: Training Set, n * d
    :param y: Training Labels, n * 1
    :param k: Amount of clusters
    :param i: Used to generate the name of result file, see line 100
    :return: (purity, NMI)
    '''
    lam2 = 10 ** -2
    delta = 0.1
    rate = 0.001
    activation = TANH
    # stage 1: pre-train a shallow 1024-512-1024 autoencoder on the raw input
    print('-----------------start pre-training 1-----------------')
    pre_1 = AutoEncoder(X, y, k, [1024, 512, 1024], max_iter=5, delta=delta,
                        lam2=lam2, file_name=PARAMS_NAMES[JAFFE], read_params=False, rate=rate,
                        activation=activation)
    pre_1.train()
    # stage 2: pre-train a 512-300-512 autoencoder on the hidden representation from stage 1
    print('-----------------start pre-training 2-----------------')
    pre_2 = AutoEncoder(pre_1.H, y, k, [512, 300, 512], max_iter=5, delta=delta,
                        lam2=lam2, file_name=PARAMS_NAMES[JAFFE], read_params=False, rate=rate,
                        activation=activation)
    pre_2.train()
    # fine-tuning: build the full 1024-512-300-512-1024 stacked autoencoder
    print('-----------------start training-----------------')
    ae = AutoEncoder(X, y, k, [1024, 512, 300, 512, 1024], max_iter=35, delta=delta,
                     lam2=lam2, file_name=PARAMS_NAMES[JAFFE], read_params=False, rate=rate,
                     decay_threshold=50, activation=activation)
    # initialise the outermost encoder/decoder layers from stage 1 ...
    ae.W[1] = pre_1.W[1]
    ae.W[4] = pre_1.W[2]
    ae.b[1] = pre_1.b[1]
    ae.b[4] = pre_1.b[2]

    # ... and the innermost layers from stage 2
    ae.W[2] = pre_2.W[1]
    ae.W[3] = pre_2.W[2]
    ae.b[2] = pre_2.b[1]
    ae.b[3] = pre_2.b[2]
    ae.train()
    name = DIR_NAME + 'ae_' + str(i) + '.mat'
    # return train_baseline(ae.H, y, k, 10, KM)
    # cluster the learned hidden representation and score it against the ground-truth labels
    km = KMeans(k)
    y_pred = km.fit_predict(ae.H, y)
    p, mi = cal_metric2(y_pred, y, k)
    scio.savemat(name, {'y_predicted': y_pred, 'y': y})
    return p, mi
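A minimal driver for the function above, purely for illustration: the cluster count and the number of repetitions are assumptions, while train_AE and DIR_NAME come from the example itself.

# Hypothetical driver; k=10 and runs=3 are illustrative values, not taken from the example.
import numpy as np

def run_trials(X, y, k=10, runs=3):
    purities, nmis = [], []
    for i in range(runs):
        p, mi = train_AE(X, y, k, i)  # also writes DIR_NAME + 'ae_<i>.mat'
        purities.append(p)
        nmis.append(mi)
    print('purity: {:.4f} +/- {:.4f}'.format(np.mean(purities), np.std(purities)))
    print('NMI:    {:.4f} +/- {:.4f}'.format(np.mean(nmis), np.std(nmis)))
    return np.mean(purities), np.mean(nmis)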
Example 2
save_name = args.save_name
sample_percent = args.sample_percent
feature_output_path = args.feature_output_path
layers = list(map(int, args.layers.split(',')))  # e.g. '1024,512,256' -> [1024, 512, 256]

X, labels, file_names = load_data(image_dir,
                                  sample_percent=sample_percent,
                                  return_names=True)
length, width = X[0].shape
input_size = length * width
# flatten each image into a vector and scale pixel intensities to [0, 1]
X = torch.Tensor(X).view(-1, input_size).type(torch.float32)
X /= 255

if load_path is None:
    model = AutoEncoder([input_size] + layers)
    model.train(X=X, batch_size=batch_size, epochs=epochs, verbose=True)
else:
    model = AutoEncoder.load(load_path)

if feature_output_path is not None:
    print('Saving learned features...')
    new_features = model(X)
    new_features = new_features.detach().numpy()
    root_dir = os.getcwd()
    try:
        os.mkdir(root_dir + '\\' + feature_output_path)
    except FileExistsError:  # if the directory already exists, that's ok
        pass
    for label in np.unique(labels):
        try:
            os.mkdir(root_dir + '\\' + feature_output_path + '\\' + label)
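The directory handling above uses Windows-style '\\' separators; a minimal, platform-independent sketch of the same setup, assuming the feature_output_path and labels variables from the example:

# Portable equivalent of the mkdir/try pattern above (assumes feature_output_path and labels).
import os
import numpy as np

output_root = os.path.join(os.getcwd(), feature_output_path)
os.makedirs(output_root, exist_ok=True)  # no error if the directory already exists
for label in np.unique(labels):
    os.makedirs(os.path.join(output_root, str(label)), exist_ok=True)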
Example 3
def main(args=None):
	parser = argparse.ArgumentParser(description='Simple training script.')
	parser.add_argument('--cls_id', help='class id', type=int)
	parser.add_argument('--version', help='model version', type=float)
	parser.add_argument('--gamma', help='gamma for the SoftL1Loss', type=float, default=9.0)
	parser.add_argument('--lr', help='lr for optimization', type=float, default=1e-4)
	parser.add_argument('--epoches', help='number of epochs for optimization', type=int, default=4)
	parser.add_argument('--resume_epoch', help='epoch of a previously trained model to resume from', type=int, default=0)
	parser.add_argument('--batch_size', help='batch size for optimization', type=int, default=10)
	parser.add_argument('--checkpoints', help='checkpoints path', type=str, default='voc_checkpoints')
	parser = parser.parse_args(args)

	# append the class name to the checkpoint directory name and create it if needed
	cls_name = classes[parser.cls_id]
	parser.checkpoints = '_'.join([parser.checkpoints, cls_name])
	if not os.path.isdir(parser.checkpoints):
		os.mkdir(parser.checkpoints)
	print('will save checkpoints in ' + parser.checkpoints)
	cls_dir = "../context_profile/voc_detection_{:s}_p10/"\
		.format(cls_name)
	batch_size = parser.batch_size
	print('[data prepare]....')
	dataloader_train = DataLoader(Fetch('train_benign', root_dir=cls_dir), batch_size=batch_size, num_workers=2, shuffle=True)

	print('[model prepare]....')
	use_gpu = torch.cuda.device_count()>0

	model = AutoEncoder(parser.gamma)
	if use_gpu:
		model = torch.nn.DataParallel(model).cuda()
	optimizer = torch.optim.Adam(model.parameters(), lr=parser.lr)
	scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, patience=2, verbose=True)
	# optionally resume training from a previously saved checkpoint
	if parser.resume_epoch > 0:
		checkpoint_name = os.path.join(parser.checkpoints, 'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, parser.resume_epoch))
		if not os.path.isfile(checkpoint_name):
			raise ValueError('No checkpoint file {:s}'.format(checkpoint_name))
		model.load_state_dict(torch.load(checkpoint_name))
		print('model loaded from {:s}'.format(checkpoint_name))

	print('[model training]...')
	loss_hist = []
	epoch_loss = []
	num_iter = len(dataloader_train)
	for epoch_num in range(parser.resume_epoch, parser.epoches):
		model.train()
		for iter_num, sample in enumerate(dataloader_train):
			optimizer.zero_grad()
			# move the batch to the GPU when one is available
			if use_gpu:
				data = sample['data'].cuda().float()
			else:
				data = sample['data'].float()

			# the model's forward pass returns the reconstruction loss; average it over the batch
			loss = model(data).mean()
			if bool(loss == 0):
				continue
			loss.backward()
			torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
			optimizer.step()
			epoch_loss.append(float(loss))
			loss_hist.append(float(loss))
			if iter_num % 30 == 0:
				print('Epoch {:d}/{:d} | Iteration: {:d}/{:d} | loss: {:1.5f}'.format(
					epoch_num+1, parser.epoches, iter_num+1, num_iter, float(loss)))
			if iter_num % 3000 == 0:
				scheduler.step(np.mean(epoch_loss))
				epoch_loss = []
		# only start writing checkpoints from the second epoch onwards
		if epoch_num < 1:
			continue
		checkpoint_name = os.path.join(parser.checkpoints, 'model_{:1.1f}_epoch{:d}.pt'.format(parser.version, epoch_num+1))
		torch.save(model.state_dict(), checkpoint_name)
		print('Model saved as {:s}'.format(checkpoint_name))

	np.save('loss_hist.npy', loss_hist)
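A checkpoint written by this script can later be restored for evaluation; a minimal sketch, assuming the same AutoEncoder class, checkpoint naming scheme, and DataParallel wrapping as above (the gamma value, version, and epoch number are placeholders):

# Hypothetical reload for evaluation; gamma=9.0, version 1.0 and epoch 4 are placeholders.
import os
import torch

model = AutoEncoder(9.0)
model = torch.nn.DataParallel(model)  # keeps the 'module.' key prefix used when the checkpoint was saved
checkpoint_name = os.path.join('voc_checkpoints_' + cls_name, 'model_{:1.1f}_epoch{:d}.pt'.format(1.0, 4))
model.load_state_dict(torch.load(checkpoint_name, map_location='cpu'))
model.eval()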