def main():

    n_unique = 0
    
    # Check whether a trained model already exists
    print(f'[INFO] Searching for model... ({model_file})\n')
    if os.path.isfile(model_file):
        print('[INFO] Model found.\n')

    else:
        print('[INFO] Model not found!\n')
        print('[INFO] Creating model...')
        print('---------------------\n')

        # Load the training dataset
        print('[INFO] Loading datatrain...\n')
        if os.path.isfile(datatrain_file):
            print('[INFO] Datatrain found!\n')

        else:
            print('[INFO] Datatrain not found!\n')
            print('[INFO] Creating datatrain...\n')

            if face_localization:
                if not os.path.exists(dir_datatrain_new):
                    ds.preprocess(dir_datatrain, dir_datatrain_new, (width, height))

                ds.generate_data(datatrain_file, dir_datatrain_new + '/' + dir_datatrain, (width, height), normalized)

            else:
                ds.generate_data(datatrain_file, dir_datatrain, (width, height), normalized)

        print('[INFO] Start training...\n')
        if not os.path.exists(dir_logs):
            os.makedirs(dir_logs)

        if not os.path.exists(dir_logs_tb):
            os.makedirs(dir_logs_tb)
            os.makedirs(dir_logs_tb+'/training')
            os.makedirs(dir_logs_tb+'/validation')

        st.train(model_file, epoch, datatrain_file, width, height, channel, v_split)

    if run == 'recognition':
        recognition_data_test()

    elif run == 'counting':
        counting_from_video()

    gc.collect()
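
For reference, here is a minimal sketch of what a ds.generate_data helper with the signature used above might do: walk a class-per-subfolder image directory, resize each image, optionally scale pixels to [0, 1], and save the arrays plus labels to one file. Everything in this sketch (the PIL/NumPy approach, the .npz output, the folder layout) is an assumption; the real ds module is not shown.

# Hypothetical sketch of ds.generate_data -- the real ds module is not shown.
import os
import numpy as np
from PIL import Image

def generate_data(out_file, image_dir, size, normalized):
    """Resize every image under image_dir to `size` and save arrays with labels."""
    images, labels = [], []
    for label in sorted(os.listdir(image_dir)):          # assumes one subfolder per class
        class_dir = os.path.join(image_dir, label)
        if not os.path.isdir(class_dir):
            continue
        for name in os.listdir(class_dir):
            img = Image.open(os.path.join(class_dir, name)).convert('RGB').resize(size)
            arr = np.asarray(img, dtype=np.float32)
            if normalized:
                arr /= 255.0                             # scale pixel values to [0, 1]
            images.append(arr)
            labels.append(label)
    np.savez(out_file, x=np.stack(images), y=np.array(labels))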
Example #2
File: main.py  Project: shutakahama/SVM
def main():
    parser = argparse.ArgumentParser(description='svm')
    parser.add_argument('--dataset', type=str, default='normal')  # dataset type
    parser.add_argument('--data_num', type=int, default=100)  # number of data points
    parser.add_argument('--play', type=str, default='svm')  # operation to perform
    parser.add_argument('--C', type=float, default=0.1)  # soft-margin penalty (float, not int)
    parser.add_argument('--epoch', type=int, default=50)  # number of training epochs
    parser.add_argument('--gamma', type=int, default=10)  # Gaussian kernel parameter
    parser.add_argument('--select_method', type=str, default='kkt')  # working-set selection rule
    parser.add_argument('--kernel_shape', type=str, default='gaussian')  # kernel function
    args = parser.parse_args()

    # Load data
    x_train, y_train, x_test, y_test = dataset.generate_data(dataset=args.dataset, data_num=args.data_num)
    # dataset.data_plot(x_train, y_train)

    start = time.time()
    if args.play == 'svm':
        model = SVM(x_train, y_train, x_test, y_test, args)
        acc, alpha, b, pred = model.run()
    # elif args.play == 'boosting':
    #     acc, pred = boosting(x_train, y_train)
    # elif args.play == 'adaboost':
    #     acc, pred = adaboost(x_train, y_train)
    tm = time.time() - start

    print(f'time {tm} [sec]')
    print(f'train accuracy={acc}')
    acc_test = None
    if args.play == 'svm':
        acc_test = model.test()  # evaluate performance on the test data
        print(f'test accuracy={acc_test}')
        model.plot()

    model.plot_pred()

    return acc, acc_test, tm
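
A plausible counterpart for the dataset.generate_data call above, returning an (x_train, y_train, x_test, y_test) split of 2-D points with labels in {-1, +1}. The Gaussian-blob 'normal' mode and the 80/20 split are assumptions, not taken from the shutakahama/SVM project.

# Hypothetical sketch of dataset.generate_data for the SVM example above.
import numpy as np

def generate_data(dataset='normal', data_num=100, test_ratio=0.2):
    """Return (x_train, y_train, x_test, y_test) for a 2-D toy problem."""
    rng = np.random.default_rng(0)
    if dataset == 'normal':
        # Two Gaussian blobs with labels in {-1, +1}, as most SVM code expects.
        x_pos = rng.normal(loc=+1.0, scale=0.8, size=(data_num // 2, 2))
        x_neg = rng.normal(loc=-1.0, scale=0.8, size=(data_num - data_num // 2, 2))
        x = np.vstack([x_pos, x_neg])
        y = np.hstack([np.ones(len(x_pos)), -np.ones(len(x_neg))])
    else:
        raise ValueError(f'unknown dataset type: {dataset}')
    idx = rng.permutation(len(x))
    x, y = x[idx], y[idx]
    n_test = int(len(x) * test_ratio)
    return x[n_test:], y[n_test:], x[:n_test], y[:n_test]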
Example #3
    if os.path.isfile(check_ptr):  # resume from a saved checkpoint if one exists
        curr_state = T.load(check_ptr)
        epoch = curr_state["epoch"] + 1
        rnn.load_state_dict(curr_state["rnn_state"])
        optimizer.load_state_dict(curr_state["opti_state"])
        print("Model loaded.")
    else:
        epoch = 1

    (chx, mhx, rv) = (None, None, None)  # controller hidden state, memory state, read vectors

    for epoch in range(epoch, args.iterations + 1):
        llprint("\rIteration {ep}/{tot}".format(ep=epoch, tot=args.iterations))
        optimizer.zero_grad()

        input_data, target_output = dataset.generate_data(
            batch_size, args.bits, args.cuda)

        if rnn.debug:
            output, (chx, mhx, rv), v = rnn(input_data, (None, mhx, None),
                                            reset_experience=True,
                                            pass_through_memory=True)
        else:
            output, (chx, mhx, rv) = rnn(input_data, (None, mhx, None),
                                         reset_experience=True,
                                         pass_through_memory=True)

        # show_example(input_data, target_output, F.sigmoid(output))

        loss = criterion(output, target_output)

        loss.backward()
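
The loop above only needs (input_data, target_output) pairs from dataset.generate_data(batch_size, args.bits, args.cuda). A minimal copy-task generator with that signature might look like the sketch below; the sequence length, the extra delimiter channel, and the tensor layout are all assumptions.

# Hypothetical sketch of dataset.generate_data for the training loop above.
import torch as T

def generate_data(batch_size, bits, cuda=False, seq_len=10):
    """Copy task: the target repeats the random bit pattern after a delimiter."""
    # Random bit pattern; the last channel is reserved for the end-of-input marker.
    pattern = T.bernoulli(T.full((batch_size, seq_len, bits + 1), 0.5))
    pattern[:, :, -1] = 0
    inp = T.zeros(batch_size, 2 * seq_len + 1, bits + 1)
    tgt = T.zeros(batch_size, 2 * seq_len + 1, bits + 1)
    inp[:, :seq_len] = pattern
    inp[:, seq_len, -1] = 1                  # delimiter flag
    tgt[:, seq_len + 1:] = pattern           # the model must reproduce the pattern
    if cuda:
        inp, tgt = inp.cuda(), tgt.cuda()
    return inp, tgt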
Example #4
import os

import numpy as np
import tensorflow as tf  # TF1-style graph API (tf.placeholder, tf.constant)

# generate_data and load_data are assumed project-local helpers (their module is not shown).

# Training Parameters
train = os.environ["train"] == "True"
batch_size = int(os.environ["batch_size"])
num_epochs = int(os.environ["num_epochs"])
display_step = int(os.environ["display_step"])

# Network Parameters
input_size_h = int(os.environ["input_size_h"])
input_size_w = int(os.environ["input_size_w"])
num_channels = int(os.environ["num_channels"])

# Generate and load TFRecord
if train:
    num_samples, num_train_samples, num_test_samples = generate_data()
    tfrecord = load_data()

# TF Graph Input
X = tf.placeholder(tf.float32, shape=[None, input_size_h, input_size_w, num_channels])
Y = tf.placeholder(tf.float32, shape=[None, 1])
mean = tf.constant(np.load("processed/mean.npy"), dtype=tf.float32)
std = tf.constant(np.load("processed/std.npy"), dtype=tf.float32)
# TF Graph
def network(x, weights, biases):
    x = tf.reshape(x, shape=[-1, input_size_h, input_size_w, num_channels])
    x = tf.subtract(x, mean)
    x = tf.divide(x, std)
    x = tf.expand_dims(x, axis=1)              # (N, H, W, C) -> (N, 1, H, W, C)
    x = tf.transpose(x, perm=[0, 4, 2, 3, 1])  # -> (N, C, H, W, 1)
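
The graph above divides inputs by statistics loaded from processed/mean.npy and processed/std.npy. As a sketch, those files could be produced from the training images like this; the (N, H, W, C) array source and the epsilon are assumptions.

# Hypothetical sketch of producing the mean/std files loaded above.
import os
import numpy as np

def save_normalization_stats(images, out_dir='processed'):
    """images: float array of shape (N, H, W, C); writes per-pixel mean and std."""
    os.makedirs(out_dir, exist_ok=True)
    np.save(os.path.join(out_dir, 'mean.npy'), images.mean(axis=0))
    # Add a small epsilon so the later division by std cannot hit zero.
    np.save(os.path.join(out_dir, 'std.npy'), images.std(axis=0) + 1e-8)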
Example #5
	print(f'model loading time: {time()-t1}s')
	
	t2 = time()
	if args.txt is not None:
		hoge = TorchJson(args.txt)
		data = hoge.load_json(device)  # returns tensors already on the device
		for k, v in data.items():
			shape = (args.batch, ) + v.size()[1:] 
			data[k] = v.expand(*shape).clone()
			# print('k, v', k, *v.size())
			# print(*shape)
		
	else:
		data = {}
		for k in ['depot_xy', 'customer_xy', 'demand', 'car_start_node', 'car_capacity']:
			elem = [generate_data(device, batch = 1, n_car = args.n_car, n_depot = args.n_depot, n_customer = args.n_customer, seed = args.seed)[k].squeeze(0) for j in range(args.batch)]
			data[k] = torch.stack(elem, 0)
	
	# for k, v in data.items():
	# 	print('k, v', k, v.size())
	# 	print(v.type())# dtype of tensor
	
	print(f'data generate time: {time()-t2}s')
	pretrained = pretrained.to(device)
	# data = list(map(lambda x: x.to(device), data))
	pretrained.eval()
	with torch.no_grad():
		costs, _, pis = pretrained(data, return_pi = True, decode_type = args.decode_type)
	# print('costs:', costs)
	idx_in_batch = torch.argmin(costs, dim = 0)
	cost = costs[idx_in_batch].cpu().numpy()
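
For reference, a minimal generate_data that would satisfy the keys stacked above (depot_xy, customer_xy, demand, car_start_node, car_capacity) for a vehicle-routing instance. The shapes and uniform value ranges are assumptions about the project's data format.

# Hypothetical sketch of generate_data for the VRP keys used above.
import torch

def generate_data(device, batch=1, n_car=2, n_depot=1, n_customer=20, seed=None):
    """Return a dict of random VRP instance tensors on `device`."""
    if seed is not None:
        torch.manual_seed(seed)
    return {
        'depot_xy': torch.rand(batch, n_depot, 2, device=device),
        'customer_xy': torch.rand(batch, n_customer, 2, device=device),
        'demand': torch.rand(batch, n_customer, device=device),
        'car_start_node': torch.randint(0, n_depot, (batch, n_car), device=device),
        'car_capacity': torch.ones(batch, n_car, device=device),
    }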