Esempio n. 1
0
def main():
    """Regenerate a trajectory segment (with Jacobians) and echo the loop counter.

    NOTE(review): the unpack binds ``Val_set`` / ``Jaco_set`` (lower-case s)
    while the call argument reads ``Val_Set`` -- presumably a module-level
    variable left over from an earlier run; confirm the intended
    capitalisation, otherwise this raises NameError.
    """
    # Continue the simulation from the last stored state; Calc_Jaco=True
    # asks the generator to return Jacobians alongside the trajectory.
    information, initial_val, initial_t, final_t, delta_t, Val_set, Jaco_set = Data_Generator.main(
        OutputFile=False,
        initial_val=Val_Set[len(Val_Set) - 1],
        old_information="",
        Calc_Jaco=True)
    # Progress echo only; presumably the real per-iteration work was
    # truncated from this snippet -- TODO confirm.
    for ttl in range(0, GENERATOR_LOOP):
        print(ttl, GENERATOR_LOOP)
Esempio n. 2
0
def main_train(index, args):
    """Train DispNet on a single device (CPU, CUDA GPU or TPU core).

    Builds the encoder (Down_Convolution) and decoder (Up_Conv), attaches
    one SGD optimizer per sub-module, streams batches through
    Data_Generator and runs ``args.iterations`` training steps with
    periodic logging and checkpointing.

    Args:
        index: ordinal of the worker/TPU core (used for logging only;
            presumably supplied by xmp.spawn -- TODO confirm).
        args: parsed argument namespace providing dataset paths, batch
            sizes, device flags and training hyper-parameters.
    """
    print("Running main on TPU:{}".format(index))
    torch.manual_seed(2)
    # Resolve the XLA device eagerly.  The original bare `except: pass`
    # left `dev` unbound when torch_xla was unavailable, which surfaced
    # later as a confusing NameError; record None instead (this is only
    # fatal when args.tpu is actually requested).
    try:
        dev = xm.xla_device()
    except Exception:
        dev = None

    # Placement helpers: map a module/tensor (or None) onto a device.
    def cpu(x):
        return x.cpu() if x is not None else None

    def tpu(x):
        return x.to(dev)

    def gpu(x):
        return x.cuda() if x is not None else None

    # Select device; TPU overrides CUDA, CPU is the fallback.
    device = gpu if args.cuda else cpu
    device = tpu if args.tpu else device
    # Optimizers for both halves of the network, in creation order (D, U).
    Up_to_Down = []

    # Create an SGD optimizer for `module` and append it to every list in
    # `model_optimizers`.  If args.param_init is non-zero the module's
    # parameters are first re-initialised uniformly in [-init, init].
    def add_optimizer(module, model_optimizers=()):  #needs a change for lr tpu
        if args.param_init != 0.0:
            for param in module.parameters():
                param.data.uniform_(-args.param_init, args.param_init)
        optimizer = torch.optim.SGD(module.parameters(), lr=args.learning_rate)
        for direction in model_optimizers:
            direction.append(optimizer)
        return optimizer

    # Training dataset: stereo image pairs with ground-truth disparities.
    train_dataset = Dataset(input_left=args.real_left,
                            input_right=args.real_right,
                            output_left=args.disp_left,
                            output_right=args.disp_right)
    # Distributed sampler is only needed for multi-core TPU training.
    if args.tpu and args.distributed:
        world_size = xm.xrt_world_size()
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset,
            num_replicas=world_size,
            rank=xm.get_ordinal(),
            shuffle=True)
    else:
        world_size = 1
        train_sampler = None
    # DataLoader parameters; shuffle and sampler are mutually exclusive in
    # torch DataLoader, so shuffle only when no sampler is installed.
    params_training = {
        'batch_size': args.batch_size,
        'shuffle': train_sampler is None,
        'num_workers': args.num_workers,
        'drop_last': True,
        "sampler": train_sampler
    }
    params_validation = {
        'batch_size': args.batch_size_v,
        'shuffle': True,
        'num_workers': args.num_workers,
        'drop_last': True
    }
    data = {
        "training":
        Data_Generator(train_dataset,
                       params_training,
                       tpu=args.tpu,
                       device=dev if args.tpu else None)
    }
    if args.validation:
        data["validation"] = Data_Generator(Dataset(
            input_left=args.real_left_v,
            input_right=args.real_right_v,
            output_left=args.disp_left_v,
            output_right=args.disp_right_v),
                                            params_validation,
                                            tpu=args.tpu,
                                            device=dev if args.tpu else None)
    # Model: encoder (D) and decoder (U), each with its own optimizer.
    D = device(
        Down_Convolution(num_filters=parameters["num_filters"],
                         kernels=parameters["kernels"],
                         strides=parameters["strides"]))  #parms to be filled
    add_optimizer(D, [Up_to_Down])
    U = device(
        Up_Conv(up_kernels=parameters["up_kernels"],
                i_kernels=parameters["i_kernels"],
                pr_kernels=parameters["pr_kernels"],
                up_filters=parameters["up_filters"],
                down_filters=parameters["down_filters"],
                index=parameters["index"],
                pr_filters=parameters["pr_filters"]))  #parms to be filled
    add_optimizer(U, [Up_to_Down])
    DispNet_ = device(DispNet(D, U, device))
    # Trainer drives the optimisation of the composed network.
    DispNet_trainer = Trainer(Data_Generator=data["training"],
                              optimizers=Up_to_Down,
                              Network=DispNet_,
                              schedule_coeff=parameters["schedule_coeff"],
                              batch_size=args.batch_size)
    trainers = [DispNet_trainer]

    # Persist the composed network and both halves under `name`, with the
    # step number embedded in the filename.
    def save_models(name, step):
        torch.save(
            DispNet_,
            '{0}{1}:DispNet_step_size_{2}.pth'.format(name, args.save, step))
        torch.save(
            D, '{0}{1}:Down_step_size_{2}.pth'.format(name, args.save, step))
        torch.save(U,
                   '{0}{1}:UP_step_size_{2}.pth'.format(name, args.save, step))

    # Main optimisation loop with periodic logging and checkpointing.
    def training(loggers):  # this may be used as intput to the swamp tpu
        for steps in range(1, args.iterations + 1):
            for trainer in trainers:
                trainer.step(steps)

            if steps % args.log_interval == 0:
                print('STEP {0} x {1}'.format(steps, args.batch_size))
                for logger in loggers:
                    logger.log()
            if steps % args.save_interval == 0 and args.model_path != "":
                save_models(args.model_path, steps)

    # Logger reports training statistics every log_interval steps.
    DispNet_logger = Logger("DispNet",
                            DispNet_trainer,
                            log_interval=args.log_interval)
    # Run training and report wall-clock time.
    start = time()
    training(loggers=[DispNet_logger])  #loggers to be defined
    print("Total training time taken:{0:.2f}s".format(time() - start))
Esempio n. 3
0
def main_train(index, args):
    """Train the SDMU stereo-disparity network on one device (CPU/GPU/TPU).

    Builds the SDMU encoder/decoder pair with Xavier-initialised conv
    weights, one Adadelta optimizer per sub-module (learning rate scaled
    by the TPU world size), then runs ``args.iterations`` training steps
    with periodic logging, optional validation and checkpointing of
    model + optimizer state dicts.

    Args:
        index: ordinal of the worker/TPU core (logging only; presumably
            supplied by xmp.spawn -- TODO confirm).
        args: parsed argument namespace with dataset paths, device flags
            and hyper-parameters.
    """
    print("Running main on TPU:{}".format(index))
    torch.manual_seed(2)
    # Resolve the XLA device eagerly.  The original bare `except: pass`
    # left `dev` unbound when torch_xla was unavailable; record None
    # instead (only fatal when args.tpu is actually requested).
    try:
        dev = xm.xla_device()
    except Exception:
        dev = None

    # Placement helpers: map a module/tensor (or None) onto a device.
    def cpu(x):
        return x.cpu() if x is not None else None

    def tpu(x):
        return x.to(dev)

    def gpu(x):
        return x.cuda() if x is not None else None

    # Select device; TPU overrides CUDA, CPU is the fallback.
    device = gpu if args.cuda else cpu
    device = tpu if args.tpu else device
    # Optimizers for both halves of the network, in creation order (D, U).
    Up_to_Down = []

    # Xavier-initialise conv/deconv weights; zero the bias only when it
    # exists (layers built with bias=False have m.bias = None, which
    # previously crashed with AttributeError).
    def weights_init(m):
        if isinstance(m, (nn.Conv2d, nn.ConvTranspose2d)):
            torch.nn.init.xavier_normal_(m.weight.data)
            if m.bias is not None:
                m.bias.data.fill_(0)

    # Create an Adadelta optimizer for `module` (LR scaled by the number
    # of workers) and append it to every list in `model_optimizers`.
    def add_optimizer(module,
                      model_optimizers=(),
                      num_workers=1):  #needs a change for lr tpu
        module.apply(weights_init)
        optimizer = torch.optim.Adadelta(module.parameters(),
                                         lr=args.learning_rate * num_workers,
                                         weight_decay=0.001)
        for direction in model_optimizers:
            direction.append(optimizer)
        return optimizer

    # Training dataset: stereo image pairs with ground-truth disparities.
    train_dataset = Dataset(input_left=args.real_left,
                            input_right=args.real_right,
                            output_left=args.disp_left,
                            output_right=args.disp_right)
    # On TPU: shard the dataset across cores and configure prefetching.
    if args.tpu:
        world_size = xm.xrt_world_size()
        train_sampler = torch.utils.data.distributed.DistributedSampler(
            train_dataset,
            num_replicas=world_size,
            rank=xm.get_ordinal(),
            shuffle=True)
        tpu_params = {
            'loader_prefetch_size': args.loader_prefetch_size,
            'device_prefetch_size': args.device_prefetch_size
        }
    else:
        world_size = 1
        train_sampler = None
        tpu_params = {}
    # DataLoader parameters; shuffle and sampler are mutually exclusive in
    # torch DataLoader, so shuffle only when no sampler is installed.
    params_training = {
        'batch_size': args.batch_size,
        'shuffle': train_sampler is None,
        'num_workers': args.num_workers,
        'drop_last': True,
        "sampler": train_sampler
    }
    params_validation = {
        'batch_size': args.batch_size_v,
        'shuffle': True,
        'num_workers': args.num_workers,
        'drop_last': True
    }
    data = {
        "training":
        Data_Generator(train_dataset,
                       params_training,
                       tpu=args.tpu,
                       device=dev if args.tpu else None,
                       tpu_params=tpu_params)
    }
    if args.validation:
        val_dataset = Dataset(input_left=args.real_left_v,
                              input_right=args.real_right_v,
                              output_left=args.disp_left_v,
                              output_right=args.disp_right_v,
                              validate=True)
        data["validation"] = Data_Generator(val_dataset,
                                            params_validation,
                                            tpu=args.tpu,
                                            device=dev if args.tpu else None,
                                            tpu_params=tpu_params)
    # Model: SDMU encoder (D) and depth decoder (U), each optimised
    # separately with a world-size-scaled learning rate.
    D = device(
        SDMU_Encoder(num_filters=parameters["num_filters"],
                     kernels=parameters["kernels"],
                     strides=parameters["strides"],
                     corr=args.corr,
                     D=args.D))  #parms to be filled
    add_optimizer(D, [Up_to_Down], num_workers=world_size)
    U = device(
        SDMU_Depth(up_kernels=parameters["up_kernels"],
                   i_kernels=parameters["i_kernels"],
                   pr_kernels=parameters["pr_kernels"],
                   up_filters=parameters["up_filters"],
                   down_filters=parameters["down_filters"],
                   index=parameters["index"],
                   pr_filters=parameters["pr_filters"]))  #parms to be filled
    add_optimizer(U, [Up_to_Down], num_workers=world_size)
    DispNet_ = device(SDMU(D, U, device, args.c1, args.c2, args.c3, args.c4))
    # Trainer drives the optimisation of the composed network.
    DispNet_trainer = Trainer(Data_Generator=data["training"],
                              optimizers=Up_to_Down,
                              Network=DispNet_,
                              schedule_coeff=parameters["schedule_coeff"],
                              batch_size=args.batch_size)
    trainers = [DispNet_trainer]

    # Checkpoint model + both optimizer state dicts in a single file.
    def save_models(name, step):
        m = {
            'model_state_dict': DispNet_.state_dict(),
            'optimizerD_state_dict': Up_to_Down[0].state_dict(),
            'optimizerU_state_dict': Up_to_Down[1].state_dict(),
        }
        torch.save(
            m, '{0}{1}:DispNet_step_size_{2}.pt'.format(name, args.save, step))

    # Main optimisation loop with periodic logging and checkpointing.
    def training(loggers):  # this may be used as intput to the swamp tpu
        for steps in range(1, args.iterations + 1):
            for trainer in trainers:
                trainer.step(steps)

            if steps % args.log_interval == 0:
                print('STEP {0} x {1}'.format(steps, args.batch_size))
                for logger in loggers:
                    logger.log(steps)
            if steps % args.save_interval == 0 and args.model_path != "":
                save_models(args.model_path, steps)

    # Optional validator evaluated by the logger at val_log_interval.
    validator = None
    if args.validation:
        validator = Validator(Network=DispNet_,
                              Data_Generator=data["validation"])
    # Logger reports training (and validation) statistics.
    DispNet_logger = Logger("SMDU",
                            DispNet_trainer,
                            validator=validator,
                            val_log_interval=args.val_log_interval,
                            log_interval=args.log_interval,
                            TPU=args.tpu)
    # Run training and report wall-clock time.
    start = time()
    training(loggers=[DispNet_logger])  #loggers to be defined
    print("Total training time taken:{0:.2f}s".format(time() - start))
Esempio n. 4
0
def main():
    """Estimate the box-counting (Minkowski) dimension of a trajectory.

    Loads (or generates) trajectory segments, overlays an epsilon-spaced
    grid on the visited region, counts the distinct occupied boxes and
    reports log(N) / log(1 / epsilon).

    NOTE(review): assumes Parameter.GENERATOR_LOOP >= 1 -- with 0 the
    final report would hit an unbound Box_Fill/epsilon; confirm.
    """
    # Initial segment: from a saved model file or a fresh generator run.
    if Parameter.BOX_DIMENSION_READ:
        information, initial_val, initial_t, final_t, delta_t, states, Jacobian = Read_Model(
            Parameter.MODEL_FILE)
    else:
        information, initial_val, initial_t, final_t, delta_t, states, Jacobian = Data_Generator.main(
            OutputFile=False,
            initial_val=[],
            old_information="",
            Calc_Jaco=False)
    for ttl in range(0, Parameter.GENERATOR_LOOP):
        print(ttl, Parameter.GENERATOR_LOOP)
        new_states = np.array(states)
        epsilon = Parameter.EPSILON
        # Per-dimension extent of the trajectory.
        min_set = []
        max_set = []
        for kase in range(0, len(initial_val)):
            min_set.append(min(new_states[:, kase]))
            max_set.append(max(new_states[:, kase]))

        # NOTE: a loop computing `dir_vec = states[i + 1] - states[i]` was
        # removed here -- its result was never used.

        # Grid axis per dimension: ticks epsilon apart covering
        # [min - epsilon/2, max] (plus one tick past the maximum).
        axis_set = []
        for kase in range(0, len(initial_val)):
            val = min_set[kase] - epsilon / 2
            tmp = []
            while 1:
                tmp.append(val)
                if val > max_set[kase]:
                    break
                val += epsilon
            axis_set.append(tmp)

        # Map every state onto its grid box and keep the distinct boxes.
        # (The commented-out multiprocessing.Pool variant was dropped; it
        # can be reinstated if FindBox ever becomes the bottleneck.)
        Box_Fill = {FindBox(state, axis_set, epsilon) for state in states}
        if ttl + 1 == Parameter.GENERATOR_LOOP:
            break
        else:
            # Extend the trajectory from its last state.
            information, initial_val, initial_t, final_t, delta_t, states, Jacobian = Data_Generator.main(
                OutputFile=False,
                initial_val=states[len(states) - 1],
                old_information=information,
                Calc_Jaco=False)
        # Intermediate report for the segment just counted.
        print("++++++++++++++++++++++++++++++")
        print("Box counting dimension: ")
        Box_Dimension = np.log(len(Box_Fill)) / np.log(1 / epsilon)
        print(epsilon, len(Box_Fill), Box_Dimension)
        print("++++++++++++++++++++++++++++++")

    # Final report for the last counted segment.
    print("++++++++++++++++++++++++++++++")
    print("Box counting dimension: ")
    Box_Dimension = np.log(len(Box_Fill)) / np.log(1 / epsilon)
    print(epsilon, len(Box_Fill), Box_Dimension)
    print("++++++++++++++++++++++++++++++")
Esempio n. 5
0
def main():
    """Compute and plot the Lyapunov spectrum of the generated system.

    Propagates an orthonormal frame through the trajectory's Jacobians,
    re-orthonormalising with Gram-Schmidt at every step, and accumulates
    the running time-average of log(growth rate) per basis vector.
    """
    # Initial trajectory segment (with Jacobians): from a saved model file
    # or a fresh generator run.
    if Parameter.LYAPUNOV_READ_FILE:
        information, initial_val, initial_t, _, delta_t, Val_Set, Jacobian = Read_Model(
            Parameter.MODEL_FILE)
    else:
        information, initial_val, initial_t, _, delta_t, Val_Set, Jacobian = Data_Generator.main(
            OutputFile=False,
            initial_val=[],
            old_information="",
            Calc_Jaco=True)

    # Continue the trajectory from its last state; presumably this skips a
    # transient before accumulation starts -- TODO confirm.
    information, initial_val, _, _, delta_t, Val_Set, Jacobian = Data_Generator.main(
        OutputFile=False,
        initial_val=Val_Set[len(Val_Set) - 1],
        old_information="",
        Calc_Jaco=True)
    # output_vals[k][i]: running estimate of the i-th exponent after k
    # steps, starting from all zeros.
    output_vals = [[0 for n in range((len(initial_val)))]]
    final_mat_norm = np.eye(len(initial_val))
    curr_time = initial_t
    time_series = [initial_t]
    for ttl in range(0, Parameter.GENERATOR_LOOP):
        print("Lyapunov spectrum")
        print(ttl, Parameter.GENERATOR_LOOP)
        for kase in range(0, len(Jacobian) - 1):
            # After the first segment, skip index 0: presumably it
            # duplicates the last state of the previous segment -- confirm.
            if ttl != 0 and kase == 0:
                continue
            if kase % 1000 == 0:
                print(kase, len(Jacobian) - 2, end="\r")
            # Push the frame through the linearised dynamics, then
            # re-orthonormalise; final_mat holds the pre-normalisation
            # vectors whose norms give the local expansion rates.
            final_mat_norm = Jacobian[kase] * np.matrix(final_mat_norm)
            final_mat, final_mat_norm = Gram_Schmidt(final_mat_norm)
            new_output = deepcopy(output_vals[len(output_vals) - 1])
            for i in range(0, len(new_output)):
                norm = np.linalg.norm(final_mat[:, i])
                # Incremental update of the time-average of log(norm).
                new_output[i] = (
                    (new_output[i] * (curr_time - time_series[0])) +
                    np.log(norm)) / (curr_time + delta_t - time_series[0])
            curr_time += delta_t
            time_series.append(curr_time)
            output_vals.append(new_output)

        print(output_vals[len(output_vals) - 1])

        # Extend the trajectory unless this was the final segment.
        if ttl + 1 != Parameter.GENERATOR_LOOP:
            information, _, _, _, _, Val_Set, Jacobian = Data_Generator.main(
                OutputFile=False,
                initial_val=Val_Set[len(Val_Set) - 1],
                old_information=information,
                Calc_Jaco=True)

        print()

    # Plot each exponent's running estimate over time.
    fig = plt.gcf()
    fig.set_size_inches(25, 3)
    output_vals = np.matrix(output_vals)
    plt.grid(True)

    for i in range(0, len(initial_val)):
        val = np.array(output_vals[:, i].reshape(-1).reshape(-1))
        plt.plot(time_series, val[0], COLOR_LOOP[(i + 1) % len(COLOR_LOOP)])

    # Sum of all exponents, drawn in the first loop colour.
    sum_val = []
    for i in range(0, len(output_vals)):
        sum_val.append(np.sum(output_vals[i][0]))
    plt.plot(time_series, sum_val, COLOR_LOOP[0])

    print()
    output_str = "Output Vals\nLyapunov vals: \t"
    output_str += str(output_vals[len(output_vals) - 1])
    output_str += "\n sum: " + str(sum_val[len(sum_val) - 1])
    print(output_str)
    plt.show()
Esempio n. 6
0
def train_and_test(train_data_size, offset_into_train_dataset):
    """Train a 10-8-4-8-10 sigmoid autoencoder (TF1 graph mode) and test it.

    Dataset split sizes are read from the `september_meta_report` CSV;
    training consumes `train_data_size` samples starting at
    `offset_into_train_dataset`, then every test sample is reconstructed
    and scored by RMSE.

    Returns:
        [avg_rmse, standart_dev]: mean and standard deviation of the
        per-sample test reconstruction RMSE.
    """
    # Read the dataset partition sizes recorded in the meta report.
    with open(september_meta_report, 'r') as csv_meta_report:
        reader = csv.reader(csv_meta_report)
        next(reader)  # ignore the path to dataset
        next(reader)  # ignore the unique number of ip samples

        trn_size = int(next(reader)[1])
        opt_size = int(next(reader)[1])
        tst_size = int(next(reader)[1])

    # Batch generators; the test generator starts after the training and
    # optimisation partitions (offset trn_size + opt_size).
    gen_class = Data_Generator.Data_Gen()
    trn_gen = gen_class.trn_data_gen(
        train_data_size, offset_into_file=offset_into_train_dataset)
    tst_gen = gen_class.tst_data_gen(trn_size + opt_size, tst_size)

    # Training Parameters
    learning_rate = 0.01
    num_steps = 1  # epochs
    display_step = 50000

    # Network Parameters
    num_hidden_1 = 8  # 1st layer num features
    num_hidden_2 = 4  # 2nd layer num features (the latent dim)
    num_input = 10  # feature dimension of one sample

    # Placeholder fed with one minibatch of num_input-dimensional rows.
    X = tf.placeholder("float", [None, num_input])

    # Randomly initialised parameters for the 2-layer encoder/decoder.
    weights = {
        'encoder_h1': tf.Variable(tf.random_normal([num_input, num_hidden_1])),
        'encoder_h2':
        tf.Variable(tf.random_normal([num_hidden_1, num_hidden_2])),
        'decoder_h1':
        tf.Variable(tf.random_normal([num_hidden_2, num_hidden_1])),
        'decoder_h2': tf.Variable(tf.random_normal([num_hidden_1, num_input])),
    }
    biases = {
        'encoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
        'encoder_b2': tf.Variable(tf.random_normal([num_hidden_2])),
        'decoder_b1': tf.Variable(tf.random_normal([num_hidden_1])),
        'decoder_b2': tf.Variable(tf.random_normal([num_input])),
    }

    # Building the encoder: num_input -> num_hidden_1 -> num_hidden_2
    def encoder(x):
        # Encoder Hidden layer with sigmoid activation #1
        layer_1 = tf.nn.sigmoid(
            tf.add(tf.matmul(x, weights['encoder_h1']), biases['encoder_b1']))
        # Encoder Hidden layer with sigmoid activation #2
        layer_2 = tf.nn.sigmoid(
            tf.add(tf.matmul(layer_1, weights['encoder_h2']),
                   biases['encoder_b2']))
        return layer_2

    # Building the decoder: num_hidden_2 -> num_hidden_1 -> num_input
    def decoder(x):
        # Decoder Hidden layer with sigmoid activation #1
        layer_1 = tf.nn.sigmoid(
            tf.add(tf.matmul(x, weights['decoder_h1']), biases['decoder_b1']))
        # Decoder Hidden layer with sigmoid activation #2
        layer_2 = tf.nn.sigmoid(
            tf.add(tf.matmul(layer_1, weights['decoder_h2']),
                   biases['decoder_b2']))
        return layer_2

    # Construct model
    encoder_op = encoder(X)
    decoder_op = decoder(encoder_op)

    # Prediction
    y_pred = decoder_op
    # Targets (Labels) are the input data.
    y_true = X

    # Define loss and optimizer, minimize the root mean squared error
    loss = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(y_true, y_pred))))
    # optimizer = tf.train.RMSPropOptimizer(learning_rate).minimize(loss)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(loss)
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    start_time = time.time()

    # Start Training
    # Start a new TF session
    with tf.Session() as sess:

        # Run the initializer
        sess.run(init)

        for epoch in range(0, num_steps):
            print('Epoch: ', epoch)
            # Training
            for i in range(1, train_data_size + 1):
                # Prepare Data
                # (only features are needed, not labels)
                batch_x, _ = next(trn_gen)

                # Run optimization op (backprop) and cost op (to get loss value)
                _, l = sess.run([optimizer, loss], feed_dict={X: batch_x})
                # Display logs per step
                if i % display_step == 0 or i == 1:
                    print('Step %i: Minibatch Loss: %f' % (i, l))

        avg_rmse = 0
        rmse_list = []
        # Testing: accumulate the mean RMSE incrementally (each term is
        # divided by tst_size) and keep the per-sample list for the stdev.
        for i in range(1, tst_size + 1):
            batch_x, _ = next(tst_gen)
            g = sess.run(decoder_op, feed_dict={X: batch_x})
            rmse = sqrt(mean_squared_error(np.asarray(batch_x), g))
            avg_rmse += (rmse / tst_size)
            rmse_list.append(rmse)
        # calculating standard deviation (statistics.stdev needs >= 2 samples)
        standart_dev = statistics.stdev(rmse_list)
    print("--- %s seconds ---" % (time.time() - start_time))
    return [avg_rmse, standart_dev]
Esempio n. 7
0
                                              save_weights_only=True,
                                              verbose=1,
                                              period=5)

# Create Early Stopping callback: stop when validation accuracy fails to
# improve by at least 0.001 for 5 consecutive epochs.
early_stop = keras.callbacks.EarlyStopping(monitor='val_acc',
                                           min_delta=0.001,
                                           patience=5,
                                           verbose=1,
                                           mode='max')

# Generate Data - eval samples = 1000, so overall samples must be significantly greater
# (presumably DG.data_gen_2 returns train/eval features, labels and class
# assignments in this order -- TODO confirm against the DG module)
[
    train_data, train_labels, eval_data, eval_labels, train_data_class,
    eval_data_class
] = DG.data_gen_2(samples=samples)


# Build Network
def create_GaussNet2():
    model = keras.Sequential()
    model.add(keras.layers.Dense(4, input_dim=2, activation=tf.nn.relu))
    if FLAG_hidden_layer_1 == True:
        model.add(keras.layers.Dense(32, activation=tf.nn.relu))
    if FLAG_hidden_layer_2 == True:
        model.add(keras.layers.Dense(4, activation=tf.nn.relu))
    if FLAG_hidden_layer_3 == True:
        model.add(keras.layers.Dense(4, activation=tf.nn.relu))
    if FLAG_hidden_layer_4 == True:
        model.add(keras.layers.Dense(4, activation=tf.nn.relu))
    if FLAG_hidden_layer_5 == True:
Esempio n. 8
0
# Use raw strings for all four Windows paths: in the original non-raw
# literals, "\t" in "\train" and "\r" in "\right" were interpreted as
# TAB/CR control characters, producing broken disparity paths.
data = Dataset(r"F:\datasets\disp\Sampler\FlyingThings3D\RGB_cleanpass\left",
               r"F:\datasets\disp\Sampler\FlyingThings3D\RGB_cleanpass\right",
               r"F:\datasets\disp\FlyingThings3D_subset\train\disparity\right",
               r"F:\datasets\disp\FlyingThings3D_subset\train\disparity\right")
# Make the first path raw like the other three: it previously relied on
# "\d", "\S", "\F", "\R", "\l" not being recognised escape sequences
# (a SyntaxWarning on modern Python); the string value is unchanged.
data = Dataset(
    r"F:\datasets\disp\Sampler\FlyingThings3D\RGB_cleanpass\left",
    r"F:\datasets\disp\Sampler\FlyingThings3D\RGB_cleanpass\right",
    r"F:\datasets\disp\FlyingThings3D_subset\train\disparity\right",
    r"F:\datasets\disp\FlyingThings3D_subset\train\disparity\right")
# DataLoader settings for a quick single-sample smoke test (no workers).
params_training = {
    'batch_size': 1,
    'shuffle': True,
    'num_workers': 0,
    'drop_last': True
}
lo = Data_Generator(data, params_training)
# Pull one batch and inspect it; the bare expressions below are an
# interactive (REPL) transcript inspecting shapes, not effective code.
k = lo.next_batch()
k[0].shape
k[1][0]
k[1][0].shape
k[1][1].shape
from Data_Generator import *
data = Dataset(
    "F:\datasets\disp\Sampler\FlyingThings3D\RGB_cleanpass\left",
    r"F:\datasets\disp\Sampler\FlyingThings3D\RGB_cleanpass\right",
    r"F:\datasets\disp\FlyingThings3D_subset\train\disparity\right",
    r"F:\datasets\disp\FlyingThings3D_subset\train\disparity\right")
params_training = {
    'batch_size': 1,
    'shuffle': True,
    'num_workers': 0,
                                                     testrate, valrate,
                                                     valbool)
        # Flatten the per-class splits into global sample/label lists; the
        # label is the class's index within `tumorclasses`.
        for item in training:
            training_list.append(item)
            ytraining_list.append(tumorclasses.index(tclass))
        for item in prediction:
            prediction_list.append(item)
            yprediction_list.append(tumorclasses.index(tclass))
        for item in validation:
            validation_list.append(item)
            yvalidation_list.append(tumorclasses.index(tclass))

    # Training generator over the collected samples; shuffled each epoch.
    train_generator = Data_Generator(training_list,
                                     ytraining_list,
                                     batch_size,
                                     input_size,
                                     n_channels,
                                     num_classes,
                                     shuffle=True)
    # Number of batches per training epoch.
    step_size_train = train_generator.batch_len

    if valbool == 1:
        # Validation generator: fixed order so metrics are comparable
        # across epochs.
        val_generator = Data_Generator(validation_list,
                                       yvalidation_list,
                                       batch_size,
                                       input_size,
                                       n_channels,
                                       num_classes,
                                       shuffle=False)
        step_size_val = val_generator.batch_len
Esempio n. 10
0
# Whether random augmentation transforms are applied to training images.
random_train_image_transformation = config['dataset'][
    'random_train_image_transformation']

# Build (or reuse cached) CSV manifests for the train/test subsets.
train_csv_file, test_csv_file = create_specialized_csv(target_type,
                                                       train_samples,
                                                       test_samples,
                                                       load_existing_cache,
                                                       data_location)

# Training pipeline: colour images plus resized mini-images, prefetched
# by 3 queue workers with up to 50 queued items.
batch_dataset_train = bds.BatchDataset(train_csv_file, batch_size)
data_generator_train = Data_Generator.Data_Generator(
    target_type,
    batch_dataset_train,
    image_width,
    image_height,
    train_base_image_path,
    include_resized_mini_images=True,
    output_test_images=output_test_images,
    queue_workers=3,
    queue_size=50,
    color=True,
    random_image_transformation=random_train_image_transformation)
#zip_path='./data/stage_1_train_images/rsna-intracranial-hemorrhage-detection.zip')

batch_dataset_test = bds.BatchDataset(test_csv_file, batch_size)
data_generator_test = Data_Generator.Data_Generator(
    target_type,
    batch_dataset_test,
    image_width,
    image_height,
    test_base_image_path,
    include_resized_mini_images=True,
Esempio n. 11
0
# A2C hyper-parameters: decay actor and critic LR by 4% every 5000 steps.
actor_lr_decay_step = 5000
actor_lr_decay_rate = 0.96
critic_lr_decay_step = 5000
critic_lr_decay_rate = 0.96
disable_tensorboard = False
load_path = ''  # checkpoint path to resume from; '' means start fresh
output_dir = 'tsp_model/A2C'
log_dir = 'runs/A2C'
data_dir = 'data/tsp/A2C'

torch.manual_seed(random_seed)

if not disable_tensorboard:
    writer = SummaryWriter(os.path.join(log_dir, 'tsp'))

# Synthetic VRP training data: train_size instances of seq_len nodes each.
training_dataset = Data_Generator.VRPDataset(node_num=seq_len,
                                             num_samples=train_size)
# val_dataset = Data_Generator.VRPDataset(filename='./data/tsp/tsp20_test.txt')   # if specify filename, other arguments not required
training_dataloader = DataLoader(training_dataset,
                                 batch_size=batch_size,
                                 shuffle=False,
                                 num_workers=1)
# validation_dataloader = DataLoader(val_dataset, batch_size=1, shuffle=False, num_workers=1)

# instantiate the Neural Combinatorial Opt with RL module
model = NeuralCombOptRL(embedding_dim, hidden_dim, seq_len, n_glimpses,
                        n_process_blocks, C, use_tanh, beam_size, is_train,
                        use_cuda, vehicle_init_capacity, p_dim, R)

# Load the model parameters from a saved state
if load_path != '':
    print('[*] Loading model from {}'.format(load_path))
def create_one_group(i):
    """Generate TSP training set number i+1 and persist it to ./DataSets/."""
    group_number = str(i + 1)
    dataset = Data_Generator.TSPDataset(500, 10)
    np.save('./DataSets/trainset' + group_number + '.npy', dataset)
    print('trainset ' + group_number + ' is done!')