def train(M, FLAGS, model_name=None):
    """Smoke-test the data pipeline: load the dataset named by FLAGS.src,
    report split sizes, pull one 128-sample training batch, and display the
    first image of that batch together with its argmax label.

    Args:
        M: model object (unused in this debug path; kept for interface parity).
        model_name: optional model identifier (unused here; kept for callers).
        FLAGS: config namespace; must provide ``src`` plus whatever
            ``get_data`` reads from it — TODO confirm against get_data.
    """
    print("train called")
    data = get_data(FLAGS.src, FLAGS)
    print(f"The number of train data is {data.train_num}")
    print(f"The number of test data is {data.test_num}")
    print(f"The number of validation data is {data.val_num}")

    # Pull one batch to sanity-check shapes and the pixel value range.
    images, labels = data.train.next_batch(128)
    print(f"The shape of the batch images is {images.shape}")
    print(f"The shape of the batch labels is {labels.shape}")
    print(np.max(images))
    print(np.min(images))

    # Show the first image of the batch with its label.
    # (The original looped `for i in range(1)` — a single iteration.)
    first_data = images[0]
    print(first_data.shape)
    plt.imshow(first_data)
    plt.show()
    print(f"The label of the image is {np.argmax(labels[0])}")
# --- run configuration -------------------------------------------------
# Labels are encoded as vectors, ordered most-frequent-first.
config.label_type = 'vec'
config.label_order = 'freq_first'

# cuda
# Use the GPU only when CUDA is available AND at least one GPU id was given.
use_cuda = torch.cuda.is_available() and len(opt.gpus) > 0
if use_cuda:
    torch.cuda.set_device(opt.gpus[0])
    torch.cuda.manual_seed(opt.seed)
print(use_cuda)

# data
# Load the train/test item and label arrays for this config.
print('loading data...\n')
start_time = time.time()
item_train, label_train, \
item_test, label_test = dataset.get_data(config)
'''
trainset = dataset.Dataset(item_train, label_train, config.label_set_size, input_type = config.module_type)
validset = dataset.Dataset(item_test, label_test, config.label_set_size, input_type = config.module_type)
testset = dataset.Dataset(item_test, label_test, config.label_set_size, input_type = config.module_type)
'''
# Wrap the raw arrays in batching loaders. NOTE(review): the validation
# loader reuses the TEST split (item_test/label_test) — confirm intended.
trainloader = dataloader.wgan_data_loader(item_train, label_train, config.batch_size, config, shuffle=True, balance=True)
validloader = dataloader.wgan_data_loader(item_test, label_test, config.batch_size, config,
# [0, 1, 0, 1, 1] """ result = tf.zeros(depth) for scalar in input: result += tf.one_hot(scalar, depth) return result """ Main stub """ print(colors.BOLD, 'Loading dataset...', colors.ENDC) data_set, labels = data.get_data() print(colors.BOLD, 'Transforming data...', colors.ENDC, end='') data_set = list(map(partial(transform_input_one_hot, depth=__dim__), data_set)) data_set = tf.stack(data_set) data_set = tf.map_fn(lambda t: t / tf.reduce_max(t), data_set) train = data_set[:800] validation = data_set[800:] tf.InteractiveSession() train_np = train.eval() validation_np = validation.eval()