Example #1
def load_data(args):
    """
    Modify this to load your data and labels
    """

    validation_data_params = {
        "dim": (args.patch_dim, args.patch_dim, args.patch_dim),
        "batch_size": 1,
        "n_in_channels": args.number_input_channels,
        "n_out_channels": 1,
        "train_test_split": args.train_test_split,
        "augment": False,
        "shuffle": False,
        "seed": args.random_seed
    }
    validation_generator = DataGenerator(False, args.data_path,
                                         **validation_data_params)

    # for batch_idx in tqdm(range(validation_generator.num_batches),
    #                       desc="Predicting on batch"):

    batch_idx = 0
    imgs, msks = validation_generator.get_batch(batch_idx)
    fileIDs = validation_generator.get_batch_fileIDs(batch_idx)
    """
    OpenVINO uses channels first tensors (NCHWD).
    TensorFlow usually does channels last (NHWDC).
    So we need to transpose the axes.
    """
    imgs = imgs.transpose((0, 4, 1, 2, 3))
    msks = msks.transpose((0, 4, 1, 2, 3))

    return imgs, msks, fileIDs
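
A minimal sketch, using only numpy, of the channels-last to channels-first transpose performed above (the shapes are hypothetical):

import numpy as np

imgs = np.zeros((2, 64, 64, 64, 1))        # NHWDC: batch, spatial dims, channel last
imgs_cf = imgs.transpose((0, 4, 1, 2, 3))  # NCHWD: channel moved to axis 1
print(imgs_cf.shape)                       # (2, 1, 64, 64, 64)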
Example #2
def main(args):

    print("Starting")
    callbacks_list = list()

    logdir = 'logs/scalars/' + datetime.now().strftime('%Y%m%d-%H%M%S')
    # Keras Tensorboard callback

    tb_callback = keras.callbacks.TensorBoard(log_dir=logdir)
    callbacks_list.append(tb_callback)
    print(callbacks_list)

    train_loader = DataGenerator(mode='train')
    val_loader = DataGenerator(mode='val')

    print('loaded data generators')
    model = siamese_architecture()

    print('training')
    train(n_epochs=args.n_epochs,
          lr=args.lr,
          model=model,
          train_gen=train_loader,
          val_gen=val_loader,
          callbacks_list=callbacks_list)
Example #3
    def __init__(self):

        self.epoch = 150
        self.lr = 0.005
        self.lr_drop = 5
        self.batch_size = 1
        self.model = build_model()
        self.trainLoader = DataGenerator('./data/rgb_train.txt',
                                         batch_size=self.batch_size,
                                         random_shift=True,
                                         transform=trainAug())
        self.valLoader = DataGenerator('./data/rgb_val.txt',
                                       batch_size=self.batch_size,
                                       random_shift=False,
                                       transform=valAug())
Example #4
def handle_camera():
    # Load input video
    data_loader = CameraLoader().start()
    (fourcc, fps, frameSize) = data_loader.videoinfo()
    print('The video runs at {} fps'.format(fps))
    # =========== end video ===============
    # Load detection loader
    print('Loading YOLO model..')
    sys.stdout.flush()
    det_loader = DetectionLoader(data_loader, batchSize=args.detbatch).start()
    #  start a thread to read frames from the file video stream
    det_processor = DetectionProcessor(det_loader).start()
    # Load pose model

    runtime_profile = {'dt': [], 'pt': [], 'pn': []}

    #Data generator
    generator = DataGenerator(det_processor, args.fast_inference).start()

    # # Data writer
    # save_path = os.path.join(args.outputpath, 'AlphaPose_' + ntpath.basename(video_file).split('.')[0] + '.avi')
    # # writer = DataWriter(args.save_video, save_path, cv2.VideoWriter_fourcc(*'XVID'), fps, frameSize).start()
    # writer = DataWriter(args.save_video).start()

    print('Start pose estimation...')

    return generator
Example #5
def train_function(conf, args, input, label, ch_indicator):
    """
        train function
            input args:
                conf:
                args:
                input:
                label:
                ch_indcator:
    """
    # initialize the network model
    gan = KernelNet(conf, args, input, ch_indicator)
    # data generation
    dataset = DataGenerator(conf, gan, input, label)
    # batch size
    batch_size = conf.batch_size
    for iteration in range(conf.max_iters):
        # learning rate adjust
        if iteration % conf.update_l_rate_freq == 0:
            for params in gan.optimizer_G.param_groups:
                params['lr'] /= conf.update_l_rate_rate

        # generate a batch of input and label
        for batch_index in range(iteration * batch_size, (iteration + 1) * batch_size):
            g_in, d_in = dataset[batch_index]
            if batch_index == iteration*batch_size:
                # if the batch index is the first of the batch, copy it
                g_batch = g_in
                d_batch = d_in
            else:
                # generate the batch, concat the rest input
                g_batch = np.concatenate((g_batch, g_in), axis=0)
                d_batch = np.concatenate((d_batch, d_in), axis=0)

        g_batch = torch.from_numpy(g_batch).float()
        d_batch = torch.from_numpy(d_batch).float()
        g_batch = g_batch.cuda()
        d_batch = d_batch.cuda()
        
        gan.train(g_batch, d_batch, iteration)
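
The inner concatenation loop above can be collapsed into a single concatenate call; a minimal sketch, assuming each generator item is a (g_in, d_in) pair of arrays with a leading batch axis of 1 (the helper name collect_batch is hypothetical):

import numpy as np

def collect_batch(dataset, start, size):
    """Gather `size` consecutive items and join them along the batch axis."""
    items = [dataset[i] for i in range(start, start + size)]
    g_batch = np.concatenate([g for g, _ in items], axis=0)
    d_batch = np.concatenate([d for _, d in items], axis=0)
    return g_batch, d_batch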
Example #6
reduce_lr = K.callbacks.ReduceLROnPlateau(monitor="val_loss", factor=0.2,
                                          patience=5, min_lr=0.0001)

callbacks = [checkpoint, tb_logs, reduce_lr]

training_data_params = {"dim": (args.patch_height, args.patch_width, args.patch_depth),
                        "batch_size": args.bz,
                        "n_in_channels": args.number_input_channels,
                        "n_out_channels": 1,
                        "train_test_split": args.train_test_split,
                        "validate_test_split": args.validate_test_split,
                        "augment": True,
                        "shuffle": True,
                        "seed": args.random_seed}

training_generator = DataGenerator("train", args.data_path,
                                   **training_data_params)
training_generator.print_info()

validation_data_params = {"dim": (args.patch_height, args.patch_width, args.patch_depth),
                          "batch_size": 1,
                          "n_in_channels": args.number_input_channels,
                          "n_out_channels": 1,
                          "train_test_split": args.train_test_split,
                          "validate_test_split": args.validate_test_split,
                          "augment": False,
                          "shuffle": False,
                          "seed": args.random_seed}
validation_generator = DataGenerator("validate", args.data_path,
                                     **validation_data_params)
validation_generator.print_info()
Example #7
                                "combined_dice_ce_loss": combined_dice_ce_loss
                            })

print("Loading images and masks from test set")

validation_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": 1,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "augment": False,
    "shuffle": False,
    "seed": args.random_seed
}
validation_generator = DataGenerator(False, args.data_path,
                                     **validation_data_params)

m = model.evaluate_generator(validation_generator,
                             verbose=1,
                             max_queue_size=args.num_prefetched_batches,
                             workers=args.num_data_loaders,
                             use_multiprocessing=False)

print("\n\nTest metrics")
print("============")
for idx, name in enumerate(model.metrics_names):
    print("{} = {:.4f}".format(name, m[idx]))

save_directory = "predictions_directory"
Example #8
K.backend.set_session(SESS)

unet_model = unet(channels_last = True)  # channels first or last

model = K.models.load_model(args.saved_model, custom_objects=unet_model.custom_objects)

print("Loading images and masks from test set")

validation_data_params = {"dim": (args.patch_height, args.patch_width, args.patch_depth),
                          "batch_size": 1,
                          "n_in_channels": args.number_input_channels,
                          "n_out_channels": 1,
                          "train_test_split": args.train_test_split,
                          "augment": False,
                          "shuffle": False, "seed": args.random_seed}
testing_generator = DataGenerator("test", args.data_path,
                                     **validation_data_params)
testing_generator.print_info()

m = model.evaluate_generator(testing_generator, verbose=1,
                             max_queue_size=args.num_prefetched_batches,
                             workers=args.num_data_loaders,
                             use_multiprocessing=False)

print("\n\nTest metrics")
print("============")
for idx, name in enumerate(model.metrics_names):
    print("{} = {:.4f}".format(name, m[idx]))


save_directory = "predictions_directory"
try:
Example #9
    class_weights = class_weight.compute_class_weight('balanced',
                                                      np.unique(y_train),
                                                      y_train)
    print(class_weights)

    sample_array = []

    BATCH_SIZE = 8
    IMG_WIDTH, IMG_HEIGHT = 1024, 1024
    TRAIN_IMG_PATH = '/media/parth/DATA/datasets/aptos_2019/train_cropped'
    print('---------------------Initialized Training---------------------\n')
    # Add Image augmentation to our generator
    train_datagen = DataGenerator(TRAIN_IMG_PATH,
                                  batch_size=BATCH_SIZE,
                                  dataframe=train,
                                  dim=(IMG_HEIGHT, IMG_WIDTH),
                                  split=True)
    print('Found {} Train Images'.format(len(train_datagen) * BATCH_SIZE))
    val_datagen = DataGenerator(TRAIN_IMG_PATH,
                                batch_size=BATCH_SIZE,
                                dataframe=test,
                                dim=(IMG_HEIGHT, IMG_WIDTH),
                                split=True)
    print('Found {} Val Images\n'.format(len(val_datagen) * BATCH_SIZE))
    # print('\n\n')
    # print(len(train_datagen))
    model = create_model((IMG_HEIGHT // 2, IMG_WIDTH // 2),
                         class_weights,
                         split=True)
Example #10
                               momentum=0.9,
                               decay=0,
                               nesterov=False)
    # mobilenet.model.compile(optimizer=opt, loss=bce_dice_loss, metrics=[iou_metric])
    mobilenet.model.compile(optimizer=opt,
                            loss='categorical_crossentropy',
                            metrics=['accuracy'])

    # Get data generators
    train_folder = "/home/dep_pic/AI_Data/AI_Train_Data/LIP/LIP/TrainVal_images/train_all"
    train_label_folder = "/home/dep_pic/AI_Data/AI_Train_Data/LIP/LIP/TrainVal_parsing_annotations/train_all"
    train_id_file = "/home/dep_pic/AI_Data/AI_Train_Data/LIP/LIP/TrainVal_images/train_id.txt"
    train_generator = DataGenerator(df=train_folder,
                                    lf=train_label_folder,
                                    id_file=train_id_file,
                                    resize=(224, 224),
                                    shuffle=True,
                                    augmentations=True,
                                    cla_num=cla_num)

    val_folder = "/home/dep_pic/AI_Data/AI_Train_Data/LIP/LIP/TrainVal_images/val_half"
    val_label_folder = "/home/dep_pic/AI_Data/AI_Train_Data/LIP/LIP/TrainVal_parsing_annotations/train_all"
    val_id_file = "/home/dep_pic/AI_Data/AI_Train_Data/LIP/LIP/TrainVal_images/val_id.txt"
    val_generator = DataGenerator(df=val_folder,
                                  lf=val_label_folder,
                                  id_file=val_id_file,
                                  resize=(224, 224),
                                  shuffle=False,
                                  augmentations=False,
                                  cla_num=cla_num)
Example #11
                'num_class': num_class,
                'split_set': 'Train',
                'shuffle': True}

params_valid = {'batch_size': 200,
                'label_type': label_type,
                'num_class': num_class,
                'split_set': 'Validation',
                'shuffle': False}

# Output models saving folder
if not os.path.isdir('./Models/'):
    os.makedirs('./Models/')  

# Generators
training_generator = DataGenerator(root_dir, **params_train)
validation_generator = DataGenerator(root_dir, **params_valid)

# Model Settings
if label_type == 'attr':
    filepath='./Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+label_type+'.hdf5'
elif label_type == 'class':
    filepath='./Models/DenseNN_model[epoch'+str(epochs)+'-batch'+str(batch_size)+'-nodes'+str(num_nodes)+']_'+num_class+'.hdf5'
checkpoint = ModelCheckpoint(filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
callbacks_list = [checkpoint]

# Model structure loading
if label_type == 'attr':
    model = dense_network_MTL(num_nodes=num_nodes)
elif label_type == 'class':
    model = dense_network_class(num_nodes=num_nodes, num_class=int(num_class.split('-')[0]))
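
The checkpoint filename concatenation above can be expressed with f-strings for readability; a minimal sketch, assuming the same epochs, batch_size, num_nodes, label_type and num_class variables as in the example:

if label_type == 'attr':
    filepath = f'./Models/DenseNN_model[epoch{epochs}-batch{batch_size}-nodes{num_nodes}]_{label_type}.hdf5'
elif label_type == 'class':
    filepath = f'./Models/DenseNN_model[epoch{epochs}-batch{batch_size}-nodes{num_nodes}]_{num_class}.hdf5'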
Example #12
#imgs_test = np.load(os.path.join(sys.path[0],"imgs_test_3d.npy"))
#msks_test = np.load(os.path.join(sys.path[0],"msks_test_3d.npy"))

seed = hvd.rank()  # Make sure each worker gets different random seed
training_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": args.bz,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "augment": True,
    "shuffle": True,
    "seed": seed
}

training_generator = DataGenerator(True, args.data_path,
                                   **training_data_params)

validation_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": 1,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "augment": False,
    "shuffle": False,
    "seed": args.random_seed
}
validation_generator = DataGenerator(False, args.data_path,
                                     **validation_data_params)

# Fit the model
Example #13
File: main.py Project: 4AI/AGN
dataloader = DataLoader(tokenizer,
                        config['max_len'],
                        use_vae=True,
                        batch_size=config["batch_size"],
                        ae_epochs=config['ae_epochs'])
dataloader.set_train(config['train_path'])
dataloader.set_dev(config['dev_path'])
dataloader.save_autoencoder(
    os.path.join(config['save_dir'], 'autoencoder.weights'))
dataloader.save_vocab(os.path.join(config['save_dir'], 'vocab.pickle'))

accuracy_list = []
f1_list = []
for idx in range(1, config['iterations'] + 1):
    print("build generator")
    generator = DataGenerator(config['batch_size'], config['max_len'])
    generator.set_dataset(dataloader.train_set)
    metrics_callback = Metrics(
        config['batch_size'], config['max_len'], dataloader.dev_set,
        os.path.join(config['save_dir'], 'clf_model.weights'))
    config['steps_per_epoch'] = generator.steps_per_epoch
    config['output_size'] = dataloader.label_size
    model = AGNClassifier(config)
    print("start to fitting...")
    model.model.fit(iter(generator),
                    steps_per_epoch=generator.steps_per_epoch,
                    epochs=config['epochs'],
                    callbacks=[metrics_callback],
                    verbose=config['verbose'])

    accuracy = max(metrics_callback.history["val_acc"])
Example #14
with open("ids_train", 'rb') as f:
    ids_train = pickle.load(f)
with open("y_train", 'rb') as f:
    y_train = pickle.load(f)
with open("ids_test", 'rb') as f:
    ids_test = pickle.load(f)
with open("y_test", 'rb') as f:
    y_test = pickle.load(f)

print(ids_train)

print("LOAD DATA FROM FILE DONE")

# exit()
train_generator = DataGenerator(ids_train,
                                y_train,
                                batch_size=8,
                                n_classes=len(classes))
valid_generator = DataGenerator(ids_test,
                                y_test,
                                batch_size=8,
                                n_classes=len(classes),
                                shuffle=False)

print("train_generator:", len(train_generator))
print("valid_generator:", len(valid_generator))

model = Sequential()
model.add(Conv1D(128, kernel_size=5, input_shape=(256, 768)))
# model.add(GlobalMaxPooling1D())
model.add(Conv1D(64, kernel_size=3, activation='relu'))
model.add(GlobalMaxPooling1D())
Example #15
                                          min_lr=0.0001)

callbacks = [checkpoint, tb_logs, reduce_lr]

training_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": args.bz,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "augment": True,
    "shuffle": True,
    "seed": args.random_seed
}

training_generator = DataGenerator(True, args.data_path,
                                   **training_data_params)

validation_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": 1,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "augment": False,
    "shuffle": False,
    "seed": args.random_seed
}
validation_generator = DataGenerator(False, args.data_path,
                                     **validation_data_params)

# Fit the model
Example #16
def train_model():
    ## GPU setting: use GPU 1, allow growth, cap memory at 40%
    os.environ["CUDA_VISIBLE_DEVICES"] = "1"
    config = tf.ConfigProto(device_count={'GPU': 1})
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.4
    session = tf.Session(config=config)
    set_session(session)
    early_stopping = EarlyStopping(monitor='val_loss', patience=10)

    ## Parameter
    epochs = 150
    batch_size = 64
    cropsize = 200
    input_shape = (cropsize, 40, 1)

    print('input_shape = ', input_shape)

    # Parameters
    params = {
        'dim': input_shape,
        'batch_size': 64,
        'n_classes': 921,
        'shuffle': True
    }

    # Datasets
    with open('./data/train.txt', 'r') as fp:
        train_list = fp.readlines()

    with open('./data/val.txt', 'r') as fp:
        val_list = fp.readlines()

    with open('./data/test.txt', 'r') as fp:
        test_list = fp.readlines()

    file_list = train_list + val_list + test_list
    labels = dict()
    for i in range(len(file_list)):
        label, filename, _ = file_list[i].replace('\n', '').split('\t')
        labels[filename] = int(label)

    partition = dict()  # IDs
    partition['train'] = []
    for i in range(len(train_list)):
        label, filename, start_idx = train_list[i].replace('\n',
                                                           '').split('\t')
        partition['train'].append([filename, start_idx])
    partition['validation'] = []
    for i in range(len(val_list)):
        label, filename, start_idx = val_list[i].replace('\n', '').split('\t')
        partition['validation'].append([filename, start_idx])
    partition['evaluation'] = []
    for i in range(len(test_list)):
        label, filename, start_idx = test_list[i].replace('\n', '').split('\t')
        partition['evaluation'].append([filename, start_idx])

    # Generators
    training_generator = DataGenerator(partition['train'],
                                       labels,
                                       cropsize,
                                       flag='random',
                                       **params)
    validation_generator = DataGenerator(partition['validation'],
                                         labels,
                                         cropsize,
                                         flag='fix',
                                         **params)
    evaluation_generator = DataGenerator(partition['evaluation'],
                                         labels,
                                         cropsize,
                                         flag='fix',
                                         **params)

    ## Load Model
    model = build_CRNN_200(input_shape)
    adam = optimizers.Adam(lr=0.001)
    model.compile(loss=amsoftmax_loss, optimizer=adam, metrics=['accuracy'])
    # Train model on dataset
    history = model.fit_generator(generator=training_generator,
                                  validation_data=validation_generator,
                                  use_multiprocessing=True,
                                  epochs=epochs,
                                  verbose=1,
                                  callbacks=[early_stopping],
                                  workers=6)
    model.summary()

    now = datetime.datetime.now()
    Today = now.strftime('%Y%m%d')

    os.makedirs('best_model', exist_ok=True)
    # Save model
    model.save('./best_model/model_speaker_2sec_64.h5')

    # Evaluation
    score = model.evaluate_generator(evaluation_generator)
    print('loss, acc : ', score)

    try:
        # loss graph
        fig, loss_ax = plt.subplots()
        loss_ax.plot(history.history['loss'], 'y', label='train_loss')
        loss_ax.plot(history.history['val_loss'], 'r', label='val_loss')
        loss_ax.set_xlabel('epoch')
        loss_ax.set_ylabel('loss')
        plt.savefig('./best_model/training_curve.png')

        # accuracy graph
        fig, acc_ax = plt.subplots()
        acc_ax.plot(history.history['acc'], 'b', label='train_acc')
        acc_ax.plot(history.history['val_acc'], 'g', label='val_acc')
        acc_ax.set_xlabel('epoch')
        acc_ax.set_ylabel('acc')
        acc_ax.set_ylim([0, 1.1])
        fig.legend(loc='upper left')

        plt.savefig('./best_model/accuracy_curve.png')

    except Exception as e:
        print('Failed to save graph')
        print(e)
    clear_session()
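
The three partition-building loops in Example #16 all parse the same "label<TAB>filename<TAB>start_idx" line format; a minimal sketch of a shared helper (the function name parse_split_file is hypothetical and not part of the original code):

def parse_split_file(path):
    """Parse lines of 'label\tfilename\tstart_idx' into a labels dict and an ID list."""
    labels, ids = {}, []
    with open(path, 'r') as fp:
        for line in fp:
            label, filename, start_idx = line.rstrip('\n').split('\t')
            labels[filename] = int(label)
            ids.append([filename, start_idx])
    return labels, ids

# Hypothetical usage mirroring the example:
# train_labels, train_ids = parse_split_file('./data/train.txt')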
Example #17
    def test(self, error_func_list=None, is_visualize=False):
        from demorender import demoAll
        total_task = len(self.test_data)
        print('total img:', total_task)

        model = self.net.model
        total_error_list = []
        num_output = self.mode[3]
        num_input = self.mode[4]
        data_generator = DataGenerator(all_image_data=self.test_data,
                                       mode=self.mode[2],
                                       is_aug=False,
                                       is_pre_read=self.is_pre_read)

        with torch.no_grad():
            model.eval()
            for i in range(len(self.test_data)):
                data = data_generator.__getitem__(i)
                x = data[0]
                x = x.to(self.net.device).float()
                y = [data[j] for j in range(1, 1 + num_input)]
                for j in range(num_input):
                    y[j] = y[j].to(x.device).float()
                    y[j] = torch.unsqueeze(y[j], 0)
                x = torch.unsqueeze(x, 0)
                outputs = model(x, *y)

                p = outputs[-1]
                x = x.squeeze().cpu().numpy().transpose(1, 2, 0)
                p = p.squeeze().cpu().numpy().transpose(1, 2, 0) * 280
                b = sio.loadmat(self.test_data[i].bbox_info_path)
                gt_y = y[0]
                gt_y = gt_y.squeeze().cpu().numpy().transpose(1, 2, 0) * 280

                temp_errors = []
                for error_func_name in error_func_list:
                    error_func = getErrorFunction(error_func_name)
                    error = error_func(gt_y, p, b['Bbox'], b['Kpt'])
                    temp_errors.append(error)
                total_error_list.append(temp_errors)
                print(self.test_data[i].init_image_path, end='  ')
                for er in temp_errors:
                    print('%.5f' % er, end=' ')
                print('')
                if is_visualize:

                    if temp_errors[0] > 0.00:
                        tex = np.load(self.test_data[i].texture_path.replace(
                            'zeroz2', 'full')).astype(np.float32)
                        init_image = np.load(
                            self.test_data[i].cropped_image_path).astype(
                                np.float32) / 255.0
                        show([p, tex, init_image], mode='uvmap')
                        init_image = np.load(
                            self.test_data[i].cropped_image_path).astype(
                                np.float32) / 255.0
                        show([gt_y, tex, init_image], mode='uvmap')
                        demobg = np.load(
                            self.test_data[i].cropped_image_path).astype(
                                np.float32)
                        init_image = demobg / 255.0
                        img1, img2 = demoAll(p, demobg, is_render=False)
                mean_errors = np.mean(total_error_list, axis=0)
                for er in mean_errors:
                    print('%.5f' % er, end=' ')
                print('')
            for i in range(len(error_func_list)):
                print(error_func_list[i], mean_errors[i])

            se_idx = np.argsort(np.sum(total_error_list, axis=-1))
            se_data_list = np.array(self.test_data)[se_idx]
            se_path_list = [a.cropped_image_path for a in se_data_list]
            sep = '\n'
            with open('errororder.txt', 'w', encoding='utf-8') as fout:
                fout.write(sep.join(se_path_list))
Example #18
        ckpt = ModelCheckpoint(monitor='val_loss',
                               filepath=ckpt_path,
                               save_best_only=True,
                               save_weights_only=True,
                               verbose=1,
                               mode='min')

        tensorboard = TensorBoard(log_dir=args.outputs,
                                  histogram_freq=0,
                                  write_graph=True,
                                  write_images=True)

        callbacks = [lr, ckpt, tensorboard]

        train_generator = DataGenerator(path=os.path.join(
            args.dataset, 'train'),
                                        batch_size=args.batch_size)

        valid_generator = DataGenerator(path=os.path.join(args.dataset, 'val'),
                                        batch_size=args.batch_size)

        print('Training on {} samples'.format(len(train_generator)))
        print('Validating on {} samples'.format(len(valid_generator)))

        model.fit_generator(
            generator=train_generator,
            initial_epoch=0,
            epochs=args.epochs,
            verbose=1,
            validation_data=valid_generator,
            callbacks=callbacks)
Example #19
from dataloader import DataGenerator

# hyper-parameters
EMBEDDING_DIM = 768
batch_size = 32
epochs = 20
rnn_units = 256

# load data
training_data = np.load('preprocessed_data/training_data.npz',
                        allow_pickle=True)
x_train = training_data['x']
y_train = training_data['y']
del training_data

# model
model = Sequential()
model.add(Masking(mask_value=0., input_shape=(None, EMBEDDING_DIM)))
model.add(Bidirectional(LSTM(rnn_units, return_sequences=True)))
model.add(Attention(bias=False))
model.add(Dense(2, activation='softmax'))
model.compile(loss='binary_crossentropy', optimizer='rmsprop', metrics=['acc'])
print(model.summary())

# train
train_generator = DataGenerator(x_train, y_train, batch_size=batch_size)
model.fit(train_generator, epochs=epochs)

# save model
model.save('my_model.h5')
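
Several examples on this page pass a DataGenerator straight to fit/fit_generator, which relies on the Keras Sequence interface; a minimal sketch of such a generator for in-memory arrays (the class name ArrayBatchGenerator is hypothetical and is not the DataGenerator used in these examples):

import numpy as np
from tensorflow import keras

class ArrayBatchGenerator(keras.utils.Sequence):
    """Yield (x, y) batches from in-memory numpy arrays."""

    def __init__(self, x, y, batch_size=32):
        self.x, self.y, self.batch_size = x, y, batch_size

    def __len__(self):
        # Number of batches per epoch
        return int(np.ceil(len(self.x) / self.batch_size))

    def __getitem__(self, idx):
        sl = slice(idx * self.batch_size, (idx + 1) * self.batch_size)
        return self.x[sl], self.y[sl]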
Example #20
model = unet_model.model.load_model(args.saved_model,
                                    custom_objects=unet_model.custom_objects)

print("Loading images and masks from test set")

validation_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": 1,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "augment": False,
    "shuffle": False,
    "seed": args.random_seed
}
testing_generator = DataGenerator("test", args.data_path,
                                  **validation_data_params)
testing_generator.print_info()

m = model.evaluate_generator(testing_generator,
                             verbose=1,
                             max_queue_size=args.num_prefetched_batches,
                             workers=args.num_data_loaders,
                             use_multiprocessing=False)

print("\n\nTest metrics")
print("============")
for idx, name in enumerate(unet_model.model.metrics_names):
    print("{} = {:.4f}".format(name, m[idx]))

save_directory = "predictions_directory"
try:
Example #21
batch_size = 128
#
model1_epoch = 40
model2_epoch = 50
learning_rate = [0.000001]

#
for i in learning_rate:

    # print("initial discriminative patch : ", end =" ")
    model1, model2 = ConvolutionNN(), ConvolutionNN()
    train_Instance, train_Instance_label, train_Instance_latent = data_generation_MIL(
        "C:\\Users\\yeon\\datasets\\CancerClassify\\Train")
    valid_Instance, valid_Instance_label, valid_Instance_latent = data_generation_MIL(
        "C:\\Users\\yeon\\datasets\\CancerClassify\\Valid")
    train_generator = DataGenerator(1).generate(train_Instance, train_Instance_label)
    valid_generator = DataGenerator(1).generate(valid_Instance, valid_Instance_label)
    num_bag = len(train_Instance)
    val_bag = len(valid_Instance)
    model1, model2 = train_step1(model1, model2, model1_epoch, model2_epoch, trn_num_bag=num_bag, val_num_bag=val_bag,
                                 batch_size=batch_size,
                                 learning_rate=i, latent_bag=train_Instance_latent, trn_loader=train_generator,
                                 val_loader=valid_generator, per1=65, per2=65)
    del train_Instance
    del train_Instance_label
    del train_Instance_latent
    del valid_Instance
    del valid_Instance_label
    del valid_Instance_latent
    del train_generator
    del valid_generator
Example #22
    checkpoint
]

training_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": args.bz,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "validate_test_split": args.validate_test_split,
    "augment": True,
    "shuffle": True,
    "seed": hvd.rank()
}

training_generator = DataGenerator("train", args.data_path,
                                   **training_data_params)
if (hvd.rank() == 0):
    training_generator.print_info()

validation_data_params = {
    "dim": (args.patch_height, args.patch_width, args.patch_depth),
    "batch_size": 1,
    "n_in_channels": args.number_input_channels,
    "n_out_channels": 1,
    "train_test_split": args.train_test_split,
    "validate_test_split": args.validate_test_split,
    "augment": False,
    "shuffle": False,
    "seed": args.random_seed
}
validation_generator = DataGenerator("validate", args.data_path,
Example #23
from dataloader import DataGenerator, Dataloader
from myNet import Nets
import numpy as np
import os
import time

H, W, C = 224, 224, 3
shape = (H, W, C)
n_classes = 5
batch_size = 16
epochs = 10
save_model = './model/aslCNN.h5'

train_data = DataGenerator('./train', n_classes)
valid_data = DataGenerator('./valid', n_classes).valid_generator()

nets = Nets(n_classes, shape)
model = nets.CNN()


def train():
    model.summary()
    model.fit_generator(train_data.train_generator(batch_size),
                        epochs=epochs,
                        validation_data=valid_data,
                        steps_per_epoch=len(train_data.files) // batch_size)
    model.save_weights(save_model)


def test(path):
    model.load_weights(save_model)