Example #1
def test(train_data_dir):
    # Prepare data augmentation configuration
    train_generator = DataGenerator(train_data_dir, TRAIN, **params)
    val_generator = DataGenerator(train_data_dir, VAL, **params)

    test_dataset(train_generator)
    test_dataset(val_generator)
Example #2
def evaluate(lang='pt'):
    X, Y = util.get_X_Y(data_type='keras_tokenized_tri', lang=lang, file_type="dump")
    X = np.asarray(X)
    data_generator = DataGenerator(X,Y, lang=lang, process_x=process_x, batch_size=PARAMS['batch_size'])
    model, epoch = load_lastest(lang=lang)
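    # Score the restored checkpoint on the generator's held-out validation split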
    x_val, y_val = data_generator.get_validation_data()
    y_pred = model.predict(x_val)
    y_pred = y_pred.argmax(axis=-1)
    print('Model '+NAME+' val score on '+lang+': ', util.evaluate(y_val, y_pred))
Example #3
def train(lang='pt'):
    params = PARAMS.copy()
    initial_epoch = 0
    X, Y = util.get_X_Y(data_type='keras_tokenized_tri', lang=lang, file_type="dump")
    X = np.asarray(X)
    params['embedding_matrix'] = load_embedding_matrix(name="fasttext_sg_tri_8", tokenizer='keras_tokenized_tri',lang=lang, model_type="dump")
    params["vocab_size"] = params['embedding_matrix'].shape[0]
    params["embedding_dim"] = params['embedding_matrix'].shape[1]
    
    if not os.path.exists(PATH):
        os.makedirs(PATH)
    if not os.path.exists(PATH+'log_dir'):
        os.makedirs(PATH+'log_dir')
        
    #params["loss"] = util.focal_loss(gamma=5.,alpha=1588)
    lastest_model = load_lastest(lang=lang)
    if lastest_model is None:
        model, params = generate_model(params)
    else:
        model = lastest_model[0]
        initial_epoch = lastest_model[1]
        
    print(model.metrics_names)
    
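    # Balance each training batch with imbalanced-learn's FunctionSampler wrapping balance_dataset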
    params['sampler'] = FunctionSampler(func=balance_dataset,
                          kw_args={'cut_off': 0.5,
                                  'random_state': 42})
    
    data_generator = DataGenerator(X,Y, lang=lang, process_x=process_x, process_y=process_y, batch_size=PARAMS['batch_size'], sampler=params['sampler'])
    #data_generator.remove_reliable_0(pct=1.0)
    validation_data = data_generator.get_validation_data()
    print('data_generator.x: ', data_generator.__getitem__(0)[0][0:5])
    print('data_generator.y: ', data_generator.__getitem__(0)[1][0:5])

    #params["class_weights"]= data_generator.get_classes_weights()
    
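    # Callbacks: reduce LR on plateau, early stopping, CSV/TensorBoard logging, and checkpointing on val_categorical_accuracy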
    reduce_lr = ReduceLROnPlateau(monitor='val_categorical_accuracy', factor=0.2, patience=3, verbose=1)
    early_stopping = EarlyStopping(monitor='val_categorical_accuracy', min_delta=0.02, patience=10, verbose=1)
    csv_logger = CSVLogger(PATH+'training.log', append=True)
    tensorboard_callback = TensorBoard(log_dir=PATH+'log_dir', batch_size=params["batch_size"])
    model_checkpoint = ModelCheckpoint(filepath=PATH+'weights-{epoch:03d}-{val_categorical_accuracy:.4f}-'+lang+'.hdf5',
                                               monitor='val_categorical_accuracy',
                                               verbose=1,
                                               mode='max')
    params["callbacks"] = [model_checkpoint, early_stopping, tensorboard_callback, csv_logger, reduce_lr]
    
    model.fit_generator(data_generator,
                        epochs=params["epochs"],
                        verbose=1,
                        callbacks=params["callbacks"],
                        validation_data=validation_data,
                        #workers=7,
                        #use_multiprocessing=True,
                        class_weight=params["class_weights"],
                        initial_epoch=initial_epoch)
Example #4
def get_eval_op():
    dataset = DataGenerator(
            sample_range=config.eval_samples,
            shuffle_samples=True,
            max_patches_per_sample=config.max_patches_per_sample
        ).make_dataset(
            split_lhs_rhs=False
        )

    X, Y = dataset.make_one_shot_iterator().get_next()
    ops = model_eval(X, Y, params={'n_candidates': tf.constant(config.n_candidates)})
    return ops
Example #5
def get_train_op_old():
    dataset = DataGenerator(
            sample_range=config.train_samples,      # we have 236 samples
            shuffle_samples=True,
            max_patches_per_sample=config.max_patches_per_sample
        ).make_dataset(
            split_lhs_rhs=True
        )

    lhs, lhs_label, rhs, rhs_label = dataset.make_one_shot_iterator().get_next()
    ops = model_train(lhs, lhs_label, rhs, rhs_label, params={'n_candidates': tf.constant(config.n_candidates)})
    return ops
Example #6
def get_object_confusion(class1, class2, similarity_model, config):

    model_config = config['model']
    benchmark_config = config['benchmark']
    model_path = model_config['model_filename']
    dataset_path = benchmark_config['dataset_path']

    params = {
        'dim': model_config['input_shape'],
        'batch_size': benchmark_config['batch_size'],
        'shuffle': False
    }

    test_dataset = ImageDataset(dataset_path, 'validation')
    test_dataset.prepare_specific(benchmark_config['test_cases'] // 2, class1,
                                  class2)
    test_generator = DataGenerator(test_dataset, **params)
    preds = np.array([])
    gts = np.array([])

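    # Run the similarity model batch by batch, collecting flattened predictions and ground-truth labels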
    for i in tqdm(range(len(test_generator))):
        batch = test_generator[i]
        pred = similarity_model.predict_on_batch(batch[0])
        preds = np.append(preds, pred.flatten())
        gts = np.append(gts, batch[1])
        if benchmark_config['vis_output'] and not i % benchmark_config[
                'test_cases'] // (5 * benchmark_config['batch_size']):
            show_output(batch[0][0], batch[0][1], pred, batch[1])
    te_acc = compute_accuracy(preds, gts)
    print("Class 1: " + class1 + ", Class2: " + class2 +
          ", Distinguishability Score: " + str(te_acc))

    return te_acc
Example #7
def _main_():
    args = argparser.parse_args()
    config_path = args.config
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    weights_path = args.weights_path
    sm_model = SMModel(config['model'])
    sm_model.model.summary()
    sm_model.model.load_weights(weights_path)
    test_generator = DataGenerator(config=config['test'],
                                   preprocessing=sm_model.preprocessing,
                                   n_class=sm_model.n_class,
                                   split='test')
    encoded_pixels = []
    image_id_class_id = []
    for X, filenames in tqdm(list(test_generator)):
        preds = sm_model.model.predict_on_batch(X)
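        # Optional test-time augmentation: add predictions on flipped copies and average over the four variants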
        if config['test']['tta']:
            for flip_type in ['ud', 'lr', 'udlr']:
                X_temp = flip(X.copy(), flip_type)
                pred_temp = sm_model.model.predict_on_batch(X_temp)
                preds += flip(pred_temp, flip_type)
            preds /= 4
        preds = postprocess(preds, config['postprocess'], True)
        for i in range(len(preds)):
            for j in range(4):
                encoded_pixels.append(run_length_encode(preds[i, :, :, j]))
                image_id_class_id.append(filenames[i] + '_{}'.format(j + 1))
    df = pd.DataFrame(data=encoded_pixels,
                      index=image_id_class_id,
                      columns=['EncodedPixels'])
    df.index.name = 'ImageId_ClassId'
    df.to_csv('submission.csv')
Example #8
def _main_():
    args = argparser.parse_args()
    config_path = args.config
    with open(config_path) as config_buffer:
        config = json.loads(config_buffer.read())
    sm_model = SMModel(config['model'])
    oof_preds = []
    oof_true_masks = []
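    # Out-of-fold evaluation: for each of the 5 folds, load that fold's best weights and predict its validation split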
    for i in range(5):
        config['train']['fold'] = i
        generator = DataGenerator(config=config['train'], 
                                  preprocessing=sm_model.preprocessing,
                                  n_class=sm_model.n_class, 
                                  split='val', 
                                  full_size_mask=True)
        weights_path = os.path.join(config['train']['save_model_folder'],
                                    'val_best_fold_{}_weights.h5'.format(i))
        sm_model.model.load_weights(weights_path)
        print('Fold {} eval begin.'.format(i))
        for X, y in tqdm(list(generator)):
            y_preds = sm_model.model.predict(X)
            y_preds = postprocess(y_preds, config['postprocess'], True)
            oof_preds.append(y_preds)
            y = y[:, :, :, :4]
            oof_true_masks.append(y)
    oof_preds = np.concatenate(oof_preds)
    oof_true_masks = np.concatenate(oof_true_masks)
    
    cv_dice_coef = dice_coef_score(oof_true_masks, oof_preds)
    print('CV Dice Coef Score: {}'.format(cv_dice_coef))
Example #9
    def get_generators(self, ids_train, ids_valid):
        trainA_generator = DataGenerator(df=ids_train,
                                         channel="channel_first",
                                         apply_noise=True,
                                         phase="train",
                                         apply_online_aug=False,
                                         batch_size=self.BATCH_SIZE,
                                         n_samples=self.n_samples)
        validA_generator = DataGenerator(df=ids_valid,
                                         channel="channel_first",
                                         apply_noise=False,
                                         phase="valid",
                                         apply_online_aug=False,
                                         batch_size=self.BATCH_SIZE,
                                         n_samples=-1)
        return iter(trainA_generator), iter(validA_generator)
Example #10
def load_imgs(path, size):
    files = glob.glob(os.path.join(path, "*"))
    imgs = []
    for file in files:
        img = cv2.imread(file)
        img = DataGenerator.pre_process(img, size)
        imgs.append(img)
    return files, imgs
Example #11
def get_train_op():
    dg = DataGenerator(
            sample_range=config.train_samples,
            shuffle_samples=True,
            max_patches_per_sample=config.max_patches_per_sample
        )
    ds = dg.make_dataset(split_lhs_rhs=False)
    label_weights = dg.get_label_weights_from_dumped()
    print("Label weights used: ", label_weights)
    label_weights = tf.constant(label_weights, dtype=tf.float32)

    X, Y = ds.make_one_shot_iterator().get_next()
    ops = model_train(X, Y, params={
        'n_candidates': tf.constant(config.n_candidates),
        'label_weights': label_weights
    })
    return ops
Example #12
    def build_generator(self):
        self.train_generator = DataGenerator(
            args=self.args,
            dictionary=self.dictionary,
            n_classes=self.n_classes,
            feature_shapes=self.feature_shapes,
            n_anchors=self.n_anchors,
            shuffle=True)
Example #13
def split_path(list_imgs, size):
    path_imgs = []
    imgs = []
    for file in list_imgs:
        img = cv2.imread(file['path'])
        img = DataGenerator.pre_process(img, size)
        imgs.append(img)
        path_imgs.append(file['path'])
    return path_imgs, imgs
Example #14
    def __createDatasets(self):
        if self.parameters.pretrained is None:
            logging.info(
                "Loading dataset with target same source and target files")
            target_label = 'source'
        else:
            logging.info("Loading dataset with target source and target files")
            target_label = 'target'

        self.trainGenerator = DataGenerator(
            self.parameters.train_lst,
            batch_size=self.parameters.batch_size,
            frame_length=self.parameters.frame_length,
            target_label=target_label)
        self.testGenerator = DataGenerator(
            self.parameters.test_lst,
            batch_size=self.parameters.batch_size,
            frame_length=self.parameters.frame_length,
            target_label=target_label)
Example #15
def train(opt, data_type=None):
    callbacks = MyCallback(opt)
    
    if data_type == 'All_data': # data from TextDataGenerator and AI Hub
        opt.num_class = len(opt.character)
        
        df = pd.read_csv(opt.data)
        xs = np.array(df['ids'])
        ys = np.array(df['labels'])
        
        # train/valid split and shuffle
        train_x, valid_x, train_y, valid_y = train_test_split(xs, ys, test_size=0.01)
        
        train_datagen = DataGeneratorByPath(train_x, train_y, opt)
        valid_datagen = DataGeneratorByPath(valid_x, valid_y, opt)
        
    else: # only data from TextDataGenerator
        opt.num_class = len(opt.character)
        train_datagen = DataGenerator(opt.train_data, opt)
        valid_datagen = DataGenerator(opt.valid_data, opt, shuffle=False)
    
    print(f'num class : {opt.num_class}')
    
    model = VGG_FeatureExtractor(opt.num_class)
    model.summary()
    
    start_epoch = 0
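    # Optionally resume from a saved checkpoint; the filename is assumed to start with the last epoch index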
    if opt.saved_model != '':
        model.load_weights(opt.saved_model)
        start_epoch = int(opt.saved_model.split('/')[-1].split('_')[0])+1
        print(f'load weight of {start_epoch} epoch')
        
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])
    
    model.fit(train_datagen,
              initial_epoch=start_epoch,
              epochs=opt.num_epochs,
              validation_data=valid_datagen,
              callbacks=[callbacks])
Example #16
def get_test_imgs(path, size):
    path_imgs = []
    for root, dir, files in os.walk(path):
        for file in files:
            file_path = os.path.join(root, file)
            path_imgs.append(file_path)
    imgs = []
    for file in path_imgs:
        img = cv2.imread(file)
        img = DataGenerator.pre_process(img, size)
        imgs.append(img)
    return path_imgs, imgs
Example #17
def train(model, batch_size, num_epochs, model_dir=MODEL_DIR):
    train_data_dirs = ['data/G_data', 'data/H_data']
    train_label_files = [
        'data/labels/image_boxes_G.txt', 'data/labels/image_boxes_H.txt'
    ]
    val_data_dirs = ['data/I_data']
    val_label_files = ['data/labels/image_boxes_I.txt']
    train_generator = DataGenerator(train_data_dirs,
                                    train_label_files,
                                    batch_size,
                                    enable_data_aug=True)
    val_generator = DataGenerator(val_data_dirs,
                                  val_label_files,
                                  batch_size,
                                  enable_data_aug=False)

    # Check if data looks correct
    # verify_data_generator(train_generator)

    callbacks = [
        tf.keras.callbacks.ModelCheckpoint(model_dir,
                                           monitor='loss',
                                           save_best_only=True,
                                           save_weights_only=True),
        # tf.keras.callbacks.EarlyStopping(monitor='loss', patience=20, verbose=1),
        tf.keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                             factor=0.8,
                                             patience=10,
                                             min_lr=1e-6,
                                             verbose=1),
        tf.keras.callbacks.TensorBoard(log_dir=LOG_DIR, histogram_freq=0)
    ]

    return model.fit_generator(generator=train_generator,
                               epochs=num_epochs,
                               callbacks=callbacks,
                               validation_data=val_generator)
Example #18
def evaluate_age_mobilenet_v1_imdb_wiki():
    validation_size = 1000
    data = get_imdb_wiki_dataset()
    addrs = data["addrs"][:validation_size]
    age_labels = data["age_labels"][:validation_size]
    gender_labels = data["gender_labels"][:validation_size]

    num_classes = 101
    batch_size = 64

    imp = task_importance_weights(age_labels, num_classes)
    plt.figure("Weight importance")
    plt.plot(imp)
    plt.show()

    checkpoint_path = os.path.join(ROOT_DIR, "outputs", "checkpoints",
                                   "age_mobilenet_v1_imdb_wiki", "ckpt.h5")

    # Building Mobilenet

    val_generator = DataGenerator(addrs[:validation_size],
                                  age_labels[:validation_size], batch_size,
                                  num_classes)

    # steps_per_epoch = val_generator.n // val_generator.batch_size

    loss = coral_loss(imp)
    model = keras.models.load_model(
        checkpoint_path,
        custom_objects={
            "loss": loss,
            "mae_pred": mae_pred,
            "Linear_1_bias": Linear_1_bias
        },
    )

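    # Decode the ordinal (CORAL-style) outputs: threshold each unit at 0.5 and count exceedances to get the predicted age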
    pred = model.predict_generator(generator=val_generator)
    pred = pred > 0.5
    y_pred = np.sum(pred, axis=1)
    mae = np.mean(np.abs(age_labels - y_pred))
    print("mae: {}".format(mae))

    # print(list(zip(model.metrics_names, score)))
    plot(validation_size, batch_size, addrs, gender_labels, age_labels, y_pred)
Example #19
def evaluate(opt):
    test_datagen = DataGenerator(opt.test_data, opt, False)
    opt.num_class = len(opt.character)

    model = VGG_FeatureExtractor(opt.num_class)
    model.summary()

    if opt.saved_model == '':
        print('set path of save model!')
        exit()

    model.load_weights(opt.saved_model)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])

    history = model.evaluate(test_datagen)

    return history
Example #20
def evaluate_tut_model():
    num_classes = 7
    batch_size = 64
    validation_size = 2500
    data = get_affectnet_dataset()
    train_addrs = data["training"]["addrs"][:validation_size]
    train_expression_labels = data["training"][
        "expression_labels"][:validation_size]
    train_expression_labels[train_expression_labels > 6] = 0
    val_addrs = data["validation"]["addrs"]
    val_expression_labels = data["validation"]["expression_labels"]
    val_expression_labels[val_expression_labels > 6] = 0

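    # Note: the generator below is built from the training subset loaded above, not the validation addresses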
    val_generator = DataGenerator(train_addrs, train_expression_labels,
                                  batch_size, num_classes)

    model = keras.models.load_model("./model.h5")

    evaluation = model.evaluate_generator(val_generator)
    print(evaluation)
Example #21
def evaluate_tut_model():
    data = get_audience_dataset()
    addrs = data["addrs"]
    age_labels = data["gender_labels"]

    num_classes = 2
    batch_size = 64
    validation_size = 1000

    # Building Mobilenet

    val_generator = DataGenerator(
        addrs[:validation_size], age_labels[:validation_size], batch_size, num_classes
    )
    # steps_per_epoch = val_generator.n // val_generator.batch_size

    model = keras.models.load_model(os.path.join(ROOT_DIR, "outputs/tut/gender/model.h5"))

    # evaluation = model.evaluate(X, keras.utils.to_categorical(Y), batch_size=128)
    score = model.evaluate_generator(generator=val_generator)
    print(list(zip(model.metrics_names, score)))
Example #22
def predict(opt):
    test_datagen = DataGenerator(opt.test_data, opt, False, False)
    opt.num_class = len(opt.character)

    model = VGG_FeatureExtractor(opt.num_class)
    model.summary()

    if opt.saved_model == '':
        print('set path of save model!')
        exit()

    model.load_weights(opt.saved_model)
    model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),
                  loss='sparse_categorical_crossentropy',
                  metrics=['acc'])

    pred = model.predict(test_datagen)
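    # Map each predicted class index back to its character via the generator's idx_to_char lookup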
    y_pred = tf.argmax(pred, axis=1).numpy()
    #print(y_pred)
    print([test_datagen.idx_to_char[idx] for idx in y_pred])

    return y_pred
Example #23
def oof_predict(sm_model, config):
    oof_preds = []
    oof_true_masks = []
    for i in range(5):
        config['train']['fold'] = i
        generator = DataGenerator(config=config['train'],
                                  preprocessing=sm_model.preprocessing,
                                  n_class=sm_model.n_class,
                                  split='val',
                                  full_size_mask=True)
        weights_path = os.path.join(config['train']['save_model_folder'],
                                    'val_best_fold_{}_weights.h5'.format(i))
        sm_model.model.load_weights(weights_path)
        print('Fold {} eval begin.'.format(i))
        for X, y in tqdm(list(generator)):
            y_preds = sm_model.model.predict(X)
            oof_preds.append(y_preds)
            y = y[:, :, :, :4]
            oof_true_masks.append(y)
    oof_preds = np.concatenate(oof_preds)
    oof_true_masks = np.concatenate(oof_true_masks)
    return oof_preds, oof_true_masks
Example #24
def evaluate_7expr_mobilenet_v1_train_affectnet_model():
    num_classes = 7
    batch_size = 64
    validation_size = 2500
    data = get_affectnet_dataset()
    train_addrs = data["training"]["addrs"][:validation_size]
    train_expression_labels = data["training"][
        "expression_labels"][:validation_size]
    train_expression_labels[train_expression_labels > 6] = 0
    val_addrs = data["validation"]["addrs"]
    val_expression_labels = data["validation"]["expression_labels"]
    val_expression_labels[val_expression_labels > 6] = 0

    val_generator = DataGenerator(train_addrs, train_expression_labels,
                                  batch_size, num_classes)

    checkpoint_path = os.path.join(ROOT_DIR, "outputs", "checkpoints",
                                   "7expr_mobilenet_v1_affectnet", "ckpt.h5")
    model = keras.models.load_model(checkpoint_path)

    evaluation = model.evaluate_generator(generator=val_generator)
    print(list(zip(model.metrics_names, evaluation)))
Example #25
def evaluate_imdb_wiki_model():
    data = get_imdb_wiki_dataset()
    addrs = data["addrs"]
    age_labels = data["gender_labels"]

    num_classes = 2
    batch_size = 64
    validation_size = 2500
    checkpoint_path = os.path.join(
        ROOT_DIR, "outputs", "checkpoints", "gender_mobilenet_v1_imdb_wiki", "ckpt-08-0.48.hdf5"
    )

    # Building Mobilenet

    val_generator = DataGenerator(
        addrs[:validation_size], age_labels[:validation_size], batch_size, num_classes
    )
    # steps_per_epoch = val_generator.n // val_generator.batch_size

    model = keras.models.load_model(checkpoint_path)

    # evaluation = model.evaluate(X, keras.utils.to_categorical(Y), batch_size=128)
    score = model.evaluate_generator(generator=val_generator)
    print(list(zip(model.metrics_names, score)))
Example #26
def evaluate_fine_tuned_audience_model():
    data = get_audience_dataset()
    addrs = data["addrs"]
    age_labels = data["gender_labels"]

    num_classes = 2
    batch_size = 64
    validation_size = 1000
    checkpoint_path = os.path.join(
        ROOT_DIR, "outputs", "checkpoints", "gender_mobilenet_v1_audience", "ckpt-04-0.22.hdf5"
    )

    # Building Mobilenet

    val_generator = DataGenerator(
        addrs[:validation_size], age_labels[:validation_size], batch_size, num_classes
    )
    # steps_per_epoch = val_generator.n // val_generator.batch_size

    model = keras.models.load_model(checkpoint_path)

    # evaluation = model.evaluate(X, keras.utils.to_categorical(Y), batch_size=128)
    score = model.predict_generator(generator=val_generator)
    print(np.argmax(score, axis=1))

Example #27
if __name__ == "__main__":
    # get config
    config, _ = get_config()

    # Build Model and Reward from config
    actor = Actor(config)

    print("Starting training...")
    with tf.Session() as sess:
        tf.global_variables_initializer().run()
        print_config()

        solver = [] #Solver(actor.max_length)
        training_set = DataGenerator(solver)

        nb_epoch=2
        for i in tqdm(range(nb_epoch)): # epoch i

            # Get feed_dict
            input_batch  = training_set.train_batch(actor.batch_size, actor.max_length, actor.input_dimension)
            feed = {actor.input_: input_batch}
            #print(' Input \n', input_batch)

            permutation, distances = sess.run([actor.positions, actor.distances], feed_dict=feed) 
            print(' Permutation \n',permutation)
            print(' Tour length \n',distances)


        variables_names = [v.name for v in tf.global_variables() if 'Adam' not in v.name]
Example #28
batch_size = 64
validation_size = 1000
input_shape = (224, 224, 3)
app_id = "age_mobilenet_v1_audience"

################################################################################
# Create dataset generator
################################################################################
data = get_audience_dataset()
addrs = data["addrs"]
age_labels = data["age_labels"]

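# Per-class importance weights for the ordinal age labels (num_classes is assumed to be defined earlier in the original script)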
imp = task_importance_weights(age_labels, num_classes)

train_generator = DataGenerator(addrs[validation_size:],
                                age_labels[validation_size:], batch_size,
                                num_classes)
val_generator = DataGenerator(addrs[:validation_size],
                              age_labels[:validation_size], batch_size,
                              num_classes)
steps_per_epoch = train_generator.n // train_generator.batch_size

################################################################################
# Create and load mobilenet
################################################################################
model = keras.applications.mobilenet.MobileNet(input_shape=input_shape,
                                               weights=None,
                                               include_top=False)
x = model.output
x = keras.layers.GlobalAveragePooling2D()(x)
x = keras.layers.Dropout(0.001)(x)
Example #29
def preprocess_input(x):
    """Preprocesses a numpy array encoding a batch of images.
    # Arguments
        x: a 4D numpy array consists of RGB values within [0, 255].
    # Returns
        Input array scaled to [-1.,1.]
    """
    return imagenet_utils.preprocess_input(x, mode='tf')
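
# Minimal usage sketch (illustrative, not part of the original script), assuming numpy
# is available as np: scale a dummy batch so its pixel values land in [-1., 1.]
#   import numpy as np
#   batch = np.random.randint(0, 256, size=(1, 224, 224, 3)).astype('float32')
#   batch = preprocess_input(batch)   # values now in [-1., 1.]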


def create_model():
    return Deeplabv3(weights=None, backbone='mobilenetv2', OS=8, classes=2, input_shape=(480, 854, 4))


if __name__ == '__main__':
    import keras
    from keras import optimizers
    # from keras.utils import plot_model
    from dataset import DataGenerator

    # plot_model(create_model(), show_shapes=True, to_file='mobile_net_v2.png', )
    model = create_model()

    # datagen = ImageDataGenerator(horizontal_flip=True, width_shift_range=0.125, height_shift_range=0.125,
    #                              fill_mode='constant', cval=0.)
    sgd = optimizers.SGD(lr=.001, momentum=0.9, nesterov=True)
    model.compile(optimizer=sgd, loss=keras.losses.binary_crossentropy, metrics=['accuracy'])
    train_datagen = DataGenerator('../MT_split/train.txt', 2)
    model.fit_generator(train_datagen, epochs=10, workers=2, use_multiprocessing=True)
Example #30
def main():
    # Get running configuration
    config, _ = get_config()
    print_config()

    # Build tensorflow graph from config
    print("Building graph...")
    actor = Actor(config)

    # Saver to save & restore all the variables.
    variables_to_save = [
        v for v in tf.global_variables() if 'Adam' not in v.name
    ]
    saver = tf.train.Saver(var_list=variables_to_save,
                           keep_checkpoint_every_n_hours=1.0)

    print("Starting session...")
    with tf.Session() as sess:
        # Run initialize op
        sess.run(tf.global_variables_initializer())

        # Restore variables from disk.
        if config.restore_model == True:
            saver.restore(sess, "save/" + config.restore_from + "/actor.ckpt")
            print("Model restored.")

        # Initialize data generator
        solver = Solver(actor.max_length)  ###### ######
        training_set = DataGenerator(solver)

        # Training mode
        if not config.inference_mode:

            # Summary writer
            writer = tf.summary.FileWriter(config.log_dir, sess.graph)

            print("Starting training...")
            for i in tqdm(range(config.nb_epoch)):
                # Get feed dict
                input_batch = training_set.train_batch(actor.batch_size,
                                                       actor.max_length,
                                                       actor.input_dimension)
                feed = {actor.input_: input_batch}
                if i % 5 == 0:

                    sess.run(actor.assign_op, feed_dict=feed)

                # Forward pass & train step
                summary, base_op, train_step1, train_step2 = sess.run(
                    [
                        actor.merged, actor.base_op, actor.train_step1,
                        actor.train_step2
                    ],
                    feed_dict=feed)

                if i % 100 == 0:
                    writer.add_summary(summary, i)

                # Save the variables to disk
                if i % max(1, int(config.nb_epoch / 5)) == 0 and i != 0:
                    save_path = saver.save(sess,
                                           "save/" + config.save_to +
                                           "/tmp.ckpt",
                                           global_step=i)
                    print("\n Model saved in file: %s" % save_path)

            print("Training COMPLETED !")
            saver.save(sess, "save/" + config.save_to + "/actor.ckpt")

        # Inference mode
        else:

            targets = []
            predictions = []

            for __ in tqdm(range(1000)):  # num of examples

                # Get feed_dict (single input)
                seed_ = 1 + __
                input_batch, or_sequence = training_set.test_batch(
                    actor.batch_size,
                    actor.max_length,
                    actor.input_dimension,
                    seed=seed_)  # seed=0 means None
                feed = {actor.input_: input_batch}

                # Solve instance (OR tools)
                opt_trip, opt_length = training_set.solve_instance(or_sequence)
                targets.append(opt_length / 100)
                #print('\n Optimal length:',opt_length/100)

                ################################### UMPA LOOOOP HERE ###################################    nb_loop / temperature

                # Sample solutions
                permutations, circuit_length = sess.run(
                    [actor.positions, actor.distances], feed_dict=feed)
                #training_set.visualize_sampling(permutations)

                # Find best solution
                #print(circuit_length)
                j = np.argmin(circuit_length)
                best_permutation = permutations[j][:-1]
                predictions.append(circuit_length[j])

                ################################### UMPA LOOOOP HERE ###################################

                #print('\n Best tour length:',circuit_length[j])
                #print(' * permutation:', best_permutation)

                # plot corresponding tour
                #training_set.visualize_2D_trip(opt_trip)
                #training_set.visualize_2D_trip(or_sequence[best_permutation])

            predictions = np.asarray(predictions)
            targets = np.asarray(targets)

            print(' Mean length:', np.mean(predictions))
            ratio = np.asarray(predictions) / np.asarray(targets)
            print('\n Average deviation: \n', np.mean(ratio))

            n, bins, patches = plt.hist(ratio, 50, facecolor='r', alpha=0.75)

            plt.xlabel('Prediction/target')
            plt.ylabel('Counts')
            plt.title('Comparison to Google OR tools')
            plt.axis([0.9, 1.4, 0, 500])
            plt.grid(True)
            plt.show()