Example #1
def test(chkpt_file, params):
    logger.info(f"run_id: {params['run_id']}")
    logger.info("getting model...")
    model = get_cnn_model_1(**params)
    logger.info("compiling model...")
    opt = tf.keras.optimizers.Adam(learning_rate=params["learning_rate"])
    model.compile(opt,
                  loss="categorical_crossentropy",
                  metrics=["accuracy", "categorical_crossentropy"])
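    # build with the full (batch, H, W, C) input shape so the weights exist before loading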
    model.build([None, 128, 128, 10])
    model.load_weights(chkpt_file)
    logger.info("testing...")
    ds = get_dataset("val")
    pred = []
    true = []
    for x, y in ds:
        results = model.predict(x)
        pred.append(np.argmax(results, axis=1))
        true.append(np.argmax(y, axis=1))
    P = np.concatenate(pred)
    T = np.concatenate(true)
    print(list(P))
    print(list(T))
    cmat = sklearn.metrics.confusion_matrix(T, P)  # y_true first, then y_pred
    plt.matshow(cmat)
    plt.savefig("cmat.png")

    model.summary(print_fn=logger.info)
    return cmat
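A sketch of how test() might be called; the params values are hypothetical (any extra keys get_cnn_model_1 expects are not shown), and the checkpoint path mirrors the one train() writes in Example #3:

params = {"run_id": "exp01", "learning_rate": 1e-3}  # hypothetical values
test(f"cnn1_{params['run_id']}.chkpt", params)       # path written by train() below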
Example #2
def draw_map(file, map=None, show=True, title=None, log=False, map_type=None):
    '''Use Matplotlib's basemap to generate a map of a given BIOCLIM data 
    file.
    
    You can supply a Basemap object (in any projection) as the optional 
    keyword argument "map." If none is provided, the default Miller 
    projection will be used.'''
    
    data, no_value, ul, dims, size = extract_attributes(file)
    dataset = get_dataset(file)
    lats = np.linspace(ul[0], ul[0]-dims[0]*size[0], size[0], endpoint=False)
    lons = np.linspace(ul[1], ul[1]+dims[1]*size[1], size[1], endpoint=False)
    if map_type == 'variance':
        raster = np.zeros((len(lats), len(lons)))
        values = get_spatial_variance(file,
                                      [(lat, lon)
                                       for lat in lats
                                       for lon in lons])
        # fill the raster row by row; pop(0) preserves the (lat, lon)
        # ordering of the comprehension above
        for a in range(raster.shape[0]):
            for b in range(raster.shape[1]):
                raster[a, b] = values.pop(0)
    else:
        raster = dataset.ReadAsArray()

    # missing data is encoded as -9999; mask those cells so they are not plotted
    values = np.ma.masked_where(raster == no_value, raster)
    
    # log-transform the data, if requested, shifting first so all values are non-negative
    if log:
        if (values < 0).any():
            values -= values.min()
        values = np.log1p(values)
    
    plt.figure()
    if title is None:
        title = '%s' % file
        if file in variable_names:
            title += ': %s' % variable_names[file]
    plt.title(title)
    if map is None:
        map = Basemap(projection='mill', lon_0=0)
        map.drawcoastlines(linewidth=1)
        map.drawcountries(linewidth=1)
        map.drawstates(linewidth=0.5)
        parallels = np.arange(-90., 90., 10.)
        map.drawparallels(parallels, labels=[False, True, True, False])
        meridians = np.arange(-180., 180., 20.)
        map.drawmeridians(meridians, labels=[True, False, False, True])

    x, y = np.meshgrid(lons, lats)
    map.pcolormesh(x, y, values, latlon=True, cmap=plt.cm.Spectral_r)
    plt.colorbar()
    
    if show: plt.show()
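A usage sketch following the docstring; the file name is a hypothetical BIOCLIM layer, and the Robinson projection merely stands in for "any projection":

from mpl_toolkits.basemap import Basemap

m = Basemap(projection='robin', lon_0=0)  # optional custom projection
draw_map('bio1.bil', map=m, title='Annual mean temperature')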
Example #3
def train(data_dir, params, epochs=30):
    logger.info(f"run_id: {params['run_id']}")
    logger.info("getting model...")
    model = get_cnn_model_1(**params)
    logger.info("compiling model...")
    opt = tf.keras.optimizers.Adam(learning_rate=params["learning_rate"])
    model.compile(opt,
                  loss="categorical_crossentropy",
                  metrics=["accuracy", "categorical_crossentropy"])
    logger.info("training...")
    history = model.fit(get_dataset("train"),
                        validation_data=get_dataset("val"),
                        callbacks=[
                            tf.keras.callbacks.ModelCheckpoint(
                                f"cnn1_{params['run_id']}.chkpt",
                                save_best_only=True)
                        ],
                        validation_steps=56,
                        steps_per_epoch=200,
                        epochs=epochs)
    model.summary(print_fn=logger.info)
    return history
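Examples #1 and #3 both lean on a get_dataset(split) helper the listing omits. A minimal sketch, assuming in-memory arrays shaped like the (128, 128, 10) inputs test() builds for, one-hot labels to match the categorical_crossentropy loss, and a hypothetical load_split helper:

def get_dataset(split, batch_size=32, n_classes=10):
    # hypothetical loader; real code would read the files for `split`
    images, labels = load_split(split)      # assumed helper returning arrays
    labels = tf.one_hot(labels, n_classes)  # one-hot to match the loss above
    ds = tf.data.Dataset.from_tensor_slices((images, labels))
    return ds.shuffle(1024).batch(batch_size).repeat()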
Example #4
def main(argv):
    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(tensorboard_path, sess.graph)  # tensorboard_path is where the graph summary is saved
        data_set_train = read_data.get_dataset(record_name_train)
        data_set_train = data_set_train.shuffle(shuffle_pool_size).batch(batch_size).repeat()
        data_set_train_iter = data_set_train.make_one_shot_iterator()
        train_handle = sess.run(data_set_train_iter.string_handle())

        data_set_test = read_data.get_dataset(record_name_test)
        data_set_test = data_set_test.shuffle(shuffle_pool_size).batch(test_batch_size).repeat()
        data_set_test_iter = data_set_test.make_one_shot_iterator()
        test_handle = sess.run(data_set_test_iter.string_handle())

        # a feedable string handle lets the same iterator op pull batches from
        # either the training or the test dataset at run time
        handle = tf.placeholder(tf.string, shape=[], name='handle')
        iterator = tf.data.Iterator.from_string_handle(handle, data_set_train.output_types, data_set_train.output_shapes)
        x_input_batch, y_target_batch = iterator.get_next()

        cnn_model = model.CNN_Model()
        x_input = cnn_model.x_input
        y_target = cnn_model.y_target
        logits = tf.nn.softmax(cnn_model.logits)
        loss = cnn_model.loss
        train_step = cnn_model.train_step
        dropout = cnn_model.dropout
        sess.run(tf.global_variables_initializer())

        if retrain:
            print('retraining')
            ckpt_name = 'cnn_emotion_classifier.ckpt'
            ckpt_path = os.path.join(data_folder_name, data_path_name, ckpt_name)
            saver = tf.train.Saver()
            saver.restore(sess, ckpt_path)

        with tf.name_scope('Loss_and_Accuracy'):
            tf.summary.scalar('Loss', loss)
        summary_op = tf.summary.merge_all()

        print('start training')
        saver = tf.train.Saver(max_to_keep=1)
        max_accuracy = 0
        temp_train_loss = []
        temp_test_loss = []
        temp_train_acc = []
        temp_test_acc = []
        for i in range(generations):
            x_batch, y_batch = sess.run([x_input_batch, y_target_batch], feed_dict={handle: train_handle})
            train_feed_dict = {x_input: x_batch, y_target: y_batch,
                               dropout: 0.5}
            sess.run(train_step, train_feed_dict)
            if (i + 1) % 100 == 0:
                train_loss, train_logits = sess.run([loss, logits], train_feed_dict)
                train_accuracy = evaluate(train_logits, y_batch)
                print('Generation # {}. Train Loss : {:.3f} . '
                      'Train Acc : {:.3f}'.format(i + 1, train_loss, train_accuracy))
                temp_train_loss.append(train_loss)
                temp_train_acc.append(train_accuracy)
                summary_writer.add_summary(sess.run(summary_op, train_feed_dict), i)
            if (i + 1) % 400 == 0:
                test_x_batch, test_y_batch = sess.run([x_input_batch, y_target_batch], feed_dict={handle: test_handle})
                test_feed_dict = {x_input: test_x_batch, y_target: test_y_batch,
                                  dropout: 1.0}
                test_loss, test_logits = sess.run([loss, logits], test_feed_dict)
                test_accuracy = evaluate(test_logits, test_y_batch)
                print('Generation # {}. Test Loss : {:.3f} . '
                      'Test Acc : {:.3f}'.format(i + 1, test_loss, test_accuracy))
                temp_test_loss.append(test_loss)
                temp_test_acc.append(test_accuracy)
                if test_accuracy >= max_accuracy and save_flag and i > generations // 2:
                    max_accuracy = test_accuracy
                    saver.save(sess, os.path.join(data_folder_name, data_path_name, save_ckpt_name))
                    print('Generation # {}. --model saved--'.format(i + 1))
        print('Best test accuracy : ', max_accuracy)

        with open(model_log_path, 'w') as f:
            f.write('train_loss: ' + str(temp_train_loss))
            f.write('\n\ntest_loss: ' + str(temp_test_loss))
            f.write('\n\ntrain_acc: ' + str(temp_train_acc))
            f.write('\n\ntest_acc: ' + str(temp_test_acc))
        print(' --log saved--')
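The evaluate helper called on the logits above is not shown; a minimal sketch, assuming both arguments are (batch, n_classes) arrays:

import numpy as np

def evaluate(logits, labels):
    # hypothetical implementation: fraction of matching argmax predictions
    return np.mean(np.argmax(logits, axis=1) == np.argmax(labels, axis=1))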
Example #5
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--x_size", "-x", type=int,
                        help="width of the input images; 0 uses the full width, otherwise images are center-cropped")
    parser.add_argument("--y_size", "-y", type=int,
                        help="height of the input images; 0 uses the full height, otherwise images are center-cropped")
    parser.add_argument("--epochs", "-e", type=int,
                        help="number of epochs to train the model")
    parser.add_argument("--classes", "-c", type=int,
                        help="number of unique classes to use; 0 uses all, otherwise classes are picked at random")
    parser.add_argument("--dataset", "-d", type=str,
                        help="the dataset to use; supports soco, fvc_db1, fvc_db3")
    parser.add_argument("--enc_dim", "-n", type=int,
                        help="dimension of the encoded image")
    args = parser.parse_args()
    x_size = check_and_get(args.x_size, 0)
    y_size = check_and_get(args.y_size, 0)
    epochs = check_and_get(args.epochs, 100)
    classes = check_and_get(args.classes, 0)
    dataset_name = check_and_get(args.dataset, "soco")
    encoding_dim = check_and_get(args.enc_dim, 128)

    x_train, x_test, labels, new_x, new_y = get_dataset(
        dataset_name, classes, x_size, y_size)
    input_shape = new_x * new_y
    autoencoder, encoder, decoder = create_autoencoder(input_shape,
                                                       encoding_dim)

    x_train = x_train.astype('float32') / 255.
    x_test = x_test.astype('float32') / 255.
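    # flatten each image to a 1-D vector for the dense autoencoder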
    x_train = x_train.reshape((len(x_train), np.prod(x_train.shape[1:])))
    x_test = x_test.reshape((len(x_test), np.prod(x_test.shape[1:])))

    # Train the autoencoder for the requested number of epochs
    history = autoencoder.fit(x_train,
                              x_train,
                              epochs=epochs,
                              batch_size=64,
                              shuffle=True,
                              validation_data=(x_test, x_test),
                              verbose=2)

    # save model to file
    filepath = "autoencoder_model_{}_dims_{}_epochs_{}_input_shape_{}.h5".format(
        encoding_dim, epochs, input_shape,
        datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S'))
    autoencoder.save(filepath)
    # Visualize the reconstructed encoded representations
    # encode and decode some fingerprints
    # note that we take them from the *test* set
    encoded_imgs = encoder.predict(x_test)
    decoded_imgs = decoder.predict(encoded_imgs)

    show_loss_plot(history)
    show_reconstruction_plot(new_x, new_y, x_test, decoded_imgs, labels)

    K.clear_session()
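check_and_get is not defined in the listing; a minimal sketch, assuming it only substitutes a default when a flag was omitted:

def check_and_get(value, default):
    # hypothetical helper: argparse leaves omitted flags as None
    return value if value is not None else default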