import glob
import os

import numpy as np


def main(data_path, mode, load_path):
    # Model: restore weights from the checkpoint prefix under load_path
    model = CNN_Model(mode=mode, multiclass=False)
    model.load_weights(os.path.join(load_path, 'my_chekpoint')).expect_partial()

    # Load Dataset
    fn_list = glob.glob(os.path.join(data_path, '*.npy'))
    for fn in fn_list:
        # Patient ID = file name without directory or the .npy extension
        pid = os.path.splitext(os.path.basename(fn))[0]
        img = np.load(fn)
        img = img.reshape((1, 128, 128, 96))

        # Predict
        pred = model(img)
        pred = 0 if float(pred) < 0.5 else 1  # threshold the sigmoid output
        if 'S_C' in data_path:
            if pred == 0:
                label = 'Control'
            else:
                label = 'Severe'
        elif 'S_M' in data_path:
            if pred == 0:
                label = 'Mild'
            else:
                label = 'Severe'
        else:
            if pred == 0:
                label = 'Control'
            else:
                label = 'Mild'

        # Result
        print("ID: {} Label: {}".format(pid, label))
Code example #2
def main(argv=None):

    if argv is None:
        argv = sys.argv

    try:
        # Setup argument parser
        parser = ArgumentParser(formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('input', metavar='input-file', type=str, nargs=1)
        parser.add_argument('output',
                            metavar='output-file',
                            type=str,
                            nargs='?')
        parser.add_argument("-c",
                            "--ckpt",
                            type=str,
                            dest="ckpt",
                            action="store",
                            default='ckpt')
        parser.add_argument("-j",
                            "--json",
                            type=str,
                            dest="json",
                            action="store",
                            default='jsons')
        parser.add_argument("-ir",
                            "--irace",
                            type=bool,
                            dest="irace",
                            action="store",
                            default=False)
        parser.add_argument("-s",
                            "--seed",
                            type=int,
                            dest="seed",
                            action="store",
                            default=50)
        parser.add_argument("-e",
                            "--epochs",
                            type=int,
                            dest="epochs",
                            action="store",
                            default=50)
        parser.add_argument("-lr",
                            "--learning_rate",
                            type=float,
                            dest="learning_rate",
                            action="store",
                            default=0.0001)
        parser.add_argument("-bs",
                            "--batch_size",
                            type=int,
                            dest="batch_size",
                            action="store",
                            default="256")
        parser.add_argument("-cgs",
                            "--cnn_group_size",
                            type=int,
                            dest="cnn_group_size",
                            action="store",
                            default=2)
        parser.add_argument("-cgn",
                            "--cnn_group_num",
                            type=int,
                            dest="cnn_group_num",
                            action="store",
                            default=2)
        parser.add_argument("-cbf",
                            "--cnn_base_filters",
                            dest="cnn_base_filters",
                            action="store",
                            default="64")
        parser.add_argument("-dln",
                            "--dense_layer_num",
                            type=int,
                            dest="dense_layer_num",
                            action="store",
                            default=1)
        parser.add_argument("-dlu",
                            "--dense_layer_units",
                            type=int,
                            dest="dense_layer_units",
                            action="store",
                            default="256")
        parser.add_argument("-do",
                            "--dropout",
                            type=float,
                            dest="dropout",
                            action="store",
                            default=0.2)
        parser.add_argument("-ds",
                            "--dataset",
                            type=str,
                            dest="dataset",
                            action="store",
                            default='cifar10')
        parser.add_argument("-ts",
                            "--test_sample",
                            type=int,
                            dest="test_sample",
                            action="store",
                            default=0)
        parser.add_argument("-tsr",
                            "--test_sample_range",
                            type=str,
                            dest="test_sample_range",
                            action="store",
                            default='')

        args = parser.parse_args()

    except Exception as e:
        # The original raised here, leaving the error report unreachable.
        program_name = os.path.basename(sys.argv[0])
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help\n")
        return 2

    sample_inds = get_sample_inds(args)

    irace = args.irace

    model = CNN_Model("cnn_model",
                      cnn_group_num=args.cnn_group_num,
                      cnn_group_size=args.cnn_group_size,
                      cnn_base_filters=args.cnn_base_filters,
                      dense_layer_num=args.dense_layer_num,
                      dense_layer_units=args.dense_layer_units,
                      dropout=args.dropout)

    x, y, x_test, y_test, width, class_names = load_dataset(args.dataset)

    sample_x = x_test[sample_inds]
    sample_y = y_test[sample_inds]

    indexlist, meta_dict = get_checkpoint_files(args.ckpt)

    for i in indexlist:
        with tf.Session() as sess:
            metafile = os.path.join(args.ckpt, meta_dict[i])
            ckpt_file = os.path.splitext(metafile)[0]

            new_saver = tf.train.import_meta_graph(metafile)
            new_saver.restore(sess, ckpt_file)

            print(sess.run(tf.report_uninitialized_variables()))

            input_op = sess.graph.get_collection("input_op")[0]

            vis_ops = tf.get_collection('VisOps')
            trainable_ops = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)

            weight_ops = get_weight_ops(trainable_ops)
            weight_values = sess.run(weight_ops)

            activation_ops = get_activation_ops(vis_ops)

            activation_values, input_values = sess.run(
                [activation_ops, input_op], {input_op: sample_x})

            structure_json_base = get_structure_json_list(
                vis_ops, weight_ops, input_op, class_names)
            weight_json_base = get_weight_json_list(vis_ops, weight_ops,
                                                    weight_values)
            activation_json_base = get_activation_json_list(
                vis_ops, activation_ops, activation_values, input_values,
                sample_y, class_names)

            dataset_name = args.dataset

            outdir = args.json
            structure_filename = os.path.join(outdir, "model_structure.json")
            weight_filename = os.path.join(
                outdir, "model_weights_epoch{0:03d}.json".format(i))
            activation_filename = os.path.join(
                outdir, "model_activations_epoch{0:03d}.json".format(i))
            if not os.path.exists(outdir):
                os.makedirs(outdir)

            with open(structure_filename, "w") as outfile:
                try:
                    json.dump(structure_json_base, outfile)
                except Exception as e:
                    print(e)

            with open(weight_filename, "w") as outfile:
                try:
                    json.dump(weight_json_base, outfile)
                except Exception as e:
                    print(e)

            with open(activation_filename, "w") as outfile:
                try:
                    json.dump(activation_json_base, outfile)
                except Exception as e:
                    print(e)
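The helper get_checkpoint_files is not shown in this snippet. A minimal sketch consistent with how it is used above -- returning the sorted step indices plus a map from index to .meta filename, and assuming checkpoints named like model.ckpt-<step>.meta as written by tf.train.Saver with global_step -- could be:

import os
import re


def get_checkpoint_files(ckpt_dir):
    # Hypothetical helper, not the original implementation: collect
    # '<anything>.ckpt-<step>.meta' files and index them by step.
    meta_dict = {}
    for fn in os.listdir(ckpt_dir):
        m = re.match(r'.*\.ckpt-(\d+)\.meta$', fn)
        if m:
            meta_dict[int(m.group(1))] = fn
    return sorted(meta_dict), meta_dict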
Code example #3
indices = np.arange(num_examples)

print("input created")


input_train, input_test, output_train, output_test, indices_train, indices_test = \
    train_test_split(input_data, output_data, indices, test_size=0.3, shuffle=False)

input_validation, input_test, output_validation, output_test, indices_validation, indices_test = \
    train_test_split(input_test, output_test, indices_test, test_size=0.5, shuffle=False)

np.save('./data/input_test.npy', input_test)
np.save('./data/output_test.npy', output_test)

print("before CNN")
model = CNN_Model(num_samples, input_volume=3).get_model()
print("after CNN")

# es_callback = EarlyStopping(monitor='val_loss',patience=10,restore_best_weights=True)

history = model.fit(
    input_train,
    output_train,
    epochs=num_epochs,
    verbose=1,
    validation_data=(input_validation,
                     output_validation))  #, callbacks=[es_callback])

save_model_to_json(model, json_file_name)

# summarize history for loss
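# The plotting code this comment refers to is truncated here. A typical
# continuation (a sketch, not the original code) plots the Keras History
# object returned by model.fit():
import matplotlib.pyplot as plt

plt.plot(history.history['loss'], label='train')
plt.plot(history.history['val_loss'], label='validation')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.savefig('loss_history.png')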
Code example #4
def main(argv=None):

    if argv is None:
        argv = sys.argv
    else:
        sys.argv.extend(argv)

    program_name = os.path.basename(sys.argv[0])
    program_usage = '''View or solve book embedding
USAGE
'''
    try:
        # Setup argument parser
        parser = ArgumentParser(description=program_usage,
                                formatter_class=RawDescriptionHelpFormatter)
        parser.add_argument('input', metavar='input-file', type=str, nargs=1)
        parser.add_argument('output',
                            metavar='output-file',
                            type=str,
                            nargs='?')
        parser.add_argument("-ir",
                            "--irace",
                            type=bool,
                            dest="irace",
                            action="store",
                            default=False)
        parser.add_argument("-s",
                            "--seed",
                            type=int,
                            dest="seed",
                            action="store",
                            default=50)
        parser.add_argument("-e",
                            "--epochs",
                            type=int,
                            dest="epochs",
                            action="store",
                            default=50)
        parser.add_argument("-lr",
                            "--learning_rate",
                            type=float,
                            dest="learning_rate",
                            action="store",
                            default=0.0001)
        parser.add_argument("-bs",
                            "--batch_size",
                            type=int,
                            dest="batch_size",
                            action="store",
                            default="256")
        parser.add_argument("-cgs",
                            "--cnn_group_size",
                            type=int,
                            dest="cnn_group_size",
                            action="store",
                            default=2)
        parser.add_argument("-cgn",
                            "--cnn_group_num",
                            type=int,
                            dest="cnn_group_num",
                            action="store",
                            default=2)
        parser.add_argument("-cbf",
                            "--cnn_base_filters",
                            dest="cnn_base_filters",
                            action="store",
                            default="64")
        parser.add_argument("-dln",
                            "--dense_layer_num",
                            type=int,
                            dest="dense_layer_num",
                            action="store",
                            default=1)
        parser.add_argument("-dlu",
                            "--dense_layer_units",
                            type=int,
                            dest="dense_layer_units",
                            action="store",
                            default="256")
        parser.add_argument("-do",
                            "--dropout",
                            type=float,
                            dest="dropout",
                            action="store",
                            default=0.2)
        parser.add_argument("-ds",
                            "--dataset",
                            type=str,
                            dest="dataset",
                            action="store",
                            default='cifar10')

        args = parser.parse_args()

    except Exception as e:
        # The original raised here, leaving the error report unreachable.
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help\n")
        return 2

    irace = args.irace

    model = CNN_Model("cnn_model",
                      cnn_group_num=args.cnn_group_num,
                      cnn_group_size=args.cnn_group_size,
                      cnn_base_filters=int(args.cnn_base_filters),
                      dense_layer_num=args.dense_layer_num,
                      dense_layer_units=int(args.dense_layer_units),
                      dropout=args.dropout)
    x, y, x_test, y_test, width, _ = load_dataset(args.dataset)

    x_inference = tf.placeholder(tf.uint8, [None, width, width, 1], name="X")
    y_inference = tf.placeholder(tf.uint8, [None], name="Y")

    EPOCHS = args.epochs
    BATCH_SIZE = args.batch_size

    batch_size = BATCH_SIZE

    dataset_train = tf.data.Dataset.from_tensor_slices(
        (x, y)).repeat().shuffle(buffer_size=500).batch(batch_size)
    dataset_test = tf.data.Dataset.from_tensor_slices(
        (x_test, y_test)).repeat().shuffle(buffer_size=500).batch(batch_size)

    n_batches = x.shape[0] // BATCH_SIZE

    iterator = tf.data.Iterator.from_structure(dataset_train.output_types,
                                               dataset_train.output_shapes)

    features, labels = iterator.get_next()

    logits = model.inference(features)
    test_logits = model.inference(features, False)

    loss = model.loss(logits, labels)
    loss_test = model.loss(test_logits, labels)

    train_accuracy = model.accuracy(logits, labels)
    test_accuracy = model.accuracy(test_logits, labels)

    loss_var = tf.Variable(0.0)
    acc_var = tf.Variable(0.0)

    s1 = tf.summary.scalar("loss", loss_var)
    s2 = tf.summary.scalar("accuracy", acc_var)

    summary = tf.summary.merge_all()

    train_op = tf.train.AdamOptimizer(
        learning_rate=args.learning_rate).minimize(loss)

    train_init_op = iterator.make_initializer(dataset_train)
    test_init_op = iterator.make_initializer(dataset_test, name="test_init_op")

    with tf.Session() as sess:
        train_writer = tf.summary.FileWriter('summaries' + '/train')
        test_writer = tf.summary.FileWriter('summaries' + '/test')

        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=None)
        saver.save(sess, "ckpt/model.ckpt", global_step=0)
        for i in range(EPOCHS):
            sess.run(train_init_op)
            train_loss = 0
            train_acc = 0
            for _ in range(n_batches):
                _, loss_value, tr_acc = sess.run(
                    [train_op, loss, train_accuracy])

                train_loss += loss_value
                train_acc += tr_acc

            sess.run(test_init_op)
            test_loss, test_acc = sess.run([loss_test, test_accuracy])

            train_writer.add_summary(
                sess.run(
                    summary, {
                        loss_var: train_loss / n_batches,
                        acc_var: train_acc / n_batches
                    }), i)
            test_writer.add_summary(
                sess.run(summary, {
                    loss_var: test_loss,
                    acc_var: test_acc
                }), i)

            saver.save(sess, "ckpt/model.ckpt", global_step=i + 1)

            if irace:
                pass
            else:
                print("Epoch: {}, Train loss: {:.4f}, Test loss: {:.4f}, Train accuracy: {:.4f}, Test accuracy: {:.4f} "\
                  .format(i,train_loss/n_batches, test_loss, train_acc/n_batches, test_acc))

        print(test_loss)
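Like get_checkpoint_files above, load_dataset is defined elsewhere. A minimal sketch for the cifar10 default, matching the (x, y, x_test, y_test, width, class_names) signature used in these examples, might look like the following. Note the channel handling is an assumption: the placeholders above declare single-channel input, while CIFAR-10 images are RGB.

import tensorflow as tf


def load_dataset(name):
    # Hypothetical stand-in for the missing load_dataset helper.
    if name == 'cifar10':
        (x, y), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
        class_names = ['airplane', 'automobile', 'bird', 'cat', 'deer',
                       'dog', 'frog', 'horse', 'ship', 'truck']
        width = x.shape[1]  # 32
        return x, y.flatten(), x_test, y_test.flatten(), width, class_names
    raise ValueError("unknown dataset: {}".format(name))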
Code example #5
# dev_sample_index = -1 * int(0.1 * float(len(y)))
# x_train, x_dev = x[:dev_sample_index], x[dev_sample_index:]
x_dev = cnn_preprocessing.convert2vec(x_dev, sequence_length,
                                      cnn_preprocessing.model)
# y_train, y_dev = y[:dev_sample_index], y[dev_sample_index:]
y_dev_true = np.argmax(y_dev, 1)

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        cnn = CNN_Model(
            sequence_length,
            num_classes,
            embedding_size,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)

        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
                # tf.histogram_summary was removed in TF 1.0
                grad_hist_summary = tf.summary.histogram(
                    "{}/grad/hist".format(v.name), g)
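                # The snippet is cut off above. A typical continuation of
                # this loop (a sketch, not the original code) also records
                # gradient sparsity and collects the summaries for merging:
                sparsity_summary = tf.summary.scalar(
                    "{}/grad/sparsity".format(v.name),
                    tf.nn.zero_fraction(g))
                grad_summaries.append(grad_hist_summary)
                grad_summaries.append(sparsity_summary)
        grad_summaries_merged = tf.summary.merge(grad_summaries)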
Code example #6
File: train.py  Project: thomasaimondy/vaa3d_tools
def train():

    log_dir = TESTDIR + "/log"
    save_dir = TESTDIR + "/save"
    data_dir = TESTDIR + "/data"

    # feature_size = 21
    feature_size = 19
    sequence_size = 10
    num_classes = 9

    batch_size = 1000
    num_epoches = 10000
    dropout_keep_prob = 0.5
    l2_reg_lambda = 0.01
    learning_rate = 1e-4
    save_every = 100  # save every 100 batches.

    data_loader = DataLoader(data_dir=data_dir,
                             batch_size=batch_size,
                             sequence_size=sequence_size,
                             feature_size=feature_size,
                             mode="train")
    model = CNN_Model(num_classes=num_classes,
                      filter_sizes=[3, 4, 5],
                      sequence_size=sequence_size,
                      feature_size=feature_size,
                      num_filters=20)

    with tf.Session() as sess:
        summaries = tf.summary.merge_all()
        writer = tf.summary.FileWriter(
            os.path.join(log_dir, time.strftime("%Y-%m-%d-%H-%M-%S")))
        writer.add_graph(sess.graph)

        sess.run(tf.global_variables_initializer())
        saver = tf.train.Saver(tf.global_variables())

        # ckpt = tf.train.latest_checkpoint(save_dir)
        # if ckpt is not None:
        #     saver.restore(sess, ckpt)
        for e in range(num_epoches):
            sess.run(tf.assign(model.lr, learning_rate))
            sess.run(tf.assign(model.dropout_keep_prob, dropout_keep_prob))
            sess.run(tf.assign(model.l2_reg_lambda, l2_reg_lambda))
            data_loader.reset_batch()
            for b in range(data_loader.num_batches):
                start = time.time()
                x, y = data_loader.next_batch()
                feed = {model.input_x: x, model.input_y: y}

                summ, loss, accuracy, _ = sess.run(
                    [summaries, model.loss, model.accuracy, model.train_op],
                    feed)
                writer.add_summary(summ, e * data_loader.num_batches + b)

                end = time.time()
                print(
                    "{}/{} (epoch {}), train_loss = {:.3f}, accuracy = {:.3f}, time/batch = {:.3f}"
                    .format(e * data_loader.num_batches + b,
                            num_epoches * data_loader.num_batches, e, loss,
                            accuracy, end - start))
                if (e * data_loader.num_batches + b) % save_every == 0 \
                        or (e == num_epoches - 1 and
                            b == data_loader.num_batches - 1):
                    # save for the last result
                    checkpoint_path = os.path.join(save_dir, 'model.ckpt')
                    saver.save(sess,
                               checkpoint_path,
                               global_step=e * data_loader.num_batches + b)
                    print("model saved to {}".format(checkpoint_path))