Code example #1
def run_analysis(model_name):
    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)
    if not os.path.exists(LC_OUT_DIR):
        os.makedirs(LC_OUT_DIR)

    tf.reset_default_graph()

    # computational graph
    img_batch = tf.placeholder(tf.float32,
                               shape=[None, 28 * 28],
                               name='img_batch')
    out = model.cnn(img_batch)
    probabilities = out.get('probabilities')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(
            sess, "./" + MODEL_DIR + os.sep + "model_" + model_name + ".ckpt")
        a = data.get_class_sample(4)
        b = data.get_class_sample(2)
        result = sess.run(probabilities,
                          feed_dict={img_batch: linear_combinations(a, b, 50)})
        log_csv(result, './' + LC_OUT_DIR + '/four2two.csv')
        scipy.misc.imsave('./' + LC_OUT_DIR + '/four.png',
                          np.reshape(a, [28, 28]))
        scipy.misc.imsave('./' + LC_OUT_DIR + '/two.png',
                          np.reshape(b, [28, 28]))
        scipy.misc.imsave('./' + LC_OUT_DIR + '/four2two.png', result)
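The `linear_combinations` helper is not shown in this snippet; judging from the call `linear_combinations(a, b, 50)` and the placeholder shape, it presumably interpolates between the two flattened digit images in 50 steps. A minimal sketch of such a helper, as an assumption rather than the project's actual code:

import numpy as np

def linear_combinations(a, b, steps):
    # Hypothetical helper: linearly interpolate between two flattened 28*28
    # images, returning a batch of `steps` images morphing a into b.
    weights = np.linspace(0.0, 1.0, steps)
    return np.stack([(1.0 - w) * a + w * b for w in weights])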
Code example #2
File: main.py Project: victorialin898/petfinder
def main():
    # Sets up the train and test directories according to flow_from_directory. Aborts if they are already present
    create_sets(ARGS.data, train_ratio=0.9)

    # Our datasets here
    datasets = Datasets(ARGS.data)

    model = cnn()
    checkpoint_path = "./your_model_checkpoints/"

    if ARGS.load_checkpoint is not None:
        model.load_weights(ARGS.load_checkpoint)

    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)

    # Compile model graph
    model.compile(optimizer='sgd',
                  loss='sparse_categorical_crossentropy',
                  metrics=["sparse_categorical_accuracy"])

    if ARGS.evaluate:
        test(model, datasets)
    else:
        train(model, datasets, checkpoint_path)
Code example #3
def run_analysis(model_name, xs, xs_adv):
    assert (len(xs) == len(xs_adv))
    sample_count = len(xs)

    if not os.path.exists(MODEL_DIR):
        os.makedirs(MODEL_DIR)

    tf.reset_default_graph()

    # computational graph
    img_batch = tf.placeholder(tf.float32,
                               shape=[None, 28 * 28],
                               name='img_batch')
    out = model.cnn(img_batch)
    layers = out.get('layers')
    layerwise_perturbation_mean = get_perturbation(layers, sample_count)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(
            sess, "./" + MODEL_DIR + os.sep + "model_" + model_name + ".ckpt")
        layerwise_perturbation_mean_val = sess.run(
            layerwise_perturbation_mean,
            feed_dict={img_batch: get_input(xs, xs_adv)})
        print layerwise_perturbation_mean_val
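Neither `get_input` nor `get_perturbation` is included above. Since the clean samples `xs` and their adversarial counterparts `xs_adv` must pass through the same graph so that per-layer activations can be compared, `get_input` presumably stacks both sets into one batch. A sketch of that assumption (not the project's code):

import numpy as np

def get_input(xs, xs_adv):
    # Hypothetical helper: clean samples first, adversarial samples second,
    # fed through the network as a single batch of 2 * sample_count rows.
    return np.concatenate([np.asarray(xs), np.asarray(xs_adv)], axis=0)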
Code example #4
def evaluate(data_dir, weights_path, labels_file):
    labels = []
    with open(labels_file, 'r') as fp:
        labels = [line.strip() for line in fp.readlines()]

    model = tf.keras.Sequential([
        cnn(trainable=False),
        tf.keras.layers.Dense(len(labels),
                              trainable=False,
                              activation='softmax')
    ])
    model.build([None, *IMAGE_SIZE, 3])
    model.summary()
    model.load_weights(weights_path)
    model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.CategoricalAccuracy()])

    evaluation_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1. / 255)
    evaluation_data = evaluation_datagen.flow_from_directory(
        os.path.join(data_dir, 'validation'),
        target_size=(299, 299),
        classes=labels,
        batch_size=1)
    result = model.evaluate(evaluation_data)
    print(result)
Code example #5
File: main.py Project: xingjunxia725/deepLearning
def mnist():

    sess = tf.Session()
    x = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1])
    with tf.variable_scope("reg"):
        pred_reg, variables_reg = regression(x)
    saver = tf.train.Saver(variables_reg)
    saver.restore(sess, "my_net/reg_net.ckpt")

    with tf.variable_scope("cnn"):
        keep_prob = tf.placeholder(dtype=tf.float32)
        pred_cnn, variables_cnn = cnn(x, keep_prob)
    saver = tf.train.Saver(variables_cnn)
    saver.restore(sess, "my_net/cnn_net.ckpt")

    def calc_reg(input):
        return sess.run(pred_reg, feed_dict={x: input}).flatten().tolist()

    def calc_cnn(input):
        return sess.run(pred_cnn, feed_dict={
            x: input,
            keep_prob: 1
        }).flatten().tolist()

    input = ((255 - np.array(request.json, dtype=np.uint8)) / 255.0).reshape(
        1, 28, 28, 1)
    output1 = calc_reg(input)
    print(output1)
    output2 = calc_cnn(input)
    print(output2)
    sess.close()
    return jsonify(results=[output1, output2])
Code example #6
def get_attack_batch(model_name, count):
    if not os.path.exists(FGSM_DIR):
        os.makedirs(FGSM_DIR)

    tf.reset_default_graph()

    # computational graph
    img_batch = tf.placeholder(tf.float32,
                               shape=[None, 28 * 28],
                               name='img_batch')
    label_batch = tf.placeholder(tf.float32,
                                 shape=[None, 10],
                                 name='labels_batch')
    out = model.cnn(img_batch)
    logits = out.get('logits')
    probabilities = out.get('probabilities')
    loss = model.loss(label_batch, logits)

    img_batch_val, label_batch_val = data.get_test_batch(count)
    classes_batch_val = np.argmax(label_batch_val, axis=1)

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(
            sess, pp + MODEL_DIR + os.sep + 'model_' + model_name + '.ckpt')

        gradients = tf.gradients(loss, img_batch)
        grad_vals, probabilities_val = sess.run([gradients, probabilities],
                                                feed_dict={
                                                    img_batch: img_batch_val,
                                                    label_batch:
                                                    label_batch_val
                                                })

        grad_vals_sign = np.sign(grad_vals[0]) * 1.0 / 255.
        assigned_classes = np.argmax(probabilities_val, axis=1)

        original_images = []
        successful_attacks = []

        for i, grad in enumerate(grad_vals_sign):
            if assigned_classes[i] != classes_batch_val[i]:
                # skip samples that were misclassified even before the attack
                continue
            epss = np.arange(0., 100., 1)  # epsilon values
            attacks = [img_batch_val[i] + grad * 1 * x for x in epss]
            attacks = np.clip(attacks, 0, 1)  # clip image pixels to [0,1]

            # run classification on attacks
            probabilities_val = sess.run(probabilities,
                                         feed_dict={img_batch: attacks})

            best_attack = get_first_successful(probabilities_val, attacks)
            if best_attack is not None:
                successful_attacks.append(best_attack)
                original_images.append(img_batch_val[i])

    log_attacks(original_images, successful_attacks)
    return original_images, successful_attacks
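`get_first_successful` is not part of the snippet. Since the epsilon-0 entry of `attacks` is the unperturbed image, a plausible reading (an assumption, not the project's code) is that the helper returns the first attack whose predicted class differs from that reference:

import numpy as np

def get_first_successful(probabilities_val, attacks):
    # Hypothetical helper: the eps=0 row is the unmodified image, so its
    # argmax serves as the reference class.
    original_class = np.argmax(probabilities_val[0])
    for probs, attack in zip(probabilities_val[1:], attacks[1:]):
        if np.argmax(probs) != original_class:
            return attack  # first perturbation that flips the prediction
    return None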
Code example #7
def fgsm(model_name):
    """
    Loads the given model and computes an adversarial example using FGSM (fast gradient sign method).
    Instead of computing the gradient of the loss, we use the element of the probability output vector that
    corresponds to the sample class.
    """
    if not os.path.exists(FGSM_DIR):
        os.makedirs(FGSM_DIR)

    tf.reset_default_graph()

    # computational graph
    img_batch = tf.placeholder(tf.float32,
                               shape=[None, 28 * 28],
                               name='img_batch')
    out = model.cnn(img_batch)
    probabilities = out.get('probabilities')

    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(
            sess, pp + MODEL_DIR + os.sep + 'model_' + model_name + '.ckpt')
        mnist_class = 0
        a = data.get_class_sample(mnist_class)

        # compute partial y / partial x
        # how does the specific class prediction depend on every single input
        gradients = tf.gradients(probabilities[:, mnist_class], img_batch)

        grad_vals = sess.run(gradients, feed_dict={img_batch:
                                                   [a]})[0]  # unpack batch
        grad_vals = grad_vals[0]  # first class (selection above)
        grad_vals_sign = np.sign(
            grad_vals) * 1.0 / 255.  # could be used for the sign method
        scipy.misc.imsave(pp + FGSM_DIR + '/grad.png',
                          np.reshape(np.abs(grad_vals), [28, 28]))
        scipy.misc.imsave(pp + FGSM_DIR + '/grad_sign.png',
                          np.reshape(grad_vals_sign, [28, 28]))

        # scaling of the attack vector
        epss = np.arange(0., 50., 1)  # epsilon values
        attacks = [a - grad_vals_sign * 1 * x for x in epss]
        attacks = np.clip(attacks, 0, 1)  # clip image pixels to [0,1]

        # compute probabilities for the attack images
        probabilities_out = sess.run(probabilities,
                                     feed_dict={img_batch: attacks})

        for i in range(len(probabilities_out)):
            log_output(i, grad_vals_sign * epss[i], attacks[i])

        # log classes
        scipy.misc.imsave(pp + FGSM_DIR + '/probabilities.png',
                          probabilities_out)
        print probabilities_out
        print np.argmax(probabilities_out, axis=1)
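The per-class gradient described in the docstring can also be computed eagerly on TF 2.x, without building a static graph. A minimal sketch, assuming a Keras `model` whose output is the probability vector (an illustration, not part of the project above):

import numpy as np
import tensorflow as tf

def class_gradient(model, image, target_class):
    # d probabilities[target_class] / d input, evaluated at `image`
    x = tf.convert_to_tensor(image[np.newaxis, :], dtype=tf.float32)
    with tf.GradientTape() as tape:
        tape.watch(x)
        prob = model(x)[:, target_class]
    return tape.gradient(prob, x).numpy()[0]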
Code example #8
File: train.py Project: sugyan/image-dataset
def train(data_dir, weights_dir, labels_file):
    os.makedirs(weights_dir, exist_ok=True)
    labels = []
    with open(labels_file, 'r') as fp:
        labels = [line.strip() for line in fp.readlines()]

    model = tf.keras.Sequential([
        cnn(),
        tf.keras.layers.Dropout(rate=0.1),
        tf.keras.layers.Dense(
            len(labels),
            activation='softmax',
            kernel_regularizer=tf.keras.regularizers.l2(1e-4)),
    ])
    model.build([None, *IMAGE_SIZE, 3])
    model.summary()
    model.compile(optimizer=tf.keras.optimizers.RMSprop(),
                  loss=tf.keras.losses.CategoricalCrossentropy(),
                  metrics=[tf.keras.metrics.CategoricalAccuracy()])

    training_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rotation_range=2,
        width_shift_range=2,
        height_shift_range=2,
        brightness_range=(0.8, 1.2),
        channel_shift_range=0.2,
        zoom_range=0.02,
        rescale=1. / 255)
    training_data = training_datagen.flow_from_directory(
        os.path.join(data_dir, 'training'),
        target_size=(299, 299),
        classes=labels,
        batch_size=BATCH_SIZE)
    validation_datagen = tf.keras.preprocessing.image.ImageDataGenerator(
        rescale=1. / 255)
    validation_data = validation_datagen.flow_from_directory(
        os.path.join(data_dir, 'validation'),
        target_size=(299, 299),
        classes=labels,
        batch_size=BATCH_SIZE)

    tf.keras.backend.clear_session()
    history = model.fit(
        training_data,
        epochs=100,
        validation_data=validation_data,
        callbacks=[
            tf.keras.callbacks.TensorBoard(),
            tf.keras.callbacks.ModelCheckpoint(os.path.join(
                weights_dir, 'finetuning_weights-{epoch:02d}.h5'),
                                               save_weights_only=True),
        ])
    print(history.history)
    model.trainable = False
    model.save('finetuning_classifier.h5')
Code example #9
File: simple_cnn.py Project: ryanshin712/Study
def build_eval_graph(x, y):
    losses = {}
    logit = model.cnn(x,
                      is_training=False,
                      update_batch_stats=False,
                      stochastic=True,
                      seed=FLAGS.seed)
    loss = ce_loss(logit, y)
    losses['loss'] = loss
    acc = accuracy(logit, y)
    losses['accuracy'] = acc
    return losses
Code example #10
def setup_graph():
    graph_params = {}
    graph_params['graph'] = tf.Graph()
    with graph_params['graph'].as_default():
        model_params = model.params()
        graph_params['target_image'] = tf.placeholder(
            tf.float32,
            shape=(1, model.IMG_HEIGHT, model.IMG_WIDTH, model.IMG_CHANNELS))
        logits = model.cnn(graph_params['target_image'],
                           model_params,
                           keep_prob=1.0)
        graph_params['pred'] = tf.nn.softmax(logits)
        graph_params['saver'] = tf.train.Saver()
    return graph_params
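A typical way to consume the returned `graph_params` dictionary is to open a session on its graph, restore a checkpoint, and run the `pred` tensor. A usage sketch; the checkpoint path and the dummy input are assumptions, not taken from this project:

import numpy as np
import tensorflow as tf

graph_params = setup_graph()
test_image = np.zeros((1, model.IMG_HEIGHT, model.IMG_WIDTH, model.IMG_CHANNELS),
                      dtype=np.float32)  # stand-in for a real preprocessed image
with tf.Session(graph=graph_params['graph']) as sess:
    graph_params['saver'].restore(sess, 'models/deep_logo_model')  # guessed path
    probabilities = sess.run(
        graph_params['pred'],
        feed_dict={graph_params['target_image']: test_image})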
Code example #11
def train():
    # computational graph
    img_batch = tf.placeholder(tf.float32,
                               shape=[None, 28 * 28],
                               name='img_batch')
    labels_batch = tf.placeholder(tf.float32,
                                  shape=[None, 10],
                                  name='labels_batch')
    out = model.cnn(img_batch)
    logits = out.get('logits')
    probabilities = out.get('probabilities')
    loss = model.loss(labels_batch, logits)
    optimizer = tf.train.AdamOptimizer(LEARNING_RATE).minimize(loss)

    # tensor board logging
    tf.summary.image('input',
                     tf.reshape(img_batch, [-1, 28, 28, 1]),
                     max_outputs=4)
    summary_merged = tf.summary.merge_all()

    # add ops to save and restore all the variables
    saver = tf.train.Saver()

    # init
    init_op = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init_op)
    if not os.path.exists(LOG_PATH):
        os.makedirs(LOG_PATH)
    log_writer = tf.summary.FileWriter(LOG_PATH + os.sep + MODEL_NAME,
                                       sess.graph)

    for step in range(STEPS):
        print(step)
        img_batch_val, labels_batch_val = data.mnist.train.next_batch(
            BATCH_SIZE)
        _, summary, _ = sess.run([optimizer, summary_merged, probabilities],
                                 feed_dict={
                                     img_batch: img_batch_val,
                                     labels_batch: labels_batch_val
                                 })

        if step % 10 == 0:
            log_writer.add_summary(summary, step)

    print("completed gradient descend")

    save_path = saver.save(sess, MODEL_DIR + "/model_" + MODEL_NAME + ".ckpt")
    print("model saved at %s" % save_path)
Code example #12
File: simple_cnn.py Project: ryanshin712/Study
def build_training_graph(x, y, lr, mom):
    global_step = tf.get_variable(name="global_step",
                                  shape=[],
                                  dtype=tf.float32,
                                  initializer=tf.constant_initializer(0.0),
                                  trainable=False)
    logit = model.cnn(x,
                      is_training=True,
                      update_batch_stats=True,
                      stochastic=True,
                      seed=FLAGS.seed)
    loss = ce_loss(logit, y)

    opt = tf.train.AdamOptimizer(learning_rate=lr, beta1=mom)
    tvars = tf.trainable_variables()
    grads_and_vars = opt.compute_gradients(loss, tvars)
    train_op = opt.apply_gradients(grads_and_vars, global_step=global_step)
    return loss, train_op, global_step
Code example #13
    def __init__(self, model_name, checkpoint_dir):
        self.graph = tf.Graph()  # create a separate graph for each instance individually
        self.model_name = model_name

        with self.graph.as_default():
            self.x = tf.placeholder(tf.float32, [None, 784])
            self.keep_prob = tf.placeholder(tf.float32)

            if self.model_name == 'regression':
                self.output = model.regression(self.x)
            else:
                self.output = model.cnn(self.x, self.keep_prob)
            self.saver = tf.train.Saver()

        self.sess = tf.Session(graph=self.graph)
        with self.graph.as_default():
            ckpt = tf.train.get_checkpoint_state(checkpoint_dir)
            if ckpt and ckpt.model_checkpoint_path:
                self.saver.restore(self.sess, ckpt.model_checkpoint_path)
Code example #14
File: train.py Project: benedictquartey/RollE
def train_model():
    x_train, x_test, y_train, y_test = data_processing.prepareData()

    # import neural network model architecture
    model = NNmodel.cnn()
    # display model architecture
    model.summary()

    # code to train model on data

    # callback function executed after every training epoch; it only saves the trained model
    # if its validation mean_squared_error is lower than that of the previous epoch
    interimModelPoint = ModelCheckpoint('model-{epoch:03d}.h5',
                                        monitor='val_loss',
                                        verbose=0,
                                        save_best_only=True,
                                        mode='auto')

    # define cost function type to be mean_squared_error
    model.compile(loss='mean_squared_error',
                  optimizer=Adam(lr=learningRate),
                  metrics=['accuracy'])

    # train model
    model_history = model.fit_generator(
        generator=batch_generator(x_train, y_train, batch_size, "train"),
        steps_per_epoch=epoch_steps,
        epochs=epochNum,
        max_queue_size=1,
        validation_data=batch_generator(x_test, y_test, batch_size,
                                        "validate"),
        validation_steps=len(x_test),
        callbacks=[interimModelPoint],
        verbose=1)

    plot.mean_square_error(model_history)
    plot.model_accuracy(model)

    # print history with all loss and accuracy values
    print(model_history.history)
Code example #15
File: train.py Project: pat-coady/speech-command
def main(config):
    run_logger = None
    if config['azure_ml']:
        run_logger = init_azure_logging(config)
    if config['tall_kernel']:
        model = tall_kernel(config)
    else:
        model = cnn(config)
    with tf.device('/cpu:0'):  # put data pipeline on CPU
        ds_train = build_dataset(config, 'train')
        ds_val = build_dataset(config, 'val')
    loss = tf.losses.SparseCategoricalCrossentropy(from_logits=True)
    optimizer = get_optimizer(config)
    model.compile(loss=loss, optimizer=optimizer, metrics=['accuracy'])
    add_logpaths(config)
    save_state(config)
    callbacks = build_callbacks(config, run_logger)
    model.fit(x=ds_train,
              validation_data=ds_val,
              epochs=config['epochs'],
              callbacks=callbacks,
              validation_steps=90,
              verbose=config['verbose'])
Code example #16
File: predict.py Project: sugyan/image-dataset
def predict(data_dir, weights_path, labels_file):
    labels = []
    with open(labels_file, 'r') as fp:
        labels = [line.strip() for line in fp.readlines()]

    model = tf.keras.Sequential([
        cnn(trainable=False),
        tf.keras.layers.Dense(len(labels),
                              trainable=False,
                              activation='softmax')
    ])
    model.build([None, *IMAGE_SIZE, 3])
    model.summary()
    model.load_weights(weights_path)

    with open('results.tsv', 'w') as fp:
        writer = csv.writer(fp, delimiter='\t')
        for root, dirs, files in os.walk(os.path.join(data_dir, 'validation')):
            if not files:
                continue
            class_name = os.path.basename(root)
            if class_name not in labels:
                continue
            label = labels.index(class_name)
            for filename in files:
                image = tf.io.decode_jpeg(
                    tf.io.gfile.GFile(os.path.join(root, filename),
                                      'rb').read())
                images = tf.expand_dims(tf.image.convert_image_dtype(
                    image, dtype=tf.float32),
                                        axis=0)
                result = int(model.predict(images).argmax())
                writer.writerow([
                    os.path.abspath(os.path.join(root, filename)), label,
                    result
                ])
Code example #17
import tensorflow as tf 
import input_data
import model 
import os

os.environ['CUDA_VISIBLE_DEVICES'] = '0'

mnist = input_data.read_data_sets('mnist_data/', one_hot=True)

x = tf.placeholder(tf.float32, [None, 784])
x_image = tf.reshape(x, [-1, 28, 28, 1])
y_ = tf.placeholder(tf.float32, [None, 10])
keep_prob = tf.placeholder(tf.float32)
y = model.cnn(x_image, keep_prob)


cross_entropy = -tf.reduce_sum(y_ * tf.log(y))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

saver = tf.train.Saver()

with tf.Session() as sess:
    merged_summary_op = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter('./checkpoints', sess.graph)
    summary_writer.add_graph(sess.graph)

    sess.run(tf.global_variables_initializer())
    for i in range(10000):
        batch = mnist.train.next_batch(50)
Code example #18
def main():
    if len(sys.argv) > 1:
        test_image_fn = sys.argv[1]
        if not os.path.exists(test_image_fn):
            print("Not found:", test_image_fn)
            sys.exit(-1)
    else:
        # Select a test image from a test directory
        test_dirs = [
            os.path.join(common.CROPPED_AUG_IMAGE_DIR, class_name, 'test')
            for class_name in common.CLASS_NAME
        ]
        test_dir = np.random.choice(test_dirs)
        test_images_fn = [test_image for test_image in os.listdir(test_dir)]
        test_image_fn = np.random.choice(test_images_fn, 1)[0]
        test_image_fn = os.path.join(test_dir, test_image_fn)
    print("Test image:", test_image_fn)

    # Open and resize a test image
    if common.CNN_IN_CH == 1:
        test_image_org = skimage.io.imread(test_image_fn, as_grey=True)
        test_image_org = test_image_org.reshape(common.CNN_IN_HEIGHT,
                                                common.CNN_IN_WIDTH,
                                                common.CNN_IN_CH)
    else:
        test_image_org = skimage.io.imread(test_image_fn)
    if test_image_org.shape != (common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH,
                                common.CNN_IN_CH):
        test_image_org = imresize(test_image_org,
                                  (common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH),
                                  interp='bicubic')
    test_image_org = preprocess.scaling(test_image_org)
    test_image = test_image_org.reshape(
        (1, common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH,
         common.CNN_IN_CH)).astype(np.float32)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # restore weights
        f = "weights.npz"
        if os.path.exists(f):
            initial_weights = load_initial_weights(f)
        else:
            initial_weights = None

        if initial_weights is not None:
            assert len(initial_weights) == len(model_params)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # A placeholder for a test image
        tf_test_image = tf.constant(test_image)

        # model
        logits = model.cnn(tf_test_image, model_params, keep_prob=1.0)
        test_pred = tf.nn.softmax(logits)

        # Restore ops
        saver = tf.train.Saver()

    # Recognize a brand logo of test image
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned weights')
        elif os.path.exists("models"):
            save_path = "models/deep_logo_model"
            saver.restore(session, save_path)
            print('Model restored')
        else:
            print('initialized')
        pred = session.run([test_pred])
        print("Class name:", common.CLASS_NAME[np.argmax(pred)])
        print("Probability:", np.max(pred))
Code example #19
def init_cnn(vocab_size, embed_size):
    mdl = model.cnn(vocab_size, embed_size)

    return mdl
Code example #20
                                                    y_data,
                                                    test_size=0.2)

batch_size = 54
learning_rate = 0.001
n_epoch = 50
n_samples = len(X_data)  # change to 1000 for entire dataset
cv_split = 0.8
train_size = int(n_samples * cv_split)
test_size = n_samples - train_size

#model
with tf.variable_scope("convolutional"):
    X = tf.placeholder("float", [None, 96, 1366, 1])
    phase_train = tf.placeholder(tf.bool, name='phase_train')
    y_, weights = model.cnn(X, phase_train)

#train
y = tf.placeholder("float", [None, 10])
lrate = tf.placeholder("float")
cost = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits(logits=y_, labels=y))
train_op = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)
predict_op = y_

saver = tf.train.Saver(weights)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(n_epoch):
        training_batch = zip(range(0, len(X_train), batch_size),
                             range(batch_size,
Code example #21
from keras import optimizers
from utils import output_performance, generate_figures, get_args

args = get_args()

(x_train, y_train), (x_test,
                     y_test) = imdb.load_data(path="imdb.npz",
                                              num_words=args.vocab_size,
                                              maxlen=args.maxLen)
x_train = sequence.pad_sequences(x_train, maxlen=args.maxLen)
x_test = sequence.pad_sequences(x_test, maxlen=args.maxLen)

model = cnn(vocab_size=args.vocab_size,
            maxLen=args.maxLen,
            kernel_size=args.kernel_size,
            embedding_dim=args.embed,
            hidden_dim=args.hidden,
            output_dim=args.output,
            keep_prob=args.keep)

model.compile(optimizer=optimizers.Adam(lr=args.lr),
              loss='binary_crossentropy',
              metrics=['accuracy'])

print(model.summary())
history = model.fit(x_train,
                    y_train,
                    validation_split=args.val_split,
                    batch_size=args.batch,
                    epochs=args.epochs,
                    callbacks=[EarlyStopping(monitor='val_loss', patience=10)])
Code example #22
File: replay.py Project: lotka/autofaces
import matplotlib
matplotlib.use('Agg')
from expman import PyExp
import sys
from os.path import join
from model import cnn
import disfa
path = sys.argv[1]
# Load yaml config file
config = PyExp(config_file=join(path,'config.yaml'),make_new=False)
# Load data
data = disfa.Disfa(config['data'])
config.update('data',data.config,save=False)

# Load model
model = cnn(config)

x = model['x']
y_ = model['y']
train_step = model['train_step']
loss = model['loss']
y_conv = model['y_conv']
output_dim = model['output_dim']
keep_prob = model['keep_prob']
lmsq_loss = model['classifer_lmsq_loss']
cross_entropy = model['cross_entropy']
accuracy = model['accuracy']
alpha = model['alpha']
auto_loss = model['auto_loss']
mask = model['mask']
N = config['iterations']
Code example #23
def train(filepath):
    """Train model to estimate power.

    Args:
        filepath (str): Path to training set.

    """

    MODELS_PATH.mkdir(parents=True, exist_ok=True)

    # Load parameters
    params = yaml.safe_load(open("params.yaml"))["train"]
    net = params["net"]

    # Load training set
    train = np.load(filepath)

    X_train = train["X"]
    y_train = train["y"]

    n_features = X_train.shape[-1]

    # Create sample weights
    sample_weights = np.ones_like(y_train)

    if params["weigh_samples"]:
        sample_weights[y_train > params["weight_thresh"]] = params["weight"]

    hist_size = X_train.shape[-2]

    # hypermodel = DeepPowerHyperModel(hist_size, n_features)

    # # hp = HyperParameters()
    # # hp.Choice("num_layers", values=[1, 2])
    # # hp.Fixed("kernel_size", value=4)
    # # hp.Fixed("kernel_size_0", value=4)

    # tuner = Hyperband(
    #         hypermodel,
    #         # hyperparameters=hp,
    #         # tune_new_entries=True,
    #         objective="val_loss",
    #         # max_trials=10,
    #         # min_epochs=20,
    #         max_epochs=50,
    #         executions_per_trial=2,
    #         directory="model_tuning",
    #         project_name="DeepPower"
    # )

    # tuner.search_space_summary()

    # tuner.search(
    #     X_train, y_train,
    #     epochs=params["n_epochs"],
    #     batch_size=params["batch_size"],
    #     validation_split=0.2,
    #     sample_weight=sample_weights
    # )

    # tuner.results_summary()

    # # best_hyperparameters = tuner.get_best_hyperparameters(1)[0]

    # # model = tuner.hypermodel.build(best_hyperparameters)

    # # print(model.summary())

    # # history = model.fit(
    # #     X_train, y_train,
    # #     epochs=params["n_epochs"],
    # #     batch_size=params["batch_size"],
    # #     validation_split=0.2,
    # #     sample_weight=sample_weights
    # # )

    # model = tuner.get_best_models()[0]

    # print(model.summary())

    # model.save(MODELS_FILE_PATH)

    # Build model
    if net == "cnn":
        hist_size = X_train.shape[-2]
        model = cnn(hist_size, n_features, kernel_size=params["kernel_size"])
    elif net == "dnn":
        model = dnn(n_features)
    elif net == "lstm":
        pass
    elif net == "cnndnn":
        pass

    print(model.summary())

    # Save a plot of the model
    plot_model(model,
               to_file=PLOTS_PATH / 'model.png',
               show_shapes=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=True,
               dpi=96)

    history = model.fit(X_train,
                        y_train,
                        epochs=params["n_epochs"],
                        batch_size=params["batch_size"],
                        validation_split=0.2,
                        sample_weight=sample_weights)

    model.save(MODELS_FILE_PATH)

    TRAININGLOSS_PLOT_PATH.parent.mkdir(parents=True, exist_ok=True)

    loss = history.history['loss']
    val_loss = history.history['val_loss']

    n_epochs = range(len(loss))

    plt.figure()
    plt.plot(n_epochs, loss, label="Training loss")
    plt.plot(n_epochs, val_loss, label="Validation loss")
    plt.legend()
    plt.savefig(TRAININGLOSS_PLOT_PATH)
Code example #24
parser.add_argument('--batch', type=int, default=-1)
parser.add_argument('--early_stop', type=int, default=10)

parser.add_argument('--prefix', type=str, default='sensitivity')
parser.add_argument('--save_dir', type=str, default='./')
parser.add_argument('--result_dir', type=str, default='./')
parser.add_argument('--result_file', type=str, default=None)
args = parser.parse_args()

# import mnist data
mnist = input_data.read_data_sets('./MNIST/', one_hot=True)

# construct model
X = tf.placeholder(tf.float32, [None, 784])
Y = tf.placeholder(tf.float32, [None, 10])
out = cnn(X, 'cnn', 32)

loss = tf.losses.softmax_cross_entropy(Y, out)
if args.opt == 'sgd':
    train_step = tf.train.GradientDescentOptimizer(args.lr).minimize(loss)
elif args.opt == 'adam':
    train_step = tf.train.AdamOptimizer(args.lr).minimize(loss)

jacobian = []
for i in range(out.shape[1]):
    jacobian.append(tf.gradients(out[:, i], X)[0])

# start training
init = tf.global_variables_initializer()

var = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='cnn')
Code example #25
def main():
    if len(sys.argv) > 1:
        f = np.load(sys.argv[1])

        # f.files has unordered keys ['arr_8', 'arr_9', 'arr_6'...]
        # sort the keys by their numeric suffix
        initial_weights = [
            f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))
        ]
    else:
        initial_weights = None

    # read input data
    dataset, labels = read_data()

    train_dataset, train_labels = reformat(dataset[0], labels[0])
    valid_dataset, valid_labels = reformat(dataset[1], labels[1])
    test_dataset, test_labels = reformat(dataset[2], labels[2])
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Valid set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # Initial weights
        if initial_weights is not None:
            assert len(model_params) == len(initial_weights)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # Input data
        tf_train_dataset = tf.placeholder(
            tf.float32,
            shape=(FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels))
        tf_train_labels = tf.placeholder(tf.float32,
                                         shape=(FLAGS.batch_size,
                                                model.NUM_CLASSES))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        # Training computation
        logits = model.cnn(tf_train_dataset, model_params, keep_prob=0.5)
        with tf.name_scope('loss'):
            loss = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf_train_labels))
            tf.summary.scalar('loss', loss)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            model.cnn(tf_valid_dataset, model_params, keep_prob=1.0))
        test_prediction = tf.nn.softmax(
            model.cnn(tf_test_dataset, model_params, keep_prob=1.0))
        # Merge all summaries
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/train')

        # Add ops to save and restore all the variables
        saver = tf.train.Saver()

    # Do training
    cpu_count = 1
    config = tf.ConfigProto(intra_op_parallelism_threads=cpu_count,
                            inter_op_parallelism_threads=cpu_count,
                            allow_soft_placement=True,
                            device_count={"CPU": cpu_count})
    with tf.Session(graph=graph, config=config) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned values')
        else:
            print('initialized')
        for step in range(FLAGS.max_steps):
            offset = (step * FLAGS.batch_size) % (train_labels.shape[0] -
                                                  FLAGS.batch_size)
            batch_data = train_dataset[offset:(offset +
                                               FLAGS.batch_size), :, :, :]
            batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            try:
                _, l, predictions = session.run(
                    [optimizer, loss, train_prediction], feed_dict=feed_dict)
                if step % 50 == 0:
                    summary, _ = session.run([merged, optimizer],
                                             feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    print('Minibatch loss at step %d: %f' % (step, l))
                    print('Minibatch accuracy: %.1f%%' %
                          accuracy(predictions, batch_labels))
                    print('Validation accuracy: %.1f%%' %
                          accuracy(valid_prediction.eval(), valid_labels))
            except KeyboardInterrupt:
                last_weights = [p.eval() for p in model_params]
                np.savez("weights.npz", *last_weights)
                return last_weights

        print('Test accuracy: %.1f%%' %
              accuracy(test_prediction.eval(), test_labels))

        # Save the variables to disk.
        save_dir = "models"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "deep_logo_model")
        saved = saver.save(session, save_path)
        print("Model saved in file: %s" % saved)
Code example #26
train_indices = np.random.choice(train_indices,
                                 size=int(np.floor(len(train_indices) * .7)))

# Hyperparameter
learning_rate = float(args.lr)
max_epoch = int(args.maxepoch)
momentum = 0.1
k = int(args.k)

if args.model == 'feedforward':
    model = feedforward()
elif args.model == 'feedforward_50':
    model = feedforward_50()
else:
    model = cnn()
model.to(device)

optimizer = optim.Adadelta(model.parameters(),
                           lr=learning_rate,
                           rho=0.99,
                           eps=1.0e-8)
criterion = nn.MSELoss()

best_params = cross_val(train_indices, optimizer, criterion, model, data,
                        batch_size, k, max_epoch)
model.load_state_dict(best_params)
# if args.wandb: wandb.watch(model)

# for epoch in trange(0, max_epoch, total=max_epoch, initial=0):
#     model.train()
Code example #27
def main():
    if len(sys.argv) > 1:
        f = np.load(sys.argv[1])

        # f.files has unordered keys ['arr_8', 'arr_9', 'arr_6'...]
        # sort the keys by their numeric suffix
        initial_weights = [
            f[n] for n in sorted(f.files, key=lambda s: int(s[4:]))
        ]
    else:
        initial_weights = None

    # read input data
    dataset, labels = read_data()

    train_dataset, train_labels = reformat(dataset[0], labels[0])
    valid_dataset, valid_labels = reformat(dataset[1], labels[1])
    test_dataset, test_labels = reformat(dataset[2], labels[2])
    print('Training set', train_dataset.shape, train_labels.shape)
    print('Valid set', valid_dataset.shape, valid_labels.shape)
    print('Test set', test_dataset.shape, test_labels.shape)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # Initial weights
        if initial_weights is not None:
            assert len(model_params) == len(initial_weights)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # Input data
        tf_train_dataset = tf.placeholder(
            tf.float32,
            shape=(FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width,
                   FLAGS.num_channels))
        tf_train_labels = tf.placeholder(
            tf.float32, shape=(FLAGS.batch_size, model.NUM_CLASSES))
        tf_valid_dataset = tf.constant(valid_dataset)
        tf_test_dataset = tf.constant(test_dataset)

        # Training computation
        logits = model.cnn(tf_train_dataset, model_params, keep_prob=0.5)
        with tf.name_scope('loss'):
            loss = tf.reduce_sum(
                tf.nn.softmax_cross_entropy_with_logits(
                    logits=logits, labels=tf_train_labels))
            tf.summary.scalar('loss', loss)
        optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate).minimize(loss)

        # Predictions for the training, validation, and test data
        train_prediction = tf.nn.softmax(logits)
        valid_prediction = tf.nn.softmax(
            model.cnn(tf_valid_dataset, model_params, keep_prob=1.0))
        test_prediction = tf.nn.softmax(
            model.cnn(tf_test_dataset, model_params, keep_prob=1.0))
        # Merge all summaries
        merged = tf.summary.merge_all()
        train_writer = tf.summary.FileWriter(FLAGS.train_dir + '/train')

        # Add ops to save and restore all the variables
        saver = tf.train.Saver()

    # Do training
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned values')
        else:
            print('initialized')
        for step in range(FLAGS.max_steps):
            offset = (step * FLAGS.batch_size) % (
                train_labels.shape[0] - FLAGS.batch_size)
            batch_data = train_dataset[offset:(offset + FLAGS.batch_size
                                               ), :, :, :]
            batch_labels = train_labels[offset:(offset + FLAGS.batch_size), :]
            feed_dict = {
                tf_train_dataset: batch_data,
                tf_train_labels: batch_labels
            }
            try:
                _, l, predictions = session.run(
                    [optimizer, loss, train_prediction], feed_dict=feed_dict)
                if step % 50 == 0:
                    summary, _ = session.run(
                        [merged, optimizer], feed_dict=feed_dict)
                    train_writer.add_summary(summary, step)
                    print('Minibatch loss at step %d: %f' % (step, l))
                    print('Minibatch accuracy: %.1f%%' % accuracy(
                        predictions, batch_labels))
                    print('Validation accuracy: %.1f%%' % accuracy(
                        valid_prediction.eval(), valid_labels))
            except KeyboardInterrupt:
                last_weights = [p.eval() for p in model_params]
                np.savez("weights.npz", *last_weights)
                return last_weights

        print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(),
                                                 test_labels))

        # Save the variables to disk.
        save_dir = "models"
        if not os.path.exists(save_dir):
            os.makedirs(save_dir)
        save_path = os.path.join(save_dir, "deep_logo_model")
        saved = saver.save(session, save_path)
        print("Model saved in file: %s" % saved)
Code example #28
File: test_set_analysis.py Project: lotka/autofaces
def test_model(name,data,config,path):


    if socket.gethostname() == 'ux305':
        sess = tf.Session()
    else:
        tensorflow_config = tf.ConfigProto(allow_soft_placement=True)
        tensorflow_config.gpu_options.allow_growth=True
        sess = tf.Session(config=tensorflow_config)
    # Load model
    nBatches = int(500)
    model = cnn(config,train=False)

    saver = tf.train.Saver()

    sess.run(tf.initialize_all_variables())
    # Here's where you're restoring the variables w and b.
    # Note that the graph is exactly as it was when the variables were
    # saved in a prior training run.
    print join(path,'models/'+name)
    ckpt = tf.train.get_checkpoint_state(join(path,'models/'+name))
    if ckpt and ckpt.model_checkpoint_path:
        print ckpt.model_checkpoint_path
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        raise Exception('Can\'t find checkpoint!')

    x = model['x']
    y_ = model['y']
    y_conv = model['y_conv']
    output_dim = model['output_dim']
    keep_prob = model['keep_prob']
    alpha = model['alpha']
    auto_loss = model['auto_loss']
    batch_size = int(model['batch_size'])
    y_image = model['y_image']
    mask = model['mask']

    def make_mask_batch(mean):
        mask_image = (mean > 50).astype(float)
        return np.expand_dims(np.array([mask_image for i in xrange(batch_size)]),axis=3)

    mask_batch = make_mask_batch(data.validation.mean_image)


    thresholds = 20
    padding = 0.1
    threshold_values = np.linspace(-padding, thresholds+padding, thresholds)/float(thresholds)
    test_threshold_data = np.zeros((thresholds,output_dim,4))
    accuracy_data = np.zeros(thresholds)

    nBatches = int(float(data.validation.labels.shape[0])/float(batch_size))
    y_out = np.zeros(data.validation.labels.shape)
    autoencoder_loss = 0.0
    true_autoencoder_loss = 0.0

    a = data.validation.next_batch(batch_size)[0]
    b = sess.run(model['y_image'],feed_dict={x: data.validation.next_batch(batch_size)[0], keep_prob: 1.0,alpha: 1.0, mask : mask_batch})[:,:,:,0]
    validation_images = (data.validation.inverse_process(a),data.validation.inverse_process(b))
    # validation_images = (a,b)

    a = data.train.next_batch(batch_size)[0]
    b = sess.run(model['y_image'],feed_dict={x: data.train.next_batch(batch_size)[0],keep_prob: 1.0, alpha: 1.0, mask : mask_batch})[:,:,:,0]
    train_images = (data.train.inverse_process(a),data.train.inverse_process(b))
    # train_images = (a,b)

    print 'VALIDATION: Feeding validation set through network..'
    true_losses = np.array([])
    for i in tqdm(xrange(nBatches)):
        l = batch_size*i
        r = batch_size*(i+1)
        current_batch = data.validation.images[l:r]

        y_out[l:r] = sess.run(y_conv, feed_dict={x: current_batch, keep_prob : 1.0, alpha : 0.0, mask : mask_batch})
        autoencoder_loss += sess.run(auto_loss, feed_dict={x: current_batch, mask : mask_batch, keep_prob: 1.0})/float(nBatches)
        # calculate true loss
        _in  = data.validation.images[l:r,:,:]
        _out = sess.run(y_image, feed_dict={x: current_batch, mask : mask_batch, keep_prob: 1.0})[:,:,:,0]
        true_losses = np.append(true_losses,data.validation.true_loss(_in,_out,base=l))



    idx_big   = get_n_idx_biggest(true_losses, batch_size)
    idx_small = get_n_idx_smallest(true_losses, batch_size)
    idx_mean  = get_n_idx_near_mean(true_losses, batch_size)
    print 'idx_big'
    print idx_big
    print 'idx_small'
    print idx_small
    print 'idx_mean'
    print idx_mean
    assert len(idx_big) == batch_size
    assert len(idx_small) == batch_size
    assert len(idx_mean) == batch_size

    i_big   = data.validation.images[idx_big,   :,:]
    i_small = data.validation.images[idx_small, :, :]
    i_mean  = data.validation.images[idx_mean,  :, :]
    print data.validation.idx_interesting
    idx_interesting = list(data.validation.idx_interesting)
    print idx_interesting
    i_interesting = data.validation.images[idx_interesting,:,:]

    t_big = data.validation.labels[idx_big,:]
    t_small = data.validation.labels[idx_small,:]
    t_mean = data.validation.labels[idx_mean,:]
    t_interesting = data.validation.labels[idx_interesting,:]

    if hasattr(data.validation, 'images_original'):
        i_big_original   = data.validation.images_original[idx_big,   :,:]
        i_small_original = data.validation.images_original[idx_small, :, :]
        i_mean_original  = data.validation.images_original[idx_mean,  :, :]
        i_interesting_original  = data.validation.images_original[idx_interesting,  :, :]
    else:
        # Just so we don't always have to have these images
        i_big_original   = np.zeros(i_mean.shape)
        i_small_original = np.zeros(i_mean.shape)
        i_mean_original  = np.zeros(i_mean.shape)
        i_interesting_original = np.zeros(i_mean.shape)

    o_big,   p_big   = sess.run([model['y_image'],model['y_conv']], feed_dict={x: i_big,   keep_prob: 1.0, alpha: 1.0, mask : mask_batch})
    o_small, p_small = sess.run([model['y_image'],model['y_conv']], feed_dict={x: i_small, keep_prob: 1.0, alpha: 1.0, mask : mask_batch})
    o_mean,  p_mean  = sess.run([model['y_image'],model['y_conv']], feed_dict={x: i_mean,  keep_prob: 1.0, alpha: 1.0, mask : mask_batch})
    o_interesting,  p_interesting  = sess.run([model['y_image'],model['y_conv']], feed_dict={x: i_interesting,  keep_prob: 1.0, alpha: 1.0, mask : mask_batch})

    o_big   = o_big[:, :, :, 0]
    o_small = o_small[:, :, :, 0]
    o_mean  = o_mean[:, :, :, 0]
    o_interesting = o_interesting[:,:,:,0]

    i_big       = (i_big_original, i_big.copy(),       data.validation.inverse_process(i_big.copy(),idx=idx_big))
    i_small     = (i_small_original, i_small.copy(),     data.validation.inverse_process(i_small.copy(),idx=idx_small))
    i_mean      = (i_mean_original, i_mean.copy(),      data.validation.inverse_process(i_mean.copy(),idx=idx_mean))
    i_interesting      = (i_interesting_original, i_interesting.copy(),      data.validation.inverse_process(i_interesting.copy(),idx=idx_interesting))
    label_big   = data.validation.labels[idx_big,:]
    label_small = data.validation.labels[idx_small,:]
    label_mean  = data.validation.labels[idx_mean,:]
    label_interesting = data.validation.labels[idx_interesting,:]
    o_big       = (o_big.copy()     ,  data.validation.inverse_process(o_big.copy(),idx=idx_big))
    o_small     = (o_small.copy()   ,  data.validation.inverse_process(o_small.copy(),idx=idx_small))
    o_mean      = (o_mean.copy()    ,  data.validation.inverse_process(o_mean.copy(),idx=idx_mean))
    o_interesting      = (o_interesting.copy()    ,  data.validation.inverse_process(o_interesting.copy(),idx=idx_interesting))

    true_autoencoder_loss = true_losses.mean()

    y_out = y_out[:nBatches*(batch_size+1),:]
    data.validation.labels = data.validation.labels[:nBatches*(batch_size+1),:]


    print 'VALIDATION: Comparing threshold values...'
    test_confusion = []
    test_roc_data = []
    for i in tqdm(xrange(thresholds)):
        results, confusion, roc_data, accuracy_data[i] = metric.multi_eval(y_out,
                                                         data.validation.labels,
                                                         threshold_values[i])
        test_confusion.append(confusion)
        test_roc_data.append(roc_data)
        test_threshold_data[i,:,:] = results

    ssv_path = join(path,'numerical_data')
    if not isdir(ssv_path):
        mkdir(ssv_path)
    print 'VALIDATION: Saving results..'
    np.savez_compressed(join(ssv_path,name+'_model_analysis.npz'),
             threshold_values=threshold_values,
             test_threshold_data=test_threshold_data,
             test_confusion=test_confusion,
             test_roc_data=test_roc_data,
             accuracy_data=accuracy_data,
             autoencoder_loss=autoencoder_loss,
             true_autoencoder_loss=true_autoencoder_loss,
             true_losses=true_losses,
             i_big=i_big,
             i_small=i_small,
             i_mean=i_mean,
             i_interesting=i_interesting,
             t_big=t_big,
             t_small=t_small,
             t_mean=t_mean,
             t_interesting=t_interesting,
             label_big=label_big,
             label_small=label_small,
             label_mean=label_mean,
             label_interesting=label_interesting,
             p_big=p_big,
             p_small=p_small,
             p_mean=p_mean,
             p_interesting=p_interesting,
             o_big=o_big,
             o_small=o_small,
             o_mean=o_mean,
             o_interesting=o_interesting,
             idx_small=idx_small,
             idx_big=idx_big,
             idx_mean=idx_mean,
             idx_interesting=idx_interesting,
             valid_subject_idx=data.validation.subject_idx,
             train_subject_idx=data.train.subject_idx,
             auto_images=(train_images,validation_images))

    plt.figure()
    v = np.load(join(ssv_path, name + '_model_analysis.npz'))['i_big'][0,0]
    plt.imshow(v)
    plt.colorbar()
    plt.show()

    sess.close()
Code example #29
# Creating dataset split
data_size = len(data)
indices = list(range(data_size))
split = int(np.floor(validation_split * data_size))
np.random.shuffle(indices)

models = []
models_param = []
for b in range(b_max):
    if args.model == 'feedforward':
        m = feedforward()
    elif args.model == 'feedforward_50':
        m = feedforward_50()
    else:
        m = cnn()
    m.load_state_dict(torch.load(f'{saved_model_path}/model-{b}.pth'))
    # m.parameters(require_grads=False)
    m.to(device)
    if args.trainable_bag:
        m.train()
        models_param += list(m.parameters())
    else:
        m.eval()
    models += [m]

aggregate = MyEnsemble(models, b_max)
aggregate.to(device)
if args.trainable_bag:
    optimizer = optim.Adadelta(list(aggregate.parameters()) + models_param,
                               lr=learning_rate,
Code example #30
File: tagger.py Project: dohyungp/emosic
    return '.' in filename and \
           filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS


TYPES = [
    '밝은', '행복한', '펑키한', '격정적인', '어두운', '조용한', '영감을 주는', '화난', '슬픈', '로맨틱한'
]

GENRE = ['힙합', '컨트리', '재즈', '팝', '락', '댄스', '클래식']

x = tf.placeholder("float", [None, 96, 1366, 1])
sess = tf.Session()
phase_train = tf.placeholder(tf.bool, name='phase_train')

with tf.variable_scope("convolutional"):
    y1, variables = cnn(x, phase_train)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(variables)
    saver.restore(sess, "emosic_model/mood/model.ckpt")

with tf.variable_scope("genre_convolutional"):
    y2, variables = genre_cnn(x, phase_train)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(variables)
    saver.restore(sess, "emosic_model/genre/model.ckpt")


def convolutional(input):
    return sess.run(y1, feed_dict={
        x: input,
        phase_train: True
Code example #31
# hyper-parameters
epochs = 40
report_every = 16
conv = [3,32,64]
fc = [300,100]
dropout_rate = 0.2

n_classes = int(sys.argv[1])	# Number of celebs
n_images = int(sys.argv[2])		# Number of training images per celeb
total_images = int(sys.argv[3]) # Total number of images
size = 100
batch_size = 20

# return normalized dataset divided into two sets
model = model.cnn(size, conv, fc, n_classes, dropout_rate)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adagrad(model.parameters(), lr=0.005)

def train(model, optim, db):

	for epoch in range(1, epochs+1):

		train_loader = torch.utils.data.DataLoader(db['train'],batch_size=batch_size, shuffle=True)

		# Update (Train)
		model.train()
		for batch_idx, (data, target) in enumerate(train_loader):

			data, target = Variable(data), Variable(target)
			optimizer.zero_grad()
Code example #32
                                                  batch_size=batch_size,
                                                  target_size=(img_width,
                                                               img_height),
                                                  classes=label)

image, label = next(train_data)
X_train, X_test, y_train, y_test = train_test_split(image,
                                                    label,
                                                    test_size=0.2,
                                                    random_state=42)

num_label = y_train.shape[1]

print(num_label)

model = cnn(num_label)

#### model CNN #####

print("training started")
with tf.Session() as sess:
    init_op = tf.compat.v1.global_variables_initializer()
    sess.run(init_op)
    saver = tf.compat.v1.train.Saver(tf.global_variables())

    # tf.summary.text("text", b)

    # tf.summary.histogram("weights", weight)
    # tf.summary.histogram("fc1", model.fc)
    # tf.summary.histogram("fc2", model.fc3)
    # tf.summary.histogram("fc3", model.fc3)
Code example #33
def main():
    if len(sys.argv) > 1:
        test_image_fn = sys.argv[1]
        if not os.path.exists(test_image_fn):
            print("Not found:", test_image_fn)
            sys.exit(-1)
    else:
        # Select a test image from a test directory
        test_dirs = [
            os.path.join(common.CROPPED_AUG_IMAGE_DIR, class_name, 'test')
            for class_name in common.CLASS_NAME
        ]
        test_dir = np.random.choice(test_dirs)
        test_images_fn = [test_image for test_image in os.listdir(test_dir)]
        test_image_fn = np.random.choice(test_images_fn, 1)[0]
        test_image_fn = os.path.join(test_dir, test_image_fn)
    print("Test image:", test_image_fn)

    # Open and resize a test image
    if common.CNN_IN_CH == 1:
        test_image_org = skimage.io.imread(test_image_fn, as_grey=True)
        test_image_org = test_image_org.reshape(
            common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH, common.CNN_IN_CH)
    else:
        test_image_org = skimage.io.imread(test_image_fn)
    if test_image_org.shape != (common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH,
                                common.CNN_IN_CH):
        test_image_org = imresize(
            test_image_org, (common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH),
            interp='bicubic')
    test_image_org = preprocess.scaling(test_image_org)
    test_image = test_image_org.reshape(
        (1, common.CNN_IN_HEIGHT, common.CNN_IN_WIDTH,
         common.CNN_IN_CH)).astype(np.float32)

    # Training model
    graph = tf.Graph()
    with graph.as_default():
        # Weights and biases
        model_params = model.params()

        # restore weights
        f = "weights.npz"
        if os.path.exists(f):
            initial_weights = load_initial_weights(f)
        else:
            initial_weights = None

        if initial_weights is not None:
            assert len(initial_weights) == len(model_params)
            assign_ops = [
                w.assign(v) for w, v in zip(model_params, initial_weights)
            ]

        # A placeholder for a test image
        tf_test_image = tf.constant(test_image)

        # model
        logits = model.cnn(tf_test_image, model_params, keep_prob=1.0)
        test_pred = tf.nn.softmax(logits)

        # Restore ops
        saver = tf.train.Saver()

    # Recognize a brand logo of test image
    with tf.Session(graph=graph) as session:
        tf.global_variables_initializer().run()
        if initial_weights is not None:
            session.run(assign_ops)
            print('initialized by pre-learned weights')
        elif os.path.exists("models"):
            save_path = "models/deep_logo_model"
            saver.restore(session, save_path)
            print('Model restored')
        else:
            print('initialized')
        pred = session.run([test_pred])
        print("Class name:", common.CLASS_NAME[np.argmax(pred)])
        print("Probability:", np.max(pred))
Code example #34
File: train_model.py Project: pmpaquet/Kaggle_EEG
def main():
    global BATCH_SIZE, BATCH_LAG, NUM_CHANNELS

    features = tf.placeholder(tf.float32,
                              shape=[None, NUM_CHANNELS, BATCH_LAG, 1],
                              name="features")
    labels = tf.placeholder(tf.float32, shape=[None, 6], name="labels")

    logits = cnn(features)

    auc = tf.metrics.auc(labels=labels, predictions=logits)
    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

    optimizer = tf.train.AdamOptimizer(learning_rate=0.00001)
    train_op = optimizer.minimize(loss=loss,
                                  global_step=tf.train.get_global_step())

    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        print("in main function")
        sess.run(tf.local_variables_initializer())
        #writer = tf.summary.FileWriter('logs', sess.graph)
        print("local variables initialized")
        sess.run(tf.global_variables_initializer())
        print("global variables initialized")

        #saver, save_path = utils.restore(sess, get('cnn.checkpoint'))
        eeg_data = EEG_Dataset(batch_size=BATCH_SIZE, batch_lag=BATCH_LAG)
        print("Calling train cnn")

        #train_cnn(features, labels, loss, train_op, auc, eeg_data)
        for subj in range(1, 13):
            eeg_data.load_training_data(sub=subj)
            for batch_index in range(0, eeg_data.get_total_batches()):
                #print("Running Batch: ", batch_index)
                # Run one step of training
                batch_features, batch_labels = eeg_data.get_batch()

                #print("-----DEBUG-----")
                #print(batch_features.shape)
                #print(batch_labels.shape)
                sess.run(train_op,
                         feed_dict={
                             features: list(batch_features),
                             labels: list(batch_labels)
                         })

                if batch_index % 100 == 0:
                    batch_auc, batch_loss = sess.run([auc, loss],
                                                     feed_dict={
                                                         features:
                                                         batch_features,
                                                         labels: batch_labels
                                                     })
                    log_training(batch_index, batch_loss, batch_auc)

        # test
        eeg_data.load_testing_data()
        test_auc, test_loss = sess.run([auc, loss],
                                       feed_dict={
                                           features: test_features,
                                           labels: test_labels
                                       })
        print('----------TESTING DATA--------------')
        print('\tCross entropy test loss: {}'.format(test_loss))
        print('\tAUC: {}'.format(test_auc))