Example #1
def learn(train_states,
          test_states,
          model,
          learning_rate=.0001,
          save_every=10,
          batch_size=2048,
          hidden_size=2048,
          dropout=.5,
          epochs=500,
          print_every=1,
          model_dir='.',
          perceptron=False,
          mem_ratio=.95):

    data = read_data(train_states, test_states)

    model_name = (
        '''trust_classifier_epochs_{}_batch_{}_learning_rate_{}'''.format(
            epochs, batch_size, learning_rate))

    if perceptron:
        model_name = '{}_perceptron.pb'.format(model_name)
    else:
        model_name = '{}_dropout_{}_hidden_size_{}.pb'.format(
            model_name, dropout, hidden_size)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        load_graph(model)
        transfer_predictor = sess.graph.get_tensor_by_name('output:0')

        # Relabel the data: the new inputs are the transfer model's outputs,
        # and the new targets record whether its prediction was correct.
        data.train._X = np.vstack([
            sess.run(transfer_predictor, {'input:0': chunk})
            for chunk in chunks(data.train.X, 10)
        ])
        answer = tf.equal(tf.argmax(data.train.X, 1),
                          tf.argmax(data.train.Y, 1))
        data.train._Y = tf.one_hot(tf.to_int32(answer), depth=2).eval()

        data.test._X = sess.run(transfer_predictor, {'input:0': data.test.X})
        answer = tf.equal(tf.argmax(data.test.X, 1), tf.argmax(data.test.Y, 1))
        data.test._Y = tf.one_hot(tf.to_int32(answer), depth=2).eval()

        x = tf.placeholder('float',
                           shape=[None, data.train.X_features],
                           name='input_b')
        y_ = tf.placeholder('float',
                            shape=[None, data.train.Y_features],
                            name='target')

        if perceptron:
            W = weight_variable([data.train.X_features, data.train.Y_features],
                                name='weights')
            b = bias_variable([data.train.Y_features], name='bias')

            logits = tf.matmul(x, W) + b
        else:
            W_in = weight_variable([data.train.X_features, hidden_size],
                                   name='weights_in')
            b_in = bias_variable([hidden_size], name='bias_in')

            hidden = tf.matmul(x, W_in) + b_in
            relu = tf.nn.relu(hidden)

            keep_prob = tf.placeholder_with_default([1.], shape=None)
            hidden_dropout = tf.nn.dropout(relu, keep_prob)

            W_out = weight_variable([hidden_size, data.train.Y_features],
                                    name='weights_out')
            b_out = bias_variable([data.train.Y_features], name='bias_out')

            # Use the dropout output here; otherwise keep_prob has no effect.
            logits = tf.matmul(hidden_dropout, W_out) + b_out

        # Loss & train
        # Keyword args: the positional order changed across TF versions.
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=y_)
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(
            cross_entropy)

        # Evaluation
        y = tf.nn.softmax(logits, name='output_b')
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

        sess.run(tf.initialize_all_variables())

        last_epoch = 0

        t_epoch = time.time()
        while data.train.epoch <= epochs:
            epoch = data.train.epoch
            batch_x, batch_y = data.train.next_batch(batch_size)

            t_start = time.time()
            feed_dict = {
                x: batch_x,
                y_: batch_y
            } if perceptron else {
                x: batch_x,
                y_: batch_y,
                keep_prob: dropout
            }
            train_step.run(feed_dict=feed_dict)
            t_end = time.time() - t_start

            if epoch > last_epoch:

                if epoch % print_every == 0:
                    train_accuracy_mean = accuracy.eval(feed_dict={
                        x: batch_x,
                        y_: batch_y
                    })

                    validation_accuracy_mean = accuracy.eval(feed_dict={
                        x: data.test.X,
                        y_: data.test.Y
                    })

                    print(
                        '''Epoch {} train accuracy: {}, test accuracy: {}. '''
                        '''{} states/sec, {} secs/epoch.'''.format(
                            epoch, train_accuracy_mean,
                            validation_accuracy_mean, batch_size / t_end,
                            time.time() - t_epoch))
                if epoch % save_every == 0 or epoch == epochs:
                    output_graph_def = graph_util.convert_variables_to_constants(
                        sess, sess.graph.as_graph_def(),
                        ['input_b', 'output_b'])

                    # 'wb': SerializeToString() returns bytes.
                    with gfile.FastGFile(os.path.join(model_dir, model_name),
                                         'wb') as f:
                        f.write(output_graph_def.SerializeToString())

                t_epoch = time.time()
                last_epoch = epoch

        print('Trained model saved to {}'.format(
            os.path.join(model_dir, model_name)))
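
All six snippets lean on imports and helpers defined elsewhere in their repositories (read_data, weight_variable, bias_variable, load_graph, chunks, and later flow and pf). Those definitions are not shown on this page; below is a minimal sketch of plausible versions, modeled on the TF 0.x MNIST tutorial. The names and signatures match the calls above, but the bodies are assumptions.

# Assumed imports and helpers -- a sketch only; the real definitions live in
# the examples' repositories and may differ.
import os
import time
import threading

import numpy as np
import tensorflow as tf
from tensorflow.python.framework import graph_util
from tensorflow.python.platform import gfile


def weight_variable(shape, name=None):
    # Small random initial weights, as in the classic TF MNIST tutorial.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.1), name=name)


def bias_variable(shape, name=None):
    # Small positive bias so ReLU units start active.
    return tf.Variable(tf.constant(0.1, shape=shape), name=name)


def chunks(array, n):
    # Yield successive n-row pieces of `array`.
    for i in range(0, len(array), n):
        yield array[i:i + n]


def load_graph(path):
    # Import a frozen GraphDef (.pb) into the current default graph.
    with gfile.FastGFile(path, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        tf.import_graph_def(graph_def, name='')


def pf(value):
    # Pretty-print a float (a guess: the examples call pf() when logging).
    return '{:.3f}'.format(value)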
Example #2
def learn(train_states, test_states, learning_rate, save_every, batch_size, hidden_size, dropout, epochs,
          print_every, model_dir, perceptron, dense, lenet, filter_width, depth, mem_ratio,
          q_size, use_dask, in_memory, dask_chunksize):

    data = read_data(train_states, test_states, use_dask, in_memory, dask_chunksize)

    model_name = ('''transfer_classifier_epochs_{}_batch_{}_learning_rate_{}'''.format(
        epochs, batch_size, learning_rate))
    
    if perceptron:
        model_name = '{}_perceptron.pb'.format(model_name)
    if dense:
        model_name = '{}_dense_dropout_{}_hidden_size_{}.pb'.format(model_name, dropout, hidden_size)
    if lenet:
        model_name = '{}_lenet_dropout_{}_hidden_size_{}.pb'.format(model_name, dropout, hidden_size)
        
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        
        q_x_in = tf.placeholder(tf.float32, shape=[batch_size, data.train.X_features])
        q_y_in = tf.placeholder(tf.int32, shape=[batch_size])
        y_real = tf.placeholder(tf.int32, shape=[None])
        keep_prob = tf.placeholder_with_default([1.], shape=None)
        
        q = tf.FIFOQueue(q_size, [tf.float32, tf.int32],
                         shapes=[ q_x_in.get_shape(), q_y_in.get_shape()])

        enqueue_op = q.enqueue([q_x_in, q_y_in])
        q_x_out, q_y_out = q.dequeue()

        x = tf.placeholder_with_default(q_x_out, shape=[None, data.train.X_features], name='input')
        y_ = tf.placeholder_with_default(q_y_out, shape=[None])

        if perceptron:
            W = weight_variable([data.train.X_features, data.train.Y_features])
            b = bias_variable([data.train.Y_features])

            logits = tf.matmul(x, W) + b
        if dense:
            W_in = weight_variable([data.train.X_features, hidden_size])
            b_in = bias_variable([hidden_size])

            hidden = tf.matmul(x, W_in) + b_in
            relu = tf.nn.relu(hidden)
            hidden_dropout = tf.nn.dropout(relu, keep_prob)

            W_out = weight_variable([hidden_size,data.train.Y_features])
            b_out = bias_variable([data.train.Y_features])

            logits = tf.matmul(hidden_dropout, W_out) + b_out
        if lenet:
            w1 = weight_variable([1, filter_width, 1, depth])
            b1 = bias_variable([depth])
            x_4d = tf.expand_dims(tf.expand_dims(x, 1), -1)  # add singleton height and channel dims: (batch, 1, features, 1)
            conv = tf.nn.conv2d(x_4d, w1, strides=[1,1,1,1], padding='SAME')
            relu = tf.nn.relu(tf.nn.bias_add(conv, b1))
            pool = tf.nn.max_pool(relu, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')

            w2 = weight_variable([1, filter_width, depth, depth*2])
            b2 = bias_variable([depth*2])
            conv = tf.nn.conv2d(pool, w2, strides=[1,1,1,1], padding='SAME')
            relu = tf.nn.relu(tf.nn.bias_add(conv, b2))
            pool = tf.nn.max_pool(relu, ksize=[1, 1, 2, 1], strides=[1, 1, 2, 1], padding='SAME')

            pool_shape = tf.shape(pool)
            reshape = tf.reshape(pool,
                                 [pool_shape[0], pool_shape[1] * pool_shape[2] * pool_shape[3] ])

            w3 = weight_variable([(data.train.X_features // 4) * 2 * depth,
                                  hidden_size])  # // keeps the shape an int under Python 3
            b3 = bias_variable([hidden_size])
            hidden = tf.matmul(reshape, w3) + b3
            relu = tf.nn.relu(hidden)
            hidden_dropout = tf.nn.dropout(relu, keep_prob)

            w4 = weight_variable([hidden_size, data.train.Y_features])
            b4 = bias_variable([data.train.Y_features])
            logits = tf.matmul(hidden_dropout, w4) + b4

        # Loss & train
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=y_)
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

        # Evaluation
        y = tf.nn.softmax(logits) 
        train_correct_prediction = tf.equal(tf.to_int32(tf.argmax(y, 1)), y_)
        train_accuracy = tf.reduce_mean(tf.cast(train_correct_prediction, tf.float32))

        # Apply a fixed convolutional filter that folds subcategory scores
        # back into the original categories for testing.
        stride = [1,1,1,1]
        _filter = tf.constant(data.output_filter, dtype=tf.float32, shape=data.output_filter.shape)

        conv_in = tf.expand_dims(y, 0)
        conv_in = tf.expand_dims(conv_in,-1)
        conv_out = tf.nn.conv2d(conv_in, _filter, stride, 'VALID') # We don't want zero padding.
        back = tf.squeeze(conv_out, squeeze_dims=[0,2], name='output')

        test_correct_prediction = tf.equal(tf.to_int32(tf.argmax(back, 1)), y_real)
        test_accuracy = tf.reduce_mean(tf.cast(test_correct_prediction, tf.float32))
        
        sess.run(tf.initialize_all_variables())

        def load_data():
            try:
                while True:
                    next_x, next_y = data.train.next_batch(batch_size)
                    if next_x.shape[0] == batch_size:
                        sess.run(enqueue_op, feed_dict={q_x_in: next_x, q_y_in: next_y})
            except Exception as error:
                print(error)
                print('Stopped streaming of data.')
                
        data_thread = threading.Thread(target=load_data)
        data_thread.daemon = True
        data_thread.start()

        last_epoch = 0
        epoch = 0
        
        t_epoch = time.time()
        t_end = []
        i = 0 
        while epoch <= epochs:

            t_start = time.time()
            sess.run(train_step, feed_dict={keep_prob: dropout})
            t_end.append(time.time() - t_start)

            if epoch > last_epoch:

                if epoch % print_every == 0:
                    batch_x, batch_y = data.train.next_batch(batch_size)
                    
                    train_accuracy_mean = train_accuracy.eval(feed_dict={
                        x: batch_x,
                        y_: batch_y })

                    validation_accuracy_mean = np.mean([ test_accuracy.eval(feed_dict={x: t_x, y_real: t_y })
                                                         for t_x, t_y in zip(chunks(data.test.X, batch_size),
                                                                             chunks(data.test.Y, batch_size)) ])

                    print('''Epoch {} train accuracy: {}, test accuracy: {}. '''
                          '''{} states/sec on average, {} secs/epoch.'''.format(epoch, pf(train_accuracy_mean),
                                                                                pf(validation_accuracy_mean),
                                                                                pf(batch_size/np.mean(t_end)),
                                                                                pf(time.time() - t_epoch)))
                if epoch % save_every == 0 or epoch == epochs:
                    output_graph_def = graph_util.convert_variables_to_constants(
                        sess, sess.graph.as_graph_def(), ['input', 'output'])

                    with gfile.FastGFile(os.path.join(model_dir, model_name), 'wb') as f:
                        f.write(output_graph_def.SerializeToString())

                t_epoch = time.time()
                t_end = []
                last_epoch = epoch

            
            i += 1
                
            if batch_size*i > data.train.X_len:
                epoch += 1
                i = 0

        q.close(cancel_pending_enqueues=True)
        
        print('Trained model saved to {}'.format(os.path.join(model_dir, model_name)))
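
Unlike the other snippets, Example #2 feeds training batches through a tf.FIFOQueue filled by a background thread, so sess.run(train_step) never waits on a feed_dict. A stripped-down sketch of just that pattern, with toy shapes and random data standing in for the real reader:

# Minimal sketch of the queue-plus-feeder-thread input pipeline.
# Shapes and the random data source are placeholders for illustration.
import threading
import numpy as np
import tensorflow as tf

batch_size, n_features = 4, 8
q_x_in = tf.placeholder(tf.float32, shape=[batch_size, n_features])
q = tf.FIFOQueue(capacity=10, dtypes=[tf.float32], shapes=[q_x_in.get_shape()])
enqueue_op = q.enqueue([q_x_in])
x = q.dequeue()               # training ops read from here; no feed_dict needed
mean = tf.reduce_mean(x)

with tf.Session() as sess:
    def feeder():
        try:
            while True:
                sess.run(enqueue_op,
                         feed_dict={q_x_in: np.random.rand(batch_size, n_features)})
        except Exception:
            pass  # the queue was closed; stop streaming

    thread = threading.Thread(target=feeder)
    thread.daemon = True
    thread.start()

    for _ in range(3):
        print(sess.run(mean))  # each run dequeues one batch from the feeder

    q.close(cancel_pending_enqueues=True)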
Example #3
        '--in_memory',
        help='How many vectors to hold in memory when using dask.',
        type=int,
        default=200000)
    parser.add_argument('--gpu_memory_ratio',
                        help='How much memory to occupy on the GPU card',
                        type=float,
                        default=.95)
    parser.add_argument('--dask_chunksize',
                        help='The size of dask chunks on disk.',
                        type=int,
                        default=8 * 1024)

    args = parser.parse_args()

    data = read_data(args.train_states, args.test_states, args.use_dask,
                     args.in_memory, args.dask_chunksize)

    model_name = (
        '''transfer_classifier_epochs_{}_batch_{}_learning_rate_{}_'''.format(
            args.epochs, args.batch_size, args.learning_rate))

    if args.network == 'perceptron':
        model_name = '{}_perceptron.pb'.format(model_name)
    if args.network == 'dense':
        model_name = '{}_dense_dropout_{}_hidden_size_{}.pb'.format(
            model_name, args.dropout, args.hidden_size)
    if args.network == 'lenet':
        model_name = '{}_lenet_dropout_{}_hidden_size_{}.pb'.format(
            model_name, args.dropout, args.hidden_size)

    learn(data, model_name, args.learning_rate, args.save_every,
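
The viewer has truncated Example #3 at both ends: it opens inside an earlier parser.add_argument call and stops partway through the learn(...) invocation. The missing pieces are not reconstructed here; the sketch below only wraps the three fully visible flags in a self-contained parser to show how the fragment fits together (the ArgumentParser line itself is an assumption):

# Self-contained sketch of the visible argument definitions; the other
# arguments and the rest of the learn(...) call are elided in the original.
import argparse

parser = argparse.ArgumentParser()  # assumed; the real setup is not shown
parser.add_argument('--in_memory',
                    help='How many vectors to hold in memory when using dask.',
                    type=int,
                    default=200000)
parser.add_argument('--gpu_memory_ratio',
                    help='How much memory to occupy on the GPU card.',
                    type=float,
                    default=.95)
parser.add_argument('--dask_chunksize',
                    help='The size of dask chunks on disk.',
                    type=int,
                    default=8 * 1024)

args = parser.parse_args([])  # empty argv: inspect the defaults
print(args.in_memory, args.gpu_memory_ratio, args.dask_chunksize)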
Example #4
def learn(data_folder,
          experts,
          learning_rate=.001,
          train_ratio=.8,
          validation_ratio=.1,
          test_ratio=.1,
          save_every=10,
          batch_size=2048,
          hidden_size=1024,
          dropout=.5,
          epochs=500,
          print_every=1,
          model_dir='.',
          perceptron=False,
          mem_ratio=.95):

    # Compare with a tolerance: the ratios are floats and may not sum exactly.
    assert abs(train_ratio + validation_ratio + test_ratio - 1) < 1e-6, \
        'Train/validation/test ratios must sum to 1'

    data = read_data(data_folder, train_ratio, validation_ratio, test_ratio)

    model_name = (
        '''transfer_classifier_moe_epochs_{}_batch_{}_ratios_{}_{}_{}_'''
        '''learning_rate_{}'''.format(epochs, batch_size, train_ratio,
                                      validation_ratio, test_ratio,
                                      learning_rate))
    if perceptron:
        model_name = '{}_perceptron.pb'.format(model_name)
    else:
        model_name = '{}_dropout_{}_hidden_size_{}.pb'.format(
            model_name, dropout, hidden_size)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        local_experts = {}

        for model in os.listdir(experts):
            print('Loading {}'.format(model))
            load_graph(os.path.join(experts, model))  # 'experts' is the parameter; 'args' is not in scope here
            stripped = model[20:]  # drop a fixed 20-char filename prefix
            h5 = stripped[:stripped.find('_')]  # MESSY.
            local_experts[h5] = sess.graph.get_tensor_by_name(
                '{}output:0'.format(h5))

        data.train._X = np.hstack([
            data.train.X,
            np.vstack([
                flow(sess, local_experts, x)
                for x in chunks(data.train.X, batch_size)
            ])
        ])
        data.validation._X = np.hstack(
            [data.validation.X,
             flow(sess, local_experts, data.validation.X)])
        data.test._X = np.hstack(
            [data.test.X, flow(sess, local_experts, data.test.X)])

        x = tf.placeholder('float',
                           shape=[None, data.train.X_features],
                           name='input')
        y_ = tf.placeholder('float',
                            shape=[None, data.train.Y_features],
                            name='target')

        if perceptron:
            W = weight_variable([data.train.X_features, data.train.Y_features],
                                name='weights')
            b = bias_variable([data.train.Y_features], name='bias')

            logits = tf.matmul(x, W) + b
        else:
            W_in = weight_variable([data.train.X_features, hidden_size],
                                   name='weights_in')
            b_in = bias_variable([hidden_size], name='bias_in')

            hidden = tf.matmul(x, W_in) + b_in
            relu = tf.nn.relu(hidden)

            keep_prob = tf.placeholder_with_default([1.], shape=None)
            hidden_dropout = tf.nn.dropout(relu, keep_prob)

            W_out = weight_variable([hidden_size, data.train.Y_features],
                                    name='weights_out')
            b_out = bias_variable([data.train.Y_features], name='bias_out')

            logits = tf.matmul(hidden_dropout, W_out) + b_out

        y = tf.nn.softmax(logits, name='output')

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(
            logits=logits, labels=y_)

        train_step = tf.train.AdamOptimizer(learning_rate).minimize(
            cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

        sess.run(tf.initialize_all_variables())

        last_epoch = 0

        t_epoch = time.time()
        while data.train.epoch <= epochs:
            epoch = data.train.epoch
            batch_x, batch_y = data.train.next_batch(batch_size)

            t_start = time.time()
            feed_dict = {
                x: batch_x,
                y_: batch_y
            } if perceptron else {
                x: batch_x,
                y_: batch_y,
                keep_prob: dropout
            }
            train_step.run(feed_dict=feed_dict)
            t_end = time.time() - t_start

            if epoch > last_epoch:

                if epoch % print_every == 0:
                    train_accuracy = accuracy.eval(feed_dict={
                        x: batch_x,
                        y_: batch_y
                    })

                    validation_accuracy = accuracy.eval(feed_dict={
                        x: data.validation.X,
                        y_: data.validation.Y
                    })

                    print(
                        '''Epoch {} train accuracy: {}, validation accuracy: {}. '''
                        '''{} states/sec, {} secs/epoch.'''.format(
                            epoch, train_accuracy, validation_accuracy,
                            batch_size / t_end,
                            time.time() - t_epoch))
                if epoch % save_every == 0 or epoch == epochs:
                    output_graph_def = graph_util.convert_variables_to_constants(
                        sess, sess.graph.as_graph_def(), ['input', 'output'])

                    with gfile.FastGFile(os.path.join(model_dir, model_name),
                                         'wb') as f:
                        f.write(output_graph_def.SerializeToString())

                t_epoch = time.time()
                last_epoch = epoch

        print('Trained model saved to {}'.format(
            os.path.join(model_dir, model_name)))

        if test_ratio > 0:
            test_accuracy = accuracy.eval(feed_dict={
                x: data.test.X,
                y_: data.test.Y
            })
            print('Evaluation on testing data: {}'.format(test_accuracy))
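
Examples #4 and #6 route each batch through a set of frozen "expert" graphs via a flow(sess, local_experts, x) helper that is not shown on this page. Judging from how its result is np.hstack-ed onto the raw features, it likely runs every expert and concatenates their outputs; a guessed sketch (the signature is taken from the calls above, the body and tensor-name convention are assumptions):

import numpy as np

def flow(sess, local_experts, batch_x):
    # Run each frozen expert on the batch and stack their output activations
    # column-wise, in a fixed key order so columns stay aligned across calls.
    # The '{name}input:0' tensor name mirrors the '{name}output:0' lookup above.
    outputs = [sess.run(tensor, {'{}input:0'.format(name): batch_x})
               for name, tensor in sorted(local_experts.items())]
    return np.hstack(outputs)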
Example #5
def learn(
    train_states,
    test_states,
    model,
    learning_rate=0.0001,
    save_every=10,
    batch_size=2048,
    hidden_size=2048,
    dropout=0.5,
    epochs=500,
    print_every=1,
    model_dir=".",
    perceptron=False,
    mem_ratio=0.95,
):

    data = read_data(train_states, test_states)

    model_name = """trust_classifier_epochs_{}_batch_{}_learning_rate_{}""".format(epochs, batch_size, learning_rate)

    if perceptron:
        model_name = "{}_perceptron.pb".format(model_name)
    else:
        model_name = "{}_dropout_{}_hidden_size_{}.pb".format(model_name, dropout, hidden_size)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        load_graph(model)
        transfer_predictor = sess.graph.get_tensor_by_name("output:0")

        # Relabel the data: the new inputs are the transfer model's outputs,
        # and the new targets record whether its prediction was correct.
        data.train._X = np.vstack(
            [sess.run(transfer_predictor, {"input:0": chunk}) for chunk in chunks(data.train.X, 10)]
        )
        answer = tf.equal(tf.argmax(data.train.X, 1), tf.argmax(data.train.Y, 1))
        data.train._Y = tf.one_hot(tf.to_int32(answer), depth=2).eval()

        data.test._X = sess.run(transfer_predictor, {"input:0": data.test.X})
        answer = tf.equal(tf.argmax(data.test.X, 1), tf.argmax(data.test.Y, 1))
        data.test._Y = tf.one_hot(tf.to_int32(answer), depth=2).eval()

        x = tf.placeholder("float", shape=[None, data.train.X_features], name="input_b")
        y_ = tf.placeholder("float", shape=[None, data.train.Y_features], name="target")

        if perceptron:
            W = weight_variable([data.train.X_features, data.train.Y_features], name="weights")
            b = bias_variable([data.train.Y_features], name="bias")

            logits = tf.matmul(x, W) + b
        else:
            W_in = weight_variable([data.train.X_features, hidden_size], name="weights_in")
            b_in = bias_variable([hidden_size], name="bias_in")

            hidden = tf.matmul(x, W_in) + b_in
            relu = tf.nn.relu(hidden)

            keep_prob = tf.placeholder_with_default([1.0], shape=None)
            hidden_dropout = tf.nn.dropout(relu, keep_prob)

            W_out = weight_variable([hidden_size, data.train.Y_features], name="weights_out")
            b_out = bias_variable([data.train.Y_features], name="bias_out")

            logits = tf.matmul(hidden_dropout, W_out) + b_out

        # Loss & train
        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

        # Evaluation
        y = tf.nn.softmax(logits, name="output_b")
        correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

        sess.run(tf.initialize_all_variables())

        last_epoch = 0

        t_epoch = time.time()
        while data.train.epoch <= epochs:
            epoch = data.train.epoch
            batch_x, batch_y = data.train.next_batch(batch_size)

            t_start = time.time()
            feed_dict = {x: batch_x, y_: batch_y} if perceptron else {x: batch_x, y_: batch_y, keep_prob: dropout}
            train_step.run(feed_dict=feed_dict)
            t_end = time.time() - t_start

            if epoch > last_epoch:

                if epoch % print_every == 0:
                    train_accuracy_mean = accuracy.eval(feed_dict={x: batch_x, y_: batch_y})

                    validation_accuracy_mean = accuracy.eval(feed_dict={x: data.test.X, y_: data.test.Y})

                    print(
                        """Epoch {} train accuracy: {}, test accuracy: {}. """
                        """{} states/sec, {} secs/epoch.""".format(
                            epoch,
                            train_accuracy_mean,
                            validation_accuracy_mean,
                            batch_size / t_end,
                            time.time() - t_epoch,
                        )
                    )
                if epoch % save_every == 0 or epoch == epochs:
                    output_graph_def = graph_util.convert_variables_to_constants(
                        sess, sess.graph.as_graph_def(), ["input_b", "output_b"]
                    )

                    with gfile.FastGFile(os.path.join(model_dir, model_name), "wb") as f:
                        f.write(output_graph_def.SerializeToString())

                t_epoch = time.time()
                last_epoch = epoch

        print("Trained model saved to {}".format(os.path.join(model_dir, model_name)))
Example #6
def learn(data_folder, experts, learning_rate=.001, train_ratio=.8,
          validation_ratio=.1, test_ratio=.1, save_every=10, batch_size=2048,
          hidden_size=1024, dropout=.5, epochs=500, print_every=1,
          model_dir='.', perceptron=False, mem_ratio=.95):
    
    assert abs(train_ratio + validation_ratio + test_ratio - 1) < 1e-6, \
        'Train/validation/test ratios must sum to 1'

    data = read_data(data_folder, train_ratio, validation_ratio, test_ratio)

    model_name = ('''transfer_classifier_moe_epochs_{}_batch_{}_ratios_{}_{}_{}_'''
                  '''learning_rate_{}'''.format(
                      epochs, batch_size,
                      train_ratio, validation_ratio,
                      test_ratio, learning_rate))
    if perceptron:
        model_name = '{}_perceptron.pb'.format(model_name)
    else:
        model_name = '{}_dropout_{}_hidden_size_{}.pb'.format(model_name, dropout, hidden_size)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=mem_ratio)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
        local_experts = {}
        
        for model in os.listdir(experts):
            print('Loading {}'.format(model))
            load_graph(os.path.join(experts, model))  # not args.experts: 'args' is out of scope here
            stripped = model[20:]  # drop a fixed 20-char filename prefix
            h5 = stripped[:stripped.find('_')]  # MESSY.
            local_experts[h5] = sess.graph.get_tensor_by_name('{}output:0'.format(h5))

        data.train._X = np.hstack([
            data.train.X,
            np.vstack([flow(sess, local_experts, x)
                       for x in chunks(data.train.X, batch_size)])
        ])
        data.validation._X = np.hstack(
            [data.validation.X, flow(sess, local_experts, data.validation.X)])
        data.test._X = np.hstack(
            [data.test.X, flow(sess, local_experts, data.test.X)])

        x = tf.placeholder('float', shape=[None, data.train.X_features], name='input')
        y_ = tf.placeholder('float', shape=[None, data.train.Y_features], name='target')
            
        if perceptron:
            W = weight_variable([data.train.X_features, data.train.Y_features], name='weights')
            b = bias_variable([data.train.Y_features], name='bias')

            logits = tf.matmul(x,W) + b
        else:
            W_in = weight_variable([data.train.X_features, hidden_size], name='weights_in')
            b_in = bias_variable([hidden_size], name='bias_in')

            hidden = tf.matmul(x,W_in) + b_in
            relu = tf.nn.relu(hidden)
            
            keep_prob = tf.placeholder_with_default([1.], shape=None)
            hidden_dropout = tf.nn.dropout(relu, keep_prob)

            W_out = weight_variable([hidden_size,data.train.Y_features], name='weights_out')
            b_out = bias_variable([data.train.Y_features], name='bias_out')

            logits = tf.matmul(hidden_dropout, W_out) + b_out

        y = tf.nn.softmax(logits, name='output')

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y_)
        
        train_step = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)
        correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, 'float'))

        sess.run(tf.initialize_all_variables())

        last_epoch = 0

        t_epoch = time.time()
        while data.train.epoch <= epochs:
            epoch = data.train.epoch
            batch_x, batch_y = data.train.next_batch(batch_size)
            
            t_start = time.time()
            feed_dict = {x: batch_x, y_: batch_y } if perceptron else {x: batch_x, y_: batch_y, keep_prob: dropout}
            train_step.run(feed_dict=feed_dict)
            t_end = time.time() - t_start

            if epoch > last_epoch:

                if epoch % print_every == 0:
                    train_accuracy = accuracy.eval(feed_dict={
                        x: batch_x,
                        y_: batch_y })

                    validation_accuracy = accuracy.eval(feed_dict={
                        x: data.validation.X,
                        y_: data.validation.Y })

                    print('''Epoch {} train accuracy: {}, validation accuracy: {}. '''
                          '''{} states/sec, {} secs/epoch.'''.format(epoch, train_accuracy,
                                                                     validation_accuracy, batch_size/t_end,
                                                                     time.time() - t_epoch))
                if epoch % save_every == 0 or epoch == epochs:
                    output_graph_def = graph_util.convert_variables_to_constants(
                        sess, sess.graph.as_graph_def(), ['input', 'output'])

                    with gfile.FastGFile(os.path.join(model_dir, model_name), 'wb') as f:
                        f.write(output_graph_def.SerializeToString())

                t_epoch = time.time()
                last_epoch = epoch

        print('Trained model saved to {}'.format(os.path.join(model_dir, model_name)))

        if test_ratio > 0:
            test_accuracy = accuracy.eval(feed_dict={x: data.test.X, y_: data.test.Y })
            print('Evaluation on testing data: {}'.format(test_accuracy))
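
Every example ends by freezing the trained graph into a .pb file with graph_util.convert_variables_to_constants. For completeness, a minimal sketch of loading such a frozen graph back for inference; the path is hypothetical, and the 'input:0'/'output:0' tensor names follow Examples #2 and #4 (Examples #1 and #5 export 'input_b:0'/'output_b:0' instead):

import numpy as np
import tensorflow as tf

frozen_model_path = 'model.pb'  # hypothetical path to a graph saved above

# Parse the frozen GraphDef and import it into a fresh graph.
graph_def = tf.GraphDef()
with tf.gfile.FastGFile(frozen_model_path, 'rb') as f:
    graph_def.ParseFromString(f.read())

with tf.Session() as sess:
    tf.import_graph_def(graph_def, name='')
    output = sess.graph.get_tensor_by_name('output:0')

    # Toy batch; the real feature width depends on the trained model.
    batch = np.random.rand(4, 128).astype(np.float32)
    print(sess.run(output, {'input:0': batch}))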