Example #1
    def fit(self, X, y=None):
        tf.reset_default_graph()
        tfl.init_graph(gpu_memory_fraction=self.gpu_mem_prop)

        # Compute the model name from the hyper-parameters so that runs can be easily retrieved,
        # and append an 8-character ID so that logs land in separate directories.
        # The ID is not guaranteed to be unique, but collisions should be rare and
        # would only mix up the TensorBoard graphs.
        model_name = ("lr:" + str(self.learning_rate) + "|eps:" + str(self.epsilon) + "|do:" + str(self.dropout) +
                      "|stddev_init:" + str(self.stddev_init) + "|hidact:" + self.hidden_act + "|outact:" +
                      self.outlayer_act + "|wd:" + str(self.weight_decay) + "|lastlayw:" + str(self.last_layer_width) +
                      "|d:" + str(self.depth) + "|batchs:" + str(self.batch_size) + "|epochs:" + str(self.epochs) +
                      "|id:"+str(uuid.uuid4())[:8])

        # Computing first layer width (all the examples of the dataset must have the same width)
        first_layer_width = len(X[0])

        # Neural network creation
        network = _nn_creation(first_layer_width, self.last_layer_width, self.depth, self.epsilon, self.learning_rate,
                               self.dropout, self.stddev_init, self.hidden_act, self.outlayer_act, self.weight_decay,
                               self.score_fun, self.loss_fun, self.gpu_mem_prop)

        # Model creation
        self.model = tfl.DNN(network, tensorboard_verbose=3, tensorboard_dir=self.logs_dir)

        # Training
        self.model.fit(X_inputs=X, Y_targets=y, batch_size=self.batch_size,
                       shuffle=True, snapshot_step=100, validation_set=0.1,
                       show_metric=True, run_id=model_name, n_epoch=self.epochs)
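
A note on the pattern above: without tf.reset_default_graph(), every call to fit() would add a fresh copy of the network to the same default graph. A minimal standalone sketch of the pattern, assuming tfl is tflearn imported under that alias:

import tensorflow as tf
import tflearn as tfl

tf.reset_default_graph()                 # drop ops left over from a previous fit
tfl.init_graph(gpu_memory_fraction=0.5)  # cap the GPU memory this process may claim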
Example #2
def create2dConvNetNeuralNetworkModel(input_size, output_size, learningRate):

    # Specify the log directory
    logdir = 'log/2d/' + datetime.now().strftime('%Y%m%d-%H%M%S')

    convnet = input_data(shape=[None, input_size, input_size, 1], name='input_currentState')

    tflearn.init_graph(num_cores=1, gpu_memory_fraction=0.9)

    convnet = conv_2d(convnet, nb_filter=16, filter_size=5, strides=1, padding='valid', activation='relu')
    convnet = max_pool_2d(convnet, kernel_size=2, strides=2, padding='valid')

    convnet = conv_2d(convnet, nb_filter=32, filter_size=3, strides=1, padding='valid', activation='relu')
    convnet = max_pool_2d(convnet, kernel_size=2, strides=2, padding='valid')

    convnet = flatten(convnet)

    convnet = fully_connected(convnet, n_units=256, weights_init='truncated_normal', activation='relu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, n_units=128, weights_init='truncated_normal', activation='relu')
    convnet = dropout(convnet, 0.5)

    convnet = fully_connected(convnet, n_units=output_size, activation='softmax')
    convnet = regression(convnet, optimizer='adam', learning_rate=learningRate, loss='categorical_crossentropy', name='targets')

    model = tflearn.DNN(convnet, tensorboard_dir=logdir)

    return model
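
A hypothetical call to the builder above; the input size, output size, and learning rate are illustrative assumptions, not values from the original project:

model = create2dConvNetNeuralNetworkModel(input_size=28, output_size=4, learningRate=1e-3)
# Inputs are fed as [N, 28, 28, 1] tensors under the placeholder name 'input_currentState':
# model.fit({'input_currentState': X}, {'targets': y}, n_epoch=10, show_metric=True)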
Example #3
def construct_dnn():
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.2)
    tflearn.config.init_training_mode()
    input_layer = tflearn.input_data(shape=[None, 15, 15, 3])
    # block 1
    net = tflearn.conv_2d(input_layer, 256, 3, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    # block 2
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # block 3
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # value head
    net = tflearn.conv_2d(net, 1, 1, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    final = tflearn.fully_connected(net, 1, activation='tanh')
    # optimizer
    sgd = tflearn.optimizers.SGD(learning_rate=0.01, lr_decay=0.95, decay_step=200000)
    regression = tflearn.regression(final, optimizer=sgd, loss='mean_square', metric='R2')
    model = tflearn.DNN(regression)#, tensorboard_verbose=3)
    return model
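
A hypothetical training call for the value network above; the board tensors and targets are assumptions for illustration:

model = construct_dnn()
# X: [N, 15, 15, 3] board feature planes; y: [N, 1] outcome values in [-1, 1]
# model.fit(X, y, n_epoch=1, batch_size=128, show_metric=True)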
Example #4
def network():

    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.8)

    # Normalization of the data
    img_prep = ImagePreprocessing()
    img_prep.add_featurewise_zero_center()
    img_prep.add_featurewise_stdnorm()

    # Create new randomized data (the more you have, the better)
    img_aug = ImageAugmentation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_rotation(max_angle=25.)
    img_aug.add_random_blur(sigma_max=3.)

    # The input layer must match the shape of the dataset
    network = input_data(shape=[None, 100, 100, 3],
                         data_preprocessing=img_prep,
                         data_augmentation=img_aug)
    """
		Creation of the different hidden layers

		================
		Editing section
		================
	"""
    network = conv_2d(network, 64, 3, strides=2, activation='relu')
    network = max_pool_2d(network, 2)

    network = conv_2d(network, 64, 3, activation='relu')
    network = max_pool_2d(network, 2)

    network = conv_2d(network, 64, 2, activation='relu')
    network = conv_2d(network, 64, 2, activation='relu')
    network = max_pool_2d(network, 2)

    # Fully connected layer, then dropout so the network does not overfit
    network = fully_connected(network, 4096, activation='relu')
    network = dropout(network, 0.7)
    """
		======================
		End of Editing section
		======================
	"""

    network = fully_connected(network, 120, activation='softmax')

    # Training hyper-parameters
    network = regression(network,
                         optimizer='adam',
                         loss='categorical_crossentropy',
                         learning_rate=0.001)

    # Create the deep neural network with its checkpoint (backup) path.
    # tensorboard_verbose=0 gives the best computation time.
    model = tflearn.DNN(network,
                        tensorboard_verbose=0,
                        checkpoint_path='dog_classifier.tfl.ckpt')

    return model
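
A hypothetical usage sketch, assuming X holds 100x100 RGB images and Y one-hot labels over the 120 classes:

model = network()
# model.fit(X, Y, n_epoch=10, validation_set=0.1, show_metric=True,
#           batch_size=64, run_id='dog_classifier')
# model.save('dog_classifier.tfl')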
Example #5
def initialize():
    color = 'black' if strategy.playing == 0 else 'white'

    # initialize zobrist for u caching
    if not hasattr(strategy, 'zobrist_me'):
        np.random.seed(19890328) # use the same random matrix for storing
        strategy.zobrist_me = np.random.randint(np.iinfo(np.int64).max, size=board_size**2).reshape(board_size,board_size)
        strategy.zobrist_opponent = np.random.randint(np.iinfo(np.int64).max, size=board_size**2).reshape(board_size,board_size)
        #strategy.zobrist_code = np.random.randint(np.iinfo(np.int64).max)
        # reset the random seed to random for other functions
        np.random.seed()

    if not hasattr(best_action_q, 'move_interest_values'):
        best_action_q.move_interest_values = np.zeros(board_size**2, dtype=np.float32).reshape(board_size,board_size)

    if not hasattr(strategy, 'learndata'):
        filename = color + '.learndata'
        if os.path.exists(filename):
            strategy.learndata = pickle.load(open(filename, 'rb'))
            print("Successfully loaded %d previously saved learndata"%len(strategy.learndata))
        else:
            strategy.learndata = dict()

    if not hasattr(tf_predict_u, 'tf_state'):
        tf_predict_u.tf_state = np.zeros(board_size**2 * 5, dtype=np.int32).reshape(board_size, board_size, 5)
        tf_predict_u.tf_state[:, :, 3] = 1
        #tf_predict_u.tf_state[4, :, :] = 1 if color == 'black' else 0

    if not hasattr(tf_predict_u, 'model'):
        tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.5)
        import construct_dnn
        model = construct_dnn.construct_dnn()
        model.load('tf_model')
        tf_predict_u.model = model
Example #6
    def __init__(self):
        self.len_past = 30
        #self.s_date = "20120101_20160330"
        #self.model_dir = '../model/tflearn/reg_l3_bn/big/%s/' % self.s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.05)
        input_layer = tflearn.input_data(shape=[None, 690], name='input')
        dense1 = tflearn.fully_connected(input_layer,
                                         400,
                                         name='dense1',
                                         activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n,
                                         100,
                                         name='dense2',
                                         activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output,
                                        optimizer='adam',
                                        loss='mean_square',
                                        metric='R2',
                                        learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        self.qty = {}
        self.day_last = {}
        self.currency = 100000000
Example #7
def train_NN(train_X,
             train_y,
             n_epochs=config.no_of_epochs,
             continue_work=False,
             n_layers=1,
             n_nodes=(1024, )):
    tf.reset_default_graph()
    model = create_model(no_of_layers=n_layers, num_of_nodes=n_nodes)

    i = 0
    iterator_batch = 0
    if continue_work:
        model, iterator_batch = load_batch(model)

    tflearn.init_graph(seed=1995, gpu_memory_fraction=1)
    with tf.Session() as sess:
        tflearn.is_training(True, sess)
    for train_batch_X, train_batch_y in pickle_lazy_loading(train_X,
                                                            train_y,
                                                            i=iterator_batch):
        print("training batch:", i)
        start_time__ = time.time()
        model.fit(train_batch_X,
                  train_batch_y,
                  n_epoch=n_epochs,
                  shuffle=True,
                  snapshot_step=100,
                  show_metric=True)
        print("batch", i, "trained in", time.time() - start_time__, "s")
        i += 1
        save_batch(model, i)

    remove_batch()

    return model
Example #8
    def __init__(self, s_date, n_frame):
        self.n_epoch = 20
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/reg_l3_bn/big/%s' % pred_s_date
        self.model_dir = '../model/tflearn/reg_l3_bn/big/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 23*n_frame], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
            self.n_epoch = 10
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
Example #9
def generate_nnet(feats):
    """Generate a neural network.

    Parameters
    ----------
    feats : list with at least one feature vector

    Returns
    -------
    Neural network object
    """
    # Load it here to prevent crash of --help when it's not present
    import tflearn

    tflearn.init_graph(num_cores=2, gpu_memory_fraction=0.6)

    input_shape = (None, feats[0].shape[0], feats[0].shape[1],
                   feats[0].shape[2])
    logging.info("input shape: %s", input_shape)
    net = tflearn.input_data(shape=input_shape)
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net,
                             optimizer='adam',
                             learning_rate=0.01,
                             loss='categorical_crossentropy',
                             name='target')
    return tflearn.DNN(net)
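
A hypothetical call, assuming feats is a list of equally shaped 3-D feature arrays and labels is a one-hot array with two columns:

model = generate_nnet(feats)
# model.fit(numpy.array(feats), labels, n_epoch=10, show_metric=True)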
Example #10
    def training(self):
        labels, words = self.parse_intents_file()

        words = [_stemmer.stem(w.lower()) for w in words if w != "?"]
        words = sorted(list(set(words)))

        labels = sorted(labels)

        training, output = self.to_binary_array(words, labels)

        with open("{}/data.pickle".format(self.model_dir), "wb") as f:
            pickle.dump((words, labels, training, output, self.input_file), f)

        tensorflow.reset_default_graph()
        tflearn.init_graph(num_cores=1, gpu_memory_fraction=0.5)

        net = tflearn.input_data(shape=[None, len(training[0])])
        # two hidden layers with 8 neurons each; this is where you can do more tuning
        net = tflearn.fully_connected(net, 8)
        net = tflearn.fully_connected(net, 8)
        # softmax turns the output into class probabilities (alternative activations: relu, sigmoid)
        net = tflearn.fully_connected(net,
                                      len(output[0]),
                                      activation="softmax")

        net = tflearn.regression(net)
        # net = tflearn.regression(net, optimizer='sgd', learning_rate=0.01, loss='mean_square')

        model = tflearn.DNN(net)
        model.fit(training,
                  output,
                  n_epoch=200,
                  batch_size=2,
                  show_metric=True)
        model.save("{}/model.tflearn".format(self.model_dir))
Example #11
def build_estimator(model_dir, model_type, embeddings, index_map,
                    combination_method):
    """Build an estimator."""

    # Continuous base columns.
    node1 = tf.contrib.layers.real_valued_column("node1")

    deep_columns = [node1]

    if model_type == "regressor":

        tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
        if combination_method == 'concatenate':
            net = tflearn.input_data(shape=[None, embeddings.shape[1] * 2])
        else:
            net = tflearn.input_data(shape=[None, embeddings.shape[1]])
        net = tflearn.fully_connected(net, 100, activation='relu')
        net = tflearn.fully_connected(net, 2, activation='softmax')
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 loss='categorical_crossentropy')
        m = tflearn.DNN(net)
    else:
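        # NOTE: wide_columns is assumed to be defined elsewhere in the module;
        # this snippet does not construct it.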
        m = tf.contrib.learn.DNNLinearCombinedClassifier(
            model_dir=model_dir,
            linear_feature_columns=wide_columns,
            dnn_feature_columns=deep_columns,
            dnn_hidden_units=[100])
    return m
Example #12
def main(args):

    """
    Environment used in this code is Pendulum-v0 from OpenAI gym.

        States: cos(theta), sin(theta), theta_dt
        Actions: Force application between -2 to 2
        Reward: -(Θ^2 + 0.1*Θ_dt^2 + 0.001*action^2)

    Objective:  Pendulum is vertical, with 0 movement.
    Initialization: Starts at a random angle, and at a random velocity.
    End: After all the steps are exhausted
    """

    with tf.Session() as sess:

        # Create the gym environment
        env = LinearSystem(nsim=args['max_episode_len'], model_type='MIMO', x0=np.array([1.333, 4]),
                           u0=np.array([3, 6]), xs=np.array([3.555, 4.666]), us=np.array([5, 7]), step_size=0.2)

        # Set all the random seeds for the random packages
        np.random.seed(int(args['random_seed']))
        tf.set_random_seed(int(args['random_seed']))
        env.seed(int(args['random_seed']))
        tflearn.init_graph(seed=args['random_seed'])

        # Define all the state and action dimensions, and the bound of the action
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.shape[0]
        action_bound = env.action_space.high

        # Restore old model
        # saver.restore(sess, args['ckpt_dir'])

        # Initialize the actor and critic
        actor = ActorNetwork(sess, state_dim, action_dim, action_bound,
                             float(args['actor_lr']), float(args['tau']),
                             int(args['minibatch_size']))

        critic = CriticNetwork(sess, state_dim, action_dim,
                               float(args['critic_lr']), float(args['tau']),
                               float(args['gamma']),
                               actor.get_num_trainable_vars())

        # Create the saver now that the actor/critic variables exist;
        # tf.train.Saver() raises an error if no variables are defined yet.
        saver = tf.train.Saver()

        # Initialize Ornstein Uhlenbeck Noise
        actor_noise = OrnsteinUhlenbeckActionNoise(mu=np.zeros(action_dim), dt=env.step_size,
                                                   random_seed=int(args['random_seed']))

        # sigma=np.array([1, 1])

        # Train the Actor-Critic Model
        replay_buffer, action_list = train(sess, env, args, actor, critic, actor_noise)

        # Save the model
        saver.save(sess, args['ckpt_dir'])

        return actor, critic, env, replay_buffer, action_list
Example #13
def generate_nnet(feats):
    """Generate a neural network.

    Parameters
    ----------
    feats : list with at least one feature vector

    Returns
    -------
    Neural network object
    """
    # Load it here to prevent crash of --help when it's not present
    import tflearn

    tflearn.init_graph(num_cores=2, gpu_memory_fraction=0.6)

    input_shape = (None,
                   feats[0].shape[0],
                   feats[0].shape[1],
                   feats[0].shape[2])
    logging.info("input shape: %s", input_shape)
    net = tflearn.input_data(shape=input_shape)
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.conv_2d(net, 10, 3, activation='relu', regularizer="L2")
    net = tflearn.fully_connected(net, 2, activation='sigmoid')
    net = tflearn.regression(net, optimizer='adam', learning_rate=0.01,
                             loss='categorical_crossentropy', name='target')
    return tflearn.DNN(net)
Example #14
def construct_dnn():
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.3)
    tflearn.config.init_training_mode()
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_90degrees_rotation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    input_layer = tflearn.input_data(shape=[None, 15, 15, 3],
                                     data_augmentation=img_aug)
    # block 1
    net = tflearn.conv_2d(input_layer, 256, 3, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    # res block 1
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 2
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 3
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # res block 4
    tmp = tflearn.conv_2d(net, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    tmp = tflearn.activation(tmp, activation='relu')
    tmp = tflearn.conv_2d(tmp, 256, 3, activation=None)
    tmp = tflearn.batch_normalization(tmp)
    net = tflearn.activation(net + tmp, activation='relu')
    # value head
    net = tflearn.conv_2d(net, 1, 1, activation=None)
    net = tflearn.batch_normalization(net)
    net = tflearn.activation(net, activation='relu')
    net = tflearn.fully_connected(net, 256, activation='relu')
    final = tflearn.fully_connected(net, 1, activation='tanh')
    # optimizer
    #sgd = tflearn.optimizers.SGD(learning_rate=0.01, lr_decay=0.95, decay_step=200000)
    sgd = tflearn.optimizers.SGD(learning_rate=0.01,
                                 lr_decay=0.95,
                                 decay_step=500000)
    regression = tflearn.regression(final,
                                    optimizer=sgd,
                                    loss='mean_square',
                                    metric='R2')
    model = tflearn.DNN(regression)
    return model
Example #15
def _nn_creation(first_layer_width, last_layer_width, depth, epsilon=1e-8, learning_rate=0.001, dropout_val=0.99,
                stddev_init=0.001, hidden_act='relu', outlayer_act='linear', weight_decay=0.001,
                validation_fun=_rmse_valid, cost_fun=_rmse, gpu_mem_prop=1):
    """
    Creates a neural network with the given parameters.
    :param first_layer_width:
    :param last_layer_width:
    :param depth:
    :param epsilon:
    :param learning_rate:
    :param dropout_val:
    :param stddev_init:
    :param hidden_act:
    :param outlayer_act:
    :param weight_decay:
    :param validation_fun:
    :param cost_fun:
    :param gpu_mem_prop:
    :return: created neural network
    """

    # Weights initialization
    winit = tfl.initializations.truncated_normal(stddev=stddev_init, dtype=tf.float32, seed=None)

    # GPU memory utilisation proportion
    tfl.init_graph(num_cores=16, gpu_memory_fraction=gpu_mem_prop, soft_placement=True)

    # Creating NN input
    network = input_data(shape=[None, first_layer_width], name='input')

    # Calculating width coef
    width_coef = (last_layer_width - first_layer_width) / (depth + 1)

    # Creating hidden layers
    for i in range(1, depth+1):

        # Computing current width
        curr_width = math.floor(width_coef * (i+1) + first_layer_width)

        # Creating current layer
        network = fully_connected(network, curr_width, activation=hidden_act, name='fc' + str(i-1), weights_init=winit,
                                  weight_decay=weight_decay)

        print("size : " + str(curr_width))

        # Applying dropout
        network = dropout(network, dropout_val)

    # Adding outlayer
    network = fully_connected(network, 1, activation=outlayer_act, name='outlayer', weights_init=winit)

    # Adam optimizer creation
    adam = Adam(learning_rate=learning_rate, epsilon=epsilon)

    # Model evaluation layer creation
    network = regression(network, optimizer=adam,
                         loss=cost_fun, metric=validation_fun, name='target')

    return network
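
Example #1 above shows this builder in use; a standalone sketch with assumed widths (tfl is tflearn, as in Example #1):

network = _nn_creation(first_layer_width=128, last_layer_width=1, depth=3)
model = tfl.DNN(network)
# model.fit(X, y, n_epoch=10, batch_size=64, show_metric=True)  # X: [N, 128], y: [N, 1]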
Example #16
def train_save_CNN(network, X, Y):
    tflearn.init_graph()
    with tf.device('/gpu:0'):
        model = tflearn.DNN(network)
        model.fit(X, Y, n_epoch=1000, validation_set=0.1, shuffle=True,
                show_metric=True, batch_size=64, snapshot_step=200,
                snapshot_epoch=False, run_id='alexnet_oxflowers17')
        model.save(os.getcwd()+'/model/17flowers_model')
Example #17
def tflearn_conv2d(l_i_shape, str_optimizer, learning_rate, filter_size,
                   kernel_size, nb_filter, activation_function,
                   loss_function, optimizer):
    tflearn.init_graph()
    input_ = tflearn.input_data(shape=[None] + l_i_shape, name='input')
    print(input_)
    layer1conv = tflearn.layers.conv.conv_2d(
        input_, nb_filter, filter_size,
        activation=activation_function)  #, weight_decay=0.001)
    print(layer1conv)
    layer1pool = tflearn.layers.conv.max_pool_2d(layer1conv, kernel_size)
    print(layer1pool)
    layer4conv = tflearn.layers.conv.conv_2d(
        layer1pool,
        nb_filter * 2,
        filter_size,
        activation=activation_function)  #, weight_decay=0.001)
    print(layer4conv)
    layer4pool = tflearn.layers.conv.max_pool_2d(layer4conv, kernel_size)
    print(layer4pool)
    layer6 = tflearn.layers.conv.conv_2d_transpose(
        layer4pool,
        nb_filter * 2,
        filter_size,
        strides=[1, 2, 2, 1],
        output_shape=layer4conv.get_shape().as_list()[1:])
    print(layer6)
    layer6_skip_connected = tf.math.add(layer6, layer4conv)
    #layer6_skip_connected = tflearn.layers.merge_ops.merge([layer6, layer4conv], 'sum', axis = 0)
    print(layer6_skip_connected)
    #layer6_skip_connected = tflearn.reshape(layer6_skip_connected, [-1]+layer4conv.get_shape().as_list()[1:])
    layer7 = tflearn.layers.conv.conv_2d_transpose(
        layer6_skip_connected,
        nb_filter,
        filter_size,
        strides=[1, 2, 2, 1],
        output_shape=layer1conv.get_shape().as_list()[1:])
    print(layer7)
    #layer7_skip_connected = tflearn.layers.merge_ops.merge([layer7, layer1conv], 'sum', axis = 0)
    #layer7_skip_connected = tflearn.reshape(layer7_skip_connected, [-1]+layer1conv.get_shape().as_list()[1:])
    layer7_skip_connected = tf.math.add(layer7, layer1conv)
    print(layer7_skip_connected)
    layer8 = tflearn.layers.conv.conv_2d_transpose(
        layer7_skip_connected,
        3,
        1,
        output_shape=input_.get_shape().as_list()[1:])
    #layer8 = tflearn.reshape(layer8, [-1]+l_i_shape)
    print(layer8)
    net = tflearn.regression(layer8,
                             optimizer=optimizer,
                             loss=loss_function,
                             metric='R2',
                             learning_rate=learning_rate,
                             name='target')
    model = tflearn.DNN(net, tensorboard_verbose=0)  # run tensorboard --logdir='/tmp/tflearn_logs'
    return layer4conv, model
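
A hypothetical call to the convolutional encoder-decoder builder above; every argument value here is an assumption:

layer4conv, model = tflearn_conv2d(l_i_shape=[64, 64, 3], str_optimizer='adam',
                                   learning_rate=1e-3, filter_size=3, kernel_size=2,
                                   nb_filter=16, activation_function='relu',
                                   loss_function='mean_square', optimizer='adam')
# model.fit(X, X, n_epoch=5, show_metric=True)  # reconstruction: target equals input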
Example #18
    def simple_learn(self):
        tflearn.init_graph()
        net = tflearn.input_data(shape=[None, 64, 64, 3])
        net = tflearn.fully_connected(net, 64)
        net = tflearn.dropout(net, 0.5)
        net = tflearn.fully_connected(net, 10, activation='softmax')
        net = tflearn.regression(net, optimizer='adam',
                                 loss='softmax_categorical_crossentropy')
        model = tflearn.DNN(net)
        model.fit(self.trainset, self.trainlabels)
Example #19
def MLP(dataset):
    seed = 7
    epoch = 100

    np.random.seed(seed)

    labels = dataset[:, -1]

    p = np.unique(labels)

    output_dim = len(p)
    labels = one_hot(labels, output_dim)

    feature = np.delete(dataset, -1, 1)

    no_of_samples, input_dimension = feature.shape
    print(no_of_samples, input_dimension)

    X_train, X_test, y_train, y_test = train_test_split(feature,
                                                        labels,
                                                        test_size=0.3)
    #print(X_train.shape, y_train.shape)

    tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

    net = input_data(shape=[None, input_dimension], name='input')

    net = tflearn.fully_connected(net, 1024, activation='relu')
    net = tflearn.dropout(net, 0.5)

    net = tflearn.fully_connected(net, 1024, activation='relu')
    net = tflearn.dropout(net, 0.5)

    net = tflearn.fully_connected(net, output_dim, activation='softmax')
    net = regression(net,
                     optimizer='adam',
                     learning_rate=0.01,
                     loss='categorical_crossentropy',
                     name='targets')

    model = tflearn.DNN(net)

    #model.summary()
    tr = ((input_dimension * 1024 + 1024) + (1024 * 1024 + 1024) +
          (1024 * output_dim + output_dim))  # weights + biases per dense layer
    print("Trainable parameters:", tr)

    model.fit({'input': X_train}, {'targets': y_train},
              n_epoch=epoch,
              validation_set=({
                  'input': X_test
              }, {
                  'targets': y_test
              }),
              show_metric=True,
              run_id='DCNet')
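
A hypothetical invocation; one_hot and train_test_split are assumed to be available in the module, and the toy dataset below keeps integer class labels in its last column:

import numpy as np
toy = np.hstack([np.random.rand(300, 20),
                 np.random.randint(0, 6, size=(300, 1))])
MLP(toy)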
Example #20
def DecisionNetwork():
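    # FRAME_KEEP, FEATURES_LENGTH, OUTPUT_MOVE, OUTPUT_ACT, and LR are assumed
    # to be module-level constants defined elsewhere in the project.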
    tflearn.init_graph(soft_placement=True)
    with tf.device('/gpu:0'):
        network = tflearn.input_data(shape=[None, FRAME_KEEP, FEATURES_LENGTH],
                                     name='input')
        network = tflearn.gru(network,
                              256,
                              return_seq=True,
                              name='DBFull_layer1')
        network = tflearn.dropout(network, 0.6, name='DBFull_layer2')
        network = tflearn.gru(network,
                              256,
                              return_seq=True,
                              name='DBFull_layer3')
        network = tflearn.dropout(network, 0.6, name='DBFull_layer4')
        movement_network = tflearn.gru(network,
                                       256,
                                       return_seq=False,
                                       name='DBMove_layer1')
        movement_network = tflearn.dropout(movement_network,
                                           0.6,
                                           name='DBMove_layer2')
        movement_network = tflearn.fully_connected(movement_network,
                                                   OUTPUT_MOVE,
                                                   activation='softmax',
                                                   name='DBMove_layer3')
        movement_network = tflearn.regression(movement_network,
                                              optimizer='adam',
                                              loss='categorical_crossentropy',
                                              learning_rate=LR,
                                              name='DBMove_layer4')
        action_network = tflearn.gru(network,
                                     256,
                                     return_seq=False,
                                     name='DBAct_layer1')
        action_network = tflearn.dropout(action_network,
                                         0.6,
                                         name='DBAct_layer2')
        action_network = tflearn.fully_connected(action_network,
                                                 OUTPUT_ACT,
                                                 activation='softmax',
                                                 name='DBAct_layer3')
        action_network = tflearn.regression(action_network,
                                            optimizer='adam',
                                            loss='categorical_crossentropy',
                                            learning_rate=LR,
                                            name='DBAct_layer4')
        network = tflearn.merge([movement_network, action_network],
                                mode='concat',
                                name='DBFull_layer5')
        return tflearn.DNN(network,
                           max_checkpoints=5,
                           tensorboard_verbose=0,
                           checkpoint_path='full_model/full_model.tfl.ckpt')
Example #21
def evaluate_nn(X_train,
                y_train,
                X_val,
                y_val,
                n_epoch=20,
                tune_params=dict(hidden_size=50,
                                 batch_size=200,
                                 reg=0.5,
                                 learning_rate=1e-4,
                                 learning_rate_decay=0.95)):
    os.environ['CUDA_VISIBLE_DEVICES'] = '1'
    import tensorflow as tf
    import tflearn
    # tf.reset_default_graph()
    tflearn.init_graph()
    # sess = tf.Session()
    net = tflearn.input_data(shape=[None, 3072])
    net = tflearn.fully_connected(
        net,
        tune_params['hidden_size'],
        activation='relu',
        weight_decay=tune_params['reg'],
        weights_init=tflearn.initializations.truncated_normal(stddev=1e-4),
    )
    net = tflearn.fully_connected(
        net,
        10,
        activation='softmax',
        weight_decay=tune_params['reg'],
        weights_init=tflearn.initializations.truncated_normal(stddev=1e-4),
    )
    # optimizer = tflearn.optimizers.SGD(
    #    learning_rate=tune_params['learning_rate'],
    #    lr_decay=tune_params['learning_rate_decay'],
    #    decay_step=max(1, int(X_train.shape[0] / tune_params['batch_size'])),
    #    staircase=True
    # )
    net = tflearn.regression(net,
                             optimizer='adam',
                             loss='categorical_crossentropy',
                             batch_size=tune_params['batch_size'])
    model = tflearn.DNN(net)
    model.fit(
        X_train,
        y_train,
        n_epoch=n_epoch,
        batch_size=tune_params['batch_size'],
        snapshot_epoch=False,
        show_metric=False,
    )
    accuracy = model.evaluate(X_val, y_val)[0]
    # sess.close()
    return accuracy
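
A hypothetical call, assuming CIFAR-10-style inputs flattened to 3072 features and one-hot labels over 10 classes:

val_acc = evaluate_nn(X_train, y_train, X_val, y_val, n_epoch=5)
print("validation accuracy:", val_acc)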
Example #22
def test_NN(model, t_X, t_y):
    tflearn.init_graph(seed=1995, gpu_memory_fraction=1)
    with tf.Session() as sess:
        tflearn.is_training(False, sess)

    mini_validations = []
    for test_X, test_y in pickle_lazy_loading(t_X, t_y):
        validation_accuracy = float(model.evaluate(test_X, test_y)[0])
        print("mini accurecy:", validation_accuracy)
        mini_validations.append(validation_accuracy)
        print("current accurecy:", mean(mini_validations))

    return mean(mini_validations), mini_validations
Example #23
    def recognize(self, img):
        img = cv2.GaussianBlur(img, (11, 11), 0)
        img = transform.resize(img, (28, 28))
        img = img[:, :, 0] * 255
        tf.reset_default_graph()
        tflearn.init_graph()
        pred = self.model.predict([img.reshape(784)])
        res_arr = []
        for i, n in enumerate(pred[0]):
            res_arr.append([float(n), chr(i + 65)])
        res_arr.sort()
        res_arr.reverse()
        return res_arr
Example #24
def initialize():
    color = 'black' if strategy.playing == 0 else 'white'

    # initialize zobrist for u caching
    if not hasattr(strategy, 'zobrist_me'):
        np.random.seed(19890328)  # use the same random matrix for storing
        strategy.zobrist_me = np.random.randint(np.iinfo(np.int64).max,
                                                size=board_size**2).reshape(
                                                    board_size, board_size)
        strategy.zobrist_opponent = np.random.randint(
            np.iinfo(np.int64).max,
            size=board_size**2).reshape(board_size, board_size)
        #strategy.zobrist_code = np.random.randint(np.iinfo(np.int64).max)
        # reset the random seed to random for other functions
        np.random.seed()

    if not hasattr(best_action_q, 'move_interest_values'):
        best_action_q.move_interest_values = np.zeros(
            board_size**2, dtype=np.float32).reshape(board_size, board_size)

    if not hasattr(strategy, 'learndata'):
        filename = color + '.learndata'
        if os.path.exists(filename):
            strategy.learndata = pickle.load(open(filename, 'rb'))
            print("Successfully loaded %d previously saved learndata" %
                  len(strategy.learndata))
        else:
            strategy.learndata = dict()

    if not hasattr(tf_predict_u, 'tf_state'):
        tf_predict_u.tf_state = np.zeros(5 * board_size**2,
                                         dtype=np.int32).reshape(
                                             5, board_size, board_size)
        tf_predict_u.tf_state[3, :, :] = 1
        #tf_predict_u.tf_state[4, :, :] = 1 if color == 'black' else 0

    if not hasattr(tf_predict_u, 'model'):
        tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.5)
        input_layer = tflearn.input_data(shape=[None, 5, 15, 15])
        net = tflearn.fully_connected(input_layer, 256)
        net = tflearn.fully_connected(net, 256, activation='relu')
        final = tflearn.fully_connected(net, 1, activation='tanh')
        regression = tflearn.regression(final,
                                        optimizer='SGD',
                                        learning_rate=0.01,
                                        loss='mean_square',
                                        metric='R2')
        model = tflearn.DNN(regression)
        model.load('tf_model')
        tf_predict_u.model = model
Example #25
def initialize():
    # initialize zobrist for u caching
    if not hasattr(strategy, 'zobrist_me'):
        np.random.seed(2018)  # use the same random matrix for storing
        strategy.zobrist_black = np.random.randint(np.iinfo(np.int64).max,
                                                   size=board_size**2).reshape(
                                                       board_size, board_size)
        strategy.zobrist_white = np.random.randint(np.iinfo(np.int64).max,
                                                   size=board_size**2).reshape(
                                                       board_size, board_size)
        #strategy.zobrist_code = np.random.randint(np.iinfo(np.int64).max)
        # reset the random seed to random for other functions
        np.random.seed()

    if not hasattr(best_action_q, 'move_interest_values'):
        best_action_q.move_interest_values = np.zeros(
            board_size**2, dtype=np.float32).reshape(board_size, board_size)

    if not hasattr(strategy, 'learndata'):
        if os.path.isfile('strategy.learndata'):
            strategy.learndata = pickle.load(open('strategy.learndata', 'rb'))
            print("strategy.learndata found, loaded %d data" %
                  len(strategy.learndata))
        else:
            strategy.learndata = dict()
    strategy.started_from_beginning = False

    if not hasattr(tf_predict_u, 'all_interest_states'):
        tf_predict_u.all_interest_states = np.zeros(board_size**4 * 3,
                                                    dtype=np.int8).reshape(
                                                        board_size**2,
                                                        board_size, board_size,
                                                        3)

    if not hasattr(tf_predict_u, 'cache'):
        if os.path.isfile('tf_predict_u.cache'):
            tf_predict_u.cache = pickle.load(open("tf_predict_u.cache", 'rb'))
            print("tf_predict_u.cache found, loaded %d cache" %
                  len(tf_predict_u.cache))
        else:
            tf_predict_u.cache = dict()

    if not hasattr(tf_predict_u, 'model'):
        tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.3)
        import construct_dnn
        model = construct_dnn.construct_dnn()
        path = os.path.realpath(__file__)
        folder = os.path.dirname(path)
        model.load(os.path.join(folder, 'tf_model'))
        tf_predict_u.model = model
Example #26
def construct_dnn():
    tflearn.init_graph(num_cores=4, gpu_memory_fraction=0.6)
    img_aug = tflearn.ImageAugmentation()
    img_aug.add_random_90degrees_rotation()
    img_aug.add_random_flip_leftright()
    img_aug.add_random_flip_updown()
    input_layer = tflearn.input_data(shape=[None, 15, 15, 5], data_augmentation=img_aug)
    net = tflearn.conv_2d(input_layer, 192, 5, activation='relu')
    net = tflearn.max_pool_2d(net, 2)
    net = tflearn.fully_connected(net, 192, activation='relu')
    final = tflearn.fully_connected(net, 1, activation='sigmoid')
    sgd = tflearn.optimizers.SGD(learning_rate=0.1, lr_decay=0.95, decay_step=100000)
    regression = tflearn.regression(final, optimizer=sgd, loss='mean_square', metric='R2')
    model = tflearn.DNN(regression)
    return model
Example #27
def createNeuralNetworkModel(input_size, output_size, learningRate):
    network = input_data(shape=[None, input_size], name="input")

    tflearn.init_graph(gpu_memory_fraction=0.2)

    network = fully_connected(network, 12, activation="sigmoid", regularizer='L2',
                              weights_init=tf.constant_initializer(0.03))

    network = fully_connected(network, output_size, activation="softmax",
                              weights_init=tf.constant_initializer(0.02), regularizer='L2')

    network = regression(network, optimizer='RMSProp', learning_rate=learningRate,
                         loss="categorical_crossentropy", name="targets")

    #model = tflearn.DNN(network,tensorboard_dir='log/' + timestamp, tensorboard_verbose=3)
    model = tflearn.DNN(network, tensorboard_verbose=0)

    return model
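
A hypothetical call, in the same style as Example #2; the sizes are assumptions:

model = createNeuralNetworkModel(input_size=8, output_size=3, learningRate=1e-3)
# model.fit({'input': X}, {'targets': y}, n_epoch=50, show_metric=True)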
Example #28
def main():
    batch = speech_data.wave_batch_generator(10000,
                                             target=speech_data.Target.digits)
    X, Y = next(batch)
    Y = [numpy.hstack([y, numpy.array([0, 0, 0, 0, 0, 0])]) for y in Y]
    # Y = map(lambda a: , Y)
    print(type(Y))
    # print (np.hstack([Y[0], np.array([0, 0, 0, 0, 0, 0])]))
    number_classes = 16  # 10 digits padded with 6 extra classes (see Y above)

    # Classification
    tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

    net = tflearn.input_data(shape=[None, 8192])
    net = tflearn.fully_connected(net, 64, name='f1')
    net = tflearn.dropout(net, 0.5, name='dp')
    net = tflearn.fully_connected(net,
                                  number_classes,
                                  activation='softmax',
                                  name='f2')
    net = tflearn.regression(net,
                             optimizer='sgd',
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net)
    model.load('pre-trained/model.tflearn.sgd_trained')

    # Overfitting okay for now
    totalTime = 0
    totalAcc = 0
    numTimes = 100
    for i in range(numTimes):
        t = time.time()
        result = model.predict(X)
        print("-------------")

        result = numpy.array([numpy.argmax(r) for r in result])
        answers = numpy.array([numpy.argmax(answer) for answer in Y])

        print(i, ">>>", (result == answers).sum() / float(len(answers)),
              "time: ",
              time.time() - t)
        totalAcc = totalAcc + (result == answers).sum() / float(len(answers))
        totalTime = totalTime + time.time() - t

    print("Avg. Acc. = ", totalAcc / numTimes)
    print("Avg. time = ", totalTime / numTimes)
Example #29
    def __init__(self, savedModel=None):

        tflearn.init_graph(num_cores=1, gpu_memory_fraction=0.5)

        net = tflearn.input_data(shape=[None, 7])
        net = tflearn.fully_connected(net, 32)
        net = tflearn.fully_connected(net, 15, activation='softmax')
        net = tflearn.regression(net,
                                 optimizer='adam',
                                 loss='categorical_crossentropy',
                                 batch_size=50,
                                 learning_rate=0.01)

        self.model = tflearn.DNN(net)

        if savedModel is not None:
            self.model.load(savedModel)
Example #30
def exponential_regression_net_tf(xs, ys):
    tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)
    net = tflearn.input_data(shape=(None, 1))
    net = tflearn.fully_connected(net, 250, activation='relu')
    net = tflearn.fully_connected(net, 150, activation='sigmoid')
    net = tflearn.fully_connected(net, 60, activation='relu')
    net = tflearn.fully_connected(net, 10, activation='relu')
    net = tflearn.fully_connected(net, 1, activation='relu')
    net = tflearn.regression(net, optimizer='adam', loss='mean_square')
    model = tflearn.DNN(net)
    pxs = pack_for_tf(xs)
    model.fit(pxs,
              pack_for_tf(ys),
              batch_size=20,
              n_epoch=50,
              show_metric=True)
    return model.predict(pxs)
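
A hypothetical call; pack_for_tf is the module's own helper (not shown here) that reshapes a 1-D sequence into the [N, 1] layout the input layer expects:

xs = [x / 10.0 for x in range(100)]
ys = [2.718281828 ** x for x in xs]  # roughly exponential targets
predictions = exponential_regression_net_tf(xs, ys)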
Example #31
    def _hyper_train_target_sub(self, **kwargs):
        """
        Actual training procedure for specific set of hyper parameters.
        """

        if self.saver.log_filename:
            fh = logging.FileHandler(self.saver.log_filename)
            self.logger.addHandler(fh)

        config = tflearn.init_graph()
        config.gpu_options.allow_growth = True

        data = self.data_source(**kwargs)

        net = self.model(
            self.optimizer(**kwargs),
            self.data_source,
            **kwargs)
        model = tflearn.DNN(network=net,
                            tensorboard_verbose=kwargs.get('tensorboard_verbose', 0),
                            tensorboard_dir=str(self.saver.log_dirname),
                            checkpoint_path=os.path.join(self.saver.last_checkpoints_dirname,
                                                         self.saver.model_filename_prefix),
                            max_checkpoints=2)

        if self.data_source.rewrite_data_aug:
            train_op_cp.replace_train_op_initialize_fit_cp(model)

        bss_callback = BestStateSaverCallback(
            session=model.session,
            best_snapshot_path=os.path.join(self.saver.best_checkpoints_dirname,
                                            self.saver.model_filename_prefix),
            best_val_accuracy=kwargs.get('bss_best_val_accuracy', 0.0),
            bigger=kwargs.get('bss_bigger', True),
            epoch_tail=self.epoch_tail)

        try:
            model.fit(n_epoch=self.num_epoch,
                      show_metric=True,
                      batch_size=self.data_source.batch_size,
                      shuffle=True,
                      snapshot_epoch=True,
                      run_id=self.saver.project_name,
                      callbacks=bss_callback,
                      **data)
        except TrainControllerStopException as e:
            model.trainer.summ_writer.close()
            self.logger.info(e)

        self.logger.info("Best validation accuracy: {:.4f} (at epoch {})".format(
            bss_callback.best_val_accuracy, bss_callback.best_epoch))

        if self.saver.log_filename:
            self.logger.removeHandler(fh)
            fh.close()

        tf.reset_default_graph()
        model.session.close()

        return bss_callback.best_val_accuracy
Example #32
def processor():
    # print(request.form)
    img = base64.b64decode(request.form.get('img')[22:])
    with open('temp.jpg', 'wb') as f:
        f.write(img)
    img = plt.imread('temp.jpg')
    img = cv2.GaussianBlur(img, (11, 11), 0)
    img = transform.resize(img, (28, 28))
    img = img[:, :, 0] * 255
    tf.reset_default_graph()
    tflearn.init_graph()
    pred = model.predict([img.reshape(784)])
    res_arr = []
    for i, n in enumerate(pred[0]):
        res_arr.append([float(n), chr(i + 65)])
    res_arr.sort()
    res_arr.reverse()
    return jsonify({'pred': res_arr})
Example #33
def deep_net_tflearn(X_train, X_test, Y_train, Y_test, num_epoch, first_layer, second_layer, third_layer, fourth_layer):
    # Implementation with TFLearn
    tf.reset_default_graph()
    tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.8)
    tnorm = tflearn.initializations.uniform(minval=-1.0, maxval=1.0)

    # Building DNN
    nn = tflearn.input_data(shape=[None, len(X_train[0])])
    Input = nn
    nn = tflearn.fully_connected(nn, first_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_1")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, second_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_2")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, third_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_3")
    nn = tflearn.dropout(nn, 0.5)
    nn = tflearn.fully_connected(nn, fourth_layer, activation='elu', regularizer='L2', weights_init=tnorm, name="layer_4")
    nn = tflearn.dropout(nn, 0.5)
    Hidden_state = nn
    nn = tflearn.fully_connected(nn, len(Y_train[0]), activation='elu', weights_init=tnorm, name="layer_5")
    Output = nn
    #custom_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
    #    out_layer, tf_train_labels) +
    #    0.01*tf.nn.l2_loss(hidden_weights) +
    #    0.01*tf.nn.l2_loss(hidden_biases) +
    #    0.01*tf.nn.l2_loss(out_weights) +
    #    0.01*tf.nn.l2_loss(out_biases))


    # Regression layer with SGD and categorical cross-entropy loss
    net = tflearn.regression(nn, optimizer='SGD', learning_rate=0.001, loss='categorical_crossentropy', metric=None)

    # Train the network
    model = tflearn.DNN(net, tensorboard_verbose=3)
    model.fit(X_train, Y_train, n_epoch=num_epoch, validation_set=0.1, run_id="bitsight_nn_tflearn", batch_size=128)
    pred = model.predict(X_test)
    total = 0
    correct = 0

    for i in range(len(pred)):
        total += 1
        if np.argmax(pred[i]) == np.argmax(Y_test[i]):
            correct += 1
    return total*1., correct*1.
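
A hypothetical invocation of the classifier above; layer widths and epoch count are illustrative:

total, correct = deep_net_tflearn(X_train, X_test, Y_train, Y_test,
                                  num_epoch=20, first_layer=512, second_layer=256,
                                  third_layer=128, fourth_layer=64)
print("accuracy:", correct / total)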
Example #34
    def __init__(self, s_date):
        prev_bd = int(s_date[:6])-1
        prev_ed = int(s_date[9:15])-1
        if prev_bd%100 == 0: prev_bd -= 98
        if prev_ed%100 == 0: prev_ed -= 98
        pred_s_date = "%d01_%d01" % (prev_bd, prev_ed)
        prev_model = '../model/tflearn/lstm/%s' % pred_s_date
        self.model_dir = '../model/tflearn/lstm/%s' % s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.1)
        input_layer = tflearn.input_data(shape=[None, 30, 23], name='input')
        lstm1 = tflearn.lstm(input_layer, 23, dynamic=True, name='lstm1')
        dense1 = tflearn.fully_connected(lstm1, 1, name='dense1')
        output = tflearn.single_unit(dense1)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        if os.path.exists('%s/model.tfl' % prev_model):
            self.estimators.load('%s/model.tfl' % prev_model)
Example #35
    def __init__(self):
        self.len_past = 30
        #self.s_date = "20120101_20160330"
        #self.model_dir = '../model/tflearn/reg_l3_bn/big/%s/' % self.s_date

        tf.reset_default_graph()
        tflearn.init_graph(gpu_memory_fraction=0.05)
        input_layer = tflearn.input_data(shape=[None, 690], name='input')
        dense1 = tflearn.fully_connected(input_layer, 400, name='dense1', activation='relu')
        dense1n = tflearn.batch_normalization(dense1, name='BN1')
        dense2 = tflearn.fully_connected(dense1n, 100, name='dense2', activation='relu')
        dense2n = tflearn.batch_normalization(dense2, name='BN2')
        dense3 = tflearn.fully_connected(dense2n, 1, name='dense3')
        output = tflearn.single_unit(dense3)
        regression = tflearn.regression(output, optimizer='adam', loss='mean_square',
                                metric='R2', learning_rate=0.001)
        self.estimators = tflearn.DNN(regression)
        self.qty = {}
        self.day_last = {}
        self.currency = 100000000
Example #36
import pyaudio
import speech_data
import numpy
import tflearn

# Simple spoken digit recognition demo, with 98% accuracy in under a minute

# Training Step: 544  | total loss: 0.15866
# | Adam | epoch: 034 | loss: 0.15866 - acc: 0.9818 -- iter: 0000/1000

batch = speech_data.wave_batch_generator(10000, target=speech_data.Target.digits)
X, Y = next(batch)

number_classes = 10  # Digits

# Classification
tflearn.init_graph(num_cores=8, gpu_memory_fraction=0.5)

net = tflearn.input_data(shape=[None, 8192])
net = tflearn.fully_connected(net, 64)
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, number_classes, activation='softmax')
net = tflearn.regression(net, optimizer='adam', loss='categorical_crossentropy')

model = tflearn.DNN(net)
model.fit(X, Y, n_epoch=3, show_metric=True, snapshot_step=100)
# Overfitting okay for now

demo_file = "5_Vicki_260.wav"
demo = speech_data.load_wav_file(speech_data.path + demo_file)
result = model.predict([demo])
result = numpy.argmax(result)
Example #37
File: Yuan.py  Project: ckml/tf_learn
train_features = df_train[featurescolumns].values
X = train_features.astype(np.float32, copy=False)
train_labels = df_train[LABEL_COLUMN].values
train_labels = train_labels.astype(np.int32, copy=False)
train_labels = train_labels - 1
Y = to_categorical(train_labels, 24)

test_features = df_test[featurescolumns].values
X_val = test_features.astype(np.float32, copy=False)
test_labels = df_test[LABEL_COLUMN].values
test_labels = test_labels.astype(np.int32, copy=False)
test_labels = test_labels - 1
Y_val = to_categorical(test_labels, 24)

tflearn.init_graph(num_cores=15)
net = tflearn.input_data([None, 31])
net = tflearn.fully_connected(net, 100, activation='relu',
                              weights_init='xavier')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 300, activation='relu',
                              weights_init='xavier')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 500, activation='relu',
                              weights_init='xavier')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 300, activation='relu',
                              weights_init='xavier')
net = tflearn.dropout(net, 0.5)
net = tflearn.fully_connected(net, 100, activation='relu',
                              weights_init='xavier')
Example #38
from tflearn.layers.estimator import regression
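# tl is assumed to be `import tflearn as tl`; input_data, conv_2d and max_pool_2d
# come from tflearn.layers, and generate_patches / img2numpy_arr are project-local helpers.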

if __name__ == "__main__":
	parser = argparse.ArgumentParser()
	parser.add_argument('--t', action='store', dest='test_path', type=str, help='Test Data Path')
	parser.add_argument('--i', action='store', dest='image_count', type=str, help='Test Image Count')
	config = parser.parse_args()

	#Load Test data
	image_count = (config.image_count).split(",")
	image_count = (int(image_count[0]), int(image_count[1]))
	patch_count = 20
	X = generate_patches(img2numpy_arr(config.test_path), image_count, patch_count)

	# Building 'Complex ConvNet'
	tl.init_graph(num_cores=2, gpu_memory_fraction=0.2)
	network = input_data(shape=[None, 42, 42, 3], name='input')

	network = conv_2d(network, 16, 3, activation='relu')
	network = conv_2d(network, 16, 3, activation='relu')
	network = max_pool_2d(network, 2, strides=2)

	network = conv_2d(network, 64, 3, activation='relu')
	network = conv_2d(network, 64, 3, activation='relu')
	network = max_pool_2d(network, 2, strides=2)

	network = conv_2d(network, 64, 3, activation='relu')
	network = conv_2d(network, 64, 3, activation='relu')
	network = max_pool_2d(network, 2, strides=2)

	network = conv_2d(network, 64, 3, activation='relu')