Code example #1
File: wavenet.py  Project: ykv001/pycadl
def train_vctk():
    """Summary

    Returns
    -------
    TYPE
        Description
    """
    batch_size = 24
    filter_length = 2
    n_stages = 7
    n_layers_per_stage = 9
    n_hidden = 48
    n_skip = 384
    dataset = vctk.get_dataset()
    it_i = 0
    n_epochs = 1000
    sequence_length = get_sequence_length(n_stages, n_layers_per_stage)
    ckpt_path = 'vctk-wavenet/wavenet_filterlen{}_batchsize{}_sequencelen{}_stages{}_layers{}_hidden{}_skips{}'.format(
        filter_length, batch_size, sequence_length, n_stages,
        n_layers_per_stage, n_hidden, n_skip)
    with tf.Graph().as_default(), tf.Session() as sess:
        net = create_wavenet(batch_size=batch_size,
                             filter_length=filter_length,
                             n_hidden=n_hidden,
                             n_skip=n_skip,
                             n_stages=n_stages,
                             n_layers_per_stage=n_layers_per_stage)
        saver = tf.train.Saver()
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        if tf.train.latest_checkpoint(ckpt_path) is not None:
            saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
        batch = vctk.batch_generator
        with tf.variable_scope('optimizer'):
            opt = tf.train.AdamOptimizer(learning_rate=0.0002).minimize(
                net['loss'])
        var_list = [
            v for v in tf.global_variables() if v.name.startswith('optimizer')
        ]
        sess.run(tf.variables_initializer(var_list))
        writer = tf.summary.FileWriter(ckpt_path)
        for epoch_i in range(n_epochs):
            for batch_xs in batch(dataset, batch_size, sequence_length):
                loss, quantized, _ = sess.run(
                    [net['loss'], net['quantized'], opt],
                    feed_dict={net['x']: batch_xs})
                print(loss)
                if it_i % 100 == 0:
                    summary = sess.run(net['summaries'],
                                       feed_dict={net['x']: batch_xs})
                    writer.add_summary(summary, it_i)
                    # save
                    saver.save(sess,
                               os.path.join(ckpt_path, 'model.ckpt'),
                               global_step=it_i)
                it_i += 1

    return loss
Code example #2
def train_network(x):
    prediction = neural_network(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))

    # LR = 0.001 (Default)
    optimizer = tf.train.AdamOptimizer().minimize(cost)

    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost],
                                feed_dict={
                                    x: epoch_x,
                                    y: epoch_y
                                })
                epoch_loss += c
            print('Epoch', epoch, 'Completed out of', hm_epochs, ' loss: ',
                  epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:',
              accuracy.eval({
                  x: mnist.test.images,
                  y: mnist.test.labels
              }))
Code example #3
File: ElMo.py  Project: qhrong/MUDAC2019
def elmo_vectors(x):
    embeddings = elmo(x.tolist(), signature="default", as_dict=True)["elmo"]
    with tf.Session() as sess:
        sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
        #return sess.run(tf.reduce_mean(embeddings,1))
        message_embeddings = sess.run(embeddings)
        print(message_embeddings)
Code example #4
File: deep-net.py  Project: nishgaddam/ML_Practice
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    #learning_rate = 0.01
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    #cyles of feedforward + back Prop
    hm_epochs = 10

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost],
                                feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('epoch', epoch, 'completed out of', hm_epochs, 'loss:',
                  epoch_loss)

        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, tf.float32))
        print('Accuracy:',
              accuracy.eval({
                  x: mnist.test.images,
                  y: mnist.test.labels
              }))
Code example #5
def load_Begin_data(batch_size):
    Begin = pd.read_csv('begin.csv')
    Begin = Begin.sample(frac=1.0)

    train_X = Begin.loc[:, [
        'total_fpktl', 'total_bpktl', 'min_flowpktl', 'max_flowpktl',
        'flow_fin', 'bVarianceDataBytes', 'max_idle', 'Init_Win_bytes_forward',
        'min_seg_size_forward'
    ]]
    train_Y = Begin.loc[:, ['calss']]
    test_X = Begin.loc[:, [
        'total_fpktl', 'total_bpktl', 'min_flowpktl', 'max_flowpktl',
        'flow_fin', 'bVarianceDataBytes', 'max_idle', 'Init_Win_bytes_forward',
        'min_seg_size_forward'
    ]]
    test_Y = Begin.loc[:, ['calss']]

    train_X = train_X.values
    train_Y = train_Y.values
    test_X = test_X.values
    test_Y = test_Y.values

    train_Y = train_Y.reshape((-1, ))
    train_Y = tf.one_hot(train_Y, depth=3, axis=1, dtype='float32')
    test_Y = test_Y.reshape((-1, ))
    test_Y = tf.one_hot(test_Y, depth=3, axis=1, dtype='float32')

    with tf.Session() as sess:
        train_Y = sess.run(train_Y)
        test_Y = sess.run(test_Y)

    num_tr_batch = 471597 // batch_size

    return train_X, train_Y, num_tr_batch
Code example #6
File: ans_node.py  Project: virajshastri97/EruditeX
    def run_model(self):
        train_step = self.nn_model()
        dataset = self.get_dataset()

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            for data in dataset:
                sess.run([train_step], feed_dict=data)
Code example #7
File: word2vec.py  Project: mwin007/email-word2vec
 def __init__(self, db, sess=None):
     self.db = db
     serialized = db.query("SELECT EMBEDDINGS FROM WORD2VEC LIMIT 1")
     self.embeddings = tf.Variable(np.array(json.loads(serialized[0][0])))
     self.input = tf.placeholder(tf.int32, shape=[1])
     self.embed = tf.nn.embedding_lookup(self.embeddings, self.input)
     self.session = sess
     if sess is None:
         self.session = tf.Session()
Code example #8
File: initialization.py  Project: FTima/tensorflow
def linear_function():

    np.random.seed(1)

    X = tf.constant(np.random.randn(3, 1), name="X")
    W = tf.constant(np.random.randn(4, 3), name="W")
    b = tf.constant(np.random.randn(4, 1), name="b")
    Y = tf.add(tf.matmul(W, X), b)

    sess = tf.Session()
    result = sess.run(Y)

    sess.close()

    return result
Code example #9
File: addtf.py  Project: LigaData/Kamanja
    def execute(self, msg):
        """
        A real implementation would use the output fields to
        determine what should be returned.
        """
        a = tf.placeholder(tf.int32)
        b = tf.placeholder(tf.int32)
        add = tf.add(a, b)
        sess = tf.Session()
        sumofTup = sess.run(add, feed_dict={a: int(msg["a"]), b: int(msg["b"])})
        self.logger.debug("sumof Tup" + str(sumofTup))
        outMsg = json.dumps({'a': msg["a"], 'b': msg["b"], 'result': int(sumofTup)})
        self.logger.debug("sumof Tup" + outMsg)
        return outMsg
Code example #10
 def run(self, out, model_inputs, X):
     """ Runs the model while also setting the learning phase flags to False.
     """
     feed_dict = dict(zip(model_inputs, X))
     for t in self.learning_phase_flags:
         feed_dict[t] = False
     import tensorflow
     try:
         #tf 1
         self.session = tensorflow.Session()
     except:
         #tf 2
         from tensorflow.compat.v1.keras.backend import get_session
         tensorflow.compat.v1.disable_v2_behavior()
         self.session = get_session()
     return self.session.run(out, feed_dict)
Code example #11
File: Lab701.py  Project: jh630kim/RI_Learning
    def main():
        max_episodes = 5000

        replay_buffer = deque()

        with tf.Session() as sess:
            mainDQN = dqn.DQN(sess, input_size, output_size)
            tf.global_variables_initializer().run()

            for episode in range(max_episodes):
                e = 1. / ((episode / 10) + 1)
                done = False
                step_count = 0

                state = env.reset()
                while not done:
                    if np.random.rand(1) < e:
                        action = env.action_space.sample()
                    else:
                        action = np.argmax(mainDQN.predict(state))

                    next_state, reward, done, _ = env.step(action)

                    if done:
                        reward = -100

                    replay_buffer.append(
                        (state, action, reward, next_state, done))

                    if len(replay_buffer) > REPLAY_MEMORY:
                        replay_buffer.popleft()

                    state = next_state
                    step_count += 1
                    if step_count > 10000:
                        break

                print("Episode: {} steps: {}".format(episode, step_count))
                if step_count > 10000:
                    pass

                if episode % 10 == 1:
                    for _ in range(50):
                        minibatch = random.sample(replay_buffer, 10)
                        cost, _ = simple_replay_train(mainDQN, minibatch)
                    print("Cost: ", cost)
            bot_play(mainDQN)
Code example #12
File: hover.py  Project: freedomkwok/deep-learning
    def __init__(self):
        # State space: <position_x, .._y, .._z, orientation_x, .._y, .._z, .._w>
        cube_size = 300.0  # env is cube_size x cube_size x cube_size
        self.observation_space = spaces.Box(
            np.array([- cube_size / 2, - cube_size / 2,       0.0, -1.0, -1.0, -1.0, -1.0]),
            np.array([  cube_size / 2,   cube_size / 2, cube_size,  1.0,  1.0,  1.0,  1.0]))

        # Action space: <force_x, .._y, .._z, torque_x, .._y, .._z>
        max_force = 25.0
        max_torque = 25.0
        self.action_space = spaces.Box(
            np.array([-max_force, -max_force, -max_force, -max_torque, -max_torque, -max_torque]),
            np.array([ max_force,  max_force,  max_force,  max_torque,  max_torque,  max_torque]))

        self.max_duration = 3.0  # secs
        self.target_z = 10.0  # target height (z position) to reach for successful takeoff
        self.sess = tf.Session()
Code example #13
File: seq2seq_contrib.py  Project: remilb/ghissu-bot
    def tf_session(self):
        with tf.Session() as session:
            model = Seq2SeqModel(
                        encoder_cell=LSTMCell(input_size=self.hidden_units_encoder),
                        decoder_cell=LSTMCell(input_size=self.hidden_units_encoder),
                        vocab_size=self.vocab_size,
                        attention=True,
                        bidirectional=True,
                        debug=False
            )

            # Word Embedding Initialiser
            W = tf.Variable(tf.constant(0.0, shape=[self.vocab_size, self.embedding_dim]),
                        trainable=False, name="W")
            embedding_placeholder = tf.placeholder(tf.float32, [self.vocab_size, self.embedding_dim])
            embedding_init = W.assign(embedding_placeholder)
            req_embedded = tf.nn.embedding_lookup(W, self.X)

            session.run(tf.global_variables_initializer())
Code example #14
    def execute(self, msg):
        """
		A real implementation would use the output fields to
		determine what should be returned.
		"""
        a = tf.placeholder(tf.int32)
        b = tf.placeholder(tf.int32)
        sumofTup = tf.placeholder(tf.int32)
        add = tf.add(a, b)
        sess = tf.session()
        sumofTup = sess.run(add,
                            feed_dict={
                                a: int(msg["a"]),
                                b: int(msg["a"])
                            })
        self.logger.debug("sumof Tup" + str(sumofTup))
        outMsg = json.dumps({'a': msg["a"], 'b': msg["b"], 'result': sumofTup})
        self.logger.debug("sumof Tup" + outMsg)
        return outMsg
Code example #15
File: DeepQ.py  Project: cjjun/DRL
def Predict(agent):
    result_dict = global_result_dict
    X1,Y1 = terminal["X1"],terminal["Y1"]   
    X2,Y2 = terminal["X2"],terminal["Y2"]   
    X3,Y3 = terminal["X3"],terminal["Y3"]   
    X4,Y4 = terminal["X4"],terminal["Y4"]   

    Z1 = result_dict["Z1"]
    Z2 = result_dict["Z2"]
    Z3 = result_dict["Z3"]
    Z4 = result_dict["Z4"]
    
    with tf.Session() as sess:
        # Layer 1
        S1 = agent.state(1)
        Qout1 = sess.run( Z1, feed_dict={ X1:S1 } )
        Act1 = eipsilon_greedy( Qout1,episilon )
        
        if Act1 == REJECT:
            server = agent.Round_robin(task)
            if not server:
                agent.Recycle_Task(task)
            else:
                agent.Select_farm( server.farm )

        agent.Select_farm(Act1)

        # Layer 2
        S2 = agent.state(2)
        Qout2 = sess.run( Z2, feed_dict={ X2:S2 } )
        Act2 = eipsilon_greedy( Qout2,episilon )
        
        if Act2 == REJECT:
            server = agent.Round_robin(task)
            if not server:
                agent.Recycle_Task(task,"DIRECT")
            else:
                agent.Select_server(server)
                return

        agent.Select_server(Act2)
Code example #16
def classify(df, classifier, pieces=5):
    results = []

    sess = tf.Session()

    for item in split(df, pieces=pieces):
        x_train = np.array([[value for value in item]
                            for item in item['train'].text.to_numpy()])
        x_test = np.array([[value for value in item]
                           for item in item['test'].text.to_numpy()])
        y_train = item['train'].type.to_numpy()
        y_test = item['test'].type.to_numpy()

        print(f"X train = {x_train}")
        print(f"Y train = {y_train}")

        x_data = tf.placeholder(shape=[None, 100], dtype=tf.float32)
        y_target = tf.placeholder(shape=[None, 1], dtype=tf.float32)

        A = tf.Variable(tf.random.normal(shape=[100, 1]))
        b = tf.Variable(tf.random.normal(shape=[1, 1]))
        model_output = tf.subtract(tf.matmul(x_data, A), b)

        l2_norm = tf.reduce_sum(tf.square(A))
        alpha = tf.constant([0.1])
        classification_term = tf.reduce_mean(
            tf.maximum(0., tf.subtract(1., tf.multiply(model_output,
                                                       y_target))))
        loss = tf.add(classification_term, tf.multiply(alpha, l2_norm))

        results.append({
            "accuracy": accuracy_score(y_test, y_pred),
            "f1-score": f1_score(y_test, y_pred, average='weighted')
        })

    print(
        f"Average accuracy: {np.average([item['accuracy'] for item in results])} (constant classifier gives {np.max(df.groupby('type').size())/(np.sum(df.groupby('type').size()))})"
    )
    print(
        f"Average f1-score: {np.average([item['f1-score'] for item in results])}"
    )
Code example #17
def _demo():
    info("hi")
    debug("shouldn't appear")
    set_level(DEBUG)
    debug("should appear")
    dir = "/tmp/testlogging"
    if os.path.exists(dir):
        shutil.rmtree(dir)
    with session(dir=dir):  # assumed: session() is the logging module's own context manager, not a tf.Session
        logkv("a", 3)
        logkv("b", 2.5)
        dumpkvs()
        logkv("b", -2.5)
        logkv("a", 5.5)
        dumpkvs()
        info("^^^ should see a = 5.5")

    logkv("b", -2.5)
    dumpkvs()

    logkv("a", "longasslongasslongasslongasslongasslongassvalue")
    dumpkvs()
Code example #18
def main():
    max_ep = 5000
    replay_buffer = deque()

    with tf.Session() as sess:
        mainDQN = dqn.DQN(sess, input_size,output_size, name="main")
        targetDQN = dqn.DQN(sess, input_size,output_size, name="target")

        cp_op = get_copy_var_ops(dest_scope_name="target", src_scope_name="main")

        sess.run(cp_op)

        for episode in range(max_ep):
            e = 1. / ((episode / 10) + 1)
            done = False
            step_count = 0
            state = env.reset()

            while not done:
                if np.random.rand(1) < e:
                    action = env.action_space.sample()
                else:
                    action = np.argmax(mainDQN.predict(state))

                next_state, reward, done, _ = env.step(action)
                if done:
                    reward = -100
                replay_buffer.append((state, action, reward, next_state, done))

                if len(replay_buffer) > REPLAY_MEMORY:
                    replay_buffer.popleft()

                state = next_state
                step_count += 1
                if step_count > 10000:
                    break

                print "episode : {} step : {}".format(episode, step_count)
                if step_count > 1000
Code example #19
File: inference.py  Project: ucloud/uai-sdk
    def load_model(self):
        sess = tf.Session()
        x = tf.placeholder(dtype=tf.float32,
                           shape=[1, 32, 100, 3],
                           name='input')
        phase_tensor = tf.constant('test', dtype=tf.string)  # assumed: inference-phase flag expected by ShadowNet
        net = crnn_model.ShadowNet(phase=phase_tensor,
                                   hidden_nums=256,
                                   layers_nums=2,
                                   seq_length=15,
                                   num_classes=config.cfg.TRAIN.CLASSES_NUMS,
                                   rnn_cell_type='lstm')
        with tf.variable_scope('shadow'):
            net_out, tensor_dict = net.build_shadownet(inputdata=x)
        decodes, _ = tf.nn.ctc_beam_search_decoder(inputs=net_out,
                                                   sequence_length=20 *
                                                   np.ones(1),
                                                   merge_repeated=False)

        saver = tf.train.Saver()
        params_file = tf.train.latest_checkpoint(self.model_dir)
        saver.restore(sess=sess, save_path=params_file)
        self.output['sess'] = sess
        self.output['x'] = x
        self.output['y'] = decodes
Code example #20
def test_loop(hyper_params, vehicle, map, sensors):
    cp = init_tf()
    with tf.Session(config=cp) as sess:

        saver = tf.train.import_meta_graph(hyper_params.model_save_path + '.meta')
        saver.restore(sess, tf.train.latest_checkpoint(hyper_params.model_path))

        if saver is None:
            print("Didn't load")
        
        graph = tf.get_default_graph()
        inputs = graph.get_tensor_by_name(hyper_params.model_name + '/inputs:0')
        output = graph.get_tensor_by_name(hyper_params.model_name + '/output:0')

        episode_reward = 0

        while True:
            state = process_image(sensors.cam_queue)
            Qs = sess.run(output, feed_dict = {inputs: state.reshape((1, *state.shape))})
            index = np.argmax(Qs)
            
            car_controls = map_action(index, hyper_params.action_space)
            vehicle.apply_control(car_controls)
            reward = compute_reward(vehicle, sensors)

            episode_reward += reward

            done = isDone(reward)

            if done:
                print("Episode end, reward: {}".format(episode_reward))
                reset_env(map, vehicle, sensors)
                episode_reward = 0
            
            else:
                time.sleep(0.25)
Code example #21
File: wavenet.py  Project: pradeeps/pycadl
def train_vctk():
    """Summary

    Returns
    -------
    TYPE
        Description
    """
    batch_size = 24
    filter_length = 2
    n_stages = 7
    n_layers_per_stage = 9
    n_hidden = 48
    n_skip = 384
    dataset = vctk.get_dataset()
    it_i = 0
    n_epochs = 1000
    sequence_length = get_sequence_length(n_stages, n_layers_per_stage)
    ckpt_path = 'vctk-wavenet/wavenet_filterlen{}_batchsize{}_sequencelen{}_stages{}_layers{}_hidden{}_skips{}'.format(
        filter_length, batch_size, sequence_length, n_stages,
        n_layers_per_stage, n_hidden, n_skip)
    with tf.Graph().as_default(), tf.Session() as sess:
        net = create_wavenet(
            batch_size=batch_size,
            filter_length=filter_length,
            n_hidden=n_hidden,
            n_skip=n_skip,
            n_stages=n_stages,
            n_layers_per_stage=n_layers_per_stage)
        saver = tf.train.Saver()
        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())
        sess.run(init_op)
        if tf.train.latest_checkpoint(ckpt_path) is not None:
            saver.restore(sess, tf.train.latest_checkpoint(ckpt_path))
        batch = vctk.batch_generator
        with tf.variable_scope('optimizer'):
            opt = tf.train.AdamOptimizer(
                learning_rate=0.0002).minimize(net['loss'])
        var_list = [
            v for v in tf.global_variables() if v.name.startswith('optimizer')
        ]
        sess.run(tf.variables_initializer(var_list))
        writer = tf.summary.FileWriter(ckpt_path)
        for epoch_i in range(n_epochs):
            for batch_xs in batch(dataset, batch_size, sequence_length):
                loss, quantized, _ = sess.run(
                    [net['loss'], net['quantized'], opt],
                    feed_dict={net['x']: batch_xs})
                print(loss)
                if it_i % 100 == 0:
                    summary = sess.run(
                        net['summaries'], feed_dict={net['x']: batch_xs})
                    writer.add_summary(summary, it_i)
                    # save
                    saver.save(
                        sess,
                        os.path.join(ckpt_path, 'model.ckpt'),
                        global_step=it_i)
                it_i += 1

    return loss
Code example #22
def run_test(batch_size=30, test_num=10, tol=1e-5):
    print('*' * 80)
    print('*' + ' ' * 29 + 'Testing tt conv full' + ' ' * 29 + '*')
    print('*' * 80)

    in_h = 32
    in_w = 32

    padding = 'SAME'

    # inp_ch_modes = np.array([4, 4, 4, 3], dtype=np.int32)
    # in_c = np.prod(inp_ch_modes)
    # out_ch_modes = np.array([5, 2, 5, 5], dtype=np.int32)
    # out_c = np.prod(out_ch_modes)
    # ranks = np.array([3, 2, 2, 3, 1], dtype=np.int32)
    in_c = 192
    out_c = 48
    # out_cc = in_c/4
    # out_c = np.dtype('int32').type(out_cc)

    inp = tf.placeholder(tf.float32, [None, in_h, in_w, in_c])

    wh = 1
    ww = 1

    w_ph = tf.placeholder(tf.float32, [wh, ww, in_c, out_c])

    s = [1, 1]

    first_cov_downsampling = tf.nn.conv2d(inp, w_ph, [1] + s + [1], padding)
    start_matrix_shape = first_cov_downsampling.get_shape().as_list()

    with tf.Session() as covarianceMatrixSession:
        input_numpy = first_cov_downsampling.eval()
        for i in range(0, start_matrix_shape[0]):
            covariance_matrix_numpy = covariance_matrix_one_image(
                input_numpy[i])
            covariance_matrix_all_images_numpy = np.vstack(
                covariance_matrix_numpy)
        covariance_matrix_all_images_tensor = tf.convert_to_tensor(
            covariance_matrix_all_images_numpy)

    out = tensornet.layers.tt_conv_full(inp, [wh, ww],
                                        inp_ch_modes,
                                        out_ch_modes,
                                        ranks,
                                        s,
                                        padding,
                                        biases_initializer=None,
                                        scope='tt_conv')

    sess = tf.Session()
    graph = tf.get_default_graph()
    init_op = tf.global_variables_initializer()

    d = inp_ch_modes.size

    filters_t = graph.get_tensor_by_name('tt_conv/filters:0')

    cores_t = []
    for i in range(d):
        cores_t.append(graph.get_tensor_by_name('tt_conv/core_%d:0' % (i + 1)))

    for test in range(test_num):
        sess.run(init_op)

        filters = sess.run([filters_t])
        cores = sess.run(cores_t)

        w = np.reshape(filters.copy(), [wh, ww, ranks[0]])

        # mat = np.reshape(inp_cores[inp_ps[0]:inp_ps[1]], [inp_ch_ranks[0], inp_ch_modes[0], inp_ch_ranks[1]])

        for i in range(0, d):
            core = cores[i].copy()
            # [out_ch_modes[i] * ranks[i + 1], ranks[i] * inp_ch_modes[i]]
            core = np.transpose(core, [1, 0])
            core = np.reshape(
                core,
                [ranks[i], inp_ch_modes[i] * out_ch_modes[i] * ranks[i + 1]])

            w = np.reshape(w, [-1, ranks[i]])
            w = np.dot(w, core)

        # w = np.dot(w, np.reshape(mat, [inp_ch_ranks[0], -1]))

        L = []
        for i in range(d):
            L.append(inp_ch_modes[i])
            L.append(out_ch_modes[i])

        w = np.reshape(w, [-1] + L)
        w = np.transpose(w, [0] + list(range(1, 2 * d + 1, 2)) +
                         list(range(2, 2 * d + 1, 2)))

        w = np.reshape(w, [wh, ww, in_c, out_c])

        X = np.random.normal(0.0, 0.2, size=(batch_size, in_h, in_w, in_c))

        t1 = time.clock()
        correct = sess.run(first_cov_downsampling, feed_dict={w_ph: w, inp: X})
        t2 = time.clock()
        y = sess.run(out, feed_dict={w_ph: w, inp: X})
        t3 = time.clock()

        err = np.max(np.abs(correct - y))
        print('Test #{0:02d}. Error: {1:0.2g}'.format(test + 1, err))
        print('TT-conv time: {0:.2f} sec. conv time: {1:.2f} sec.'.format(
            t3 - t2, t2 - t1))
        assert err <= tol, 'Error = {0:0.2g} is bigger than tol = {1:0.2g}'.format(
            err, tol)
Code example #23
import tensorflow as tf
hello = tf.constant('hello')
sess = tf.Session()
print(sess.run(hello))

import cv2
print('openCV')

# Compression ratios for different image formats
import cv2
# read_type: 1 = color image
img = cv2.imread(file_name, read_type)

# IMWRITE_JPEG_QUALITY: image quality,
# numbers 0~100
# JPEG quality can be controlled, i.e. lossy compression; transparency is not supported
# PNG is lossless compression and also supports transparency
cv2.imwrite(file_name, img, [cv2.IMWRITE_JPEG_QUALITY, numbers])
# numbers: 0~9; the smaller the value, the weaker the compression
cv2.imwrite(file_name, img, [cv2.IMWRITE_PNG_COMPRESSION, numbers])
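
# A runnable version of the two imwrite calls above; 'input.jpg' and the output
# file names below are placeholders, not from the original snippet.
import cv2
img = cv2.imread('input.jpg', 1)                                  # 1 = color
cv2.imwrite('out_q50.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 50])   # lossy JPEG, quality 0~100
cv2.imwrite('out_c9.png', img, [cv2.IMWRITE_PNG_COMPRESSION, 9])  # lossless PNG, compression level 0~9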

## Pixel access
img = cv2.imread(file_name, 1)
# pixel at row 100, column 100
(b,g,r) = img[100,100]

# TF basics
data1 = tf.constant(2.5)
data2 = tf.Variable(10, name='var')
data3 = tf.constant(2, dtype=tf.int32)
print(data1)
import numpy as np
import tensorflow as tf

# initialize weights
w = tf.Variable(0, dtype=tf.float32)

# define cost function
cost = tf.add(tf.add(w**2, tf.multiply(-10., w)), 25)

# define learning algorithm
train = tf.train.GradientDescentOptimizer(0.01).minimize(cost)

# set up session
init = tf.global_variables_initializer()
session = tf.Session()

# initialize variables
session.run(init)

# evaluate a variable
print(session.run(w))
# >> 0.0
# w is still zero as defined above

# run one step of gradient descent
session.run(train)

# and evaluate w again
print(session.run(w))
# >> 0.1
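
# Why 0.1: cost = w**2 - 10*w + 25 = (w - 5)**2, so dcost/dw = 2*(w - 5) = -10 at w = 0,
# and one gradient-descent step with learning rate 0.01 gives w = 0 - 0.01*(-10) = 0.1.
# A quick sanity check in plain Python (no TensorFlow needed):
w_check = 0.0
grad = 2 * (w_check - 5)          # derivative of (w - 5)**2
w_check = w_check - 0.01 * grad   # one gradient-descent step
print(w_check)                    # 0.1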
Code example #25
File: main.py  Project: hermes2507/test_repo
import tensorflow as tf

tf.Session()
Code example #26
File: seq2seq.py  Project: sxu2583/machine-learning
"""
Deep learning based chat bot
"""

import tensorflow as tf
import data_utils
import seq2seq_model

def train():
    #Prepares the dataset
    enc_train, dec_train = data_utils.prepare_custom_data(gConfig['working_directory'])

    train_set = read_data(enc_train, dec_train)
    
    
#create model
def seq2seq_f(encoder_inputs, decoder_inputs, do_decode):
    return tf.nn.seq2seq.embedding_attention_seq2seq(
    encoder_inputs, decoder_inputs, cell, num_encoder_symbols = source_vocab_size,
    num_decoder_symbols = target_vocab_size, embedding_size = size, output_projection=output_projection,
    feed_previous=do_decode)

with tf.Session(config=config) as sess:
    model = create_model(sess, False)
    while True:
        sess.run(model)
        
        checkpoint_path = os.path.join(gConfig['working_directory'], "seq2seq.ckpt")
        model.saver.save(sess, checkpoint_path, global_step=model.global_step)
        
Code example #27
File: tf_intro.py  Project: praveentn/hgwxx7
E:\Python>python.exe
Python 3.6.3 (v3.6.3:2c5fed8, Oct  3 2017, 18:11:49) [MSC v.1900 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license" for more information.
>>> import tensorflow as tf
>>> tf.session()
Traceback (most recent call last):
  File "<stdin>", line 1, in <module>
AttributeError: module 'tensorflow' has no attribute 'session'
>>> tf.Session()
2018-10-03 10:21:40.779195: I T:\src\github\tensorflow\tensorflow\core\platform\cpu_feature_guard.cc:141] Your CPU supports instructions that this TensorFlow binary was not compiled to use: AVX2
<tensorflow.python.client.session.Session object at 0x0000020816B24DA0>

>>> s = tf.Session()
>>> s
<tensorflow.python.client.session.Session object at 0x0000020816B24DA0>

# placeholder
>>> x = tf.placeholder(tf.float32, shape=[2,2])
>>> x
<tf.Tensor 'Placeholder:0' shape=(2, 2) dtype=float32>
>>>

# identity
>>> y = tf.identity(x)
>>> y
<tf.Tensor 'Identity:0' shape=(2, 2) dtype=float32>
>>>

# load numpy
>>> import numpy as np
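
# The transcript above stops after the numpy import. A minimal sketch of feeding the
# placeholder x through the identity op y; the array values are illustrative, not from
# the original session.
import numpy as np
a = np.array([[1.0, 2.0], [3.0, 4.0]], dtype=np.float32)
print(s.run(y, feed_dict={x: a}))  # prints the same 2x2 array back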
Code example #28
def tensorflow_embedding(p_list, lambda1,lambda2, d, iterations,
                         results_file=None,
                         display_progress = False):
  if results_file:
    writer = tf.summary.FileWriter(results_file)

  n = p_list[0].shape[0]
  slices = len(p_list)
  sess = tf.Session()

  with tf.name_scope("loss_func"):
    lambda_1 = tf.constant(lambda1,name="lambda_1")
    lambda_2 = tf.constant(lambda2, name="lambda_2")

    u = tf.get_variable("u",initializer=tf.random_uniform([n,d], -0.1, 0.1))
    b = tf.get_variable("b",initializer=tf.ones([slices,d,d]))
    #pmi = tf.sparse_placeholder(tf.float32)

 #   indices = [(slice,i,j) for (i,j) in x.keys() for slice,x in enumerate(
  #    p_list)]

    indices = reduce(lambda x,y: x + y,[[(i,y,z) for (y,z) in p.keys()] for i,\
        p in enumerate(p_list)])
    values = reduce(lambda x, y: x + y, map(lambda x: list(x.values()), p_list))
    pmi = tf.SparseTensor(indices=indices, values=values,
                          dense_shape=[slices, n, n])

    ub = tf.map_fn(lambda b_k: tf.matmul(u,b_k),b)
    svd_term = tf.norm(tf.sparse_add(pmi,
      tf.map_fn(lambda ub_k: tf.matmul(-1 * ub_k, ub_k, transpose_b=True),ub)))
    fro_1 = tf.multiply(lambda_1, tf.norm(u))
    fro_2 = tf.multiply(lambda_2,tf.norm(b))
  #  fro_2 = tf.multiply(lambda_2, tf.norm(v))
  #  b_sym = tf.norm(tf.subtract(b,tf.transpose(b)))
    loss = svd_term + fro_1
    if results_file:
      tf.summary.scalar('loss',loss)
      tf.summary.tensor_summary("u",u)
      tf.summary.tensor_summary("b",b)

  with tf.name_scope("train"):
    optimizer = tf.train.AdamOptimizer()
    train = optimizer.minimize(loss)

  if results_file:
    writer.add_graph(sess.graph)
    merged_summary = tf.summary.merge_all()

  init = tf.global_variables_initializer()
  sess.run(init)

  print(sess.run(b))
  for i in range(iterations):
    if display_progress:
      if (i % (.1*iterations)) == 0:
        print "{}% training progress".format((float(i)/iterations) * 100)

    if results_file:
      if (i % 5 == 0):
        writer.add_summary(sess.run(merged_summary),i)
    sess.run(train)

  u_res,b_res = sess.run([u,b])
  print(b_res)
  return u_res, b_res
Code example #29
import tensorflow as tf
print(tf.__version__)

hello = tf.constant("Hello world")

# Session
sess = tf.Session()
print(sess.run(hello))

# with an InteractiveSession, eval() can also run against it directly
sess = tf.InteractiveSession()
print(hello.eval(session=sess))
Code example #30
File: numpy_sandbox.py  Project: asjad99/tensorflow
import numpy as np

y  = np.array([1,2,3.5,10,20])

print(y)

#--------------------------------------

t = np.ones([5,5])

print(t)


#--------------------------------------

import tensorflow as tf
sess = tf.InteractiveSession()

state = tf.variable(0,name="counter")

new_value = tf.add(state,tf.constant(1))

update = tf.assign(state,new_value)

with tf.Session() as sess:
	sess.run(tf.global_variables_initializer())
	print(sess.run(state))
	for _ in range(3):
		sess.run(update)
		print(sess.run(state))
Code example #31
import numpy as np
import tensorflow as tf

"""
phase1. assemble a graph
"""
#load data 'xor_dataset.txt'

#define placeholders for input and output

#define the weights and bias

#define hypotheses

#define loss function

#define optimizer


"""
phase2. use a session to execute operations in the graph
"""
with tf.Session() as sess:
	#init variables
	#write your own code...

	#test trained model

	#check accuracy
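
# One possible way to fill in the skeleton above; a sketch only, assuming TensorFlow 1.x
# and that xor_dataset.txt holds whitespace-separated rows of "x1 x2 label".
import numpy as np
import tensorflow as tf

data = np.loadtxt('xor_dataset.txt')
x_data, y_data = data[:, :2], data[:, 2:]

# placeholders for input and output
X = tf.placeholder(tf.float32, [None, 2])
Y = tf.placeholder(tf.float32, [None, 1])

# weights and bias for one hidden layer
W1 = tf.Variable(tf.random_normal([2, 4]))
b1 = tf.Variable(tf.zeros([4]))
hidden = tf.nn.relu(tf.matmul(X, W1) + b1)

W2 = tf.Variable(tf.random_normal([4, 1]))
b2 = tf.Variable(tf.zeros([1]))
logits = tf.matmul(hidden, W2) + b2
hypothesis = tf.sigmoid(logits)

# loss and optimizer
loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=Y, logits=logits))
train = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# accuracy of the trained model
predicted = tf.cast(hypothesis > 0.5, tf.float32)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, Y), tf.float32))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(5000):
        sess.run(train, feed_dict={X: x_data, Y: y_data})
    print(sess.run(accuracy, feed_dict={X: x_data, Y: y_data}))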
	
Code example #32
File: test03.py  Project: physicist123/test04
import matplotlib.pyplot as plt
plt.plot([1, 2, 3], [3, 2, 1])
plt.show()

import tensorflow as tf
hello = tf.constant("hello")
print(tf.Session().run(hello))
Code example #33
File: hellotf.py  Project: pashabhai/OpenNity
# -*- coding: utf-8 -*-
"""
Created on Sun Jul  3 01:42:17 2016

@author: Pashabhai
"""

import tensorflow as tf

hello = tf.constant('hello tensorflow')
session = tf.Session()
print(session.run(hello))

Code example #34
from __future__ import absolute_import, division, print_function, unicode_literals

# Install TensorFlow method
try:
  # %tensorflow_version only exists in Colab.
  %tensorflow_version 2.x
except Exception:
  pass

##import tensorflow as tf

import tensorflow as tf ## defined tf obj of tensorflow lib
hello=tf.constant("Hello World")
print('start')
sess=tf.session()
print(sess.run(hello))
print('done')
##Don't forget to load tensorflow files first.
##This program is only to check whether Tensorflow is installed in your system or not.
Code example #35
def train_loop(hyper_params, vehicle, map, sensors):

    cp = init_tf()
    dqn = DQNet(hyper_params.state_size,
                hyper_params.action_space, hyper_params.learning_rate,
                name=hyper_params.model_name)
    t_net = DQNet(hyper_params.state_size,
                  hyper_params.action_space, hyper_params.learning_rate,
                  name='TargetNetwork')
    writer = tf.summary.FileWriter('Summary')
    tf.summary.scalar('Loss', dqn.loss)
    tf.summary.scalar('Hubor_Loss', dqn.loss_2)
    tf.summary.histogram('Weights', dqn.weights)
    write_op = tf.summary.merge_all()
    saver = tf.train.Saver()

    memory = Memory(hyper_params.memory_size, hyper_params.pretrain_length,
                    hyper_params.action_space)
    if hyper_params.load_mem:
        memory = memory.load_memory(hyper_params.memory_load_path)
        print('Memory Loaded')
    else:
        memory.fill_memory(map, vehicle, sensors.cam_queue, sensors, autopilot=True)
        memory.save_memory(hyper_params.memory_save_path, memory)

    with tf.Session(config=cp) as sess:
        sess.run(tf.global_variables_initializer())
        writer.add_graph(sess.graph)
        m = 0
        decay_step = 0
        tau = 0

        update_t = update_target_graph()
        sess.run(update_t)
        for episode in range(1,hyper_params.total_episodes):
            reset_env(map, vehicle, sensors)
            state = process_image(sensors.cam_queue)
            done = False
            start = time.time()
            episode_reward = 0

            if hyper_params.model_save_freq:
                if episode % hyper_params.model_save_freq == 0:
                    save_path = saver.save(sess, hyper_params.model_save_path)
            
            for step in range(hyper_params.max_steps):
                tau += 1
                decay_step += 1
                index, action, explore_prob = dqn.predict_action(
                    sess, hyper_params.explore_start, hyper_params.explore_stop,
                    hyper_params.decay_rate, decay_step, state)

                car_controls = map_action(index, hyper_params.action_space)
                vehicle.apply_control(car_controls)
                time.sleep(0.25)
                next_state = process_image(sensors.cam_queue)
                reward = compute_reward(vehicle, sensors)
                episode_reward += reward 
                done = isDone(reward)

                experience = state, action, reward, next_state, done
                memory.store(experience)

                tree_index, batch, weights = memory.sample(hyper_params.batch_size)
                states, actions, rewards, next_states, dones = get_split_batch(batch)

                q_next = sess.run(dqn.output, feed_dict={dqn.inputs: next_states})
                q_target_next = sess.run(t_net.output, feed_dict={t_net.inputs: next_states})
                
                q_target_batch = []
                for i in range(0, len(dones)):
                    terminal = dones[i]
                    action = np.argmax(q_next[i])
                    if terminal:
                        q_target_batch.append(rewards[i])
                    else:
                        t = rewards[i] + (hyper_params.gamma * q_target_next[i][action])
                        q_target_batch.append(t)
                
                targets = np.array([each for each in q_target_batch])

                _, _, loss, loss_2, abs_errors = sess.run(
                    [dqn.optimizer, dqn.optimizer_2, dqn.loss, dqn.loss_2,
                     dqn.absolute_errors],  # assumed: DQNet exposes per-sample TD errors for the PER update below
                    feed_dict={
                        dqn.inputs: states,
                        dqn.target_q: targets,
                        dqn.actions: actions,
                        dqn.weights: weights})
                
                memory.batch_update(tree_index, abs_errors)

                if tau > hyper_params.max_tau:
                    update_t = update_target_graph()
                    sess.run(update_t)
                    m+=1
                    tau=0
                    print('Model Updated!')
                state = next_state
                if done:
                    print('{} episode finished. Total reward: {}'.format(episode, episode_reward))
                    break