def model_inputs(): """ Create tf placeholders for inputs, targets, learning_rate, and lengths of source and target sequences. """ inputs = tf.placeholders(tf.int32, [None, None], name='input') targets = tf.placeholder(tf.float32, [None, None], name='target') learning_rate = tf.placeholder(tf.float32) keep_prob = tf.placeholder(tf.float32, name='keep_prob') target_sequence_length = tf.placeholder(tf.int32, [None,], name='target_sequence_length') max_target_sequence_length = tf.reduce_max(target_sequence_length, name='max_target_length') source_sequence_length = tf.placeholder(tf.int32, name='source_sequence_length') return (inputs, targets, learning_rate, keep_prob, target_sequence_length, max_target_sequence_length, source_sequence_length)
def loss(self, net_out):
    m = self.meta
    loss_type = self.meta['type']
    # _LOSS_TYPE is the module-level list of supported loss names
    assert loss_type in _LOSS_TYPE, \
        'Loss type {} not implemented'.format(loss_type)

    out = net_out
    out_shape = out.get_shape()
    out_dtype = out.dtype.base_dtype
    _truth = tf.placeholder(out_dtype, out_shape)
    self.placeholders = dict({'truth': _truth})

    diff = _truth - out
    if loss_type in ['sse', 'l2']:
        loss = tf.nn.l2_loss(diff)
    elif loss_type == 'smooth':
        # smooth L1: quadratic for small residuals, linear for large ones
        small = tf.cast(tf.abs(diff) < 1, tf.float32)
        large = 1. - small
        l1_loss = tf.reduce_sum(tf.abs(tf.multiply(diff, large)))
        l2_loss = tf.nn.l2_loss(tf.multiply(diff, small))
        loss = l1_loss + l2_loss
    elif loss_type in ['sparse', 'l1']:
        loss = tf.reduce_sum(tf.abs(diff))
    elif loss_type == 'softmax':
        loss = tf.nn.softmax_cross_entropy_with_logits(
            logits=out, labels=tf.stop_gradient(_truth))
        loss = tf.reduce_mean(loss)
    elif loss_type == 'svm':
        assert 'train_size' in m, \
            'Must specify train_size for the svm loss'
        size = m['train_size']
        num_classes = int(out_shape[-1])  # class count inferred from the network output
        self.nu = tf.Variable(tf.ones([size, num_classes]))
def __init__(self, D, K, hidden_layer_sizes):
    # D = input size, K = output size (number of actions),
    # hidden_layer_sizes = list of all hidden layer sizes
    # this part creates the main graph
    self.layers = []
    M1 = D
    for M2 in hidden_layer_sizes:
        layer = HiddenLayer(M1, M2)
        self.layers.append(layer)
        M1 = M2

    # final layer
    layer = HiddenLayer(M1, K, tf.nn.softmax, use_bias=False)
    self.layers.append(layer)

    # inputs and targets
    self.X = tf.placeholder(tf.float32, shape=(None, D), name='X')
    self.actions = tf.placeholder(tf.int32, shape=(None,), name='actions')
    self.advantages = tf.placeholder(tf.float32, shape=(None,), name='advantages')

    # calculate output and costs
    Z = self.X
    for layer in self.layers:
        Z = layer.forward(Z)
    p_a_given_s = Z
    self.predict_op = p_a_given_s

    # cost function and train operation
    selected_probs = tf.log(
        tf.reduce_sum(p_a_given_s * tf.one_hot(self.actions, K),
                      reduction_indices=[1]))
    cost = -tf.reduce_sum(self.advantages * selected_probs)
    self.train_op = tf.train.AdagradOptimizer(1e-1).minimize(cost)
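# HiddenLayer is used above but not defined in this snippet. A minimal sketch
# consistent with how it is called (constructor taking input/output sizes, an
# optional activation, and use_bias, plus a forward method) might look like
# this; the initialization scheme is an assumption.
import numpy as np
import tensorflow as tf

class HiddenLayer:
    """Minimal fully connected layer matching the (M1, M2, f, use_bias) usage above."""
    def __init__(self, M1, M2, f=tf.nn.tanh, use_bias=True):
        self.W = tf.Variable(tf.random_normal((M1, M2)) * np.sqrt(2.0 / M1))
        self.use_bias = use_bias
        if use_bias:
            self.b = tf.Variable(np.zeros(M2).astype(np.float32))
        self.f = f

    def forward(self, X):
        a = tf.matmul(X, self.W)
        if self.use_bias:
            a = a + self.b
        return self.f(a)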
import numpy as np
import tensorflow as tf
import xlrd
import utils

DATA_FILE = 'data/fire_theft.xls'

# read data from the excel file
book = xlrd.open_workbook(DATA_FILE, encoding_override='utf-8')
sheet = book.sheet_by_index(0)
data = np.asarray([sheet.row_values(i) for i in range(1, sheet.nrows)])  # skip the header row
n_data = sheet.nrows - 1

# create placeholders for input X and label Y
X = tf.placeholder(tf.float32, name='X')
Y = tf.placeholder(tf.float32, name='Y')

# create weight w and bias, both initialized to zero
w = tf.Variable(0.0, name='weights')
bias = tf.Variable(0.0, name='bias')

# build the linear model
Y_predicted = w * X + bias

# define the squared error as the loss function
loss = tf.square(Y - Y_predicted, name='loss')

# use gradient descent with learning rate 0.001 to minimize the loss
optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.001).minimize(loss)

def huber_loss_function(Y, predicted, delta=14.):
    # standard Huber loss: quadratic for small residuals, linear for large ones
    residual = tf.abs(Y - predicted)
    small_res = 0.5 * tf.square(residual)
    large_res = delta * residual - 0.5 * tf.square(delta)
    return tf.where(residual < delta, small_res, large_res)
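# A minimal training loop sketch for the regression graph above. The number of
# epochs and the print cadence are illustrative assumptions, not part of the
# original snippet.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(50):  # illustrative number of epochs
        total_loss = 0.0
        for x, y in data:
            _, l = sess.run([optimizer, loss], feed_dict={X: x, Y: y})
            total_loss += l
        print('Epoch {0}: {1}'.format(epoch, total_loss / n_data))
    w_value, b_value = sess.run([w, bias])
    print('w = {0}, bias = {1}'.format(w_value, b_value))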
if FLAGS.model == 'gcn':
    support = [preprocess_adj(adj)]
    num_supports = 1
    model_func = GCN
elif FLAGS.model == 'gcn_cheby':
    support = chebyshev_polynomials(adj, FLAGS.max_degree)
    num_supports = 1 + FLAGS.max_degree
    model_func = GCN
elif FLAGS.model == 'dense':
    support = [preprocess_adj(adj)]  # not used
    num_supports = 1
    model_func = MLP
else:
    raise ValueError('Invalid argument for model: ' + str(FLAGS.model))

# define the graph placeholders
placeholders = {
    'support': [tf.sparse_placeholder(tf.float32) for _ in range(num_supports)],
    'features': tf.sparse_placeholder(tf.float32, shape=tf.constant(features[2], dtype=tf.int64)),
    'labels': tf.placeholder(tf.float32, shape=(None, y_train.shape[1])),
    'labels_mask': tf.placeholder(tf.int32),
    'dropout': tf.placeholder_with_default(0., shape=()),
    'num_features_nonzero': tf.placeholder(tf.int32)
}

# create the model
model = model_func(placeholders, input_dim=features[2][1], logging=True)

sess = tf.Session()

def evaluate(features, support, labels, mask, placeholders):
    """Run one validation/test pass and return loss, accuracy, and wall time."""
    t_test = time.time()
    feed_dict_val = construct_feed_dict(features, support, labels, mask, placeholders)
    outs_val = sess.run([model.loss, model.accuracy], feed_dict=feed_dict_val)
    return outs_val[0], outs_val[1], (time.time() - t_test)

sess.run(tf.global_variables_initializer())
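# construct_feed_dict is defined elsewhere in this codebase. A minimal sketch
# consistent with the placeholders dictionary above could look like the
# following; the handling of 'num_features_nonzero' assumes the features are
# passed as a sparse (coords, values, shape) tuple, which is an assumption.
def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Build the feed dictionary for one run of the model."""
    feed_dict = dict()
    feed_dict.update({placeholders['labels']: labels})
    feed_dict.update({placeholders['labels_mask']: labels_mask})
    feed_dict.update({placeholders['features']: features})
    feed_dict.update({placeholders['support'][i]: support[i] for i in range(len(support))})
    feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
    return feed_dict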
def model_inputs():
    inputs = tf.placeholder(tf.int32, [None, None], name='input')
    targets = tf.placeholder(tf.int32, [None, None], name='target')
    lr = tf.placeholder(tf.float32, name='learning_rate')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')  # controls the dropout keep probability
    return inputs, targets, lr, keep_prob
def train(train_data, test_data=None):
    G = train_data[0]
    properties = train_data[1]
    id_map = train_data[2]
    class_map = train_data[4]
    num_classes = len(list(class_map.values())[0])

    if properties is not None:
        # append a dummy row of zeros to the property matrix
        properties = np.vstack([properties, np.zeros((properties.shape[1],))])
    context_pairs = train_data[3] if FLAGS.random_context else None

    placeholders = construct_placeholders(num_classes)
    minibatch = NodeMinibatchIterator(....)
    adj_info_ph = tf.placeholder(tf.int32, shape=minibatch.adj.shape)
    adj_info = tf.Variable(adj_info_ph, trainable=False, name='adj_info')

    # training using the proposed algorithm
    model = SupervisedPanda(num_classes, placeholders, properties, adj_info,
                            minibatch.deg, layer_info,
                            model_size=FLAGS.model_size,
                            sigmoid_loss=FLAGS.sigmoid,
                            identity_dim=FLAGS.identity_dim,
                            logging=True)

    config = tf.ConfigProto(log_device_placement=FLAGS.log_device_placement)

    # session initialization
    sess = tf.Session(config=config)
    merged = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(log_dir(), sess.graph)

    # variable initialization
    sess.run(tf.global_variables_initializer(), feed_dict={adj_info_ph: minibatch.adj})

    # model training
    total_steps = 0
    avg_time = 0.0
    epoch_val_costs = []
    train_adj_info = tf.assign(adj_info, minibatch.adj)
    val_adj_info = tf.assign(adj_info, minibatch.test_adj)

    for epoch in range(FLAGS.epochs):
        minibatch.shuffle()
        iter = 0
        print('epoch: %0.4d' % (epoch + 1))
        epoch_val_costs.append(0)
        while not minibatch.end():
            feed_dict, labels = minibatch.next_minibatch_feed_dict()
            feed_dict.update({placeholders['dropout']: FLAGS.dropout})

            t = time.time()
            outs = sess.run([merged, model.opt_op, model.loss, model.predictions],
                            feed_dict=feed_dict)
            train_cost = outs[2]

            if iter % FLAGS.validate_iter == 0:
                # switch to the validation adjacency, evaluate, then switch back
                sess.run(val_adj_info.op)
                if FLAGS.validate_batch_size == -1:
                    val_cost, val_f1_mic, val_f1_mac, duration = incremental_evaluate(
                        sess, model, minibatch, FLAGS.batch_size)
                else:
                    val_cost, val_f1_mic, val_f1_mac, duration = evaluate(
                        sess, model, minibatch, FLAGS.validate_batch_size)
                sess.run(train_adj_info.op)
                epoch_val_costs[-1] += val_cost

            if total_steps % FLAGS.print_every == 0:
                summary_writer.add_summary(outs[0], total_steps)

            avg_time = (avg_time * total_steps + time.time() - t) / (total_steps + 1)
            iter += 1
            total_steps += 1
            if total_steps > FLAGS.max_total_steps:
                break
        if total_steps > FLAGS.max_total_steps:
            break
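# construct_placeholders is not shown in this snippet. A minimal sketch
# consistent with how the training loop uses it (it must at least expose a
# 'dropout' entry and label placeholders) might look like this; the 'batch'
# and 'batch_size' entries are assumptions for illustration.
import tensorflow as tf

def construct_placeholders(num_classes):
    placeholders = {
        'labels': tf.placeholder(tf.float32, shape=(None, num_classes), name='labels'),
        'batch': tf.placeholder(tf.int32, shape=(None,), name='batch'),
        'dropout': tf.placeholder_with_default(0., shape=(), name='dropout'),
        'batch_size': tf.placeholder(tf.int32, name='batch_size'),
    }
    return placeholders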
import math
import tensorflow as tf

# =============
# parameter configuration
# =============
batch_size = 32          # normally between 16 and 512
vocabulary_size = 10000
embedding_size = 100
num_sampled = 1          # number of negative examples to sample

embeddings = tf.Variable(
    tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
nce_weights = tf.Variable(
    tf.truncated_normal([vocabulary_size, embedding_size],
                        stddev=1.0 / math.sqrt(embedding_size)))
nce_biases = tf.Variable(tf.zeros([vocabulary_size]))

# skip-gram model
# placeholders for inputs
train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])

# retrieve embeddings of the source words
embed = tf.nn.embedding_lookup(embeddings, train_inputs)

# compute the NCE loss, using a sample of the negative labels each time
loss = tf.reduce_mean(
    tf.nn.nce_loss(weights=nce_weights,
                   biases=nce_biases,
                   labels=train_labels,
                   inputs=embed,
                   num_sampled=num_sampled,
                   num_classes=vocabulary_size))

# use the SGD optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate=1.0).minimize(loss)
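# A minimal training sketch for the skip-gram graph above. generate_batch is a
# hypothetical helper (not part of the original snippet) that yields center
# word ids shaped [batch_size] and context word ids shaped [batch_size, 1];
# the step count and print cadence are also illustrative assumptions.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):  # illustrative number of steps
        batch_inputs, batch_labels = generate_batch(batch_size)
        _, cur_loss = sess.run([optimizer, loss],
                               feed_dict={train_inputs: batch_inputs,
                                          train_labels: batch_labels})
        if step % 1000 == 0:
            print('step {}: loss = {:.4f}'.format(step, cur_loss))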
print("Data loaded and spiltted successfully........\n") #--------------------------------------------------------------------- #Neural Net Construction n_input = N_INPUT n_hidden1 = HIDDEN_SIZE n_hidden2 = HIDDEN_SIZE n_hidden3 = HIDDEN_SIZE n_hidden4 = HIDDEN_SIZE n_classes = N_CLASSES #Tf placeholders X = tf.placeholders(tf.float32,[None, n_input]) Y = tf.placeholders(tf.float32,[None, n_classes]) dropout_keep_prob = tf.placeholder(tf.float32) def mlp(_X, _weights, _biases, dropout_keep_prob): layer1 = tf.nn.dropout(tf.nn.tanh(tf.add(tf.matmul(_X,_weights['h1']),_biases['b1'])), dropout_keep_prob) layer2 = tf.nn.dropout(tf.nn.tanh(tf.add(tf.matmul(layer1, _weights['h2']),_biases['b2'])),dropout_keep_prob) layer3 = tf.nn.dropout(tf.nn.tanh(tf.add(tf.matmul(layer2, _weights['h3']),_biases['b3'])),dropout_keep_prob) layer4 = tf.nn.dropout(tf.nn.tanh(tf.add(tf.matmul(layer3, _weights['h3']),_biases['b3'])),dropout_keep_prob) out = ACTIVATION_FUNCTION_OUT(tf.add(tf.matmul(layer4,_weights['h4']),_biases['out'])) return out weights ={ 'h1': tf.Variable(tf.random_normal([n_input, n_hidden1],stddev= STDDEV)), 'h2': tf.Variable(tf.random_normal([n_hidden1, n_hidden2],stddev= STDDEV)), 'h3': tf.Variable(tf.random_normal([n_hidden2, n_hidden3],stddev= STDDEV)),