def gen_dict(a_train, b_train, training_value=0):
    """Build one combined feed dict from paired A/B training batches.

    Merges the topology feeds of both graph branches, adds the reshaped
    A-branch labels under `label_gold`, and the train/eval phase flag.

    Args:
        a_train: (X, y, w, ids) batch tuple for the A branch; only X and y
            are used.
        b_train: (X, y, w, ids) batch tuple for the B branch; only X is used.
        training_value: value fed to the `training` placeholder (default 0).

    Returns:
        dict suitable for `sess.run(..., feed_dict=...)`.
    """
    X_a, y_a, _w_a, _ids_a = a_train
    X_b, _y_b, _w_b, _ids_b = b_train
    # Topology entries of the two branches merged into a single dict;
    # B entries overwrite any colliding A keys, as in the original update().
    atoms_dict = dict(graphA_topology.batch_to_feed_dict(X_a))
    atoms_dict.update(graphB_topology.batch_to_feed_dict(X_b))
    # Labels come from the A branch only, flattened to [batch_size].
    labels = np.reshape(y_a, [batch_size])
    return merge_dicts(
        [{label_gold: labels}, atoms_dict, {training: training_value}])
def construct_feed_dict(self, test, support, training=True, add_phase=False):
    """Constructs tensorflow feed from test/support sets."""
    # Support-set topology feed, with the support labels attached.
    support_dict = self.model.support_graph_topology.batch_to_feed_dict(
        support.X)
    support_dict[self.support_label_placeholder] = np.squeeze(support.y)
    # Test-set topology feed, merged with the support entries.
    test_topo_dict = self.model.test_graph_topology.batch_to_feed_dict(test.X)
    feed_dict = merge_dicts([test_topo_dict, support_dict])
    # Test labels and weights.
    feed_dict[self.test_label_placeholder] = np.squeeze(test.y)
    feed_dict[self.test_weight_placeholder] = np.squeeze(test.w)
    if add_phase:
        # Communicate the train/eval phase to the graph when requested.
        feed_dict[self.phase] = training
    return feed_dict
def construct_feed_dict(self, X_b, y_b=None, w_b=None, training=True):
    """Get initial information about task normalization"""
    # TODO(rbharath): I believe this is total amount of data
    n_samples = len(X_b)
    # Missing labels/weights default to all-zero arrays of shape
    # (n_samples, n_tasks), e.g. at inference time.
    if y_b is None:
        y_b = np.zeros((n_samples, self.n_tasks))
    if w_b is None:
        w_b = np.zeros((n_samples, self.n_tasks))
    targets_dict = {
        self.label_placeholder: y_b,
        self.weight_placeholder: w_b,
    }
    # Graph/topology feed entries for this batch.
    atoms_dict = self.graph_topology.batch_to_feed_dict(X_b)
    # TODO (hraut->rhbarath): num_datapoints should be a vector, with ith element being
    # the number of labeled data points in target_i. This is to normalize each task
    # num_dat_dict = {self.num_datapoints_placeholder : self.}
    # Get other optimizer information
    # TODO(rbharath): Figure out how to handle phase appropriately
    return merge_dicts([targets_dict, atoms_dict])
def construct_feed_dict(self, X_b, y_b=None, w_b=None, training=True):
    """Get initial information about task normalization"""
    # TODO(rbharath): I believe this is total amount of data
    batch_len = len(X_b)
    # When labels or weights are absent, substitute zero arrays of shape
    # (batch_len, n_tasks).
    if y_b is None:
        y_b = np.zeros((batch_len, self.n_tasks))
    if w_b is None:
        w_b = np.zeros((batch_len, self.n_tasks))
    # Placeholder -> value entries for labels and weights.
    targets_dict = {self.label_placeholder: y_b}
    targets_dict[self.weight_placeholder] = w_b
    # Entries produced by the graph topology for this batch.
    atoms_dict = self.graph_topology.batch_to_feed_dict(X_b)
    # TODO (hraut->rhbarath): num_datapoints should be a vector, with ith element being
    # the number of labeled data points in target_i. This is to normalize each task
    # num_dat_dict = {self.num_datapoints_placeholder : self.}
    # Get other optimizer information
    # TODO(rbharath): Figure out how to handle phase appropriately
    feed_dict = merge_dicts([targets_dict, atoms_dict])
    return feed_dict
def construct_feed_dict(self, test, support, training=True, add_phase=False):
    """Constructs tensorflow feed from test/support sets."""
    # Start from the support topology feed and attach the support labels.
    feed_dict = self.model.support_graph_topology.batch_to_feed_dict(support.X)
    feed_dict[self.support_label_placeholder] = np.squeeze(support.y)
    # Merge in the test topology feed.
    feed_dict = merge_dicts(
        [self.model.test_graph_topology.batch_to_feed_dict(test.X), feed_dict])
    # Attach test labels and per-example weights.
    feed_dict[self.test_label_placeholder] = np.squeeze(test.y)
    feed_dict[self.test_weight_placeholder] = np.squeeze(test.w)
    # Optionally feed the train/eval phase flag.
    if add_phase:
        feed_dict[self.phase] = training
    return feed_dict
# NOTE(review): fragment of a larger mode-dispatch script — the opening `if`
# of this `elif` chain, the enclosing session setup (`sess`, `saver`,
# `predictions`, `train_A`, `train_B`, ...), and the code that consumes
# `max_step` are all outside this view, so the code is left byte-identical.
# The 'prediction' branch rebuilds a merged feed dict (as in gen_dict above)
# and runs `predictions`; the 'prediction_vector' branch restores a checkpoint
# and precomputes padded deterministic batches plus step counts.
print("Accuracy for test set is" , accuracy ,evaluation) elif mode=='prediction': X_a, y_a, w_a, ids_a = a_train X_b, y_b, w_b, ids_b = b_train atoms_dictA = graphA_topology.batch_to_feed_dict(X_a) atoms_dictB = graphB_topology.batch_to_feed_dict(X_b) atoms_dict = atoms_dictA.copy() atoms_dict.update(atoms_dictB) Y = np.reshape(y_a, [batch_size]) label_dict = {label_gold: Y} training_dict = {training: 0} feed_dict = merge_dicts([label_dict, atoms_dict, training_dict]) result = sess.run(predictions, feed_dict=feed_dict) print(result) elif mode=='prediction_vector': print("Restoring model from " + restore_path) saver.restore(sess, restore_path) print("DONE!") a_train = list(train_A.iterbatches(batch_size, pad_batches=True, deterministic=True)) b_train = list(train_B.iterbatches(batch_size, pad_batches=True, deterministic=True)) evaluate_train = [a_train, b_train] x_shape, y_shape, w_shape, i_shape = train_A.get_shape() train_step = int(x_shape[0] / batch_size) + 1 lose_num_train = train_step * batch_size - x_shape[0] max_step = train_step * max_epochs