def __init__(self, output_dim, vocab_size1, emb1_dim, vocab_size2, emb2_dim,
             batch_size, max_seq_length, n_hidden, n_hidden_layers,
             learning_rate, keep_prob, test_inputs1, test_inputs2,
             test_seq_lengths, test_indices_wsd, test_labels,
             wsd_classifier=True, pos_classifier=False, pos_classes=0,
             test_pos_labels=None):
    """See docstring for AbstractModel.

    This subclass only forwards all arguments to the base constructor and
    then immediately builds and runs the model.
    """
    AbstractModel.__init__(self, output_dim, vocab_size1, emb1_dim,
                           vocab_size2, emb2_dim, batch_size, max_seq_length,
                           n_hidden, n_hidden_layers, learning_rate,
                           keep_prob, test_inputs1, test_inputs2,
                           test_seq_lengths, test_indices_wsd, test_labels,
                           wsd_classifier, pos_classifier, pos_classes,
                           test_pos_labels)
    # Kick off graph construction / training as part of construction.
    self.run_neural_model()
def __init__(self):
    """Build the generator solver, configured from command-line flags."""
    AbstractModel.__init__(self)
    cli = argparse.ArgumentParser()
    cli.add_argument('--noise_n', type=int, default=128)
    cli.add_argument('--G_last_act', type=str, default="tanh")
    cli.add_argument('--G_pretrained_model', type=str,
                     default="weights/generator.pth")
    # Hand the parsed configuration straight to the solver.
    self.solver = Solver(cli.parse_args())
def __init__(self, output_dim, vocab_size1, emb1_dim, vocab_size2, emb2_dim,
             batch_size, max_seq_length, n_hidden, n_hidden_layers,
             learning_rate, keep_prob, test_inputs1, test_inputs2,
             test_seq_lengths, test_indices_wsd, test_labels_wsd,
             test_labels_wsd_context, wsd_classifier=True,
             pos_classifier=False, pos_classes=0, test_pos_labels=None):
    """See docstring for AbstractModel for most of the parameters.

    Additional args:
        test_labels_wsd_context: An array of floats, the gold data
            embeddings for the embedding pathway.
    """
    AbstractModel.__init__(
        self, output_dim, vocab_size1, emb1_dim, vocab_size2, emb2_dim,
        batch_size, max_seq_length, n_hidden, n_hidden_layers,
        learning_rate, keep_prob, test_inputs1, test_inputs2,
        test_seq_lengths, test_indices_wsd, test_labels_wsd,
        wsd_classifier, pos_classifier, pos_classes, test_pos_labels)
    # Linear projection from the (presumably bidirectional, hence
    # 2 * n_hidden — TODO confirm) RNN output into the emb1_dim embedding
    # space used by the context-embedding WSD pathway.
    self.weights_wsd_context = tf.get_variable(
        name="context_wsd-w", shape=[2 * n_hidden, emb1_dim],
        dtype=tf.float32)
    self.biases_wsd_context = tf.get_variable(name="context_wsd-b",
                                              shape=[emb1_dim],
                                              dtype=tf.float32)
    # Gold embeddings for each training batch are fed in at run time.
    self.train_labels_wsd_context = tf.placeholder(
        tf.float32, shape=[None, emb1_dim],
        name="train_labels_wsd_context")
    # self.train_labels_wsd = (self.train_labels_wsd, self.train_labels_wsd_context)
    # Test-set gold embeddings are baked into the graph as a constant.
    self.test_labels_wsd_context = tf.constant(test_labels_wsd_context,
                                               tf.float32)
    # self.test_labels_wsd = (self.test_labels_wsd, self.test_labels_wsd_context)
    self.run_neural_model()
def __init__(self):
    """Load the Haar-cascade frontal-face detector from the bundled weights."""
    AbstractModel.__init__(self)
    cascade_path = r"weights/haarcascade_frontalface_alt.xml"
    # NOTE(review): attribute keeps the original spelling "classfier"
    # because other methods of this class presumably reference it.
    self.classfier = cv2.CascadeClassifier(cascade_path)
def __init__(self, packing=None, name='circle', sigma=1.0):
    """Initialize the circle model.

    Args:
        packing: Packing value for the model; defaults to 0.0 when None.
        name: Model name forwarded to AbstractModel.
        sigma: Sigma forwarded to AbstractModel.
    """
    AbstractModel.__init__(self, name, sigma)
    # Use `is None` (identity), not `== None`: PEP 8 idiom, and it cannot
    # be fooled by arguments with a custom __eq__.
    if packing is None:
        packing = 0.0
    self.__packing__ = packing
def __init__(self):
    """Create the recognition solver backed by the bundled database and weights."""
    AbstractModel.__init__(self)
    db_dir = "database"
    weights_path = "weights/recognition.pth"
    self.solver = Solver(db_dir, weights_path)
def __init__(self, name='lorentz', metric=lambda x: numpysum(x*x), sigma=1.0):
    """Initialize the lorentz model, forwarding name, metric, and sigma
    to AbstractModel. The default metric is the sum of squares."""
    AbstractModel.__init__(self, name, metric, sigma)
def __init__(self, packing=None, name='circle', sigma=1.0):
    """Initialize the circle model.

    Args:
        packing: Packing value for the model; defaults to 0.0 when None.
        name: Model name forwarded to AbstractModel.
        sigma: Sigma forwarded to AbstractModel.
    """
    AbstractModel.__init__(self, name, sigma)
    # Use `is None` (identity), not `== None`: PEP 8 idiom, and it cannot
    # be fooled by arguments with a custom __eq__.
    if packing is None:
        packing = 0.0
    self.__packing__ = packing
def __init__(self, name='decay', metric=lambda x: numpysum(x*x)):
    """Initialize the decay model, forwarding name and metric to
    AbstractModel. The default metric is the sum of squares."""
    AbstractModel.__init__(self, name, metric)