Example #1
    def validate_params(params):
        (are_orig_valid, orig_error_msg) = \
                         AbstractModel.validate_orig_params(params)
        league_ok = False
        if are_orig_valid:
            error_msgs = []
        else:
            error_msgs = [orig_error_msg]
        if "league" in params:
            league_valid = AbstractModel.validate_param(
                Team.validate_league(params["league"]),
                error_msgs)
            if league_valid:
                league_ok = True
        else:
            error_msgs.append("League is missing")

        location_ok = False
        if "location" in params:
            location_valid = AbstractModel.validate_param(
                Team.validate_location(params["location"]),
                error_msgs)
            if location_valid:
                location_ok = True
        else:
            error_msgs.append("Location is missing")

        if are_orig_valid and league_ok and location_ok:
            return (True, None)
        else:
            return (False, ",".join(error_msgs))
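
For context, here is a minimal, hypothetical call site for the validator above; it assumes validate_params is exposed as a static method on Team and the params keys simply mirror the checks in the snippet:

    params = {"league": "NHL", "location": "Boston"}  # illustrative values only
    (valid, error_msg) = Team.validate_params(params)
    if not valid:
        # error_msg is the comma-joined list of collected messages
        print(error_msg)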
Example #2
 def __init__(self,
              output_dim,
              vocab_size1,
              emb1_dim,
              vocab_size2,
              emb2_dim,
              batch_size,
              max_seq_length,
              n_hidden,
              n_hidden_layers,
              learning_rate,
              keep_prob,
              test_inputs1,
              test_inputs2,
              test_seq_lengths,
              test_indices_wsd,
              test_labels,
              wsd_classifier=True,
              pos_classifier=False,
              pos_classes=0,
              test_pos_labels=None):
     """See docstring for AbstratModel"""
     AbstractModel.__init__(self, output_dim, vocab_size1, emb1_dim,
                            vocab_size2, emb2_dim, batch_size,
                            max_seq_length, n_hidden, n_hidden_layers,
                            learning_rate, keep_prob, test_inputs1,
                            test_inputs2, test_seq_lengths,
                            test_indices_wsd, test_labels, wsd_classifier,
                            pos_classifier, pos_classes, test_pos_labels)
     self.run_neural_model()
Example #3
    def __init__(self):
        AbstractModel.__init__(self)

        parser = argparse.ArgumentParser()
        parser.add_argument('--noise_n', type=int, default=128)
        parser.add_argument('--G_last_act', type=str, default="tanh")
        parser.add_argument('--G_pretrained_model', type=str, default="weights/generator.pth")

        config = parser.parse_args()

        self.solver = Solver(config)
Example #4
    def __init__(self,
                 output_dim,
                 vocab_size1,
                 emb1_dim,
                 vocab_size2,
                 emb2_dim,
                 batch_size,
                 max_seq_length,
                 n_hidden,
                 n_hidden_layers,
                 learning_rate,
                 keep_prob,
                 test_inputs1,
                 test_inputs2,
                 test_seq_lengths,
                 test_indices_wsd,
                 test_labels_wsd,
                 test_labels_wsd_context,
                 wsd_classifier=True,
                 pos_classifier=False,
                 pos_classes=0,
                 test_pos_labels=None):
        """See docstring for AbstractModel for most of the parameters

        Additional args:
            test_labels_wsd_context: An array of floats, the gold data embeddings for the embedding pathway

        """
        AbstractModel.__init__(
            self, output_dim, vocab_size1, emb1_dim, vocab_size2, emb2_dim,
            batch_size, max_seq_length, n_hidden, n_hidden_layers,
            learning_rate, keep_prob, test_inputs1, test_inputs2,
            test_seq_lengths, test_indices_wsd, test_labels_wsd,
            wsd_classifier, pos_classifier, pos_classes, test_pos_labels)
        self.weights_wsd_context = tf.get_variable(
            name="context_wsd-w",
            shape=[2 * n_hidden, emb1_dim],
            dtype=tf.float32)
        self.biases_wsd_context = tf.get_variable(name="context_wsd-b",
                                                  shape=[emb1_dim],
                                                  dtype=tf.float32)
        self.train_labels_wsd_context = tf.placeholder(
            tf.float32,
            shape=[None, emb1_dim],
            name="train_labels_wsd_context")
        # self.train_labels_wsd = (self.train_labels_wsd, self.train_labels_wsd_context)
        self.test_labels_wsd_context = tf.constant(test_labels_wsd_context,
                                                   tf.float32)
        # self.test_labels_wsd = (self.test_labels_wsd, self.test_labels_wsd_context)
        self.run_neural_model()
Example #5
 def validate_params(params):
     (are_orig_valid, orig_error_msg) = \
                      AbstractModel.validate_orig_params(params)
     error_msgs = [] if are_orig_valid else [orig_error_msg]
     if "sport" in params:
         sport_valid = AbstractModel.validate_param(
             League.validate_sport(params["sport"]),
             error_msgs)
         if sport_valid:
             return (are_orig_valid, orig_error_msg)
         else:
             error_msgs.append("Sport is null")
             return (False, ",".join(error_msgs))
     else:
         error_msgs.append("Sport is missing")
         return (False, ",".join(error_msgs))
Example #6
 def validate_subset_params(params):
     valid = True
     error_msgs = []
     if "title" in params:
         valid = valid and AbstractModel.validate_param(
             AbstractModel.validate_title(params["title"]),
             error_msgs)
     if "description" in params:
         valid = valid and AbstractModel.validate_param(
             AbstractModel.validate_description(params["description"]),
             error_msgs)
     if valid:
         return (True, None)
     else:
         return (False, ",".join(error_msgs))
Example #7
    def validate_params(params):
        error_msgs = []
        (are_orig_valid, orig_error_msg) = \
                         AbstractModel.validate_orig_params(params)
        start_date_str = end_date_str = None
        start_date = end_date = None

        if "start_date_str" in params:
            start_date_str = params["start_date_str"]
            (start_date, start_date_error_msg) = \
                         Event.validate_date(start_date_str, "start_date")
            if not start_date:
                error_msgs.append(start_date_error_msg)
            else:
                params["start_date"] = start_date
        if "end_date_str" in params:
            end_date_str = params["end_date_str"]
            (end_date, end_date_error_msg) = \
                       Event.validate_date(end_date_str, "end_date")
            if not end_date:
                error_msgs.append(end_date_error_msg)
            else:
                params["end_date"] = end_date

        (are_dates_valid, date_error_msg) = \
                          Event.validate_dates(start_date, end_date)
        if are_orig_valid and are_dates_valid:
            return (True, None)
        else:
            if orig_error_msg:
                error_msgs.append(orig_error_msg)
            if date_error_msg:
                error_msgs.append(date_error_msg)
            return (False, ",".join(error_msgs))
Example #8
 def validate_params(params):
     (are_orig_valid, orig_error_msg) = \
                      AbstractModel.validate_orig_params(params)
     if are_orig_valid:
         error_msgs = []
     else:
         error_msgs = [orig_error_msg]
     if "league" in params:
         league_valid = AbstractModel.validate_param(
             Season.validate_league(params["league"]), error_msgs)
         if league_valid:
             return (are_orig_valid, orig_error_msg)
         else:
             error_msgs.append("League is invalid")
             return (False, ",".join(error_msgs))
     else:
         error_msgs.append("League is missing")
         return (False, ",".join(error_msgs))
Example #9
    def _build_graph(self):
        from tf_utils.layers import conv2d, max_pool, rescale_bilinear, avg_pool

        def layer_width(layer: int):  # number of channels (features per pixel)
            return min([4 * 4**(layer + 1), 64])

        input_shape = [None] + list(self.input_shape)
        output_shape = input_shape[:3] + [self.class_count]

        # Input image and labels placeholders
        input = tf.placeholder(tf.float32, shape=input_shape)
        target = tf.placeholder(tf.float32, shape=output_shape)

        # Downsampled input (to improve speed at the cost of accuracy)
        h = rescale_bilinear(input, 0.5)

        # Hidden layers
        h = conv2d(h, 3, layer_width(0))
        h = tf.nn.relu(h)
        for l in range(1, self.conv_layer_count):
            h = max_pool(h, 2)
            h = conv2d(h, 3, layer_width(l))
            h = tf.nn.relu(h)

        # Pixelwise softmax classification and label upscaling
        logits = conv2d(h, 1, self.class_count)
        probs = tf.nn.softmax(logits)
        probs = tf.image.resize_bilinear(probs, output_shape[1:3])

        # Loss
        clipped_probs = tf.clip_by_value(probs, 1e-10, 1.0)
        ts = lambda x: x[:, :, :, 1:] if self.class0_unknown else x
        cost = -tf.reduce_mean(ts(target) * tf.log(ts(clipped_probs)))

        # Optimization
        optimizer = tf.train.AdamOptimizer(self.learning_rate)
        training_step = optimizer.minimize(cost)

        # Dense predictions and labels
        preds, dense_labels = tf.argmax(probs, 3), tf.argmax(target, 3)

        # Other evaluation measures
        self._n_accuracy = tf.reduce_mean(
            tf.cast(tf.equal(preds, dense_labels), tf.float32))

        return AbstractModel.EssentialNodes(
            input=input,
            target=target,
            probs=probs,
            loss=cost,
            training_step=training_step)
Example #10
    def _build_graph(self, learning_rate, epoch, is_training):
        from layers import conv

        # Input image and labels placeholders
        input_shape = [None] + list(self.input_shape)
        output_shape = [None, self.class_count]
        input = tf.placeholder(tf.float32, shape=input_shape)
        target = tf.placeholder(tf.float32, shape=output_shape)

        # Hidden layers
        h = layers_exp.rbf_resnet(input,
                                  is_training=is_training,
                                  base_width=self.base_width,
                                  widening_factor=self.widening_factor,
                                  group_lengths=self.group_lengths)

        # Global pooling and softmax classification
        h = tf.reduce_mean(h, axis=[1, 2], keep_dims=True)
        logits = conv(h, 1, self.class_count)
        logits = tf.reshape(logits, [-1, self.class_count])
        probs = tf.nn.softmax(logits)

        # Loss
        clipped_probs = tf.clip_by_value(probs, 1e-10, 1.0)
        loss = -tf.reduce_mean(target * tf.log(clipped_probs))

        # Regularization
        w_vars = filter(lambda x: 'weights' in x.name, tf.global_variables())
        loss += self.weight_decay * regularization.l2_regularization(w_vars)

        # Optimization
        optimizer = tf.train.MomentumOptimizer(learning_rate, 0.9)
        training_step = optimizer.minimize(loss)

        # Dense predictions and labels
        preds, dense_labels = tf.argmax(probs, 1), tf.argmax(target, 1)

        # Other evaluation measures
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(preds, dense_labels), tf.float32))

        #writer = tf.summary.FileWriter('logs', self._sess.graph)

        return AbstractModel.EssentialNodes(input=input,
                                            target=target,
                                            probs=probs,
                                            loss=loss,
                                            training_step=training_step,
                                            evaluation={'accuracy': accuracy})
Example #11
 def __init__(self,name='decay',metric=lambda x: numpysum(x*x)):
     AbstractModel.__init__(self,name,metric)
     return
Example #12
 def __init__(self):
     AbstractModel.__init__(self)
     self.classfier = cv2.CascadeClassifier(
         r"weights/haarcascade_frontalface_alt.xml")
Example #13
 def __init__(self, packing=None, name='circle', sigma=1.0):
     AbstractModel.__init__(self, name, sigma)
     if packing is None: packing = 0.0
     self.__packing__ = packing
     return
Example #14
 def __init__(self):
     AbstractModel.__init__(self)
     self.solver = Solver("database", "weights/recognition.pth")
Example #15
 def __init__(self,name='lorentz',metric=lambda x: numpysum(x*x),sigma=1.0):
     AbstractModel.__init__(self,name,metric,sigma)
     return
Example #16
 def __init__(self,packing=None,name='circle',sigma=1.0):
     AbstractModel.__init__(self,name,sigma)
     if packing is None: packing = 0.0
     self.__packing__ = packing
     return
Example #17
 def validate_params(params):
     return AbstractModel.validate_orig_params(params)
Example #18
 def __init__(self,name='decay',metric=lambda x: numpysum(x*x)):
     AbstractModel.__init__(self,name,metric)
     return
Example #19
    def _build_graph(self, learning_rate, epoch, is_training):
        from tensorflow.contrib import layers
        from tf_utils.layers import conv2d, max_pool, rescale_bilinear, avg_pool, bn_relu
        from tf_utils.losses import multiclass_hinge_loss

        def get_ortho_penalty():
            vars = tf.contrib.framework.get_variables('')
            filt = lambda x: 'conv' in x.name and 'weights' in x.name
            weight_vars = list(filter(filt, vars))
            loss = tf.constant(0.0)
            for v in weight_vars:
                m = tf.reshape(v, (-1, v.shape[3].value))
                d = tf.matmul(
                    m, m, True) - tf.eye(v.shape[3].value) / v.shape[3].value
                loss += tf.reduce_sum(d**2)
            return loss

        input_shape = [None] + list(self.input_shape)
        output_shape = [None, self.class_count]

        # Input image and labels placeholders
        input = tf.placeholder(tf.float32, shape=input_shape, name='input')
        target = tf.placeholder(tf.float32, shape=output_shape, name='target')

        # L2 regularization
        weight_decay = tf.constant(self.weight_decay, dtype=tf.float32)

        # Hidden layers
        h = input
        with tf.contrib.framework.arg_scope(
            [layers.conv2d],
                kernel_size=5,
                data_format='NHWC',
                padding='SAME',
                activation_fn=tf.nn.relu,
                weights_initializer=layers.variance_scaling_initializer(),
                weights_regularizer=layers.l2_regularizer(weight_decay)):
            h = layers.conv2d(h, 16, scope='convrelu1')
            h = layers.max_pool2d(h, 2, 2, scope='pool1')
            h = layers.conv2d(h, 32, scope='convrelu2')
            h = layers.max_pool2d(h, 2, 2, scope='pool2')
        with tf.contrib.framework.arg_scope(
            [layers.fully_connected],
                activation_fn=tf.nn.relu,
                weights_initializer=layers.variance_scaling_initializer(),
                weights_regularizer=layers.l2_regularizer(weight_decay)):
            h = layers.flatten(h, scope='flatten3')
            h = layers.fully_connected(h, 512, scope='fc3')

        self._print_vars()

        # Softmax classification
        logits = layers.fully_connected(h,
                                        self.class_count,
                                        activation_fn=None,
                                        scope='logits')
        probs = tf.nn.softmax(logits, name='probs')

        # Loss
        mhl = lambda t, lo: 0.1 * multiclass_hinge_loss(t, lo)
        sce = tf.losses.softmax_cross_entropy
        loss = (mhl if self.use_multiclass_hinge_loss else sce)(target, logits)
        loss = loss + tf.reduce_sum(
            tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES))
        if self.ortho_penalty > 0:
            loss += self.ortho_penalty * get_ortho_penalty()

        # Optimization
        optimizer = tf.train.AdamOptimizer(learning_rate)
        training_step = optimizer.minimize(loss)

        # Dense predictions and labels
        preds, dense_labels = tf.argmax(probs, 1), tf.argmax(target, 1)

        # Other evaluation measures
        accuracy = tf.reduce_mean(
            tf.cast(tf.equal(preds, dense_labels), tf.float32))

        return AbstractModel.EssentialNodes(input=input,
                                            target=target,
                                            probs=probs,
                                            loss=loss,
                                            training_step=training_step,
                                            evaluation={'accuracy': accuracy})
Example #20
AudioDataset = dataset.AudioDataset
InversionV3 = inversion_model.InversionV3
# model = AbstractModel(
#     hparams=hparams_new,
#     data_paths = data_paths,
#     dataset_model = AudioDataset,
#     model = InversionV3(),
#     criterion = nn.MSELoss()
# )

hparams_new['num_workers'] = 0

model = AbstractModel.load_from_checkpoint(PATH,
                                           hparams=hparams_new,
                                           data_paths=data_paths,
                                           dataset_model=AudioDataset,
                                           model=InversionV3(),
                                           criterion=nn.MSELoss())

model = model.eval()

model.prepare_data()


def SRR(y, y_hat):
    numerator = np.sum(np.square(np.abs(y)))
    denominator = np.sum(np.square(np.abs(y - y_hat)))
    return numerator / denominator
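
A quick worked check of SRR on toy arrays (illustrative values only; numpy is assumed to be imported as np, as the function above already requires): for y = [3, 4] and y_hat = [3, 2], the signal power is 25 and the residual power is 4, so SRR is 6.25.

y = np.array([3.0, 4.0])
y_hat = np.array([3.0, 2.0])
print(SRR(y, y_hat))  # 25 / 4 = 6.25; larger values mean a closer reconstruction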


f = open(