Example #1
    def __init__(self, input_h, input_w, batch_size, output_size, out_h, out_w, init_weights, config):

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        # Converting incoming weights from shared_float_array to numpy
        for key in init_weights.keys():
            init_weights[key] = _utils.convert_shared_float_array_to_numpy(init_weights[key])

        self.od_graph = _tf.Graph()
        self.config = config
        self.batch_size = batch_size
        self.grid_shape = [out_h, out_w]
        self.num_classes = int(_utils.convert_shared_float_array_to_numpy(config['num_classes']))
        self.anchors = [
            (1.0, 2.0), (1.0, 1.0), (2.0, 1.0),
            (2.0, 4.0), (2.0, 2.0), (4.0, 2.0),
            (4.0, 8.0), (4.0, 4.0), (8.0, 4.0),
            (8.0, 16.0), (8.0, 8.0), (16.0, 8.0),
            (16.0, 32.0), (16.0, 16.0), (32.0, 16.0),
        ]
        self.num_anchors = len(self.anchors)
        self.output_size = output_size
        self.sess = _tf.Session(graph=self.od_graph)
        with self.od_graph.as_default():
            self.init_object_detector_graph(input_h, input_w, init_weights)
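A side note on the anchor list above: it is 5 scales (1, 2, 4, 8, 16) crossed with 3 aspect ratios (1:2, 1:1, 2:1). A minimal sketch that rebuilds the same 15 (width, height) pairs, useful as a sanity check if the list is ever edited:

# Sketch: reconstruct the 15 anchors as 5 scales x 3 aspect ratios.
scales = (1.0, 2.0, 4.0, 8.0, 16.0)
ratios = ((1, 2), (1, 1), (2, 1))  # (width, height) multipliers
anchors = [(s * w, s * h) for s in scales for (w, h) in ratios]
assert len(anchors) == 15
assert anchors[0] == (1.0, 2.0) and anchors[-1] == (32.0, 16.0)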
Example #2
    def __init__(self, config, net_params):

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()
        
        _tf.reset_default_graph()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(net_params[key])

        for key in config.keys():
            config[key] = _utils.convert_shared_float_array_to_numpy(config[key])

        self._batch_size = 1
        self._finetune_all_params = True
        self._define_training_graph = bool(config['st_training'])
        self._tf_variables = define_tensorflow_variables(net_params)

        # TODO: take care of batch size
        self.tf_input = _tf.placeholder(dtype=_tf.float32, shape=[None, 256, 256, 3])
        self.tf_style = _tf.placeholder(dtype=_tf.float32, shape=[None, 256, 256, 3])
        self.tf_index = _tf.placeholder(dtype=_tf.int64, shape=[self._batch_size])

        self.__define_graph()

        self.sess = _tf.Session()
        init = _tf.global_variables_initializer()
        self.sess.run(init)
Example #3
    def __init__(
        self,
        net_params,
        batch_size,
        num_features,
        num_classes,
        prediction_window,
        seq_len,
        seed,
    ):

        _utils.suppress_tensorflow_warnings()
        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key]
            )

        self.ac_graph = _tf.Graph()
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.seq_len = seq_len
        self.sess = _tf.Session(graph=self.ac_graph)
        with self.ac_graph.as_default():
            self.init_activity_classifier_graph(
                net_params, num_features, prediction_window, seed
            )
Example #4
    def __init__(self, num_inputs, num_classes, custom_layer_sizes):
        """
        Defines the TensorFlow model, loss, optimisation and accuracy.

        """
        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        self.sc_graph = _tf.Graph()
        self.num_classes = num_classes
        self.sess = _tf.Session(graph=self.sc_graph)
        with self.sc_graph.as_default():
            self.init_sound_classifier_graph(num_inputs, custom_layer_sizes)
Example #5
    def __init__(self, num_inputs, num_classes, custom_layer_sizes):
        """
        Defines the TensorFlow model, loss, optimisation and accuracy.
        """
        self.num_inputs = num_inputs
        self.num_classes = num_classes
        self.custom_layer_sizes = custom_layer_sizes

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        self.sc_graph = _tf.Graph()
        self.sess = _tf.Session(graph=self.sc_graph)

        self.is_initialized = False
Example #6
    def __init__(self):
        vggish_model_file = VGGish()

        if _mac_ver() < (10, 14):
            # Use TensorFlow/Keras
            import turicreate.toolkits._tf_utils as _utils
            self.gpu_policy = _utils.TensorFlowGPUPolicy()
            self.gpu_policy.start()

            model_path = vggish_model_file.get_model_path(format='tensorflow')
            self.vggish_model = _keras.models.load_model(model_path)
        else:
            # Use Core ML
            model_path = vggish_model_file.get_model_path(format='coreml')
            self.vggish_model = MLModel(model_path)
Example #7
    def __init__(self, net_params, batch_size, num_classes):
        """
        Defines the TensorFlow model, loss, optimisation and accuracy. Then
        loads the weights into the model.
        """
        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key])

        self.dc_graph = _tf.Graph()
        self.num_classes = num_classes
        self.batch_size = batch_size
        self.sess = _tf.Session(graph=self.dc_graph)
        with self.dc_graph.as_default():
            self.init_drawing_classifier_graph(net_params)
Example #8
    def __init__(self):
        vggish_model_file = VGGish()
        self.mac_ver = _mac_ver()

        if self.mac_ver < (10, 14):
            # Use TensorFlow/Keras
            import turicreate.toolkits._tf_utils as _utils

            self.gpu_policy = _utils.TensorFlowGPUPolicy()
            self.gpu_policy.start()

            model_path = vggish_model_file.get_model_path(format="tensorflow")
            _tf = _minimal_package_import_check("tensorflow")
            self.vggish_model = _tf.keras.models.load_model(model_path)
        else:
            # Use Core ML
            model_path = vggish_model_file.get_model_path(format="coreml")
            coremltools = _minimal_package_import_check("coremltools")
            self.vggish_model = coremltools.models.MLModel(model_path)
Example #9
    def __init__(self, config, net_params):

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key])

        for key in config.keys():
            config[key] = _utils.convert_shared_float_array_to_numpy(
                config[key])

        self.st_graph = _tf.Graph()
        self._batch_size = 1
        self._finetune_all_params = True
        self._define_training_graph = bool(config['st_training'])
        self.sess = _tf.Session(graph=self.st_graph)
        with self.st_graph.as_default():
            self.init_style_transfer_graph(net_params)
Example #10
    def __init__(self, ptModel):
        """
        Parameters
        ----------
        ptModel: ImageClassifierPreTrainedModel
            An instance of a pre-trained model.
        """

        # Suppresses verbosity to only errors
        _utils.suppress_tensorflow_warnings()

        from tensorflow import keras

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        self.ptModel = ptModel

        self.input_shape = ptModel.input_image_shape
        self.coreml_data_layer = ptModel.coreml_data_layer
        self.coreml_feature_layer = ptModel.coreml_feature_layer

        model_path = ptModel.get_model_path("tensorflow")
        self.model = keras.models.load_model(model_path)
Example #11
    def __init__(self, net_params, batch_size, num_classes):
        """
        Defines the TensorFlow model, loss, optimisation and accuracy. Then
        loads the MXNet weights into the model.
        """
        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key])

        _tf.reset_default_graph()

        self.num_classes = num_classes
        self.batch_size = batch_size

        self.input = _tf.placeholder(_tf.float32, [None, 28, 28, 1])

        self.one_hot_labels = _tf.placeholder(_tf.int32,
                                              [None, self.num_classes])

        # Weights
        weights = {
            'drawing_conv0_weight':
            _tf.Variable(_tf.zeros([3, 3, 1, 16]),
                         name='drawing_conv0_weight'),
            'drawing_conv1_weight':
            _tf.Variable(_tf.zeros([3, 3, 16, 32]),
                         name='drawing_conv1_weight'),
            'drawing_conv2_weight':
            _tf.Variable(_tf.zeros([3, 3, 32, 64]),
                         name='drawing_conv2_weight'),
            'drawing_dense0_weight':
            _tf.Variable(_tf.zeros([576, 128]), name='drawing_dense0_weight'),
            'drawing_dense1_weight':
            _tf.Variable(_tf.zeros([128, self.num_classes]),
                         name='drawing_dense1_weight')
        }

        # Biases
        biases = {
            'drawing_conv0_bias':
            _tf.Variable(_tf.zeros([16]), name='drawing_conv0_bias'),
            'drawing_conv1_bias':
            _tf.Variable(_tf.zeros([32]), name='drawing_conv1_bias'),
            'drawing_conv2_bias':
            _tf.Variable(_tf.zeros([64]), name='drawing_conv2_bias'),
            'drawing_dense0_bias':
            _tf.Variable(_tf.zeros([128]), name='drawing_dense0_bias'),
            'drawing_dense1_bias':
            _tf.Variable(_tf.zeros([self.num_classes]),
                         name='drawing_dense1_bias')
        }

        conv_1 = _tf.nn.conv2d(self.input,
                               weights["drawing_conv0_weight"],
                               strides=1,
                               padding='SAME')
        conv_1 = _tf.nn.bias_add(conv_1, biases["drawing_conv0_bias"])
        relu_1 = _tf.nn.relu(conv_1)
        pool_1 = _tf.nn.max_pool2d(relu_1,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')

        conv_2 = _tf.nn.conv2d(pool_1,
                               weights["drawing_conv1_weight"],
                               strides=1,
                               padding='SAME')
        conv_2 = _tf.nn.bias_add(conv_2, biases["drawing_conv1_bias"])
        relu_2 = _tf.nn.relu(conv_2)
        pool_2 = _tf.nn.max_pool2d(relu_2,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')

        conv_3 = _tf.nn.conv2d(pool_2,
                               weights["drawing_conv2_weight"],
                               strides=1,
                               padding='SAME')
        conv_3 = _tf.nn.bias_add(conv_3, biases["drawing_conv2_bias"])
        relu_3 = _tf.nn.relu(conv_3)
        pool_3 = _tf.nn.max_pool2d(relu_3,
                                   ksize=[1, 2, 2, 1],
                                   strides=[1, 2, 2, 1],
                                   padding='VALID')

        # Flatten the data to a 1-D vector for the fully connected layer
        fc1 = _tf.reshape(pool_3, (-1, 576))

        fc1 = _tf.nn.xw_plus_b(fc1,
                               weights=weights["drawing_dense0_weight"],
                               biases=biases["drawing_dense0_bias"])

        fc1 = _tf.nn.relu(fc1)

        out = _tf.nn.xw_plus_b(fc1,
                               weights=weights["drawing_dense1_weight"],
                               biases=biases["drawing_dense1_bias"])
        softmax_out = _tf.nn.softmax(out)

        self.predictions = softmax_out

        # Loss
        self.cost = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=self.one_hot_labels,
            reduction=_tf.losses.Reduction.NONE)

        # Optimizer
        self.optimizer = _tf.train.AdamOptimizer(learning_rate=0.001).minimize(
            self.cost)

        # Correct predictions
        correct_prediction = _tf.equal(_tf.argmax(self.predictions, 1),
                                       _tf.argmax(self.one_hot_labels, 1))

        self.sess = _tf.Session()
        self.sess.run(_tf.global_variables_initializer())

        # Assign the initialised weights from C++ to tensorflow
        layers = [
            'drawing_conv0_weight', 'drawing_conv0_bias',
            'drawing_conv1_weight', 'drawing_conv1_bias',
            'drawing_conv2_weight', 'drawing_conv2_bias',
            'drawing_dense0_weight', 'drawing_dense0_bias',
            'drawing_dense1_weight', 'drawing_dense1_bias'
        ]

        for key in layers:
            if 'bias' in key:
                self.sess.run(
                    _tf.assign(
                        _tf.get_default_graph().get_tensor_by_name(key + ":0"),
                        net_params[key]))
            else:
                if 'drawing_dense0_weight' in key:
                    # Make the Core ML pool3 output (NCHW) compatible with
                    # TF (NHWC): decompose the FC weights to NCHW, transpose
                    # to NHWC, then reshape back to the FC shape.
                    coreml_128_576 = net_params[key]
                    coreml_128_576 = _np.reshape(coreml_128_576,
                                                 (128, 64, 3, 3))
                    coreml_128_576 = _np.transpose(coreml_128_576,
                                                   (0, 2, 3, 1))
                    coreml_128_576 = _np.reshape(coreml_128_576, (128, 576))
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            _np.transpose(coreml_128_576, (1, 0))))
                elif 'dense' in key:
                    dense_weights = _utils.convert_dense_coreml_to_tf(
                        net_params[key])
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            dense_weights))
                else:
                    # TODO: Call _utils.convert_conv2d_coreml_to_tf when #2513 is merged
                    self.sess.run(
                        _tf.assign(
                            _tf.get_default_graph().get_tensor_by_name(key +
                                                                       ":0"),
                            _np.transpose(net_params[key], (2, 3, 1, 0))))
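The dense-weight shuffle in the loop above is the subtle part, so here is a standalone numpy sketch of the same transformation with random placeholder weights (the shapes are taken from the code; the data is made up):

import numpy as np

# Core ML stores the first dense layer as (128, 576), where the 576 inputs
# are the pool3 output flattened in NCHW order (64 channels x 3 x 3).
# TensorFlow flattens pool3 in NHWC order, so the columns must be reordered
# before transposing to the (576, 128) layout used by xw_plus_b.
coreml_w = np.random.rand(128, 576).astype(np.float32)  # placeholder weights
w = coreml_w.reshape(128, 64, 3, 3)   # split inputs back into (C, H, W)
w = w.transpose(0, 2, 3, 1)           # reorder each row to (H, W, C)
w = w.reshape(128, 576)               # flatten again, now NHWC-ordered
tf_w = w.transpose(1, 0)              # final (576, 128) TensorFlow layout
assert tf_w.shape == (576, 128)

# Conv kernels follow the same idea: (out, in, H, W) -> (H, W, in, out),
# which is the (2, 3, 1, 0) transpose in the else branch above.
coreml_k = np.random.rand(16, 1, 3, 3).astype(np.float32)
tf_k = coreml_k.transpose(2, 3, 1, 0)
assert tf_k.shape == (3, 3, 1, 16)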
Example #12
    def __init__(self, net_params, batch_size, num_features, num_classes,
                 prediction_window, seq_len):

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        for key in net_params.keys():
            net_params[key] = _utils.convert_shared_float_array_to_numpy(
                net_params[key])

        _tf.reset_default_graph()

        self.num_classes = num_classes
        self.batch_size = batch_size
        self.seq_len = seq_len

        # Vars
        self.data = _tf.placeholder(
            _tf.float32, [None, prediction_window * seq_len, num_features])
        self.weight = _tf.placeholder(_tf.float32, [None, seq_len, 1])
        self.target = _tf.placeholder(_tf.int32, [None, seq_len, 1])
        self.is_training = _tf.placeholder(_tf.bool)

        # Reshaping weights
        reshaped_weight = _tf.reshape(self.weight, [self.batch_size, seq_len])

        # One hot encoding target
        reshaped_target = _tf.reshape(self.target, [self.batch_size, seq_len])
        one_hot_target = _tf.one_hot(reshaped_target,
                                     depth=self.num_classes,
                                     axis=-1)

        # Weights
        self.weights = {
            'conv_weight':
            _tf.Variable(_tf.zeros([prediction_window, num_features, CONV_H]),
                         name='conv_weight'),
            'dense0_weight':
            _tf.Variable(_tf.zeros([LSTM_H, DENSE_H]), name='dense0_weight'),
            'dense1_weight':
            _tf.Variable(_tf.zeros([DENSE_H, self.num_classes]),
                         name='dense1_weight')
        }

        # Biases
        self.biases = {
            'conv_bias':
            _tf.Variable(_tf.zeros([CONV_H]), name='conv_bias'),
            'dense0_bias':
            _tf.Variable(_tf.zeros([DENSE_H]), name='dense0_bias'),
            'dense1_bias':
            _tf.Variable(_tf.zeros([num_classes]), name='dense1_bias')
        }

        # Convolution
        conv = _tf.nn.conv1d(self.data,
                             self.weights['conv_weight'],
                             stride=prediction_window,
                             padding='SAME')
        conv = _tf.nn.bias_add(conv, self.biases['conv_bias'])
        conv = _tf.nn.relu(conv)

        dropout = _tf.layers.dropout(conv, rate=0.2, training=self.is_training)

        # Long Short-Term Memory (LSTM)
        lstm = self.load_lstm_weights_params(net_params)
        cells = _tf.nn.rnn_cell.LSTMCell(num_units=LSTM_H,
                                         reuse=_tf.AUTO_REUSE,
                                         forget_bias=0.0,
                                         initializer=_tf.initializers.constant(
                                             lstm, verify_shape=True))
        init_state = cells.zero_state(batch_size, _tf.float32)
        rnn_outputs, final_state = _tf.nn.dynamic_rnn(cells,
                                                      dropout,
                                                      initial_state=init_state)

        # Dense
        dense = _tf.reshape(rnn_outputs, (-1, LSTM_H))
        dense = _tf.add(_tf.matmul(dense, self.weights['dense0_weight']),
                        self.biases['dense0_bias'])
        dense = _tf.layers.batch_normalization(
            inputs=dense,
            beta_initializer=_tf.initializers.constant(net_params['bn_beta'],
                                                       verify_shape=True),
            gamma_initializer=_tf.initializers.constant(net_params['bn_gamma'],
                                                        verify_shape=True),
            moving_mean_initializer=_tf.initializers.constant(
                net_params['bn_running_mean'], verify_shape=True),
            moving_variance_initializer=_tf.initializers.constant(
                net_params['bn_running_var'], verify_shape=True),
            training=self.is_training)
        dense = _tf.nn.relu(dense)
        dense = _tf.layers.dropout(dense, rate=0.5, training=self.is_training)

        # Output
        out = _tf.add(_tf.matmul(dense, self.weights['dense1_weight']),
                      self.biases['dense1_bias'])
        out = _tf.reshape(out, (-1, self.seq_len, self.num_classes))
        self.probs = _tf.nn.softmax(out)

        # Weights
        seq_sum_weights = _tf.reduce_sum(reshaped_weight, axis=1)
        binary_seq_sum_weights = _tf.reduce_sum(
            _tf.cast(seq_sum_weights > 0, dtype=_tf.float32))

        # Loss
        loss = _tf.losses.softmax_cross_entropy(
            logits=out,
            onehot_labels=one_hot_target,
            weights=reshaped_weight,
            reduction=_tf.losses.Reduction.NONE)
        self.loss_per_seq = _tf.reduce_sum(loss,
                                           axis=1) / (seq_sum_weights + 1e-5)
        self.loss_op = _tf.reduce_sum(
            self.loss_per_seq) / (binary_seq_sum_weights + 1e-5)

        # Optimizer
        update_ops = _tf.get_collection(_tf.GraphKeys.UPDATE_OPS)
        self.set_learning_rate(1e-3)
        train_op = self.optimizer.minimize(self.loss_op)
        self.train_op = _tf.group([train_op, update_ops])

        # Session
        self.sess = _tf.Session()

        # Initialize all variables
        self.sess.run(_tf.global_variables_initializer())
        self.sess.run(_tf.local_variables_initializer())

        self.load_weights(net_params)
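One shape relationship worth spelling out: the 1-D convolution uses stride=prediction_window over an input of prediction_window * seq_len timesteps, so with 'SAME' padding it emits exactly seq_len feature vectors, one per prediction window, which is what the LSTM then consumes. A minimal arithmetic sketch with hypothetical sizes:

# Hypothetical sizes; the real values come from the toolkit configuration.
prediction_window, seq_len, num_features = 50, 20, 6
in_steps = prediction_window * seq_len          # 1000 timesteps into conv1d
out_steps = -(-in_steps // prediction_window)   # ceil(in/stride) under SAME
assert out_steps == seq_len                     # one output per window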
Example #13
    def __init__(self, input_h, input_w, batch_size, output_size, out_h, out_w,
                 init_weights, config):

        self.gpu_policy = _utils.TensorFlowGPUPolicy()
        self.gpu_policy.start()

        # Reset the TensorFlow graph when a new model is created
        _tf.reset_default_graph()

        # Converting incoming weights from shared_float_array to numpy
        for key in init_weights.keys():
            init_weights[key] = _utils.convert_shared_float_array_to_numpy(
                init_weights[key])

        self.config = config
        self.batch_size = batch_size
        self.grid_shape = [out_h, out_w]
        self.num_classes = int(
            _utils.convert_shared_float_array_to_numpy(config['num_classes']))
        self.anchors = [
            (1.0, 2.0),
            (1.0, 1.0),
            (2.0, 1.0),
            (2.0, 4.0),
            (2.0, 2.0),
            (4.0, 2.0),
            (4.0, 8.0),
            (4.0, 4.0),
            (8.0, 4.0),
            (8.0, 16.0),
            (8.0, 8.0),
            (16.0, 8.0),
            (16.0, 32.0),
            (16.0, 16.0),
            (32.0, 16.0),
        ]
        self.num_anchors = len(self.anchors)
        self.output_size = output_size
        self.is_train = _tf.placeholder(
            _tf.bool)  # Set flag for training or val

        # Create placeholders for image and labels
        self.images = _tf.placeholder(_tf.float32,
                                      [self.batch_size, input_h, input_w, 3],
                                      name='images')
        self.labels = _tf.placeholder(_tf.float32, [
            self.batch_size, self.grid_shape[0], self.grid_shape[1],
            self.num_anchors, self.num_classes + 5
        ],
                                      name='labels')
        self.init_weights = init_weights
        self.tf_model = self.tiny_yolo(inputs=self.images,
                                       output_size=self.output_size)
        self.global_step = _tf.Variable(0, trainable=False, name="global_step")

        self.loss = self.loss_layer(self.tf_model, self.labels)
        self.base_lr = _utils.convert_shared_float_array_to_numpy(
            config['learning_rate'])
        self.num_iterations = int(
            _utils.convert_shared_float_array_to_numpy(
                config['num_iterations']))
        self.init_steps = [
            self.num_iterations // 2, 3 * self.num_iterations // 4,
            self.num_iterations
        ]
        self.lrs = [
            _np.float32(self.base_lr * 10**(-i))
            for i, step in enumerate(self.init_steps)
        ]
        self.steps_tf = self.init_steps[:-1]
        self.lr = _tf.train.piecewise_constant(self.global_step, self.steps_tf,
                                               self.lrs)
        # TODO: Evaluate method to update lr in set_learning_rate()

        self.opt = _tf.train.MomentumOptimizer(self.lr, momentum=0.9)

        self.clip_value = _utils.convert_shared_float_array_to_numpy(
            self.config.get('gradient_clipping'))

        grads_and_vars = self.opt.compute_gradients(self.loss)
        clipped_gradients = [(self.ClipIfNotNone(g, self.clip_value), v)
                             for g, v in grads_and_vars]
        self.train_op = self.opt.apply_gradients(clipped_gradients,
                                                 global_step=self.global_step)

        self.sess = _tf.Session()
        self.sess.run(_tf.global_variables_initializer())
        self.sess.run(_tf.local_variables_initializer())

        self.load_weights(self.init_weights)
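The learning-rate schedule assembled above is a step decay: the rate stays at base_lr until the halfway point, then drops tenfold at 1/2 and again at 3/4 of num_iterations. A minimal sketch with made-up numbers:

# Made-up values; base_lr and num_iterations come from the C++ config.
base_lr, num_iterations = 0.001, 1000
init_steps = [num_iterations // 2, 3 * num_iterations // 4, num_iterations]
lrs = [base_lr * 10 ** (-i) for i, _ in enumerate(init_steps)]
steps = init_steps[:-1]  # boundaries handed to piecewise_constant
# lrs == [0.001, 0.0001, 1e-05]; the rate drops at steps 500 and 750.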