Example #1
        def __init__(self,
                     kernel_initializer,
                     kernel_regularizer,
                     name='upsample_merge'):
            super().__init__(name=name)

            self.conv_lateral = Sequential([
                tf.layers.Conv2D(
                    256,
                    1,
                    1,
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization()
            ])

            self.conv_merge = Sequential([
                tf.layers.Conv2D(
                    256,
                    3,
                    1,
                    padding='same',
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization()
            ])
Example #2
    def __init__(self,
                 filters,
                 activation,
                 dropout_rate,
                 kernel_initializer,
                 kernel_regularizer,
                 name='bottleneck_composite_function'):
        layers = [
            Normalization(),
            activation,
            tf.layers.Conv2D(filters * 4,
                             1,
                             use_bias=False,
                             kernel_initializer=kernel_initializer,
                             kernel_regularizer=kernel_regularizer),
            Dropout(dropout_rate),
            Normalization(),
            activation,
            tf.layers.Conv2D(filters,
                             3,
                             padding='same',
                             use_bias=False,
                             kernel_initializer=kernel_initializer,
                             kernel_regularizer=kernel_regularizer),
            Dropout(dropout_rate),
        ]

        super().__init__(layers, name=name)
Example #3
def LSE_Calculation(x, check):
    # Least-squares estimation: drop the target column(s), normalize the
    # features, then fit the coefficients with gradient descent.
    if check == 1:
        data = x.drop(['log_commercial_rate'], axis=1)
        data = data.drop(['Commercial-rate'], axis=1)
        Y_actual_ = x['Commercial-rate'].values
        norm, data1 = Normalization(data)
        b, data = gradient_descent(data, norm, Y_actual_, check)
        return b, Y_actual_, data1, norm

    data1 = x.drop(['Commercial-rate'], axis=1)
    Y_actual_ = x['Commercial-rate'].values
    norm, data1 = Normalization(data1)
    b, data = gradient_descent(data1, norm, Y_actual_, check)
    return b, Y_actual_, data1, norm
Example #4
class NormLin(AbstractTransformation):
    """
    A normalization layer followed by a linear projection layer.

    Currently just accepts most defaults for the two layers, but this
    could be changed in the future if more customization is needed.
    """
    def __init__(self, num_dims, num_factors=2, name="Norm Lin"):
        self.name = name
        self.num_dims = num_dims
        self._norm = Normalization(num_dims)
        self._proj = Linear(num_dims, num_factors=num_factors)

    @property
    def hypers(self):
        return self._proj.hypers

    def output_num_dims(self):
        return self._proj.output_num_dims()

    def forward_pass(self, inputs):
        norm_inputs = self._norm.forward_pass(inputs)
        proj_inputs = self._proj.forward_pass(norm_inputs)

        return proj_inputs

    def backward_pass(self, V):
        JV_proj = self._proj.backward_pass(V)
        JV_norm = self._norm.backward_pass(JV_proj)

        return JV_norm
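
A minimal usage sketch with hypothetical shapes, assuming Normalization and Linear implement the same AbstractTransformation interface (forward_pass / backward_pass) as the class above:

import numpy as np

transform = NormLin(num_dims=4, num_factors=2)
X = np.random.rand(10, 4)                       # 10 points in 4 dimensions
Z = transform.forward_pass(X)                   # normalize, then project
V = np.ones((10, transform.output_num_dims()))
JV = transform.backward_pass(V)                 # chain rule applied in reverse order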
Example #5
    def build(self, input_shape):
        self.expand_conv = Sequential([
            tf.layers.Conv2D(input_shape[3] * self._expansion_factor,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            Normalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.depthwise_conv = Sequential([
            DepthwiseConv2D(3,
                            strides=self._strides,
                            padding='same',
                            use_bias=False,
                            kernel_initializer=self._kernel_initializer,
                            kernel_regularizer=self._kernel_regularizer),
            Normalization(), self._activation,
            tf.layers.Dropout(self._dropout_rate)
        ])

        self.linear_conv = Sequential([
            tf.layers.Conv2D(self._filters,
                             1,
                             use_bias=False,
                             kernel_initializer=self._kernel_initializer,
                             kernel_regularizer=self._kernel_regularizer),
            Normalization(),
            tf.layers.Dropout(self._dropout_rate)
        ])

        super().build(input_shape)
Example #6
    def __init__(self,
                 activation,
                 kernel_initializer,
                 kernel_regularizer,
                 name='feature_pyramid_network'):
        super().__init__(name=name)

        self.p6_from_c5 = Sequential([
            tf.layers.Conv2D(
                256,
                3,
                2,
                padding='same',
                use_bias=False,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer),
            Normalization()
        ])

        self.p7_from_p6 = Sequential([
            activation,
            tf.layers.Conv2D(
                256,
                3,
                2,
                padding='same',
                use_bias=False,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer),
            Normalization()
        ])

        self.p5_from_c5 = Sequential([
            tf.layers.Conv2D(
                256,
                1,
                1,
                use_bias=False,
                kernel_initializer=kernel_initializer,
                kernel_regularizer=kernel_regularizer),
            Normalization()
        ])

        self.p4_from_c4p5 = FeaturePyramidNetwork.UpsampleMerge(
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='upsample_merge_c4p5')
        self.p3_from_c3p4 = FeaturePyramidNetwork.UpsampleMerge(
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='upsample_merge_c3p4')
Example #7
    def __init__(self):
        self.Nor = Normalization()
        self.sample_names = []
        self.all_data = pd.DataFrame()

        loader = QUiLoader()
        loader.registerCustomWidget(MplWidget)
        self.ui_comparation = loader.load('UIs/comparation.ui')

        self.ui_comparation.widget.canvas.axes = self.ui_comparation.widget.canvas.figure.add_axes(
            [0.12, 0.12, 0.8, 0.8])

        self.set_ui_comparation_connect()
        self.ui_comparation.show()
Example #8
 def update_norm(self):
     settings = Settings()
     enabled = settings.norm_enabled
     center = settings.center
     if not center:
         center = "None"
     spread = settings.spread
     if not spread:
         spread = "Unity"
     power = settings.power
     if not power:
         power = 2
     self._norm = Normalization(enabled, center, spread, power)
     self._norm_features = [self._norm.apply(f) for f in self._features]
     self.parent.status_bar.status()
Example #9
    def __init__(self,
                 num_anchors,
                 activation,
                 kernel_initializer,
                 kernel_regularizer,
                 name='classification_subnet'):
        super().__init__(name=name)

        self.num_anchors = num_anchors

        self.pre_conv = Sequential([
            Sequential([
                tf.layers.Conv2D(
                    256,
                    3,
                    1,
                    padding='same',
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization(),
                activation,
            ]) for _ in range(4)
        ])

        self.out_conv = tf.layers.Conv2D(
            num_anchors * 4,
            3,
            1,
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)
Example #10
    def get_num_speakers(self, ivecs, min_speakers=2, max_speakers=6):
        """ Obtain number of speakers from pretrained model.

            :param ivecs: input i-vectors
            :type ivecs: numpy.array
            :param min_speakers: minimum number of speakers to consider
            :type min_speakers: int
            :param max_speakers: maximum number of speakers to consider
            :type max_speakers: int
            :returns: estimated number of speakers and the corresponding KMeans centroids
            :rtype: tuple
        """
        avg, centroids_list = [], []
        features = []
        for num_speakers in range(min_speakers, max_speakers + 1):
            sklearnkmeans = sklearnKMeans(n_clusters=num_speakers).fit(ivecs)
            centroids = KMeans(sklearnkmeans.cluster_centers_, num_speakers,
                               self.plda).fit(ivecs)
            centroids_list.append(centroids)
            scores = self.s_norm(centroids,
                                 centroids)[np.tril_indices(num_speakers, -1)]
            features.append(Normalization.get_features(scores))
        num_speakers = np.argmax(
            np.sum(self.model.test(features, prob=True), axis=0))
        return num_speakers + min_speakers, centroids_list[num_speakers]
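
A hedged call sketch: `diar` is assumed to be an instance of the surrounding diarization class (with `plda`, `model`, and `s_norm` already initialized) and `ivecs` an (N, D) numpy array of i-vectors for one recording.

# Scans candidate counts in [min_speakers, max_speakers] and returns the
# winner plus its KMeans centroids.
num_speakers, centroids = diar.get_num_speakers(ivecs, min_speakers=2, max_speakers=6)
print('estimated number of speakers:', num_speakers)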
Example #11
def export_keras_to_tf(input_model='model.h5',
                       output_model='model.pb',
                       num_output=1):
    print('Loading Keras model: ', input_model)

    keras_model = load_model(input_model,
                             custom_objects={'Normalization': Normalization()})

    print(keras_model.summary())

    predictions = [None] * num_output
    prediction_node_names = [None] * num_output

    for i in range(num_output):
        prediction_node_names[i] = 'output_node' + str(i)
        predictions[i] = tf.identity(keras_model.outputs[i],
                                     name=prediction_node_names[i])

    session = K.get_session()

    constant_graph = graph_util.convert_variables_to_constants(
        session, session.graph.as_graph_def(), prediction_node_names)
    infer_graph = graph_util.remove_training_nodes(constant_graph)

    graph_io.write_graph(infer_graph, '.', output_model, as_text=False)
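
A hypothetical invocation (file names are placeholders); the frozen GraphDef written to model.pb can then be served for inference without Keras:

export_keras_to_tf(input_model='model.h5',
                   output_model='model.pb',
                   num_output=1)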
Example #12
    def __init__(self, data):
        self._normalization = Normalization(data)

        normalized_data = self._normalization.normalized_dataset()
        data_matrix = sp.matrix(normalized_data)
        m = data_matrix.shape[0]
        covariance_matrix = data_matrix.transpose() * data_matrix
        covariance_matrix /= m
        eig_decomp = linalg.eigh(covariance_matrix)
        self._n = len(eig_decomp[0])
        self._pcas = sp.zeros((self._n, self._n))
        for i in range(self._n):
            self._pcas[i, :] = eig_decomp[1][:, self._n - i - 1]

        self._eig_vals = list(eig_decomp[0])
        self._eig_vals.reverse()
Example #13
class Pca:
    def __init__(self, data):
        self._normalization = Normalization(data)

        normalized_data = self._normalization.normalized_dataset()
        data_matrix = sp.matrix(normalized_data)
        m = data_matrix.shape[0]
        covariance_matrix = data_matrix.transpose() * data_matrix
        covariance_matrix /= m
        eig_decomp = linalg.eigh(covariance_matrix)
        self._n = len(eig_decomp[0])
        self._pcas = sp.zeros((self._n, self._n))
        for i in range(self._n):
            self._pcas[i, :] = eig_decomp[1][:, self._n - i - 1]

        self._eig_vals = list(eig_decomp[0])
        self._eig_vals.reverse()

    @property
    def pcas(self):
        return self._pcas

    @property
    def eig_vals(self):
        return self._eig_vals

    @property
    def n(self):
        return self._n

    def project(self, vector, k):
        v = self._normalization.normalize_x(vector)

        # project it
        v = sp.array(v)
        dot_product = lambda pca: sum(pca[j] * v[j] for j in range(len(v)))
        return [dot_product(self.pcas[i]) for i in range(k)]

    def deproject(self, vector):
        v = list(vector)
        result = sp.zeros(self._n)
        for i in range(len(v)):
            result += self._pcas[i] * v[i]

        result = self._normalization.denormalize_x(list(result))
        return list(result)
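
A minimal usage sketch with made-up 2-D data, assuming Pca and its Normalization dependency are importable from the surrounding project:

data = [[2.5, 2.4], [0.5, 0.7], [2.2, 2.9], [1.9, 2.2], [3.1, 3.0]]
pca = Pca(data)
print(pca.eig_vals)                      # eigenvalues, largest first
compressed = pca.project(data[0], k=1)   # keep only the top component
restored = pca.deproject(compressed)     # approximate reconstruction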
Example #14
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img, content_layers,
                               style_layers):
    cnn = copy.deepcopy(cnn)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # normalization module
    normalization = Normalization(normalization_mean,
                                  normalization_std).to(device)

    # just in order to have an iterable access to the list of content/style losses
    content_losses = []
    style_losses = []

    # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential
    # to put in modules that are supposed to be activated sequentially
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(
                layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break

    model = model[:(i + 1)]
    return model, style_losses, content_losses
Example #15
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)
        if exec_net is None:
            steering_angle = float(
                model.predict(image_array[None, :, :, :], batch_size=1))
        else:
            image_array = image_array[55:-25, 0:320]
            norm = Normalization()
            image_array = norm.call(image_array)
            image_array = image_array.transpose((2, 0, 1))
            image_array = image_array.reshape(1, 3, input_shape[2],
                                              input_shape[3])
            input_blob = next(iter(exec_net.inputs))
            it = iter(exec_net.outputs)
            output_blob = next(it)
            for output_blob in it:
                pass

            res = exec_net.infer({input_blob: image_array})
            steering_angle = res[output_blob][0][0]

        throttle = controller.update(float(speed))

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
Example #16
    def build(self, input_shape):
        self._conv = tf.layers.Conv2D(
            64,
            7,
            2,
            padding='same',
            use_bias=False,
            kernel_initializer=self._kernel_initializer,
            kernel_regularizer=self._kernel_regularizer)
        self._bn = Normalization()

        super().build(input_shape)
Example #17
    def __set_dataset1(self, train, test):
        m_list = list(train.keys())
        RMT = ReshapeMergerTree()

        ##Make train/test_input/output arrays.
        for m_key in m_list:
            train_input_, train_output_ = RMT.make_dataset(
                train[m_key], self.input_size, self.output_size)
            test_input_, test_output_ = RMT.make_dataset(
                test[m_key], self.input_size, self.output_size)
            if m_key == m_list[0]:
                train_input, train_output = train_input_, train_output_
                test_input, test_output = test_input_, test_output_
            else:
                train_input, train_output = np.concatenate(
                    [train_input, train_input_],
                    axis=0), np.concatenate([train_output, train_output_],
                                            axis=0)
                test_input, test_output = np.concatenate(
                    [test_input, test_input_],
                    axis=0), np.concatenate([test_output, test_output_],
                                            axis=0)

        ##Normalize these input/output arrays.
        ##The test-array is normalized by train-array's normalization parameters.
        Norm_input, Norm_output = Normalization(
            self.norm_format), Normalization(self.norm_format)
        train_input, train_output = Norm_input.run(
            train_input), Norm_output.run(train_output)
        test_input, test_output = Norm_input.run_predict(
            test_input), Norm_output.run_predict(test_output)
        self.Norm_input, self.Norm_output = Norm_input, Norm_output

        ##Masking process to prevent division by zero.
        mask = (train_output == 0.0)
        train_output[mask] += 1e-7
        mask = (test_output == 0.0)
        test_output[mask] += 1e-7

        return train_input, train_output, test_input, test_output
Example #18
    def train(self, filename='model.h5', batch_size=16):
        """Train the model and store the results in the specified file.

        filename - name of the output model file.
        batch_size - number of samples per training batch.
        """

        # Create model
        model = Sequential()

        # Preprocess the images
        model.add(
            Cropping2D(cropping=((55, 25), (0, 0)),
                       input_shape=(160, 320, 3)))  # trimming
        model.add(Normalization())  # normalization

        # Create network architecture based on https://devblogs.nvidia.com/deep-learning-self-driving-cars/
        model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Conv2D(64, (3, 3), activation='relu'))
        model.add(Flatten())
        model.add(Dense(100))
        model.add(Dense(50))
        model.add(Dense(10))
        model.add(Dense(1))
        model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
        plot_model(model, to_file='images/model.png', show_shapes=True)

        # train the model
        history = model.fit_generator(
            self.generator(self.train_samples, batch_size=batch_size),
            steps_per_epoch=ceil(len(self.train_samples) / batch_size),
            validation_data=self.generator(self.validation_samples,
                                           batch_size=batch_size),
            validation_steps=ceil(len(self.validation_samples) / batch_size),
            epochs=5,
            verbose=1,
            callbacks=[
                ModelCheckpoint(filename, verbose=1, save_best_only=True)
            ])
        print(history.history.keys())
        print('Loss:')
        print(history.history['loss'])
        print('Validation Loss:')
        print(history.history['val_loss'])
        print('Accuracy:')
        print(history.history['acc'])
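
A hypothetical call, assuming the surrounding class was constructed with self.train_samples and self.validation_samples already loaded:

pipeline.train(filename='model.h5', batch_size=32)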
Example #19
def get_style_model_and_losses(cnn, normalization_mean, normalization_std,
                               style_img, content_img, content_layers,
                               style_layers, device):
    cnn = copy.deepcopy(cnn)
    normalization = Normalization(normalization_mean,
                                  normalization_std).to(device)

    content_losses = []
    style_losses = []
    model = nn.Sequential(normalization)

    i = 0  # increment every time we see a conv
    for layer in cnn.children():
        if isinstance(layer, nn.Conv2d):
            i += 1
            name = 'conv_{}'.format(i)
        elif isinstance(layer, nn.ReLU):
            name = 'relu_{}'.format(i)
            layer = nn.ReLU(inplace=False)
        elif isinstance(layer, nn.MaxPool2d):
            name = 'pool_{}'.format(i)
        elif isinstance(layer, nn.BatchNorm2d):
            name = 'bn_{}'.format(i)
        else:
            raise RuntimeError('Unrecognized layer: {}'.format(
                layer.__class__.__name__))

        model.add_module(name, layer)

        if name in content_layers:
            # add content loss:
            target = model(content_img).detach()
            content_loss = ContentLoss(target)
            model.add_module("content_loss_{}".format(i), content_loss)
            content_losses.append(content_loss)

        if name in style_layers:
            # add style loss:
            target_feature = model(style_img).detach()
            style_loss = StyleLoss(target_feature)
            model.add_module("style_loss_{}".format(i), style_loss)
            style_losses.append(style_loss)

    # now we trim off the layers after the last content and style losses
    for i in range(len(model) - 1, -1, -1):
        if isinstance(model[i], ContentLoss) or isinstance(
                model[i], StyleLoss):
            break

    model = model[:(i + 1)]
    return model, style_losses, content_losses
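
A hedged setup sketch showing the inputs this function expects (it mirrors the PyTorch neural-style tutorial); style_img and content_img are assumed to be preloaded (1, 3, H, W) image tensors on the same device:

import torch
import torchvision.models as models

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cnn = models.vgg19(pretrained=True).features.to(device).eval()
mean = torch.tensor([0.485, 0.456, 0.406]).to(device)
std = torch.tensor([0.229, 0.224, 0.225]).to(device)

model, style_losses, content_losses = get_style_model_and_losses(
    cnn, mean, std, style_img, content_img,
    content_layers=['conv_4'],
    style_layers=['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'],
    device=device)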
Example #20
class NormTable(Table):
    def __init__(self, table_view, parent):
        super().__init__(table_view, parent, hide_scroll=True)
        self._norm = None
        self.update_norm()
        self.nominal_denominator = dict()
        self._norm_features = [self._norm.apply(f) for f in self._features]

    def update_norm(self):
        settings = Settings()
        enabled = settings.norm_enabled
        center = settings.center
        if not center:
            center = "None"
        spread = settings.spread
        if not spread:
            spread = "Unity"
        power = settings.power
        if not power:
            power = 2
        self._norm = Normalization(enabled, center, spread, power)
        self._norm_features = [self._norm.apply(f) for f in self._features]
        self.parent.status_bar.status()
        # model = FeaturesTableModel(features=self.features)
        # self._table_view.setModel(model)
        # self.set_features(self._features) # update (do not remove)

    @property
    def norm(self):
        return self._norm

    @property
    def features(self):
        return self._norm_features

    def set_features(self, features):
        if not self._check_name_uniquness(features):
            return
        self._features = features
        self.update_norm()
        model = FeaturesTableModel(features=self.features)
        self._table_view.setModel(model)

    def context_menu(self, point, feature=None):
        menu = super().context_menu(point)
        menu.popup(self._table_view.horizontalHeader().mapToGlobal(point))

    def update(self):
        for _f, f in zip(self._features, self.features):
            _f._markers = f._markers  # TODO reorganize
        self.set_features(self._features)
Example #21
def gradient_descent(data1, norm, Y_actual_, check):
    # Batch gradient descent on the normalized data. When check == 1 the
    # predictions are additionally raised to the 10th power and a much
    # smaller learning rate is used.
    data = data1.values
    row, col = data.shape
    Learning_Rate = 1e-16
    b = np.random.random(col)
    data = denormalizaton(data, norm)
    err = 0.01  # convergence tolerance on the change in SSE
    k = 0
    Y_pre = np.matmul(data, b)
    if check == 1:
        Y_pre = np.power(Y_pre, 10)
        Learning_Rate = 1e-28
    res = np.subtract(Y_actual_, Y_pre)
    SSE = np.dot(res, res)  # sum of squared errors
    while True:
        norm, data = Normalization(data1)
        data = data.values
        # gradient step scaled by the learning rate
        b_new = np.matmul(np.transpose(res), data)
        b_new = np.multiply(b_new, Learning_Rate / row)
        b_new = np.subtract(b, b_new)
        b = b_new
        data = denormalizaton(data, norm)
        Y_pre = np.matmul(data, b_new)
        if check == 1:
            Y_pre = np.power(Y_pre, 10)

        res = np.subtract(Y_actual_, Y_pre)
        J_theta = np.dot(res, res)
        if abs(J_theta - SSE) <= err:
            return b_new, data
        SSE = J_theta

        k += 1
        if k >= 10000:  # hard cap on iterations
            print(b_new)
            return b_new, data
Example #22
 def get_num_speakers(self, ivecs, min_speakers=2, max_speakers=6):
     avg, centroids_list = [], []
     features = []
     for num_speakers in range(min_speakers, max_speakers + 1):
         sklearnkmeans = sklearnKMeans(n_clusters=num_speakers).fit(ivecs)
         centroids = KMeans(sklearnkmeans.cluster_centers_, num_speakers,
                            self.plda).fit(ivecs)
         centroids_list.append(centroids)
         scores = self.s_norm(centroids,
                              centroids)[np.tril_indices(num_speakers, -1)]
         features.append(Normalization.get_features(scores))
     num_speakers = np.argmax(
         np.sum(self.model.test(features, prob=True), axis=0))
     return num_speakers + min_speakers, centroids_list[num_speakers]
Example #23
    def __init__(self,
                 backbone,
                 levels,
                 num_classes,
                 activation,
                 dropout_rate,
                 kernel_initializer,
                 kernel_regularizer,
                 name='retinanet_base'):
        super().__init__(name=name)

        self.backbone = build_backbone(backbone, activation=activation, dropout_rate=dropout_rate)

        if backbone == 'densenet':
            # TODO: check if this is necessary
            # DenseNet has preactivation architecture,
            # so we need to apply activation before passing features to FPN
            self.postprocess_bottom_up = {
                cn: Sequential([
                    Normalization(),
                    activation
                ])
                for cn in ['C3', 'C4', 'C5']
            }
        else:
            self.postprocess_bottom_up = None

        self.fpn = FeaturePyramidNetwork(
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer)

        self.classification_subnet = ClassificationSubnet(
            num_anchors=levels.num_anchors,  # TODO: level anchor boxes
            num_classes=num_classes,
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='classification_subnet')

        self.regression_subnet = RegressionSubnet(
            num_anchors=levels.num_anchors,  # TODO: level anchor boxes
            activation=activation,
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            name='regression_subnet')
Example #24
 def train(self,
           filename=os.path.dirname(os.path.abspath(__file__)) +
           '/model_data/lenet_' + str(dt.now()) + '.h5',
           batch_size=16,
           epochs=15):
     from keras.models import Sequential
     from keras.layers import Flatten, Dense, Conv2D, MaxPooling2D, \
     Cropping2D, Dropout
     from keras.callbacks import ModelCheckpoint
     from keras.utils import plot_model
     from math import ceil
     model = Sequential()
     model.add(Normalization(input_shape=(240, 120, 3)))
     model.add(Conv2D(24, (5, 5), strides=(2, 2), activation='relu'))
     model.add(Conv2D(36, (5, 5), strides=(2, 2), activation='relu'))
     model.add(Conv2D(48, (5, 5), strides=(2, 2), activation='relu'))
     model.add(Conv2D(64, (3, 3), activation='relu'))
     model.add(Conv2D(64, (3, 3), activation='relu'))
     model.add(Flatten())
     model.add(Dense(100))
     model.add(Dropout(0.5))
     model.add(Dense(50))
     model.add(Dropout(0.5))
     model.add(Dense(4))
     model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
     history = model.fit_generator(
         self.generator(self.train_samples, batch_size=batch_size),
         steps_per_epoch=ceil(len(self.train_samples) / batch_size),
         validation_data=self.generator(self.validation_samples,
                                        batch_size=batch_size),
         validation_steps=ceil(len(self.validation_samples) / batch_size),
         epochs=epochs,
         verbose=1,
         callbacks=[
             ModelCheckpoint(filename, verbose=1, save_best_only=True)
         ])
     print(history.history.keys())
     print('Loss:')
     print(history.history['loss'])
     print('Validation Loss:')
     print(history.history['val_loss'])
     print('Accuracy:')
     print(history.history['acc'])
Example #25
    def __init__(self,
                 input_filters,
                 compression_factor,
                 dropout_rate,
                 kernel_initializer,
                 kernel_regularizer,
                 name='transition_layer'):
        self.input_filters = input_filters
        filters = int(input_filters * compression_factor)

        layers = [
            Normalization(),
            tf.layers.Conv2D(filters,
                             1,
                             use_bias=False,
                             kernel_initializer=kernel_initializer,
                             kernel_regularizer=kernel_regularizer),
            Dropout(dropout_rate),
            tf.layers.AveragePooling2D(2, 2, padding='same')
        ]

        super().__init__(layers, name=name)
Example #26
def getHealthIndex(filePath, zoomFactor, translationFactor):
    normalization = Normalization(filePath, zoomFactor, translationFactor)
    X = np.array(normalization.X_train)
    X = np.row_stack((X, normalization.X_predict))
    for i in range(np.size(X[0])):
        temp = X[:, i]
        print("第个参数的相关系数为", i, sc.stats.pearsonr(temp,
                                                 np.arange(np.size(temp))))
    X = X[:, featureSelected]
    print("特征矩阵:", X)
    pca = PCA(n_components=2, svd_solver='full')
    pca.fit(X)
    print("主成分占比:", pca.explained_variance_ratio_)
    feature = pca.fit_transform(X)[-1][0]
    print("降维后的特征指数HI::", feature)
    # 计算健康指数
    dataframe = pd.DataFrame({'healthLevel': [feature]})
    dataframe.to_csv("./data/healthLevel.csv", index=False, sep=',')

    healthIndex = np.transpose(pca.fit_transform(X))[0]
    X = np.arange(1, np.size(healthIndex), 1)

    # health degradation curve data
    data = np.transpose(pd.read_csv('./data/curve.csv').values)

    # compute the health level
    healthLevel = levelDivide(feature)
    dataframe = pd.DataFrame({'healthLevel': [healthLevel]})
    dataframe.to_csv("./data/healthIndex.csv", index=False, sep=',')

    # compute the minimum and maximum time to the next health level
    remainTime = nextLevelTime(healthLevel, feature, data)
    dataframe = pd.DataFrame({'maxTime & minTime': remainTime})
    dataframe.to_csv("./data/remainTime.csv", index=False, sep=',')

    # plot the degradation curve
    plt.plot(data[0], data[1])
    plt.plot(X, [feature] * np.size(X))
    plt.show()
Example #27
    def __init__(self,
                 num_anchors,
                 num_classes,
                 activation,
                 kernel_initializer,
                 kernel_regularizer,
                 name='classification_subnet'):
        super().__init__(name=name)

        self.num_anchors = num_anchors
        self.num_classes = num_classes

        self.pre_conv = Sequential([
            Sequential([
                tf.layers.Conv2D(
                    256,
                    3,
                    1,
                    padding='same',
                    use_bias=False,
                    kernel_initializer=kernel_initializer,
                    kernel_regularizer=kernel_regularizer),
                Normalization(),
                activation,
            ]) for _ in range(4)
        ])

        # focal-loss prior (RetinaNet): initialize the classification bias so
        # that every anchor starts with a foreground probability of pi = 0.01
        pi = 0.01
        bias_prior_initializer = tf.constant_initializer(-math.log((1 - pi) / pi))

        self.out_conv = tf.layers.Conv2D(
            num_anchors * num_classes,
            3,
            1,
            padding='same',
            kernel_initializer=kernel_initializer,
            kernel_regularizer=kernel_regularizer,
            bias_initializer=bias_prior_initializer)
Example #28
    def __init__(self):
        #TODO load classifier
        model_data_path = os.path.dirname(os.path.abspath(__file__))
        self.detector = YOLO(anchors_path=model_data_path +
                             '/keras_yolo3/model_data/tiny_yolo_anchors.txt',
                             model_path=model_data_path +
                             '/model_data/tiny_yolo.h5',
                             class_name='traffic light',
                             height=240,
                             width=120)
        model_name = model_data_path + '/model_data/lenet_traffic_light.h5'
        f = h5py.File(model_name, mode='r')
        model_version = f.attrs.get('keras_version')
        keras_version = str(keras.__version__).encode('utf8')

        if model_version != keras_version:
            print('You are using Keras version ', keras_version,
                  ', but the model was built using ', model_version)

        self.classifier = load_model(
            model_name, custom_objects={'Normalization': Normalization()})
        global graph
        graph = tf.get_default_graph()
Example #29
    def set_dataset(self, dataset, train_ratio, fft_format, norm_format):
        mlist = dataset.keys()
        MTTD = MakeTrainTestDataset(mlist)
        train, test = MTTD.split(dataset, train_ratio)
        RMT_train, RMT_test = {}, {}
        train_input, train_output = {}, {}
        test_input, test_output = {}, {}
        train_input_, train_output_ = None, None
        test_input_, test_output_ = None, None
        if fft_format == "fft":
            fft = lambda x: np.fft.fft(x)
        elif fft_format == "rfft":
            fft = lambda x: np.fft.rfft(x)
        else:
            raise ValueError('unknown fft_format: {}'.format(fft_format))
        if self.is_epoch_in_each_mlist:
            Norm_train_input, Norm_train_output = {}, {}
            Norm_test_input, Norm_test_output = {}, {}
            for m_key in mlist:
                Norm_train_input[m_key] = Normalization(norm_format)
                Norm_train_output[m_key] = Normalization(norm_format)
                Norm_test_input[m_key] = Normalization(norm_format)
                Norm_test_output[m_key] = Normalization(norm_format)
        Norm_train_input_ = Normalization(norm_format)
        Norm_train_output_ = Normalization(norm_format)
        Norm_test_input_ = Normalization(norm_format)
        Norm_test_output_ = Normalization(norm_format)
        for m_key in mlist:
            RMT_train[m_key] = ReshapeMergerTree()
            RMT_test[m_key] = ReshapeMergerTree()
            train_input[m_key], train_output[m_key] = RMT_train[
                m_key].make_dataset(train[m_key], self.input_size,
                                    self.output_size)
            test_input[m_key], test_output[m_key] = RMT_test[
                m_key].make_dataset(test[m_key], self.input_size,
                                    self.output_size)
            if train_input_ is None and test_input_ is None:
                train_input_, train_output_ = train_input[m_key], train_output[
                    m_key]
                test_input_, test_output_ = test_input[m_key], test_output[
                    m_key]
            else:
                train_input_ = np.concatenate(
                    [train_input_, train_input[m_key]], axis=0)
                train_output_ = np.concatenate(
                    [train_output_, train_output[m_key]], axis=0)
                test_input_ = np.concatenate([test_input_, test_input[m_key]],
                                             axis=0)
                test_output_ = np.concatenate(
                    [test_output_, test_output[m_key]], axis=0)
            if self.is_epoch_in_each_mlist:
                train_input[m_key] = Norm_train_input[m_key].run(
                    fft(train_input[m_key]))
                train_output[m_key] = Norm_train_output[m_key].run(
                    fft(train_output[m_key]))
                test_input[m_key] = Norm_test_input[m_key].run(
                    fft(test_input[m_key]))
                test_output[m_key] = Norm_test_output[m_key].run(
                    fft(test_output[m_key]))

        train_input_ = Norm_train_input_.run(fft(train_input_))
        train_output_ = Norm_train_output_.run(fft(train_output_))
        test_input_ = Norm_test_input_.run(fft(test_input_))
        test_output_ = Norm_test_output_.run(fft(test_output_))
        train_mask_real = (train_output_.real == 0.0)
        train_mask_imag = (train_output_.imag == 0.0)
        train_output_[train_mask_real] += 1e-7
        train_output_[train_mask_imag] += 1e-7j
        if self.is_epoch_in_each_mlist:
            for m_key in mlist:
                train_mask_real = (train_output[m_key].real == 0.0)
                train_mask_imag = (train_output[m_key].imag == 0.0)
                test_mask_real = (test_output[m_key].real == 0.0)
                test_mask_imag = (test_output[m_key].imag == 0.0)
                train_output[m_key][train_mask_real] += 1e-7
                train_output[m_key][train_mask_imag] += 1e-7j
                test_output[m_key][test_mask_real] += 1e-7
                test_output[m_key][test_mask_imag] += 1e-7j
            return train_input_, train_output_, train_input, train_output, test_input, test_output
        else:
            test_mask_real = (test_output_.real == 0.0)
            test_mask_imag = (test_output_.imag == 0.0)
            test_output_[test_mask_real] += 1e-7
            test_output_[test_mask_imag] += 1e-7j
            return train_input_, train_output_, test_input_, test_output_
Example #30
 def __init__(self, num_dims, num_factors=2, name="Norm Lin"):
     self.name = name
     self.num_dims = num_dims
     self._norm = Normalization(num_dims)
     self._proj = Linear(num_dims, num_factors=num_factors)