Example #1
    def __init__(self,
                 dataset_name,
                 arch,
                 num_output_layers,
                 weight_fname,
                 has_pose=False):
        self.dataset_name = dataset_name
        self.has_pose = has_pose

        if dataset_name == DatasetName.w300:
            self.output_len = D300wConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.cofw:
            self.output_len = CofwConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.wflw:
            self.output_len = WflwConf.num_of_landmarks * 2

        cnn = CNNModel()
        model = cnn.get_model(arch=arch,
                              input_tensor=None,
                              output_len=self.output_len)

        model.load_weights(weight_fname)

        img = None  # load a cropped image

        image_utility = ImageUtility()
        image = np.expand_dims(img, axis=0)  # add the batch dimension

        # predict() returns multiple heads; [1] is the second output (the pose, given the
        # variable name) and [0] selects the first (only) sample in the batch
        pose_predicted = model.predict(image)[1][0]
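In the example above, `img` is left as a placeholder. A minimal sketch of how such a cropped face image might be loaded and normalized before prediction, assuming OpenCV, a 224x224 RGB input and [0, 1] scaling (none of which are taken from the original repository):

import cv2
import numpy as np

# Hypothetical helper, not part of the original snippet: load a face crop and
# scale it to the assumed network input size.
def load_cropped_image(path, size=(224, 224)):
    img = cv2.imread(path)                      # BGR, uint8
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # convert to RGB
    img = cv2.resize(img, size)
    return img.astype(np.float32) / 255.0       # assumed [0, 1] normalization

img = load_cropped_image('face_crop.jpg')       # 'face_crop.jpg' is a placeholder path
image = np.expand_dims(img, axis=0)             # add the batch dimension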
Example #2
 def _init_model(self):
     # if self.model is None:
     try:
         with tf.variable_scope("classifier"):
             self.model = CNNModel(
                 seq_length=self.args.seq_length,
                 label_size=self.args.label_size,
                 vocab_size=self.args.vocab_size,
                 embedding_size=self.args.embedding_dim,
                 filter_sizes=list(
                     map(int, self.args.filter_sizes.split(","))),
                 num_filters=self.args.num_filters,
                 l2_reg_lambda=self.args.l2_reg_lambda)
             # self.model = BIDIRNNModel(self.args)
     except ValueError as ve:
         with tf.variable_scope("classifier", reuse=True):
             self.model = CNNModel(
                 seq_length=self.args.seq_length,
                 label_size=self.args.label_size,
                 vocab_size=self.args.vocab_size,
                 embedding_size=self.args.embedding_dim,
                 filter_sizes=list(
                     map(int, self.args.filter_sizes.split(","))),
                 num_filters=self.args.num_filters,
                 l2_reg_lambda=self.args.l2_reg_lambda)
Example #3
    def _create_regressor_net(self, input_tensor, input_shape):
        """
        This is the main network we use for predicting heatmaps (hm) as well as points:
        input:
            X: img
            Y: [hm, points]

        :param input_tensor:
        :param input_shape:
        :return: keras model created for the geo-hm regression task.
        """
        cnn = CNNModel()
        model = cnn.get_model(
            input_tensor=input_tensor,
            arch=self.regressor_arch,
            num_landmark=self.num_landmark,
            input_shape=input_shape,
            num_face_graph_elements=self.num_face_graph_elements)
        if self.regressor_weight is not None:
            model.load_weights(self.regressor_weight)
        model.compile(loss=self.geo_loss,
                      optimizer=self._get_optimizer(),
                      metrics=['mse'])

        return model
Example #4
def main():
    
    cnn_model = CNNModel(
        config['plate_detector_service']['yolo']['model_path'], 
        config['plate_detector_service']['yolo']['weight_path']
    )

    if config['plate_detector_service']['source_type'] == "rtsp":
        capture = cv2.VideoCapture(config['plate_detector_service']['rtsp']['source'], cv2.CAP_FFMPEG)

    if config['plate_detector_service']['source_type'] == "video":
        capture = cv2.VideoCapture(config['plate_detector_service']['video_path'])

    if config['plate_detector_service']['source_type'] == "webcam":
        capture = cv2.VideoCapture(0)
    
    count = 0
    while capture.isOpened():
        
        ret, frame = capture.read()
        if not ret:
            break

        if count > 20:
            count = 0
            image = Image(frame)
            image_with_predicted_plate = cnn_model.predict_plate_number(image)
            
            if len(image_with_predicted_plate.getPlateCoordinates()) != 0:
                image_original = image_with_predicted_plate.getImage()
                plate_coordinates = map(
                    lambda plate_coordinate: {'min_x': plate_coordinate.getMinX(),
                                              'max_x': plate_coordinate.getWidth(),
                                              'min_y': plate_coordinate.getMinY(),
                                              'max_y': plate_coordinate.getHeight()},
                    image_with_predicted_plate.getPlateCoordinates())
                publish_image_with_detected_plates(image_original, list(plate_coordinates))
        
        count = count + 1
Example #5
    def _create_hm_regressor_net(self,
                                 input_tensor,
                                 input_shape,
                                 is_trainable=True):
        """
        This is the main network we use for predicting heatmaps (hm):
        input:
            X: img
            Y: [hm]

        :param input_tensor:
        :param input_shape:
        :return: keras model created for the geo-hm regression task.
        """
        cnn = CNNModel()
        model = cnn.get_model(input_tensor=input_tensor,
                              arch=self.hm_regressor_arch,
                              num_landmark=self.num_landmark,
                              input_shape=input_shape,
                              num_face_graph_elements=None)
        if self.hm_regressor_weight is not None:
            model.load_weights(self.hm_regressor_weight)

        model.trainable = is_trainable

        model.compile(loss=tf.keras.losses.mean_squared_error,
                      optimizer=self._get_optimizer(),
                      metrics=['mse'])
        return model
Example #6
 def _create_cord_discriminator_net(self,
                                    input_tensor,
                                    input_shape,
                                    is_trainable=True):
     """
      This is the discriminator network, used at the second stage to discriminate
      between real data and fake data generated by the RegressorNetwork.
     :param input_tensor:
     :param input_shape:
     :return:
     """
     cnn = CNNModel()
     model = cnn.get_model(input_tensor=input_tensor,
                           arch=self.cord_discriminator_arch,
                           input_shape=input_shape,
                           num_landmark=self.num_landmark,
                           num_face_graph_elements=None)
     if self.cord_discriminator_weight is not None:
         model.load_weights(self.cord_discriminator_weight)
     model.trainable = is_trainable
     model.compile(
         loss=tf.keras.losses.BinaryCrossentropy(from_logits=True),
         optimizer=self._get_optimizer(lr=1e-4, decay=1e-6),
         metrics=['accuracy'])
     return model
Example #7
 def _init_model(self):
     # if not self.model:
     try:
         with tf.variable_scope('classifier'):
             self.model = CNNModel(
                 seq_length=self.args.seq_length,
                 label_size=self.args.label_size,
                 vocab_size=self.args.vocab_size,
                 embedding_size=self.args.embedding_dim,
                 filter_sizes=list(
                     map(int, self.args.filter_sizes.split(','))),
                 num_filters=self.args.num_filters,
                 l2_reg_lambda=self.args.l2_reg_lambda)
             # self.model = BIDIRNNModel(self.args)
     except ValueError as ve:
         with tf.variable_scope('classifier', reuse=True):
             self.model = CNNModel(
                 seq_length=self.args.seq_length,
                 label_size=self.args.label_size,
                 vocab_size=self.args.vocab_size,
                 embedding_size=self.args.embedding_dim,
                 filter_sizes=list(
                     map(int, self.args.filter_sizes.split(','))),
                 num_filters=self.args.num_filters,
                 l2_reg_lambda=self.args.l2_reg_lambda)
Example #8
def get_predictions(X_test_images, Y_test_labels):
    """
    Args:
    ------
    Given HDF5 data of X_test_images and Y_test_labels

    Returns:
    --------
    predictions: probability values for each class
    label_predictions: predicted classes (one-hot encoded)
    """

    ## Model definition
    convnet = CNNModel()
    network = convnet.define_network(X_test_images)
    model = tflearn.DNN(network, tensorboard_verbose=0,
                        checkpoint_path='nodule3-classifier.tfl.ckpt')
    model.load("nodule3-classifier.tfl")

    predictions = np.vstack(model.predict(X_test_images[:, :, :, :]))
    # label_predictions = np.vstack(model.predict_label(X_test_images[:, :, :, :]))
    score = model.evaluate(X_test_images, Y_test_labels)
    label_predictions = np.zeros_like(predictions)
    label_predictions[np.arange(len(predictions)), predictions.argmax(1)] = 1
    return predictions, label_predictions
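A hedged usage sketch for `get_predictions`, assuming `X_test_images` and `Y_test_labels` are already loaded as NumPy arrays with one-hot labels; the accuracy computation is illustrative and not part of the original snippet:

import numpy as np

# Assumes X_test_images has shape (N, H, W, C) and Y_test_labels is one-hot (N, n_classes).
predictions, label_predictions = get_predictions(X_test_images, Y_test_labels)

# Fraction of samples whose predicted class matches the ground truth.
accuracy = np.mean(label_predictions.argmax(axis=1) == Y_test_labels.argmax(axis=1))
print('accuracy: %.4f' % accuracy)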
Example #9
    def __init__(self, dataset_name, weight_fname):
        if dataset_name is None:
            return

        self.dataset_name = dataset_name

        if dataset_name == DatasetName.ibug_test:
            self.num_landmark = IbugConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.cofw_test:
            self.num_landmark = CofwConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.wflw_test:
            self.num_landmark = WflwConf.num_of_landmarks * 2

        cnn = CNNModel()
        model = cnn.get_model(input_tensor=None, arch='cord_reg_model', num_landmark=self.num_landmark,
                              input_shape=[224, 224, 3], num_face_graph_elements=None)

        if weight_fname is not None:
            model.load_weights(weight_fname)

        arch = ''

        if dataset_name == DatasetName.ibug_test:
            print(dataset_name + " _ " + arch + " _ : \r\n" + str(self._test_on_W300(model)))
        elif dataset_name == DatasetName.cofw_test:
            print(dataset_name + " _ " + arch + " _ : \r\n" + str(self._test_on_COFW(model)))
        elif dataset_name == DatasetName.wflw_test:
            print(dataset_name + " _ " + arch + " _ : \r\n" + str(self._test_on_WFLW(model)))
Example #10
    def __init__(self,
                 dataset_name,
                 arch,
                 num_output_layers,
                 weight_fname,
                 has_pose=False):
        self.dataset_name = dataset_name
        self.has_pose = has_pose

        if dataset_name == DatasetName.ibug:
            self.output_len = IbugConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.cofw_test:
            self.output_len = CofwConf.num_of_landmarks * 2
        elif dataset_name == DatasetName.wflw_test:
            self.output_len = WflwConf.num_of_landmarks * 2

        cnn = CNNModel()
        model = cnn.get_model(train_images=None,
                              arch=arch,
                              num_output_layers=num_output_layers,
                              input_tensor=None,
                              output_len=self.output_len)

        model.load_weights(weight_fname)

        if dataset_name == DatasetName.ibug:
            self._test_on_W300(model)
        elif dataset_name == DatasetName.cofw_test:
            self._test_on_COFW(model)
        elif dataset_name == DatasetName.wflw_test:
            self._test_on_WFLW(model)
Example #11
    def test(self):
        image_utility = ImageUtility()
        cnn = CNNModel()

        images_dir_out_ch = '/media/ali/extradata/facial_landmark_ds/from_ibug/test_set/challenging_intermediate_img/'

        model = cnn.mobileNet_v2_small(tensor=None)
        model.load_weights('weights-03-0.00095.h5')

        counter = 0

        for img_file in os.listdir(images_dir_out_ch):
            if img_file.endswith("_img.npy"):
                lbl_file = img_file[:-8] + "_lbl.npy"
                main_img_file = img_file[:-8] + ".jpg"

                img = Image.open(images_dir_out_ch + main_img_file)

                image = np.expand_dims(load(images_dir_out_ch + img_file),
                                       axis=0)
                lbl = load(images_dir_out_ch + lbl_file)

                p_lbl = model.predict(image)[0]

                labels_predict_transformed, landmark_arr_x_p, landmark_arr_y_p = image_utility.\
                    create_landmarks_from_normalized(p_lbl, 224, 224, 112, 112)

                imgpr.print_image_arr((counter + 1) * 1000, img,
                                      landmark_arr_x_p, landmark_arr_y_p)
                counter += 1
Example #12
def extract_features(signatures_path, save_path, model_weight_path, canvas_size=(150, 220)):
    print('Processing images from folder "%s" and saving to folder "%s"' % (signatures_path, save_path))
    print('Using model %s' % model_weight_path)
    print('Using canvas size: %s' % (canvas_size,))

    model = CNNModel(signet, model_weight_path)
    files = os.listdir(signatures_path)

    # Note: if there is a large number of signatures to process, it is faster to
    # process them in batches (i.e. use "get_feature_vector_multiple")
    for f in files:
        if os.path.isdir(os.path.join(signatures_path, f)):
            extract_features(os.path.join(signatures_path, f), os.path.join(save_path, f), model, canvas_size)
            continue

        # Skip if file is not an image
        if f.split('.')[-1] not in ['jpg', 'png', 'tif']:
            continue
    
        # Load and pre-process the signature
        filename = os.path.join(signatures_path, f)
        print "Processing", filename
        original = imread(filename, flatten=1)
        #processed = preprocess_signature(original, canvas_size)
        #Image.fromarray(processed).show()

        # Use the CNN to extract features
        feature_vector = model.get_feature_vector(original)

        # Save in the matlab format
        save_filename = os.path.join(save_path, os.path.splitext(f)[0] + '.mat')
        if not os.path.exists(save_path):
            os.makedirs(save_path)

        scipy.io.savemat(save_filename, {'feature_vector':feature_vector})
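The comment in the snippet above recommends batching via `get_feature_vector_multiple` when many signatures are involved. A minimal sketch of that batched path, assuming `preprocess_signature`, `imread`, `canvas_size` and `model` are available exactly as in the surrounding snippets:

import os
import numpy as np

# Hypothetical batched variant, not taken from the repository: preprocess every
# signature first, then extract all feature vectors in a single call.
image_files = [f for f in os.listdir(signatures_path)
               if f.split('.')[-1] in ['jpg', 'png', 'tif']]
batch = np.array([preprocess_signature(imread(os.path.join(signatures_path, f), flatten=1),
                                        canvas_size)
                  for f in image_files])
feature_vectors = model.get_feature_vector_multiple(batch, layer='fc2')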
Example #13
 def make_model(self, arch, w_path, is_old=False):
     cnn = CNNModel()
     model = cnn.get_model(arch=arch, output_len=self.num_landmark, input_tensor=None, weight_path=w_path,
                           is_old=is_old)
     if w_path is not None and arch != 'mobileNetV2_d' and not is_old:
         model.load_weights(w_path)
     # model.save('test_model'+arch+'.h5')
     return model
Example #14
 def __init__(self, train_config, model_config, is_training=True):
     self.train_config, self.model_config = train_config, model_config
     self.is_training = is_training
     self.cnn_model = CNNModel(self.train_config.cnn_keep_prob, is_training=is_training)
     self.rnn_model = RNNModel(train_config.learning_rate, model_config.n_fcs, model_config.n_views,
                               model_config.n_hidden, model_config.n_classes, train_config.rnn_keep_prob if is_training else 1.0, is_training=self.is_training)
     self.gpu_config = tf.ConfigProto()
     self.gpu_config.gpu_options.allow_growth = True
     self.data = modelnet.read_data(FLAGS.modelnet_path)
Example #15
    def train_fit_on_batch(self):
        """"""
        """define optimizers"""
        optimizer = self._get_optimizer()
        '''create asm_pre_trained net'''
        cnn = CNNModel()
        asm_model = cnn.create_multi_branch_mn(inp_shape=[224, 224, 3],
                                               num_branches=3)
        # asm_model.load_weights('')
        '''creating model'''
        model = cnn.get_model(
            None,
            self.arch,
            self.num_output_layers,
        )
        '''loading weights'''
        if self.weight is not None:
            model.load_weights(self.weight)
        '''compiling model'''
        model.compile(loss=self._generate_loss(),
                      optimizer=optimizer,
                      metrics=['mse', 'mae'],
                      loss_weights=self._generate_loss_weights())
        '''create train, validation, test data iterator'''
        x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = self._create_generators(
        )
        '''save logs'''
        log_file_name = 'log_' + datetime.now().strftime(
            "%Y%m%d-%H%M%S") + ".csv"
        metrics_names = model.metrics_names
        metrics_names.append('epoch')
        self.write_loss_log(log_file_name, metrics_names)
        ''''''
        self.STEPS_PER_EPOCH = len(x_train_filenames) // self.BATCH_SIZE

        for epoch in range(self.EPOCHS):
            loss = []
            for batch in range(self.STEPS_PER_EPOCH):
                try:
                    imgs, hm_g = self.get_batch_sample(batch,
                                                       x_train_filenames,
                                                       y_train_filenames)
                    # print(imgs.shape)
                    # print(hm_g.shape)

                    hm_predicted = asm_model.predict_on_batch(imgs)
                    loss = model.train_on_batch(imgs, [
                        hm_g, hm_predicted[0], hm_predicted[1], hm_predicted[2]
                    ])
                # print(f'Epoch: {epoch} \t batch:{batch} of {self.STEPS_PER_EPOCH}\t\n  moedl Loss: {loss}')
                except Exception as e:
                    print('catch:', e)
            loss.append(epoch)
            self.write_loss_log(log_file_name, loss)
            model.save_weights('weight_ep_' + str(epoch) + '_los_' +
                               str(loss) + '.h5')
Example #16
def get_features(img):

    model_weight_path = '../models/signet.pkl'
    model = CNNModel(signet, model_weight_path)

    processed_sig = np.array([preprocess_signature(img, canvas_size)])
    feature_vect = model.get_feature_vector_multiple(processed_sig,
                                                     layer='fc2')

    return feature_vect
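`canvas_size` is referenced inside `get_features` but never defined there, so it is assumed to be a module-level tuple; a short usage sketch under that assumption, borrowing the (150, 220) value and the `imread` loader from the `extract_features` snippet above:

from scipy.misc import imread  # assumed source of imread, matching the other snippets

canvas_size = (150, 220)  # assumed module-level value that get_features relies on

original = imread('signature.png', flatten=1)  # 'signature.png' is a placeholder path
features = get_features(original)
print(features.shape)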
Example #17
 def make_cord_discriminator_model(self):
     cnn = CNNModel()
     model = cnn.get_model(input_tensor=None,
                           arch=self.cord_discriminator_arch,
                           num_landmark=self.num_landmark,
                           input_shape=self.input_shape_cord_disc,
                           num_face_graph_elements=None)
     if self.cord_discriminator_weight is not None:
         model.load_weights(self.cord_discriminator_weight)
     return model
Example #18
 def make_hm_generator_model(self):
     cnn = CNNModel()
     model = cnn.get_model(input_tensor=None,
                           arch=self.hm_regressor_arch,
                           num_landmark=self.num_landmark,
                           input_shape=self.input_shape_hm_reg,
                           num_face_graph_elements=None)
     if self.hm_regressor_weight is not None:
         model.load_weights(self.hm_regressor_weight)
     return model
Example #19
    def __init__(self, n_channel, n_action, gpu=False, lr=0.05) -> None:

        print(torch.cuda.is_available())
        if torch.cuda.is_available():
            print(torch.cuda.get_device_name(0))
        # self.device = torch.device("cpu")
        self.device = torch.device("cuda:0") if gpu else torch.device("cpu")

        self.criterion = torch.nn.MSELoss()
        self.model = CNNModel(n_channel, n_action).to(self.device)
        self.model_target = CNNModel(n_channel, n_action).to(self.device)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr)
Example #20
 def create_reg_net(self, input_tensor, input_shape):
     """img & point ---> point_p"""
     cnn = CNNModel()
     model = cnn.mobileNet_v2_main_discriminator(input_tensor, input_shape)
     # model = cnn.create_shallow_reg(num_branches=self.num_points)
     model.compile(loss=keras.losses.binary_crossentropy,
                   optimizer=adam(lr=1e-2,
                                  beta_1=0.9,
                                  beta_2=0.999,
                                  decay=1e-5,
                                  amsgrad=False),
                   metrics=['accuracy'])
     return model
Example #21
    def test_all_results(self, weight_path, num_output_layers):
        self.has_pose = True
        self.num_output_layers = num_output_layers

        cnn = CNNModel()
        detect = None

        f_names = [f for f in listdir(weight_path) if isfile(join(weight_path, f))]
        # cofw_ds_asm

        res_wflw = ""
        res_ibug = ""
        res_cofw = ""

        for file_name in f_names:
            dataset_name = file_name.split('_')[0]
            arch = file_name.split('_')[1]
            if arch == 'ds':
                arch = 'ASMNet'
            else:
                arch = 'mobileNetV2'

            self.dataset_name = dataset_name

            if dataset_name == DatasetName.ibug:

                self.output_len = IbugConf.num_of_landmarks * 2
                model = cnn.get_model(train_images=None, arch=arch,
                                      num_output_layers=num_output_layers,
                                      output_len=IbugConf.num_of_landmarks * 2)
                model.load_weights(weight_path + '/' + file_name)
                res_ibug = self._test_on_W300(detect, model)

            elif dataset_name == DatasetName.cofw:
                self.output_len = CofwConf.num_of_landmarks * 2
                model = cnn.get_model(train_images=None, arch=arch,
                                      num_output_layers=num_output_layers,
                                      output_len=CofwConf.num_of_landmarks * 2)
                model.load_weights(weight_path + '/' + file_name)
                res_cofw = self._test_on_COFW(detect, model)

            elif dataset_name == DatasetName.wflw:
                self.output_len = WflwConf.num_of_landmarks * 2
                model = cnn.get_model(train_images=None, arch=arch,
                                      num_output_layers=num_output_layers,
                                      output_len=WflwConf.num_of_landmarks * 2)
                model.load_weights(weight_path + '/' + file_name)
                res_wflw = self._test_on_WFLW(detect, model)
        print(res_ibug)
        print(res_cofw)
        print(res_wflw)
Example #22
    def create_hm_generator_net(self):
        """img & hm -->  hm_p"""
        cnn = CNNModel()
        model = cnn.mn_asm_v1(None)

        model.compile(loss=keras.losses.mean_squared_error,
                      optimizer=adam(lr=1e-2,
                                     beta_1=0.9,
                                     beta_2=0.999,
                                     decay=1e-5,
                                     amsgrad=False),
                      metrics=['mse'])

        return model
Example #23
    def train_fit_gen(self, on_point):
        """train_fit_gen"""
        '''prepare callbacks'''
        callbacks_list = self._prepare_callback()
        """define optimizers"""
        optimizer = self._get_optimizer()
        '''creating model'''
        cnn = CNNModel()
        model = cnn.get_model(None, self.arch, self.num_output_layers)
        '''loading weights'''
        if self.weight is not None:
            model.load_weights(self.weight)
        '''create train, validation, test data iterator'''
        x_train_filenames, x_val_filenames, y_train_filenames, y_val_filenames = self._create_generators(
        )

        is_single = True
        if self.num_output_layers > 1:
            is_single = False

        if not on_point:
            my_training_batch_generator = CustomHeatmapGenerator(
                is_single, x_train_filenames, y_train_filenames,
                self.BATCH_SIZE, self.num_output_layers, self.accuracy)
            my_validation_batch_generator = CustomHeatmapGenerator(
                is_single, x_val_filenames, y_val_filenames, self.BATCH_SIZE,
                self.num_output_layers, self.accuracy)
        else:
            my_training_batch_generator = PWCustomHeatmapGenerator(
                is_single, x_train_filenames, y_train_filenames,
                self.BATCH_SIZE, self.num_output_layers, self.accuracy)
            my_validation_batch_generator = PWCustomHeatmapGenerator(
                is_single, x_val_filenames, y_val_filenames, self.BATCH_SIZE,
                self.num_output_layers, self.accuracy)
        '''compiling model'''
        model.compile(loss=self._generate_loss(),
                      optimizer=optimizer,
                      metrics=['mse', 'mae'],
                      loss_weights=self._generate_loss_weights())
        '''train Model '''
        print('< ========== Start Training ============= >')
        model.fit_generator(generator=my_training_batch_generator,
                            epochs=self.EPOCHS,
                            verbose=1,
                            validation_data=my_validation_batch_generator,
                            steps_per_epoch=self.STEPS_PER_EPOCH,
                            callbacks=callbacks_list,
                            use_multiprocessing=True,
                            workers=16,
                            max_queue_size=32)
Example #24
def init_common(args, bert_model):
    sent_embedder = BertManager(bert_model, args.device)
    conv_model = CNNModel(args.embed_size,
                          torch.device("cpu"),
                          n_filters=args.n_filters,
                          filter_sizes=args.kernels,
                          batch_norm_eval=True)
    # Build unified model
    model = Common(
        conv_model,
        conv_model.get_n_blocks() * args.n_filters,
        encoder=sent_embedder if args.finetune else lambda x: x,
    )
    return model, conv_model, sent_embedder
Example #25
    def train(self, weight_path):
        """

        :param weight_path:
        :return:
        """

        '''create loss'''
        c_loss = ASMLoss(dataset_name=self.dataset_name, accuracy=90)
        cnn = CNNModel()
        '''making models'''
        model = cnn.get_model(arch=self.arch, output_len=self.num_landmark)
        if weight_path is not None:
            model.load_weights(weight_path)

        '''create sample generator'''
        image_names, landmark_names, pose_names = self._create_generators()

        '''create train configuration'''
        step_per_epoch = len(image_names) // LearningConfig.batch_size

        '''start train:'''
        optimizer = tf.keras.optimizers.Adam(lr=1e-2, decay=1e-5)
        for epoch in range(LearningConfig.epochs):
            image_names, landmark_names, pose_names = shuffle(image_names, landmark_names, pose_names)
            for batch_index in range(step_per_epoch):
                '''load annotation and images'''
                images, annotation_gr, poses_gr = self._get_batch_sample(
                    batch_index=batch_index,
                    img_filenames=image_names,
                    landmark_filenames=landmark_names,
                    pose_filenames=pose_names)

                '''convert to tensor'''
                images = tf.cast(images, tf.float32)
                annotation_gr = tf.cast(annotation_gr, tf.float32)
                poses_gr = tf.cast(poses_gr, tf.float32)

                '''train step'''
                self.train_step(epoch=epoch,
                                step=batch_index,
                                total_steps=step_per_epoch,
                                model=model,
                                images=images,
                                annotation_gt=annotation_gr,
                                poses_gt=poses_gr,
                                optimizer=optimizer,
                                c_loss=c_loss)
            '''save weights'''
            model.save(self.save_path + self.arch + str(epoch) + '_' + self.dataset_name)
Example #26
def main():
    small_image_size = (80, 80)
    half_image_size = (40, 40)
    parking_geometry = load_parking_geometry("parking_map.txt")
    print("Preprocessing train images - OpenCv start")
    train_images, train_labels = train_parking(parking_geometry,
                                               small_image_size)
    print("Preprocessing test images - OpenCv start")
    test_images, test_labels = test_parking_for_neural(parking_geometry,
                                                       small_image_size)
    # training
    #for i in range (20):
    #    for j in range(4):
    #        print(f" dense-1 {40+i*10}, dense-2 {j*5+5}")
    #        model, score = init_CNN(train_images, train_labels, test_images, test_labels, half_image_size, 50+i*5, j*5+5)
    #        save_model(model, score[1])
    #        print(score[1])

    print("CNN")
    model = load_model('models/model0.9895833134651184.json',
                       'models/model0.9895833134651184.h5')
    cnn_model = CNNModel('CNN Model', model)
    test_parking(parking_geometry, small_image_size, cnn_model)

    print("SOBEL")
    sobel_model = SobelModel('Sobel Model', 216)
    test_parking(parking_geometry, small_image_size, sobel_model)
Example #27
    def test_pca_validity(self, pca_postfix):
        cnn_model = CNNModel()
        pca_utility = PCAUtility()
        tf_record_utility = TFRecordUtility()
        image_utility = ImageUtility()

        eigenvalues, eigenvectors, meanvector = pca_utility.load_pca_obj(
            dataset_name=DatasetName.ibug, pca_postfix=pca_postfix)

        lbl_arr, img_arr, pose_arr = tf_record_utility.retrieve_tf_record(
            tfrecord_filename=IbugConf.tf_train_path,
            number_of_records=30,
            only_label=False)
        for i in range(20):
            b_vector_p = self.calculate_b_vector(lbl_arr[i], True, eigenvalues,
                                                 eigenvectors, meanvector)
            lbl_new = meanvector + np.dot(eigenvectors, b_vector_p)

            labels_true_transformed, landmark_arr_x_t, landmark_arr_y_t = image_utility. \
                create_landmarks_from_normalized(lbl_arr[i], 224, 224, 112, 112)

            labels_true_transformed_pca, landmark_arr_x_pca, landmark_arr_y_pca = image_utility. \
                create_landmarks_from_normalized(lbl_new, 224, 224, 112, 112)

            image_utility.print_image_arr(i, img_arr[i], landmark_arr_x_t,
                                          landmark_arr_y_t)
            image_utility.print_image_arr(i * 1000, img_arr[i],
                                          landmark_arr_x_pca,
                                          landmark_arr_y_pca)
Example #28
    def __init__(self):

        super(NNModel, self).__init__()

        self.model = CNNModel()

        # Define loss function
        self.loss = nn.CrossEntropyLoss()
Example #29
def main():
    """Sup Main!"""
    models = [CNNModel(), RNNModel()]
    for model in models:
        model.build_model()
        train = TrainModel(model, n_epochs=200, batch_size=128)
        train.train_model()
        train.reset_model()
def load_model(model_save_fn, model_type):
    if model_type == 'lstm':
        model = LSTMModel.load(model_save_fn)
    elif model_type == 'rnn':
        model = RNNModel.load(model_save_fn)
    elif model_type == 'cnn':
        model = CNNModel.load(model_save_fn)
    else:
        raise ValueError('unknown model_type: %s' % model_type)
    return model