Example #1
    def create_generators(self):
        cnf = Config()
        fn_prefix = './file_names/'
        x_trains_path = fn_prefix + 'annotations_fns.npy'

        # build the list of annotation file names and cache it to disk
        train_filenames = self.create_annotation_name(
            annotation_path=cnf.annotation_path)
        save(x_trains_path, train_filenames)

        return train_filenames
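The file-name list is cached to disk so later runs can reuse it. A minimal sketch of that round-trip, assuming save/load here are numpy.save and numpy.load (file names below are illustrative):

    import numpy as np

    fns = np.array(['img_001.npy', 'img_002.npy'])
    np.save('annotations_fns.npy', fns)
    restored = np.load('annotations_fns.npy')
    assert list(restored) == list(fns)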
Example #2
    def get_discriminator_model(self):
        cnf = Config()

        inputs = tf.keras.Input(shape=(cnf.num_of_landmarks,))
        x = Dense(cnf.num_of_landmarks)(inputs)
        x = BatchNormalization()(x)
        x_1 = ReLU()(x)

        x = Dense(cnf.num_of_landmarks)(x_1)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Add()([x, x_1])

        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x_1 = ReLU()(x)

        x = Dense(128)(x_1)  # continue from the activated tensor, as in the first block
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Add()([x, x_1])

        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x_2 = ReLU()(x)

        x = Dense(256)(x_2)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Add()([x, x_2])

        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x_2 = ReLU()(x)

        x = Dense(256)(x_2)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Add()([x, x_2])

        # outputs = Dense(1, activation='softmax')(x)
        # outputs = Dense(1, activation='sigmoid')(x)
        outputs = Dense(1)(x)  # no activation: the discriminator emits raw logits

        model = tf.keras.Model(inputs=inputs,
                               outputs=outputs,
                               name="disc_model")
        model.summary()

        model_json = model.to_json()
        with open("./model_arch/Disc_model.json", "w") as json_file:
            json_file.write(model_json)
        return model
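Because the head has no activation, the discriminator returns raw logits. A minimal sketch of a matching loss, assuming (not confirmed by this listing) a from_logits binary cross-entropy:

    import tensorflow as tf

    bce = tf.keras.losses.BinaryCrossentropy(from_logits=True)
    real_logits = tf.constant([[2.3], [1.1]])
    fake_logits = tf.constant([[-1.7], [-0.4]])
    d_loss = (bce(tf.ones_like(real_logits), real_logits) +
              bce(tf.zeros_like(fake_logits), fake_logits))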
Example #3
    def test(self):
        cnf = Config()
        '''making models'''
        net_model = NetworkModels()
        model_gen = net_model.get_generator_model()  # built for completeness; unused below

        model_disc = net_model.get_discriminator_model()
        model_disc.load_weights('./models/last_we_model_disc_.h5')
        # model_disc = tf.keras.models.load_model('./models/model_disc1999_.h5')
        '''noise'''
        test_sample = tf.random.normal([9, cnf.num_of_landmarks])
        out_fake = model_disc(test_sample)  # raw logits, shape (9, 1)
        print('------------')
Example #4
    def get_batch_sample(self, batch_index, x_train_filenames):
        cnf = Config()
        pn_tr_path = cnf.annotation_path

        batch_x = x_train_filenames[batch_index * cnf.batch_size:
                                    (batch_index + 1) * cnf.batch_size]
        # pn_batch = np.array([self._load_and_create_hm(pn_tr_path + file_name) for file_name in batch_x])
        pn_batch = np.array([
            self._load_and_normalize(pn_tr_path + file_name)
            for file_name in batch_x
        ])
        pn_batch = tf.cast(pn_batch, tf.float32)
        return pn_batch
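The slice picks the batch_index-th window of batch_size file names. An illustrative run with assumed values:

    x_train_filenames = [f'ann_{i}.npy' for i in range(12)]
    batch_size, batch_index = 4, 2
    batch_x = x_train_filenames[batch_index * batch_size:
                                (batch_index + 1) * batch_size]
    # batch_x == ['ann_8.npy', 'ann_9.npy', 'ann_10.npy', 'ann_11.npy']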
Example #5
    def create_landmarks(self, normal_lnd):
        cnf = Config()
        normal_lnd = np.array(normal_lnd)
        # landmarks_splited = _landmarks.split(';')
        landmark_arr_x = []
        landmark_arr_y = []

        # map each normalized (x, y) pair back to pixel coordinates
        for j in range(0, len(normal_lnd), 2):
            x = float(normal_lnd[j] *
                      cnf.image_input_size) + cnf.image_input_size / 2.0
            y = float(normal_lnd[j + 1] *
                      cnf.image_input_size) + cnf.image_input_size / 2.0
            landmark_arr_x.append(x)
            landmark_arr_y.append(y)

        return landmark_arr_x, landmark_arr_y
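A worked example of the mapping, with an assumed image_input_size of 224:

    image_input_size = 224
    normal_lnd = [0.1, -0.25]  # one normalized (x, y) pair
    x = normal_lnd[0] * image_input_size + image_input_size / 2.0  # 134.4
    y = normal_lnd[1] * image_input_size + image_input_size / 2.0  # 56.0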
Example #6
    def _load_and_normalize(self, point_path):
        cnf = Config()
        annotation = load(point_path)
        '''test print '''
        # self.test_image_print(img_name='1', img=np.zeros([224,224,3]), landmarks=annotation)
        '''normalize landmarks based on hyperface method'''
        width = 1  # cnf.image_input_size
        height = 1  # cnf.image_input_size
        x_center = width / 2
        y_center = height / 2
        annotation_norm = []
        for p in range(0, len(annotation), 2):
            annotation_norm.append((x_center - annotation[p]) / width)
            annotation_norm.append((y_center - annotation[p + 1]) / height)
        '''denormalize for test'''
        # landmarks_x, landmarks_y = self.create_landmarks(annotation_norm)
        # plt.figure()
        # plt.scatter(x=landmarks_x[:], y=landmarks_y[:], c='#000000', s=15)
        # plt.savefig('11.png')
        ''''''

        return annotation_norm
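With width = height = 1 the transform reduces to p -> 0.5 - p per coordinate, for example:

    annotation = [0.25, 0.75]  # one (x, y) landmark pair
    annotation_norm = [0.5 - annotation[0], 0.5 - annotation[1]]  # [0.25, -0.25]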
Example #7
    def train(self, train_gen, train_disc):
        """Build both models, then run the epoch/batch GAN training loop."""
        cnf = Config()
        '''create loss obj'''
        c_loss = CustomLosses()
        '''create summary writer'''
        summary_writer = tf.summary.create_file_writer(
            "./train_logs/fit/" + datetime.now().strftime("%Y%m%d-%H%M%S"))
        '''making models'''
        net_model = NetworkModels()
        model_gen = net_model.get_generator_model()
        model_gen.trainable = train_gen
        # model_gen.load_weights('./models/last_we_model_gen39_.h5')

        model_disc = net_model.get_discriminator_model()
        model_disc.trainable = train_disc
        # model_disc.load_weights('./models/last_we_model_disc_.h5')
        '''optimizer'''
        opti_gen = tf.keras.optimizers.Adam(learning_rate=1e-4)
        opti_disc = tf.keras.optimizers.Adam(learning_rate=1e-8, beta_1=0.5)
        '''create sample generator'''
        dhp = DataHelper()
        x_train_filenames = dhp.create_generators()
        '''create train configuration'''
        step_per_epoch = len(x_train_filenames) // cnf.batch_size
        # step_per_epoch = 5
        '''start train process'''
        # NOTE: these assignments override the train_gen/train_disc arguments,
        # so both networks are always trainable in this version.
        train_gen = True
        train_disc = True
        model_gen.trainable = train_gen
        model_disc.trainable = train_disc

        for epoch in range(cnf.epochs):
            # An earlier variant alternated training here: for the first 500
            # epochs it flipped train_gen/train_disc every 30 epochs, updated
            # each model's trainable flag, and printed which network was
            # active. That schedule is disabled in this version.

            for batch_index in range(step_per_epoch):
                '''creating noises'''
                noise = tf.random.normal(
                    [cnf.batch_size, cnf.noise_input_size])
                '''load annotation and images'''
                real_data = dhp.get_batch_sample(
                    batch_index=batch_index,
                    x_train_filenames=x_train_filenames)
                ''''''
                self.train_step(epoch=epoch,
                                step=batch_index,
                                real_data=real_data,
                                model_gen=model_gen,
                                model_disc=model_disc,
                                opti_gen=opti_gen,
                                opti_disc=opti_disc,
                                cnf=cnf,
                                c_loss=c_loss,
                                noise=noise)
            '''save sample images:'''
            if (epoch + 1) % 10 == 0:
                test_sample = tf.random.normal([9, cnf.noise_input_size])
                self.save_sample_images(model=model_gen,
                                        epoch=epoch,
                                        test_input=test_sample,
                                        dhp=dhp)
            '''save weights'''
            if (epoch + 1) % 1000 == 0:
                model_gen.save('./models/model_gen' + str(epoch) + '_.h5')
                model_disc.save('./models/model_disc' + str(epoch) + '_.h5')
        '''save last weights'''
        model_gen.save('./models/model_gen_LAST.h5')
        model_disc.save('./models/model_disc_LAST.h5')
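The loop delegates the actual update to self.train_step, which is not part of this listing. Below is a minimal sketch of a standard GAN step matching that call signature; the loss-method names on c_loss are illustrative assumptions, not the repository's API:

    def train_step(self, epoch, step, real_data, model_gen, model_disc,
                   opti_gen, opti_disc, cnf, c_loss, noise):
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            fake_data = model_gen(noise, training=True)
            real_logits = model_disc(real_data, training=True)
            fake_logits = model_disc(fake_data, training=True)
            gen_loss = c_loss.gen_loss(fake_logits)                 # assumed helper
            disc_loss = c_loss.disc_loss(real_logits, fake_logits)  # assumed helper
        gen_grads = gen_tape.gradient(gen_loss, model_gen.trainable_variables)
        disc_grads = disc_tape.gradient(disc_loss, model_disc.trainable_variables)
        opti_gen.apply_gradients(zip(gen_grads, model_gen.trainable_variables))
        opti_disc.apply_gradients(zip(disc_grads, model_disc.trainable_variables))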
Example #8
    def get_generator_model(self):
        cnf = Config()

        inputs = tf.keras.Input(shape=(cnf.noise_input_size, ))
        x = Dense(cnf.noise_input_size)(inputs)
        x_1 = BatchNormalization()(x)

        x = Dense(cnf.noise_input_size)(x_1)  # branch from the normalized tensor that also feeds the skip
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(cnf.noise_input_size)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(cnf.noise_input_size)(x)
        x = BatchNormalization()(x)
        x = Add()([x, x_1])
        x = ReLU()(x)
        # 128-unit residual stage

        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(128)(x)
        x_1 = BatchNormalization()(x)

        x = Dense(128)(x_1)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x = Add()([x, x_1])
        x = ReLU()(x)

        # second 128-unit residual stage
        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(128)(x)
        x_1 = BatchNormalization()(x)

        x = Dense(128)(x_1)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(128)(x)
        x = BatchNormalization()(x)
        x = Add()([x, x_1])
        x = ReLU()(x)

        # 256-unit residual stage
        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(256)(x)
        x_2 = BatchNormalization()(x)

        x = Dense(256)(x_2)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x = Add()([x, x_2])
        x = ReLU()(x)

        # second 256-unit residual stage
        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(256)(x)
        x_2 = BatchNormalization()(x)

        x = Dense(256)(x_2)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x = ReLU()(x)
        x = Dense(256)(x)
        x = BatchNormalization()(x)
        x = Add()([x, x_2])
        x = ReLU()(x)

        # output head: dropout, then a linear projection to the landmark vector
        x = Dropout(0.2)(x)

        outputs = Dense(cnf.num_of_landmarks, activation='linear')(x)
        model = tf.keras.Model(inputs=inputs,
                               outputs=outputs,
                               name="gen_model")
        model.summary()

        model_json = model.to_json()
        with open("./model_arch/Gen_model.json", "w") as json_file:
            json_file.write(model_json)
        return model
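Illustrative sampling from the generator once trained; the config values are assumed:

    cnf = Config()
    model_gen = NetworkModels().get_generator_model()
    noise = tf.random.normal([1, cnf.noise_input_size])
    fake_landmarks = model_gen(noise, training=False)  # shape: (1, num_of_landmarks)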