Example #1
    def get_hessian(self, X, U, X_df):
        '''
            Get the full hessian by repeated calls to Hessian_Matvec
            Since PINNs are often small, this is feasible.
            Warning! This operation scales quadratically in time and space!
        '''

        print(
            "Warning, trying to calculate the full Hessian is infeasible for large networks!"
        )

        if self.use_differential_points:
            feed_dict = {self.X: X, self.U: U, self.X_df: X_df}
        else:
            feed_dict = {self.X: X, self.U: U}

        # We use repeated runs to avoid adding gradient ops for every
        # element of the hessian
        n = int(self.grads_flat.shape[0])
        H = np.empty((n, n))
        progbar = Progbar(n)
        for i in range(n):
            vec = np.zeros(n, dtype=np.float32)
            vec[i] = 1.0
            feed_dict[self.hessian_vector] = vec
            h_row = self.sess.run(self.hessian_matvec, feed_dict)
            h_row = util.unwrap(h_row)
            H[i, :] = h_row[:]
            progbar.update(i + 1)

        # Explicitly symmetrize so that e.g. eigenvalues are always real
        for i in range(n):
            for j in range(i + 1, n):
                H[j, i] = H[i, j]
        return H
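The row-by-row construction above is not TensorFlow-specific. A minimal self-contained sketch of the same idea, assuming a hypothetical hvp(v) callable that returns the Hessian-vector product H @ v:

import numpy as np

def full_hessian_from_matvecs(hvp, n):
    # Probe with basis vectors e_i; each call recovers one row of H.
    # Quadratic in time and memory, so only viable for small n.
    H = np.empty((n, n))
    for i in range(n):
        e = np.zeros(n)
        e[i] = 1.0
        H[i, :] = hvp(e)
    # Symmetrize to remove floating-point asymmetry
    return 0.5 * (H + H.T)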
Example #2
    def prepare(self):
        self._create_own_dir()
        unzip_dir = self.unzip_dir
        image_names = os.listdir(unzip_dir)
        labels_dict = dict()

        face_detector = FaceDetector()
        face_aligner = FaceAligner(padding=0.1)
        progbar = Progbar(target=len(image_names))

        for image_name in image_names:
            progbar.add(1)
            age = image_name.split('_')[3]
            labels_dict[image_name] = int(age)

            image = read_image_like_rgb(os.path.join(unzip_dir, image_name))
            image = img_as_ubyte(exposure.equalize_adapthist(image))
            face_bboxes = face_detector.safe_detect_face_bboxes(image, include_cnn=False).clip(min=0)

            if face_bboxes.shape[0] == 0:
                cropped_image = self._crop_center(image, image.shape[0]//2, image.shape[1]//2)

            else:
                cropped_image = face_aligner.align_and_crop(image, bboxes=face_bboxes, bbox_number=0)

            image_path = os.path.join(self.data_dir, self.dataset_name, 'images', image_name)
            imsave(image_path, cropped_image)

        labels_path = os.path.join(self.data_dir, self.dataset_name, 'labels_dict.npy')
        np.save(labels_path, labels_dict)
Example #3
    def load_shots(self,
                   shot_list,
                   is_inference=False,
                   as_list=False,
                   num_samples=np.Inf):
        X = []
        Y = []
        Disr = []
        print("loading...")
        pbar = Progbar(len(shot_list))

        sample_prob_d, sample_prob_nd = self.get_sample_probs(
            shot_list, num_samples)
        fn = partial(self.load_shot,
                     is_inference=is_inference,
                     sample_prob_d=sample_prob_d,
                     sample_prob_nd=sample_prob_nd)
        pool = mp.Pool()
        print('loading data in parallel on {} processes'.format(
            pool._processes))
        for x, y, disr in pool.imap(fn, shot_list):
            X.append(x)
            Y.append(y)
            Disr.append(disr)
            pbar.add(1.0)
        pool.close()
        pool.join()
        return X, Y, np.array(Disr)
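Examples #3 and #4 share a pattern worth noting: multiprocessing.Pool.imap yields results in submission order as workers finish, so pbar.add(1) tracks real progress. A minimal standalone sketch of that pattern (work and scale are illustrative names):

import multiprocessing as mp
from functools import partial
from tensorflow.keras.utils import Progbar

def work(x, scale=1.0):
    return x * scale

if __name__ == '__main__':
    items = list(range(100))
    fn = partial(work, scale=2.0)
    pbar = Progbar(len(items))
    results = []
    with mp.Pool() as pool:
        # imap yields one result at a time, keeping input order
        for r in pool.imap(fn, items):
            results.append(r)
            pbar.add(1)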
Example #4
def make_predictions(conf, shot_list, loader, custom_path=None):
    feature_extractor = FeatureExtractor(loader)
    # save_prepath = feature_extractor.get_save_prepath()
    if custom_path is None:
        model_path = conf['paths']['model_save_path'] + \
            model_filename  # save_prepath + model_filename
    else:
        model_path = custom_path
    model = joblib.load(model_path)
    # shot_list = shot_list.random_sublist(10)

    y_prime = []
    y_gold = []
    disruptive = []

    pbar = Progbar(len(shot_list))
    fn = partial(predict_single_shot,
                 model=model,
                 feature_extractor=feature_extractor)
    pool = mp.Pool()
    print('predicting in parallel on {} processes'.format(pool._processes))
    # for (y_p, y, disr) in map(fn, shot_list):
    for (y_p, y, disr) in pool.imap(fn, shot_list):
        # y_p, y, disr = predict_single_shot(model, feature_extractor,shot)
        y_prime += [np.expand_dims(y_p, axis=1)]
        y_gold += [np.expand_dims(y, axis=1)]
        disruptive += [disr]
        pbar.add(1.0)

    pool.close()
    pool.join()
    return y_prime, y_gold, disruptive
Example #5
    def train_Adam(self,
                   X: np.ndarray,
                   U: np.ndarray,
                   X_df=None,
                   epochs=2000,
                   learning_rate=1e-3):
        '''
            Train using Full-Batch Adam for the given number of iterations

            Parameters:
                X (np.ndarray) : (N,d_in) array of domain points
                U (np.ndarray) : (N,d_out) array of solution points such that U = F(X)
                X_df (Optional[np.ndarray]) : (M,d_in) array of domain points where U is 
                    unknown but the PINN residual should still be evaluated.
                epochs (int) : Number of epochs to train for
                learning_rate (float) : If use_dynamic_learning_rate=True, this will 
                    be the learning rate used by the optimizer
        '''

        if self.use_differential_points:
            feed_dict = {self.X: X, self.U: U, self.X_df: X_df}
        else:
            feed_dict = {self.X: X, self.U: U}

        if self.learning_rate is not None:
            feed_dict[self.learning_rate] = learning_rate

        progbar = Progbar(epochs)
        for i in range(epochs):
            _, loss = self.sess.run([self.optimizer_Adam, self.loss],
                                    feed_dict)

            progbar.update(i + 1, [("loss", loss)])
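A hedged usage sketch for the method above, assuming pinn is an instance of this (hypothetical) PINN class and the arrays follow the documented shapes:

import numpy as np

# pinn: hypothetical instance of the PINN class defined above.
# 100 supervised points in a 2-D domain with 1-D outputs, plus
# 400 collocation points where only the PINN residual is evaluated.
X = np.random.rand(100, 2).astype(np.float32)
U = np.random.rand(100, 1).astype(np.float32)
X_df = np.random.rand(400, 2).astype(np.float32)

pinn.train_Adam(X, U, X_df=X_df, epochs=2000, learning_rate=1e-3)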
Example #6
    def train(self, train_gen, val_gen, epochs=10):
        """Trains the gan

        Arguments:
            train_gen {Sequence} -- Train data generator
            val_gen {Sequence} -- Validation data generator

        Keyword Arguments:
            epochs {int} -- Epochs (default: {10})

        """

        for epoch in range(1, epochs + 1):

            print(f"Epoch {epoch}/{epochs}")

            num_batches = len(train_gen)
            progress_bar = Progbar(target=num_batches)

            for index, (X_train, y_train) in enumerate(train_gen):
                self.train_step(X_train, y_train)
                progress_bar.update(index + 1)

            display.clear_output(wait=True)
            self.generate_and_plot_images(epoch, val_gen[epoch][0])
Example #7
File: ycb_video.py  Project: zuoguoqing/paz
    def load_data(self):
        sample_filenames = self._get_sample_filenames()
        progress_bar, data = Progbar(len(sample_filenames)), []
        for sample_arg, sample_id in enumerate(sample_filenames):
            data.append(self._load_sample(sample_id))
            progress_bar.update(sample_arg + 1)
        return data
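Example #7 is the pattern shared by most snippets here: construct Progbar with the total count, then call update(i + 1) once per item. A minimal runnable sketch:

import time
from tensorflow.keras.utils import Progbar

items = range(50)
progress_bar = Progbar(len(items))
for i, _ in enumerate(items):
    time.sleep(0.01)  # stand-in for real work
    progress_bar.update(i + 1)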
Example #8
def evaluate(model, dataset, num_val, num_cls):
    # if use fastnms
    # if use cross class nms

    # if eval image
    # if eval images
    # if eval video

    # if not display or benchmark
    # For mAP evaluation, creating AP_Object for every class per iou_threshold
    ap_data = {
        'box': [[APObject() for _ in range(num_cls)] for _ in iou_thresholds],
        'mask': [[APObject() for _ in range(num_cls)] for _ in iou_thresholds]
    }

    # detection object made from prediction output. for the purpose of creating json
    detections = Detections()

    # iterate the whole dataset to save TP, FP, FN
    i = 0
    progbar = Progbar(num_val)
    tf.print("Evaluating...")
    for image, labels in dataset:
        i += 1
        output = model(image, training=False)
        dets = model.detect(output)
        # update ap_data or detections, depending on whether you want to
        # save to JSON or just build the validation table
        prep_metrics(ap_data, dets, image, labels, detections)
        progbar.update(i)

    # if to json
    # save detection to json

    # TODO: if not training, save ap_data; else calc_map
    return calc_map(ap_data, num_cls)
Example #9
File: utils.py  Project: ho4040/pbt-keras
def train_population(population,
                     x,
                     y,
                     batch_size,
                     steps,
                     steps_save=100,
                     validation_split=0.3):
    # Split data into train and validation sets. Set the seed to get the
    # same splits in subsequent calls
    x_train, x_val, y_train, y_val = train_test_split(
        x, y, test_size=validation_split, random_state=42)

    population_size = len(population)
    batch_generator = BatchGenerator(x_train, y_train, batch_size)

    results = defaultdict(list)
    stateful_metrics = ['min_loss', 'max_loss', 'mean_loss']
    for metric, _ in population[0].eval_metrics:
        stateful_metrics.extend(
            [m.format(metric) for m in ['min_{}', 'max_{}', 'mean_{}']])
    progbar = Progbar(steps, stateful_metrics=stateful_metrics)

    for step in range(1, steps + 1):
        x, y = batch_generator.next()
        for idx, member in enumerate(population):
            # One step of optimisation using hyperparameters of 'member'
            member.step_on_batch(x, y)
            # Model evaluation
            loss = member.eval_on_batch(x_val, y_val)
            # If optimised for 'STEPS_READY' steps
            if member.ready():
                # Use the rest of population to find better solutions
                exploited = member.exploit(population)
                # If new weights != old weights
                if exploited:
                    # Produce new hyperparameters for 'member'
                    member.explore()
                    loss = member.eval_on_batch(x_val, y_val)

            if step % steps_save == 0 or step == steps:
                results['model_id'].append(str(member))
                results['step'].append(step)
                results['loss'].append(loss)
                results['loss_smoothed'].append(member.loss_smoothed())
                for metric, value in member.eval_metrics:
                    results[metric].append(value)
                for h, v in member.get_hyperparameter_config().items():
                    results[h].append(v)

        # Get recently added losses to show in the progress bar
        all_losses = results['loss']
        recent_losses = all_losses[-population_size:]
        if recent_losses:
            metrics = _statistics(recent_losses, 'loss')
            for metric, _ in population[0].eval_metrics:
                metrics.extend(
                    _statistics(results[metric][-population_size:], metric))
            progbar.update(step, metrics)

    return pd.DataFrame(results)
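Example #9 passes stateful_metrics so those values are displayed as given rather than averaged over the bar's lifetime. A minimal sketch of the difference, with made-up values:

import random
from tensorflow.keras.utils import Progbar

steps = 10
# 'mean_loss' is shown as-is on each update; 'loss' is averaged
# across all updates so far.
progbar = Progbar(steps, stateful_metrics=['mean_loss'])
for step in range(1, steps + 1):
    loss = random.random()
    progbar.update(step, [('loss', loss), ('mean_loss', loss)])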
Example #10
File: fat.py  Project: zuoguoqing/paz
    def load_data(self):
        scene_names = glob(self.path + 'mixed/*')
        image_paths, label_paths = [], []
        for scene_name in scene_names:
            scene_image_paths, scene_label_paths = [], []
            for image_side in ['left', 'right']:
                image_names = glob(scene_name + '/*%s.jpg' % image_side)
                side_image_paths = sorted(image_names, key=self._base_number)
                label_names = glob(scene_name + '/0*%s.json' % image_side)
                side_label_paths = sorted(label_names, key=self._base_number)
                scene_image_paths = scene_image_paths + side_image_paths
                scene_label_paths = scene_label_paths + side_label_paths
            image_paths = image_paths + scene_image_paths
            label_paths = label_paths + scene_label_paths

        self.data = []
        progress_bar = Progbar(len(image_paths))
        for sample_arg, sample in enumerate(zip(image_paths, label_paths)):
            image_path, label_path = sample
            if not self._valid_name_match(image_path, label_path):
                raise ValueError('Invalid name match:', image_path, label_path)
            boxes = self._extract_boxes(label_path)
            if boxes is None:
                continue
            self.data.append({'image': image_path, 'boxes': boxes})
            progress_bar.update(sample_arg + 1)
        return self.data
Example #11
    def pre_fit(self, batches, epochs=100):
        """Pre-trains the model.

        Args:
            batches (Dataset): Pre-training batches containing samples.
            epochs (int): The maximum number of pre-training epochs.

        """

        logger.info('Pre-fitting generator ...')

        # Gather the number of batches
        n_batches = tf.data.experimental.cardinality(batches).numpy()

        # Iterate through all generator epochs
        for e in range(epochs):
            logger.info('Epoch %d/%d', e + 1, epochs)

            # Reset the loss metric state for this epoch
            self.G_loss.reset_states()

            # Defining a customized progress bar
            b = Progbar(n_batches, stateful_metrics=['loss(G)'])

            # Iterate through all possible pre-training batches
            for x_batch, y_batch in batches:
                # Performs the optimization step over the generator
                self.G_pre_step(x_batch, y_batch)

                # Adding corresponding values to the progress bar
                b.add(1, values=[('loss(G)', self.G_loss.result())])

            logger.file('Loss(G): %s', self.G_loss.result().numpy())
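Note that the loop above advances the bar with Progbar.add rather than update: add moves the bar by a relative increment, while update sets an absolute position. A minimal sketch of the equivalence:

from tensorflow.keras.utils import Progbar

n = 5
bar_a = Progbar(n)
for i in range(n):
    bar_a.update(i + 1)  # absolute position

bar_b = Progbar(n)
for _ in range(n):
    bar_b.add(1)         # relative increment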
Example #12
    def prepare(self):
        self._create_own_dir()
        unzip_dir = os.path.join(self.unzip_dir, 'wiki')
        mat_file = loadmat(os.path.join(unzip_dir, 'wiki.mat'))
        labels_dict = self._parse_mat(mat_file)

        face_detector = FaceDetector()
        face_aligner = FaceAligner(padding=0.1)
        progbar = Progbar(target=len(labels_dict))

        for image_subpath in list(labels_dict.keys()):
            progbar.add(1)
            image = read_image_like_rgb(os.path.join(unzip_dir, image_subpath))
            image = img_as_ubyte(exposure.equalize_adapthist(image))
            face_bboxes = face_detector.safe_detect_face_bboxes(image, include_cnn=False).clip(min=0)

            if face_bboxes.shape[0] == 0:
                continue

            else:
                cropped_image = face_aligner.align_and_crop(image, bboxes=face_bboxes, bbox_number=0)
                image_name = image_subpath.split('/')[1]
                image_path = os.path.join(self.data_dir, self.dataset_name, 'images', image_name)
                imsave(image_path, cropped_image)

        labels_path = os.path.join(self.data_dir, self.dataset_name, 'labels_dict.npy')
        labels_dict = {key.split('/')[1]: value for key, value in labels_dict.items()}
        np.save(labels_path, labels_dict)
Example #13
    def _train_depth(self, iterator, epochs, steps_per_epoch,
                     weights_filepath):
        print(f"Training network {self.name}.")
        for epoch in range(epochs):
            print(f"Epoch {epoch}:")
            progbar = Progbar(
                steps_per_epoch,
                verbose=1,
                stateful_metrics=[m.name for m in self._all_metrics])
            for step in range(1, steps_per_epoch + 1):
                data = next(iterator)
                self._train_depth_step(data)

                if step % 1 == 0:
                    progbar.update(step,
                                   values=[(m.name, m.result().numpy())
                                           for m in self._all_metrics])

            print("Saving model")
            self._model.save_weights(
                filepath=weights_filepath + '-' + self._monitored_metric.name +
                '-' + '{a:.3f}'.format(
                    a=self._monitored_metric.result().numpy()[0]) + '.hdf5')

            for m in self._all_metrics:
                m.reset_states()
Example #14
    def transform(self, texts, verbose=False):

        if isinstance(texts, str):
            texts = [texts]

        texts = list(map(self._preprocessor, texts))
        n_samples = len(texts)

        blank_idx = []
        for i, text in enumerate(texts):
            if len(text) == 0:
                texts[i] = self._space_escape
                blank_idx.append(i)

        bar = Progbar(n_samples)

        mats = []
        for text_batch in batch(texts, self.batch_size):

            self._data_container.set(text_batch)
            features = next(self._predict_fn)['output']
            mats.append(features)

            if verbose:
                bar.add(len(text_batch))

        mat = np.vstack(mats)
        if len(blank_idx):
            blank_idx = np.array(blank_idx)
            mat[blank_idx] = 0.0

        return mat
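The batch helper used above is not shown in the snippet; a minimal sketch of one plausible implementation that yields fixed-size chunks from a sequence:

def batch(seq, batch_size):
    # Yield successive chunks of at most batch_size items.
    for start in range(0, len(seq), batch_size):
        yield seq[start:start + batch_size]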
Example #15
class ProgbarLogger(Callback):
    def on_train_begin(self, logs=None):
        #print('Epoch %d/%d' % (epoch + 1, self.epochs))
        self.target = self.params['epochs']
        self.stateful_metrics = ['loss', 'roc_auc', 'roc_auc_max']
        self.roc_auc_max = 0
        self.progbar = Progbar(self.target,
                               verbose=1,
                               stateful_metrics=self.stateful_metrics)
        self.seen = 0

    def on_epoch_begin(self, epoch, logs=None):
        if self.seen < self.target:
            self.log_values = []

    def on_epoch_end(self, epoch, logs=None):

        self.seen += 1
        logs = logs or {}

        for k in logs:
            if k in ['loss', 'roc_auc', 'roc_auc_max']:
                self.log_values.append((k, logs[k]))

        if self.seen < self.target:
            self.progbar.update(self.seen, self.log_values)

    def on_train_end(self, logs=None):
        # Necessary to end line
        print('')

        return
Example #16
    def on_epoch_end(self, epoch, logs=None):

        generator = iter(self.validation_data)

        y_true, y_pred = [], []
        print(f'\nValidation {epoch+1}')
        pbar = Progbar(self.validation_steps)
        for it in range(self.validation_steps):
            x, y = next(generator)

            y_pred.extend(self.adj_fx(self.model.predict(x)))
            y_true.extend(self.adj_fy(y))

            pbar.update(it + 1)

        print('\nClassification report:')
        print(classification_report(y_true, y_pred, zero_division=0))

        print('\nConfusion matrix:')
        print(confusion_matrix(y_true, y_pred, normalize='pred'))

        # check whether the macro-averaged precision improved
        report = classification_report(y_true,
                                       y_pred,
                                       output_dict=True,
                                       zero_division=0)
        macro_precision = report['macro avg']['precision']

        if macro_precision > self.best:
            print(
                f'\nEpoch {epoch+1}: precision improved from {self.best:.3f} to {macro_precision:.3f}\n'
            )

            self.best = macro_precision
            self.model.save(self.path_to_save)
Example #17
    def train_network(self, dataset, test_data, test_data_size, loss_function, epochs, tree_loss_weight, opt, size, interpret=False):
        # Inference has to be done on the original network and the full NBDT
        # The network should be pretrained on the dataset of interest

        # iterate through dataset
            # for each member of the dataset
                # make prediction with model
                # make prediction with NBDT
                # compute the loss
                # update the neural network parameters

        training_loss_results = []
        #tree_loss = TreeSupLoss(loss_function, tree_loss_weight)
        self.model.layers[-1].trainable = False
        for epoch in range(epochs):

            epoch_loss_avg = tf.keras.metrics.Mean()
            epoch_nbdt_loss_avg = tf.keras.metrics.Mean()
            epoch_net_loss_avg = tf.keras.metrics.Mean()
            epoch_acc_avg = tf.keras.metrics.CategoricalAccuracy()

            i = 0
            progress = Progbar(target=size)
            for x, y in dataset.take(size).batch(1):
                i = i + 1
                sample = x 
                nbdt_loss, net_loss, loss, grad = self.gradient(sample, y, loss_function, tree_loss_weight)
                opt.apply_gradients(zip(grad[0], self.model.trainable_variables))
                self.backbone = Sequential(self.model.layers[:-1])
                epoch_loss_avg.update_state(loss)
                epoch_nbdt_loss_avg.update_state(nbdt_loss)
                epoch_net_loss_avg.update_state(net_loss)
                nbdt_pred = self.nbdt_predict(sample, interpret)
                epoch_acc_avg.update_state(y, nbdt_pred)
                progress.update(i, values=[('nbdt_loss', epoch_nbdt_loss_avg.result()),
                                           ('net_loss', epoch_net_loss_avg.result()),
                                           ('loss', epoch_loss_avg.result()),
                                           ('acc', epoch_acc_avg.result())])

            #training_loss_results.append(epoch_loss_avg.result().numpy())
            print()
            print("Epoch {:03d}: Loss: {:.3f}, Accuracy: {:.3%}".format(
                epoch, epoch_loss_avg.result(), epoch_acc_avg.result()))

            test_acc, test_auc = self.evaluate(test_data.batch(1), size=test_data_size)
            print('VAL ACC: {:.3%} VAL AUC: {:.3}'.format(test_acc, test_auc))
            """
            if test_acc >= .90:
                print("SAVING MODEL")
                self.model.save("nn_nbdt_test_acc-{:.3f}_epoch-{:03d}_adam".format(test_acc, epoch))

            print()
            self.model.save("nn_nbdt_epoch-{:03d}_adam".format(epoch))
            """

        return training_loss_results
Example #18
    def evaluate(self, test_iterator, steps):
        progbar = Progbar(steps, verbose=1, stateful_metrics=[m.name for m in self._all_metrics])
        for step in range(1, steps + 1):
            data = next(test_iterator)
            self._test_step(data)

            if step % 1 == 0:
                progbar.update(step, values=[(m.name, m.result().numpy()) for m in self._all_metrics])
Example #19
    def reset(self):
        """ Reset statistics """
        self.interval_start = timeit.default_timer()
        self.progbar = Progbar(target=self.interval)
        self.metrics = []
        self.infos = []
        self.info_names = None
        self.episode_rewards = []
Example #20
    def _train_stochastic_optimizer(self,
                                    optimizer_opp,
                                    X,
                                    U,
                                    X_df=None,
                                    batch_size=128,
                                    epochs=10):
        '''
            Generic custom training loop for stochastic optimizers. 
            Replace optimizer_opp with e.g. RMSProp.minimize() for a different
            stochastic optimizer.
        '''

        if self.use_differential_points:
            assert (X_df is not None)

            assert (X_df.shape[0] >= X.shape[0])

        progbar = Progbar(epochs, stateful_metrics=["loss_full"])
        for epoch in range(epochs):

            X_s, U_s = shuffle(X, U)

            if X_df is not None:
                X_df_s = shuffle(X_df)
                dataset_size = X_df.shape[0]
            else:
                dataset_size = X.shape[0]

            b_c = 0
            for b in range(0, dataset_size, batch_size):

                if X_df is not None:
                    b_c_last = b_c
                    b_c = b % X_s.shape[0]

                    # X and X_df are typically different sizes,
                    # so we shuffle them at different times
                    if b_c_last > b_c:
                        X_s, U_s = shuffle(X, U)
                    X_b = X_s[b_c:(b_c + batch_size), :]
                    U_b = U_s[b_c:(b_c + batch_size), :]
                    X_df_b = X_df_s[b:(b + batch_size), :]
                    feed_dict = {self.X: X_b, self.U: U_b, self.X_df: X_df_b}
                else:
                    X_b = X_s[b:(b + batch_size), :]
                    U_b = U_s[b:(b + batch_size), :]
                    feed_dict = {self.X: X_b, self.U: U_b}

                _, loss = self.sess.run([optimizer_opp, self.loss], feed_dict)

            if X_df is not None:
                feed_dict = {self.X: X, self.U: U, self.X_df: X_df}
            else:
                feed_dict = {self.X: X, self.U: U}

            # Report the loss over the full dataset, matching the
            # "loss_full" stateful metric declared above
            loss_full = self.sess.run(self.loss, feed_dict)
            progbar.update(epoch + 1, [("loss_full", loss_full)])
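The shuffle used above behaves like sklearn.utils.shuffle (an assumption; the import is not shown): it applies one random permutation consistently across all arrays passed in. A minimal sketch:

import numpy as np
from sklearn.utils import shuffle

X = np.arange(10).reshape(5, 2)
U = np.arange(5).reshape(5, 1)
# X_s and U_s are permuted with the same index order,
# so row pairs (X[i], U[i]) stay aligned.
X_s, U_s = shuffle(X, U)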
Example #21
    def on_train_begin(self, logs=None):
        #print('Epoch %d/%d' % (epoch + 1, self.epochs))
        self.target = self.params['epochs']
        self.stateful_metrics = ['loss', 'roc_auc', 'roc_auc_max']
        self.roc_auc_max = 0
        self.progbar = Progbar(self.target,
                               verbose=1,
                               stateful_metrics=self.stateful_metrics)
        self.seen = 0
Example #22
    def vectorize(text, verbose=False):
        x = []
        bar = Progbar(len(text))
        for text_batch in batch(text, batch_size):
            container.set(text_batch)
            x.append(next(predict_fn)['output'])
            if verbose:
                bar.add(len(text_batch))
        r = np.vstack(x)
        return r
Example #23
    def __init__(self):
        self.env = Environment()
        self.replay_memory = deque(maxlen=C.REPLAY_MEMORY_LEN)

        self.epsilon = C.START_EPSILON

        self.model = self.load_model()
        self.target_model = self.load_model()
        self.update_target_model()

        self.progbar = Progbar(C.EPISODES, unit_name='Episode')
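Example #23 sets unit_name, which changes the label Progbar prints for its per-item rate (the default is 'step'). A minimal sketch:

from tensorflow.keras.utils import Progbar

progbar = Progbar(100, unit_name='Episode')
for episode in range(100):
    progbar.update(episode + 1)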
Example #24
File: gen_hdf5.py  Project: aronsar/ganabi
    def save(ds_X, ds_Y, gen):
        prog = Progbar(len(gen))
        cur_idx = 0
        for idx, (x, y) in enumerate(gen):
            rows = x.shape[0]
            assert(rows == y.shape[0])
            ds_X[cur_idx:(cur_idx+rows), :] = x
            ds_Y[cur_idx:(cur_idx+rows), :] = y
            cur_idx += rows
            prog.update(idx + 1)
        print()
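A hedged usage sketch for save, assuming h5py for the preallocated datasets and a hypothetical list of (x, y) batches standing in for gen:

import h5py
import numpy as np

# Hypothetical data: 10 batches of 32 rows, 8 features in, 2 out.
batches = [(np.random.rand(32, 8).astype('float32'),
            np.random.rand(32, 2).astype('float32'))
           for _ in range(10)]
n_rows = sum(x.shape[0] for x, _ in batches)

with h5py.File('data.hdf5', 'w') as f:
    ds_X = f.create_dataset('X', shape=(n_rows, 8), dtype='float32')
    ds_Y = f.create_dataset('Y', shape=(n_rows, 2), dtype='float32')
    save(ds_X, ds_Y, batches)  # the function from Example #24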
Example #25
def lipschitz_lb(f, X1, X2, iterations=1000, verbose=True):

    optimizer = Adam(lr=0.0001)

    X1 = tf.Variable(X1, name='x1', dtype='float32')
    X2 = tf.Variable(X2, name='x2', dtype='float32')
    
    max_L = None

    if verbose:
        pb = Progbar(iterations, stateful_metrics=['LC'])
    
    for _ in range(iterations):
        with tf.GradientTape() as tape:
            y1 = f(X1)
            y2 = f(X2)
            
            # The definition of the margin is not entirely symmetric: the top
            # class must remain the same when measuring both points. We assume
            # X1 is the reference point for determining the top class.
            original_predictions = tf.cast(
                tf.equal(y1, tf.reduce_max(y1, axis=1, keepdims=True)), 
                'float32')
            
            # This takes the logit at the top class for both X1 and X2.
            y1_j = tf.reduce_sum(
                y1 * original_predictions, axis=1, keepdims=True)
            y2_j = tf.reduce_sum(
                y2 * original_predictions, axis=1, keepdims=True)
            
            margin1 = y1_j - y1
            margin2 = y2_j - y2

            axes = tuple((tf.range(len(X1.shape) - 1) + 1).numpy())
            
            L = tf.abs(margin1 - margin2) / (tf.sqrt(
                tf.reduce_sum((X1 - X2)**2, axis=axes)) + EPS)[:,None]

            loss = -tf.reduce_max(L, axis=1)
            
        grad = tape.gradient(loss, [X1, X2])

        optimizer.apply_gradients(zip(grad, [X1, X2]))
        
        if max_L is None:
            max_L = L
        else:
            max_L = tf.maximum(max_L, L)

        if verbose:
            pb.add(1, [('LC', tf.reduce_max(max_L))])
        
    return tf.reduce_max(max_L)
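A hedged usage sketch, assuming f is any Keras model (or callable) returning class logits, X2 is a small perturbation of X1, and EPS and Adam are already in scope as in the snippet:

import numpy as np

x1 = np.random.rand(16, 32).astype('float32')
x2 = x1 + 0.01 * np.random.randn(16, 32).astype('float32')
# model is a hypothetical logit-producing Keras model
lower_bound = lipschitz_lb(model, x1, x2, iterations=500)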
Example #26
    def on_epoch_begin(self, epoch, logs={}):
        self.loss_buff = []
        self.val_loss_buff = []
        print('Epoch : %d/%d, Effective Epoch : %d/%d' %
              (epoch + 1, self.epochs,
               (epoch + 1) // self.model.optimizer.L + 1,
               self.epochs // self.model.optimizer.L))
        self.target = self.params['samples']
        self.progbar = Progbar(target=self.target,
                               verbose=1,
                               stateful_metrics=['loss', 'val_loss'])
        self.seen = 0
Example #27
def train(BATCH_SIZE, X_train):
    
    ### model define
    d = discriminator_model()
    g = generator_model()
    d_on_g = generator_containing_discriminator(g, d)
    d_optim = RMSprop(lr=0.0004)
    g_optim = RMSprop(lr=0.0002)
    g.compile(loss='mse', optimizer=g_optim)
    d_on_g.compile(loss='mse', optimizer=g_optim)
    d.trainable = True
    d.compile(loss='mse', optimizer=d_optim)
    

    for epoch in range(10):
        print ("Epoch is", epoch)
        n_iter = int(X_train.shape[0]/BATCH_SIZE)
        progress_bar = Progbar(target=n_iter)
        
        for index in range(n_iter):
            # create random noise -> U(0,1) 10 latent vectors
            noise = np.random.uniform(0, 1, size=(BATCH_SIZE, 10))

            # load real data & generate fake data
            image_batch = X_train[index*BATCH_SIZE:(index+1)*BATCH_SIZE]
            generated_images = g.predict(noise, verbose=0)
            
            # visualize training results
            if index % 20 == 0:
                image = combine_images(generated_images)
                image = image*127.5+127.5
                cv2.imwrite('./result/'+str(epoch)+"_"+str(index)+".png", image)

            # attach label for training discriminator
            X = np.concatenate((image_batch, generated_images))
            y = np.array([1] * BATCH_SIZE + [0] * BATCH_SIZE)
            
            # training discriminator
            d_loss = d.train_on_batch(X, y)

            # training generator
            d.trainable = False
            g_loss = d_on_g.train_on_batch(noise, np.array([1] * BATCH_SIZE))
            d.trainable = True

            progress_bar.update(index + 1, values=[('g', g_loss), ('d', d_loss)])
        print('')

        # save weights for each epoch
        g.save_weights('weights/generator.h5', True)
        d.save_weights('weights/discriminator.h5', True)
    return d, g
Example #28
    def inference(self, test_dataset, L=1):
        '''Get \(p(c_i|Y_i,X_i)\).

        Parameters
        ----------
        test_dataset : tf.Dataset
            The dataset object.
        L : int
            The number of MC samples.

        Returns
        ----------
        pi_norm  : np.array
            \([1, K]\) The estimated \(\\pi\).
        mu : np.array
            \([d, k]\) The estimated \(\\mu\).
        p_c_x : np.array
            \([N, ]\) The estimated \(p(c_i|Y_i,X_i)\).
        w_tilde : np.array
            \([N, k]\) The estimated \(E(\\tilde{w}_i|Y_i,X_i)\).
        var_w_tilde  : np.array 
            \([N, k]\) The estimated \(Var(\\tilde{w}_i|Y_i,X_i)\).
        z_mean : np.array
            \([N, d]\) The estimated latent mean.
        '''
        if self.latent_space is None:
            raise ReferenceError('Have not initialized the latent space.')

        print('Computing posterior estimates over mini-batches.')
        progbar = Progbar(test_dataset.cardinality().numpy())
        pi_norm = tf.nn.softmax(self.latent_space.pi).numpy()
        mu = self.latent_space.mu.numpy()
        z_mean = []
        p_c_x = []
        w_tilde = []
        var_w_tilde = []
        for step, (x, c_score) in enumerate(test_dataset):
            x = tf.concat([x, c_score], -1) if self.has_cov else x
            _z_mean, _, z = self.encoder(x, L, False)
            res = self.latent_space(z, inference=True)

            z_mean.append(_z_mean.numpy())
            p_c_x.append(res['p_c_x'])
            w_tilde.append(res['w_tilde'])
            var_w_tilde.append(res['var_w_tilde'])
            progbar.update(step + 1)

        z_mean = np.concatenate(z_mean)
        p_c_x = np.concatenate(p_c_x)
        w_tilde = np.concatenate(w_tilde)
        var_w_tilde = np.concatenate(var_w_tilde)
        return pi_norm, mu, p_c_x, w_tilde, var_w_tilde, z_mean
Example #29
    def fit(self, batches, good_batches, epochs=100):
        """Trains the model.

        Args:
            batches (Dataset): Training batches containing samples.
            good_batches (Dataset): Additional batches passed to the optimization step alongside each training batch.
            epochs (int): The maximum number of training epochs.

        """

        logger.info('Fitting model ...')

        # Gather the number of batches
        n_batches = tf.data.experimental.cardinality(batches).numpy()
        print(n_batches)

        good_batches = list(good_batches.as_numpy_iterator())

        # Iterate through all epochs
        for e in range(epochs):
            logger.info('Epoch %d/%d', e + 1, epochs)

            # Reset the loss metric states for this epoch
            self.G_loss.reset_states()
            self.D_loss.reset_states()

            # Defining a customized progress bar
            b = Progbar(n_batches, stateful_metrics=['loss(G)', 'loss(D)'])

            # Iterate through all possible training batches
            for i, tar in enumerate(batches):
                # Performs the optimization step
                self.step(tar, good_batches[i])

                # Adding corresponding values to the progress bar
                b.add(1,
                      values=[('loss(G)', self.G_loss.result()),
                              ('loss(D)', self.D_loss.result())])

            # Exponentially annealing the Gumbel-Softmax temperature
            self.G.tau = self.init_tau**((epochs - e) / epochs)

            # Dumps the losses to history
            self.history['G_loss'].append(self.G_loss.result().numpy())
            self.history['D_loss'].append(self.D_loss.result().numpy())

            logger.to_file('Loss(G): %s | Loss(D): %s',
                           self.G_loss.result().numpy(),
                           self.D_loss.result().numpy())
Example #30
    def __init__(self):
        self.df = pd.DataFrame()
        self.past_history = 20      # number of past days used for training
        self.future_target = 7      # number of future days to predict
        self.col = 5
        self.checkpoint_path = os.path.join('model_weights', 'weights')
        self.checkpoint_dir = os.path.dirname(self.checkpoint_path)
        self.check_index = self.checkpoint_path + '.index'
        self.model = self.build_model()
        self.epochs = 30
        self.epoch_loss_avg = tf.keras.metrics.Mean()
        self.optimizer = tf.optimizers.RMSprop(learning_rate=0.0001, epsilon=0.000065)
        self.loss_function = tf.keras.losses.MSE
        self.bar = Progbar(self.epochs)