Example No. 1
  def _build(self):
    """Builds block according to the arguments."""
    # pylint: disable=g-long-lambda
    bid = itertools.count(0)
    get_norm_name = lambda: 'tpu_batch_normalization' + ('' if not next(
        bid) else '_' + str(next(bid) // 2))
    cid = itertools.count(0)
    get_conv_name = lambda: 'conv2d' + ('' if not next(cid) else '_' + str(
        next(cid) // 2))
    # pylint: enable=g-long-lambda

    mconfig = self._mconfig
    block_args = self._block_args
    filters = block_args.input_filters * block_args.expand_ratio
    kernel_size = block_args.kernel_size
    if block_args.expand_ratio != 1:
      # Expansion phase:
      self._expand_conv = tf.keras.layers.Conv2D(
          filters,
          kernel_size=kernel_size,
          strides=block_args.strides,
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          use_bias=False,
          name=get_conv_name())
      self._norm0 = utils.normalization(
          mconfig.bn_type,
          axis=self._channel_axis,
          momentum=mconfig.bn_momentum,
          epsilon=mconfig.bn_epsilon,
          groups=mconfig.gn_groups,
          name=get_norm_name())

    if self._has_se:
      num_reduced_filters = max(
          1, int(block_args.input_filters * block_args.se_ratio))
      self._se = SE(mconfig, num_reduced_filters, filters, name='se')
    else:
      self._se = None
    # Output phase:
    filters = block_args.output_filters
    self._project_conv = tf.keras.layers.Conv2D(
        filters,
        kernel_size=1 if block_args.expand_ratio != 1 else kernel_size,
        strides=1 if block_args.expand_ratio != 1 else block_args.strides,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        use_bias=False,
        name=get_conv_name())
    self._norm1 = utils.normalization(
        mconfig.bn_type,
        axis=self._channel_axis,
        momentum=mconfig.bn_momentum,
        epsilon=mconfig.bn_epsilon,
        groups=mconfig.gn_groups,
        name=get_norm_name())
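In this example (and in Examples No. 10, 23, and 29 below) `utils.normalization` is not an array transform but a layer factory selected by `bn_type`. A minimal sketch of such a factory, assuming only the keyword names visible in the calls above; the branching and defaults are guesses, not the actual EfficientNet implementation:

import tensorflow as tf

def normalization(norm_type, axis=-1, momentum=0.99, epsilon=1e-3,
                  groups=8, name=None):
  # Hypothetical factory: pick a Keras normalization layer from a string tag.
  if norm_type == 'gn':
    # Group norm ignores momentum; tf.keras.layers.GroupNormalization
    # requires TF/Keras >= 2.11.
    return tf.keras.layers.GroupNormalization(
        groups=groups, axis=axis, epsilon=epsilon, name=name)
  # Default to standard batch normalization.
  return tf.keras.layers.BatchNormalization(
      axis=axis, momentum=momentum, epsilon=epsilon, name=name)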
Example No. 2
    def update_model(self, HL_replay_buffer, logger):
        early_stopper = EarlyStopping(patience=7)
        split = 10.0
        state_norm = utils.normalization(HL_replay_buffer.obses,
                                         self.all_mean_var[0],
                                         self.all_mean_var[1])
        action_norm = utils.normalization(HL_replay_buffer.actions,
                                          self.all_mean_var[2],
                                          self.all_mean_var[3])
        delta_state_norm = utils.normalization(HL_replay_buffer.next_obses,
                                               self.all_mean_var[4],
                                               self.all_mean_var[5])
        train_capacity = int(HL_replay_buffer.capacity * (split - 1) / split)
        test_idxs = np.arange(-int(HL_replay_buffer.capacity / split), 0)

        state_test = torch.as_tensor(state_norm[test_idxs],
                                     device=self.device).float()
        action_test = torch.as_tensor(action_norm[test_idxs],
                                      device=self.device).float()
        delta_state_test = torch.as_tensor(delta_state_norm[test_idxs],
                                           device=self.device).float()

        for i in range(self.model_update_steps):
            self.update_step += 1
            idxs = np.random.randint(0, train_capacity, size=self.batch_size)
            # idxs = np.random.randint(0, 1100, size=self.batch_size)

            state = torch.as_tensor(state_norm[idxs],
                                    device=self.device).float()
            action = torch.as_tensor(action_norm[idxs],
                                     device=self.device).float()
            delta_state = torch.as_tensor(delta_state_norm[idxs],
                                          device=self.device).float()

            pred_delta_state = self.forward_model(state, action)
            model_loss = F.mse_loss(pred_delta_state, delta_state)
            self.model_optimizer.zero_grad()
            model_loss.backward()
            self.model_optimizer.step()

            logger.log('train/model_loss', model_loss)
            logger.dump(self.update_step)

            if (i + 1) % 100 == 0:
                pred_delta_state = self.forward_model(state_test, action_test)
                model_loss = F.mse_loss(pred_delta_state, delta_state_test)
                logger.log('train/val_loss', model_loss)
                logger.dump(self.update_step)
                early_stopper(model_loss)

            if early_stopper.early_stop:
                break

        self.save_data('.')
Example No. 3
 def model_predict(self, model_obs, latent_action):
     model_obs_norm = utils.normalization(model_obs,
                                          mean=self.all_mean_var[0],
                                          std=self.all_mean_var[1])
     latent_action_norm = utils.normalization(latent_action,
                                              mean=self.all_mean_var[2],
                                              std=self.all_mean_var[3])
     predict_norm = self.forward_model.predict(model_obs_norm,
                                               latent_action_norm)
     return utils.inverse_normalization(predict_norm,
                                        mean=self.all_mean_var[4],
                                        std=self.all_mean_var[5])
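Examples No. 2 and 3 pass stored statistics from `self.all_mean_var`, which points to a plain z-score transform and its inverse. A minimal sketch under that assumption (the `eps` guard is added here and is not taken from the examples):

import numpy as np

def normalization(data, mean, std, eps=1e-8):
    # Z-score with externally supplied statistics.
    return (data - np.asarray(mean)) / (np.asarray(std) + eps)

def inverse_normalization(data_norm, mean, std, eps=1e-8):
    # Undo the z-score transform above.
    return np.asarray(data_norm) * (np.asarray(std) + eps) + np.asarray(mean)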
Example No. 4
    def train(self, num_steps):

        self.sess.run(tf.global_variables_initializer())
        for step in range(num_steps):
            input_images, output_images = self.sess.run(self.queue.dequeue())
            input_images = normalization(input_images)
            output_images = normalization(output_images)
            feed_dict = {
                self.input_images: input_images,
                self.output_images: output_images
            }
            _, loss = self.sess.run([self.train_step, self.loss], feed_dict)
            print(step + 1, loss)
Example No. 5
def cross_subject(data, label, session_id, category_number, batch_size, iteration, lr, momentum, log_interval):
    # cross-subject, for 3 sessions, 1-14 as sources, 15 as target
    one_session_data, one_session_label = copy.deepcopy(data[session_id]), copy.deepcopy(label[session_id])
    target_data, target_label = one_session_data.pop(), one_session_label.pop()
    source_data, source_label = copy.deepcopy(one_session_data), copy.deepcopy(one_session_label.copy())
    # print(len(source_data))
    source_data_comb = source_data[0]
    source_label_comb = source_label[0]
    for j in range(1, len(source_data)):
        source_data_comb = np.vstack((source_data_comb, source_data[j]))
        source_label_comb = np.vstack((source_label_comb, source_label[j]))
    if bn == 'ele':
        source_data_comb = utils.norminy(source_data_comb)
        target_data = utils.norminy(target_data)
    elif bn == 'sample':
        source_data_comb = utils.norminx(source_data_comb)
        target_data = utils.norminx(target_data)
    elif bn == 'global':
        source_data_comb = utils.normalization(source_data_comb)
        target_data = utils.normalization(target_data)
    elif bn == 'none':
        pass
    else:
        pass
    # source_data_comb = utils.norminy(source_data_comb)
    # target_data = utils.norminy(target_data)
    source_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(source_data_comb, source_label_comb),
                                                            batch_size=batch_size,
                                                            shuffle=True,
                                                            drop_last=True)
    # source_loaders = []
    # for j in range(len(source_data)):
    #     source_loaders.append(torch.utils.data.DataLoader(dataset=utils.CustomDataset(source_data[j], source_label[j]),
    #                                                         batch_size=batch_size,
    #                                                         shuffle=True,
    #                                                         drop_last=True))
    target_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(target_data, target_label),
                                                            batch_size=batch_size, 
                                                            shuffle=True, 
                                                            drop_last=True)
    model = DANNet(model=models.DAN(pretrained=False, number_of_category=category_number),
                source_loader=source_loader,
                target_loader=target_loader,
                batch_size=batch_size,
                iteration=iteration,
                lr=lr,
                momentum=momentum,
                log_interval=log_interval)
    # print(model.__getModel__())
    acc = model.train()
    return acc
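The `bn` switch above selects between three helpers whose implementations are not shown on this page. One plausible reading of the branch names ('ele' per feature, 'sample' per row, 'global' over the whole array), written as a hedged sketch rather than the repository's actual utils:

import numpy as np

def norminy(data, eps=1e-8):
    # 'ele': z-score each feature column independently (assumed semantics).
    return (data - data.mean(axis=0)) / (data.std(axis=0) + eps)

def norminx(data, eps=1e-8):
    # 'sample': z-score each sample row independently (assumed semantics).
    return ((data - data.mean(axis=1, keepdims=True))
            / (data.std(axis=1, keepdims=True) + eps))

def normalization(data, eps=1e-8):
    # 'global': z-score using statistics of the whole matrix.
    return (data - data.mean()) / (data.std() + eps)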
Example No. 6
    def __data_generation(self, list_ids_temp):
        'Generates data containing batch_size samples'
        x = np.empty((self.batch_size, *self.dim, self.n_channels))
        y = np.empty((self.batch_size, *self.dim))

        dataset_mean = self.mean
        dataset_std = self.std

        for i, ID in enumerate(list_ids_temp):

            img1 = skimage.img_as_float64(
                imread(dataset_add + "GEE_mapbiomas/" + ID +
                       "_2019-01-01.tif"))
            img2 = skimage.img_as_float64(
                imread(dataset_add + "GEE_mapbiomas/" + ID +
                       "_2019-04-01.tif"))
            img3 = skimage.img_as_float64(
                imread(dataset_add + "GEE_mapbiomas/" + ID +
                       "_2019-07-01.tif"))
            img4 = skimage.img_as_float64(
                imread(dataset_add + "GEE_mapbiomas/" + ID +
                       "_2019-10-01.tif"))
            img5 = skimage.img_as_float64(
                imread(dataset_add + "GEE_mapbiomas/" + ID +
                       "_2020-01-01.tif"))
            mask = skimage.img_as_float64(
                imread(dataset_add + "GEE_mapbiomas_masks/" + ID + ".tif"))

            img1 = U.normalization(img1, mean=dataset_mean, std=dataset_std)
            img2 = U.normalization(img2, mean=dataset_mean, std=dataset_std)
            img3 = U.normalization(img3, mean=dataset_mean, std=dataset_std)
            img4 = U.normalization(img4, mean=dataset_mean, std=dataset_std)
            img5 = U.normalization(img5, mean=dataset_mean, std=dataset_std)

            img = np.concatenate((img1, img2, img3, img4, img5), axis=2)

            # 32x32 random crop
            if self.shuffle == True:
                img, mask = U.random_crop(img, mask, 32, 32)
            else:
                img = img[:32, :32]
                mask = mask[:32, :32]

            x[i, ] = img
            y[i, ] = mask / 255.

        y = np.expand_dims(y, axis=3)

        return x, y
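The generator above relies on `U.random_crop(img, mask, 32, 32)` to take the same window from the image stack and its mask. A self-contained sketch of what such a helper might look like (not the project's actual implementation):

import numpy as np

def random_crop(img, mask, height, width):
    # Draw one random top-left corner and crop image and mask identically.
    max_y = img.shape[0] - height
    max_x = img.shape[1] - width
    y = np.random.randint(0, max_y + 1)
    x = np.random.randint(0, max_x + 1)
    return (img[y:y + height, x:x + width],
            mask[y:y + height, x:x + width])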
Example No. 7
 def embedding_regions(self, method, embedding_nr):
     regions_dict = {}
     for nr in embedding_nr:
         embedding_img = self.embedding_images(embedding_nr[nr])
         embedding_uint = img_as_ubyte(normalization(embedding_img))
         embedding_uint_bin = (
             embedding_uint >
             skfilters.threshold_otsu(embedding_uint)).astype(int)
         if method == "cv":
             region = calc_ac(embedding_uint_bin,
                              "cv",
                              init_img=embedding_img)
         elif method == "mcv":
             region = calc_ac(embedding_uint_bin,
                              "mcv",
                              init_img=embedding_img)
         elif method == "mgac":
             region = calc_ac(embedding_uint_bin,
                              "mgac",
                              init_img=embedding_img)
         elif method == "contours_low":
             region = calc_ac(embedding_img,
                              "contours_low",
                              init_img=embedding_uint_bin)
         elif method == "contours_high":
             region = calc_ac(embedding_img,
                              "contours_high",
                              init_img=embedding_uint_bin)
         regions_dict[nr] = region
     region_sum = np.sum([img for key, img in regions_dict.items()], axis=0)
     return region_sum, regions_dict
Example No. 8
def main ():
    # data_names = ['letter', 'spam']
    # data_names = ['balance']
    # data with continuous feature and not originally missing
    #
    data_names = ['balance','banknote','blood','breasttissue', 'climate','connectionistvowel',
                  'ecoli','glass','hillvalley','ionosphere', 'parkinsons','planning','seedst',
                  'thyroid','vehicle','vertebral','wine','yeast']
    print(len(data_names))
    miss_rate = 0.2
    batch_size = 64
    alpha = 100
    iterations = 1000
    n_times = 30

    for k in range(len(data_names)):

        data_name = data_names[k]

        print("Dataset: ", data_name)
        rmse = []
        # acc_dct = []
        # acc_knn = []
        # acc_nb = []
        ori_data_x, y, _, _ = data_loader(data_name, miss_rate)
        ori_data_x, _ = normalization(ori_data_x)
        scf = StratifiedShuffleSplit(n_splits=5)
        score_dct = cross_val_score(DecisionTreeClassifier(max_depth=5),ori_data_x, y, cv=scf, scoring='accuracy')
        print(score_dct)
        score_knn = cross_val_score(KNeighborsClassifier(),ori_data_x, y, cv=scf, scoring='accuracy')
        print(score_knn)
        score_nb = cross_val_score(GaussianNB(),ori_data_x, y, cv=scf, scoring='accuracy')
        print(score_nb)

        print("---------------------------")
Example No. 9
def load_recording(path, use_mfcc=False):
    # fileContents = tf.io.read_file(path)
    # splitedFileContents = tf.string_split([fileContents], os.linesep)
    df = pd.read_csv(
        path,
        skiprows=[0],
        header=None,
        names=[
            "COUNTER", "INTERPOLATED", "F3", "FC5", "AF3", "F7", "T7", "P7",
            "O1", "O2", "P8", "T8", "F8", "AF4", "FC6", "F4", "RAW_CQ", "GYROX"
        ]
    )  # "GYROY", "MARKER", "MARKER_HARDWARE", "SYNC", "TIME_STAMP_s", "TIME_STAMP_ms", "CQ_AF3", "CQ_F7", "CQ_F3", "CQ_FC5", "CQ_T7", "CQ_P7", "CQ_O1", "CQ_O2", "CQ_P8", "CQ_T8", "CQ_FC6", "CQ_F4", "CQ_F8", "CQ_AF4", "CQ_CMS", "CQ_DRL"])

    df = df[:Config.RECORDING_NUM_SAMPLES]
    df = df[Config.SENSORS_LABELS]
    recording = df.values
    recording.dtype = np.float64
    recording = utils.normalization(recording)

    if recording.shape[0] < Config.RECORDING_NUM_SAMPLES:
        recording = np.pad(
            recording,
            ((0, Config.RECORDING_NUM_SAMPLES - recording.shape[0]), (0, 0)),
            mode="edge")

    if recording.shape[0] != Config.RECORDING_NUM_SAMPLES:
        raise Exception(
            f"Session number of samples is super not OK: [{recording.shape[0]}]"
        )

    if use_mfcc:
        recording = compute_mfcc(recording)

    return recording
Example No. 10
  def __init__(self, mconfig, name=None):
    super().__init__(name=name)

    self.endpoints = {}
    self._mconfig = mconfig

    self._conv_head = tf.keras.layers.Conv2D(
        filters=round_filters(mconfig.feature_size or 1280, mconfig),
        kernel_size=1,
        strides=1,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=mconfig.data_format,
        use_bias=False,
        name='conv2d')
    self._norm = utils.normalization(
        mconfig.bn_type,
        axis=(1 if mconfig.data_format == 'channels_first' else -1),
        momentum=mconfig.bn_momentum,
        epsilon=mconfig.bn_epsilon,
        groups=mconfig.gn_groups)
    self._act = utils.get_act_fn(mconfig.act_fn)

    self._avg_pooling = tf.keras.layers.GlobalAveragePooling2D(
        data_format=mconfig.data_format)

    if mconfig.dropout_rate > 0:
      self._dropout = tf.keras.layers.Dropout(mconfig.dropout_rate)
    else:
      self._dropout = None

    self.h_axis, self.w_axis = ([2, 3] if mconfig.data_format
                                == 'channels_first' else [1, 2])
Example No. 11
def gain_test(data_test, sess, G_sample, X, M):
    data_m_test = 1 - np.isnan(data_test)

    no_test, dim_test = data_test.shape

    norm_data_t, norm_parameters_test = normalization(data_test)
    norm_data_test = np.nan_to_num(norm_data_t, 0)

    # Prepare data format
    Z_mb_test = uniform_sampler(0, 0.01, no_test, dim_test)
    M_mb_test = data_m_test
    X_mb_test = norm_data_test
    X_mb_test = M_mb_test * X_mb_test + (1 - M_mb_test) * Z_mb_test

    # Impute data test
    imputed_data_test = sess.run([G_sample],
                                 feed_dict={
                                     X: X_mb_test,
                                     M: M_mb_test
                                 })[0]
    imputed_data_test = data_m_test * norm_data_test + (
        1 - data_m_test) * imputed_data_test

    # Renormalization
    imputed_data_test = renormalization(imputed_data_test,
                                        norm_parameters_test)

    # Rounding
    imputed_data_test = rounding(imputed_data_test, data_test)

    return imputed_data_test
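`normalization` here returns both the scaled data and the parameters that `renormalization` later needs, which matches a column-wise min-max scheme. A sketch consistent with that usage (the exact formula and the epsilon are assumptions):

import numpy as np

def normalization(data):
    # Column-wise min-max scaling; NaNs are ignored when computing the range.
    min_val = np.nanmin(data, axis=0)
    max_val = np.nanmax(data, axis=0)
    norm_data = (data - min_val) / (max_val - min_val + 1e-6)
    return norm_data, {'min_val': min_val, 'max_val': max_val}

def renormalization(norm_data, norm_parameters):
    # Invert the scaling with the stored parameters.
    min_val = norm_parameters['min_val']
    max_val = norm_parameters['max_val']
    return norm_data * (max_val - min_val + 1e-6) + min_val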
Example No. 12
def initiate_cross_sub_reservoir():
    sub_data, sub_label = utils.load_by_session(dataset_name)  # 3*14*(m*310)
    sub_data, sub_label = shuffle(sub_data, sub_label, random_state=0)
    for i in range(3):
        for j in range(14):
            clf = LogisticRegression(random_state=0, max_iter=10000)
            clf.fit(utils.normalization(sub_data[i][j]),
                    sub_label[i][j].squeeze())
            print(("clf " + str(i) + " " + str(j)),
                  utils.test(clf, utils.normalization(sub_data[i][j]),
                             sub_label[i][j].squeeze()))
            if dataset_name == 'seed4':
                path = "models/seed4/csu/sesn" + str(i) + "/lr" + str(j) + ".m"
            elif dataset_name == 'seed3':
                path = "models/seed3/csu/sesn" + str(i) + "/lr" + str(j) + ".m"
            joblib.dump(clf, path)
Example No. 13
def cross_session(data, label, subject_id, category_number, batch_size, iteration, lr, momentum, log_interval):
    target_data, target_label = copy.deepcopy(data[2][subject_id]), copy.deepcopy(label[2][subject_id])
    source_data, source_label = [copy.deepcopy(data[0][subject_id]), copy.deepcopy(data[1][subject_id])], [copy.deepcopy(label[0][subject_id]), copy.deepcopy(label[1][subject_id])]
    # one_sub_data, one_sub_label = data[i], label[i]
    # target_data, target_label = one_session_data.pop(), one_session_label.pop()
    # source_data, source_label = one_session_data.copy(), one_session_label.copy()
    # print(len(source_data))
    # Stack the two source sessions; start from session 0 so session 1 is not
    # duplicated by the loop below.
    source_data_comb = source_data[0]
    source_label_comb = source_label[0]
    for j in range(1, len(source_data)):
        source_data_comb = np.vstack((source_data_comb, source_data[j]))
        source_label_comb = np.vstack((source_label_comb, source_label[j]))
    if bn == 'ele':
        source_data_comb = utils.norminy(source_data_comb)
        target_data = utils.norminy(target_data)
    elif bn == 'sample':
        source_data_comb = utils.norminx(source_data_comb)
        target_data = utils.norminx(target_data)
    elif bn == 'global':
        source_data_comb = utils.normalization(source_data_comb)
        target_data = utils.normalization(target_data)
    elif bn == 'none':
        pass
    else:
        pass
    # source_data_comb = utils.norminy(source_data_comb)
    # target_data = utils.norminy(target_data)

    source_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(source_data_comb, source_label_comb),
                                                            batch_size=batch_size,
                                                            shuffle=True,
                                                            drop_last=True)
    target_loader = torch.utils.data.DataLoader(dataset=utils.CustomDataset(target_data, target_label),
                                                            batch_size=batch_size, 
                                                            shuffle=True, 
                                                            drop_last=True)
    model = DANNet(model=models.DAN(pretrained=False, number_of_category=category_number),
                source_loader=source_loader,
                target_loader=target_loader,
                batch_size=batch_size,
                iteration=iteration,
                lr=lr,
                momentum=momentum,
                log_interval=log_interval)
    # print(model.__getModel__())
    acc = model.train()
    return acc
Example No. 14
def initiate_cross_sub_ses_reservoir():
    subs_data, subs_label = utils.load_session_data_label(dataset_name, 0)
    subs_data, subs_label = shuffle(subs_data, subs_label, random_state=0)
    # print(len(subs_data[0]))
    for i in range(15):
        clf = LogisticRegression(random_state=0, max_iter=10000)
        clf.fit(utils.normalization(subs_data[i]), subs_label[i].squeeze())
        print(
            "clf: ",
            utils.test(clf, utils.normalization(subs_data[i]),
                       subs_label[i].squeeze()))
        if dataset_name == 'seed4':
            path = "models/seed4/csun/lr" + str(i) + ".m"
        elif dataset_name == 'seed3':
            path = "models/seed3/csun/lr" + str(i) + ".m"
        # path = "models/csun/lr" + str(i) + ".m"
        joblib.dump(clf, path)
Example No. 15
 def predict(self, image):
     image = to_pil(image)
     image_tensor = test_trainsforms(image).float()
     image_tensor = image_tensor.unsqueeze(0)
     input = image_tensor.to(self.device)
     output = self.model(input)
     output = output.data.cpu().numpy()
     index = output.argmax()
     return index, normalization(output)
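`normalization(output)` is applied to the raw network outputs before they are returned together with the argmax index. A simple min-max reading of that call (a softmax would be another reasonable interpretation):

import numpy as np

def normalization(x):
    # Rescale the logits vector to [0, 1]; purely illustrative.
    x = np.asarray(x, dtype=np.float64)
    return (x - x.min()) / (x.max() - x.min() + 1e-12)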
Example No. 16
def initiate_cross_ses_reservoir():
    ses_data, ses_label = utils.load_by_subject(dataset_name)  # 15*2*(m*310)
    ses_data, ses_label = shuffle(ses_data, ses_label, random_state=0)
    for i in range(15):
        for j in range(2):
            clf = LogisticRegression(random_state=0, max_iter=10000)
            # clf = svm.LinearSVC(max_iter=10000)
            # clf = CalibratedClassifierCV(clf, cv=5)
            clf.fit(utils.normalization(ses_data[i][j]),
                    ses_label[i][j].squeeze())
            print(("clf " + str(i) + " " + str(j)),
                  utils.test(clf, utils.normalization(ses_data[i][j]),
                             ses_label[i][j].squeeze()))
            if dataset_name == 'seed4':
                path = "models/seed4/csn/sub" + str(i) + "/lr" + str(j) + ".m"
            elif dataset_name == 'seed3':
                path = "models/seed3/csn/sub" + str(i) + "/lr" + str(j) + ".m"
            # path = "models/csn/sub" + str(i) + "/lr" + str(j) + ".m"
            joblib.dump(clf, path)
Example No. 17
def run(training_data, test_data, num_runs=10, num_kernels=100):
    results = np.zeros(num_runs)
    timings = np.zeros([4, num_runs])  # training transform, test transform, training, test

    Y_training, X_training = training_data[:, 0].astype(int), standardization(normalization(training_data[:, 1:]))
    Y_test, X_test = test_data[:, 0].astype(int), standardization(normalization(test_data[:, 1:]))

    for i in range(num_runs):
        input_length = X_training.shape[1]
        kernels = generate_kernels(input_length, num_kernels)

        # -- transform training ------------------------------------------------

        time_a = time.perf_counter()
        X_training_transform = apply_kernels(X_training, kernels)
        time_b = time.perf_counter()
        timings[0, i] = time_b - time_a

        # -- transform test ----------------------------------------------------

        time_a = time.perf_counter()
        X_test_transform = apply_kernels(X_test, kernels)
        time_b = time.perf_counter()
        timings[1, i] = time_b - time_a

        # -- training ----------------------------------------------------------

        time_a = time.perf_counter()
        classifier = RidgeClassifierCV(alphas=10 ** np.linspace(-3, 3, 10), normalize=True)
        classifier.fit(X_training_transform, Y_training)
        time_b = time.perf_counter()
        timings[2, i] = time_b - time_a

        # -- test --------------------------------------------------------------

        time_a = time.perf_counter()
        results[i] = classifier.score(X_test_transform, Y_test)
        time_b = time.perf_counter()
        timings[3, i] = time_b - time_a

    return results, timings
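`run()` chains `normalization` and `standardization` over the whole time-series matrix before generating kernels. A plausible per-series sketch of the two helpers (the axis choice and the epsilon are assumptions, not taken from the ROCKET code):

import numpy as np

def normalization(X):
    # Min-max scale each series (row) to [0, 1].
    mn = X.min(axis=1, keepdims=True)
    mx = X.max(axis=1, keepdims=True)
    return (X - mn) / (mx - mn + 1e-8)

def standardization(X):
    # Zero-mean, unit-variance per series (row).
    mu = X.mean(axis=1, keepdims=True)
    sd = X.std(axis=1, keepdims=True)
    return (X - mu) / (sd + 1e-8)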
Example No. 18
def data_generator(path, batch_size=8, input_shape=96, scale=2):
    '''data generator for fit_generator'''
    fns = os.listdir(path)
    n = len(fns)
    i = 0
    while True:
        lrs, hrs = [], []
        for b in range(batch_size):
            if i == 0:
                np.random.shuffle(fns)
            fn = fns[i]
            fn = os.path.join(path, fn)
            lr, hr = utils.pair(fn, input_shape, scale)
            lr = utils.normalization(lr)
            hr = utils.normalization(hr)
            lrs.append(lr)
            hrs.append(hr)
            i = (i + 1) % n
        lrs = np.array(lrs)
        hrs = np.array(hrs)
        yield lrs, hrs
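For this super-resolution generator, `utils.normalization` most plausibly maps 8-bit pixel values into a fixed range; a one-line sketch under that assumption (some SR codebases scale to [-1, 1] instead):

import numpy as np

def normalization(img):
    # Assumed pixel scaling to [0, 1].
    return np.asarray(img, dtype=np.float32) / 255.0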
Example No. 19
    def transform(imageB64):
        """
        :param imageB64: base64-encoded input image
        :return: base64 encoding of the transformed image
        """
        rgb_img = base64_to_image(imageB64)

        result = vc.process_video(rgb_img, options)
        result = normalization(result)

        r, g, b = cv2.split(result)
        img_bgr = cv2.merge([b, g, r])
        return np.array(result).tolist()
Example No. 20
def main(args):
	if args.file == None or args.file.split(".")[-1] != "csv":
		raise Exception("Invalid input file.")

	file_name = args.file
	file_path = Config.RECORDING_PATH_ROOT + "train\herastrau_train\\" + file_name

	df = pd.read_csv(file_path, skiprows=[0], header=None, names=["COUNTER", "INTERPOLATED", "F3", "FC5", "AF3", "F7", "T7", "P7", "O1", "O2", "P8", "T8", "F8", "AF4", "FC6", "F4", "RAW_CQ", "GYROX"]) # "GYROY", "MARKER", "MARKER_HARDWARE", "SYNC", "TIME_STAMP_s", "TIME_STAMP_ms", "CQ_AF3", "CQ_F7", "CQ_F3", "CQ_FC5", "CQ_T7", "CQ_P7", "CQ_O1", "CQ_O2", "CQ_P8", "CQ_T8", "CQ_FC6", "CQ_F4", "CQ_F8", "CQ_AF4", "CQ_CMS", "CQ_DRL"])
	df = df[Config.SENSORS_LABELS]
	recording = df.values
	recording.dtype = np.float64
	recording = utils.normalization(recording)
	utils.plot_recording(recording)
Example No. 21
def load_recording(path, use_mfcc=False, use_gabor=False):
    # fileContents = tf.io.read_file(path)
    # splitedFileContents = tf.string_split([fileContents], os.linesep)
    df = pd.read_csv(
        path,
        skiprows=[0],
        header=None,
        names=[
            "COUNTER", "INTERPOLATED", "F3", "FC5", "AF3", "F7", "T7", "P7",
            "O1", "O2", "P8", "T8", "F8", "AF4", "FC6", "F4", "RAW_CQ", "GYROX"
        ]
    )  # "GYROY", "MARKER", "MARKER_HARDWARE", "SYNC", "TIME_STAMP_s", "TIME_STAMP_ms", "CQ_AF3", "CQ_F7", "CQ_F3", "CQ_FC5", "CQ_T7", "CQ_P7", "CQ_O1", "CQ_O2", "CQ_P8", "CQ_T8", "CQ_FC6", "CQ_F4", "CQ_F8", "CQ_AF4", "CQ_CMS", "CQ_DRL"])

    df = df[:Config.RECORDING_NUM_SAMPLES]
    df = df[Config.SENSORS_LABELS]
    recording = df.values
    recording.dtype = np.float64
    recording = utils.normalization(recording)

    if recording.shape[0] < Config.RECORDING_NUM_SAMPLES:
        recording = np.pad(
            recording,
            ((0, Config.RECORDING_NUM_SAMPLES - recording.shape[0]), (0, 0)),
            mode="edge")

    if recording.shape[0] != Config.RECORDING_NUM_SAMPLES:
        raise Exception(
            f"Session number of samples is super not OK: [{recording.shape[0]}]"
        )

    if use_mfcc:
        recording = compute_mfcc(recording)

    res = recording

    if (use_gabor):
        recording = np.transpose(recording)
        gabor_filters = [
            genGabor((40, 1), omega=i) for i in np.arange(0.1, 1, 0.2)
        ]
        # res = np.empty((0), dtype=recording.dtype)
        res = []

        for gabor in gabor_filters:
            for line in range(recording.shape[0]):
                res.append(convolve(recording[line], gabor, mode="same"))

        res = np.transpose(np.array(res))

    return res
Example No. 22
 def set_desc_score(self,
                    volume=500000000,
                    win_low=0.02,
                    win_upp=0.98,
                    normal_count=3,
                    market=''):
     data = self.data.copy()
     if market == '':
         data = data[data['vol_mean_20'] >= volume].reset_index(drop=True)
     else:
         data = data[data['vol_mean_20'] >= volume]
         data = data[data['mkt'] == market].reset_index(drop=True)
     data = utils.winsorization(data, self.info_desc, win_low, win_upp)
     for i in range(normal_count):
         data = utils.normalization(data, self.info_desc, -3, 3)
     self.normalized_data = data
Example No. 23
 def __init__(self, mconfig, stem_filters, name=None):
   super().__init__(name=name)
   self._conv_stem = tf.keras.layers.Conv2D(
       filters=round_filters(stem_filters, mconfig),
       kernel_size=3,
       strides=2,
       kernel_initializer=conv_kernel_initializer,
       padding='same',
       data_format=mconfig.data_format,
       use_bias=False,
       name='conv2d')
   self._norm = utils.normalization(
       mconfig.bn_type,
       axis=(1 if mconfig.data_format == 'channels_first' else -1),
       momentum=mconfig.bn_momentum,
       epsilon=mconfig.bn_epsilon,
       groups=mconfig.gn_groups)
   self._act = utils.get_act_fn(mconfig.act_fn)
Example No. 24
def get_data(raw):
    data = raw.set_eeg_reference(ref_channels=['A1', 'A2']).notch_filter(
        freqs=50).filter(3, 70).resample(sfreq=200).drop_channels(
            ['A1', 'A2']).get_data().clip(-0.0001, 0.0001)[None, :, :]
    ch_names = raw.info["ch_names"]
    sfreq = raw.info["sfreq"]

    # cut window
    x = []
    win_size = 0.6
    win_slide = 0.1
    for i in range(0, data.shape[2] - int(win_size * sfreq),
                   int(win_slide * sfreq)):
        start_point = i
        end_point = i + int(win_size * sfreq)
        x.append(normalization(data[:, :, start_point:end_point]))
    x = np.concatenate(x, 0).astype(np.float32)

    return x
Example No. 25
def cph(data_x, cph_parameters, data_image):
    seed = 25
    random.seed(seed)
    np.random.seed(seed)
    tf.set_random_seed(seed)
    '''Impute missing values in data_x
  
  Args:
    - data_x: original data with missing values
    - parameters: CPH network parameters:
      - batch_size: Batch size
      - hint_rate: Hint rate
      - alpha: Hyperparameter
      - iterations: Iterations
      
  Returns:
    - imputed_data: imputed data
  '''
    # Define mask matrix
    data_m = 1 - np.isnan(data_x)

    # System parameters
    batch_size = cph_parameters['batch_size']
    hint_rate = cph_parameters['hint_rate']
    alpha = cph_parameters['alpha']
    iterations = cph_parameters['iterations']

    # Other parameters
    no, dim = data_x.shape

    # Hidden state dimensions
    h_dim = int(dim)
    #print(h_dim)

    # Normalization
    norm_data, norm_parameters = normalization(data_x)
    #norm_data_x = np.nan_to_num(norm_data, 0)
    norm_data_x = np.nan_to_num(data_x, 0)

    ## CPH architecture
    # Input placeholders
    X_pre = tf.placeholder(tf.float32, shape=[1, 483, dim, 3])
    # Data vector
    #X = tf.placeholder(tf.float32, shape = [None, dim])
    # Mask vector
    M = tf.placeholder(tf.float32, shape=[None, dim])
    # Hint vector
    H = tf.placeholder(tf.float32, shape=[None, dim])

    # Discriminator variables
    D_W1 = tf.Variable(xavier_init([dim * 2, h_dim]))  # Data + Hint as inputs
    D_b1 = tf.Variable(tf.zeros(shape=[h_dim]))

    D_W2 = tf.Variable(xavier_init([h_dim, h_dim]))
    D_b2 = tf.Variable(tf.zeros(shape=[h_dim]))

    D_W3 = tf.Variable(xavier_init([h_dim, dim]))
    D_b3 = tf.Variable(tf.zeros(shape=[dim]))  # Multi-variate outputs

    theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]

    #Generator variables
    conv_filter_w1 = tf.Variable(tf.random_normal([1, 4, 3, 3]))
    conv_filter_b1 = tf.Variable(tf.random_normal([3]))

    conv_filter_w2 = tf.Variable(tf.random_normal([1, 4, 3, 1]))
    conv_filter_b2 = tf.Variable(tf.random_normal([1]))
    # Data + Mask as inputs (Random noise is in missing components)
    G_W1 = tf.Variable(xavier_init([dim * 2, h_dim]))
    G_b1 = tf.Variable(tf.zeros(shape=[h_dim]))

    G_W2 = tf.Variable(xavier_init([h_dim, h_dim]))
    G_b2 = tf.Variable(tf.zeros(shape=[h_dim]))

    G_W3 = tf.Variable(xavier_init([h_dim, dim]))
    G_b3 = tf.Variable(tf.zeros(shape=[dim]))

    theta_G = [
        G_W1, G_W2, G_W3, G_b1, G_b2, G_b3, conv_filter_w1, conv_filter_b1,
        conv_filter_w2, conv_filter_b2
    ]

    ## CPH functions
    # CNN + Generator
    def generator(x, m):
        relu_feature_maps1 = tf.nn.relu( \
          tf.nn.conv2d(x, conv_filter_w1, strides=[1, 1, 1, 1], padding='SAME') + conv_filter_b1)
        max_pool1 = tf.nn.max_pool(relu_feature_maps1,
                                   ksize=[1, 1, 4, 1],
                                   strides=[1, 1, 1, 1],
                                   padding='SAME')

        relu_feature_maps2 = tf.nn.relu( \
          tf.nn.conv2d(max_pool1, conv_filter_w2, strides=[1, 1, 1, 1], padding='SAME') + conv_filter_b2)
        max_pool2 = tf.nn.max_pool(relu_feature_maps2,
                                   ksize=[1, 1, 4, 1],
                                   strides=[1, 1, 1, 1],
                                   padding='SAME')

        x2 = tf.reshape(max_pool2, [483, dim])

        # Concatenate Mask and Data
        inputs = tf.concat(values=[x2, m], axis=1)
        G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
        G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
        # MinMax normalized output
        G_prob = tf.nn.sigmoid(tf.matmul(G_h2, G_W3) + G_b3)
        return G_prob

    # Discriminator
    def discriminator(x, h):
        # Concatenate Data and Hint
        inputs = tf.concat(values=[x, h], axis=1)
        D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
        D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
        D_logit = tf.matmul(D_h2, D_W3) + D_b3
        D_prob = tf.nn.sigmoid(D_logit)
        return D_prob

    ## CPH structure
    # Generator
    G_sample = generator(X_pre, M)
    X2 = X_pre[0, :, :, 0]
    # Combine with observed data
    Hat_X = X2 * M + G_sample * (1 - M)

    # Discriminator
    D_prob = discriminator(Hat_X, H)

    ## CPH loss
    D_loss_temp = -tf.reduce_mean(M * tf.log(D_prob + 1e-8) \
                                  + (1-M) * tf.log(1. - D_prob + 1e-8))

    G_loss_temp = -tf.reduce_mean((1 - M) * tf.log(D_prob + 1e-8))

    MSE_loss = \
    tf.reduce_mean((M * X2 - M * G_sample)**2) / tf.reduce_mean(M)

    D_loss = D_loss_temp
    G_loss = G_loss_temp + alpha * MSE_loss

    ## CPH solver
    D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
    G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)

    ## Iterations
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    # Start Iterations
    for it in tqdm(range(iterations)):

        # Sample batch
        batch_idx = sample_batch_index(no, batch_size)
        #print(len(batch_idx))
        image_mb = data_image[:, batch_idx, :, :]
        X_mb = norm_data_x[batch_idx, :]
        M_mb = data_m[batch_idx, :]
        # Sample random vectors
        Z_mb = uniform_sampler(0, 0.01, batch_size, dim)
        # Sample hint vectors
        H_mb_temp = binary_sampler(hint_rate, batch_size, dim)

        H_mb = M_mb * H_mb_temp
        # Combine random vectors with observed vectors
        X_mb = M_mb * X_mb + (1 - M_mb) * Z_mb
        image_mb[0, :, :, 0] = X_mb

        _, D_loss_curr = sess.run([D_solver, D_loss_temp],
                                  feed_dict={
                                      M: M_mb,
                                      X_pre: image_mb,
                                      H: H_mb
                                  })
        _, G_loss_curr, MSE_loss_curr = \
        sess.run([G_solver, G_loss_temp, MSE_loss],
                 feed_dict = {X_pre: image_mb, M: M_mb, H: H_mb})

    ## Return imputed data
    Z_mb = uniform_sampler(0, 0.01, no, dim)
    M_mb = data_m
    X_mb = norm_data_x
    X_mb = M_mb * X_mb + (1 - M_mb) * Z_mb
    image_mb = data_image
    image_mb[0, :, :, 0] = X_mb

    imputed_data = sess.run([G_sample], feed_dict={
        X_pre: image_mb,
        M: M_mb
    })[0]

    imputed_data = data_m * norm_data_x + (1 - data_m) * imputed_data

    # Renormalization
    #imputed_data = renormalization(imputed_data, norm_parameters)

    # Rounding
    imputed_data = rounding(imputed_data, data_x)

    return imputed_data
Example No. 26
def gain(data_x, feature_name, onehotencoder, ori_data_dim, gain_parameters):
    '''Impute missing values in data_x
  
  Args:
    - data_x: original data with missing values
    - feature_name: feature namelist of original data
    - onehotencoder: onehotencoder of this data
    - ori_data_dim: dimensions of original data    
    - gain_parameters: GAIN network parameters:
      - data_name: the file name of dataset
      - batch_size: Batch size
      - hint_rate: Hint rate
      - alpha: Hyperparameter
      - iterations: Iterations
      - onehot: the number of feature for onehot encoder (start from first feature)
      - predict: option for prediction mode
      
  Returns:
    - imputed_data: imputed data
  '''
    # Define mask matrix
    data_m = 1 - np.isnan(data_x)

    # System parameters
    data_name = gain_parameters['data_name']
    batch_size = gain_parameters['batch_size']
    hint_rate = gain_parameters['hint_rate']
    alpha = gain_parameters['alpha']
    iterations = gain_parameters['iterations']
    onehot = gain_parameters['onehot']
    predict = gain_parameters['predict']

    # Model Path
    model_path = 'model/' + data_name

    # Other parameters
    no, dim = data_x.shape

    # Hidden state dimensions
    h_dim = int(dim)

    # Normalization
    norm_data, norm_parameters = normalization(data_x)
    norm_data_x = np.nan_to_num(norm_data, 0)

    ## GAIN architecture
    # Input placeholders
    # Data vector
    X = tf.placeholder(tf.float32, shape=[None, dim], name='X')
    # Mask vector
    M = tf.placeholder(tf.float32, shape=[None, dim], name='M')
    # Hint vector
    H = tf.placeholder(tf.float32, shape=[None, dim], name='H')

    # Discriminator variables
    D_W1 = tf.Variable(xavier_init([dim * 2, h_dim]),
                       name='D_W1')  # Data + Hint as inputs
    D_b1 = tf.Variable(tf.zeros(shape=[h_dim]), name='D_b1')

    D_W2 = tf.Variable(xavier_init([h_dim, h_dim]), name='D_W2')
    D_b2 = tf.Variable(tf.zeros(shape=[h_dim]), name='D_b2')

    D_W3 = tf.Variable(xavier_init([h_dim, dim]), name='D_W3')
    D_b3 = tf.Variable(tf.zeros(shape=[dim]),
                       name='D_b3')  # Multi-variate outputs

    theta_D = [D_W1, D_W2, D_W3, D_b1, D_b2, D_b3]

    #Generator variables
    # Data + Mask as inputs (Random noise is in missing components)
    G_W1 = tf.Variable(xavier_init([dim * 2, h_dim]), name='G_W1')
    G_b1 = tf.Variable(tf.zeros(shape=[h_dim]), name='G_b1')

    G_W2 = tf.Variable(xavier_init([h_dim, h_dim]), name='G_W2')
    G_b2 = tf.Variable(tf.zeros(shape=[h_dim]), name='G_b2')

    G_W3 = tf.Variable(xavier_init([h_dim, dim]), name='G_W3')
    G_b3 = tf.Variable(tf.zeros(shape=[dim]), name='G_b3')

    theta_G = [G_W1, G_W2, G_W3, G_b1, G_b2, G_b3]

    ## GAIN functions
    # Generator
    def generator(x, m):
        # Concatenate Mask and Data
        inputs = tf.concat(values=[x, m], axis=1)
        G_h1 = tf.nn.relu(tf.matmul(inputs, G_W1) + G_b1)
        G_h2 = tf.nn.relu(tf.matmul(G_h1, G_W2) + G_b2)
        # MinMax normalized output
        G_prob = tf.nn.sigmoid(tf.matmul(G_h2, G_W3) + G_b3)
        return G_prob

    # Discriminator
    def discriminator(x, h):
        # Concatenate Data and Hint
        inputs = tf.concat(values=[x, h], axis=1)
        D_h1 = tf.nn.relu(tf.matmul(inputs, D_W1) + D_b1)
        D_h2 = tf.nn.relu(tf.matmul(D_h1, D_W2) + D_b2)
        D_logit = tf.matmul(D_h2, D_W3) + D_b3
        D_prob = tf.nn.sigmoid(D_logit)
        return D_prob

    ## GAIN structure
    # Generator
    G_sample = generator(X, M)

    # Combine with observed data
    Hat_X = X * M + G_sample * (1 - M)

    # Discriminator
    D_prob = discriminator(Hat_X, H)

    ## GAIN loss
    D_loss_temp = -tf.reduce_mean(M * tf.log(D_prob + 1e-8) \
                                  + (1-M) * tf.log(1. - D_prob + 1e-8))

    G_loss_temp = -tf.reduce_mean((1 - M) * tf.log(D_prob + 1e-8))

    MSE_loss = \
    tf.reduce_mean((M * X - M * G_sample)**2) / tf.reduce_mean(M)

    D_loss = D_loss_temp
    G_loss = G_loss_temp + alpha * MSE_loss

    ## GAIN solver
    D_solver = tf.train.AdamOptimizer().minimize(D_loss, var_list=theta_D)
    G_solver = tf.train.AdamOptimizer().minimize(G_loss, var_list=theta_G)

    ## Iterations
    sess = tf.Session()
    saver = tf.train.Saver()
    if predict is True and os.path.exists(model_path + '.ckpt.meta'):
        print("Model Restore")
        saver.restore(sess, model_path + '.ckpt')
    else:
        sess.run(tf.global_variables_initializer())

    # Start Iterations
    for it in tqdm(range(iterations)):

        # Sample batch
        batch_idx = sample_batch_index(no, batch_size)
        X_mb = norm_data_x[batch_idx, :]
        M_mb = data_m[batch_idx, :]
        # Sample random vectors
        Z_mb = uniform_sampler(0, 0.01, batch_size, dim)
        # Sample hint vectors
        H_mb_temp = binary_sampler(hint_rate, batch_size, dim)
        H_mb = M_mb * H_mb_temp

        # Combine random vectors with observed vectors
        X_mb = M_mb * X_mb + (1 - M_mb) * Z_mb

        _, D_loss_curr = sess.run([D_solver, D_loss_temp],
                                  feed_dict={
                                      M: M_mb,
                                      X: X_mb,
                                      H: H_mb
                                  })
        _, G_loss_curr, MSE_loss_curr = \
        sess.run([G_solver, G_loss_temp, MSE_loss],
                 feed_dict = {X: X_mb, M: M_mb, H: H_mb})
    if predict is False:
        save_path = saver.save(sess, model_path + '.ckpt')

    ## Return imputed data
    Z_mb = uniform_sampler(0, 0.01, no, dim)
    M_mb = data_m
    X_mb = norm_data_x
    X_mb = M_mb * X_mb + (1 - M_mb) * Z_mb

    imputed_data = sess.run([G_sample], feed_dict={X: X_mb, M: M_mb})[0]

    imputed_data = data_m * norm_data_x + (1 - data_m) * imputed_data

    # Renormalization
    imputed_data = renormalization(imputed_data, norm_parameters)

    # Rounding
    imputed_data = rounding(imputed_data, data_x)

    # Reverse encoding
    if onehot > 0:
        imputed_data = reverse_encoding(imputed_data, feature_name,
                                        onehotencoder, onehot, ori_data_dim)

    return imputed_data
Example No. 27
            print('Normalization type: ', bn)
            if bn == 'ele':
                data_tmp = copy.deepcopy(data)
                for i in range(len(data_tmp)):
                    for j in range(len(data_tmp[0])):
                        data_tmp[i][j] = utils.norminy(data_tmp[i][j])
            elif bn == 'sample':
                data_tmp = copy.deepcopy(data)
                for i in range(len(data_tmp)):
                    for j in range(len(data_tmp[0])):
                        data_tmp[i][j] = utils.norminx(data_tmp[i][j])
            elif bn == 'global':
                data_tmp = copy.deepcopy(data)
                for i in range(len(data_tmp)):
                    for j in range(len(data_tmp[0])):
                        data_tmp[i][j] = utils.normalization(data_tmp[i][j])
            elif bn == 'none':
                data_tmp = copy.deepcopy(data)
            else:
                pass

            trial_total, category_number, _ = utils.get_number_of_label_n_trial(
                dataset_name)

            # training settings
            batch_size = 32
            iteration = 15000
            lr = 0.01
            momentum = 0.9
            log_interval = 10
Example No. 28
te_list = [line.rstrip('\n') for line in file]
file.close()

for idx in range(len(te_list)):

    print(idx)
    path = te_list[idx]
   
    mask = skimage.img_as_float64(imread(dataset_add + "/GEE_mapbiomas_masks/"+ path + ".tif"))

    if np.max(mask.shape) <= 400:  

        mask = mask/255.
        mask  = np.expand_dims(mask, axis=2)

        img1  = U.normalization(skimage.img_as_float64(imread(dataset_add + "GEE_mapbiomas/"+ path + "_2019-01-01.tif")), mean=dataset_mean_ind, std=dataset_std_ind)
        img2  = U.normalization(skimage.img_as_float64(imread(dataset_add + "GEE_mapbiomas/"+ path + "_2019-04-01.tif")), mean=dataset_mean_ind, std=dataset_std_ind)
        img3  = U.normalization(skimage.img_as_float64(imread(dataset_add + "GEE_mapbiomas/"+ path + "_2019-07-01.tif")), mean=dataset_mean_ind, std=dataset_std_ind)
        img4  = U.normalization(skimage.img_as_float64(imread(dataset_add + "GEE_mapbiomas/"+ path + "_2019-10-01.tif")), mean=dataset_mean_ind, std=dataset_std_ind)
        img5  = U.normalization(skimage.img_as_float64(imread(dataset_add + "GEE_mapbiomas/"+ path + "_2020-01-01.tif")), mean=dataset_mean_ind, std=dataset_std_ind)

        imgcon = np.concatenate((img1,img2,img3,img4,img5),axis=2)

        data   = U.forward_crop(imgcon, window=(32,32), channels=10, stride=4)
        labels = U.forward_crop(mask, (32,32), channels=1, stride=4)
        
        pred = model.predict(data, batch_size=8, verbose=100)
        pred = U.reconstruct(pred, mask.shape, window=(32,32), channels=1, stride=4)


        y_scores_i = pred.reshape(pred.shape[0]*pred.shape[1]*pred.shape[2], 1)
Example No. 29
  def _build(self):
    """Builds block according to the arguments."""
    # pylint: disable=g-long-lambda
    bid = itertools.count(0)
    get_norm_name = lambda: 'tpu_batch_normalization' + ('' if not next(
        bid) else '_' + str(next(bid) // 2))
    cid = itertools.count(0)
    get_conv_name = lambda: 'conv2d' + ('' if not next(cid) else '_' + str(
        next(cid) // 2))
    # pylint: enable=g-long-lambda

    mconfig = self._mconfig
    filters = self._block_args.input_filters * self._block_args.expand_ratio
    kernel_size = self._block_args.kernel_size

    # Expansion phase. Called if not using fused convolutions and expansion
    # phase is necessary.
    if self._block_args.expand_ratio != 1:
      self._expand_conv = tf.keras.layers.Conv2D(
          filters=filters,
          kernel_size=1,
          strides=1,
          kernel_initializer=conv_kernel_initializer,
          padding='same',
          data_format=self._data_format,
          use_bias=False,
          name=get_conv_name())
      self._norm0 = utils.normalization(
          mconfig.bn_type,
          axis=self._channel_axis,
          momentum=mconfig.bn_momentum,
          epsilon=mconfig.bn_epsilon,
          groups=mconfig.gn_groups,
          name=get_norm_name())

    # Depth-wise convolution phase. Called if not using fused convolutions.
    self._depthwise_conv = tf.keras.layers.DepthwiseConv2D(
        kernel_size=kernel_size,
        strides=self._block_args.strides,
        depthwise_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False,
        name='depthwise_conv2d')

    self._norm1 = utils.normalization(
        mconfig.bn_type,
        axis=self._channel_axis,
        momentum=mconfig.bn_momentum,
        epsilon=mconfig.bn_epsilon,
        groups=mconfig.gn_groups,
        name=get_norm_name())

    if self._has_se:
      num_reduced_filters = max(
          1, int(self._block_args.input_filters * self._block_args.se_ratio))
      self._se = SE(self._mconfig, num_reduced_filters, filters, name='se')
    else:
      self._se = None

    # Output phase.
    filters = self._block_args.output_filters
    self._project_conv = tf.keras.layers.Conv2D(
        filters=filters,
        kernel_size=1,
        strides=1,
        kernel_initializer=conv_kernel_initializer,
        padding='same',
        data_format=self._data_format,
        use_bias=False,
        name=get_conv_name())
    self._norm2 = utils.normalization(
        mconfig.bn_type,
        axis=self._channel_axis,
        momentum=mconfig.bn_momentum,
        epsilon=mconfig.bn_epsilon,
        groups=mconfig.gn_groups,
        name=get_norm_name())
Example No. 30
digits_train, y_train = utils.read('train')
digits_test, y_test = utils.read('test')

digits_train, digits_test = utils.resize(digits_train,
                                         16), utils.resize(digits_test, 16)
digits_train, digits_test = utils.get_deskew_imgs(
    digits_train), utils.get_deskew_imgs(digits_test)
holes_train, holes_test = utils.get_hole_features(
    digits_train), utils.get_hole_features(digits_test)
pix_train, pix_test = utils.get_pix_features(
    digits_train), utils.get_pix_features(digits_test)
X_train, X_test = np.hstack([pix_train,
                             holes_train]), np.hstack([pix_test, holes_test])

mean_normalizer = utils.normalization(X_train)
X_train = mean_normalizer.transform(X_train)
X_test = mean_normalizer.transform(X_test)

mx_score = 0
best = (-1, -1)
clf = knn.KNN(mode='weighted')
for n_component in range(3, 61, 3):
    for k in range(1, 11):
        _pca = pca.PCA(X_train)
        X_train_reduced = _pca.transform(X_train, n_component)
        X_test_reduced = _pca.transform(X_test, n_component)

        start_time = timeit.default_timer()
        validation_scores = []
        kf = KFold(n_splits=10)
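Unlike most examples on this page, here `utils.normalization(X_train)` returns a fitted object whose `transform` method is reused on the test split. A minimal stand-in for such an object (the class name and the exact statistics are assumptions):

import numpy as np

class MeanNormalizer:
    # Hypothetical stand-in for the object returned by utils.normalization.
    def __init__(self, X):
        # Fit on the training features only; reuse the same statistics later.
        self.mean = X.mean(axis=0)
        self.std = X.std(axis=0) + 1e-8

    def transform(self, X):
        return (X - self.mean) / self.std

def normalization(X):
    return MeanNormalizer(X)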
Example No. 31
def kernel_fusion_classification(input_kernels_tr, input_kernels_te, a, feat_types, class_labels, train_test_idx, \
                                 C=[1], square_kernels=True, opt_criterion='acc', verbose=False):
    '''

    :param input_kernels_tr:
    :param input_kernels_te:
    :param a:
    :param feat_types:
    :param class_labels:
    :param train_test_idx:
    :param C:
    :param square_kernels:
    :param opt_criterion:
    :return:
    '''

    # Assign weights to channels
    feat_weights = INTERNAL_PARAMETERS['weights']
    if feat_weights is None:  # not specified a priori: weight channels uniformly
        feat_weights = {feat_t : 1.0/len(input_kernels_tr) for feat_t in input_kernels_tr.keys()}

    tr_inds, te_inds = train_test_idx[0], train_test_idx[1]
    # lb = LabelBinarizer(neg_label=-1, pos_label=1)

    class_ints = np.dot(class_labels, np.logspace(0, class_labels.shape[1]-1, class_labels.shape[1]))
    skf = cross_validation.StratifiedKFold(class_ints[tr_inds], n_folds=4, shuffle=True, random_state=74)

    Rval = np.zeros((class_labels.shape[1], len(a), len(C)), dtype=np.float32)
    for k in xrange(class_labels.shape[1]):
        print "[Validation] Optimizing weights and svm-C for class %d/%d" % (k+1, class_labels.shape[1])
        for i, a_i in enumerate(a):
            kernels_tr = deepcopy(input_kernels_tr)
            # kernels_te = deepcopy(input_kernels_te)

            for feat_t in kernels_tr.keys():
                if isinstance(kernels_tr[feat_t]['root'], tuple):
                    kernels_tr[feat_t]['root'] = utils.normalize(kernels_tr[feat_t]['root'][0])
                    x = kernels_tr[feat_t]['nodes']
                    kernels_tr[feat_t]['nodes'] = utils.normalize(a_i[1]*x[0]+(1-a_i[1])*x[1] if len(x)==2 else x[0])

                    kernels_tr[feat_t] = a_i[2]*np.array(kernels_tr[feat_t]['root']) + (1-a_i[2])*np.array(kernels_tr[feat_t]['nodes'])

                else:
                    kernels_tr[feat_t]['root']  = [utils.normalize(x[0] if np.sum(x[0])>0 else kernels_tr[feat_t]['nodes'][j][0])
                                                   for j,x in enumerate(kernels_tr[feat_t]['root'])]
                    kernels_tr[feat_t]['nodes'] = [utils.normalize(a_i[1][j]*x[0]+(1-a_i[1][j])*x[1] if len(x)==2 else x[0])
                                                   for j,x in enumerate(kernels_tr[feat_t]['nodes'])]

                    kernels_tr[feat_t] = list(a_i[2]*np.array(kernels_tr[feat_t]['root']) + (1-a_i[2])*np.array(kernels_tr[feat_t]['nodes']))

            K_tr = None
            # Weight each channel accordingly
            for j, feat_t in enumerate(kernels_tr.keys()):
                if K_tr is None:
                    K_tr = np.zeros(kernels_tr[feat_t].shape if isinstance(kernels_tr[feat_t],np.ndarray) else kernels_tr[feat_t][0].shape, dtype=np.float32)
                K_tr += a_i[3][j] * utils.sum_of_arrays(kernels_tr[feat_t], a_i[0])

            if square_kernels:
                K_tr = np.sign(K_tr) * np.sqrt(np.abs(K_tr))

            for j, c_j in enumerate(C):
                # print l, str(i+1) + '/' + str(len(C[k][0])), str(j+1) + '/' + str(len(C[k][1]))
                Rval[k,i,j] = 0
                for (val_tr_inds, val_te_inds) in skf:
                    # test instances not indexed directly, but a mask is created excluding negative instances
                    val_te_msk = np.ones(tr_inds.shape, dtype=np.bool)
                    val_te_msk[val_tr_inds] = False
                    negatives_msk = np.negative(np.any(class_labels[tr_inds] > 0, axis=1))
                    val_te_msk[negatives_msk] = False

                    acc_tmp, ap_tmp, _ = _train_and_classify_binary(
                        K_tr[val_tr_inds,:][:,val_tr_inds], K_tr[val_te_msk,:][:,val_tr_inds], \
                        class_labels[tr_inds,k][val_tr_inds], class_labels[tr_inds,k][val_te_msk], \
                        probability=True, c=c_j)

                    if str.lower(opt_criterion) == 'map':
                        Rval[k,i,j] += ap_tmp/skf.n_folds # if acc_tmp > 0.50 else 0
                    else: # 'acc' or other criterion
                        Rval[k,i,j] += acc_tmp/skf.n_folds

    # print p, np.mean(p)

    # X, Y = np.meshgrid(np.linspace(0,len(c)-1,len(c)),np.linspace(0,len(a)-1,len(a)))
    # fig = plt.figure(figsize=plt.figaspect(0.5))
    # for k in xrange(class_labels.shape[1]):
    #     ax = fig.add_subplot(2,5,k+1, projection='3d')
    #     ax.plot_surface(X, Y, Rval_acc[k,:,:])
    #     ax.set_zlim([0.5, 1])
    #     ax.set_xlabel('c value')
    #     ax.set_ylabel('a value')
    #     ax.set_zlabel('acc [0-1]')
    # plt.show()



    te_msk = np.ones((len(te_inds),), dtype=np.bool)
    negatives_msk = np.negative(np.any(class_labels[te_inds] > 0, axis=1))
    te_msk[negatives_msk] = False

    results_val = []
    for k in xrange(class_labels.shape[1]):
        best_res = np.max(Rval[k,:,:])
        print best_res, "\t",
        results_val.append(best_res)
    print("Validation best %s : %2.2f" %(opt_criterion, np.mean(results_val)*100.0))


    acc_classes = []
    ap_classes = []
    for k in xrange(class_labels.shape[1]):
        i,j = np.unravel_index(np.argmax(Rval[k,:,:]), Rval[k,:,:].shape)
        a_best, c_best = a[i], C[j]
        print a_best, c_best

        kernels_tr = deepcopy(input_kernels_tr)
        kernels_te = deepcopy(input_kernels_te)

        for feat_t in kernels_tr.keys():
            if isinstance(kernels_tr[feat_t]['root'], tuple):
                kernels_tr[feat_t]['root'], pr = utils.normalization(kernels_tr[feat_t]['root'][0])
                kernels_te[feat_t]['root']     = pr * kernels_te[feat_t]['root'][0]

                xn_tr, xn_te = kernels_tr[feat_t]['nodes'], kernels_te[feat_t]['nodes']
                kernels_tr[feat_t]['nodes'], pn = utils.normalization(a_best[1]*xn_tr[0]+(1-a_best[1])*xn_tr[1] if len(xn_tr)==2 else xn_tr[0])
                kernels_te[feat_t]['nodes']     = pn * (a_best[1]*xn_te[0]+(1-a_best[1])*xn_te[1] if len(xn_te)==2 else xn_te[0])

                kernels_tr[feat_t] = a_best[2]*kernels_tr[feat_t]['root'] + (1-a_best[2])*kernels_tr[feat_t]['nodes']
                kernels_te[feat_t] = a_best[2]*kernels_te[feat_t]['root'] + (1-a_best[2])*kernels_te[feat_t]['nodes']
            else:
                for i in xrange(len(kernels_tr[feat_t]['root'])):
                    kernels_tr[feat_t]['root'][i], pr  = utils.normalization(kernels_tr[feat_t]['root'][i][0] if np.sum(kernels_tr[feat_t]['root'][i][0]) > 0
                                                                             else kernels_tr[feat_t]['nodes'][i][0])
                    kernels_te[feat_t]['root'][i]      = pr * (kernels_te[feat_t]['root'][i][0] if np.sum(kernels_te[feat_t]['root'][i][0]) > 0
                                                               else kernels_te[feat_t]['nodes'][i][0])

                    xn_tr, xn_te = kernels_tr[feat_t]['nodes'][i], kernels_te[feat_t]['nodes'][i]
                    kernels_tr[feat_t]['nodes'][i], pn = utils.normalization(a_best[1][i]*xn_tr[0]+(1-a_best[1][i])*xn_tr[1] if len(xn_tr)==2 else xn_tr[0])
                    kernels_te[feat_t]['nodes'][i]     = pn * (a_best[1][i]*xn_te[0]+(1-a_best[1][i])*xn_te[1] if len(xn_te)==2 else xn_te[0])

                kernels_tr[feat_t] = list(a_best[2]*np.array(kernels_tr[feat_t]['root']) + (1-a_best[2])*np.array(kernels_tr[feat_t]['nodes']))
                kernels_te[feat_t] = list(a_best[2]*np.array(kernels_te[feat_t]['root']) + (1-a_best[2])*np.array(kernels_te[feat_t]['nodes']))

        K_tr = K_te = None
        # Weight each channel accordingly
        for j,feat_t in enumerate(kernels_tr.keys()):
            if K_tr is None:
                K_tr = np.zeros(kernels_tr[feat_t].shape if isinstance(kernels_tr[feat_t],np.ndarray) else kernels_tr[feat_t][0].shape, dtype=np.float32)
                K_te = np.zeros(kernels_te[feat_t].shape if isinstance(kernels_te[feat_t],np.ndarray) else kernels_te[feat_t][0].shape, dtype=np.float32)
            K_tr += a_best[3][j] * utils.sum_of_arrays(kernels_tr[feat_t], a_best[0])
            K_te += a_best[3][j] * utils.sum_of_arrays(kernels_te[feat_t], a_best[0])

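        # Optionally dampen large kernel values with an element-wise signed square root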
        if square_kernels:
            K_tr, K_te = np.sign(K_tr) * np.sqrt(np.abs(K_tr)), np.sign(K_te) * np.sqrt(np.abs(K_te)) #np.sqrt(K_tr), np.sqrt(K_te)
        acc, ap, _ = _train_and_classify_binary(K_tr, K_te[te_msk], class_labels[tr_inds,k], class_labels[te_inds,k][te_msk], probability=True, c=c_best)

        acc_classes.append(acc)
        ap_classes.append(ap)

    return dict(acc_classes=acc_classes, ap_classes=ap_classes)
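
The function above repeatedly relies on one pattern: utils.normalization is applied to a training kernel and returns both the normalized kernel and the scaling factor, which is then reused to scale the corresponding test-vs-train kernel. The short sketch below illustrates that pattern with a toy scaling rule (dividing by the mean kernel value); the rule is an assumption for illustration only, not the actual implementation of utils.normalization.

# Toy illustration only -- the real utils.normalization may differ.
import numpy as np

def toy_normalization(K):
    # Rescale a precomputed kernel and return the rescaled kernel together
    # with the factor that was applied, mirroring the (kernel, factor) pair
    # unpacked from utils.normalization in the code above.
    p = 1.0 / np.mean(K)
    return p * K, p

rng = np.random.RandomState(0)
X_tr, X_te = rng.rand(8, 5), rng.rand(3, 5)
K_tr = X_tr.dot(X_tr.T)      # train-vs-train kernel
K_te = X_te.dot(X_tr.T)      # test-vs-train kernel

K_tr, p = toy_normalization(K_tr)
K_te = p * K_te              # reuse the training factor on the test kernel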
Exemplo n.º 32
0
def learning_based_fusion_classification(input_kernels_tr, input_kernels_te, a, feat_types, class_labels, train_test_idx, \
                                         C=[1], square_kernel=False, opt_criterion='acc'):
    '''
    Learn a stacked (learning-based) fusion of several precomputed kernels and
    evaluate binary classification per class.

    :param input_kernels_tr: per-feature-type dict of precomputed training kernels (train x train)
    :param input_kernels_te: per-feature-type dict of precomputed test kernels (test x train)
    :param a: candidate trade-off values weighting the root representation vs the nodes/edges representation
    :param feat_types: feature types to consider
    :param class_labels: binary label matrix, one column per class
    :param train_test_idx: (train indices, test indices) pair
    :param C: candidate SVM regularization (C) values
    :param square_kernel: if True, apply a signed element-wise square root to each kernel
    :param opt_criterion: validation criterion, 'acc' or 'map'
    :return: dict with per-class accuracies and average precisions
    '''

    tr_inds, te_inds = train_test_idx[0], train_test_idx[1]
    # lb = LabelBinarizer(neg_label=-1, pos_label=1)

    class_ints = np.dot(class_labels, np.logspace(0, class_labels.shape[1]-1, class_labels.shape[1]))
    skf = cross_validation.StratifiedKFold(class_ints[tr_inds], n_folds=10, shuffle=False, random_state=74)

    # S = [None] * class_labels.shape[1]  # selected (best) params
    # p = [None] * class_labels.shape[1]  # performances
    # C = [(a, C) for k in xrange(class_labels.shape[1])]  # candidate values for params

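    # Flatten the nested per-feature kernel dictionaries into flat lists of
    # kernel tuples, discarding kernels that are identically zero.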
    kernels_tr = []
    for feat_t in input_kernels_tr.keys():
        for k,v in input_kernels_tr[feat_t].iteritems():
            for x in v:
                if np.any(x != 0):
                    kernels_tr.append(x)
    kernels_te = []
    for feat_t in input_kernels_te.keys():
        for k,v in input_kernels_te[feat_t].iteritems():
            for x in v:
                if np.any(x != 0):
                    kernels_te.append(x)

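    # Stage 1: per class and per kernel, grid-search the (a, C) pair by
    # 10-fold stratified cross-validation on the training split.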
    Rp = [None] * class_labels.shape[1]
    for cl in xrange(class_labels.shape[1]):
        print "[Validation] Optimizing weights and svm-C for class %d/%d" % (cl + 1, class_labels.shape[1])
        Rp[cl] = np.zeros((len(kernels_tr),len(a),len(C)), dtype=np.float32)

        for i, a_i in enumerate(a):
            for k, x in enumerate(kernels_tr):
                K_tr = utils.normalize(a_i*x[0]+(1-a_i)*x[1]) if len(x)==2 else utils.normalize(x[0])

                if square_kernel:
                    K_tr = np.sign(K_tr) * np.sqrt(np.abs(K_tr))

                for j, c_j in enumerate(C):
                    for (val_tr_inds, _) in skf:
                        # test instances not indexed directly, but a mask is created excluding negative instances
                        val_te_msk = np.ones(tr_inds.shape, dtype=bool)
                        val_te_msk[val_tr_inds] = False
                        negatives_msk = np.all(class_labels[tr_inds] <= 0, axis=1)
                        val_te_msk[negatives_msk] = False

                        acc, ap, _ = _train_and_classify_binary(
                            K_tr[val_tr_inds,:][:,val_tr_inds], K_tr[val_te_msk,:][:,val_tr_inds], \
                            class_labels[tr_inds,cl][val_tr_inds], class_labels[tr_inds,cl][val_te_msk], \
                            probability=True, c=c_j)

                        # accumulate the chosen validation criterion over the folds
                        if str.lower(opt_criterion) == 'map':
                            Rp[cl][k,i,j] += ap / skf.n_folds
                        else:  # 'acc' or any other criterion
                            Rp[cl][k,i,j] += acc / skf.n_folds

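    # Keep, for every class and kernel, the (a, C) pair with the best
    # validation score.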
    params = [None] * class_labels.shape[1]
    perfs = [None] * class_labels.shape[1]
    for cl in xrange(class_labels.shape[1]):
        params[cl] = np.zeros((Rp[cl].shape[0],2), dtype=np.float32)
        perfs[cl]  = np.zeros((Rp[cl].shape[0],), dtype=np.float32)
        for k in xrange(Rp[cl].shape[0]):
            P = Rp[cl][k,:,:]  # #{a}x#{C} validation-performance matrix
            coords = np.unravel_index(np.argmax(P), P.shape)
            params[cl][k,0], params[cl][k,1] = a[coords[0]], C[coords[1]]
            perfs[cl][k] = P[coords]  # best validation score achieved by this kernel

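    # Stage 2: build out-of-fold meta-features (the per-kernel SVM decision
    # values) and grid-search a stacked SVM on top of them.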
    clfs = [None] * class_labels.shape[1]
    for cl in xrange(class_labels.shape[1]):
        print "[Validation] Optimizing stacked classifiers %d/%d" % (cl + 1, class_labels.shape[1])

        D_tr = []  # training data for stacked learning (predicted outputs from single clfs)
        y_tr = []
        for (val_tr_inds, val_te_inds) in skf:
            # Remove negatives from test data
            # val_te_msk = np.ones(tr_inds.shape, dtype=np.bool)
            # val_te_msk[val_tr_inds] = False
            # negatives_msk = np.all(class_labels[tr_inds] <= 0, axis=1)
            # val_te_msk[negatives_msk] = False

            # Get the predictions of each and every kernel
            X = np.zeros((len(val_te_inds), params[cl].shape[0]))
            # X = np.zeros((len(val_te_inds), 2*params[cl].shape[0]))  # matrix of predictions
            for k,x in enumerate(kernels_tr):
                # Value of best parameters
                a_val, c_val = params[cl][k,0], params[cl][k,1]

                # Merge a kernel according to a_val
                K_tr = utils.normalize(a_val*x[0]+(1-a_val)*x[1] if len(x)==2 else x[0])

                if square_kernel:
                    K_tr = np.sign(K_tr) * np.sqrt(np.abs(K_tr))

                # Train using c_val as SVM-C parameter
                clf = _train_binary(K_tr[val_tr_inds,:][:,val_tr_inds], class_labels[tr_inds,cl][val_tr_inds], probability=True, c=c_val)
                X[:,k] = clf.decision_function(K_tr[val_te_inds,:][:,val_tr_inds])
                # X[:,2*k] = clf.predict_proba(K_tr[val_te_inds,:][:,val_tr_inds])[:,0]
                # X[:,2*k+1] = clf.decision_function(K_tr[val_te_inds,:][:,val_tr_inds])
            D_tr.append(X)
            y_tr.append(class_labels[tr_inds,cl][val_te_inds])

        D_tr = np.vstack(D_tr)
        y_tr = np.concatenate(y_tr)

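        # Score the stacked classifier with the same criterion (mAP or
        # accuracy) used to validate the base kernels.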
        if str.lower(opt_criterion) == 'map':
            grid_scorer = make_scorer(average_precision_score, greater_is_better=True)
        else:
            grid_scorer = make_scorer(average_binary_accuracy, greater_is_better=True)
        # 2-fold stratified CV for the stacked classifier's hyper-parameter search
        inner_cv = cross_validation.StratifiedKFold(class_labels[tr_inds,cl], n_folds=2, shuffle=False, random_state=74)
        clfs[cl] = grid_search.GridSearchCV(svm.SVC(), svm_parameters, \
                                       n_jobs=20, cv=inner_cv, scoring=grid_scorer, verbose=False)
        clfs[cl].fit(D_tr, y_tr)

    val_scores = [clf.best_score_ for clf in clfs]
    print val_scores
    print np.mean(val_scores), np.std(val_scores)

    # quit()

    #
    # Test
    #

    # Train individual classifiers to use in test partition
    ind_clfs = [None] * class_labels.shape[1]
    F = np.zeros((class_labels.shape[1], len(kernels_tr)))
    for cl in xrange(class_labels.shape[1]):
        ind_clfs[cl] = [None] * len(kernels_tr)

        for k,x in enumerate(kernels_tr):
            a_val, c_val = params[cl][k,0],params[cl][k,1]
            K_tr, F[cl,k] = utils.normalization((a_val*x[0]+(1-a_val)*x[1]) if len(x)==2 else x[0])
            if square_kernel:
                K_tr = np.sign(K_tr) * np.sqrt(np.abs(K_tr))
            ind_clfs[cl][k] = _train_binary(K_tr, class_labels[tr_inds,cl], probability=True, c=c_val)

    # Use a mask to exclude negatives from test
    te_msk = np.ones((len(te_inds),), dtype=bool)
    negatives_msk = np.all(class_labels[te_inds] <= 0, axis=1)
    te_msk[negatives_msk] = False

    # Construct the stacked test data and predict
    acc_classes = []
    ap_classes = []
    for cl in xrange(class_labels.shape[1]):
        X_te = np.zeros((len(te_inds), len(kernels_te)))
        # X_te = np.zeros((len(te_inds), 2*len(kernels_te)))
        for k,x in enumerate(kernels_te):
            a_val, c_val = params[cl][k,0], params[cl][k,1]

            K_te = F[cl,k] * ((a_val*x[0]+(1-a_val)*x[1]) if len(x)==2 else x[0])
            if square_kernel:
                K_te = np.sign(K_te) * np.sqrt(np.abs(K_te))
            X_te[:,k] = ind_clfs[cl][k].decision_function(K_te)
            # X_te[:,2*k] = ind_clfs[cl][k].predict_proba(K_te)[:,0]
            # X_te[:,2*k+1] = ind_clfs[cl][k].decision_function(K_te)

        X_te = X_te[te_msk,:]

        y_preds = clfs[cl].predict(X_te)
        acc = average_binary_accuracy(class_labels[te_inds,cl][te_msk], y_preds)
        # AP is a ranking metric, so score it with continuous decision values rather than hard predictions
        ap = average_precision_score(class_labels[te_inds,cl][te_msk], clfs[cl].decision_function(X_te))

        acc_classes.append(acc)
        ap_classes.append(ap)

    print acc_classes, np.mean(acc_classes)
    print ap_classes, np.mean(ap_classes)
    return dict(acc_classes=acc_classes, ap_classes=ap_classes)
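
For reference, below is a minimal sketch of the stacked-fusion idea implemented by learning_based_fusion_classification, written against the current scikit-learn API (model_selection in place of the deprecated cross_validation and grid_search modules). The kernel handling and the hyper-parameter search are deliberately simplified, and the names stacked_fusion_sketch, base_kernels_tr and base_kernels_te are illustrative assumptions, not part of the original code.

import numpy as np
from sklearn.model_selection import StratifiedKFold
from sklearn.svm import SVC


def stacked_fusion_sketch(base_kernels_tr, base_kernels_te, y_tr, C=1.0):
    # base_kernels_tr: list of precomputed (n_tr x n_tr) training kernels
    # base_kernels_te: list of precomputed (n_te x n_tr) test kernels
    n_tr = y_tr.shape[0]
    skf = StratifiedKFold(n_splits=3)

    # Stage 1: out-of-fold decision values of per-kernel SVMs become the
    # meta-features of the stacked classifier.
    D_tr = np.zeros((n_tr, len(base_kernels_tr)))
    for k, K in enumerate(base_kernels_tr):
        for tr_idx, te_idx in skf.split(np.zeros(n_tr), y_tr):
            clf = SVC(kernel='precomputed', C=C)
            clf.fit(K[np.ix_(tr_idx, tr_idx)], y_tr[tr_idx])
            D_tr[te_idx, k] = clf.decision_function(K[np.ix_(te_idx, tr_idx)])

    # Stage 2: train the stacked (meta) classifier on the meta-features.
    meta = SVC().fit(D_tr, y_tr)

    # Test: refit each base classifier on all training data and stack its
    # decision values on the test kernels.
    n_te = base_kernels_te[0].shape[0]
    D_te = np.zeros((n_te, len(base_kernels_tr)))
    for k, (K_tr, K_te) in enumerate(zip(base_kernels_tr, base_kernels_te)):
        clf = SVC(kernel='precomputed', C=C).fit(K_tr, y_tr)
        D_te[:, k] = clf.decision_function(K_te)
    return meta.predict(D_te)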