def load_IEC(denoise=True, pre=False):
    """
    loading IEC CSE dataset and preprocess the signal.

    Arg:
        pre: (bool) load from saved preprocessed data or not
        denoise: (bool) preprocess using wavelet thresholding or not
    Returns:
        ekg_sig: (Tensor) with sized [#signals, 1 lead, signal_length]
    """
    # (num of ekg signal, length, 1)
    if pre:
        ekg_sig = torch.load(config["RetinaNet"]["output_path"] +
                             "IEC_preprocessed_data.pt").to('cuda')
    else:
        ekg_sig = []
        for i in range(1, 126):
            ekg_filename = config["General"]["IEC_path"] + 'CSE' + str(
                i).rjust(3, '0') + '.raw'
            try:
                sig = read_IEC(ekg_filename)
                sig = np.reshape(sig[0], (len(sig[0]), 1))
                ekg_sig.append(sig.astype(float))
            except IOError:
                print("file {} does not exist".format("CSE" +
                                                      str(i).rjust(3, '0')))

        ekg_sig = IEC_dataset_preprocessing(ekg_sig, smooth=False, dns=denoise)
        ekg_sig = ekg_sig.to('cuda')
        ekg_sig = normalize(ekg_sig, instance=True)
        torch.save(
            ekg_sig,
            config["RetinaNet"]["output_path"] + "IEC_preprocessed_data.pt")

    return ekg_sig
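
A minimal usage sketch (not part of the source), assuming the module-level config, read_IEC, IEC_dataset_preprocessing, and normalize helpers are available and a CUDA device is present (the function moves the tensor to 'cuda'):

    ekg_sig = load_IEC(denoise=True, pre=False)  # first call: preprocess and cache to disk
    ekg_sig = load_IEC(pre=True)                 # later calls: reload the cached tensor
    print(ekg_sig.shape)                         # torch.Size([num_signals, 1, signal_length])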
def none_mdas_process(train,
                      test,
                      n_components=500,
                      p=70,
                      alpha=0.5,
                      lamb1=0.1,
                      lamb2=0.1,
                      random_state=42):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    x_tar_ds, y_test = test
    x_sou_ds, y_train = random_sampling_ds(x_sou_ds, y_train, random_state)
    x_sou_ns = random_sampling_ns(x_tar_ns, x_sou_ns, random_state)
    y_train = np.vstack(y_train)[:, 0]

    # dictionary learning
    dic = x_sou_ns + x_sou_ds
    dic.append(x_tar_ns)
    dic = np.vstack(dic)
    dic = np.unique(dic, axis=0)
    mdl = MiniBatchDictionaryLearning(n_components=n_components,
                                      n_iter=150,
                                      random_state=random_state)
    mdl.fit(dic)
    dic = mdl.components_
    print('Dictionary learned')

    model = MDAS(p=p, alpha=alpha, lamb1=lamb1, lamb2=lamb2, dic=dic)
    x_sou_ds = model.fit(x_tar_ns, x_sou_ns, x_sou_ds)
    X_test = model.transform(x_tar_ds)
    X_train = np.vstack(x_sou_ds)

    X_train, X_test = normalize(X_train, X_test)
    return X_train, X_test, y_train, y_test[:, 0]
def plot_image_analytical(image_nc,
                          coords,
                          height,
                          width,
                          step,
                          folder="coffecustom_SineLayer"):
    '''
    Plot the image, its gradient norm, and its Laplacian, computed analytically
    via autograd, and save the figure.

    :param image_nc: image in the form n x c, where n = H x W
    :param coords: input coordinates the image was generated from (required for autograd)
    :param height: height of the image
    :param width: width of the image
    :param step: training step, used in the output filename
    :param folder: subfolder of ./plots/ in which to save the figure
    '''
    titles = ["Image", "Gradient", "Laplacian"]
    channels = image_nc.shape[-1]
    image_hwc = image_nc.reshape([height, width, channels])
    lapl_hwc = laplace(image_nc, coords).reshape([height, width, channels])
    grad_hwc = gradient(image_nc, coords).reshape([height, width, 2, channels])

    fig, axes = plt.subplots(1, 3, figsize=(18, 6))
    plt.suptitle("Analytical", fontsize=15)

    if channels == 1:
        axes[0].imshow(image_hwc.cpu().view(height, width).detach().numpy())
        axes[1].imshow(
            grad_hwc.norm(dim=-2).cpu().view(height, width).detach().numpy())
        axes[2].imshow(lapl_hwc.cpu().view(height, width).detach().numpy())
    else:
        axes[0].imshow(
            normalize(image_hwc).cpu().view(height, width,
                                            channels).detach().numpy())
        axes[1].imshow(
            normalize(grad_hwc.norm(dim=-2)).cpu().view(
                height, width, channels).detach().numpy())
        axes[2].imshow(
            normalize(lapl_hwc).cpu().view(height, width,
                                           channels).detach().numpy())

    for i in range(3):
        axes[i].set_title(titles[i])

    os.makedirs(f"./plots/{folder}/", exist_ok=True)
    plt.savefig(f"./plots/{folder}/Analytical_{step}.png")
    plt.show()
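
A usage sketch under assumptions: gradient and laplace are the module's autograd-based helpers, so coords must require gradients and image_nc must still be attached to the graph that produced it. The toy MLP below is hypothetical, standing in for a SIREN-style network:

    import torch

    model = torch.nn.Sequential(torch.nn.Linear(2, 64), torch.nn.Tanh(),
                                torch.nn.Linear(64, 1))
    ys, xs = torch.meshgrid(torch.linspace(-1, 1, 64),
                            torch.linspace(-1, 1, 64), indexing='ij')
    coords = torch.stack([xs, ys], dim=-1).reshape(-1, 2).requires_grad_(True)
    image_nc = model(coords)  # (64*64, 1), differentiable w.r.t. coords
    plot_image_analytical(image_nc, coords, 64, 64, step=0, folder="demo")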
Example No. 4
def load_ANE_CAL(denoise=True, pre=False, save=True, nor=True):
    """
    loading IEC ANE and CAL dataset and preprocess the signal

    Arg:
        denoise: (bool) denoise using wavelet threshold or not
        pre: (bool) load from saved preprocessed data or not
        save: (bool) save preprocessed data to file or not
        normalize: (bool) normalize the signal or not
    Return:
        ekg_sig: (Tensor) with shape [#signals, 1 lead, signal_length]
    """
    name = json.loads(config["General"]["CAL_name"])
    # (num of ekg signal, length, 1)
    if pre and os.path.isfile(config["RetinaNet"]["output_path"] +
                              "CAL_preprocessed_data.pt"):
        ekg_sig = torch.load(config["RetinaNet"]["output_path"] +
                             "CAL_preprocessed_data.pt").to('cuda')
        if nor:
            ekg_sig = normalize(ekg_sig, instance=True)
    else:
        ekg_sig = []
        for i in range(len(name)):
            for j in range(1, 6):
                ekg_filename = f'{config["General"]["IEC_path"]}{name[i]}_{str(j)}.raw'
                try:
                    sig = read_IEC(ekg_filename)
                    sig = np.reshape(sig[0], (len(sig[0]), 1))
                    ekg_sig.append(sig.astype(float))
                except IOError:
                    print(f"file {name[i]}_{str(j)} does not exist")

        ekg_sig = IEC_dataset_preprocessing(ekg_sig, smooth=False, dns=denoise)

        ekg_sig = ekg_sig.to('cuda')
        if nor:
            ekg_sig = normalize(ekg_sig, instance=True)
        if save:
            torch.save(
                ekg_sig, config["RetinaNet"]["output_path"] +
                "CAL_preprocessed_data.pt")

    return ekg_sig
def non_process(train, test):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    X_train = np.vstack(x_sou_ds)
    y_train = np.vstack(y_train)[:, 0]

    X_test, y_test = test
    y_test = y_test[:, 0]

    X_train, X_test = normalize(X_train, X_test)
    return X_train, X_test, y_train, y_test
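
The two-argument normalize used throughout these *_process helpers is not shown in this collection. A plausible minimal sketch (an assumption, not the project's implementation) fits the statistics on the training split only and applies them to both splits, avoiding test-set leakage:

    import numpy as np

    def normalize(X_train, X_test, eps=1e-8):
        mean = X_train.mean(axis=0)
        std = X_train.std(axis=0) + eps  # eps guards against constant features
        return (X_train - mean) / std, (X_test - mean) / std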
def pca_process(train, test):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    y_train = np.vstack(y_train)[:, 0]
    x_tar_ds, y_test = test
    x_sou_mat = np.vstack(x_sou_ds)

    model = PCA(n_components=50)
    X_train = model.fit_transform(x_sou_mat)
    X_test = model.transform(x_tar_ds)

    X_train, X_test = normalize(X_train, X_test)
    return X_train, X_test, y_train, y_test[:, 0]
def sampling_process(train, test, random_state=42):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    x_sou_ds_new, y_train_new = random_sampling_ds(x_sou_ds,
                                                   y_train,
                                                   random_state=random_state)
    X_train = np.vstack(x_sou_ds_new)
    y_train = np.vstack(y_train_new)[:, 0]

    X_test, y_test = test
    y_test = y_test[:, 0]

    X_train, X_test = normalize(X_train, X_test)
    return X_train, X_test, y_train, y_test
def coral_process(train, test):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    y_train = np.vstack(y_train)[:, 0]
    x_tar_ds, y_test = test
    x_sou_mat = np.vstack(x_sou_ns + x_sou_ds)
    len_x_sou_ds = len(np.vstack(x_sou_ds))  # row count of the ds block, stacked last

    model = CORAL()
    model.fit(x_sou_mat, x_tar_ns)
    X_train = model.transform(x_sou_mat)
    X_test = model.transform(x_tar_ds)

    X_train, X_test = normalize(X_train, X_test)
    return X_train[-len_x_sou_ds:], X_test, y_train, y_test[:, 0]
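
The CORAL class above is project code not shown in this collection. For reference, a minimal numpy sketch of the standard CORAL transform (whiten with the source covariance, then re-color with the target covariance) could look like this; it is an illustration, not the project's implementation:

    import numpy as np
    from scipy.linalg import fractional_matrix_power

    def coral_transform(Xs, Xt, eps=1e-5):
        """Align source features Xs to target features Xt via second-order statistics."""
        Cs = np.cov(Xs, rowvar=False) + eps * np.eye(Xs.shape[1])
        Ct = np.cov(Xt, rowvar=False) + eps * np.eye(Xt.shape[1])
        A = np.real(fractional_matrix_power(Cs, -0.5) @ fractional_matrix_power(Ct, 0.5))
        return Xs @ A  # source rows re-colored to match the target covariance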
Example No. 9
def test(net, signal_file_path):
    """
    test the signal using retinanet and return all of the predict result

    Args:
        net: (nn.Module) RetinaNet module
        signal_file_path: (string) this file must be .raw format
    Returns:
        intervals: (list) p duration, pq interval, qrs duration, qt interval mean and var value
        qrs_intervals: (list) q duration, r duration, and s duration mean value
    """
    """load signal file from .raw"""
    raw_ekg_sig = []
    try:
        sig = read_IEC(signal_file_path)
        sig = np.reshape(sig[0], (len(sig[0]), 1))
        raw_ekg_sig.append(sig.astype(float))
    except IOError:
        print(f"file {signal_file_path} does not exist.")

    ekg_sig = IEC_dataset_preprocessing(raw_ekg_sig, smooth=False,
                                        dns=False).to('cuda')
    denoise_sig = IEC_dataset_preprocessing(raw_ekg_sig,
                                            smooth=False,
                                            dns=True).to('cuda')
    """predict the pqrst segmentation result"""
    final_intervals = []
    final_preds = []
    denoise_sig = normalize(denoise_sig)
    for i in range((denoise_sig.size(0) + 127) // 128):  # ceil division avoids an empty final batch
        plots, intervals, pred_signals = test_retinanet(
            net, denoise_sig[i * 128:(i + 1) * 128, :, :], 4992, visual=False)
        print(intervals)
        final_intervals.extend(intervals)
        final_preds.append(pred_signals)
    final_preds = torch.cat(final_preds, dim=0)

    pred = qrs_seperation(ekg_sig, final_preds)

    for i in range(len(pred)):
        pred[i]["q_duration"] = np.mean(pred[i]["q_duration"])
        pred[i]["r_duration"] = np.mean(pred[i]["r_duration"])
        pred[i]["s_duration"] = np.mean(pred[i]["s_duration"])
    print(pred)

    return final_intervals, pred
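
A usage sketch; the weight path is hypothetical, echoing the commented-out path in testing_using_retinanet below, and the signal path follows the CSE naming used by load_IEC:

    net = RetinaNet(3).cuda()
    net.load_state_dict(torch.load("weights/retinanet_best_IEC.pkl"))
    intervals, qrs = test(net, config["General"]["IEC_path"] + "CSE001.raw")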
def tca_process(train, test, p=50):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    x_tar_ds, y_test = test

    x_sou_ds, y_train = random_sampling_ds(x_sou_ds, y_train, 42)
    x_sou_ns = random_sampling_ns(x_tar_ns, x_sou_ns, 42)

    y_train = np.vstack(y_train)[:, 0]
    x_sou_ns_mat = np.vstack(x_sou_ns)
    x_sou_ds_mat = np.vstack(x_sou_ds)

    model = TCA(dim=p)
    model.fit(x_sou_ns_mat, x_tar_ns)
    X_train = model.transform(x_sou_ds_mat)
    X_test = model.transform(x_tar_ds)

    X_train, X_test = normalize(X_train, X_test)
    return X_train, X_test, y_train, y_test[:, 0]
def mdas_process(train,
                 test,
                 p=70,
                 alpha=0.5,
                 lamb1=0.1,
                 lamb2=0.1,
                 random_state=42):
    x_tar_ns, x_sou_ns, x_sou_ds, y_train = train
    x_tar_ds, y_test = test
    x_sou_ds, y_train = random_sampling_ds(x_sou_ds, y_train, random_state)
    x_sou_ns = random_sampling_ns(x_tar_ns, x_sou_ns, random_state)
    y_train = np.vstack(y_train)[:, 0]

    model = MDAS(p=p, alpha=alpha, lamb1=lamb1, lamb2=lamb2)
    x_sou_ds = model.fit(x_tar_ns, x_sou_ns, x_sou_ds)
    X_test = model.transform(x_tar_ds)
    X_train = np.vstack(x_sou_ds)

    X_train, X_test = normalize(X_train, X_test)
    return X_train, X_test, y_train, y_test[:, 0]
Example No. 12
def testing_using_retinanet():
    """
    test RetinaNet and return the segmentation result.

    Args:
        signal: (tensor) with sized [batch_size, data_length]
    Returns:
        (json): result prediction with preprocessed input and final predict segmentation.
    """
    global retinanet_model_path
    signal = request.json['ECG']
    #denoise = request.json["denoise"]
    denoise = False

    signal = np.asarray(signal).astype(float)
    signal = signal.reshape((signal.shape[0], signal.shape[1], 1))
    target_length = (signal.shape[1] // 64) * 64
    signal = IEC_dataset_preprocessing(signal,
                                       smooth=False,
                                       dns=denoise,
                                       target_length=target_length).cuda()
    signal = normalize(signal)
    net = RetinaNet(3).cuda()
    #net.load_state_dict(torch.load("weights/retinanet_best_IEC.pkl"))
    net.load_state_dict(torch.load(retinanet_model_path))
    batch_size = 128
    final_preds = []
    for i in range((signal.size(0) + batch_size - 1) // batch_size):  # ceil division avoids an empty final batch
        _, _, pred_signals = test_retinanet(net,
                                            signal[i * batch_size:(i + 1) *
                                                   batch_size, :, :],
                                            target_length,
                                            visual=False)
        final_preds.append(pred_signals)
    final_preds = torch.cat(final_preds, dim=0)

    return jsonify({'raw': signal.tolist(), "label": final_preds.tolist()})
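
Since this is a Flask-style endpoint reading request.json, a client would POST a JSON body with an 'ECG' field shaped [batch_size, data_length]. A sketch with the requests library (the URL, port, and route name are assumptions; the route decorator is not shown above):

    import requests

    payload = {"ECG": [[0.0] * 4992]}  # one dummy signal of length 4992
    resp = requests.post("http://localhost:5000/testing_using_retinanet",
                         json=payload)
    result = resp.json()  # {'raw': [...], 'label': [...]}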
Example No. 13
def image_fitting_validate(model_output, coords, dataset, layer_folder):
    name = f'./plots/image_fitting/{layer_folder}/{dataset.name}/results'
    titles = ["Image", "Gradient", "Laplacian"]
    mse = get_mse(dataset.pixels, model_output.cpu())
    plots = [dataset.pixels, model_output]
    functions = [get_gradient_num, get_laplacian_num]
    norm_lambda = lambda img, index: img if index else img.norm(dim=-2)
    for i in range(2):
        plots.append(
            norm_lambda(
                functions[i](dataset.pixels, dataset.height, dataset.width,
                             dataset.channels), i))
        plots.append(
            norm_lambda(
                functions[i](model_output, dataset.height, dataset.width,
                             dataset.channels), i))
    for i in range(len(plots)):
        if dataset.channels == 1:
            plots[i] = plots[i].cpu().view(dataset.height,
                                           dataset.width).detach().numpy()
        else:
            plots[i] = normalize(
                plots[i].cpu().view(dataset.height, dataset.width,
                                    dataset.channels)).detach().numpy()
    fig, axes = plt.subplots(2, 3, figsize=(18, 12))
    plt.suptitle("Final results for Image Fitting Task", fontsize=20)
    for i in range(3):
        for j in range(2):
            axes[j][i].imshow(plots[i * 2 + j])
            if j == 0: axes[j][i].set_title(titles[i])
    axes[0][0].set_ylabel("Ground truth")
    axes[1][0].set_ylabel("Output")
    os.makedirs(f"{name}", exist_ok=True)
    plt.savefig(f"{name}.png")
    plt.show()
    return [mse]
Example No. 14
    w2v_file = config["pretrained_embedding"]  # w2v_file
    data_index = config["index"]  # Indri index
    mapped_w2v_file = config["output_embedding"]  # output shared w2v dict

    print('load word dict ...')
    word_dict = load_word_dict(data_index)
    print("Dictionary length: {}".format(len(word_dict)))

    print('load word vectors ...')
    embeddings = load_word_embedding(word_dict, w2v_file)

    print('save word vectors ...')
    with open(mapped_w2v_file, 'w') as fw:
        # assert word_dict
        for w, idx in tqdm(word_dict.items()):
            try:
                print(word_dict[w], ' '.join(map(str, embeddings[idx])), file=fw)
            except Exception as error:
                print('Error saving this word : {}\n'.format(word_dict[w]) + repr(error))
                # print(embeddings[idx])

    print('Mapping word vectors finished.')

    if config["normalize"]:
        print("Normalization ...")
        normalize(mapped_w2v_file, config["normalized_embedding"])

    print("Embeddings OK.")


Example No. 15
    def __getitem__(self, idx):
        l_bound = idx * self.config["BATCH_SIZE"]
        r_bound = (idx + 1) * self.config["BATCH_SIZE"]

        if idx % 10 == 0:
            self.scale_factor = np.random.randint(
                -3, 7, 1)[0] if self.config["MULTI_SCALE_TRAINING"] else 0

        image_width = self.config["IMAGE_W"] + (self.config["IMAGE_W"] //
                                                self.config["GRID_W"] *
                                                self.scale_factor)
        image_height = self.config["IMAGE_H"] + (self.config["IMAGE_H"] //
                                                 self.config["GRID_H"] *
                                                 self.scale_factor)

        grid_w = self.config["GRID_W"] + self.scale_factor
        grid_h = self.config["GRID_H"] + self.scale_factor

        grid_dims = np.tile(np.array([grid_h, grid_w]),
                            (self.config["BATCH_SIZE"], 1))

        if r_bound > len(self.annotations):
            r_bound = len(self.annotations)
            l_bound = r_bound - self.config["BATCH_SIZE"]

        instance_count = 0

        x_batch = np.zeros(
            (self.config["BATCH_SIZE"], image_height, image_width, 3))
        y_batch = np.zeros((self.config["BATCH_SIZE"], grid_h, grid_w,
                            self.config["BOX"], 4 + 1 + self.config["CLASS"]))

        for idx in self.batch_idx[l_bound:r_bound]:
            batch_image = self.images[idx]
            batch_annotations = self.annotations[idx]

            img, all_objs = self.get_image_with_box(
                batch_image,
                batch_annotations,
                image_height,
                image_width,
                augmentation=self.augmentation)

            for obj in all_objs:
                if obj["xmax"] > obj["xmin"] and obj["ymax"] > obj[
                        "ymin"] and obj["name"] in self.config["LABELS"]:
                    center_x = .5 * (obj["xmin"] + obj["xmax"])
                    center_x = center_x / (float(image_width) / grid_w)
                    center_y = .5 * (obj["ymin"] + obj["ymax"])
                    center_y = center_y / (float(image_height) / grid_h)

                    grid_x = int(np.floor(center_x))
                    grid_y = int(np.floor(center_y))

                    if grid_x < grid_w and grid_y < grid_h:
                        class_idx = self.config["LABELS"].index(obj["name"])

                        center_w = (obj["xmax"] - obj["xmin"]) / (
                            float(image_width) / grid_w)
                        center_h = (obj["ymax"] - obj["ymin"]) / (
                            float(image_height) / grid_h)

                        box = [center_x, center_y, center_w, center_h]

                        best_anchor = -1
                        max_iou = -1

                        shifted_box = BoundBox(0, 0, center_w, center_h)

                        for i in range(len(self.anchors)):
                            anchor = self.anchors[i]
                            iou = bbox_iou(shifted_box, anchor)

                            if max_iou < iou:
                                best_anchor = i
                                max_iou = iou

                        y_batch[instance_count, grid_y, grid_x, best_anchor,
                                0:4] = box
                        y_batch[instance_count, grid_y, grid_x, best_anchor,
                                4] = 1.
                        y_batch[instance_count, grid_y, grid_x, best_anchor,
                                5 + class_idx] = 1

            x_batch[instance_count] = normalize(img) if self.norm else img
            instance_count += 1

        y_batch = np.reshape(
            y_batch,
            (self.config["BATCH_SIZE"], grid_h, grid_w, self.config["BOX"] *
             (4 + 1 + self.config["CLASS"])))
        return [x_batch, grid_dims], y_batch
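
A worked example of the target-encoding arithmetic above, using plain numbers (the 416x416 image and 13x13 grid are illustrative, not taken from the config): a box spanning (100, 150) to (200, 250) is assigned to the grid cell containing its center, with its size expressed in grid units:

    image_w = image_h = 416
    grid_w = grid_h = 13                                 # each cell covers 32x32 pixels
    xmin, ymin, xmax, ymax = 100, 150, 200, 250
    center_x = 0.5 * (xmin + xmax) / (image_w / grid_w)  # 4.6875 grid units
    center_y = 0.5 * (ymin + ymax) / (image_h / grid_h)  # 6.25 grid units
    grid_x, grid_y = int(center_x), int(center_y)        # cell (4, 6); floor for positive values
    center_w = (xmax - xmin) / (image_w / grid_w)        # 3.125 grid units
    center_h = (ymax - ymin) / (image_h / grid_h)        # 3.125 grid units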
Example No. 16
def test_retinanet_by_qrs(net):
    """
    testing the CAL and ANE dataset q, r, s duration using rdp algorithm.

    Args:
        net: (nn.Module) Retinanet module
    """
    ekg_sig = load_ANE_CAL(denoise=False, pre=False, nor=False)
    turn_point = get_signals_turning_point_by_rdp(ekg_sig, load=True)
    print(len(turn_point[0]))

    final_preds = []
    ekg_sig = normalize(ekg_sig)
    for i in range((ekg_sig.size(0) + 127) // 128):  # ceil division avoids an empty final batch
        _, _, pred_signals = test_retinanet(net,
                                            ekg_sig[i * 128:(i + 1) *
                                                    128, :, :],
                                            4992,
                                            visual=False)
        final_preds.append(pred_signals)
    final_preds = torch.cat(final_preds, dim=0)
    ekg_sig = ekg_sig.cpu().numpy()

    onset_offset = onset_offset_generator(final_preds)
    qrs_interval = []
    for i in range(onset_offset.shape[0]):
        qrs_interval.append([])
        j = 0
        while j < 4992:
            if onset_offset[i, 2, j] == -1:
                qrs_interval[i].append([j])
                j += 1
                while onset_offset[i, 2, j] == 0:
                    j += 1
                qrs_interval[i][-1].append(j)
            j += 1

    enlarge_qrs = enlarge_qrs_list(qrs_interval)

    turning = []
    for index in range(ekg_sig.shape[0]):
        turning.append([])
        for j in range(len(enlarge_qrs[index])):
            filtered_peaks = list(
                filter(
                    lambda i: i >= enlarge_qrs[index][j][0] and i <=
                    enlarge_qrs[index][j][1], turn_point[index]))
            turning[index].append(filtered_peaks)
            idx = find_index_closest_to_value(
                ekg_sig[index, 0, filtered_peaks[1]:filtered_peaks[2]],
                ekg_sig[index, 0, filtered_peaks[0]])
            idx = idx + filtered_peaks[1] - enlarge_qrs[index][j][0]

    pred = []
    for i in range(len(turning)):
        pred.append({"q_duration": [], "r_duration": [], "s_duration": []})
        mode = np.argmax(np.bincount([len(t) for t in turning[i]]))
        for j in range(len(turning[i])):
            if len(turning[i][j]) != mode:
                continue
            if mode >= 5:
                # q,r,s
                # find q duration
                q_end = find_index_closest_to_value(
                    ekg_sig[i, 0, turning[i][j][1]:turning[i][j][2]],
                    ekg_sig[i, 0, turning[i][j][0]])
                q_end = q_end + turning[i][j][1]
                q_duration = q_end - turning[i][j][0]
                pred[i]["q_duration"].append(q_duration)
                # find s duration
                s_start = find_index_closest_to_value(
                    ekg_sig[i, 0, turning[i][j][2]:turning[i][j][3]],
                    ekg_sig[i, 0, turning[i][j][4]])
                s_start = s_start + turning[i][j][2]
                s_duration = turning[i][j][4] - s_start
                pred[i]["s_duration"].append(s_duration)
                # find r duration
                r_start = q_end
                r_end = s_start
                r_duration = r_end - r_start
                pred[i]["r_duration"].append(r_duration)
            elif mode == 4:
                # q,r or r,s
                if ekg_sig[i, 0, turning[i][j][1]] > ekg_sig[i, 0,
                                                             turning[i][j][2]]:
                    pred[i]["q_duration"].append(0)
                    # r, s
                    # find s duration
                    s_start = find_index_closest_to_value(
                        ekg_sig[i, 0, turning[i][j][1]:turning[i][j][2]],
                        ekg_sig[i, 0, turning[i][j][3]])
                    s_start = s_start + turning[i][j][1]
                    s_duration = turning[i][j][3] - s_start
                    pred[i]["s_duration"].append(s_duration)
                    # find r duration
                    r_end = s_start
                    r_duration = r_end - turning[i][j][0]
                    pred[i]["r_duration"].append(r_duration)
                else:
                    if i == 84:
                        print(turning[i][j][1], turning[i][j][2])
                    # q, r
                    pred[i]["s_duration"].append(0)
                    # find q duration
                    q_end = find_index_closest_to_value(
                        ekg_sig[i, 0, turning[i][j][1]:turning[i][j][2]],
                        ekg_sig[i, 0, turning[i][j][0]])
                    q_end = q_end + turning[i][j][1]
                    q_duration = q_end - turning[i][j][0]
                    pred[i]["q_duration"].append(q_duration)
                    # find r duration
                    r_start = q_end
                    r_duration = turning[i][j][3] - r_start
                    pred[i]["r_duration"].append(r_duration)
            elif mode <= 3:
                # only q or r
                if ekg_sig[i, 0, turning[i][j][1]] > ekg_sig[i, 0,
                                                             turning[i][j][0]]:
                    # r
                    pred[i]["q_duration"].append(0)
                    pred[i]["s_duration"].append(0)
                    r_duration = turning[i][j][2] - turning[i][j][0]
                    pred[i]["r_duration"].append(r_duration)
                else:
                    # q
                    pred[i]["r_duration"].append(0)
                    pred[i]["s_duration"].append(0)
                    q_duration = turning[i][j][2] - turning[i][j][0]
                    pred[i]["q_duration"].append(q_duration)

    # nominal Q/R/S durations for the 17 reference recordings
    # (the repeated dicts are shared objects; they are only read below)
    standard_qrs = (
        [{"q_duration": 12, "r_duration": 52, "s_duration": 30}] * 3 +  # ANE
        [{"q_duration": 0, "r_duration": 50, "s_duration": 50}] * 5 +   # CAL
        [{"q_duration": 0, "r_duration": 56, "s_duration": 0}] * 3 +
        [{"q_duration": 56, "r_duration": 0, "s_duration": 0}] * 3 +
        [{"q_duration": 0, "r_duration": 18, "s_duration": 18}] +
        [{"q_duration": 0, "r_duration": 50, "s_duration": 50}] * 2)

    mean_diff = np.zeros((3, 17))
    for i in range(17):
        q_temp_mean = []
        r_temp_mean = []
        s_temp_mean = []
        for j in range(5):
            q_temp_mean.append(np.mean(pred[i * 5 + j]["q_duration"]))
            r_temp_mean.append(np.mean(pred[i * 5 + j]["r_duration"]))
            s_temp_mean.append(np.mean(pred[i * 5 + j]["s_duration"]))
        mean_diff[0, i] = np.mean(q_temp_mean) * 2 - standard_qrs[i]["q_duration"]
        mean_diff[1, i] = np.mean(r_temp_mean) * 2 - standard_qrs[i]["r_duration"]
        mean_diff[2, i] = np.mean(s_temp_mean) * 2 - standard_qrs[i]["s_duration"]
    #print(pd.DataFrame(mean_diff.T, columns=["q","r","s"]))
    #print(np.mean(mean_diff, axis=1))
    #print(np.std(mean_diff, axis=1, ddof=1))
    mean_diff = removeworst(mean_diff, 4)
    mean_diff_mean = np.mean(mean_diff, axis=1)
    mean_diff_std = np.std(mean_diff, axis=1, ddof=1)
    print(mean_diff_mean)
    print(mean_diff_std)
Example No. 17
def poisson_image_validate(model_output,
                           coords,
                           dataset,
                           layer_folder,
                           numerical=True,
                           shift_grad=False,
                           shift_image=True,
                           lapl_factor=1e-4,
                           grad_factor=.1):
    name = f'./plots/poisson_image/{layer_folder}/{dataset.name}/results'
    titles = ["Image", "Gradient", "Laplacian"]
    model_output = model_output.cpu()
    if shift_image:
        model_output = shift(model_output, dataset.pixels)

    mses = [get_mse(dataset.pixels, model_output).detach()]

    gt_grad = get_gradient_num(dataset.pixels, dataset.height, dataset.width,
                               dataset.channels)
    if numerical:
        out_grad = get_gradient_num(model_output.cpu(), dataset.height,
                                    dataset.width, dataset.channels).detach()
        if shift_grad:
            out_grad = shift(out_grad, gt_grad, grad=True)
        mses.append(get_mse(gt_grad, out_grad))
        mses.append(
            get_mse(
                get_laplacian_num(dataset.pixels, dataset.height,
                                  dataset.width, dataset.channels),
                get_laplacian_num(model_output.cpu(), dataset.height,
                                  dataset.width, dataset.channels).detach()))
    else:
        out_grad = grad_factor * gradient(model_output, coords,
                                          no_graph=True).detach().cpu()
        if shift_grad:
            out_grad = shift(out_grad, gt_grad, grad=True)
        mses.append(get_mse(gt_grad, out_grad))
        mses.append(
            get_mse(
                get_laplacian_num(dataset.pixels, dataset.height,
                                  dataset.width, dataset.channels),
                lapl_factor *
                laplace(model_output, coords, no_graph=True).detach().cpu()))
    plots = [dataset.pixels, model_output]
    functions = [get_gradient_num, get_laplacian_num]
    norm_lambda = lambda img, index: img if index else img.norm(dim=-2)
    for i in range(2):
        plots.append(
            norm_lambda(
                functions[i](dataset.pixels, dataset.height, dataset.width,
                             dataset.channels), i))
        plots.append(
            norm_lambda(
                functions[i](model_output, dataset.height, dataset.width,
                             dataset.channels), i))
    for i in range(len(plots)):
        if dataset.channels == 1:
            plots[i] = plots[i].cpu().view(dataset.height,
                                           dataset.width).detach().numpy()
        elif i == 0 or i == 1:
            plots[i] = torch.clamp(plots[i].cpu(), min=0.0, max=1.0).view(
                dataset.height, dataset.width,
                dataset.channels).detach().numpy()
        else:
            plots[i] = normalize(
                plots[i].cpu().view(dataset.height, dataset.width,
                                    dataset.channels)).detach().numpy()
    fig, axes = plt.subplots(2, 3, figsize=(18, 12))
    plt.suptitle("Final results for Poisson Image Task", fontsize=20)
    for i in range(3):
        for j in range(2):
            axes[j][i].imshow(plots[i * 2 + j])
            if j == 0: axes[j][i].set_title(titles[i])
    axes[0][0].set_ylabel("Ground truth")
    axes[1][0].set_ylabel("Output")
    plt.savefig(f"{name}.png")
    plt.show()
    return mses
Example No. 18
def denoise(adata):
    assert isinstance(adata,
                      anndata.AnnData), 'adata must be an AnnData instance'

    # set seed for reproducibility
    random_state = 0
    random.seed(random_state)
    np.random.seed(random_state)
    tf.random.set_seed(random_state)
    os.environ['PYTHONHASHSEED'] = '0'

    # data manipulation
    # this creates adata.raw with raw counts and copies adata if copy==True
    adata = data_utils.read_dataset(adata, test_split=True, copy=True)

    # check for all-zero genes (normalize does not handle them, so check here)
    nonzero_genes, _ = sc.pp.filter_genes(adata.X, min_counts=1)
    assert nonzero_genes.all(), 'Please remove all-zero genes before using DCA.'

    adata = data_utils.normalize(
        adata,
        filter_min_counts=False,  # no filtering, keep cell and gene idxs same
        size_factors=True,
        normalize_input=True,
        logtrans_input=False)

    network_kwds = {
        'input_size': adata.n_vars,
        'output_size': adata.n_vars,
        'hidden_size': (64, 32, 64),
        'hidden_dropout': 0.,
        'batchnorm': True,
        'activation': 'relu',
        'init': 'glorot_uniform'
    }

    net = zae.ZINBAutoencoder(**network_kwds)
    net.save()
    net.build()

    training_kwds = {
        'epochs': 300,
        'reduce_lr': 10,
        'early_stop': 15,
        'batch_size': 32,
        'optimizer': 'rmsprop',
        'verbose': False,
        'threads': 1,
    }

    hist = net.train(adata[adata.obs.dca_split == 'train'], **training_kwds)

    res = net.predict(adata, return_info=True, copy=True)

    # copy, return_info and return_model are fixed here (True, True, False),
    # so the conditional returns collapse to:
    adata = res
    adata.uns['dca_loss_history'] = hist.history

    return adata
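
A usage sketch with a synthetic count matrix (the negative-binomial data is made up; any AnnData of raw counts without all-zero genes should work, given the dca-style data_utils and ZINB autoencoder modules this function relies on):

    import numpy as np
    import anndata

    counts = np.random.negative_binomial(5, 0.3, size=(200, 50)).astype(float)
    adata = anndata.AnnData(X=counts)
    denoised = denoise(adata)                        # returns a denoised copy
    print(denoised.uns['dca_loss_history'].keys())   # training-loss curves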
Example No. 19
    def _normalize(self, tensor):
        return normalize(tensor)
Example No. 20
    ponsets = np.array([x[0].Onset for x in perf_list])
    pvelocities = np.array([x[0].Velocity for x in perf_list])

    # put first perf onset to t=0:
    ponsets = ponsets - np.min(ponsets)
    # only use onsets on beats
    beatSelection = onsets % 1 == 0
    onsets_beats = onsets[beatSelection]
    ponsets_beats = ponsets[beatSelection]

    eqcl = makeEquivalenceClasses(onsets_beats)
    onset2ponset = makeMeanOverEqClasses(eqcl, ponsets_beats)
    u_onsets = np.array(sorted(eqcl.keys()))
    u_ponsets = np.array([onset2ponset[x] for x in u_onsets])

    n_u_onsets = normalize(u_onsets.copy().reshape((-1, 1)))[:, 0]
    n_u_ponsets = normalize(u_ponsets.copy().reshape((-1, 1)))[:, 0]

    ioi_r = np.diff(n_u_ponsets) / np.diff(n_u_onsets)

    # locate invalid ioi ratio values
    valid_idx = np.logical_not(
        np.array(ioi_r <= 0, dtype=bool) + np.isinf(ioi_r) + np.isnan(ioi_r))

    ioi_r = ioi_r[valid_idx]
    ioi_r = np.log2(ioi_r)

    xcoords = (u_onsets[1:] + u_onsets[:-1]) / 2
    xcoords = xcoords[valid_idx]

    idx = np.argsort(xcoords)
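
A toy illustration of the inter-onset-interval (IOI) math above, skipping the normalization step for readability (the numbers are invented): a constant performed-to-score IOI ratio gives a flat log2 curve, and a drop marks an accelerando:

    import numpy as np

    u_onsets = np.array([0.0, 1.0, 2.0, 3.0])       # score onsets, in beats
    u_ponsets = np.array([0.0, 0.5, 1.0, 1.4])      # performed onsets, in seconds
    ioi_r = np.diff(u_ponsets) / np.diff(u_onsets)  # [0.5, 0.5, 0.4]
    print(np.log2(ioi_r))                           # [-1.0, -1.0, -1.32...]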
Example No. 21
    def __getitem__(self, index):
        """
        Get the 'index'th item from the dataset, a tuple (img, pos_in_img, 2d_pose, 3d_pose, meta).

        meta is a dict of metadata that could be useful. The following information is provided in meta:
        index = the index of the item in the dataset
        cam = the camera object used in the projection
        Q = the orthogonal transform that was applied to the pose before projection
        
        This function performs the following:
        1. Gets data from the appropriate storage
        2. (if self.orthogonal_data_augmentation) Performs an orthogonal transformation around the hip joint
        3. Projects the pose to compute the 2D pose
        4. Subsample pose data, to only consider the dimensions that we really want to use
        5. Normalizes the 2D and 3D poses (either instance normalization, or, according to dataset statistics)
        6. Perform joint dropping/masking

        7. Loading the correct image from the dataset directory
        8. Normalize the image into something to input to a stacked hourglass network (shape of 256x256)
        9. Computing the ground truth for the output heatmaps for the stacked hourglass

        10. Store all of the meta data that could be required for each example (such as means and std's for de-normalization)
        
        :param index: What index we want to get from the dataset, in the range [0, len(dataset)]
        :return: Returns the tuple (img, pos_in_img, 2d_pose, 3d_pose, meta), which are as follows:
            img = the image that we want to predict a 3D pose from
            pose_in_img = the 2D pose, in image coordinates of 'img'
            2d_pose = a normalized 2D pose, input to the 3D prediction network
            3d_pose = the 3D pose that we wish to predict
            meta = a dictionary of information that could be useful (defined above).
        """
        # Get the indices into the imgs/cams/pose
        frame_number = index // self.cams_per_frame
        camera_number = (index % self.cams_per_frame) + 1

        # Step 1, index into arrays
        # Get the image, camera and pose (in camera coordinates)
        subject = self.pose_meta[frame_number]["subject_number"]
        cam = self.cams[(subject,camera_number)]
        pose = self.pose[frame_number]
        
        # Step 2, apply the (random) orthogonal transform
        Q = np.eye(3)
        pr = random.random()
        if pr < self.orthogonal_data_augmentation_prob:
            Q = self.rand_orthogonal_transform_matrix()
        augmented_pose = self.apply_orthogonal_transform_3d(pose, Q)

        # Step 3, project the pose (this transforms the pose into camera coords and then projects)
        augmented_pose_2d = self.project_pose_3d(augmented_pose, cam)

        # Step 4, sub sample the joints, so that we only give the network the ones that move
        augmented_pose_2d = augmented_pose_2d[self.pose_2d_indx_to_use]
        augmented_pose = augmented_pose[self.pose_3d_indx_to_use]

        # Step 5, normalize the 2D and 3D poses. (Note that we need to manually transform into camera coords, and
        # 'project_pose_3d' includes this transformation before projection)
        normalized_pose_2d, hip_pos_2d, scale_2d = self.normalize_single_pose(augmented_pose_2d, self.num_joints_pred_2d, is_2d=True)
        augmented_pose_cam = self.world_to_camera_single_pose_3d(augmented_pose, cam)
        normalized_pose, hip_pos, scale_3d = self.normalize_single_pose(augmented_pose_cam, self.num_joints_pred_3d, is_2d=False)

        # Step 6, randomly drop some joints (only on the input/2D pose)
        joint_mask = np.array(np.random.uniform(size=self.num_joints) > self.drop_joint_prob, dtype=int)
        if self.drop_joint_prob > 0.0:
            normalized_pose_2d = np.reshape(normalized_pose_2d, (self.num_joints, -1))
            normalized_pose_2d *= np.expand_dims(joint_mask, axis=1)
            normalized_pose_2d = normalized_pose_2d.flatten()

        img_for_hg_input, target_heatmap = None, None
        if self.load_image_data:
            # Step 7, load the correct image from the dataset
            # pose_meta[i]["sequence_id"] is something like 'smoking 1.h5', and we want 'smoking 1'
            # The camera name is the 7th parameter in cam, out of 7
            action = self.pose_meta[frame_number]["sequence_id"].split(".")[0]
            camera_name = cam[6]
            filename = "{s}/{a}/{c}/{f}.jpg".format(s=subject, a=action, c=camera_name, f=frame_number)
            full_filename = os.path.join(self.dataset_img_path, filename)
            numpy_img = scipy.misc.imread(full_filename, mode='RGB').astype(np.float)

            # Step 8, color normalize, and squeeze the image into something to be used by the stacked hourglass
            # The cropping subroutines allow us to specify a center and scale (so pick those to keep the whole image)
            height, width, channels = numpy_img.shape
            center = [height // 2, width // 2]
            scale = max(*center) / self.hg_resolution
            in_res = [self.hg_in_res, self.hg_in_res]
            img_for_hg_input = hg_transforms.crop_numpy(numpy_img, center, scale, in_res, rot=0)
            img_for_hg_input = data_utils.normalize(img_for_hg_input, self.img_mean, self.img_std)

            # Convert the image to a PyTorch tensor, and transpose shape from (H,W,C) to (C,H,W)
            img_for_hg_input = torch.from_numpy(np.transpose(img_for_hg_input, (2, 0, 1))).float()

            # Step 9, compute the 2D pose ground truth in the normalized image, and, compute the target heatmap
            target_2d_pts = np.reshape(augmented_pose_2d.copy(), [-1, 2])  # numpy array, so copy() rather than clone()
            out_res = [self.hg_out_res, self.hg_out_res]
            target_heatmap = torch.zeros(self.num_joints_pred_2d, self.hg_out_res, self.hg_out_res)
            for i in range(self.num_joints_pred_2d):
                # rot=0: no rotation augmentation is applied in this dataset
                target_2d_pts[i] = hg_transforms.transform(target_2d_pts[i] + 1, center, scale, out_res, rot=0)
                target_2d_pt = torch.from_numpy(target_2d_pts[i])
                target_heatmap[i] = draw_labelmap(target_heatmap[i], target_2d_pt - 1, 1.0, type="Gaussian")
            target_2d_pts = torch.from_numpy(target_2d_pts)

        # Step 10, store any meta data
        meta = {
            'index': index,
            'frame_number': frame_number,
            'cam_number': camera_number,
            'cam': cam,
            'Q': Q,
            'joint_mask': joint_mask,
            '3d_pose_camera_coords': augmented_pose_cam,
            '2d_indx_used': self.pose_2d_indx_to_use,
            '3d_indx_used': self.pose_3d_indx_to_use,
            '2d_indx_ignored': self.pose_2d_indx_to_ignore,
            '3d_indx_ignored': self.pose_3d_indx_to_ignore,
        }
        if self.load_image_data:
            # data related to the image
            meta.update({
                "img_filename": full_filename,
                "img_center": center,
                "img_scale": scale,
                "2D_pose_orig_img": augmented_pose_2d, # final computation is at step 4
                "scaled_target_2D_pose": target_2d_pts,
            })
        if self.dataset_normalization:
            # to "unNormalize" in datasrt normalization
            meta.update({
                '2d_mean': self.pose_2d_mean,
                '3d_mean': self.pose_3d_mean,
                '2d_std': self.pose_2d_std,
                '3d_std': self.pose_3d_std,
            })
        else:
            # to "unNormalize" in instance normalization
            meta.update({
                '2d_hip_pos': hip_pos_2d,
                '3d_hip_pos': hip_pos,
                '2d_scale': scale_2d,
                '3d_scale': scale_3d,
            })

        # Return the tuple
        return (img_for_hg_input, target_heatmap, torch.Tensor(normalized_pose_2d), torch.Tensor(normalized_pose), meta)