Example No. 1
    def align(self, output=None, align_encoder_id=0, **kwargs):
        # if self.binary and any(self.binary):
        #     raise NotImplementedError

        if len(self.filenames.test) != len(self.extensions):
            raise Exception('wrong number of input files')

        binary = self.binary and any(self.binary)

        paths = self.filenames.test or [None]
        all_lines = utils.read_lines(paths, binary=self.binary)

        for line_id, lines in enumerate(all_lines):
            token_ids = [
                sentence if vocab is None else utils.sentence_to_token_ids(
                    sentence, vocab.vocab,
                    character_level=self.character_level.get(ext))
                for ext, vocab, sentence in zip(self.extensions, self.vocabs,
                                                lines)
            ]

            _, weights = self.seq2seq_model.step(data=[token_ids],
                                                 align=True,
                                                 update_model=False)

            trg_vocab = self.trg_vocab[0]
            trg_token_ids = token_ids[len(self.src_ext)]
            trg_tokens = [
                trg_vocab.reverse[i]
                if i < len(trg_vocab.reverse) else utils._UNK
                for i in trg_token_ids
            ]

            weights = weights.squeeze()
            max_len = weights.shape[1]

            if binary:
                src_tokens = None
            else:
                src_tokens = (lines[align_encoder_id].split()[:max_len - 1]
                              + [utils._EOS])
            trg_tokens = trg_tokens[:weights.shape[0] - 1] + [utils._EOS]

            output_file = ('{}.{}.svg'.format(output, line_id + 1)
                           if output is not None else None)

            utils.heatmap(src_tokens,
                          trg_tokens,
                          weights,
                          output_file=output_file)
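For orientation, a minimal standalone sketch of what the final utils.heatmap call might do; the helper's real signature is an assumption, and the binary case (src_tokens=None) is not handled here:

import matplotlib.pyplot as plt

def attention_heatmap(src_tokens, trg_tokens, weights, output_file=None):
    """Plot a (len(trg_tokens), len(src_tokens)) attention weight matrix."""
    fig, ax = plt.subplots()
    ax.imshow(weights, cmap='Greys', aspect='auto')
    ax.set_xticks(range(len(src_tokens)), labels=src_tokens, rotation=90)
    ax.set_yticks(range(len(trg_tokens)), labels=trg_tokens)
    if output_file is not None:
        fig.savefig(output_file, bbox_inches='tight')
    else:
        plt.show()
    plt.close(fig)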
Example No. 2
def plot_heatmap(df,
                 path,
                 vmin=0,
                 vmax=1,
                 nticks=11,
                 digits="{x:.3f}",
                 cmap="viridis",
                 metric="mAP@all"):
    fig, ax = plt.subplots()

    arr = [0, 1, 2, 5, 4, 3]
    val = df.values[arr, :][:, arr]
    leg = ['clipart', 'infograph', 'painting', 'pencil', 'photo', 'sketch']

    im, cbar = heatmap(val,
                       leg,
                       leg,
                       ax=ax,
                       cmap=cmap,
                       cbarlabel=metric,
                       vmin=vmin,
                       vmax=vmax,  # was hard-coded to 0.7, silently ignoring the vmax parameter
                       cbar_kw={'boundaries': np.linspace(vmin, vmax, nticks)})
    annotate_heatmap(im, valfmt=digits, textcolors=["white", "black"])

    fig.savefig(path, bbox_inches='tight', pad_inches=0, dpi=300)
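A hypothetical usage sketch, assuming heatmap and annotate_heatmap are the matplotlib annotated-heatmap gallery helpers, and that df arrives in the source row order implied by the permutation arr above (an inference, not documented):

import numpy as np
import pandas as pd

# Row/column order inferred from arr = [0, 1, 2, 5, 4, 3] and the legend above.
domains = ['clipart', 'infograph', 'painting', 'sketch', 'photo', 'pencil']
scores = pd.DataFrame(np.random.rand(6, 6), index=domains, columns=domains)
plot_heatmap(scores, 'domain_heatmap.png', metric='mAP@all')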
Example No. 3
    def visualize(self, show_light=False, show_base=True):
        dsize = None  # optional square display size; None leaves windows unresized
        hmap = heatmap(self.full_pred)
        if self.full_image is not None and show_light:
            light_heat = cv2.addWeighted(self.full_image[:, :, :3], 0.6, hmap,
                                         0.4, 0)
            if dsize:
                light_heat = cv2.resize(light_heat, (dsize, dsize))
            cv2.imshow('light heat', light_heat)
            if self.full_mask is not None and self.show_mask:
                light_mask = cv2.addWeighted(
                    self.full_image[:, :, :3], 0.6,
                    cv2.cvtColor(self.full_mask, cv2.COLOR_GRAY2BGR), 0.4, 0)
                if dsize:
                    light_mask = cv2.resize(light_mask, (dsize, dsize))
                cv2.imshow('light mask', light_mask)
        if self.full_image is not None and show_base:
            if dsize:
                cv2.imshow(
                    'image',
                    cv2.resize(self.full_image[:, :, :3], (dsize, dsize)))
            else:
                cv2.imshow('image', self.full_image[:, :, :3])
            if dsize:
                hmap = cv2.resize(hmap, (dsize, dsize))
            cv2.imshow('heatmap', hmap)
            if self.full_mask is not None and self.show_mask:
                if dsize:
                    cv2.imshow('mask',
                               cv2.resize(self.full_mask, (dsize, dsize)))
                else:
                    cv2.imshow('mask', self.full_mask)
        if show_light or show_base:
            cv2.waitKey()
Example No. 4
def predict(config, args):
    gpu_manage(args)
    dataset = TestDataset(args.test_dir, config.in_ch, config.out_ch)
    data_loader = DataLoader(dataset=dataset,
                             num_workers=config.threads,
                             batch_size=1,
                             shuffle=False)

    ### MODELS LOAD ###
    print('===> Loading models')

    gen = Generator(gpu_ids=config.gpu_ids)

    param = torch.load(args.pretrained)
    gen.load_state_dict(param)

    if args.cuda:
        gen = gen.cuda(0)

    with torch.no_grad():
        for i, batch in enumerate(tqdm(data_loader)):
            x = Variable(batch[0])  # Variable is a no-op on modern PyTorch; kept for compatibility
            filename = batch[1][0]
            if args.cuda:
                x = x.cuda()

            att, out = gen(x)

            h = 1  # grid rows
            w = 3  # grid columns: input, output, attention heatmap
            c = 3  # colour channels
            p = config.width  # tile side length in pixels

            allim = np.zeros((h, w, c, p, p))
            x_ = x.cpu().numpy()[0]
            out_ = out.cpu().numpy()[0]
            in_rgb = x_[:3]
            out_rgb = np.clip(out_[:3], 0, 1)
            att_ = att.cpu().numpy()[0] * 255
            heat_att = heatmap(att_.astype('uint8'))

            allim[0, 0, :] = in_rgb * 255
            allim[0, 1, :] = out_rgb * 255
            allim[0, 2, :] = heat_att
            allim = allim.transpose(0, 3, 1, 4, 2)
            allim = allim.reshape((h * p, w * p, c))

            save_image(args.out_dir, allim, i, 1, filename=filename)
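The transpose/reshape pair above is a compact way to tile an (h, w) grid of p-by-p images into a single canvas; a minimal standalone version of the same trick:

import numpy as np

def tile_grid(allim):
    """Tile an (h, w, c, p, p) grid of images into one (h*p, w*p, c) canvas."""
    h, w, c, p, _ = allim.shape
    return allim.transpose(0, 3, 1, 4, 2).reshape(h * p, w * p, c)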
Example No. 5
def predict(args):

    gpu_manage(args)
    ### MODELS LOAD ###
    print('===> Loading models')

    gen = Generator(gpu_ids=args.gpu_ids)

    param = torch.load(args.pretrained)
    gen.load_state_dict(param)

    if args.cuda:
        gen = gen.cuda(0)

    print('<=== Model loaded')

    print('===> Loading test image')
    img = cv2.imread(args.test_filepath, 1).astype(np.float32)
    img = img / 255
    img = img.transpose(2, 0, 1)
    img = img[None]
    print('<=== test image loaded')

    with torch.no_grad():
        x = torch.from_numpy(img)
        if args.cuda:
            x = x.cuda()

        print('===> Removing the cloud...')
        start_time = time.time()
        att, out = gen(x)
        print('<=== finished in %.3fs' % (time.time() - start_time))

        x_ = x.cpu().numpy()[0]
        x_rgb = x_ * 255
        x_rgb = x_rgb.transpose(1, 2, 0).astype('uint8')
        out_ = out.cpu().numpy()[0]
        out_rgb = np.clip(out_[:3], 0, 1) * 255
        out_rgb = out_rgb.transpose(1, 2, 0).astype('uint8')
        att_ = att.cpu().numpy()[0] * 255
        att_heatmap = heatmap(att_.astype('uint8'))[0]
        att_heatmap = att_heatmap.transpose(1, 2, 0)

        allim = np.hstack((x_rgb, out_rgb, att_heatmap))
        show(allim)
Example No. 6
def test_tile(loader, model, epoch, reg_limit, reg_loader, output_path):
    """
    :param testset:         测试数据集
    :param batch_size:      Dataloader 打包的小 batch 大小
    :param workers:         Dataloader 使用的进程数
    :param model:           网络模型
    :param output_path:     保存模型文件的目录
    """

    # Per-tile heatmap data is saved to output_path/<timestamp>-pred-e<epoch>-p<tilesize>-i<interval>-c<threshold>.csv
    fconv = open(os.path.join(output_path, '{}-pred-e{}-p{}-i{}-c{}.csv'.format(
        now, epoch, args.tile_size, args.interval, args.threshold)), 'w', newline="")
    w = csv.writer(fconv)
    w.writerow(['tile_size', '{}'.format(testset.tile_size)])
    w.writerow(['interval', '{}'.format(testset.interval)])
    w.writerow(['idx', 'grid', 'prob'])
    fconv.close()

    def rank(testset, probs):
        """按概率对 tile 排序,便于与置信度进行比较。"""

        groups = np.array(testset.tileIDX)
        tiles = np.array(testset.tiles_grid)

        order = np.lexsort((probs, groups))
        groups = groups[order]
        probs = probs[order]
        tiles = tiles[order]

        # index = np.empty(len(groups), 'bool')
        # index[-topk:] = True
        # index[:-topk] = groups[topk:] != groups[:-topk]
        index = probs > args.threshold  # boolean mask: keep tiles above the confidence threshold

        return tiles[index], probs[index], groups[index]

    print('Start testing ...')

    testset.setmode("tile")
    probs = inference_tiles(loader, model, device, mode='test')
    tiles, probs, groups = rank(testset, probs)

    # clear artifact images
    if reg_limit:
        reg_testset.setmode("image")
        model.setmode("image")

        with open(os.path.join(output_path, '{}-count-e{}.csv'.format(now, epoch)),
                  'w', newline="") as f:
            w = csv.writer(f, delimiter=',')
            w.writerow(['id', 'count', 'organ'])

            counts = inference_image(reg_loader, model, device, mode='test')[1]
            for i, y in enumerate(counts, start=1):
                w.writerow([i, y, reg_testset.organs[i - 1]])

        img_indices = np.select([counts != 0], [counts]).nonzero()[0]
        indices = [i for i, g in enumerate(groups) if g in img_indices]
        tiles = tiles[indices]
        probs = probs[indices]
        groups = groups[indices]

    # Generate the heatmap
    fconv = open(os.path.join(output_path, '{}-pred-e{}-p{}-i{}-c{}.csv'.format(
        now, epoch, args.tile_size, args.interval, args.threshold)), 'a', newline="")
    heatmap(testset, tiles, probs, groups, fconv, output_path)
    fconv.close()
Example No. 7
                out.write("Blobs stats for model {}: (d_centers, mean_n_blobs, std_n_blobs) = ({}, {}, {})\n".format(model_name, d_centers, mean_n_blobs, std_n_blobs))
            print("Blobs statistics done for size {}\n".format(size))

if HEATMAPS:
    print("\n")
    for size in [64, 1000]:
        for model_name in all_models_name:
            model_in_dir = os.path.join(IMAGES_DIR, model_name, str(size)) # input directory
            if not os.path.exists(model_in_dir):
                print("Images of size {} not found for model {}".format(size, model_name))
                continue
            model_out_dir = os.path.join(OUT_DIR, model_name, str(size))
            if not os.path.isdir(model_out_dir):
                os.makedirs(model_out_dir)
            image_set = glob.glob(os.path.join(model_in_dir, "*"))  # glob already returns a list
            s = utils.heatmap(image_set, decode=True, shape=(size, size))
            print("Entropy for heatmap {} for size {} is {}".format(model_name, size, shannon_entropy(s)))
            h = sns.heatmap(s, cmap="gray")
            plt.annotate('entropy = {}'.format(shannon_entropy(s)), (0, 0), (0, -30), xycoords='axes fraction',
                                               textcoords='offset points', va='top')
            plt.savefig(os.path.join(model_out_dir, "heatmap_{}_{}.png".format(model_name, size)))
            plt.close()
        print("Heatmaps generated for size {}".format(size))

if KNN:
    print("\n")
    with open(os.path.join(OUT_DIR, "knn_results.txt"), "w+") as out:
        for size in [64]:
            for k in [1, 3, 5]:
                out.write("Images of size {}x{}\n".format(size, size))
                for model_name in all_models_name:
Example No. 8
def extract_experiments(data,
                        datapath,
                        resultspath,
                        metric='area',
                        method='raw'):  #,control='starvation 16h, no EGF'):
    '''Extracts the Mass Spec data, when multiple experiments are stored in a single file.'''
    ############################################
    NORMMETHOD = 'l1'  # 'l2' destroys signal from low throughput
    ############################################

    # Get data and group by experiment

    df = pd.read_csv('{}/IPMSData.txt'.format(datapath), delimiter='\t')
    #print min(df['_e2g_nGPArea_Sum_cgpAdj']), np.median(df['_e2g_nGPArea_Sum_cgpAdj']),np.mean(df['_e2g_nGPArea_Sum_cgpAdj'])

    ############################################################################################
    min_value = min(df[df['iBAQ'] != 0]['iBAQ'])  # "min value"
    print(min_value)  # 3.02044e-05
    #embed()
    df['iBAQ'] = df['iBAQ'].fillna(min_value)

    ########################################################################################
    print(len(df['iBAQ']), "original length")
    #df=df[df['_e2g_nGPArea_Sum_cgpAdj'] >0.03]
    ########################################################################################

    df = df[df['GeneName'] != 'EGFR']

    #gb=df.groupby('Experiment')

    lines = open('{}/ExperimentsKey.csv'.format(datapath)).read().replace(
        '\r', '\n').split('\n')
    expmapping = {}
    useexp = {}
    experimentset = {}
    for line in lines[1:]:
        if line == '':
            continue
        line = line.strip().split(',')
        exp = int(line[0])
        expmapping[exp] = float(line[1])
        useexp[exp] = int(line[3])
        experimentset[exp] = int(line[2])
    df['Set'] = [experimentset[x] for x in df['Experiment']]
    df['Use'] = [useexp[x] for x in df['Experiment']]
    # Get experiments and all genes in IPMSs
    experiments = sorted(list(set(df['Experiment'])))
    experiments = [x for x in experiments if useexp[x]]
    allgenes = sorted(list(set(df['GeneName'])))
    print('total genes', len(allgenes))

    #Get unique experiments
    uniqueexperiments = sorted(list(set([expmapping[x] for x in experiments])))
    #replicates=len(experiments)/len(uniqueexperiments)

    # Initialize a matrix for each experiment set to store the IPMS data
    # for each experiment on every gene
    mats = []
    raw_mats = []
    for expset in set(experimentset.values()):
        data = np.empty((len(uniqueexperiments), len(allgenes)))
        data.fill(min_value)

        rawdata = np.empty((len(uniqueexperiments), len(allgenes)))
        rawdata.fill(min_value)
        labels = [str(x) for x in uniqueexperiments]
        #print labels
        times = uniqueexperiments
        #times=times*60.

        toaverage = []
        toaverageind = []
        tmpdf = df[df['Set'] == expset]
        tmpdf = tmpdf[tmpdf['Use'] == 1]
        gb = tmpdf.groupby('Experiment')
        tmpexperiments = list(set(tmpdf['Experiment']))

        # Fill matrix
        for i in range(0, len(tmpexperiments)):
            experiment = tmpexperiments[i]
            dfa = gb.get_group(experiment)

            genes = list(dfa['GeneName'])
            #print genes
            ind = [allgenes.index(x) for x in genes]
            pair = (labels.index(str(expmapping[experiment])), ind)

            if metric == 'area':
                if pair in toaverage:
                    data[pair] = data[pair] + dfa['iBAQ']
                    print('HERE')
                    exit()
                else:
                    data[pair] = dfa['iBAQ']
                    rawdata[pair] = dfa['iBAQ']
                    toaverage.append(pair)
                    for p in pair[1]:
                        toaverageind.append((pair[0], p))
            else:
                print('Please choose "area"')
                sys.exit()
            toaverage = list(set(toaverageind))

        # Normalization
        data[data == 0] = min_value
        rawdata[rawdata == 0] = min_value
        if method == 'norm':
            data = normalize(data, norm=NORMMETHOD,
                             axis=0)  # Normalize each gene
            #data=np.nan_to_num(data/data[labels.index(control),:])
        elif 'gradient' in method:
            if 'norm' in method:
                data = normalize(data, norm=NORMMETHOD, axis=1)
                data = normalize(data, norm=NORMMETHOD,
                                 axis=0)  # Normalize each gene
            #embed()
            #data=-np.log10(data)
            new = np.zeros_like(data)
            for i in range(1, len(uniqueexperiments)):
                new[i, :] = (data[i, :] - data[i - 1, :]) / (times[i] -
                                                             times[i - 1])
                #data[i,:]=(data[i,:]-data[0,:])/times[i]
            data = new
            data = data[1:, :]
        else:
            print('defaulting to no normalization')
        mats.append(data)
        raw_mats.append(rawdata)
    #embed()
    raw = np.mean(np.array(raw_mats), axis=0)
    data = np.mean(np.array(mats), axis=0)
    if 'gradient' in method:
        labels = labels[1:]
        times = times[1:]

    # Write matrix to file
    with open('{}/Matrix_{}_{}.txt'.format(resultspath, metric, method), 'w') as f:
        w, h = data.shape  # w: time points, h: genes
        f.write('Genes\t{}\n'.format('\t'.join(labels)))
        for i in range(0, h):
            s = '\t'.join([str(x) for x in list(data[:, i])])
            f.write("{}\t{}\n".format(allgenes[i], s))

    # Visualize changes as heatmap
    heatmap(data, labels, allgenes, 'noclusters_{}_{}'.format(metric, method),
            resultspath)
    #embed()
    # Plot all gene trends together
    plt.figure()
    plt.plot(times, np.mean(data, axis=1), '--', lw=3)
    #plt.xscale('log')
    for i in range(0, len(allgenes)):
        plt.plot(times, data[:, i], alpha=0.01)
    plt.savefig("{}/allchanges_{}_{}.pdf".format(resultspath, metric, method))
    plt.xscale('linear')
    plt.close()

    # Cluster
    clusters, dev = cluster(data, labels, allgenes, times, resultspath,
                            metric + '_' + method)

    return data, allgenes, labels, (clusters, dev), raw
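For reference, the 'gradient' branch above computes a first-order finite difference in time for each gene g (notation mine, not the author's):

\frac{\Delta x_g}{\Delta t}\bigg|_{t_i} = \frac{x_g(t_i) - x_g(t_{i-1})}{t_i - t_{i-1}}, \qquad i = 1, \dots, T-1,

after which the first row is dropped, matching data = data[1:, :].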
Example No. 9
        loc = coord['location']
        rot = coord['rotation']  # assumed; `rot` is used below but its assignment is missing from this excerpt

        client.request('vset /camera/1/location %f %f %f' %
                       (loc[0], loc[1], loc[2]))
        client.request('vset /camera/1/rotation %f %f %f' %
                       (rot[0], rot[1], rot[2]))
        # This is a stupid hack to get the LODs to load, because UE sucks...
        client.request('vset /camera/0/location %f %f %f' %
                       (loc[0], loc[1], loc[2] - 190))

        frame_lit = client.request('vget /camera/1/lit png')
        frame_lit = read_png(frame_lit)
        print(frame_lit)
        frame_mask = client.request('vget /camera/1/object_mask png')
        frame_mask = read_png(frame_mask)
        frame_depth = client.request('vget /camera/1/depth npy')
        frame_depth = read_npy(frame_depth)
        frame_depth = normalize(frame_depth)
        frame_depth = heatmap(frame_depth)

        cv2.imshow('frame_lit', frame_lit)
        cv2.imshow('frame_mask', frame_mask)
        cv2.imshow('frame_depth', frame_depth)

        if cv2.waitKey(1) & 0xFF == ord('q'):
            run = False
            break

client.disconnect()
cv2.destroyAllWindows()
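The normalize and heatmap helpers are not defined in this excerpt; a plausible minimal sketch (names and behavior are assumptions) that would yield a depth visualization like the one shown above:

import cv2
import numpy as np

def normalize(depth):
    # Min-max scale to [0, 1]; the epsilon guards against a constant image.
    d = depth.astype(np.float32)
    return (d - d.min()) / max(float(d.max() - d.min()), 1e-9)

def heatmap(depth01):
    # Map [0, 1] values to a BGR colormap image suitable for cv2.imshow.
    return cv2.applyColorMap((depth01 * 255).astype(np.uint8), cv2.COLORMAP_JET)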
Example No. 10
num_runs = 1
avg_reward = np.zeros(num_episodes)

grid.env_init()
agent.agent_init()

is_terminal = False
start_state = grid.env_start()
l_action = agent.agent_start(start_state)

grid.current_state = [8,8]
agent.prev_action = 2
agent.prev_state = [9,8]
# state = grid.env_step(1)
agent.agent_end(1)
agent.predictive_novelty([9,8])
grid.current_state = [8,8]
agent.prev_action = 2
agent.prev_state = [9,8]
# state = grid.env_step(1)

agent.agent_end(1)
print(agent.Q)
utils.heatmap(agent.Q,2)
utils.pltshow()
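utils.heatmap and utils.pltshow are not shown here; a minimal sketch of what such helpers might look like, assuming agent.Q is array-like with shape (rows, cols, n_actions) and the second argument is a figure number (both assumptions):

import matplotlib.pyplot as plt
import numpy as np

def q_heatmap(Q, fignum):
    plt.figure(fignum)
    plt.imshow(np.asarray(Q).max(axis=-1), cmap='viridis')  # greedy value per cell
    plt.colorbar(label='max_a Q(s, a)')

def pltshow():
    plt.show()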

Example No. 11
        if l <= 16:       rho = lambda p: p + 0.25*p.clamp(min=0); incr = lambda z: z+1e-9                                # LRP-gamma (lower layers)
        if 17 <= l <= 30: rho = lambda p: p;                       incr = lambda z: z+1e-9+0.25*((z**2).mean()**.5).data  # LRP-epsilon (middle layers)
        if l >= 31:       rho = lambda p: p;                       incr = lambda z: z+1e-9                                # LRP-0 (top layers)

        z = incr(utils.newlayer(layers[l],rho).forward(A[l]))  # step 1
        s = (R[l+1]/z).data                                    # step 2
        (z*s).sum().backward(); c = A[l].grad                  # step 3
        R[l] = (A[l]*c).data                                   # step 4

    else:

        R[l] = R[l+1]

for i,l in enumerate([31,21,11,1]):
    utils.heatmap(i,np.array(R[l][0]).sum(axis=0),0.5*i+1.5,0.5*i+1.5)


A[0] = (A[0].data).requires_grad_(True)

lb = (A[0].data*0+(0-mean)/std).requires_grad_(True)  # lower bound: all-black image, standardized
hb = (A[0].data*0+(1-mean)/std).requires_grad_(True)  # upper bound: all-white image, standardized

z = layers[0].forward(A[0]) + 1e-9                                     # step 1 (a)
z -= utils.newlayer(layers[0],lambda p: p.clamp(min=0)).forward(lb)    # step 1 (b)
z -= utils.newlayer(layers[0],lambda p: p.clamp(max=0)).forward(hb)    # step 1 (c)
s = (R[1]/z).data                                                      # step 2
(z*s).sum().backward(); c,cp,cm = A[0].grad,lb.grad,hb.grad            # step 3
R[0] = (A[0]*c+lb*cp+hb*cm).data                                       # step 4
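Steps 1-4 above follow the generic LRP propagation rule (notation mine, after the LRP overview tutorial this code appears to be based on):

R_j = \sum_k \frac{a_j \, \rho(w_{jk})}{\epsilon + \sum_{0,j} a_j \, \rho(w_{jk})} \, R_k

and the final block implements the z^B rule for the pixel layer, with l_i and h_i the lowest and highest admissible pixel values:

R_i = \sum_j \frac{a_i w_{ij} - l_i w_{ij}^{+} - h_i w_{ij}^{-}}{\sum_i \left( a_i w_{ij} - l_i w_{ij}^{+} - h_i w_{ij}^{-} \right)} \, R_j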