Example #1
def fn_name(resolution, surface_filename, surface_path):
    gmdt = apply_mask(resolution, read_surface(surface_filename, surface_path))

    currents, u, v = calc_currents(resolution, gmdt)
    currents = apply_mask(resolution, currents)
    # print(gmdt.min(), gmdt.max(), gmdt.mean())
    mdt = centralise_mdt(resolution, gmdt)
    # print(mdt.min(), mdt.max(), mdt.mean())
    return mdt, currents
Example #2
def timeseries_avg_3d(file_path,
                      var_name,
                      grid,
                      gtype='t',
                      time_index=None,
                      t_start=None,
                      t_end=None,
                      time_average=False,
                      mask=None):

    data = read_netcdf(file_path,
                       var_name,
                       time_index=time_index,
                       t_start=t_start,
                       t_end=t_end,
                       time_average=time_average)
    if len(data.shape) == 3:
        # Just one timestep; add a dummy time dimension
        data = np.expand_dims(data, 0)
    # Process one time index at a time to save memory
    timeseries = []
    for t in range(data.shape[0]):
        if mask is None:
            data_tmp = mask_3d(data[t, :], grid, gtype=gtype)
        else:
            data_tmp = apply_mask(data[t, :],
                                  np.invert(mask),
                                  depth_dependent=True)
        # Volume average
        timeseries.append(volume_average(data_tmp, grid, gtype=gtype))
    return np.array(timeseries)
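For reference, a minimal sketch of what an apply_mask with a depth_dependent flag might look like here, assuming mask is a 2D boolean array flagging cells to hide that must be broadcast over the depth axis of a 3D field (the names and the masked-array convention are assumptions, not the project's actual implementation):

import numpy as np

def apply_mask(data, mask, depth_dependent=False):
    # Hypothetical sketch: 'mask' flags cells to hide. If depth_dependent,
    # tile the 2D mask over the leading depth axis of the 3D field before
    # building the masked array.
    if depth_dependent:
        mask = np.tile(mask, (data.shape[0], 1, 1))
    return np.ma.masked_where(mask, data)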
Example #3
def eval(net, data_loader, epoch, writer):
    print('Evaluating at epoch %d' % epoch)
    net.eval()
    for iter, sample in enumerate(data_loader):
        image = sample['image'].cuda()
        depth = sample['depth'].cuda()
        label = sample['label'].cuda()

        with torch.no_grad():
            params, pred_masks = net(image)

        mask = F.softmax(pred_masks[0], dim=1)
        for j in range(image.size(0)):
            writer.add_image('Val Input Image/%d' % (j+iter*image.size(0)), tensor_to_X_image(image[j].cpu()),
                             iter + epoch * len(data_loader))
            writer.add_image('Val GT Depth/%d' % (j+iter*image.size(0)), 1. / depth[j], iter + epoch * len(data_loader))
            writer.add_image('Val GT Mask/%d' % (j+iter*image.size(0)), label[j], iter + epoch * len(data_loader))

            # apply mask to input image
            cur_mask = mask[j].detach().cpu().numpy().argmax(axis=0)
            masked_image = apply_mask(image[j].cpu(), cur_mask, ignore_index=mask.size(1) - 1)
            writer.add_image('Val Masked Image/%d' % (j+iter*image.size(0)), masked_image, iter + epoch * len(data_loader))

            # predict mask
            for k in range(mask.size(1) - 1):
                writer.add_image('Val Mask %d/%d' % (k, j+iter*image.size(0)), mask[j, k:k + 1], iter + epoch * len(data_loader))

            # non plane mask
            writer.add_image('Val Non-plane Mask/%d' % (j+iter*image.size(0)), mask[j, -1:], iter + epoch * len(data_loader))
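Here apply_mask(image, mask, ignore_index) appears to overlay the predicted plane segments on the input image while leaving the non-plane class untouched; a rough sketch under that assumption (the colors and blending are invented for illustration, not the project's code):

import numpy as np
import torch

def apply_mask(image, mask, ignore_index, alpha=0.5):
    # Rough sketch: 'image' is a (3, H, W) tensor, 'mask' an (H, W) array of
    # plane indices. Blend a random color into each plane region and leave
    # pixels labelled ignore_index (the non-plane class) untouched.
    colors = torch.rand(ignore_index, 3)
    out = image.clone()
    for idx in range(ignore_index):
        region = torch.from_numpy(mask == idx)
        for c in range(3):
            out[c][region] = (1 - alpha) * out[c][region] + alpha * colors[idx, c]
    return out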
Example #4
def main(r, g, b, negdh, posdh, mins, maxs, minv, maxv, input_path, output_path):
    if input_path is None:
        camera = cv2.VideoCapture(0)
        camera.set(
            cv2.CAP_PROP_FRAME_WIDTH, 800
        )  # Higher value will cause error on FPGA
        camera.set(
            cv2.CAP_PROP_FRAME_HEIGHT, 600
        )  # Higher value will cause error on FPGA
        camera.set(cv2.CAP_PROP_FPS, 5)
        _, frame = camera.read()
    else:
        frame = cv2.imread(input_path)

    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    mask, frame = apply_mask(frame, r, g, b, negdh, posdh, mins, maxs, minv, maxv)
    nrpc = calculate_nrpc(mask)

    if output_path is not None:
        time_str = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
        text = "{}, NRPC: {:.4f}".format(time_str, nrpc)
        draw_text(img=frame, text=text)
        cv2.imwrite(output_path, frame)

    return nrpc
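This example and #5 and #14 below share the same ten-parameter signature, which suggests an HSV threshold around the hue of the reference (r, g, b) color within the given saturation/value bounds. A plausible sketch, not the project's implementation (hue wrap-around and exact bound handling are assumptions):

import cv2
import numpy as np

def apply_mask(frame, r, g, b, negdh, posdh, mins, maxs, minv, maxv):
    # Hypothetical sketch: threshold the RGB frame in HSV space around the
    # hue of the reference color and return the binary mask plus the
    # masked frame.
    hue = int(cv2.cvtColor(np.uint8([[[r, g, b]]]), cv2.COLOR_RGB2HSV)[0, 0, 0])
    lower = np.array([max(hue - negdh, 0), mins, minv], dtype=np.uint8)
    upper = np.array([min(hue + posdh, 179), maxs, maxv], dtype=np.uint8)
    hsv = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)
    mask = cv2.inRange(hsv, lower, upper)
    return mask, cv2.bitwise_and(frame, frame, mask=mask)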
Example #5
File: display.py Project: pwierzgala/evaa
    def paintEvent(self, event):
        super().paintEvent(event)
        frame = self.camera.frame
        if frame is not None:
            painter = QPainter(self)

            (h, w) = frame.shape[:2]

            if self.selected_color_rgb[0] is not None:
                _, frame = apply_mask(
                    frame=frame,
                    r=self.selected_color_rgb[0],
                    g=self.selected_color_rgb[1],
                    b=self.selected_color_rgb[2],
                    negdh=self.hsv_min_hue,
                    posdh=self.hsv_max_hue,
                    mins=self.hsv_min_saturation,
                    maxs=self.hsv_max_saturation,
                    minv=self.hsv_min_value,
                    maxv=self.hsv_max_value,
                )

            q_image = QImage(frame, w, h, QImage.Format_RGB888)
            q_pixmap = QPixmap.fromImage(q_image)
            painter.drawPixmap(0, 0, q_pixmap)
Example #6
File: asyncws.py Project: riffm/asyncws
 def _parse_frame_payload(self):
     frame = self.frames[-1]
     payload = self._get_data()
     if frame['mask'] and payload:
         payload = apply_mask(payload, frame['mask'])
     frame['payload'] = payload
     self._handle_frame()
Example #7
 def test_apply_mask(self):
     self.assertEqual(utils.apply_mask('192.168.233.89', '255.255.255.0'), '192.168.233.0')
     self.assertEqual(utils.apply_mask('192.168.233.89', '255.255.255.254'), '192.168.233.88')
     self.assertEqual(utils.apply_mask('192.168.233.89', '0.0.0.0'), '0.0.0.0')
     self.assertEqual(utils.apply_mask('192.168.233.89', '255.0.0.0'), '192.0.0.0')
     self.assertEqual(utils.apply_mask('192.168.233.89', '255.0.0.0'), '192.0.0.0')
     self.assertEqual(utils.apply_mask('192.168.233.89', '1.0.0.0'), 'invalid entry')
     self.assertEqual(utils.apply_mask('192.168.233', '255.0.0.0'), 'invalid entry')
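The assertions above pin the behavior down well enough to sketch an implementation: AND the octets, and reject anything that is not four in-range octets with a contiguous netmask. This is only a reconstruction consistent with the tests, not the project's actual utils.apply_mask:

def apply_mask(address, mask):
    # Sketch consistent with the tests above: bitwise-AND each octet of the
    # address with the mask, validating that both have four in-range octets
    # and that the mask's set bits are contiguous (a valid netmask).
    try:
        addr = [int(o) for o in address.split('.')]
        msk = [int(o) for o in mask.split('.')]
        if len(addr) != 4 or len(msk) != 4:
            return 'invalid entry'
        if any(not 0 <= o <= 255 for o in addr + msk):
            return 'invalid entry'
        bits = ''.join(format(o, '08b') for o in msk)
        if '01' in bits:  # a one must never follow a zero
            return 'invalid entry'
        return '.'.join(str(a & m) for a, m in zip(addr, msk))
    except ValueError:
        return 'invalid entry'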
Example #8
def evaluate_model(hyperparams):
    means = []
    for mask in tqdm(masks):
        # generate missing data, model and trainer
        data = LinkPredData(args.dataset, seed=args.seed)
        apply_mask(data.features, mask)  # convert masked number to nan
        model = VGAEmf(data, args.nhid, args.latent_dim, hyperparams['dropout'], args.ncomp)
        params = {
            'lr': hyperparams['lr'],
            'weight_decay': hyperparams['weight_decay'],
            'epochs': args.epoch,
        }
        trainer = LinkPredTrainer(data, model, params, niter=20)

        # run the model
        result = trainer.run()
        means.append(result['test_auc'])

    return mean(means)
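In this family of examples (this one and #10, #11, #12 and #19 below), the inline comment says apply_mask "converts masked numbers to nan", mutating data.features in place. A one-line sketch under that reading, assuming mask is a boolean array shaped like the feature matrix:

def apply_mask(features, mask):
    # Sketch matching the inline comment: overwrite the masked entries of
    # the feature matrix with NaN, in place (works for a float NumPy array
    # or torch tensor alike).
    features[mask] = float('nan')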
Example #9
        def get_no_of_pixel_and_field(mask, image):

            mask = mask.copy()
            image = image.copy()
            no_of_pixels = np.nansum(mask)

            ulta_mask = -1 * (mask - 1)

            _masked_image = apply_mask(image, ulta_mask)

            return no_of_pixels, np.nansum(_masked_image)
Example #10
def objective(trial):
    # Tune hyperparameters (dropout, weight decay, learning rate) using Optuna
    dropout = trial.suggest_uniform('dropout', 0., 0.1)
    lr = trial.suggest_loguniform('lr', 5e-4, 2e-2)
    weight_decay = trial.suggest_loguniform('weight_decay', 1e-10, 1e-3)

    # prepare data and model
    data = LinkPredData(args.dataset, seed=args.seed)
    apply_mask(data.features, masks[0])
    model = VGAEmf(data, args.nhid, args.latent_dim, dropout, args.ncomp)

    # run model
    params = {
        'lr': lr,
        'weight_decay': weight_decay,
        'epochs': args.epoch,
    }
    trainer = LinkPredTrainer(data, model, params, niter=10)
    result = trainer.run()
    return -result['val_auc']
Example #11
def objective(trial):
    # Tune hyperparameters (dropout, weight decay, learning rate) using Optuna
    dropout = trial.suggest_uniform('dropout', 0.4, 0.8)
    lr = trial.suggest_loguniform('lr', 5e-4, 1e-2)
    weight_decay = trial.suggest_loguniform('weight_decay', 1e-6, 1e-1)

    # prepare data and model
    data = NodeClsData(args.dataset)
    apply_mask(data.features, masks[0])
    model = GCNmf(data, args.nhid, dropout, args.ncomp)

    # run model
    params = {
        'lr': lr,
        'weight_decay': weight_decay,
        'epochs': args.epoch,
        'patience': args.patience,
        'early_stopping': True
    }
    trainer = NodeClsTrainer(data, model, params, niter=10)
    result = trainer.run()
    return -result['val_acc']
Example #12
def evaluate_model(hyperparams):
    means = []
    dropout = hyperparams['dropout']
    for mask in tqdm(masks):
        # generate missing data, model and trainer
        data = NodeClsData(args.dataset)
        apply_mask(data.features, mask)  # convert masked number to nan
        model = GCNmf(data, args.nhid, dropout, args.ncomp)
        params = {
            'lr': hyperparams['lr'],
            'weight_decay': hyperparams['weight_decay'],
            'epochs': args.epoch,
            'patience': args.patience,
            'early_stopping': True
        }
        trainer = NodeClsTrainer(data, model, params, niter=20)

        # run the model
        result = trainer.run()
        means.append(result['test_acc'])

    return mean(means)
Example #13
File: asyncws.py Project: riffm/asyncws
 def _ctl_frame_close(self):
     frame = self.frames.pop()
     payload = frame['payload']
     if frame['mask'] and payload:
         payload = apply_mask(payload, frame['mask'])
     self.log_info('Connection is closing: %s' % payload)
     if self._closing_frame_sent:
         self.close()
     else:
         self.send_message(payload, opcode=0x8)
         self.close_when_done()
     self.on_close(payload)
     sys.exit(0)
Example #14
def render():
    """
    Renders a single frame.

    Returns
    -------
    ndarray
        Array representing a frame in BGR color format.
    """
    global masked_frame, image

    ret, frame = camera.read()
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    mask, masked_frame = apply_mask(
        frame=frame,
        r=selected_color[0][0][2],
        g=selected_color[0][0][1],
        b=selected_color[0][0][0],
        negdh=cv2.getTrackbarPos(hue_delta_minus_name, window_name),
        posdh=cv2.getTrackbarPos(hue_delta_plus_name, window_name),
        mins=cv2.getTrackbarPos(min_saturation_name, window_name),
        maxs=cv2.getTrackbarPos(max_saturation_name, window_name),
        minv=cv2.getTrackbarPos(min_value_name, window_name),
        maxv=cv2.getTrackbarPos(max_value_name, window_name),
    )

    # Calculate and draw metric value.
    nrpc = calculate_nrpc(mask)
    time_str = time.strftime("%H:%M:%S", time.localtime())
    text = "{}, FPS: {:05.2f}, NRPC: {:.4f}".format(time_str, fps, nrpc)
    draw_text(img=frame, text=text)

    # Change color format back from RGB to BGR.
    frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)
    masked_frame = cv2.cvtColor(masked_frame, cv2.COLOR_RGB2BGR)

    # Concatenate (1) frames and (2) colors horizontally. Then, stack
    # (1) on top of (2) vertically.
    frames = np.concatenate((frame, masked_frame), axis=1)
    colors = np.concatenate(
        (current_color, selected_color, lower_filtered_color, upper_filtered_color),
        axis=1,
    )
    image = np.concatenate((frames, colors), axis=0)

    return image
Example #15
 def process_and_respond(self, query):
     '''
     Processes content for valid query.
     Parameters:
     query - the parameters provided to the query
     '''
     body = 'Hello! The subnet is: ' + utils.apply_mask(query[0], query[1])
     html = "<!DOCTYPE html><html>"
     html += "<head><title>"
     html += "Response from L13Server"
     html += "</title></head>"
     html += "<body><p><h1>"
     html += body
     html += "</h1></p></body>"
     html += "</html>"
     self.log_message("page built")
     return html
Example #16
File: Region.py Project: djrtwo/SoundSpace
 def mask_image(self, image):
     mask = utils.mask_from_contour_points(image, self.points)
     return utils.apply_mask(image, mask)
Example #17
    def forward(self, x, level, query_in_out_each_level=False, query_hidden=False):
        """
        :param x: a 4D input to the NN
        :param level: level index
        :param query_in_out_each_level: whether to query the input/output at each level
        (may be used to enforce losses at different levels)
        :param query_hidden: whether to query hidden representations
        :return: output of the NN, plus dicts of per-level inputs, outputs and hidden representations
        """
        # collectors
        all_hidden = dict()
        all_inputs = dict()
        all_outputs = dict()

        # forward prop
        assert level >= 0, 'level index should be a non-negative integer!'
        resolved_maps_dict = self.resolved_maps[str(level)]
        if level == 0:
            if query_in_out_each_level:
                all_inputs['0'] = x
            encoded = self._modules['L0_Conv_0'](x)
            if query_hidden:
                all_hidden['L0_0'] = encoded
            # ----- pad -----
            encoded = torch.nn.functional.pad(encoded, (1, 1, 1, 1), 'replicate')
            # ---------------
            y = self._modules['L0_deConv_0'](encoded)
            # chop off the boundaries
            y = y[:, :, 2:-2, 2:-2]
            for i in range(1, self.n_filter_groups_each_level['0']):
                encoded = self._modules['L0_Conv_{}'.format(i)](x)
                if self.use_maps:
                    masked_encoded = apply_mask(encoded, resolved_maps_dict[str(i - 1)])
                else:
                    masked_encoded = encoded
                if query_hidden:
                    all_hidden['L0_{}'.format(i)] = masked_encoded
                y += self._modules['L0_deConv_{}'.format(i)](masked_encoded)
            if query_in_out_each_level:
                all_outputs['0'] = y
        else:
            encoded = self._modules['L{}_Conv_0'.format(level)](x)
            decoded, ins, outs, hs = \
                self.forward(encoded, level-1, query_in_out_each_level, query_hidden)
            # ----- pad -----
            decoded = torch.nn.functional.pad(decoded, (1, 1, 1, 1), 'replicate')
            # ---------------
            if query_in_out_each_level:
                all_inputs[str(level)] = x
                all_inputs.update(ins)
                all_outputs.update(outs)
            if query_hidden:
                all_hidden.update(hs)
                all_hidden['L{}_0'.format(level)] = encoded
            y = self._modules['L{}_deConv_0'.format(level)](decoded)
            y = y[:, :, 2:-2, 2:-2]
            for i in range(1, self.n_filter_groups_each_level[str(level)]):
                encoded = self._modules['L{}_Conv_{}'.format(level, i)](x)
                if self.use_maps:
                    masked_encoded = apply_mask(encoded, resolved_maps_dict[str(i - 1)])
                else:
                    masked_encoded = encoded
                if query_hidden:
                    all_hidden['L{}_{}'.format(level, i)] = masked_encoded
                y += self._modules['L{}_deConv_{}'.format(level, i)](masked_encoded)
            if query_in_out_each_level:
                all_outputs[str(level)] = y

        return y, all_inputs, all_outputs, all_hidden
Example #18
    def actual_process(self, file=None, previous_operation_name=None):

        sys.stdout.write(
            'Started The Process for MaskingMagnetograms :{}\n'.format(
                file.filename))

        hmi_ic_chain = Thresholding(
            operation_name='mask',
            suffix=None,
            k=-5,
            op=operator.le,
            radius_factor=0.96,
            do_closing=True).set_prev(
                AIAPrep(operation_name='aiaprep', radius_factor=1.0).set_prev(
                    DownloadFiles(operation_name='data')))

        previous_operation_hmi_ic = hmi_ic_chain.process(self._hmi_ic_file)

        exclude_mask_hmi_ic = PreviousOperation(file=self._hmi_ic_file,
                                                previous_op='mask',
                                                suffix=None)

        hmi_mag_chain = AIAPrep(operation_name='aiaprep',
                                radius_factor=1.0).set_prev(
                                    DownloadFiles(operation_name='data'))

        previous_operation_hmi_mag = hmi_mag_chain.process(file)

        aia_prep_chain = AIAPrep(operation_name='aiaprep',
                                 radius_factor=1.0).set_prev(
                                     DownloadFiles(operation_name='data',
                                                   fname_from_rec=True))

        aia_align_chain = AlignAfterAIAPrep(
            'aligned_data', file, radius_factor=1.0).set_prev(aia_prep_chain)

        ldr_chain_aia = LimbDarkeningCorrection(
            operation_name='ldr', radius_factor=0.96).set_prev(aia_align_chain)

        aia_chain_plages = Thresholding(
            operation_name='mask',
            suffix='plages',
            k=1.71,
            op=operator.ge,
            post_processor=do_area_filtering,
            radius_factor=0.96,
            do_closing=True,
            exclude_mask_file=[exclude_mask_hmi_ic]).set_prev(ldr_chain_aia)

        previous_operation_aia_plages = aia_chain_plages.process(
            self._aia_file, )

        exclude_mask_aia_plages = PreviousOperation(file=self._aia_file,
                                                    previous_op='mask',
                                                    suffix='plages')

        aia_chain_active_networks = Thresholding(operation_name='mask',
                                                 suffix='active_networks',
                                                 k=2,
                                                 op=operator.ge,
                                                 k2=6,
                                                 op2=operator.ge,
                                                 radius_factor=0.96,
                                                 value_1=1.0,
                                                 value_2=0.0,
                                                 do_closing=False,
                                                 exclude_mask_file=[
                                                     exclude_mask_hmi_ic,
                                                     exclude_mask_aia_plages
                                                 ]).set_prev(ldr_chain_aia)

        hmi_crop_task = CropImage(operation_name='crop_hmi_afterprep',
                                  radius_factor=0.96).set_prev(hmi_mag_chain)

        previous_operation_active_networks = aia_chain_active_networks.process(
            self._aia_file, )

        hmi_ic_mask_data, hmi_ic_mask_header = self._hmi_ic_file.get_fits_hdu(
            previous_operation_hmi_ic.operation_name)

        previous_operation_hmi_mag = hmi_crop_task.process(file)

        sys.stdout.write('Performed all the chains for : {}\n'.format(
            file.filename))

        ampd, amph = self._aia_file.get_fits_hdu(
            previous_operation_aia_plages.operation_name, 'plages')

        aia_mask_plages_data, _ = ampd, amph

        apnd, apnh = self._aia_file.get_fits_hdu(
            previous_operation_active_networks.operation_name,
            'active_networks')

        aia_mask_active_networks_data, _ = apnd, apnh

        hmi_mag_image_data, hmi_mag_image_header = file.get_fits_hdu(
            previous_operation_hmi_mag.operation_name)

        masked_image = hmi_mag_image_data.copy()

        total_mask = np.add(
            np.add(aia_mask_plages_data, aia_mask_active_networks_data),
            hmi_ic_mask_data)

        total_mask[total_mask >= 1.0] = 1.0

        no_of_pixels_total_field = len(
            circle(2048.5 - 1, 2048.5 - 1,
                   hmi_mag_image_header['R_SUN'] * 0.96)[0])

        # apply_mask masks out the features flagged in the mask and
        # returns only the non-masked elements
        masked_image = apply_mask(masked_image, total_mask)

        background_field = np.nansum(masked_image)

        no_of_background_field = no_of_pixels_total_field - np.nansum(
            total_mask)

        total_magnetic_field = np.nansum(hmi_mag_image_data)

        def get_no_of_pixel_and_field(mask, image):

            mask = mask.copy()
            image = image.copy()
            no_of_pixels = np.nansum(mask)

            ulta_mask = -1 * (mask - 1)

            _masked_image = apply_mask(image, ulta_mask)

            return no_of_pixels, np.nansum(_masked_image)

        mask_active_network_plage = np.add(aia_mask_plages_data,
                                           aia_mask_active_networks_data)

        mask_active_network_plage[mask_active_network_plage >= 1.0] = 1.0

        a, b = get_no_of_pixel_and_field(mask_active_network_plage,
                                         hmi_mag_image_data)

        no_of_pixel_plage_and_active, total_mag_field_plage_active = a, b

        no_of_sunspot_pixel, sunspot_field = get_no_of_pixel_and_field(
            hmi_ic_mask_data, hmi_mag_image_data)

        record = Record(
            date=get_date(file),
            hmi_filename=file.filename,
            hmi_ic_filename=self._hmi_ic_file.filename,
            aia_filename=self._aia_file.filename,
            time_difference=np.abs(
                get_julian_day(file) - get_julian_day(self._aia_file)),
            no_of_pixel_sunspot=no_of_sunspot_pixel,
            total_mag_field_sunspot=sunspot_field,
            no_of_pixel_plage_and_active=no_of_pixel_plage_and_active,
            total_mag_field_plage_active=total_mag_field_plage_active,
            no_of_pixel_background=no_of_background_field,
            total_background_field=background_field,
            total_pixels=no_of_pixels_total_field,
            total_magnetic_field=total_magnetic_field)

        record.save()

        return masked_image, hmi_mag_image_header
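Per the comment above ("masks out the features flagged in the mask and returns only the non-masked elements"), and given that the callers reduce the result with np.nansum, this apply_mask plausibly NaNs out the flagged pixels. A sketch under that assumption:

import numpy as np

def apply_mask(image, mask):
    # Sketch: NaN out the pixels flagged by the mask so that np.nansum over
    # the result counts only the non-masked (background) elements.
    return np.where(mask >= 1.0, np.nan, image)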
Example #19
parser.add_argument('--nhid', default=16, type=int, help='the number of hidden units')
parser.add_argument('--dropout', default=0.5, type=float, help='dropout rate')
# parser.add_argument('--ncomp', default=5, type=int, help='the number of Gaussian components')
parser.add_argument('--lr', default=0.005, type=float, help='learning rate')
parser.add_argument('--wd', default=1e-2, type=float, help='weight decay')
parser.add_argument('--epoch', default=10000, type=int, help='the number of training epoch')
parser.add_argument('--patience', default=100, type=int, help='patience for early stopping')
parser.add_argument('--verbose', action='store_true', help='verbose')

parser.add_argument('--emb1', default=100, type=int, help='k : the size of linear combination')
parser.add_argument('--emb2', default=100, type=int, help='m : the size of rank regularization')
parser.add_argument('--emb3_1', default=100, type=int, help='la : the size of set embedding')
parser.add_argument('--emb3_2', default=100, type=int, help='lb : the size of set embedding')

args = parser.parse_args()

if __name__ == '__main__':
    data = NodeClsData(args.dataset)
    mask = generate_mask(data.features, args.rate, args.type)
    apply_mask(data.features, mask)
    model = GCNfse(data, nhid=args.nhid, dropout=args.dropout, n_emb1=args.emb1, n_emb2=args.emb2, n_emb3_1=args.emb3_1, n_emb3_2=args.emb3_2)
    params = {
        'lr': args.lr,
        'weight_decay': args.wd,
        'epochs': args.epoch,
        'patience': args.patience,
        'early_stopping': True
    }
    trainer = NodeClsTrainer(data, model, params, niter=20, verbose=args.verbose)
    trainer.run()
Example #20
def labelit(img, mask_part, labelbase, rois):
    """
    人工给每部分重新打标签
    :param img: 图片
    :param mask_part: 标签
    :return: 重打过后的mask
    """
    n_part = len(mask_part)
    colors = random_colors(n_part)
    minrow = rois[0]
    mincol = rois[1]
    maxrow = rois[2]
    maxcol = rois[3]
    newmask = np.zeros(mask_part[0].shape)
    newmask = newmask.astype(np.int32)
    print("按从左到右顺序输入标签:")

    for i in range(n_part):
        mask = mask_part[i]
        temp = copy.copy(img)
        onepart = apply_mask(temp, mask, colors[i])
        onepart = onepart[minrow:maxrow, mincol:maxcol, ]

        # print(i,onepart.shape)
        ax = plt.subplot(1, n_part, i + 1)
        # print(ax)
        plt.imshow(onepart)
        ax.set_title('part')
        plt.xticks([]), plt.yticks([])
        plt.tight_layout()
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.margins(0, 0)
    plt.show()

    labels = str(input())
    if labels[0] == '.':
        return []
    # if labels[0]=='r' or labels[0]=='R':
    #     for i in range(n_part):
    #         mask = mask_part[i]
    #         temp = copy.copy(img)
    #         onepart = apply_mask(temp, mask, colors[i])
    #         onepart = onepart[minrow:maxrow, mincol:maxcol, ]
    #         cv2.imwrite(str(i)+'.png',onepart)
    #     for i in range(n_part):
    #         onepart=cv2.imread(str(i)+'.png')
    #         ax = plt.subplot(1, n_part, i + 1)
    #         plt.imshow(onepart)
    #         ax.set_title('part')
    #         plt.xticks([]), plt.yticks([])
    #         plt.tight_layout()
    #     plt.gca().xaxis.set_major_locator(plt.NullLocator())
    #     plt.gca().yaxis.set_major_locator(plt.NullLocator())
    #     plt.margins(0, 0)
    #     plt.show()
    for i in range(n_part):
        if int(labels[i]) == 0:
            newmask = np.where(mask_part[i] > 0, np.zeros(newmask.shape),
                               newmask)  # just noticed: this line may be unnecessary?
        else:
            newmask = np.where(
                mask_part[i] > 0,
                np.ones(newmask.shape, dtype=int) *
                (labelbase * 10 + int(labels[i])), newmask)

    # Label one part at a time
    # for mask in mask_part:
    #     plt.figure(figsize=(10, 10))
    #     print("输入这部分的标签:")
    #     temp=copy.copy(img)
    #     onepart=apply_mask(temp,mask,colors[0])
    #     plt.subplot(1,1,1)
    #     plt.imshow(onepart)
    #     plt.title("Enter the label for this part")
    #     plt.xticks([]), plt.yticks([])
    #     # plt.show()
    #     plt.ion()
    #     plt.pause(2)
    #     plt.close()
    #     newlabel=int(input())
    #     newmask=np.where(mask>0,newlabel,newmask)
    return newmask
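This example and #25 below call a three-argument apply_mask(image, mask, color), which matches the familiar Mask R-CNN-style visualization helper that alpha-blends a color into the masked region; a sketch in that style (the alpha and the 255 scaling are assumptions):

import numpy as np

def apply_mask(image, mask, color, alpha=0.5):
    # Sketch in the Mask R-CNN visualization style: alpha-blend 'color' into
    # the image wherever the mask is set; other pixels pass through.
    image = image.copy()
    for c in range(3):
        image[:, :, c] = np.where(mask > 0,
                                  image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                  image[:, :, c])
    return image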
Example #21
File: tests.py Project: riffm/asyncws
 def test_unmasking_text(self):
     self.assertEqual(utils.apply_mask('\x7f\x9fMQX', '7\xfa!='), 'Hello')
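This test exercises standard RFC 6455 frame (un)masking: XOR each payload byte with the 4-byte masking key, cycling through the key. A sketch that reproduces 'Hello' from the inputs above (the real helper may operate on bytes rather than str):

def apply_mask(data, mask):
    # Sketch consistent with the test above: XOR each payload byte with the
    # 4-byte masking key, cycling the key (RFC 6455 frame masking).
    return ''.join(chr(ord(ch) ^ ord(mask[i % len(mask)]))
                   for i, ch in enumerate(data))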
Example #22
def train(args, net, optimizer, data_loader, epoch, writer):
    net.train()
    losses = AverageMeter()
    losses_mask = AverageMeter()
    losses_depth = AverageMeter()
    losses_normal = AverageMeter()
    losses_area = AverageMeter()

    for iter, sample in enumerate(data_loader):
        image = sample['image'].cuda()
        depth = sample['depth'].cuda()
        label = sample['label'].cuda()
        K_inv = sample['K_inv'].cuda()

        # normal
        normal = depth_2_normal(depth, K_inv[0, 0])

        # forward
        plane_params, pred_masks = net(image)

        # loss
        loss_mask, loss_depth, loss_normal = get_loss(plane_params, pred_masks, depth, normal, label, K_inv)
        loss = args.mask * loss_mask + args.depth * loss_depth + args.normal * loss_normal

        # Backward
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        losses.update(loss.item())
        losses_mask.update(loss_mask.item())
        losses_depth.update(loss_depth.item())
        losses_normal.update(loss_normal.item())
        #losses_area.update(loss_area.item())

        if iter % 10 == 0:
            print(f"[{epoch:2d}][{iter:4d}/{len(data_loader)}]"
                  f"Loss:{losses.val:.4f} ({losses.avg:.4f})"
                  f"Mask:{losses_mask.val:.4f} ({losses_mask.avg:.4f})"
                  f"Depth:{losses_depth.val:.4f} ({losses_depth.avg:.4f})"
                  f"Normal:{losses_normal.val:.4f} ({losses_normal.avg:.4f})")
                  #f"Area:{losses_area.val:.4f} ({losses_area.avg:.4f})")
            writer.add_scalar('loss/total_loss', losses.val, iter + epoch * len(data_loader))
            writer.add_scalar('loss/mask_loss', losses_mask.val, iter + epoch * len(data_loader))
            writer.add_scalar('loss/depth_loss', losses_depth.val, iter + epoch * len(data_loader))
            writer.add_scalar('loss/normal_loss', losses_normal.val, iter + epoch * len(data_loader))
            #writer.add_scalar('loss/area_loss', losses_area.val, iter + epoch * len(data_loader))

        if iter % 100 == 0:
            mask = F.softmax(pred_masks[0], dim=1)
            normal = (normal + 1) / 2.
            for j in range(image.size(0)):
                writer.add_image('Train Input Image/%d'%(j), tensor_to_X_image(image[j].cpu()), iter + epoch * len(data_loader))
                writer.add_image('Train GT Depth/%d'%(j), 1. / depth[j], iter + epoch * len(data_loader))
                writer.add_image('Train GT Mask/%d'%(j), label[j], iter + epoch * len(data_loader))

                # apply mask to input image
                cur_mask = mask[j].detach().cpu().numpy().argmax(axis=0)
                masked_image = apply_mask(image[j].cpu(), cur_mask, ignore_index=mask.size(1) - 1)
                writer.add_image('Train Masked Image/%d' % (j), masked_image, iter + epoch * len(data_loader))

                # predict mask
                for k in range(mask.size(1) - 1):
                    writer.add_image('Train Mask %d/%d'%(k, j), mask[j, k:k+1], iter + epoch * len(data_loader))

                # non plane mask
                writer.add_image('Train Non-plane Mask/%d'%(j), mask[j, -1:], iter + epoch * len(data_loader))

                writer.add_image('Train Normal/%d'%(j), normal[j], iter + epoch * len(data_loader))
                
                # predict normal map
                pred_params = plane_params[j]
                cur_mask = mask[j].detach().cpu().numpy()[:-1, :, :].argmax(axis=0)
                pred_normal = pred_params[cur_mask, :].detach()  
                pred_normal = pred_normal.permute(2, 0, 1) 
                pred_normal /= (torch.norm(pred_normal, p=2, dim=0, keepdim=True) + 1e-6)
                pred_normal = (pred_normal + 1) / 2.
                writer.add_image('Train Pred Normal/%d'%(j), pred_normal, iter + epoch * len(data_loader))
Example #23
for i, (imgs, _) in enumerate(dataloader):

    if i == num_batches:
        break

    imgs = imgs.type(Tensor)
    save_sample_images(imgs, 'originals', i)

    # initial input for generator. This is what we want to optimize
    z = create_noise(cuda, imgs.shape[0], opt.latent_dim)
    optimizer = torch.optim.Adam([z], lr=opt.lr, betas=(opt.b1, opt.b2))

    img_mask = generate_center_mask(Tensor, opt.img_size, opt.channels, 0.3)
    # we also want a slightly bigger version of the mask for alpha blending later
    fill_mask = generate_center_mask(Tensor, opt.img_size, opt.channels, 0.25)
    masked_imgs = apply_mask(Tensor, imgs, img_mask)

    save_sample_images(masked_imgs, 'masked', i)

    avg_contextual_loss = 0
    avg_perceptual_loss = 0
    avg_completion_loss = 0

    # iterate n times over the same batch and optimize to find the best z input vector
    # that will produce the best completed result
    for j in range(opt.num_iters):
        if z.grad is not None:
            z.grad.data.zero_()
        discriminator.zero_grad()
        generator.zero_grad()
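Here apply_mask presumably knocks out the region to be inpainted by elementwise multiplication; a sketch assuming img_mask is 1 outside the hole and 0 inside it (the convention could equally be inverted):

def apply_mask(Tensor, imgs, img_mask):
    # Hypothetical sketch: zero out the masked (center) region of each image
    # by elementwise multiplication; the Tensor type argument is kept only
    # to mirror the call above.
    return (imgs * img_mask).type(Tensor)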
Example #24
def main():

    #
    # parse command line
    #
    args = parse_arguments()

    #
    # create out folder
    #
    if not os.path.exists(args.outf):
        os.makedirs(args.outf)

    #
    # load image
    #
    if not os.path.exists(args.image):
        raise FileNotFoundError(args.image)
    im = ClickableImage(args.image)

    # ******************* PARAMETERS ******************************************

    # Intensity of interest: intensity above which a pixel may be considered as a center / an edge
    # Assumption: centers and edges are brighter than background
    intensity_of_interest = get_quantile(im.img, 0.75)

    #
    # Ring
    #
    # - radius: if --typical_edge is not specified, we use the value of --radius, else we heuristically choose it from the 'typical edge length'
    # - thickness: if --thickness is not specified, we use (2/3)*radius
    if args.typical_edge == "measure_an_edge":
        typical_edge_length = im.measure_an_edge()
        print(
            "You selected an edge of length : {}".format(typical_edge_length))
        radius = math.ceil(typical_edge_length * 0.6)
    elif args.typical_edge == "count_cells":
        n_cells = im.count_cells()
        typical_edge_length = im.n_cells_to_edge_length(n_cells)
        radius = math.floor(typical_edge_length / 3)
    else:
        radius = args.radius
    thickness = args.thickness if args.thickness is not None else int(
        (2 / 3) * radius)

    # *************************************************************

    #
    # Manual selection of the point to analyze
    #
    center = im.get_point()
    center = (math.ceil(center[0]), math.ceil(center[1]))
    print("Selected point: {}".format(center))

    #************************** STEP 1 ****************************#
    # Filter the image with a ring centered on the point of interest
    #**************************************************************#
    #
    # define ring
    #
    ring = {}
    ring["center"] = center
    ring["radius"] = (radius - 0.5 * thickness, radius + 0.5 * thickness)

    #
    # filter image with the ring
    #
    mask = create_ring_mask(im.img, ring)
    img_filtered = apply_mask(im.img, mask)

    #
    # Visualization of the ring around the selected point
    #

    # image filtered by the ring
    fig, ax = plot_img(img_filtered, title="Image filtered by the ring")
    fig.savefig(os.path.join(args.outf, "1_img_filtered.png"))

    # image with superimposed ring
    img_with_ring = superimpose_ring(im.img, ring, mask)
    fig, ax = plot_img(
        img_with_ring,
        title="Center : ({:.2f}, {:.2f}) \n Radius : ({:.2f}, {:.2f})".format(
            ring["center"][0], ring["center"][1], ring["radius"][0],
            ring["radius"][1]))
    fig.savefig(os.path.join(args.outf, "2_ring.png"))

    #********************** STEP 2 **********************#
    # Get the 'mountain' relief of intensity in the ring
    #****************************************************#

    angle2intensity = angle2intensity_in_ring(im.img, ring, mask)
    bucket2intensity, intensity2bucket = angular_smoothing(
        angle2intensity,
        size=args.size_smoothing,
        stride=args.stride_smoothing,
        cast_to_int=False)

    #************************** STEP 3 *********************#
    # 'water descent' on our relief, to obtain the barcodes
    # of the peaks (birth/death)
    #
    # Let f be our 'relief' function, f: angle -> intensity
    # Compute persistence of the connected components of
    # the filtration {f^-1([h, +inf[), for all h real}
    #*******************************************************#
    barcodes = water_descent(intensity2bucket, args.stride_smoothing)

    # Heuristically compute a sensible 'min_lifetime', a threshold on lifetime (death - birth):
    # - below it: the connected component is considered noise
    # - above it: the cc is considered a peak
    # this is the dashed line that "cuts" the persistence diagram
    range_intensity = max(bucket2intensity) - min(bucket2intensity)
    min_lifetime = max(args.threshold_min_lifetime,
                       range_intensity * args.coef_min_lifetime)

    #****************************** STEP 4 ********************************#
    # For visualization, we report the barcodes in a 'persistence diagram'
    # We filter out the cc with a persistence < min_lifetime
    # The remaining ones are the significant 'peaks'
    # We count them to determine if the point of interest is:
    # - a corner (3 or more peaks)
    # - an edge (2 peaks)
    # - a part of background (else)
    #***********************************************************************#

    # Analysis of the barcodes: persistence diagram and type of the point
    peaks = get_peaks(barcodes, cut=min_lifetime)
    n_peaks = len(peaks)

    #
    # Visualization
    #
    fig, (ax1, ax2) = plt.subplots(1, 2, sharey=True, figsize=(16, 7.3))

    # intensity relief in the ring
    ax1.plot(range(0, 360, args.stride_smoothing), bucket2intensity)
    ax1.set_xlim(0, 360)
    ax1.set_xlabel("Angle in the ring (°)")
    ax1.set_ylabel("Intensity in [0, 255]")
    ax1.set_title(
        "Intensity vs. Angle\nafter angular smoothing (size={}, stride={})".
        format(args.size_smoothing, args.stride_smoothing))

    # corresponding persistence diagram
    ax2 = persistence_diagram(ax2, barcodes, min_lifetime,
                              intensity_of_interest)
    ax2.set_title(
        "Persistence Diagram (with cut = {}) \nNumber of 'persistent' cc: {}".
        format(min_lifetime, n_peaks))
    ax2.set_xlabel("Birth intensity")
    ax2.set_ylabel("Death intensity")
    fig.savefig(os.path.join(args.outf, "3_persistence_diagram.png"),
                dpi=90,
                bbox_inches='tight')

    #
    # classify the selected point
    #
    if n_peaks >= 3:
        point_type = "corner"
    elif n_peaks == 2:
        point_type = "edge"
    else:
        point_type = "background"
    print("\n==> Point type: {} ({} peak(s))".format(point_type, n_peaks))

    #
    # display detected edges
    #
    if n_peaks >= 2:
        angles_of_edges = []
        for cc in peaks:
            angle_of_edge = cc.peak.x
            angles_of_edges.append(angle_of_edge)
        img_with_ring_and_intersections = superimpose_ring_and_intersections(
            im.img, ring, angles_of_edges, mask)
        fig, ax = plot_img(
            img_with_ring_and_intersections,
            title="Angles between horizontal and detected edges\n{}".format(
                sorted(angles_of_edges)))
        fig.savefig(os.path.join(args.outf, "4_edges.png"))

    #
    # Make animated gif
    #
    if not args.nogif:
        print("\nMaking animated gif...")
        make_gif(args.outf, barcodes, min_lifetime, bucket2intensity,
                 args.stride_smoothing, args.size_smoothing,
                 intensity_of_interest)

    print("Visualizations stored in {}".format(args.outf))

    return
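In this example the mask comes from create_ring_mask, so apply_mask(im.img, mask) likely just keeps the pixels selected by a boolean ring. A minimal sketch under that assumption:

import numpy as np

def apply_mask(img, mask):
    # Hypothetical sketch: keep intensities inside the boolean ring mask and
    # zero out everything outside it.
    return np.where(mask, img, 0)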
Example #25
    rois_dir = runthis.get_rois_dir()

    img_paths = next(os.walk(label_dir))[2]
    colors=random_colors(10)
    for img_path in img_paths:
        img = cv2.imread(del_bg_dir + img_path.split('.')[0]+'_delbg.png')     # loaded as BGR (abnormal)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # converted to RGB (normal)
        rois = np.loadtxt(rois_dir + img_path.split('.')[0] + '.rois', dtype=int, delimiter=',').tolist()
        mask_inone=np.loadtxt(label_dir+img_path,dtype=int,delimiter=',')
        minrow = rois[0]
        mincol = rois[1]
        maxrow = rois[2]
        maxcol = rois[3]

        temp1=mask_inone.flatten().tolist()
        labels = list(set(temp1))
        labels.sort()
        labels=labels[1:]
        mask_part=[]
        for i in range(len(labels)):
            tempmask=np.zeros(mask_inone.shape,dtype=int)
            tempmask=np.where(mask_inone==labels[i],np.ones(tempmask.shape, dtype=int) * labels[i],tempmask)
            img=apply_mask(img,tempmask,colors[labels[i]%10])
        plt.figure(figsize=(10,5))
        ax=plt.subplot(1,2,1)
        plt.imshow(img)
        plt.xticks([]), plt.yticks([])
        ax=plt.subplot(1,2,2)
        plt.imshow(img[minrow:maxrow,mincol:maxcol:])
        plt.xticks([]), plt.yticks([])
        plt.show()