Example 1
    def run(self):
        self.mask = Mask(self.frames[0])
        if (PARAMS["verbose"]): self.mask.render("contour")

        self.mask.computeDensity()
        if (PARAMS["debug"]): self.mask.render("density")

        self.currentcontour = self.mask.contour

        RESULT = []
        for i in range(0, len(self.frames)):
            RESULT += [self.computeSimpleFlow(i)]
        print(RESULT[0].shape, "SHAPE   ", vid.getFPS(self.path), "FPS")
        print([r.shape for r in RESULT])

        shape = (int(RESULT[0].shape[0] * 2), int(RESULT[0].shape[1]))
        print(shape)
        out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'XVID'),
                              vid.getFPS(self.path), (512, 512))

        print(vid.getFPS(self.path), shape)
        for f, res in zip(self.noblurframes, RESULT):
            res3 = np.array([res, res, res]).transpose(1, 2, 0) * 255
            newf = cv2.resize(np.uint8(np.concatenate([f, res3], axis=0)),
                              (512, 512))
            #plt.imshow(newf[:, :, ::-1])
            #plt.show()

            out.write(newf)

        out.release()
Example 2
def print_new_dead_new_alive():
    start_orbit = 21847 
    end_orbit = 21890

#    start_orbit = 22820 
#    end_orbit = 22890

#    start_orbit = 24023
#    end_orbit = 24051

#    start_orbit = 27229 
#    end_orbit = 27258

#    start_orbit = 32726 
#    end_orbit = 32870

#    start_orbit = 49230
#    end_orbit = 49259

    m1 = Mask()
    m1.apply_pixel_window(394, 620)
    m1.load_ascii(start_orbit)

    m2 = Mask()
    for orbit in range(start_orbit, end_orbit):
        m2.load_ascii(orbit)
        print(orbit, "new dead:", m1.get_new_dead(m2), "new alive:", m1.get_new_alive(m2))

    return
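The loop above relies on Mask.apply_pixel_window(), Mask.load_ascii(), Mask.get_new_dead() and Mask.get_new_alive(), which are not shown. A minimal sketch of the two comparison methods, assuming the mask is stored as a boolean NumPy array of per-pixel flags (the class name, storage layout and flag convention here are assumptions, not the project's actual code):

import numpy as np

class MaskSketch:
    """Illustrative stand-in for the Mask class used above (hypothetical)."""

    def __init__(self, flags=None, npixels=1024):
        # True = flagged (dead) pixel, False = good pixel -- assumed convention
        self.mask = np.zeros(npixels, dtype=bool) if flags is None else np.asarray(flags, dtype=bool)

    def get_new_dead(self, other):
        # pixel indices flagged in `other` but not in this mask
        return np.where(other.mask & ~self.mask)[0]

    def get_new_alive(self, other):
        # pixel indices flagged in this mask but cleared in `other`
        return np.where(self.mask & ~other.mask)[0]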
Example 3
    def __init__(self, rect, mask):
        self.rect = rect
        if not self.rect.size == mask.get_size():
            self.rect.size = mask.get_size()
        Mask.__init__(self, mask.get_size())

        self.from_array(mask.get_array())
Example 4
 def applyMedian(self):
     mask = Mask('Mascaras/mask-median.txt')
     mask.show('Median')
     matrix = self.pbm.getMatrix()
     data = matrix.map(moveMask(mask, matrix, median))
     matrix.setMatrixData(data)
     print('Applied Median')
Example 5
def process(infile, group_name, day, timeid, copies=1):
    mask = Mask((A4_WIDTH, A4_HEIGHT))
    page = mask.apply_mask(infile)
    create_title(page, group_name, day, timeid)
    if not os.path.exists(os.path.join('png', day)):
        os.mkdir(os.path.join('png', day))
    png_file = os.path.join('png', day, '{}_{}.jpg'.format(timeid, tools.safe_filename(group_name)))
    page.save(png_file, quality=75)
    tools.print_image(png_file, copies)
Example 6
def demoMaskCreation():
    image = mycv2.loadImage(PARAMS["data"]["images"]["lena"])
    image = mycv2.cvtGray(image)
    mycv2.show(image)

    mask = Mask(image)

    mycv2.show(mask.mask)
    mycv2.show(mask.hull)
    print("Area", mask.area())
Example 7
def process(infile, group_name, day, timeid, copies=1):
    mask = Mask((A4_WIDTH, A4_HEIGHT))
    page = mask.apply_mask(infile)
    create_title(page, group_name, day, timeid)
    tools.mkdir_p(os.path.join('png', day))
    png_file = os.path.join(
        'png', day, '{}_{}.jpg'.format(timeid,
                                       tools.safe_filename(group_name)))
    page.save(png_file, quality=75)
    tools.print_image(png_file, copies)
Example 8
 def __init__(self, distance_threshold, depthmask_file, scenemask_file, scenemask_threshold, frame_threshold):
     self.idxm = Identifier()
     self.targets = {}
     self.lost_targets = {}
     #self.last_targets = set()
     # thresholds
     self.distance_threshold = distance_threshold
     self.depthmask = cv2.imread(depthmask_file).mean(2) / 255.0
     self.scenemask = Mask(scenemask_file, scenemask_threshold)
     self.frame_threshold = frame_threshold
Example 9
def compare_smoothmasks():
    fname = input_mask_fname #"sdmf_pyxelmask.h5"
    fid = h5py.File(fname, "r")
    ds_orbits = fid["orbits"]
    idx = np.argsort(ds_orbits[:])
    orbits32 = ds_orbits[:][idx]
    start_orbit = np.min(orbits32)
    stop_orbit = np.max(orbits32)
    orbit_range = stop_orbit - start_orbit + 1
    #print(idx)
    ds_combi = fid["combined"]
    num_orbits = idx.size
    a = np.empty((num_orbits, 1024), dtype=float)
    for i_orbit in range(num_orbits):
        id_ = idx[i_orbit]
        orbit = orbits32[i_orbit]
#        a[orbit-start_orbit,:] = ds_combi[id_,:]
        a[i_orbit,:] = ds_combi[id_,:]
    fid.close()
    print("3.2 read")

    # print(a.shape)
    # plt.cla()
    # plt.imshow(a)
    # plt.show()

    # fname_out = "orbital_dbqm.h5"
    # fid_out = h5py.File(fname_out, "w")
    # ds = fid_out.create_dataset("data", a.shape, dtype=np.float)
    # ds[:,:] = a
    # fid_out.close()
    # np.savetxt("orbital_dbqm.csv", a, delimiter=",")

    m = Mask()
    a_binary = np.ones((1400, 1024), dtype=bool)
    i_orbit = 0
    orbits30 = np.empty(1400, dtype=int)
    for orbit in range(42000, 43400):
        #print(orbit)
        try:
            m.load_sdmf30_crit(orbit, "combined", smooth=True)
        except Exception:
            # orbit not available in the SDMF 3.0 database: skip it
            continue
        orbits30[i_orbit] = orbit
        a_binary[i_orbit,:] = m.mask
        i_orbit += 1
    print("3.0 read")

    orbits30 = orbits30[0:i_orbit]
    a_binary = a_binary[0:i_orbit,:]

    for pixnr in range(1024):
        plot(orbits32, a, orbits30, a_binary, pixnr)

    return
Example 10
    def __init__( self ):

        """
        Placeholder
        """

        # increase system memory usage
        os.environ['GDAL_CACHEMAX'] = '2048'
        gdal.UseExceptions()

        self._mask = Mask()
        return
Example 11
def get_program(filename):
    program = []
    mask = None
    with open(filename, 'r') as f:
        for line in f:
            line = line[:-1]
            if 'mask' in line:
                if mask is not None:
                    program.append(mask)
                mask = Mask(line)
            elif 'mem' in line:
                mask.add_mem(line)
        program.append(mask)
    return program
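get_program() only groups the input lines; it expects a Mask object that keeps its own "mask = ..." line and collects the "mem[...] = ..." lines that follow it. A hypothetical minimal Mask that would satisfy this parser (not the class from the original project):

class Mask:
    """Hypothetical: stores one bitmask line and its associated memory writes."""

    def __init__(self, line):
        # line looks like "mask = XX1X0...": keep only the bit pattern
        self.pattern = line.split(" = ")[-1]
        self.mem_lines = []

    def add_mem(self, line):
        # line looks like "mem[8] = 11"
        self.mem_lines.append(line)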
Example 12
    def process_pair_data(self, data_r1_path, data_r2_path, force_mask = None):
        """Used to read and process a pair of FASTQ data files.

        Note that this parses the pair data into an in-memory SQLite
        database, which on most modern systems will be fine except for the
        largest input sets. If you hit memory issues, create a disk-based
        SQLite DB via :class:`.db.PairDB` and then use
        :meth:`.process_pair_db`.

        Note that this may be called multiple times to process more
        than one set of data files before computing profiles.

        :param data_r1_path: path to R1 fragments
        :param data_r2_path: path to matching R2 fragments.
        """
        self.run._force_mask = force_mask
        self.run.apply_config_restrictions()
        self.force_mask = Mask(force_mask) if force_mask else None
        use_quality = self.run._parse_quality
        if not self.run.skip_database and not use_quality:
            self.process_pair_db(self._memory_db_from_pairs(data_r1_path, data_r2_path))
        else:
            with FastFastqParser(data_r1_path, data_r2_path, use_quality) as parser:
                if not self.run.pair_length:
                    self.run.pair_length = parser.pair_length()
                self._process_pair_iter(parser.iterator(batch_size = 131072))
Example 13
 def __init__(self, masks):
     if len(masks) == 2 and 'RRRY' in masks and 'YYYR' in masks:
         self.match_mask = match_mask_optimized
     else:
         self.match_mask = self._match_mask
         self.masks = []
         for mask in masks:
             self.masks.append(Mask(mask))
Example 14
def main():
    image = mycv2.loadImage(PARAMS["data"]["images"]["lena"])
    mycv2.show(image)

    mask = Mask(image)

    plt.imshow(mask.mask, cmap="gray")
    plt.show()
Example 15
 def testDefaults(self):
     mask = Mask("Deku Mask", "Turns Link into a Deku shrub", True)
     self.assertEqual(mask.name, "Deku Mask")
     self.assertEqual(mask.properties, "Turns Link into a Deku shrub")
     self.assertEqual(mask.isOn, False)
     self.assertEqual(mask.acquired, False)
     self.assertEqual(mask.cApplied, "none")
     self.assertEqual(mask.inUse, False)
Example 16
 def get_indices(tid, si):
     return set(
         Mask.get_mask_stats_by_idx(
             model.sess,
             mask_id,
             feed_dict={
                 X: model.trainXs[tid][si:si + batch_size_per_task],
                 is_training: True,
             })["indices"])
def print_dead_quality():
    orbit = 50000
    m = Mask()
    m.load_ascii_quality("qualities/"+str(orbit)+".txt")
    print(np.where(m.mask))
    print(np.sum(m.mask))

    wlsmask = np.zeros(1024, dtype=bool)
    wls_idx = np.array([143, 196, 219, 220, 222, 254, 338, 398, 399, 405, 406, 494, 502, 577, 601, 609, 624, 635, 678, 707, 779, 802, 868, 881, 902])
    sun_idx = np.array([218, 219, 220, 609, 779, 883, 902, 1008])
    wlsmask[wls_idx] = True
    new_alive = m.mask.astype('i') - wlsmask
    print("new alive = ", np.where(new_alive == 1), np.where(new_alive == 1)[0].size)
    new_dead = wlsmask.astype('i') - m.mask
    print("new dead = ", np.where(new_dead  == 1), np.where(new_dead == 1)[0].size)
    print(m.mask)
    print(new_alive)
    print(new_dead)
    return
Example 18
    def CreateMask(
        self,
        imagedata=None,
        name=None,
        colour=None,
        opacity=None,
        threshold_range=None,
        edition_threshold_range=None,
        edited_points=None,
    ):

        # TODO: mask system to new system.
        future_mask = Mask()
        future_mask.create_mask(self.matrix.shape)

        if name:
            future_mask.name = name
        if colour:
            future_mask.colour = colour
        if opacity:
            future_mask.opacity = opacity
        if edition_threshold_range:
            future_mask.edition_threshold_range = edition_threshold_range
        if edited_points:
            future_mask.edited_points = edited_points
        if threshold_range:
            future_mask.threshold_range = threshold_range

        # insert new mask into project and retrieve its index
        proj = Project()
        index = proj.AddMask(future_mask)
        future_mask.index = index

        ## update gui related to mask
        Publisher.sendMessage(
            "Add mask", (future_mask.index, future_mask.name, future_mask.threshold_range, future_mask.colour)
        )

        self.current_mask = future_mask

        Publisher.sendMessage("Change mask selected", future_mask.index)
        Publisher.sendMessage("Update slice viewer")
Example 19
def crossover(parent1, parent2):
    # Number of shapes from parents
    n = random.randint(1, len(parent1.shapes))
    m = random.randint(0, len(parent2.shapes))

    # Adds parent genes to child
    child = []
    child += random.sample(parent1.shapes, n)
    child += random.sample(parent2.shapes, m)

    return Mask(child)
Example 20
def init_population(popSize):
    population = []

    # Generates population
    for _ in range(popSize):

        # Makes individual and adds to population
        indv = Mask()
        population.append(indv)

    return population
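Examples 19 and 20 are the crossover and population-initialisation steps of a genetic algorithm whose individuals are Mask objects. A hedged sketch of how the two functions might be combined into a generational loop; fitness() and the 50% elitist selection are illustrative assumptions, not part of the original code:

import random

def evolve(pop_size, generations, fitness):
    # fitness(mask) -> float is assumed to be supplied by the caller
    population = init_population(pop_size)
    for _ in range(generations):
        population.sort(key=fitness, reverse=True)
        parents = population[:max(2, pop_size // 2)]
        children = [crossover(random.choice(parents), random.choice(parents))
                    for _ in range(pop_size - len(parents))]
        population = parents + children
    return max(population, key=fitness)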
Example 21
 def run(self):
     logging.info("starting data retention")
     file_regex = [
         '.*agencies.jsonl', '.*caregivers.jsonl', '.*care_logs.jsonl',
         '.*clients.jsonl', '.*locations.jsonl', '.*shifts.jsonl',
         '.*timezone_agencies.jsonl'
     ]
     mask = Mask([
         Condition("body.resource_url", file_mask)
         for file_mask in file_regex
     ])
     ResourceJoin(matched_callback=self.retain_data, mask=mask).join()
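The retention job above builds a Mask from Condition objects that match the resource_url field of incoming events against file-name regexes. A rough sketch of what such a Condition/Mask pair could look like, inferred only from this call site; it illustrates the matching idea and is not the actual library API:

import re

class Condition:
    """Hypothetical: match a dotted field of an event dict against a regex."""

    def __init__(self, field, pattern):
        self.path = field.split(".")
        self.regex = re.compile(pattern)

    def matches(self, event):
        value = event
        for key in self.path:
            if not isinstance(value, dict):
                return False
            value = value.get(key)
        return isinstance(value, str) and self.regex.match(value) is not None

class Mask:
    """Hypothetical: matches if any of its conditions matches."""

    def __init__(self, conditions):
        self.conditions = conditions

    def matches(self, event):
        return any(c.matches(event) for c in self.conditions)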
Example 22
 def __init__(self, dispatcher):
     self.dispatcher = dispatcher
     self.masks = {c: Mask(c, dispatcher) for c in COLORS}
     self.entities = []
     self.sides = [Side(name) for name in ['top', 'left', 'right']]
     self.image = None
     self.current_frame = None
     self.tracked_entities = {}
     self.width = None
     self.height = None
     self.collisions = None
     self.balls = None
Example 23
    def __init__(self):
        """
        Initializer
        """
        super().__init__()

        # Set the working directory (where we expect to find files) to the same
        # directory this .py file is in. You can leave this out of your own
        # code, but it is needed to easily run the examples using "python -m"
        # as mentioned at the top of this program.
        file_path = os.path.dirname(os.path.abspath(__file__))
        os.chdir(file_path)
    
        # Sprite lists
        self.player = Player()
        self.coin_list = None
        self.wall_list = Solid_blocks().wall_list
        self.player_list = self.player.player_list
        self.player_sprite = self.player.player_sprite
        self.player_health = self.player.player_health
        self.brick_list = Destroyable_blocks().random_wall_list
        self.virus = Virus_cells()
        self.enemies = Virus_cells().virus_cells
        self.mask = Mask()
        self.mask_list = Mask().mask_list
        self.power = None
        self.walls_and_bricks = None
        self.explosions_list = None
        self.score = 0 
        self.mask_count = Mask().mask_count
        self.physics_engine = None
        self.volume = 0.4
        self.background = None
        self.background_music = None
        self.width = constants.SCREEN_WIDTH
        self.height = constants.SCREEN_HEIGHT
        self.bullet_list = None
        self.shotgun = False
        self.mouse_clicks = 0 
        self.play_music = None
Example 24
    def build_model(self):

        X_list = [tf.placeholder(tf.float32, [None, self.dims[0]], name="X_%d" % (t + 1))
                  for t in range(self.n_tasks)]
        Y_list = [tf.placeholder(tf.float32, [None, self.n_classes], name="Y_%d" % (t + 1))
                  for t in range(self.n_tasks)]
        is_training = tf.placeholder(tf.bool, name="is_training")

        for t, (X, Y) in enumerate(zip(X_list, Y_list)):
            bottom = X
            excl_bottom = X if self.use_set_based_mask else None
            for i in range(1, self.n_layers):
                w = self.get_variable('layer%d' % i, 'weight', True)
                b = self.get_variable('layer%d' % i, 'biases', True)
                bottom = tf.nn.relu(tf.matmul(bottom, w) + b)

                if self.use_set_based_mask:
                    bottom, residuals = Mask(
                        i - 1, self.mask_alpha[i - 1], self.mask_not_alpha[i - 1], mask_type=self.mask_type,
                    ).get_masked_tensor(bottom, is_training, with_residuals=True)
                    mask = residuals["cond_mask"]
                    with tf.control_dependencies([mask]):
                        excl_bottom = tf.nn.relu(tf.matmul(excl_bottom, w) + b)
                        excl_bottom = Mask.get_exclusive_masked_tensor(excl_bottom, mask, is_training)

            w = self.get_variable("layer%d" % self.n_layers, "weight_%d" % (t + 1), True)
            b = self.get_variable("layer%d" % self.n_layers, "biases_%d" % (t + 1), True)

            y = tf.matmul(bottom, w) + b
            yhat = tf.nn.sigmoid(y)
            self.yhat_list.append(yhat)
            loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=y, labels=Y))
            self.partial_loss_list.append(loss)

            if self.use_set_based_mask:
                excl_y = tf.matmul(excl_bottom, w) + b
                excl_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=excl_y, labels=Y))
                self.excl_partial_loss_list.append(excl_loss)

        return X_list, Y_list, is_training
Example 25
def compare_combined_30_vs_32(orbit):
    m30 = Mask()
    m30.load_sdmf30_crit(orbit, "combined")

    m32 = Mask()
    m32.load_sdmf32_mask(orbit, "combinedFlag")

    # for i in range(1024):
    #     print(i, m30.mask[i], m32.mask[i])

    newdead = m30.get_new_dead(m32)
    newalive = m30.get_new_alive(m32)
    print("3.0->3.2 dead:", newdead.size, newdead)
    print("3.0->3.2 alive:", newalive.size, newalive)
    return
Example 26
    def Task2(self) -> int:
        # Address masking
        mem = {}
        mask = None
        for line in self.data:
            if "mask" in line:
                mask = Mask(line.split(" = ")[-1])
                mask.mutate()
            else:
                value = int(line.split(" = ")[-1])
                address = int(line.split("[")[-1].split("]")[0])

                intermediate = mask.apply(address, value)

                mem = {**mem, **intermediate}

        result = 0

        for value in mem.values():
            if value != 0:
                result += value

        return result
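Task2 expects mask.apply(address, value) to return a dict mapping every decoded address to the value, with 'X' bits floating over both 0 and 1 after mask.mutate() has prepared the pattern. A hedged sketch of a Mask with that behaviour; the real class from this example is not shown, so the names and details below are assumptions:

from itertools import product

class Mask:
    """Hypothetical address-decoder mask: '0' keeps the address bit,
    '1' forces it to 1, 'X' floats over both values."""

    def __init__(self, pattern):
        self.pattern = pattern

    def mutate(self):
        # nothing to precompute in this sketch; kept for API compatibility
        pass

    def apply(self, address, value):
        bits = list(format(address, "036b"))
        floating = []
        for i, c in enumerate(self.pattern):
            if c == "1":
                bits[i] = "1"
            elif c == "X":
                floating.append(i)
        decoded = {}
        for combo in product("01", repeat=len(floating)):
            for i, b in zip(floating, combo):
                bits[i] = b
            decoded[int("".join(bits), 2)] = value
        return decoded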
Example 27
 def __init__(
         self,
         capture_link="https://imageserver.webcamera.pl/rec/hotel-senacki-krakow/latest.mp4",
         box_min_area=800,
         blur_size=15,
         min_threshold=10,
         frames_to_work=10,
         average_alfa=0.1):
     """
     :param capture_link: link to the stream (0 is for local camera)
     :param box_min_area: minimal box area
     :param blur_size: size of the blur, the bigger the larger boxes will be detected
     :param min_threshold:
     :param frames_to_work: frames to start detecting move
     :param average_alfa: change of the background speed, between 0 and 1.
     """
     self.background = bg.Background(frames_to_work, average_alfa)
     self.captureStream = cv2.VideoCapture(capture_link)
     self.box_min_area = box_min_area
     self.blur_shape = (blur_size, blur_size)
     self.min_threshold = min_threshold
     self.state = States.Normal
     self.mask = Mask()
     self.defaults = (box_min_area, blur_size, min_threshold, average_alfa)
Example 28
def print_nr_ppg(orbit):

    dictwls = get_closest_state_exec(orbit, 61, "/SCIA/SDMF31/sdmf_extract_calib.h5", readoutMean=True)
    orbit_range = dictwls["orbit_range"]
    orbit = orbit_range[0]
    print("closest wls orbit=", orbit)

    m = Mask()

    #
    # first just print combined smoothmask
    #

    m.load_ascii(orbit)
    print(np.where(m.mask))

    fname = "/SCIA/SDMF30/sdmf_pixelmask.h5"
    fid = h5py.File(fname, "r")
    grpname = "orbitalMask/"
    ds_orbits = fid[grpname+"orbitList"]

    orbits = ds_orbits[:]
    #print(orbits)

    idx = orbit == orbits
    if np.sum(idx) == 0:
        raise Exception("orbit not in SDMF3.2 orbital mask")

    i = np.argmax(idx)
    print("i=",i)

    ds_ppg = fid[grpname+"pixelGain"]
    ppg_chan8 = ds_ppg[7*1024:,i]
    print("ppg", np.sum(ppg_chan8), np.where(ppg_chan8))

    return
Example 29
    def getWordPostion(self):
        aux = []
        width, height = self.matrix.getDimen()
        matrix_copy = copy.copy(self.matrix)
        mask = Mask('Mascaras/mask-dilation5x5.txt')
        Manipulator.applyDilation(mask, matrix_copy)
        lines = matrix_copy.getMatrixData()
        positionLines = self.getInitialAndFinal(lines)
        for l_initial, l_final in positionLines:
            columns = Matrix.getTransposed(lines[l_initial:l_final + 1], width,
                                           l_final - l_initial)
            positionColumns = self.getInitialAndFinal(columns)
            for c_initial, c_final in positionColumns:
                aux += [((l_initial, c_initial), (l_final, c_final))]

        return aux
Example 30
    def learn(self, layer0, goal):
        '''
        Train the network on a single batch.
        :param layer0: the input batch
        :param goal: the desired output of the final layer
        '''
        masks = {}
        for i in self.maskPr:
            layer = self.layers[i]
            pR = self.maskPr[i]
            maskI = Mask(layer, pR)
            masks[i] = maskI

        self.fire(layer0, masks)
        self.train(goal, masks)
        return self.layers[-1]
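learn() builds one Mask per configured layer from the layer and a retention probability, then hands the dict of masks to fire() and train(); the Mask class itself is not shown. A dropout-style sketch of what such a mask could do, offered purely as an assumption about its role:

import numpy as np

class MaskSketch:
    """Hypothetical dropout-style mask built from a layer and a keep probability."""

    def __init__(self, layer, p_retain):
        self.p_retain = p_retain
        # one independent keep/drop decision per unit of the layer
        self.keep = np.random.rand(*np.shape(layer)) < p_retain

    def apply(self, activations):
        # inverted dropout: zero the dropped units and rescale the rest
        return activations * self.keep / self.p_retain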
Example 31
    def CreateMask(self, imagedata=None, name=None, colour=None,
                    opacity=None, threshold_range=None,
                    edition_threshold_range = None,
                    edited_points=None):

        future_mask = Mask()
        if colour:
            future_mask.colour = colour
        if opacity:
            future_mask.opacity = opacity
        if edition_threshold_range:
            future_mask.edition_threshold_range = edition_threshold_range
        if edited_points:
            future_mask.edited_points = edited_points

        # this is not the first mask, so we will import data from old imagedata
        if imagedata is None:
            old_mask = self.current_mask
            imagedata = old_mask.imagedata
            future_mask.threshold_range = old_mask.threshold_range

        if threshold_range:
            future_mask.threshold_range = threshold_range
            future_mask.imagedata = self.__create_mask_threshold(self.imagedata, 
                                                    threshold_range)
        else:
            future_mask.imagedata = vtk.vtkImageData()
            future_mask.imagedata.DeepCopy(imagedata)
            future_mask.imagedata.Update()


        # when this is not the first instance, user will have defined a name
        if name is not None:
            future_mask.name = name
            if future_mask.is_shown:
                self.blend_filter.SetOpacity(1, future_mask.opacity)
            else:
                self.blend_filter.SetOpacity(1, 0)
            self.blend_filter.Update()

        # insert new mask into project and retrieve its index
        proj = Project()
        index = proj.AddMask(future_mask)
        future_mask.index = index
        if threshold_range:
            self.SetMaskThreshold(index, threshold_range)
            future_mask.edited_points = {}

        # update gui related to mask
        ps.Publisher().sendMessage('Add mask',
                                    (future_mask.index,
                                     future_mask.name,
                                     future_mask.threshold_range,
                                     future_mask.colour))

        self.current_mask = future_mask

        ps.Publisher().sendMessage('Change mask selected', future_mask.index)
        ps.Publisher().sendMessage('Update slice viewer')
Example 32
            self.calRegTerm()
            self.updateThetaConstSize()
            print "Interation index: %d, Costfunction value: %4f." % (
                ii, self.error[ii])


if __name__ == '__main__':

    from lens import LensList
    from tcc import TCCList
    from mask import Mask
    from source import Source
    import time

    a = time.time()
    m = Mask()
    m.x_range = [-300.0, 300.0]
    m.y_range = [-300.0, 300.0]
    m.x_gridsize = 2.5
    m.y_gridsize = 2.5
    m.openGDS('./NanGateLibGDS/NOR2_X2.gds', 11, 0.3)
    m.maskfft()

    s = Source()
    s.na = 1.35
    s.maskxpitch = m.x_range[1] - m.x_range[0]
    s.maskypitch = m.y_range[1] - m.y_range[0]
    s.type = 'annular'
    s.sigma_in = 0.7
    s.sigma_out = 0.9
    s.smooth_deta = 0.00
Example 33
from pbm import Pbm
from mask import Mask
from manipulator import Manipulator
from text import Text
# Example: Imagens/grupo_07_imagem_1_linhas_30_colunas_2_palavras_277.pbm
path = input('Digite o caminho do arquivo: ')
pbm = Pbm(path)
mask = Mask('Mascaras/mask-dilation.txt')

manipulator = Manipulator(pbm)
manipulator.applyMedian()
manipulator.applyDilation(mask)

text = Text(pbm)
text.markWords()
text.showInfo()

name = path.split('.')[0].split('/')[1]
pbm.save(name)
Example 34
pretrain_file = "sskip.100.vectors"
tag_info_file = "tag.info"
#trn_file = "train.input.part"
#dev_file = "dev.input.part"
#tst_file = "test.input.part"
#pretrain_file = "sskip.100.vectors.part"
UNK = "<UNK>"
PADDING = "<PADDING>"
#############################################
## tags
tags = Tag(tag_info_file)
SOS = tags.SOS
EOS = tags.EOS
##############################################
##
mask_info = Mask(tags)
#############################################
trn_data = readfile(trn_file)
word_to_ix = {UNK:1, PADDING: 0}
lemma_to_ix = {UNK:1, PADDING: 0}
for sentence, _, lemmas, tags in trn_data:
    for word in sentence:
        if word not in word_to_ix:
            word_to_ix[word] = len(word_to_ix)
    for lemma in lemmas:
        if lemma not in lemma_to_ix:
            lemma_to_ix[lemma] = len(lemma_to_ix)

pretrain_to_ix = {UNK:1, PADDING: 0}
pretrain_embeddings = [ [0. for i in range(100)], [0. for i in range(100)]] # for UNK and PADDING
pretrain_data = readpretrain(pretrain_file)
Example 35
            self.AIList.append(self.AI)
            self.RIList.append([])
            for jj in self.doseList:
                self.resist_t = self.resist_tRef*jj
                self.calRI()
                self.RIList[ii].append(self.RI)
                        
if __name__ == "__main__":
    from tcc import TCCList,TCC
    from mask import Mask 
    from source import Source
    from lens import LensList, Lens
    
    mp = [ [[-1,6],[-1, 2],[1, 2],[1, 1],[6, 1],[6, 0],[0, 0],[0, 1],[-2, 1],[-2, 6],[-1, 6]], \
       [[6, -1],[6, -2],[1, -2],[1, -3],[4, -3],[4, -6],[3, -6],[3, -4],[0, -4],[0, -1],[6, -1]] ]
    m = Mask()
    m.x_range = [-300.0,300.0]
    m.y_range = [-400.0,300.0]
    m.x_gridsize = 1.0
    m.y_gridsize = 1.0
    m.CD = 40
    m.polygons = mp
    m.poly2mask()
    m.smooth()
    m.maskfft()
    
    """nominal ILT setting"""
    s = Source()
    s.na = 1.35
    s.maskxpitch = 600.0
    s.maskypitch = 800.0
Example 36
def main():
    '''
    Initialization
    '''
    start = time.time()

    tar_4_corners_xy = None
    ref_4_corners_xy = None
    homoMat = np.load('image/save_H.npy')
    shiftMat = np.load('image/save_Shift.npy')
    ref_4_corners_xy = np.load('image/save_ref_4_corners_xy.npy')
    tar_4_corners_xy = np.load('image/save_tar_4_corners_xy.npy')
    warp_tar_img = cv2.imread('image/warped_target.png')
    warp_ref_img = cv2.imread('image/warped_reference.png')
    seam_mask = cv2.imread('image/seam_mask.png', cv2.IMREAD_GRAYSCALE)

    mask = Mask(warp_tar_img, warp_ref_img)
    ref_region_mask = cv2.imread('image/result_from_reference.png',
                                 cv2.IMREAD_GRAYSCALE)
    tar_region_mask = cv2.bitwise_and(cv2.bitwise_not(ref_region_mask),
                                      mask.tar)

    mask.tar_result = tar_region_mask
    mask.ref_result = ref_region_mask
    '''
    Blend color
    '''

    CB = CBlender(warp_tar_img, warp_ref_img, seam_mask, tar_4_corners_xy,
                  ref_4_corners_xy, homoMat, shiftMat, mask)

    devided_tar_nonoverlap_mask, devided_tar_edge_mask = CB.split_tar_nonoverlap_area_and_edge(
        ref_4_corners_xy, tar_4_corners_xy, shiftMat, homoMat)
    devided_tar_edge_mask_1 = np.copy(devided_tar_edge_mask)
    devided_tar_edge_mask_1[devided_tar_edge_mask_1 == 127] = 0
    devided_tar_edge_mask_2 = np.copy(devided_tar_edge_mask)
    devided_tar_edge_mask_2[devided_tar_edge_mask_1 == 255] = 0

    # refered_pixel_coordi_lst = np.vstack( (\
    #     np.array( np.nonzero(seam_mask) ).T, \
    #     np.array( np.where(devided_tar_edge_mask==255) ).T, \
    #     np.array( np.where(devided_tar_edge_mask==127) ).T \
    #     ))

    slic = MaskedSLIC(warp_tar_img,
                      np.bitwise_and(mask.tar_result, mask.overlap),
                      region_size=20,
                      compactness=5)

    seam_superpixel_idx_lst = np.array([
        idx for idx, (rows, cols) in enumerate(slic.labels_position)
        if np.sum(seam_mask[rows, cols]) > 0
    ])
    tar_edge_1_superpixel_idx_lst = np.array([
        idx for idx, (rows, cols) in enumerate(slic.labels_position)
        if np.sum(devided_tar_edge_mask_1[rows, cols]) > 0
    ])
    tar_edge_2_superpixel_idx_lst = np.array([
        idx for idx, (rows, cols) in enumerate(slic.labels_position)
        if np.sum(devided_tar_edge_mask_2[rows, cols]) > 0
    ])

    refered_idx_lst = np.hstack(
        (seam_superpixel_idx_lst, tar_edge_1_superpixel_idx_lst,
         tar_edge_2_superpixel_idx_lst))

    refered_pixel_coordi_lst = np.array([
        (rows[len(rows) // 2], cols[len(rows) // 2])
        for idx, (rows, cols) in enumerate(slic.labels_position)
        if (idx != 0) and (np.isin(idx, refered_idx_lst))
    ])
    fuck_this_shit = CB.blend_color(refered_pixel_coordi_lst,
                                    np.bitwise_and(CB.mask.overlap,
                                                   CB.mask.tar_result),
                                    sigma1=0.3,
                                    sigma2=0.2)

    cv2.imwrite('overlap_blending_only.png', fuck_this_shit)

    CB.ref_img = fuck_this_shit

    refered_pixel_coordi_lst = np.array([
        (rows[len(rows) // 2], cols[len(rows) // 2])
        for idx, (rows, cols) in enumerate(slic.labels_position)
        if (idx != 0) and (np.isin(idx, tar_edge_1_superpixel_idx_lst))
    ])
    mask = np.zeros(devided_tar_nonoverlap_mask.shape)
    mask[devided_tar_nonoverlap_mask == 255] = 255
    fuck_this_shit = CB.blend_color(refered_pixel_coordi_lst,
                                    mask,
                                    sigma1=0.1,
                                    sigma2=0.05)
    cv2.imwrite('nonoverlap_blending_only1.png', fuck_this_shit)
    CB.ref_img = fuck_this_shit
    refered_pixel_coordi_lst = np.array([
        (rows[len(rows) // 2], cols[len(rows) // 2])
        for idx, (rows, cols) in enumerate(slic.labels_position)
        if (idx != 0) and (np.isin(idx, tar_edge_2_superpixel_idx_lst))
    ])
    mask = np.zeros(devided_tar_nonoverlap_mask.shape)
    mask[devided_tar_nonoverlap_mask == 127] = 255
    fuck_this_shit = CB.blend_color(refered_pixel_coordi_lst,
                                    mask,
                                    sigma1=0.1,
                                    sigma2=0.05)
    cv2.imwrite('nonoverlap_blending_only2.png', fuck_this_shit)
    for idx in tar_edge_2_superpixel_idx_lst:
        fuck_this_shit[slic.labels_position[idx][0],
                       slic.labels_position[idx][1]] = np.random.randint(
                           0, 255, 3)
    cv2.imwrite('araara.png', fuck_this_shit)

    warp_ref_img[slic.contour_mask > 0] = (0, 255, 0)
    warp_tar_img[slic.contour_mask > 0] = (0, 255, 0)
    cv2.imwrite('target_image_slic.png', warp_ref_img)
    cv2.imwrite('reference_image_slic.png', warp_tar_img)

    print(f'time: {time.time() - start}')
Example 37
 def from_array(self, array):
     if use_pygame:
         # rebuild the combo when the array shape no longer matches the mask size
         size = (len(array), len(array[0]))
         if not size == self.get_size():
             self = RectMaskCombo(self.rect, Mask(size))
     Mask.from_array(self, array)
Example 38
"""
Created on Wed Apr 27 2016
@author: WenLv ([email protected])
"""

from lens import LensList
from tcc import TCCList
from mask import Mask
from source import Source 
from ilt import RobustILT

import numpy as np    
import time


m = Mask()
m.x_gridsize = 2.5
m.y_gridsize = 2.5
m.openGDS('./NanGateLibGDS/NOR2_X2.gds',layername=11,boundary=0.3)
m.maskfft()


s = Source()
s.na = 1.35
s.maskxpitch = m.x_range[1] - m.x_range[0]
s.maskypitch = m.y_range[1] - m.y_range[0]
s.type = 'annular'
s.sigma_in = 0.7
s.sigma_out = 0.9
s.smooth_deta = 0.00
s.shiftAngle = 0
Example 39
def compare_error_30_vs_32(orbit):
    m30 = Mask()
    m30_combined = Mask()
    m30_combined.load_ascii(orbit)
    m30.load_sdmf30_crit(orbit, "residual")

    m32 = Mask()
    m32.load_sdmf32_figure(orbit, "darkError")

    newdead = m30.get_new_dead(m32)
    newalive = m30.get_new_alive(m32)
    print("new errory:", newdead.size, newdead)
    print("new errory but already bad", np.sum(np.in1d(newdead, np.where(m30_combined.mask))))
    print("real new errory", newdead[np.in1d(newdead, np.where(m30_combined.mask), invert=True)])

    print("new not-errory:", newalive.size, newalive)
    print("new not-errory but still bad", np.sum(np.in1d(newalive, np.where(m30_combined.mask))))
    print("real new not-errory:", newalive[np.in1d(newalive, np.where(m30_combined.mask), invert=True)])
    return
Example 40
 def collidepoint(self, point):
     self.pos = self.rect.topleft
     return Mask.collidepoint(self, point)

Example 41
start = time.time()

H = None
shift = None
tar_4_corners_xy = None
ref_4_corners_xy = None
homoMat = np.load('image/save_H.npy')
shiftMat = np.load('image/save_Shift.npy')
ref_4_corners_xy = np.load('image/save_ref_4_corners_xy.npy')
tar_4_corners_xy = np.load('image/save_tar_4_corners_xy.npy')
warp_tar_img = cv2.imread('image/warped_target.png')
warp_ref_img = cv2.imread('image/warped_reference.png')
seam_mask = cv2.imread('image/seam_mask.png', cv2.IMREAD_GRAYSCALE)
mask = Mask(warp_tar_img, warp_ref_img)
ref_region_mask = cv2.imread('image/result_from_reference.png',
                             cv2.IMREAD_GRAYSCALE)
tar_region_mask = cv2.bitwise_and(cv2.bitwise_not(ref_region_mask), mask.tar)
mask.tar_result = tar_region_mask
mask.ref_result = ref_region_mask

CB = CBlender(warp_tar_img, warp_ref_img, seam_mask, tar_4_corners_xy,
              ref_4_corners_xy, homoMat, shiftMat, mask)
devided_tar_nonoverlap_mask, devided_tar_edge_mask = CB.split_tar_nonoverlap_area_and_edge(
    ref_4_corners_xy, tar_4_corners_xy, shiftMat, homoMat)

refered_pixel_coordi_lst = np.vstack((
    np.array(np.nonzero(seam_mask)).T,
    np.array(np.where(devided_tar_edge_mask == 255)).T,
    np.array(np.where(devided_tar_edge_mask == 127)).T,
))
Example 42
	def __init__(self, size, numbers_of_elements):
		Mask.__init__(self, size, numbers_of_elements)
		self.core = numbers_of_elements - size
Example 43
        self._retriever.stop()

    def join(self):
        self._retriever.join()

    def __str__(self):
        return str(self._mask)

    def __repr__(self):
        return self.__str__()


def blah(joiner):
    print("blah received the following events:")
    for event in joiner.mask().events():
        print(event)
    print("stopping the joiner!")
    joiner.stop()

# e.g.
if __name__ == "__main__":
    file_regex = ['.*agencies.jsonl',
                  '.*caregivers.jsonl',
                  '.*care_logs.jsonl',
                  '.*clients.jsonl',
                  '.*locations.jsonl',
                  '.*shifts.jsonl',
                  '.*timezone_agencies.jsonl']
    mask = Mask([Condition("body.resource_url", file_mask) for file_mask in file_regex])
    ResourceJoin(blah, mask).join()
Example 44
class Processor:


    def __init__( self ):

        """
        Placeholder
        """

        # increase system memory usage
        os.environ['GDAL_CACHEMAX'] = '2048'
        gdal.UseExceptions()

        self._mask = Mask()
        return


    def getPansharpenImage( self, pathname ):

        """
        Placeholder
        """

        # get masked datasets
        code = None

        datasets = self.getMaskedDatasets ( os.path.dirname( pathname ) )
        if len( datasets ) == 3:

            # define pansharpen options
            args = [ os.path.join( os.path.dirname( pathname ), 'B8_merge.tif' ) ] + datasets + [ pathname ]
            options = [ '-co',  'PHOTOMETRIC=RGB', '-co', 'COMPRESS=DEFLATE', '-nodata', '0' ]

            # manage execution of gdal pansharpen
            out, err, code = execute( 'gdal_pansharpen.py', args + options )

        return code


    def getMaskedDatasets( self, path ):

        """
        Placeholder
        """

        # check qa image exists
        datasets = []

        qa_pathname = os.path.join( path, 'BQA_merge.tif' )
        if os.path.exists( qa_pathname ):

            arr = self._mask.generate( qa_pathname, out_pathname=os.path.join( path, 'mask.tif') )
            if arr is not None:

                # mask channel images
                channels = [ 'B4', 'B3' , 'B2' ]
                for channel in channels:

                    pathname = os.path.join( path, '{}_merge.tif'.format( channel ) )
                    out_pathname = pathname.replace( '_merge.tif', '_merge_mask.tif' ) 

                    # apply masking
                    arr = self._mask.apply( pathname, out_pathname=out_pathname )
                    if arr is not None:
                        datasets.append( out_pathname )

            else:

                # masking error
                print ( 'Masking error - skipping: {}'.format ( qa_pathname ) )

        else:

            # no qa file
            print ( 'File does not exist - skipping: {}'.format ( qa_pathname ) )

        return datasets

    
    def getImageChip( self, pathname, centroid, out_pathname, size=512, scale_min=2, scale_max=98 ):

        """
        Placeholder
        """

        # open pansharpen image
        ds = gdal.Open( pathname )
        if ds is not None:

            # transform latlon centroid coordinates to image srs
            coord_tx = self.getCoordinateTransform( ds )
            x, y, z = coord_tx.TransformPoint( centroid[ 0 ], centroid[ 1 ] )

            # convert image srs coordinates into pixel coordinates
            geo = ds.GetGeoTransform()
            pixel = ( round ( ( x - geo[ 0 ] ) / geo[ 1 ] ), round ( ( geo[ 3 ] - y ) / -geo[ 5 ] ) )

            x_offset = ( pixel[ 0 ] - ( size / 2 ) )
            y_offset = ( pixel[ 1 ] - ( size / 2 ) )

            # check chip window is valid
            if x_offset >= 0 and y_offset >= 0 and ( x_offset + size ) < ds.RasterXSize and ( y_offset + size ) < ds.RasterYSize: 

                # read in window
                r = ds.GetRasterBand(1).ReadAsArray( xoff=x_offset, yoff=y_offset, win_xsize=size, win_ysize=size )
                g = ds.GetRasterBand(2).ReadAsArray( xoff=x_offset, yoff=y_offset, win_xsize=size, win_ysize=size )
                b = ds.GetRasterBand(3).ReadAsArray( xoff=x_offset, yoff=y_offset, win_xsize=size, win_ysize=size )

                # validate goodness - number of black (masked) pixels 
                goodness = ( np.count_nonzero( r ) / ( size * size ) ) * 100.0
                if goodness > 95.0:

                    # get scale factors to convert from 16bit to 8bit 
                    scale = {   'r' : self.getScaleMinMax( r, ( scale_min, scale_max ) ),
                                'g' : self.getScaleMinMax( g, ( scale_min, scale_max ) ),
                                'b' : self.getScaleMinMax( b, ( scale_min, scale_max ) ) }

                    # compile translation options
                    options = [ '-of JPEG',
                                '-ot Byte',
                                '-srcwin {} {} {} {}'.format( x_offset, y_offset, size, size ),
                                '-scale_1 {} {}'.format ( scale[ 'r' ][ 0 ], scale[ 'r' ][ 1 ] ),
                                '-scale_2 {} {}'.format ( scale[ 'g' ][ 0 ], scale[ 'g' ][ 1 ] ),
                                '-scale_3 {} {}'.format ( scale[ 'b' ][ 0 ], scale[ 'b' ][ 1 ] ) ]

                    # create output directory if not exists
                    if not os.path.exists( os.path.dirname( out_pathname ) ):
                        os.makedirs( os.path.dirname( out_pathname ) )

                    # execute translation
                    options_str = ' '.join( options )
                    out = gdal.Translate( out_pathname, ds, options=options_str )

                    print ( 'Generated chip: {} {}'.format ( out_pathname, goodness ) )

                else:

                    # failed qc check
                    print ( 'Chip failed QC: {} {}'.format( out_pathname, goodness ) )        

            else:

                # report error
                print ( 'Invalid chip window {} {} {} {} ({} {}): {}'.format( x_offset, y_offset, x_offset + size, y_offset + size, 
                                                                                ds.RasterXSize, ds.RasterYSize, pathname ) )        
        else:

            # report error
            print ( ' Unable to open dataset: {}'.format( pathname ) )
    
        return 


    def getCoordinateTransform( self, ds ):

        """
        Placeholder
        """

        # get transformation from latlon to image srs
        image = osr.SpatialReference( wkt=ds.GetProjection() )

        aoi = osr.SpatialReference()
        aoi.ImportFromEPSG( 4326 )

        return osr.CoordinateTransformation( aoi, image )


    # compute percentiles from cdf
    def getScaleMinMax( self, arr, pcs ):

        """
        Placeholder
        """

        return np.percentile( arr[ arr != 0 ], pcs )
Example 45
            self.calRobustGrad()
            self.calRegTerm()
            self.updateThetaConstSize()
            print "Interation index: %d, Costfunction value: %4f." %(ii, self.error[ii])
                                                    
   
if __name__ == '__main__' :
    
    from lens import LensList
    from tcc import TCCList
    from mask import Mask
    from source import Source    
    import time
    
    a = time.time()
    m = Mask()
    m.x_range = [-300.0,300.0]
    m.y_range = [-300.0,300.0]
    m.x_gridsize = 2.5
    m.y_gridsize = 2.5
    m.openGDS('./NanGateLibGDS/NOR2_X2.gds',11,0.3)
    m.maskfft()
    
    s = Source()
    s.na = 1.35
    s.maskxpitch = m.x_range[1] - m.x_range[0]
    s.maskypitch = m.y_range[1] - m.y_range[0]
    s.type = 'annular'
    s.sigma_in = 0.7
    s.sigma_out = 0.9
    s.smooth_deta = 0.00
Example 46
 def colliderect(self, other):
     self.pos = self.rect.topleft
     return Mask.colliderect(self, other)
Example 47
    def save_mask_window(self):
        """ Creates and saves the mask """

        width = self.ui.lineEdit_width.text().replace(",", ".")
        x = self.ui.lineEdit_x.text().replace(",", ".")
        y = self.ui.lineEdit_y.text().replace(",", ".")
        z = self.ui.lineEdit_z.text().replace(",", ".")

        # If the coordinates and radius_width are valid numbers, then choose a given path and create the mask
        if self.is_pos_number(width) and self.is_pos_number(x) and self.is_pos_number(y) and self.is_pos_number(z):
            if float(width) == 0:
                QMessageBox.warning(
                    self, "Warning",
                    "You have entered invalid width/radius. Please enter numbers larger than zero.")
            else:
                # Convert coordinates and shape_size to voxels, using the affine matrix of the brain file
                coordinate = np.array([float(self.ui.lineEdit_x.text()), float(self.ui.lineEdit_y.text()), float(self.ui.lineEdit_z.text())])
                coordinate = apply_affine(np.linalg.inv(self.brain_file._affine), coordinate)
                coordinate[0] = round(coordinate[0])
                coordinate[1] = round(coordinate[1])
                coordinate[2] = round(coordinate[2])

                # If the coordinates together with radius_width are valid create a mask
                # If not valid send an error message
                if self.ui.comboBox_shape.currentText() == "Sphere":
                    voxel_width = (float(width)/self.brain_file._header.get_zooms()[0],
                                   float(width)/self.brain_file._header.get_zooms()[1],
                                   float(width)/self.brain_file._header.get_zooms()[2],)
                else:
                    voxel_width = ((float(width)/2)/self.brain_file._header.get_zooms()[0],
                                    (float(width)/2)/self.brain_file._header.get_zooms()[1],
                                    (float(width)/2)/self.brain_file._header.get_zooms()[2])

                # Check if coordinates is within brain data
                # Generates parameters to and call Mask function
                if coordinate[0] <= self.brain_file.get_data().shape[0]\
                        and coordinate[1] <= self.brain_file.get_data().shape[1]\
                        and coordinate[2] <= self.brain_file.get_data().shape[2]:
                    if (coordinate[0] + voxel_width[0]) <= self.brain_file.get_data().shape[0]\
                            and (coordinate[1] + voxel_width[1]) <= self.brain_file.get_data().shape[1]\
                            and (coordinate[2] + voxel_width[2]) <= self.brain_file.get_data().shape[2]\
                            and (coordinate[0] - voxel_width[0]) >= 0 and (coordinate[1] - voxel_width[1]) >= 0\
                            and (coordinate[2] - voxel_width[2]) >= 0:
                        file_name = QFileDialog.getSaveFileName(self, "Save file as nii", "", ".nii")
                        if file_name[0] == "":
                            return
                        path = file_name[0]+file_name[1]
                        shape = self.ui.comboBox_shape.currentText()
                        width = voxel_width
                        Mask(path, shape, coordinate, width, self.brain_file)
                        self.close()
                        self.parent().load_mask(path)
                    else:
                        QMessageBox.warning(
                            self, "Warning",
                            "You have entered invalid values in width. Please enter a value within the dimensions.")
                else:
                    QMessageBox.warning(
                        self, "Warning",
                        "You have entered invalid values in coordinate. Please enter values within the dimensions.")
        else:
            QMessageBox.warning(
                self, "Warning", "You have entered one or more invalid values. Please enter only numbers.")