def extractMarkedNodes(filename):
    # extracts markers from the raw images
    # returns a dict, with color as key and a list
    # of marker center coords as value

    print "processing file", filename
    im = vigra.readImage(filename)
    colored1 = im[..., 0] != im[..., 1]
    colored2 = im[..., 1] != im[..., 2]
    colored3 = im[..., 2] != im[..., 0]
    colored = numpy.logical_or(colored1, colored2)
    colored = numpy.logical_or(colored, colored3)
    cc = vigra.analysis.labelImageWithBackground(colored.astype(numpy.uint8))
    # take the center pixel for each colored square
    feats = vigra.analysis.extractRegionFeatures(colored.astype(numpy.float32), cc, ["RegionCenter"])
    center_coords = feats["RegionCenter"][1:][:].astype(numpy.uint32)
    center_coords_list = [center_coords[:, 0], center_coords[:, 1]]
    im_centers = numpy.asarray(im[center_coords_list])
    # print im_centers
    # struct = im_centers.view(dtype='f4, f4, f4')

    # colors, indices = numpy.unique(struct, return_inverse=True)
    # print colors, indices, colors.shape
    centers_by_color = {}
    for iindex in range(center_coords.shape[0]):
        center = (center_coords[iindex][0], center_coords[iindex][1])
        #print center, index
        # look up the color key for this marker center; ``colors`` is an RGB-tuple ->
        # name mapping that the original module defines elsewhere
        color = colors[tuple(im_centers[iindex].astype(numpy.uint8))]
        #centers_by_color.setdefault(tuple(im_centers[iindex]), []).append(center)
        centers_by_color.setdefault(color, []).append(center)
        
    print centers_by_color
    return centers_by_color
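A minimal usage sketch (the file name is hypothetical, and ``colors`` must be the module-level RGB-to-name mapping the function relies on):

markers = extractMarkedNodes("raw_markers_0001.png")
for color, centers in markers.items():
    print color, "->", len(centers), "marker center(s)"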
Example 2
def clip(msnames, station_selection, threshold=750):
    """Clip CORRECTED_DATA amplitudes above threshold and adjust flags"""

    for msname in msnames:
        t = pyrap.tables.table(msname, readonly = False)
        if os.path.exists(msname + '.flags'):
            t_flags = pyrap.tables.table(msname + '.flags')
        else:
            t_flags = t.select("FLAG").copy(msname + '.flags', deep=True)

        t_ant = pyrap.tables.table(msname + '/ANTENNA')
        ant_names = t_ant.getcol('NAME')

        ant1 = t.getcol("ANTENNA1")
        f1 = numpy.array([ant_names[ant] not in station_selection for ant in ant1])

        ant2 = t.getcol("ANTENNA2")
        f2 = numpy.array([ant_names[ant] not in station_selection for ant in ant2])

        f = t_flags.getcol("FLAG")
        f = numpy.logical_or(f, f1[:, numpy.newaxis, numpy.newaxis])
        f = numpy.logical_or(f, f2[:, numpy.newaxis, numpy.newaxis])

        d = t.getcol("CORRECTED_DATA")

        f = numpy.logical_or(f, abs(d)>threshold)
        t.putcol("FLAG", f)
        t.flush()
Example 3
    def _setdiag(self, values, k):
        M, N = self.shape
        if values.ndim and not len(values):
            return
        idx_dtype = self.row.dtype

        # Determine which triples to keep and where to put the new ones.
        full_keep = self.col - self.row != k
        if k < 0:
            max_index = min(M+k, N)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.col >= max_index)
            new_row = np.arange(-k, -k + max_index, dtype=idx_dtype)
            new_col = np.arange(max_index, dtype=idx_dtype)
        else:
            max_index = min(M, N-k)
            if values.ndim:
                max_index = min(max_index, len(values))
            keep = np.logical_or(full_keep, self.row >= max_index)
            new_row = np.arange(max_index, dtype=idx_dtype)
            new_col = np.arange(k, k + max_index, dtype=idx_dtype)

        # Define the array of data consisting of the entries to be added.
        if values.ndim:
            new_data = values[:max_index]
        else:
            new_data = np.empty(max_index, dtype=self.dtype)
            new_data[:] = values

        # Update the internal structure.
        self.row = np.concatenate((self.row[keep], new_row))
        self.col = np.concatenate((self.col[keep], new_col))
        self.data = np.concatenate((self.data[keep], new_data))
        self.has_canonical_format = False
 def remove_noise_with_hsv(self, img):
     # Use number of occurrences to find the standard h, s, v
     # Convert to int so we can sort the colors
     # noinspection PyTypeChecker
     img_int = np.dot(np.rint(img * 255), np.power(256, np.arange(3)))
     color_array = sort_by_occurrence(img_int.flatten())
     # standard color is the 2nd most frequent color
     std_color = color_array[1]
     std_b, mod = divmod(std_color, 256 ** 2)
     std_g, std_r = divmod(mod, 256)
     # noinspection PyTypeChecker
     std_h, std_s, std_v = colors.rgb_to_hsv(np.array([std_r, std_g, std_b]) / 255)
     # print(std_h * 360, std_s * 100, std_v * 100)
     height, width, _ = img.shape
     img_hsv = colors.rgb_to_hsv(img)
     h, s, v = img_hsv[:, :, 0], img_hsv[:, :, 1], img_hsv[:, :, 2]
     h_mask = np.abs(h - std_h) > self.h_tolerance
     s_mask = np.abs(s - std_s) > self.s_tolerance
     delta_v = np.abs(v - std_v)
     v_mask = delta_v > self.v_tolerance
     hsv_mask = np.logical_or(np.logical_or(h_mask, s_mask), v_mask)
     new_img = 1 - delta_v
     new_img[hsv_mask] = 0
     # Three types of grayscale colors in new_img:
     # Type A: 1. Outside noise, or inside point.
     # Type B: between 0 and 1. Outside noise, or contour point.
     # Type C: 0. Inside noise, or background.
     return new_img
Example 5
def getgeodesicpts(m):
    """
 computes the lat/lon values of the points on the surface of the sphere
 corresponding to a twenty-sided (icosahedral) geodesic.

 @param m: the number of points on the edge of a single geodesic triangle.
 There are 10*(m-1)**2+2 total geodesic points, including the poles.

 @return: C{B{lats, lons}} - rank 1 numpy float32 arrays containing
 the latitudes and longitudes of the geodesic points (in degrees). These
 points are nearly evenly distributed on the surface of the sphere.
    """
    x,y,z = _spherepack.ihgeod(m)
# convert cartesian coords to lat/lon.
    rad2dg = 180./math.pi
    r1 = x*x+y*y
    r = numpy.sqrt(r1+z*z)
    r1 = numpy.sqrt(r1)
    xtmp = numpy.where(numpy.logical_or(x,y),x,numpy.ones(x.shape,numpy.float32))
    ztmp = numpy.where(numpy.logical_or(r1,z),z,numpy.ones(z.shape,numpy.float32))
    lons = rad2dg*numpy.arctan2(y,xtmp)+180.
    lats = rad2dg*numpy.arctan2(r1,ztmp)-90.
    lat = numpy.zeros(10*(m-1)**2+2,numpy.float32)
    lon = numpy.zeros(10*(m-1)**2+2,numpy.float32)
# first two points are poles.
    lat[0] = 90; lat[1] = -90.
    lon[0] = 0.; lon[1] = 0.
    lat[2:] = lats[0:2*(m-1),0:m-1,:].flatten()
    lon[2:] = lons[0:2*(m-1),0:m-1,:].flatten()
    return lat,lon
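A quick sanity check of the point count promised by the docstring (it assumes the compiled ``_spherepack`` extension is importable):

lats, lons = getgeodesicpts(4)
assert lats.shape == (10 * (4 - 1) ** 2 + 2,)   # 92 geodesic points for m=4
assert lats[0] == 90. and lats[1] == -90.       # the first two entries are the poles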
Example 6
def count_edges_within_band(a, b, band=3, rising=True):
    '''
    Counts the number of rising (or falling) edges match, within a sample band
    
    Params
    -------
    @param a, b: Arrays that will be compared
    @type a, b: Boolean array 
    @param band: The number of samples of tolerance
    @type band: float
    @param rising: Specify rising or falling edge
    @type rising: boolean 
    
    Returns
    -------
    @return: Count of matching edges, total true rising (or falling) edges
    @rtype: int
    '''
    if rising:
        a = np.r_[a[0], np.diff(a)]>0
        b = np.r_[b[0], np.diff(b)]>0
    else:
        a = np.r_[a[0], np.diff(a)]<0
        b = np.r_[b[0], np.diff(b)]<0

    total_edges = sum(a)
    result = np.logical_and(a, b)
    for offset in range(1, int(band) + 1):   # offsets of 1..band samples on either side
        posoff = np.r_[[0]*offset, np.logical_and(a[:-offset], b[offset:])]
        negoff = np.r_[np.logical_and(a[offset:], b[:-offset]), [0]*offset]
        result = np.logical_or(result, posoff)
        result = np.logical_or(result, negoff)

    return sum(result), total_edges
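A small hedged example with made-up 0/1 traces; every rising edge in ``b`` lags the corresponding edge in ``a`` by one sample, well within the default band:

import numpy as np

a = np.array([0, 1, 1, 0, 0, 1, 1, 0])
b = np.array([0, 0, 1, 1, 0, 0, 1, 1])
matched, total = count_edges_within_band(a, b, band=3)
assert (matched, total) == (2, 2)   # both rising edges of ``a`` are matched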
Example 7
    def clean_events(self, events):
        events = events.view(np.recarray)
        events.protocol = self._protocol
        events.montage = self._montage
        events.experiment = self._experiment
        events.exp_version = self._exp_version

        stim_events = np.logical_or(events['type'] == 'STIM', events['type'] == 'STIM_OFF')
        stim_events = np.logical_or(stim_events, events['type'] == 'SHAM')
        stim_event_indices = np.where(stim_events)[0]

        poll_events = np.where(events['type'] == 'NP_POLL')[0]
        first_poll_event = poll_events[0]
        last_poll_event = poll_events[-1]

        # Need the last two events (on/off) before the first np poll and the two events after the last np poll
        stim_before = np.array([index for index in stim_event_indices if index < first_poll_event - 2])
        stim_after = np.array([index for index in stim_event_indices if index > last_poll_event + 2])

        good_range = np.array([index for index in range(len(events)) \
                               if index not in stim_before and index not in stim_after])

        cleaned_events = events[good_range]
        # Remove NP_POLL
        cleaned_events = cleaned_events[cleaned_events['type'] != 'NP_POLL']
        cleaned_events.sort(order='mstime')
        return cleaned_events
 def _call_joint_genotypes(self, data, genotypes):
     normal = genotypes['normal']
     tumour = genotypes['tumour']
             
     normal_aa = (normal == 0)
     normal_ab = (normal == 1)
     normal_bb = (normal == 2)
         
     normal_var = np.logical_or(normal_ab, normal_bb)
     
     tumour_aa = (tumour == 0)
     tumour_ab = (tumour == 1)
     tumour_bb = (tumour == 2)
     
     tumour_var = np.logical_or(tumour_ab, tumour_bb)
     tumour_hom = np.logical_or(tumour_aa, tumour_bb)   # homozygous in the tumour (aa or bb)
     
     reference = np.logical_and(normal_aa, tumour_aa)
     germline = np.logical_and(normal_var, tumour_var)
     somatic = np.logical_and(normal_aa, tumour_var)
     loh = np.logical_and(normal_ab, tumour_hom)
     
     
     n = normal_aa.size
     joint_genotypes = 4 * np.ones((n,))
     
     joint_genotypes[reference] = 0
     joint_genotypes[germline] = 1
     joint_genotypes[somatic] = 2
     joint_genotypes[loh] = 3
     
     return joint_genotypes
def filter_params(io, rsh, rs, ee, isc):
    # Function filter_params identifies bad parameter sets. A bad set contains
    # Nan, non-positive or imaginary values for parameters; Rs > Rsh; or data
    # where effective irradiance Ee differs by more than 5% from a linear fit
    # to Isc vs. Ee

    badrsh = np.logical_or(rsh < 0., np.isnan(rsh))
    negrs = rs < 0.
    badrs = np.logical_or(rs > rsh, np.isnan(rs))
    imagrs = ~(np.isreal(rs))
    badio = np.logical_or(~(np.isreal(io)), io <= 0)
    goodr = np.logical_and(~badrsh, ~imagrs)
    goodr = np.logical_and(goodr, ~negrs)
    goodr = np.logical_and(goodr, ~badrs)
    goodr = np.logical_and(goodr, ~badio)

    matrix = np.vstack((ee / 1000., np.zeros(len(ee)))).T
    eff = np.linalg.lstsq(matrix, isc)[0][0]
    pisc = eff * ee / 1000
    pisc_error = np.abs(pisc - isc) / isc
    # check for departure from linear relation between Isc and Ee
    badiph = pisc_error > .05

    u = np.logical_and(goodr, ~badiph)
    return u
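A small hedged check with made-up values (not from any real module data) showing the boolean mask that filter_params returns:

import numpy as np

ee = np.array([200., 400., 600., 800., 1000.])    # effective irradiance
isc = 8.0 * ee / 1000.                            # perfectly linear Isc vs Ee
rsh = np.array([300., 250., np.nan, 280., 260.])  # one NaN shunt resistance
rs = np.array([0.5, 0.6, 0.4, -0.1, 0.5])         # one negative series resistance
io = np.array([1e-9, 2e-9, 1.5e-9, 1e-9, 0.0])    # one non-positive saturation current
print(filter_params(io, rsh, rs, ee, isc))        # -> [ True  True False False False]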
Example 10
def generateBoundingPoints(nx, ny, nz):
    x = np.arange(0, nx)
    y = np.arange(0, ny)
    z = np.arange(0, nz)
    xv, yv, zv = np.meshgrid(x, y, z, indexing='ij')      

    xv_l = (xv == 0)
    xv_r = (xv == nx-1)
    xv_bound = np.logical_or(xv_l, xv_r)
    
    yv_b = (yv == 0)
    yv_f = (yv == ny-1)
    yv_bound = np.logical_or(yv_b, yv_f)
    
    zv_b = (zv == 0)
    zv_u = (zv == nz-1)
    zv_bound = np.logical_or(zv_b, zv_u)
    
    bound = np.logical_or(xv_bound, yv_bound)
    bound = np.logical_or(bound, zv_bound)
    
    xv = xv[bound]
    yv = yv[bound]
    zv = zv[bound]
    
    return np.column_stack((xv.ravel(), yv.ravel(), zv.ravel()))
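A quick hedged check: in a 3 x 3 x 3 grid only the single interior point (1, 1, 1) is excluded, leaving 26 boundary points:

pts = generateBoundingPoints(3, 3, 3)
assert pts.shape == (26, 3)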
Example 11
def diamond_110_001(el, a0, n, crack_surface=[1,1,0], crack_front=[0,0,1],
            skin_x=1.0, skin_y=1.0, vac=5.0):
    nx, ny, nz = n
    third_dir = np.cross(crack_surface, crack_front)
    directions = [ third_dir, crack_surface, crack_front ]
    if np.linalg.det(directions) < 0:
        third_dir = -third_dir
    directions = [ third_dir, crack_surface, crack_front ]
    a = Diamond(el, latticeconstant = a0, size = [ nx,ny,nz ], 
                directions = directions)
    sx, sy, sz = a.get_cell().diagonal()
    a.translate([a0/100,a0/100,a0/100])
    a.set_scaled_positions(a.get_scaled_positions())
    a.center()

    lx  = skin_x*sx/nx
    ly  = skin_y*sy/ny
    r   = a.get_positions()
    g   = np.where(
        np.logical_or(
            np.logical_or(
                np.logical_or(
                    r[:, 0] < lx, r[:, 0] > sx-lx),
                r[:, 1] < ly),
            r[:, 1] > sy-ly),
        np.zeros(len(a), dtype=int),
        np.ones(len(a), dtype=int))
    a.set_array('groups', g)

    a.set_cell([sx+2*vac, sy+2*vac, sz])
    a.translate([vac, vac, 0.0])
    a.set_pbc([False, False, True])

    return a
Example 12
    def _eucl_max(self, nii1, nii2):
        origdata1 = nii1.get_data()
        origdata1 = np.logical_not(
            np.logical_or(origdata1 == 0, np.isnan(origdata1)))
        origdata2 = nii2.get_data()
        origdata2 = np.logical_not(
            np.logical_or(origdata2 == 0, np.isnan(origdata2)))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(
                np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        if origdata1.max() == 0 or origdata2.max() == 0:
            return np.NaN

        border1 = self._find_border(origdata1)
        border2 = self._find_border(origdata2)

        set1_coordinates = self._get_coordinates(border1, nii1.affine)
        set2_coordinates = self._get_coordinates(border2, nii2.affine)
        distances = cdist(set1_coordinates.T, set2_coordinates.T)
        mins = np.concatenate(
            (np.amin(distances, axis=0), np.amin(distances, axis=1)))

        return np.max(mins)
def get_largest_cc(u,v):
    """
    Return mask with largest connected component in u,v

    """

    if not skimage_available:
        print('*** skimage is not available. get_largest_cc() will not work. ***')
        return np.ones_like(u).astype('bool')
    
    fxx = np.array([[1,-2.0,1.0]])
    fxy = np.array([[-0.25,0,0.25],[0.0,0,0],[0.25,0,-0.25]])
    fyy = fxx.T

    u_ = u.astype('float32')
    v_ = v.astype('float32')
    uxx = cv2.filter2D(u_,-1,fxx)
    uxy = cv2.filter2D(u_,-1,fxy)
    uyy = cv2.filter2D(u_,-1,fyy)

    vxx = cv2.filter2D(v_,-1,fxx)
    vxy = cv2.filter2D(v_,-1,fxy)
    vyy = cv2.filter2D(v_,-1,fyy)

    THRESH=0.1
    ue = np.logical_or(np.logical_or(np.abs(uxx)>THRESH, np.abs(uxy)>THRESH),np.abs(uyy)>THRESH)
    ve = np.logical_or(np.logical_or(np.abs(vxx)>THRESH, np.abs(vxy)>THRESH),np.abs(vyy)>THRESH)
    edg = np.logical_or(ue,ve)
    
    L = measure.label(edg.astype('int32'),neighbors=4)
    
    sums = np.bincount(L.ravel())
    biggest_cc = L==np.argmax(sums)
    return biggest_cc
Example 14
def split_most_even(values):
    """
    >>> import numpy as np
    >>> T = True; F = False
    >>> values = np.array([1, 2, 2, 3, 3, 4])
    >>> expected = np.array([F, F, F, T, T, T])
    >>> _, actual, _ = split_most_even(values)
    >>> assert(np.all(expected==actual))
    >>> values = np.array([1, 2, 2, 3, 3, 3, 4])
    >>> expected = np.array([F, F, F, T, T, T, T])
    >>> _, actual, _ = split_most_even(values)
    >>> assert(np.all(expected==actual))
    >>> values = np.array([2, 3, 3, 3, 3, 3, 4])
    >>> expected = np.array([F, T, T, T, T, T, T])
    >>> _, actual, _ = split_most_even(values)
    >>> assert(np.all(expected==actual))
    >>> values = np.array([2, 3, 3, 3, 3, 3, 4, 4])
    >>> expected = np.array([F, F, F, F, F, F, T, T])
    >>> _, actual, _ = split_most_even(values)
    >>> assert(np.all(expected==actual))
    """
    median = np.median(values)
    below = values < median
    above = values > median
    meds = values == median

    num_below = np.sum(below)
    num_above = np.sum(above)
    if num_below < num_above:
        below = np.logical_or(below, meds)
    else:
        above = np.logical_or(above, meds)

    return below, above, median
Example 15
    def natural(self):
        """Make a Natural Colors RGB image composite from
        M-bands only.
        """
        self.check_channels('M05', 'M06', 'M07', 'M10')

        ch1 = self['M10'].check_range()
        ch2 = self['M07'].check_range()
        ch3 = self['M05'].check_range()

        ch2b = self['M06'].check_range()
        ch2 = np.ma.where(ch2.mask, ch2b, ch2)

        common_mask = np.logical_or(ch1.mask, ch2.mask)
        common_mask = np.logical_or(common_mask, ch3.mask)
        ch1.mask = common_mask
        ch2.mask = common_mask
        ch3.mask = common_mask

        img = geo_image.GeoImage((ch1, ch2, ch3),
                                 self.area,
                                 self.time_slot,
                                 fill_value=(0, 0, 0),
                                 mode="RGB",
                                 crange=((0, 90),
                                         (0, 90),
                                         (0, 90)))

        img.enhance(gamma=1.8)

        return img
Example 16
def shared_groups(grps, cutoff=0.3):
    data = []
    for grp in grps:
        H = []
        grp_file = open(grp+'_groups.txt', 'r')
        for line in grp_file:
            H.append([float(v) for v in line.split('\t')[3:]])
        grp_file.close()
        data.append(np.array(H))
    for j in xrange(10):
        s_and = 1; s_or = 0
        for H in data:
            h = H[:,j]
            c = h > np.percentile(h, 50)
            s_and = np.logical_and(s_and, c)
            s_or  = np.logical_or(s_or, c)
        print j, (s_and>0).sum()/float((s_or>0).sum())
    ref = np.array([H.shape[1] for H in data])
    cc = ref-1
    out = []
    while True:
        s_and = 1; s_or = 0
        for i in xrange(len(cc)):
            h = data[i][:,cc[i]]
            c = (h > np.percentile(h, 50))
            s_and = np.logical_and(s_and, c)
            s_or  = np.logical_or(s_or, c)
            if s_and.sum()/float(s_or.sum()) < cutoff:
                if i < len(cc)-1:
                    for k in xrange(i+1, len(cc)):
                        cc[k] = 0
                break
        if s_and.sum()/float(s_or.sum()) >= cutoff:
            out.append([s_and.sum()/float(s_or.sum())] + cc.tolist())
        if cc.sum() == 0:
            break
        for i in xrange(len(cc)-1, -1, -1):
            if cc[i] > 0:
                cc[i] -= 1
                break
            else:
                cc[i] = ref[i]-1
    print 'We have', len(out), 'groups'
    out.sort(reverse=True)
    unique = []
    bins = []
    for val1 in out:
        exist = False
        for val2 in unique:
            for i,j in zip(val1[1:], val2[1:]):
                if i == j:
                    exist = True
        if not exist:
            unique.append(val1)
            h = []
            for i in xrange(len(val1)-1):
                h.append(data[i][:,val1[i+1]])
            bins.append(np.array(h))
            show(val1, True)
    return bins
def test_categorical_variables():
    np.random.seed(123)

    def objective(x):
        return np.array(np.sum(x, axis=1).reshape(-1, 1))

    carol_spirits = ['past', 'present', 'yet to come']
    encoding = OneHotEncoding(carol_spirits)
    parameter_space = ParameterSpace([
        ContinuousParameter('real_param', 0.0, 1.0),
        CategoricalParameter('categorical_param', encoding)
    ])

    random_design = LatinDesign(parameter_space)
    x_init = random_design.get_samples(10)

    assert x_init.shape == (10, 4)
    assert np.all(np.logical_or(x_init[:, 1:3] == 0.0, x_init[:, 1:3] == 1.0))

    y_init = objective(x_init)

    gpy_model = GPy.models.GPRegression(x_init, y_init)
    gpy_model.Gaussian_noise.fix(1)
    model = GPyModelWrapper(gpy_model)

    loop = ExperimentalDesignLoop(parameter_space, model)
    loop.run_loop(objective, 5)

    assert len(loop.loop_state.Y) == 15
    assert np.all(np.logical_or(loop.loop_state.X[:, 1:3] == 0.0, loop.loop_state.X[:, 1:3] == 1.0))
def make_boxplot_temperature(caObj, name, modis_lvl2=False):
    low_clouds = get_calipso_low_clouds(caObj)
    high_clouds = get_calipso_high_clouds(caObj)
    medium_clouds = get_calipso_medium_clouds(caObj)
    temp_c = caObj.calipso.all_arrays['layer_top_temperature'][:,0] +273.15 
    if modis_lvl2:
        temp_pps = caObj.modis.all_arrays['temperature']
    else:
        temp_pps = caObj.imager.all_arrays['ctth_temperature']  
    if modis_lvl2:
        height_pps = caObj.modis.all_arrays['height']
    else:
        height_pps = caObj.imager.all_arrays['ctth_height']

    thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.30, 
                          caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0) 
    very_thin = np.logical_and(caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']<0.10, 
                          caObj.calipso.all_arrays['feature_optical_depth_532_top_layer_5km']>0) 
    thin_top = np.logical_and(caObj.calipso.all_arrays['number_layers_found']>1, thin)
    thin_1_lay = np.logical_and(caObj.calipso.all_arrays['number_layers_found']==1, thin)
    use = np.logical_and(temp_pps >100,
                         caObj.calipso.all_arrays['layer_top_altitude'][:,0]>=0)
    use = np.logical_and(height_pps <45000,use)
    low = np.logical_and(low_clouds,use)
    medium = np.logical_and(medium_clouds,use)
    high = np.logical_and(high_clouds,use)
    c_all = np.logical_or(high,np.logical_or(low,medium))
    high_very_thin = np.logical_and(high, very_thin)
    high_thin = np.logical_and(high, np.logical_and(~very_thin,thin))
    high_thick = np.logical_and(high, ~thin)
    #print "thin, thick high", np.sum(high_thin), np.sum(high_thick) 
    bias = temp_pps - temp_c
    abias = np.abs(bias)
    #abias[abias>2000]=2000
    print name.ljust(30, " "), "%3.1f"%(np.mean(abias[c_all])), "%3.1f"%(np.mean(abias[low])),"%3.1f"%(np.mean(abias[medium])),"%3.1f"%(np.mean(abias[high]))

    c_all = np.logical_or(np.logical_and(~very_thin,high),np.logical_or(low,medium))
    number_of = np.sum(c_all)
     
    #print name.ljust(30, " "), "%3.1f"%(np.sum(abias[c_all]<250)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<500)*100.0/number_of),  "%3.1f"%(np.sum(abias[c_all]<1000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<1500)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<2000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<3000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<4000)*100.0/number_of), "%3.1f"%(np.sum(abias[c_all]<5000)*100.0/number_of)
    from matplotlib import rcParams
    rcParams.update({'figure.autolayout': True})
    fig = plt.figure(figsize = (6,9))        
    ax = fig.add_subplot(111)
    plt.xticks(rotation=70)
    ax.fill_between(np.arange(0,8),-2.5,2.5, facecolor='green', alpha=0.6)
    ax.fill_between(np.arange(0,8),-5,5, facecolor='green', alpha=0.4)
    ax.fill_between(np.arange(0,8),-7.5,7.5, facecolor='green', alpha=0.2)
    ax.fill_between(np.arange(0,8),10,150, facecolor='red', alpha=0.2)
    ax.fill_between(np.arange(0,8),-20,-10, facecolor='red', alpha=0.2)
    for y_val in [-5,-4,-3,-2,-1,1,2,3,4,5]:
        plt.plot(np.arange(0,8), y_val*20 + 0*np.arange(0,8),':k', alpha=0.4)
    plt.plot(np.arange(0,8), 0 + 0*np.arange(0,8),':k', alpha=0.4)
    bplot = ax.boxplot([bias[low],bias[medium],bias[high],bias[high_thick],bias[high_thin],bias[high_very_thin]],whis=[5, 95],sym='',
                labels=["low","medium","high-all","high-thick\n od>0.4","high-thin \n 0.1<od<0.4","high-vthin\n od<0.1"],showmeans=True, patch_artist=True)
    ax.set_ylim(-20,100)
    for box in bplot['boxes']:
        box.set_facecolor('0.9')
    plt.title(name)
    plt.savefig(ADIR + "/PICTURES_FROM_PYTHON/CTTH_BOX/ctth_box_plot_temperature_%s_5_95_filt.png"%(name))
Example 19
    def test_object_logical(self):
        a = np.array([3, None, True, False, "test", ""], dtype=object)
        assert_equal(np.logical_or(a, None),
                        np.array([x or None for x in a], dtype=object))
        assert_equal(np.logical_or(a, True),
                        np.array([x or True for x in a], dtype=object))
        assert_equal(np.logical_or(a, 12),
                        np.array([x or 12 for x in a], dtype=object))
        assert_equal(np.logical_or(a, "blah"),
                        np.array([x or "blah" for x in a], dtype=object))

        assert_equal(np.logical_and(a, None),
                        np.array([x and None for x in a], dtype=object))
        assert_equal(np.logical_and(a, True),
                        np.array([x and True for x in a], dtype=object))
        assert_equal(np.logical_and(a, 12),
                        np.array([x and 12 for x in a], dtype=object))
        assert_equal(np.logical_and(a, "blah"),
                        np.array([x and "blah" for x in a], dtype=object))

        assert_equal(np.logical_not(a),
                        np.array([not x for x in a], dtype=object))

        assert_equal(np.logical_or.reduce(a), 3)
        assert_equal(np.logical_and.reduce(a), None)
Example 20
    def _run_interface(self, runtime):
        nii1 = nb.load(self.inputs.volume1)
        nii2 = nb.load(self.inputs.volume2)

        origdata1 = np.logical_not(np.logical_or(nii1.get_data() == 0, np.isnan(nii1.get_data())))
        origdata2 = np.logical_not(np.logical_or(nii2.get_data() == 0, np.isnan(nii2.get_data())))

        if isdefined(self.inputs.mask_volume):
            maskdata = nb.load(self.inputs.mask_volume).get_data()
            maskdata = np.logical_not(np.logical_or(maskdata == 0, np.isnan(maskdata)))
            origdata1 = np.logical_and(maskdata, origdata1)
            origdata2 = np.logical_and(maskdata, origdata2)

        for method in ("dice", "jaccard"):
            setattr(self, "_" + method, self._bool_vec_dissimilarity(origdata1, origdata2, method=method))

        self._volume = int(origdata1.sum() - origdata2.sum())

        both_data = np.zeros(origdata1.shape)
        both_data[origdata1] = 1
        both_data[origdata2] += 2

        nb.save(nb.Nifti1Image(both_data, nii1.get_affine(), nii1.get_header()), self.inputs.out_file)

        return runtime
Example 21
def warp_image(im, flow):
    """
    Use optical flow to warp image to the next
    :param im: image to warp
    :param flow: optical flow
    :return: warped image
    """
    from scipy import interpolate
    image_height = im.shape[0]
    image_width = im.shape[1]
    flow_height = flow.shape[0]
    flow_width = flow.shape[1]
    n = image_height * image_width
    (iy, ix) = np.mgrid[0:image_height, 0:image_width]
    (fy, fx) = np.mgrid[0:flow_height, 0:flow_width]
    fx = fx.astype(np.float64) + flow[:, :, 0]
    fy = fy.astype(np.float64) + flow[:, :, 1]
    mask = np.logical_or(fx <0 , fx > flow_width)
    mask = np.logical_or(mask, fy < 0)
    mask = np.logical_or(mask, fy > flow_height)
    fx = np.minimum(np.maximum(fx, 0), flow_width)
    fy = np.minimum(np.maximum(fy, 0), flow_height)
    points = np.concatenate((ix.reshape(n,1), iy.reshape(n,1)), axis=1)
    xi = np.concatenate((fx.reshape(n, 1), fy.reshape(n,1)), axis=1)
    warp = np.zeros((image_height, image_width, im.shape[2]))
    for i in range(im.shape[2]):
        channel = im[:, :, i]
        plt.imshow(channel, cmap='gray')
        values = channel.reshape(n, 1)
        new_channel = interpolate.griddata(points, values, xi, method='cubic')
        new_channel = np.reshape(new_channel, [flow_height, flow_width])
        new_channel[mask] = 1
        warp[:, :, i] = new_channel.astype(np.uint8)

    return warp.astype(np.uint8)
Example 22
def computemergepenalty(ellipses,i,j,L,dfore):
    # compute parameters of merged component
    BWmerge = num.logical_or(L == i+1,L == j+1)
    if not BWmerge.any():
        return (0.,ellipses[i])
    ellipsemerge = weightedregionpropsi(BWmerge,dfore[BWmerge])
    #print 'in computemergepenalty, ellipsemerge is: ' + str(ellipsemerge)
    # see if the major, minor, area are small enough
    if (ellipsemerge.area > params.maxshape.area) or (ellipsemerge.minor > params.maxshape.minor) or (ellipsemerge.major > params.maxshape.major):
        #print 'merged ellipse would be too large'
        return (params.maxpenaltymerge+1,ellipses[i])
    # find pixels that should be foreground according to the ellipse parameters
    (r1,r2,c1,c2) = getboundingboxtight(ellipsemerge,L.shape)
    isforepredmerge = ellipsepixels(ellipsemerge,num.array([r1,r2,c1,c2]))
    # pixels that were foreground
    isforepredi = ellipsepixels(ellipses[i],num.array([r1,r2,c1,c2]))
    isforepredj = ellipsepixels(ellipses[j],num.array([r1,r2,c1,c2]))
    isforepredi = num.logical_or(isforepredi, (L[r1:r2,c1:c2]==i+1))
    # pixels that are now foreground that weren't before
    newforemerge = num.logical_and(isforepredmerge,num.logical_or(isforepredi,isforepredj)==False)
    # compute the total background score for this new region that must be foreground
    dforemerge = dfore[r1:r2,c1:c2].copy()
    dforemerge = 1 - dforemerge[newforemerge]
    dforemerge[dforemerge<0] = 0
    mergepenalty = num.sum(dforemerge)
    #print 'mergepenalty = ' + str(mergepenalty)
    #print 'in computemergepenalty, ellipsemerge is: ' + str(ellipsemerge)
    return (mergepenalty,ellipsemerge)
Example 23
 def remove_wrongly_sized_connected_components(self, a, min_size, max_size, in_place):
     """
     Adapted from http://github.com/jni/ray/blob/develop/ray/morpho.py
     (MIT License)
     """
     bin_out = self.BinaryOut.value
     
     original_dtype = a.dtype
         
     if not in_place:
         a = a.copy()
     if min_size == 0 and (max_size is None or max_size > numpy.prod(a.shape)): # shortcut for efficiency
         return a
     
     try:
         component_sizes = numpy.bincount( a.ravel() )
     except TypeError:
         # On 32-bit systems, must explicitly convert from uint32 to int
         # (This fix is just for VM testing.)
         component_sizes = numpy.bincount( numpy.asarray(a.ravel(), dtype=int) )
     bad_sizes = component_sizes < min_size
     if max_size is not None:
         numpy.logical_or( bad_sizes, component_sizes > max_size, out=bad_sizes )
     
     bad_locations = bad_sizes[a]
     a[bad_locations] = 0
     if (bin_out):
         # Replace non-zero values with 1
         numpy.place(a,a,1)
     return numpy.array(a, dtype=original_dtype)
Example 24
def plot_region(regname,ptype='nh',axis=None,latlim=None,limsdict=None): 
    """ plot_region(regname,ptype='nh',axis=None,latlim=None,limsdict=None):
                     Given a region name, plot it for reference.

                     latlims is unused right now
                     if passing limsdict (to override regname), set regname='other'
    """

    if regname=='other':
        reglims=limsdict
    else:
        reglims = con.get_regionlims(regname)

    latlims = reglims['latlims']
    lonlims = reglims['lonlims']

    dummy = con.get_t63landmask()
    lat = con.get_t63lat()
    lon = con.get_t63lon()

    lons,lats = np.meshgrid(lon,lat)

    reglatsbool = np.logical_and(lat>latlims[0],lat<latlims[1])
    reglonsbool = np.logical_and(lon>lonlims[0],lon<lonlims[1])

    # mask everything but the region of interest
    regmask = np.logical_or( 
                            np.logical_or(lats<latlims[0],lats>latlims[1]), 
                            np.logical_or(lons<lonlims[0],lons>lonlims[1]))
    dummym = ma.masked_where(regmask,dummy)

    plt.figure()
    kemmap(dummym,lat,lon,ptype=ptype,axis=axis,latlim=latlim,suppcb=1,
           cmin=-11,cmax=2,cmap='blue2blue_w10',drawgrid=True)
Example 25
 def relative_error(self,A,B,abs_eps):
     absA = np.abs(A)
     absB = np.abs(B)
     I = np.logical_not(np.logical_or(A==B,np.logical_or(absA < abs_eps, absB < abs_eps)))
     E = np.zeros(A.shape,dtype=A.dtype)
     E[I] = np.abs(A[I]-B[I]) / min(absA[I] + absB[I])
     return E
Example 26
    def bb(self,inFile, xval, yval, zval):
        X_invalid = np.logical_or((inFile.header.min[0] > xval), (inFile.header.max[0] < xval))
        Y_invalid = np.logical_or((inFile.header.min[1] > yval), (inFile.header.max[1] < yval))
        Z_invalid = np.logical_or((inFile.header.min[2] > zval), (inFile.header.max[2] < zval))

        # logical_or combines two arrays at a time; a third positional argument would be
        # treated as the ``out`` parameter, so nest the calls instead
        bad_indices = np.where(np.logical_or(np.logical_or(X_invalid, Y_invalid), Z_invalid))
        return(bad_indices)
Example 27
    def prepareCalculation(self, pic=None, automask=True):
        '''
        prepare data used in calculation
        
        :param pic: str, list of str, or 2d array, if provided, and automask is True, then 
            generate a dynamic mask
        :param automask: bool, if True, and pic is not None, then generate a dynamic mask
        
        :return: None
        '''
        self.staticmask = self.mask.staticMask()
        self.correction = self.calculate.genCorrectionMatrix()
        # if a pic is provided, then generate one-time dynamicmask
        if (pic is not None) and automask:
            image = self._getPic(pic)
            # image *= self.correction
            dymask = self.mask.dynamicMask(image)
            if dymask is not None:
                dymask = np.logical_or(self.staticmask, dymask)
            else:
                dymask = self.staticmask
            
            if self.config.avgmask:
                mask = self.genAvgMask(image, dymask=dymask)
                self.staticmask = np.logical_or(dymask, mask)

        self.calculate.genIntegrationInds(self.staticmask)
        return
Example 28
 def find_most_similar(self, bids, K_sim=14):
     """Return the bid of the most similar book to parameter bid except the given bid."""
     termv = sparse.csc_matrix((self.M, 1), dtype=int)
     for bid in bids:
         col_num = self.bid_to_col.get(str(bid))
         if col_num is not None:
             termv = termv + self.term_bid_matrix.getcol(col_num)
     if termv.nnz == 0:
         return ()
     termva = termv.toarray()    # Generate a vector for terms
     stop_words_removed = np.logical_and(termva, self.stop_words)
     nonzero = stop_words_removed.nonzero()[0]    # Nonzero indices
     rest_term_rows = self.term_bid_matrix_csr[nonzero]
     docs = np.zeros(self.N, dtype=bool)
     for row in rest_term_rows:
         np.logical_or(docs, row.toarray()[0], docs)
     cols = docs.nonzero()[0]
     matched_matrix = self.term_bid_matrix[:,cols]
     termv.data = self.tf(termv.data) * np.array([self.idf(self.row_to_term[row])
                                                  for row in termv.indices])
     termv = normalize(termv.T, axis=1, copy=False)
     matched_matrix.data = self.tf(matched_matrix.data)
     matched_matrix = normalize(matched_matrix.T, axis=1, copy=False).T
     cos_sims = termv.dot(matched_matrix).toarray()[0]
     found_bids = (self.col_to_bid[col] for col in cols)
     return islice((int(r[1])
                    for r in heapq.nlargest(K_sim, zip(cos_sims, found_bids))
                    if int(r[1]) not in bids),
                    9)
Example 29
def update_mul_inf_zero(lb1_ub1, lb2_ub2, t):
    if not any(np.isinf(lb1_ub1)) and not any(np.isinf(lb2_ub2)):
        return
        
    t_min, t_max = t
    lb1, ub1 = lb1_ub1
    lb2, ub2 = lb2_ub2
    
    ind1_zero_minus = logical_and(lb1<0, ub1>=0)
    ind1_zero_plus = logical_and(lb1<=0, ub1>0)
    
    ind2_zero_minus = logical_and(lb2<0, ub2>=0)
    ind2_zero_plus = logical_and(lb2<=0, ub2>0)
    
    has_plus_inf_1 = logical_or(logical_and(ind1_zero_minus, lb2==-inf), logical_and(ind1_zero_plus, ub2==inf))
    has_plus_inf_2 = logical_or(logical_and(ind2_zero_minus, lb1==-inf), logical_and(ind2_zero_plus, ub1==inf))
    
    # !!!! lines with zero should be before lines with inf !!!!
    ind = logical_or(logical_and(lb1==-inf, ub2==0), logical_and(lb2==-inf, ub1==0))
    t_max[atleast_1d(logical_and(ind, t_max<0.0))] = 0.0
    
    t_max[atleast_1d(logical_or(has_plus_inf_1, has_plus_inf_2))] = inf
    t_max[atleast_1d(logical_or(logical_and(lb1==0, ub2==inf), logical_and(lb2==0, ub1==inf)))] = inf
    
    has_minus_inf_1 = logical_or(logical_and(ind1_zero_plus, lb2==-inf), logical_and(ind1_zero_minus, ub2==inf))
    has_minus_inf_2 = logical_or(logical_and(ind2_zero_plus, lb1==-inf), logical_and(ind2_zero_minus, ub1==inf))
    # !!!! lines with zero should be before lines with -inf !!!!
    t_min[atleast_1d(logical_or(logical_and(lb1==0, ub2==inf), logical_and(lb2==0, ub1==inf)))] = 0.0
    t_min[atleast_1d(logical_or(logical_and(lb1==-inf, ub2==0), logical_and(lb2==-inf, ub1==0)))] = 0.0
    
    t_min[atleast_1d(logical_or(has_minus_inf_1, has_minus_inf_2))] = -inf
Example 30
def generateBoundingPoints(nx, ny, nz):
    x = np.arange(0, nx)
    y = np.arange(0, ny)
    z = np.arange(0, nz)
    xm, ym, zm = np.meshgrid(x, y, z, indexing='ij')

    xm_l = (xm == 0)        # left
    xm_r = (xm == nx - 1)   # right
    xm_bound = np.logical_or(xm_l, xm_r)

    ym_b = (ym == 0)        # back
    ym_f = (ym == ny - 1)   # front
    ym_bound = np.logical_or(ym_b, ym_f)

    zm_b = (zm == 0)        # bottom
    zm_u = (zm == nz - 1)   # up
    zm_bound = np.logical_or(zm_b, zm_u)

    bound = np.logical_or(xm_bound, ym_bound)
    bound = np.logical_or(bound, zm_bound)

    xb = xm[bound]
    yb = ym[bound]
    zb = zm[bound]

    return np.column_stack((xb.ravel(), yb.ravel(), zb.ravel()))
Example 31
# In[7]:

from sklearn.externals import joblib

# In[21]:

# Preprocess:
nmed = None
nan_mask = None
for band in bands:
    b = medians1[band].ravel()
    if nan_mask is None:
        nan_mask = np.isnan(b)
    else:
        nan_mask = np.logical_or(nan_mask, np.isnan(medians1[band].ravel()))
    b[np.isnan(b)] = np.nanmedian(b)
    if nmed is None:
        sp = medians1[band].shape
        nmed = b
    else:
        nmed = np.vstack((nmed, b))

# In[12]:

import os
model = None
for file in os.listdir(modelos):
    print file
    if file.endswith(".pkl"):
        model = file
Example 32
def sample_hist_statistic(sample_path):
    """ calculate overlapping between noncal and outline with HU range 0 ~ 50 """
    k = 5
    f1s = []
    file_paths = []  # save paths of slice with non-calcified plaque
    noncal_flag = False

    np.random.seed(42)
    sample = sample_path.split('/')[-1]
    print("Processing ", sample)
    for artery in sorted(listdir(sample_path)):
        mask_path = osp.join(sample_path, artery, 'applicate', 'mask')
        img_path = osp.join(sample_path, artery, 'applicate', 'image')

        # extract label files
        label_files = sorted([
            file for file in listdir(mask_path)
            if file.endswith('.tiff') and not file.startswith('.')
        ])

        rand_seeds = np.random.uniform(0.0, 1.0, len(label_files))
        for inx, label_file in enumerate(label_files):
            label_path = osp.join(mask_path, label_file)
            slice_path = osp.join(img_path, label_file)

            label = io.imread(label_path)[144:368, 144:368]
            slice = io.imread(slice_path)[144:368, 144:368]

            if rand_seeds[inx] < 0.05:
                # save file path
                file_path = '/'.join([sample, artery, label_file])
                file_paths.append(file_path)

                # calculate noncal evaluations
                n_above50 = np.sum(np.logical_and(label == 76, slice > 50))
                n_below0 = np.sum(np.logical_and(label == 76, slice < 0))
                if np.sum(label == 76) != 0:
                    noncal_pxiels_sort = sorted(slice[label == 76].flatten())
                    topk = noncal_pxiels_sort[-k:]
                    buttomk = noncal_pxiels_sort[:k]
                else:
                    topk = [51 for _ in range(k)]
                    buttomk = [-1 for _ in range(k)]
                noncal_eval = np.array([n_above50, n_below0, *topk,
                                        *buttomk]).astype(np.int16)

                # hu0050 map
                # mask_hu0050 = np.logical_and(slice <= -800, slice >= -1000)
                mask_hu0050 = (slice <= -800)
                hu0050_map = np.zeros(label.shape, dtype=np.uint8)
                hu0050_map[mask_hu0050] = 150

                slice1 = slice  # only extract HU range [-100, 155]
                slice2 = hu2lut(slice, window=1000,
                                level=700)  # for calcification

                # noncal map
                mask_noncal = (label == 76)
                mask_outline = np.logical_or(label == 76, label == 255)
                mask_outline = np.logical_or(mask_outline, label == 151)
                mask_outline_hu0050 = np.logical_and(mask_outline, mask_hu0050)

                # calculate F1 score
                f1s.append(
                    f1_score(mask_noncal.flatten(),
                             mask_outline_hu0050.flatten()))

                # calculate overlap
                overlap_map = np.zeros(label.shape, dtype=np.uint8)
                overlap_map[mask_noncal] = 76
                overlap_map[mask_outline_hu0050] = 150
                overlap_map[np.logical_and(
                    mask_noncal,
                    mask_outline_hu0050)] = 226  # yellow for overlap

                # combine overlap with GT label
                mix_overlap = label.copy()
                mix_overlap[mask_outline_hu0050] = 150
                mix_overlap[np.logical_and(
                    mask_noncal,
                    mask_outline_hu0050)] = 226  # yellow for overlap

                if not noncal_flag:
                    noncal_evals = noncal_eval[np.newaxis, :]
                    labels = label[np.newaxis, :, :]
                    slices1 = slice1[np.newaxis, :, :]
                    slices2 = slice2[np.newaxis, :, :]
                    hu0050_maps = hu0050_map[np.newaxis, :, :]
                    overlap_maps = overlap_map[np.newaxis, :, :]
                    noncal_maps = mask_noncal[np.newaxis, :, :]
                    outline0050_maps = mask_outline_hu0050[np.newaxis, :, :]
                    mix_overlap_maps = mix_overlap[np.newaxis, :, :]
                    noncal_flag = True
                else:
                    noncal_evals = np.concatenate(
                        [noncal_evals, noncal_eval[np.newaxis, :]])
                    labels = np.concatenate([labels, label[np.newaxis, :, :]],
                                            axis=0)
                    slices1 = np.concatenate(
                        [slices1, slice1[np.newaxis, :, :]], axis=0)
                    slices2 = np.concatenate(
                        [slices2, slice2[np.newaxis, :, :]], axis=0)
                    hu0050_maps = np.concatenate(
                        [hu0050_maps, hu0050_map[np.newaxis, :, :]], axis=0)
                    noncal_maps = np.concatenate(
                        (noncal_maps, mask_noncal[np.newaxis, :, :]), axis=0)
                    outline0050_maps = np.concatenate(
                        (outline0050_maps,
                         mask_outline_hu0050[np.newaxis, :, :]),
                        axis=0)
                    overlap_maps = np.concatenate(
                        [overlap_maps, overlap_map[np.newaxis, :, :]], axis=0)
                    mix_overlap_maps = np.concatenate(
                        [mix_overlap_maps, mix_overlap[np.newaxis, :, :]],
                        axis=0)

    if not noncal_flag:
        noncal_evals = np.empty((0, 2 * k + 2), dtype=np.int16)
        labels = np.empty((0, *label.shape), dtype=np.uint8)
        slices1 = np.empty((0, *label.shape), dtype=np.uint8)
        slices2 = np.empty((0, *label.shape), dtype=np.uint8)
        noncal_maps = np.empty((0, *label.shape), dtype=np.uint8)
        outline0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)
        hu0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        mix_overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)

    print(f1s)
    return noncal_evals, f1s, file_paths, noncal_maps, outline0050_maps, overlap_maps, labels, slices1, slices2, hu0050_maps, mix_overlap_maps
Example 33
def outline_noncal_overlap_statistic(sample_path):
    """ calculate overlapping between noncal and outline with HU range 0 ~ 50 """
    f1s = []
    noncal_flag = False

    sample = sample_path.split('/')[-1]
    print("Processing ", sample)
    for artery in sorted(listdir(sample_path)):
        mask_path = osp.join(sample_path, artery, 'applicate', 'mask')
        img_path = osp.join(sample_path, artery, 'applicate', 'image')

        # extract label files
        label_files = sorted([
            file for file in listdir(mask_path)
            if file.endswith('.tiff') and not file.startswith('.')
        ])

        for label_file in label_files:
            label_path = osp.join(mask_path, label_file)
            slice_path = osp.join(img_path, label_file)

            label = io.imread(label_path)
            slice = io.imread(slice_path)

            if np.sum(label == 76) != 0:
                overlap_map = np.zeros(label.shape, dtype=np.uint8)
                # noncal map
                mask_noncal = (label == 76)
                noncal_pixels = slice[mask_noncal].flatten()
                # print(noncal_pixels.max(), noncal_pixels.min())
                mask_hu0050 = np.logical_and(slice <= 50, slice >= 0)

                mask_outline = np.logical_or(label == 76, label == 255)
                mask_outline = np.logical_or(mask_outline, label == 151)

                mask_outline_hu0050 = np.logical_and(mask_outline, mask_hu0050)
                # mask_outline_hu0050 = mask_outline

                try:
                    f1s.append(
                        f1_score(mask_noncal.flatten(),
                                 mask_outline_hu0050.flatten()))
                except:
                    print(label_path)

                overlap_map[mask_noncal] = 76
                overlap_map[mask_outline_hu0050] = 150
                overlap_map[np.logical_and(
                    mask_noncal,
                    mask_outline_hu0050)] = 226  # yellow for overlap

                if not noncal_flag:
                    overlap_maps = overlap_map[np.newaxis, :, :]
                    noncal_maps = mask_noncal[np.newaxis, :, :]
                    outline0050_maps = mask_outline_hu0050[np.newaxis, :, :]
                    noncal_flag = True
                else:
                    noncal_maps = np.concatenate(
                        (noncal_maps, mask_noncal[np.newaxis, :, :]), axis=0)
                    outline0050_maps = np.concatenate(
                        (outline0050_maps,
                         mask_outline_hu0050[np.newaxis, :, :]),
                        axis=0)
                    overlap_maps = np.concatenate(
                        [overlap_maps, overlap_map[np.newaxis, :, :]], axis=0)

    if not noncal_flag:
        noncal_maps = np.empty((0, *label.shape), dtype=np.uint8)
        outline0050_maps = np.empty((0, *label.shape), dtype=np.uint8)
        overlap_maps = np.empty((0, *label.shape), dtype=np.uint8)

    return f1s, noncal_maps, outline0050_maps, overlap_maps
Example 34
    def matching_gen(A, K, D, m, eta, gamma, model_var):
        K += epsilon

        mseed = np.size(np.where(A.flat)) // 2

        if type(model_var) == tuple:
            mv1, mv2 = model_var
        else:
            mv1, mv2 = model_var, model_var

        if mv1 in ('powerlaw', 'power_law'):
            Fd = D**eta
        elif mv1 in ('exponential', ):
            Fd = np.exp(eta * D)

        if mv2 in ('powerlaw', 'power_law'):
            Fk = K**gamma
        elif mv2 in ('exponential', ):
            Fk = np.exp(gamma * K)

        Ff = Fd * Fk * np.logical_not(A)
        u, v = np.where(np.triu(np.ones((n, n)), 1))

        for ii in range(mseed, m):
            C = np.append(0, np.cumsum(Ff[u, v]))
            r = np.sum(np.random.random() * C[-1] >= C)
            uu = u[r]
            vv = v[r]
            A[uu, vv] = A[vv, uu] = 1

            updateuu, = np.where(np.inner(A, A[:, uu]))
            np.delete(updateuu, np.where(updateuu == uu))
            np.delete(updateuu, np.where(updateuu == vv))

            c1 = np.append(A[:, uu], A[uu, :])
            for i in range(len(updateuu)):
                j = updateuu[i]
                c2 = np.append(A[:, j], A[j, :])

                use = np.logical_or(c1, c2)
                use[uu] = use[uu + n] = use[j] = use[j + n] = 0
                ncon = np.sum(c1[use]) + np.sum(c2[use])
                if ncon == 0:
                    K[uu, j] = K[j, uu] = epsilon
                else:
                    K[uu, j] = K[j, uu] = (
                        2 / ncon * np.sum(np.logical_and(c1[use], c2[use])) +
                        epsilon)

            updatevv, = np.where(np.inner(A, A[:, vv]))
            np.delete(updatevv, np.where(updatevv == uu))
            np.delete(updatevv, np.where(updatevv == vv))

            c1 = np.append(A[:, vv], A[vv, :])
            for i in range(len(updatevv)):
                j = updatevv[i]
                c2 = np.append(A[:, j], A[j, :])

                use = np.logical_or(c1, c2)
                use[vv] = use[vv + n] = use[j] = use[j + n] = 0
                ncon = np.sum(c1[use]) + np.sum(c2[use])
                if ncon == 0:
                    K[vv, j] = K[j, vv] = epsilon
                else:
                    K[vv, j] = K[j, vv] = (
                        2 / ncon * np.sum(np.logical_and(c1[use], c2[use])) +
                        epsilon)

            Ff = Fd * Fk * np.logical_not(A)

        return A
Example 35
 def distfun(s1, s2):
     return self.get_dist(data_vec[np.logical_or(y == s1, y == s2), :].T)
Example 36
def plot_scores(input_filename, sheetnames, x_col, y_cols, group_by, log_scale,
                colors, linestyle, select, outpout_filename):
    # %matplotlib
    import numpy as np
    import pandas as pd
    import matplotlib.pyplot as plt
    from matplotlib.backends.backend_pdf import PdfPages
    import seaborn as sns

    for sheetname in sheetnames:
        # sheetname = sheetnames[0]
        #os.path.join(os.path.dirname(input_filename),
        #                              sheetname + "_scores-by-s.pdf")
        data = pd.read_excel(input_filename, sheetname=sheetname)

        # avoid poor rounding
        for col in group_by:
            try:
                data[col] = np.asarray(data[col]).round(5)
            except:
                pass
        data[x_col] = np.asarray(data[x_col]).round(5)
        # assert len(data[x_col].unique()) == 11

        def close(vec, val, tol=1e-4):
            return np.abs(vec - val) < tol

        # select
        for k in select:
            mask_or = np.zeros(data.shape[0], dtype=bool)
            for v in select[k]:
                mask_or = np.logical_or(mask_or, close(data[k], v))
            data = data[mask_or]

        # enet => tv or gn == 0
        enettv0 = data[data["algo"] == "enet"].copy()
        enettv0.algo = "enettv"
        enetgn0 = data[data["algo"] == "enet"].copy()
        enetgn0.algo = "enetgn"
        data = pd.concat([data, enettv0, enetgn0])

        # rm enet
        data = data[~(data.algo == 'enet')]

        data.sort_values(by=x_col, ascending=True, inplace=True)

        pdf = PdfPages(outpout_filename)
        for y_col in y_cols:
            #y_col = y_cols[0]
            fig = plt.figure()
            xoffsset = -0.001 * len([_ for _ in data.groupby(group_by)]) / 2
            for (algo, l1_ratio, a), d in data.groupby(group_by):
                print((algo, l1_ratio, a))
                plt.plot(d[x_col],
                         d[y_col],
                         color=colors[(a, l1_ratio)],
                         ls=linestyle[algo],
                         label="%s, l1/l2:%.1f, a:%.3f" % (algo, l1_ratio, a))
                if y_col in log_scale:
                    plt.yscale('log')
                y_col_se = y_col + "_se"
                if y_col_se in d.columns:
                    plt.errorbar(d[x_col] + xoffsset,
                                 d[y_col],
                                 yerr=d[y_col_se],
                                 legend=False,
                                 fmt=None,
                                 alpha=0.2,
                                 ecolor=colors[(a, l1_ratio)],
                                 elinewidth=1)
                    xoffsset += 0.001
            plt.xlabel(x_col)
            plt.ylabel(y_col)
            plt.legend()
            plt.suptitle(y_col)
            pdf.savefig(fig)
            plt.clf()
        pdf.close()
Example 37
dwi_fname, dwi_bval_fname, dwi_bvec_fname, _ = get_fnames('cfin_multib')
data, affine = load_nifti(dwi_fname)
bvals, bvecs = read_bvals_bvecs(dwi_bval_fname, dwi_bvec_fname)
gtab = gradient_table(bvals, bvecs)

"""
For the sake of simplicity, we only select two non-zero b-values for this
example.
"""

bvals = gtab.bvals

bvecs = gtab.bvecs

sel_b = np.logical_or(np.logical_or(bvals == 0, bvals == 1000), bvals == 2000)

data = data[..., sel_b]

gtab = gradient_table(bvals[sel_b], bvecs[sel_b])

print(data.shape)

"""
As one can see from its shape, the selected data contains a total of 67
volumes of images corresponding to all the diffusion gradient directions
of the selected b-values.

The PCA denoising using the Marcenko-Pastur distribution can be performed by
calling the function ``mppca``:
"""
def calc_p_alpha_limits_pdf(pdfs, ks, mu, rel_std):
    """Get the CDF ratio at the limits `rel_std` in each observable bin.

    Similar to `calc_p_alpha_limits`, but the CDF calculation is based on the
    normalized likelihood values `pdfs` and corresponding k values `ks`.

    Parameters
    ----------
    pdfs : list of list of float
        The pdf values for each feature bin and for each value k.
        The value k is the observed number of events in the Poisson Likelihood.
        The number of evaluted k values is different for each observable bin,
        and it is chosen such that a certain coverage is obtained.
        Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
    ks : list of list of int
        The corresponding k value for each of the evaluated pdf values `pdfs`.
        Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
    mu : array_like
        The expected number (Poisson mean) of events in each observable bin.
        Shape: [n_bins]
    rel_std : array_like
        The relative limits wrt the expected number (Poisson mean) of events
        in each bin, i.e. limits / mu. The last dimension corresponds to lower
        and upper relative limits, respectively.
        Shape: [n_bins, n_alpha, 2]

    Returns
    -------
    array_like
        The ratio of the PDF tails:
            P(x <= limit_i) / P(x <= mu_i) if limit_i <= mu_i
            P(x > limit_i) / P(x > mu_i) if limit_i > mu_i
        for each observable bin i.
        The CDF P(x <= y) is calculated based on the normalized likelihood
        values `pdfs` and corresponding k values `ks`.
        This ratio reaches 1., if the measured values `k` agree well with the
        expected values `mu`. The smaller this ratio is, the higher the
        discrepancy.
        Shape: [n_bins, n_alpha, 2]
    """
    abs_std = np.zeros_like(rel_std)
    limits = np.zeros_like(rel_std)
    for i in range(rel_std.shape[1]):
        abs_std = mu * rel_std[:, i, 0]
        returned_vals = __calc_p_alpha_pdf__(pdfs,
                                             ks,
                                             mu,
                                             abs_std,
                                             upper=False)

        is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
        is_zero_mu = mu == 0.
        only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)

        returned_vals[only_zero_mu] = -np.inf
        limits[:, i, 0] = returned_vals
    for i in range(rel_std.shape[1]):
        abs_std = mu * rel_std[:, i, 1]
        returned_vals = __calc_p_alpha_pdf__(pdfs, ks, mu, abs_std, upper=True)
        is_nan = np.logical_or(np.isnan(abs_std), np.isnan(mu))
        is_zero_mu = mu == 0.
        only_zero_mu = np.logical_and(is_zero_mu, ~is_nan)

        returned_vals[only_zero_mu] = -np.inf
        limits[:, i, 1] = returned_vals
    return limits
Example 39
    def eval_sequence(self, data):
        """Calculates GT and Tracker matches for one sequence for TrackMAP metrics. Adapted from
        https://github.com/TAO-Dataset/"""

        # Initialise results to zero for each sequence as the fields are only defined over the set of all sequences
        res = {}
        for field in self.fields:
            res[field] = [0 for _ in self.array_labels]

        gt_ids, dt_ids = data['gt_track_ids'], data['dt_track_ids']

        if len(gt_ids) == 0 and len(dt_ids) == 0:
            for idx in range(self.num_ig_masks):
                res[idx] = None
            return res

        # get track data
        gt_tr_areas = data.get('gt_track_areas', None) if self.use_area_rngs else None
        gt_tr_lengths = data.get('gt_track_lengths', None) if self.use_time_rngs else None
        gt_tr_iscrowd = data.get('gt_track_iscrowd', None)
        dt_tr_areas = data.get('dt_track_areas', None) if self.use_area_rngs else None
        dt_tr_lengths = data.get('dt_track_lengths', None) if self.use_time_rngs else None
        is_nel = data.get('not_exhaustively_labeled', False)

        # compute ignore masks for different track sets to eval
        gt_ig_masks = self._compute_track_ig_masks(len(gt_ids), track_lengths=gt_tr_lengths, track_areas=gt_tr_areas,
                                                   iscrowd=gt_tr_iscrowd)
        dt_ig_masks = self._compute_track_ig_masks(len(dt_ids), track_lengths=dt_tr_lengths, track_areas=dt_tr_areas,
                                                   is_not_exhaustively_labeled=is_nel, is_gt=False)

        boxformat = data.get('boxformat', 'xywh')
        ious = self._compute_track_ious(data['dt_tracks'], data['gt_tracks'], iou_function=data['iou_type'],
                                        boxformat=boxformat)

        for mask_idx in range(self.num_ig_masks):
            gt_ig_mask = gt_ig_masks[mask_idx]

            # Sort gt ignore last
            gt_idx = np.argsort([g for g in gt_ig_mask], kind="mergesort")
            gt_ids = [gt_ids[i] for i in gt_idx]

            ious_sorted = ious[:, gt_idx] if len(ious) > 0 else ious

            num_thrs = len(self.array_labels)
            num_gt = len(gt_ids)
            num_dt = len(dt_ids)

            # Array to store the "id" of the matched dt/gt
            gt_m = np.zeros((num_thrs, num_gt)) - 1
            dt_m = np.zeros((num_thrs, num_dt)) - 1

            gt_ig = np.array([gt_ig_mask[idx] for idx in gt_idx])
            dt_ig = np.zeros((num_thrs, num_dt))

            for iou_thr_idx, iou_thr in enumerate(self.array_labels):
                if len(ious_sorted) == 0:
                    break

                for dt_idx, _dt in enumerate(dt_ids):
                    iou = min([iou_thr, 1 - 1e-10])
                    # information about best match so far (m=-1 -> unmatched)
                    # store the gt_idx which matched for _dt
                    m = -1
                    for gt_idx, _ in enumerate(gt_ids):
                        # if this gt already matched continue
                        if gt_m[iou_thr_idx, gt_idx] > 0:
                            continue
                        # if _dt matched to reg gt, and on ignore gt, stop
                        if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1:
                            break
                        # continue to next gt unless better match made
                        if ious_sorted[dt_idx, gt_idx] < iou - np.finfo('float').eps:
                            continue
                        # if match successful and best so far, store appropriately
                        iou = ious_sorted[dt_idx, gt_idx]
                        m = gt_idx

                    # No match found for _dt, go to next _dt
                    if m == -1:
                        continue

                    # if gt to ignore for some reason update dt_ig.
                    # Should not be used in evaluation.
                    dt_ig[iou_thr_idx, dt_idx] = gt_ig[m]
                    # _dt match found, update gt_m, and dt_m with "id"
                    dt_m[iou_thr_idx, dt_idx] = gt_ids[m]
                    gt_m[iou_thr_idx, m] = _dt

            dt_ig_mask = dt_ig_masks[mask_idx]

            dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt))  # 1 X num_dt
            dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0)  # num_thrs X num_dt

            # Based on dt_ig_mask ignore any unmatched detection by updating dt_ig
            dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == -1, dt_ig_mask))
            # store results for given video and category
            res[mask_idx] = {
                "dt_ids": dt_ids,
                "gt_ids": gt_ids,
                "dt_matches": dt_m,
                "gt_matches": gt_m,
                "dt_scores": data['dt_track_scores'],
                "gt_ignore": gt_ig,
                "dt_ignore": dt_ig,
            }

        return res
def __calc_p_alpha_pdf__(pdfs, ks, mu, k, upper=True):
    """Get the CDF ratio at a given number of observed events k in each bin.

    Similar to `__calc_p_alpha__`, but CDF is calculated based on the
    computed normalized likelihood values `pdfs` and the corresponding
    k values `ks`.

    Parameters
    ----------
    pdfs : list of list of float
        The pdf values for each feature bin and for each value k.
        The value k is the observed number of events in the Poisson Likelihood.
        The number of evaluated k values is different for each observable bin,
        and it is chosen such that a certain coverage is obtained.
        Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
    ks : list of list of int
        The corresponding k value for each of the evaluated pdf values `pdfs`.
        Shape: [n_bins, n_k_values] (note that n_k_values is not constant!)
    mu : array_like
        The expected number (Poisson mean) of events in each observable bin.
        Shape: [n_bins]
    k : array_like
        The measured number (Poisson k) of events in each observable bin.
        The CDF ratio is evaluated at these k values.
        Shape: [n_bins]
    upper : bool, optional
        If true, the upper PDF tail will be considered, i.e. the ratio
        P(x > k_i) / P(x > mu_i) will be computed.
        If false, P(x <= k_i) / P(x <= mu_i) is computed.

    Returns
    -------
    array_like
        The ratio P(x <= k_i) / P(x <= mu_i) for each observable bin i.
        The CDF P(x <= y) is calculated based on the normalized likelihood
        values `pdfs` and corresponding k values `ks`.
        If upper is True, then '<=' switches to '>'.
        Shape: [n_bins]
    """
    assert mu.shape == k.shape, 'Shape of \'mu\' and \'k\' have to be the same'
    limit = np.copy(k)

    is_nan = np.logical_or(np.isnan(k), np.isnan(mu))
    is_finite = mu != 0.

    for i, (pdf, ksi) in enumerate(zip(pdfs, ks)):
        cdf = np.cumsum(pdf)
        if is_finite[i]:
            mu_idx = np.where(ksi == int(mu[i]))[0]
            if len(mu_idx) == 0:
                a_ref = np.nan
            else:
                a_ref = cdf[mu_idx]
            k_idx = np.where(ksi == int(k[i]))[0]
            if len(k_idx) == 0:
                if upper:
                    a_k = 1
                else:
                    a_k = 0
            else:
                a_k = cdf[k_idx]
            if upper:
                if 1 - a_k == 0.:
                    limit[i] = np.inf
                else:
                    ratio = (1 - a_k) / (1 - a_ref)
                    limit[i] = ratio
            else:
                if a_k == 0:
                    limit[i] = np.inf
                else:
                    ratio = a_k / a_ref
                    limit[i] = ratio

    limit[is_nan] = np.nan
    return limit
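
# A small, hypothetical usage sketch for `calc_p_alpha_limits_pdf`: per-bin
# Poisson PMFs from scipy.stats stand in for the precomputed likelihoods
# `pdfs`; the values of `mu`, `ks` and `rel_std` below are illustrative only.
import numpy as np
from scipy.stats import poisson

mu = np.array([5.0, 20.0])
ks = [np.arange(30), np.arange(60)]                  # evaluated k values per bin
pdfs = [poisson.pmf(k, m) for k, m in zip(ks, mu)]   # normalized likelihoods
rel_std = np.array([[[0.5, 1.5]], [[0.5, 1.5]]])     # shape [n_bins, n_alpha, 2]

limits = calc_p_alpha_limits_pdf(pdfs, ks, mu, rel_std)
print(limits.shape)  # (2, 1, 2): lower/upper tail ratios per bin and alpha
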
Esempio n. 41
    def do_subtract_background(
        self,
        thres=None,
        back_dict=None,
    ):
        if len(self.raw_data_dict['timestamps']) == 1:
            pass
        else:
            x_filtered = []
            y_filtered = []
            for tt in range(len(self.raw_data_dict['timestamps'])):
                y = np.squeeze(self.plot_amp[tt])
                x = np.squeeze(self.plot_frequency)[tt]
                # print(self.plot_frequency)
                # [print(x.shape) for x in self.plot_frequency]
                # print(x)
                # print(y)
                # print(len(x),len(y))
                guess_dict = SlopedHangerFuncAmplitudeGuess(y, x)
                Q = guess_dict['Q']['value']
                f0 = guess_dict['f0']['value']
                df = 2 * f0 / Q
                fmin = f0 - df
                fmax = f0 + df
                indices = np.logical_or(x < fmin * 1e9, x > fmax * 1e9)

                x_filtered.append(x[indices])
                y_filtered.append(y[indices])
            self.background = pd.concat([
                pd.Series(y_filtered[tt], index=x_filtered[tt])
                for tt in range(len(self.raw_data_dict['timestamps']))
            ],
                                        axis=1).mean(axis=1)
            background_vals = self.background.reset_index().values
            freq = background_vals[:, 0]
            amp = background_vals[:, 1]
            # thres = 0.0065
            indices = amp < thres
            freq = freq[indices] * 1e-9
            amp = amp[indices]
            fit_fn = double_cos_linear_offset
            model = lmfit.Model(fit_fn)
            fit_yvals = amp
            fit_xvals = {'t': freq}
            # fit_guess_fn = double_cos_linear_offset_guess
            # guess_dict = fit_guess_fn(fit_yvals, **fit_xvals)
            for key, val in list(back_dict.items()):
                model.set_param_hint(key, **val)
            params = model.make_params()
            print(fit_xvals)
            fit_res = model.fit(fit_yvals, params=params, **fit_xvals)
            self.background_fit = fit_res

            for tt in range(len(self.raw_data_dict['timestamps'])):
                divide_vals = fit_fn(
                    np.squeeze(self.plot_frequency)[tt] * 1e-9,
                    **fit_res.best_values)
                self.plot_amp[tt] = np.array([
                    np.array([
                        np.divide(np.squeeze(self.plot_amp[tt]), divide_vals)
                    ])
                ]).transpose()
#   Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest, sys
sys.path.append("../")
import numpy as np

from test_logical_op import create_test_class

create_test_class('logical_and', lambda _a, _b: np.logical_and(_a, _b))
create_test_class('logical_or', lambda _a, _b: np.logical_or(_a, _b))
create_test_class('logical_not', lambda _a: np.logical_not(_a), False)

if __name__ == '__main__':
    unittest.main()
Esempio n. 43
# SEAK_2ndGrowth_noveg.GetRasterBand(1).ComputeStatistics(0)
# SEAK_2ndGrowth = None

## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##
# we also need to fix an issue where pixels whose values are not upland, but which
#  coincide with the harvest (second growth) area, should be converted to upland.
# SEAK_2ndGrowth = SEAK_2ndGrowth_noveg
TNF_cover_added = TNF_cover_added
TNF_cover_added_arr = TNF_cover_added.GetRasterBand(1).ReadAsArray()
TNF_cover_added_copy = np.copy(TNF_cover_added_arr)

SEAK_2ndGrowth = gdal.Open(os.path.join(file_path, 'SEAK_2ndGrowth.tif'),
                           gdal.GA_ReadOnly)
SEAK_2ndGrowth_arr = SEAK_2ndGrowth.GetRasterBand(1).ReadAsArray()

TNF_cover_added_copy[np.logical_and(np.logical_or(TNF_cover_added_arr > 1, TNF_cover_added_arr < 5),
                                    SEAK_2ndGrowth_arr > 0)] = 2  # convert harvested area to upland

driver = gdal.GetDriverByName('GTiff')
SEAK_2ndGrowth_upland = driver.CreateCopy(os.path.join(
    output_path, output_name.replace('.tif', '_SEAK_2ndGrowth_to_upland.tif')),
                                          SEAK_2ndGrowth,
                                          options=creation_options)

SEAK_2ndGrowth_upland.GetRasterBand(1).WriteArray(TNF_cover_added_copy)

## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ## ##

# ** Changed to final step prior to resampling. **
#  reclassify erroneous values in Saltwater
base_rst = SEAK_2ndGrowth_upland
Esempio n. 44
def merge_files(obs_files,
                out_files,
                days,
                dt,
                reftime,
                limits=None,
                clobber=True):
    """
    merge together a group of observation files into combined new files
    with observations that lie only within the corresponding dates

    Parameters
    ----------
    obs_files : list,
        List of files to merge together (a single file will work, it will
        just be filtered by the dates)
    out_files : list or string,
        list of the filenames to create for each of the output periods.
        If a single string is given, the character '#' will be replaced
        by the starting time of the observation (e.g. out_files="out_#.nc"
        will become out_03234.nc)
    days : list of tuples,
        List of starting and ending day numbers for each cycle to process.
        The first value is the start day, the second is the end day. The
        number of tuples is the number of files to output.
    dt : float,
        Time separation of observations. Observations that are less than
        dt apart in time will be set to the same time.
    reftime :
        Reference time used to process the observations. The merged files
        are now timed in relation to the beginning of the assimilation cycle
    limits : dict, optional
        Set the limits of the grid points that observations are allowed
        within, {'north':i, 'south':i, 'east':i, 'west':i }. As obs near
        the boundaries are not advisable, this allows you to specify the
        valid grid range to accept obs within.
    clobber: bool, optional
        If True, output files are overwritten. If False, they are skipped.

    Returns
    -------
    None

    Examples
    --------

    Put together three files into 5 separate files in two day intervals from
    day 10 through day 20:

    >>> merge_files(["obs_1.nc", "obs_2.nc", "obs_3.nc"], "new_#.nc",
                   [(i, i+2) for i in range(10, 20, 2)])

    Put together same three files into 3 overlapping separate files in five
    day intervals with one overlapping day:

    >>> merge_files(["obs_1.nc", "obs_2.nc", "obs_3.nc"], "new_#.nc",
                   [(i, i+5) for i in range(10, 20, 4)])

    """
    import re
    import os

    # Only unique files
    obs_files = set().union(seapy.flatten(obs_files))
    outtime = False
    if isinstance(out_files, str):
        outtime = True
        time = re.compile(r'\#')

    # Go through the files to determine which periods they cover
    myobs = list()
    sdays = list()
    edays = list()
    for file in obs_files:
        nc = seapy.netcdf(file)
        fdays = nc.variables['survey_time'][:]
        nc.close()
        l = np.where(
            np.logical_and(fdays >= np.min(days), fdays <= np.max(days)))[0]
        if not l.size:
            continue
        myobs.append(file)
        sdays.append(fdays[0])
        edays.append(fdays[-1])
    sdays = np.asarray(sdays)
    edays = np.asarray(edays)

    # Loop over the dates in pairs
    for n, t in track(enumerate(days),
                      total=len(days),
                      description="search files"):
        # Set output file name
        if outtime:
            outfile = time.sub("{:05d}".format(t[0]), out_files)
        else:
            outfile = out_files[n]

        if os.path.exists(outfile) and not clobber:
            continue

        # Find the files that cover the current period
        fidx = np.where(np.logical_and(sdays <= t[1], edays >= t[0]))[0]
        if not fidx.size:
            continue

        # Create new observations for this time period
        nobs = obs(myobs[fidx[0]])
        l = np.where(np.logical_or(nobs.time < t[0], nobs.time > t[1]))
        nobs.delete(l)
        for idx in fidx[1:]:
            o = obs(myobs[idx])
            l = np.where(np.logical_and(o.time >= t[0], o.time <= t[1]))
            nobs.add(o[l])
        # Remove any limits
        if limits is not None:
            l = np.where(
                np.logical_or.reduce(
                    (nobs.x < limits['west'], nobs.x > limits['east'],
                     nobs.y < limits['south'], nobs.y > limits['north'])))
            nobs.delete(l)
        # Make time relative to the assimilation window
        nobs.reftime = reftime
        #nobs.reftime = seapy.day2date(t[0],epoch=reftime)
        #nobs.time = abs(abs(nobs.time) - abs(t[0]))
        # Save out the new observations
        nobs.to_netcdf(outfile, dt=dt)

        pass
def SupplyandDemandOptimization(Building_Var_Inputs):    
    Internal_Start = timeit.default_timer()

    '''-----------------------------------------------------------------------------------------------'''
    ### Use the input variables to create an overall demand file and then call the required functions. #
    '''-----------------------------------------------------------------------------------------------'''
    # First create a dictionary of building input variables
    Building_Vars = {}
    for i in range(Num_Buildings): ## MODIFIED it was 21 and it wasn't a loop
        Building_Vars[i+1] = Building_Var_Inputs[i] ###### Building_Vars = number of each building type
    Engine_Var = Building_Var_Inputs[Num_Buildings]
    Chiller_Var = Building_Var_Inputs[Num_Buildings+1]
    Comm_Solar_Var = 0 #Building_Var_Inputs[Num_Buildings+2]
    if CWWTP_Mode == 0:
        WWT_Var = Building_Var_Inputs[Num_Buildings+2]
    else:
        WWT_Var = 3
#    Comm_Solar_Type_Var = 1 ## ?? WHAT IS IT? Only the first solar type is used! NOT OPTIMIZING FOR SOLAR PANEL TYPE

    '''-----------------------------------------------------------------------------------------------'''
    ## Trivial Case Avoidance
    '''-----------------------------------------------------------------------------------------------'''
    if np.sum(Building_Var_Inputs[:Num_Buildings]) == 0: ## TRIVIAL CASE AVOIDANCE
        Run_Result = np.zeros((1,Vars_Plus_Output))
        Run_Result[0][Num_Buildings] = Engine_Var # i.e. element 21
        Run_Result[0][Num_Buildings+1] = Chiller_Var # i.e. element 22
        Run_Result[0][Num_Buildings+2] = Comm_Solar_Var # i.e. element 23
        Run_Result[0][Num_Buildings+3] = WWT_Var # i.e. element 24
        return ((0, 0,),
        ((Max_Site_GFA-0)/Max_Site_GFA,
         (0-Min_GFA)/Min_GFA,
         (Max_GFA-0)/Max_GFA, ), Run_Result) # Update based on whatever needs to be optimized



    # Use the Building_Vars dictionary and the dictionary of demands to create an aggregate function of demand
    # Note that the Diversifier Peak is an assumption
    Diversifier_Peak = 0.8 ## ??
    Aggregate_Demand = 0
    for i in range(Num_Buildings):
        j = i+1
        Aggregate_Demand += Diversifier_Peak*(Building_Vars[j]*Demand_Types[j][:,0:4]) ## MODIFIED for water demand+syntax shortened (columnstack replaced)
    
    
    '''-----------------------------------------------------------------------------------------------'''
    ### Adding the municipal demands to the created aggregate demands #
    '''-----------------------------------------------------------------------------------------------'''
    # Calculate total length and width of building sites
    Total_Site_Length = 0
    Total_Site_Width = 0
    for i in range(Num_Buildings):
        j = i+1
        Total_Site_Length += Building_Vars[j]*Site_Dimensions[j][0]
        Total_Site_Width += Building_Vars[j]*Site_Dimensions[j][1]

    # Add in municipal loads # MODIFIED--WAS ERRONEOUS BEFORE
    Curfew_Modifier = 0.50
    Light_Spacing = 48.8        # m
    Lights_Per_Side = 2
    Light_Power = .190           # kW
    Width_to_Length_Ratio = 1/8
    
    hours = np.array(range(8760))
    hours %= 24
    hours_lights_on = np.logical_or(((hours >= 19) * (hours <= 23)), ((hours >= 0) * (hours <= 6)))
    hours_lights_half_power = ((hours >= 2) * (hours <= 6))*(1-Curfew_Modifier)
    ## hours_lights_on-hours_lights_half_power results in 1 for hours with lights on, and curfew_modifier for half-powered hours
    Aggregate_Demand[:,0] += (hours_lights_on-hours_lights_half_power)*(np.ceil((Total_Site_Length+Width_to_Length_Ratio*Total_Site_Width)/Light_Spacing)*Lights_Per_Side*Light_Power)
    

    # Save the loads at this point for use later
    Final_Demand = copy.deepcopy(Aggregate_Demand)


    '''-----------------------------------------------------------------------------------------------'''
    ### Initiate TES based on the max raw hourly thermal demand #
    '''-----------------------------------------------------------------------------------------------'''
    TES_Max = np.max(Aggregate_Demand[:,1]) * TES_Max_Hours ## Storage capacity = TES_Max_Hours hours x peak annual hour heat load
    TES_Capex = 95*TES_Max/1000 * USD_2008_to_2019 # In 2019 USD # Averaged based on Table 8 from Cost for Sensible and other heat storage... @ D:\PhD\+Main Research Directory\W&WW+low-heat applications\+++ TES

    '''-----------------------------------------------------------------------------------------------'''
    ### Adding the losses to the demands #
    '''-----------------------------------------------------------------------------------------------'''
    Heat_Loss = 0.003               # kW/m
    Cooling_Loss = 0.017            # kW/m
    Electrical_Loss = 0.8568*0.06  # Decimal

    ''' See ISSST paper for losses on thermal side. For electrical, data is a combination of 6% loss on average
        in the U.S. and calculations by Mungkung, et al. on the percentage makeup of those losses at the low
        voltage level. References are:
            Munkung, et al.: http://www.wseas.us/e-library/conferences/2009/istanbul/TELE-INFO/TELE-INFO-02.pdf
            EIA: http://www.eia.gov/tools/faqs/faq.cfm?id=105&t=3
    '''
    ## MODIFIED: For loop -> in-place conversion
    Aggregate_Demand[:,0] += Aggregate_Demand[:,0]*Electrical_Loss
    Aggregate_Demand[:,1] += (Total_Site_Length+Total_Site_Width)*2*Heat_Loss*np.ones(len(Aggregate_Demand[:,0]))
    Aggregate_Demand[:,2] += (Total_Site_Length+Total_Site_Width)*2*Cooling_Loss*np.ones(len(Aggregate_Demand[:,0]))

    '''-----------------------------------------------------------------------------------------------'''
    ### Adding the chiller electrical/thermal demand to the aggregate electrical and thermal demands #
    '''-----------------------------------------------------------------------------------------------'''
#    Chiller_Hourly_Cooling_Results = np.zeros((8760)) ## MODIFIED for performance
    Chiller_COP_Results = np.zeros((8760)) ## MODIFIED for performance
#    UNUSED: Electrical_Demand = np.zeros((8760)) ## MODIFIED for performance
    Chiller_Costs = np.zeros((8760)) ## MODIFIED for performance

    Chilled_Water_Supply_Temperature = 44.0 # in deg F ## WHERE DID THIS COME FROM?
    Number_Iterations = 1 ## why??
    Heat_Source_Temperature = 100 ## And this? IS it in deg C or F?? It's in deg F

    Engine_Demand = np.zeros(shape=(8760,2))

    for i in range(len(Aggregate_Demand[:,0])):
        Hourly_Chiller_Result = Chiller_Types[Chiller_Var](Chilled_Water_Supply_Temperature, Hourly_Wet_Bulb[i]*9/5+32, Hourly_Temperature[i]*9/5+32, Aggregate_Demand[i,2], Number_Iterations, Heat_Source_Temperature)[0:6]
#        Chiller_Hourly_Cooling_Results[i] = Hourly_Chiller_Result[3] ## UNUSED
        Chiller_COP_Results[i] = Hourly_Chiller_Result[4] # MODIFIED
        Chiller_Costs[i] = Hourly_Chiller_Result[5] # MODIFIED
        Engine_Demand[i,0] = Aggregate_Demand[i,0]+Hourly_Chiller_Result[1]
        Engine_Demand[i,1] = Aggregate_Demand[i,1]+Hourly_Chiller_Result[2]
        

    
    ## Creating the total energy and wastewater demand for the neighborhood (used for comparing neighborhoods)
    Total_Energy_Demand = np.sum(Engine_Demand[:,0]) + np.sum(Engine_Demand[:,1])
    Total_WWater_Demand = np.sum(Aggregate_Demand[:,3])
    
    
    
    # additional vars: Hourly_WWT_Results (use later), WWT_Var (add to optimization vars)
    # additional functions: WWT_Types
    '''-----------------------------------------------------------------------------------------------'''
    ### Adding the GW treatment electrical/thermal demand to the aggregate electrical and thermal demands #
    '''-----------------------------------------------------------------------------------------------'''
    if CWWTP_Mode == 0:
        Hourly_WWT_Results = WWT_Types[WWT_Var](Aggregate_Demand[:,3], Hourly_Temperature)
    else:
        Hourly_WWT_Results = WWT_Types[WWT_Var](Aggregate_Demand[:,3], Hourly_Temperature, Grid_Emissions)
    Engine_Demand[:,0] += Hourly_WWT_Results[0]
    Engine_Demand[:,1] += Hourly_WWT_Results[1]
    WWT_Opex_Total = Hourly_WWT_Results[2] ## Annual value
    WWT_Capex_Total = Hourly_WWT_Results[3] ## Annual value
    if CWWTP_Mode == 0:
        WWT_GHG = 0
    else:
        WWT_GHG = Hourly_WWT_Results[4]
    
    
    
    '''-----------------------------------------------------------------------------------------------'''
    ### Solar Production #
    '''-----------------------------------------------------------------------------------------------'''

    Excess_Electricity = np.zeros((8760)) ## Originally: grid_sales
    Capital_Solar_Cost = 0



    # Calculate loads and subtract from total electrical demand; calculate costs and total solar capacity installed
    [Hourly_Solar_Generation, Capital_Solar_Cost] = [0,0]#Commercial_Solar_Types[Comm_Solar_Type_Var](np.array(range(8760)), UTC, Comm_Solar_Area, Tilt, Azimuth, Latitude, Longitude, Hourly_DNI, Hourly_DHI, Hourly_GHI, Hourly_Albedo, Hourly_Temperature, Hourly_Wind_Speed, Site_Altitude)[3:5]
    Engine_Demand[:,0] -= Hourly_Solar_Generation
    Excess_Electricity = np.abs((Engine_Demand[:,0] < 0) * Engine_Demand[:,0]) # Excess electricity no. 1
    Engine_Demand[:,0] += Excess_Electricity ## Hours with excess electricity are zeroed to avoid erroneous calculation in the CHPEngines.py with a negative Engine_Demand[i,0]
    

    # Save the loads with a different name at this point for use later
    Post_Solar_Demand = copy.deepcopy(Engine_Demand)


    '''-----------------------------------------------------------------------------------------------'''
    ### Run the CHP engine with the demands + use the excess heat for ww treatment #
    '''-----------------------------------------------------------------------------------------------'''
    # Now run a control scheme that simply produces to the greatest demand and counts excess as waste
    Power_to_Heat_Ratio = Power_to_Heat[Engine_Var]
    Gas_Line_Pressure = 55.0

    Fuel_Input_Results = np.zeros((8760))

    CCHP_Capex = 0 # in $
    CCHP_Opex = 0
    Carbon_Emissions = np.zeros(8760) ## CHANGED TO ARRAY FOLLOWING IILP_TOY_OPT 
    Last_Part_Load = 0
    Last_Num_Engines = 0
    Excess_Heat = np.zeros((8760)) ## CHANGED TO ARRAY FOLLOWING IILP_TOY_OPT 
    TES = np.zeros((8760)) ## Thermal Energy Storage
    

    
    ## For the previous version of the code in which only the excess heat was used in the WWT, refer to Ch3_SF_CaseStudy_w_Storage_PreE_Consumption_for_WWT
    for i in range(len(Engine_Demand[:,0])): ## MODIFIED: repetitive code excluded from the first if else
        TES[i] = Hourly_TES_Coeff * TES[i-1] ## Depreciating the previous time-step's stored energy; each timestep is defined as 300s ## NOTE: CAPITAL AND O&M for TES is not included yet!
        if Engine_Demand[i,1] < TES[i]: # More Stored heat than needed
            TES[i] -= Engine_Demand[i,1]
            Engine_Demand[i,1] = 0
        else: # All the stored heat should be used and we'll need extra heat from the CCHP
            Engine_Demand[i,1] -= TES[i]
            TES[i] = 0
        Test_Electricity = Engine_Demand[i,1]*Power_to_Heat_Ratio ## Electrical equivalent of the heat demand
        if Engine_Demand[i,0] > Test_Electricity: ## heat is not the controlling load; produce electricity to supply the engine-demand --> We'll have excess heat
            Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Engine_Demand[i,0], Last_Num_Engines, Last_Part_Load)
            Last_Num_Engines = Hourly_Supply_Result[7]
            Last_Part_Load = Hourly_Supply_Result[8]
            if Hourly_Supply_Result[2] < Engine_Demand[i,1]: ## Checking the produced heat with the required heat ## HOW IS IT POSSIBLE?
                Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Test_Electricity, Last_Num_Engines, Last_Part_Load)
                Last_Num_Engines = Hourly_Supply_Result[7]
                Last_Part_Load = Hourly_Supply_Result[8]
        else: ## Heat is the controlling load, produce to satisfy the heat, we'll have excess electricity
            Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Test_Electricity, Last_Num_Engines, Last_Part_Load)
            Last_Num_Engines = Hourly_Supply_Result[7]
            Last_Part_Load = Hourly_Supply_Result[8]
            if Hourly_Supply_Result[3] < Engine_Demand[i,0]: ## Checking electricity with the existing demand ## HOW IS IT POSSIBLE? ## We'll have excess heat
                Hourly_Supply_Result = Supply_Types[Engine_Var](Site_Altitude, Hourly_Temperature[i], Gas_Line_Pressure, Engine_Demand[i,0], Last_Num_Engines, Last_Part_Load)
                Last_Num_Engines = Hourly_Supply_Result[7]
                Last_Part_Load = Hourly_Supply_Result[8]
        
        ## Isn't the if statement below associated with the if statement 24 lines above????
        if Hourly_Supply_Result[2] > Engine_Demand[i,1]: # If produced heat > required heat, i.e. we have excess heat
            Excess_Heat[i] = Hourly_Supply_Result[2] - Engine_Demand[i,1]
            TES[i] = min(TES[i] + Excess_Heat[i], TES_Max) ## Formula from the 'storage formula' from "A comparison of TES models for bldg energy system opt" ## Assuming 100% energy storage efficiency
            Engine_Demand[i,1] = 0 ## following line 3085 of IILP_Toy_Optimization.py # Engine electric demand is responded to and it's zeroed
            ## ^ Why?? It's consumed and subtracted from the remaining demand similar to L 599
            
            # Hourly_Supply_Result[2] = 0 # Seems wrong
        
        
        Fuel_Input_Results[i] = Hourly_Supply_Result[0]/kWh_to_Btu ## a little problematic: we're only considering the energy content of the input fuel # kWh fuel
        CCHP_Capex = max(CCHP_Capex, Hourly_Supply_Result[4])
        CCHP_Opex += Hourly_Supply_Result[5] # CCHP Opex added 10 lines below
        Carbon_Emissions[i] += Hourly_Supply_Result[6] # in lbs
        
        ## Added to include grid sales after solar excess has been subtracted from demand above ^
        Engine_Demand[i,0] -= Hourly_Supply_Result[3]
        ## Isn't the if statement below associated with the else statement 50 lines above????
        if Engine_Demand[i,0] < 0:
            Excess_Electricity[i] += abs(Engine_Demand[i,0]) # Excess electricity number 2
            Engine_Demand[i,0] = 0



    '''-----------------------------------------------------------------------------------------------'''
    ### Calculate the overall efficiency and the hourly efficiencies. #
    '''-----------------------------------------------------------------------------------------------'''
    # Calculate values for constraints
    Total_GFA = 0
    Total_Site_GFA = 0
    Total_Buildings = 0
    Total_Height = 0
    Type_Max = int(np.max(Building_Info[:,7])) - 1 ## SHOULD THE TYPES BE SORTED IN THE FILE THEN? ## What's with the minus 1?? to avoid adding zero for mixed type -> they're separated into their components and added to the respective categories
    Type_Totals = {}
    Type_Areas = {}
    Type_Building_Percents = {}
    Type_Area_Percents = {}

    for i in range(Type_Max):
        j = i+1
        Type_Totals[j] = 0
        Type_Areas[j] = 0

    for i in range(Num_Buildings):
        j = i+1
        Total_GFA += Building_Vars[j]*GFA[j]
        Total_Site_GFA += Building_Vars[j]*Site_GFA[j]
        Total_Buildings += Building_Vars[j]
        Total_Height += Building_Vars[j]*Height[j]
        ## The mixed types are divided into their separate types (Res, Comm, and Off) and added to the respective areas
        if j == Num_Buildings-1:
            Type_Totals[3] += 1/Stories[j]*Building_Vars[j]
            Type_Totals[1] += 1/Stories[j]*(Stories[j]-1)*Building_Vars[j]
            Type_Areas[3] += 1/Stories[j]*Building_Vars[j]*GFA[j]
            Type_Areas[1] += 1/Stories[j]*(Stories[j]-1)*Building_Vars[j]*GFA[j]
        elif j == Num_Buildings:
            Type_Totals[3] += 1/Stories[j]*Building_Vars[j]
            Type_Totals[2] += 1/Stories[j]*(Stories[j]-1)*Building_Vars[j]
            Type_Areas[3] += 1/Stories[j]*Building_Vars[j]*GFA[j]
            Type_Areas[2] += 1/Stories[j]*(Stories[j]-1)*Building_Vars[j]*GFA[j]
        else:
            Type_Totals[Type[j]] = Type_Totals[Type[j]]+Building_Vars[j]
            Type_Areas[Type[j]] = Type_Areas[Type[j]]+GFA[j]*Building_Vars[j]
        

    Site_FAR = np.nan_to_num(Total_GFA/Total_Site_GFA) ## MODIFIED: nan_to_num added
    Average_Height = np.nan_to_num(Total_Height/Total_Buildings) ## MODIFIED: nan_to_num added

    for i in range(Type_Max):
        j = i+1
        Type_Building_Percents[j] = np.nan_to_num(Type_Totals[j]/Total_Buildings) ## MODIFIED: nan_to_num added
        Type_Area_Percents[j] = np.nan_to_num(Type_Areas[j]/Total_GFA) # In percents  ## MODIFIED: nan_to_num added

    # To find Final Demand, add the actual heat or electricity used to do real cooling work
    # on the buildings and calculate total hourly demand and efficiency, and keep a running
    # total of fuel use and demand
    Useful_Demand = np.zeros((8760))
    Total_Demand = 0.0
    CHP_Demand = 0.0 ## ADDED
    Total_Fuel = 0.0
    CHP_Fuel = 0.0 ## ADDED


    ## FOR LOOPS converted to DIRECT NUMPY ARRAY MANIPULATIONS
    COP_Flag = (Chiller_COP_Results > 1.0) ## WHY?
    Useful_Demand = (Final_Demand[:,0] + Final_Demand[:,1] +
                     COP_Flag * Final_Demand[:,2]/Chiller_COP_Results +
                     (1-COP_Flag) * Final_Demand[:,2])

    Total_Demand = np.sum(Useful_Demand)
    CHP_Demand = np.sum(Post_Solar_Demand[:,0] + Post_Solar_Demand[:,1]) # + Hourly_GW_Treated*FO_MD_Power_per_m3/1000) ## ADDED ## REMOVED!
    Total_Fuel = np.sum(Fuel_Input_Results) ## MODIFIED

    CHP_Fuel = np.sum(Fuel_Input_Results) ## ADDED


    if Total_Buildings > 0: ## REFER TO PAGE 79 of the thesis for the explanation 
        Overall_Efficiency = np.nan_to_num(Total_Demand/Total_Fuel) ## Added the nan_to_number ## Note: total demand = raw demand + solar power and neglecting the excess electricity from PVs and CHP
        Overall_CHP_Efficiency = np.nan_to_num(CHP_Demand/CHP_Fuel)
    else:
        Overall_Efficiency = 0
        Overall_CHP_Efficiency = 0

#    GW_Efficiency = np.nan_to_num(Total_Treated_GW/Total_GW) ## Added the nan_to_number
    
    # Total Capex # ADDED/ MODIFIED 
    if CWWTP_Mode == 0:
        Capex = CCHP_Capex + np.max(Chiller_Costs) + WWT_Capex_Total + Capital_Solar_Cost + TES_Capex # MODIFIED: Solar added ##MODIFIED # Optimization objective
    else:
        Capex = CCHP_Capex + np.max(Chiller_Costs) + Capital_Solar_Cost + TES_Capex # MODIFIED: Solar added ##MODIFIED # Optimization objective
    Total_Capex = CCHP_Capex + np.max(Chiller_Costs) + WWT_Capex_Total + Capital_Solar_Cost + TES_Capex # MODIFIED: Solar added ##MODIFIED # For recording
    ## Added: sell price usage
    
    
    # Carbon emissions # ADDED/ MODIFIED 
    Construction_Carbon = (Capex / USD_2007_to_2019)/10**6 * Const_Carbon_per_Mil_Dollar # in metric tons CO2 eq # Optimization objective
    Total_Construction_Carbon = (Total_Capex / USD_2007_to_2019)/10**6 * Const_Carbon_per_Mil_Dollar # in metric tons CO2 eq # For recording
    
    Annual_Carbon_Emissions = np.sum(Carbon_Emissions)/MT_to_lbs # in metric tons CO2e # Optimization objective
    Total_Annual_Carbon_Emissions = np.sum(Carbon_Emissions)/MT_to_lbs + WWT_GHG # in metric tons CO2e # For recording
    
    Years = np.arange(Current_Year, Current_Year+Project_Life) # ADDED/ MODIFIED 
    Annual_SCC = 0.8018*Years - 1585.7 # ADDED/ MODIFIED # in 2007 $ per metric tons of CO2
    
    SCC = (Construction_Carbon * Annual_SCC[0] + np.sum(Annual_SCC * Annual_Carbon_Emissions)) * USD_2007_to_2019 # ADDED/ MODIFIED # in 2019 $
    Total_SCC = (Total_Construction_Carbon * Annual_SCC[0] + np.sum(Annual_SCC * Total_Annual_Carbon_Emissions)) * USD_2007_to_2019 # ADDED/ MODIFIED # in 2019 $
    Total_Carbon = Project_Life * Total_Annual_Carbon_Emissions + Total_Construction_Carbon # ADDED/ MODIFIED in metric tons CO2e




    LCC = Capex
    LCC_Total = Total_Capex
    CCHP_Opex -= np.sum(Sell_Price*Excess_Electricity) ## Including the sales price
    for i in range(Project_Life):
        if CWWTP_Mode == 0:
            LCC += (CCHP_Opex+WWT_Opex_Total)/(1+Discount_Rate)**i # Optimization objective
        else:
            LCC += CCHP_Opex/(1+Discount_Rate)**i # Optimization objective
        LCC_Total += (CCHP_Opex+WWT_Opex_Total)/(1+Discount_Rate)**i # Total LCC for the record
    


    '''-----------------------------------------------------------------------------------------------'''
    ### Creating the outputs #
    '''-----------------------------------------------------------------------------------------------'''
    Internal_Stop = timeit.default_timer()
    Internal_Time = Internal_Stop-Internal_Start
    Run_Result = np.zeros((1,Vars_Plus_Output))
    # Add the variables first ## DESCRIPTIONS ADDED FROM IILP_TOY_OOPT
    for i in range(Num_Buildings): # i.e. elements 0 to 20
        Run_Result[0][i] = Building_Var_Inputs[i]
    Run_Result[0][Num_Buildings] = Engine_Var # i.e. element 21
    Run_Result[0][Num_Buildings+1] = Chiller_Var # i.e. element 22
    Run_Result[0][Num_Buildings+2] = Comm_Solar_Var # i.e. element 23
    Run_Result[0][Num_Buildings+3] = WWT_Var # i.e. element 24
    # Now the objectives
    Run_Result[0][Num_Buildings+4] = Overall_Efficiency # i.e. element 25
    Run_Result[0][Num_Buildings+5] = LCC_Total # i.e. element 26
    Run_Result[0][Num_Buildings+6] = Total_SCC # i.e. element 27 # in $
    Run_Result[0][Num_Buildings+7] = Overall_CHP_Efficiency # i.e. element 28
    Run_Result[0][Num_Buildings+7+Type_Max+4] = Total_Carbon # i.e. element 39 # in metric tons CO2e
    # Now the constraint values
    for i in range(Type_Max):
        j = i+1
        Run_Result[0][Num_Buildings+7+j] = Type_Area_Percents[j] # i.e. element 29 to 35
    Run_Result[0][Num_Buildings+7+Type_Max+1] = Site_FAR # i.e. element 36
    Run_Result[0][Num_Buildings+7+Type_Max+2] = Average_Height # i.e. element 37 # in ft!!
    Run_Result[0][Num_Buildings+7+Type_Max+3] = Total_GFA # i.e. element 38 # in m2
    # Now add the watch variables
    Run_Result[0][Num_Buildings+7+Type_Max+5] = Total_Energy_Demand #Old: Total_Excess_Electricity ## originally: Total_Capex # i.e. element 40
    Run_Result[0][Num_Buildings+7+Type_Max+6] = Total_WWater_Demand #Old: Total_Excess_Heat ## originally: CCHP_Opex # i.e. element 41 # in L


    # Return format: fitness values = return[0]; constraint values = return[1]; Results = np.append(Results, [fit[2]], axis=0)
    ## ALL THE CONSTRAINT VALUES ARE NORMALIZED to 0 to 1 range concerning the method of comparing two infeasible individuals in fitness_with_constraints [Look at def dominates(self)]
    if Total_GFA == 0: # Added for avoiding division by zero in the objectives (below) for trivial solutions
        Total_GFA = 0.00001
    return ((LCC/Total_GFA, SCC/Total_GFA, ),
            ((Max_Site_GFA-Total_Site_GFA),
             (Total_GFA-Min_GFA),
             (Max_GFA-Total_GFA), ),Run_Result)
Esempio n. 46
def findSquare(vi: int, vert_type: np.ndarray, vert_index: np.ndarray,
               vert_color: np.ndarray, voxel_model_array: np.ndarray, x: int,
               y: int, z: int, dx: int, dy: int, dz: int):
    """
    Find the largest square starting from a given point and generate the corresponding points and tris.

    Args:
        vi: Current vertex index
        vert_type: Array of vertex types
        vert_index: Array of vertex indices
        vert_color: Array of vertex colors
        voxel_model_array: Voxel data array
        x: Target voxel X
        y: Target voxel Y
        z: Target voxel Z
        dx: Square search step in X
        dy: Square search step in Y
        dz: Square search step in Z

    Returns:
        vi, vert_type, vert_index, new_verts, new_colors, new_tris, new_quads
    """
    x_len, y_len, z_len = vert_type.shape
    new_verts = []
    new_colors = []
    new_tris = []
    new_quads = []

    vert_on_surface = (vert_type[x, y, z] == 1 or vert_type[x, y, z] == 2)
    if vert_on_surface and x + dx < x_len and y + dy < y_len and z + dz < z_len:  # Point is a face vertex and next point is in bounds
        xn = x
        yn = y
        zn = z

        for i in range(1, max(
                x_len - x, y_len - y, z_len -
                z)):  # See if a square can be found starting at this point
            xn = x + dx * i
            yn = y + dy * i
            zn = z + dz * i

            # Check if endpoint is in bounds
            in_range = [xn < x_len, yn < y_len, zn < z_len]

            if not np.all(np.array(in_range)):
                xn = x + dx * (i - 1)
                yn = y + dy * (i - 1)
                zn = z + dz * (i - 1)
                break

            face = (vert_type[x:xn + 1, y:yn + 1, z:zn + 1] == 1)
            blocking = (vert_type[x:xn + 1, y:yn + 1, z:zn + 1] == 2)
            on_surface = np.logical_or(face, blocking)

            # Check if square includes only surface vertices
            if not np.all(on_surface):
                xn = x + dx * (i - 1)
                yn = y + dy * (i - 1)
                zn = z + dz * (i - 1)
                break

            # Check if square includes any blocking vertices
            if np.any(blocking):
                break

        square = None

        # Determine vert coords based on search direction
        if xn > x and yn > y and zn == z:
            # vert_type[x:xn+1, y:yn+1, z] = 1 # Type 1 = occupied/exterior
            vert_type[x + 1:xn, y + 1:yn,
                      z] = 3  # Type 3 = interior/already included in a square

            vx_pos = False
            vx_neg = False
            if z - 1 >= 0:
                vx_neg = (voxel_model_array[x, y, z - 1] != 0)
            if z < z_len - 1:
                vx_pos = (voxel_model_array[x, y, z] != 0)

            if vx_pos and not vx_neg:  # CW
                square = [[x, y, z], [x, yn, z], [xn, y, z], [xn, yn, z]]
            elif vx_neg and not vx_pos:  # CCW
                square = [[x, y, z], [xn, y, z], [x, yn, z], [xn, yn, z]]
            else:  # Interior face -- can occur with certain small features
                square = None

        elif xn > x and yn == y and zn > z:
            # vert_type[x:xn+1, y, z:zn+1] = 1 # Type 1 = occupied/exterior
            vert_type[
                x + 1:xn, y,
                z + 1:zn] = 3  # Type 3 = interior/already included in a square

            vx_pos = False
            vx_neg = False
            if y - 1 >= 0:
                vx_neg = (voxel_model_array[x, y - 1, z] != 0)
            if y < y_len - 1:
                vx_pos = (voxel_model_array[x, y, z] != 0)

            if vx_pos and not vx_neg:  # CW
                square = [[x, y, z], [xn, y, z], [x, y, zn], [xn, y, zn]]
            elif vx_neg and not vx_pos:  # CCW
                square = [[x, y, z], [x, y, zn], [xn, y, z], [xn, y, zn]]
            else:  # Interior face -- can occur with certain small features
                square = None

        elif xn == x and yn > y and zn > z:
            # vert_type[x, y:yn+1, z:zn+1] = 1 # Type 1 = occupied/exterior
            vert_type[
                x, y + 1:yn,
                z + 1:zn] = 3  # Type 3 = interior/already included in a square

            vx_pos = False
            vx_neg = False
            if x - 1 >= 0:
                vx_neg = (voxel_model_array[x - 1, y, z] != 0)
            if x < x_len - 1:
                vx_pos = (voxel_model_array[x, y, z] != 0)

            if vx_pos and not vx_neg:  # CW
                square = [[x, y, z], [x, y, zn], [x, yn, z], [x, yn, zn]]
            elif vx_neg and not vx_pos:  # CCW
                square = [[x, y, z], [x, yn, z], [x, y, zn], [x, yn, zn]]
            else:  # Interior face -- can occur with certain small features
                square = None

        # Add verts, tris, quads, and colors
        if square is not None:
            p = []
            for i in range(len(square)):
                new_vi = vert_index[square[i][0], square[i][1], square[i][2]]
                if new_vi == -1:
                    new_verts.append(square[i])
                    new_colors.append(vert_color[square[i][0], square[i][1],
                                                 square[i][2]])
                    vert_index[square[i][0], square[i][1], square[i][2]] = vi
                    p.append(vi)
                    vi = vi + 1
                else:
                    p.append(new_vi)

            new_tris.append([p[0], p[1], p[2]])
            new_tris.append([p[3], p[2], p[1]])
            new_quads.append([p[0], p[1], p[3], p[2]])

    return vi, vert_type, vert_index, new_verts, new_colors, new_tris, new_quads
Esempio n. 47
def pamhRt(sps, ptype, pparms=[]):
    """
	PAM normalized matched filter (MF) receiver filter
	h_R(t) = h_R(n*TB/sps) generation
	>>>>> hRt = pamhRt(sps, ptype, pparms) <<<<<
	where sps:
		ptype: pulse type from list
			('man', 'rcf', 'rect', 'rrcf', 'sinc', 'tri')
		pparms not used for 'man', 'rect', 'tri'
		pparms = [k, alpha] for 'rcf', 'rrcf'
		pparms = [k, beta] for 'sinc'
		k: "tail" truncation parameter for 'rcf', 'rrcf', 'sinc'
			(truncates p(t) to -k*sps <= n < k*sps)
		alpha: Rolloff parameter for 'rcf', 'rrcf', 0 <= alpha <= 1
		beta: Kaiser window parameter for 'sinc'
		hRt: MF impulse response h_R(t) at t=n*TB/sps
	Note: In terms of sampling rate Fs and baud rate FB,
		sps = Fs/FB
	"""
    if ptype.lower() == 'man':
        nn = np.arange(-sps / 2, sps / 2)
        # Need 1 then -1
        pt = np.ones(len(nn))

        ix = np.where(nn >= 0)[0]
        pt[ix] = -pt[ix]

        #ix1 = np.where(nn < len(nn) / 2)[0]
        #ix2 = np.where(nn >= len(nn) / 2)[0]
        #pt[ix1] = 1
        #pt[ix2] = -1
    elif ptype.lower() == 'rcf':

        nk = round(pparms[0] * sps)
        nn = np.arange(-nk, nk)
        pt = np.sinc(nn / float(sps))  # sinc pulse
        if len(pparms) > 1:
            p2t = 0.25 * np.pi * np.ones(len(nn))
            ix = np.where(
                np.power(2 * pparms[1] * nn / float(sps), 2.0) != 1)[0]
            p2t[ix] = np.cos(np.pi * pparms[1] * nn[ix] / float(sps))
            p2t[ix] = p2t[ix] / (
                1 - np.power(2 * pparms[1] * nn[ix] / float(sps), 2.0))
            pt = pt * p2t

    elif ptype.lower() == 'rect':

        nn = np.arange(-sps / 2, sps / 2)
        pt = np.ones(len(nn))

    elif ptype.lower() == 'rrcf':

        nk = round(pparms[0] * sps)
        nn = np.arange(-nk, nk)
        pt = np.zeros(len(nn))
        ix1 = np.where(nn == 0)[0]
        ix2 = np.where(
            np.logical_or(nn == -sps / (4 * pparms[1]),
                          nn == sps / (4 * pparms[1])))[0]
        ix3 = np.where(
            np.logical_and(
                np.logical_and(nn != -sps / (4 * pparms[1]),
                               nn != sps / (4 * pparms[1])), nn != 0))[0]
        pt[ix1] = 1 - pparms[1] + 4 * pparms[1] / np.pi
        pt[ix2] = pparms[1] / np.sqrt(2) * (
            (1 + 2 / np.pi) * np.sin(np.pi / (4 * pparms[1])) +
            (1 - 2 / np.pi) * np.cos(np.pi / (4 * pparms[1])))
        pt[ix3] = (np.sin((1 - pparms[1]) * np.pi * nn[ix3] / float(sps)) +
                   4 * pparms[1] * nn[ix3] / float(sps) * np.cos(
                       (1 + pparms[1]) * np.pi * nn[ix3] / float(sps))) / (
                           np.pi *
                           (1 - (4 * pparms[1] * nn[ix3] / float(sps))**2) *
                           nn[ix3] / float(sps))

    elif ptype.lower() == 'sinc':

        nk = round(pparms[0] * sps)
        nn = np.arange(-nk, nk)
        pt = np.sinc(nn / float(sps))
        if len(pparms) > 1:
            pt = pt * np.kaiser(len(pt), pparms[1])

    elif ptype.lower() == 'tri':

        nn = np.arange(-sps, sps)
        pt = np.zeros(len(nn))
        ix = np.where(nn < 0)[0]
        pt[ix] = 1 + nn[ix] / float(sps)
        ix = np.where(nn >= 0)[0]
        pt[ix] = 1 - nn[ix] / float(sps)

    else:
        pt = np.ones(1)

    pt = pt / (np.sum(pt**2))
    return pt
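
# A short usage sketch with illustrative parameters: a root-raised-cosine
# ('rrcf') matched filter at sps=8 samples per baud, truncated at k=10 bauds,
# with rolloff alpha=0.25.
import numpy as np

hRt = pamhRt(8, 'rrcf', [10, 0.25])
print(hRt.shape)       # (160,) -> 2*k*sps taps
print(np.argmax(hRt))  # 80 -> peak at the filter center (n = 0)
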
Esempio n. 48
print("Click on the top left corner of the area.")

root.mainloop()

time.sleep(2)

template_matcher = MultiScaleTemplateMatcher(
    template=cv2.Canny(cv2.imread("img/template.png", 0), 50, 200))
last_click_positions = deque([(0, 0)] * CLICK_MEMORY_SIZE)

while not QUIT:
    img = np.array(
        pyautogui.screenshot())[CAPTURE_AREA[0][1]:CAPTURE_AREA[1][1],
                                CAPTURE_AREA[0][0]:CAPTURE_AREA[1][0], :]

    img = np.where(np.logical_or(img[:, :, 0] < 180, img[:, :, 0] > 220), 0,
                   255).astype(np.uint8)

    rectangle = template_matcher.match_image(img)

    if rectangle:
        center = (rectangle[0][0] + (rectangle[1][0] - rectangle[0][0]) // 2,
                  rectangle[0][1] + (rectangle[1][1] - rectangle[0][1]) // 2)

        x, y = CAPTURE_AREA[0][0] + center[0], CAPTURE_AREA[0][1] + center[1]

        if not was_position_already_clicked((x, y)):
            # Correct the mouse cursor to point to the target
            x = x - 18
            y = y - 25
Esempio n. 49
def noslip_boundary(x):
    return np.logical_or(
        np.logical_or(np.isclose(x[0], 0.0), np.isclose(x[0], 1.0)),
        np.isclose(x[1], 0.0))
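
# A minimal numpy-only check of this marker on a (gdim, n_points) coordinate
# array, the layout such geometric markers usually receive (e.g. from FEniCSx);
# the sample points below are illustrative.
import numpy as np

pts = np.array([[0.0, 0.5, 1.0, 0.3],   # x coordinates
                [0.2, 0.0, 0.7, 0.4]])  # y coordinates
print(noslip_boundary(pts))  # -> [ True  True  True False]
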
def log_uv_histogram_wrapped(im, mask, conf):
    num_bins = conf['num_bins']
    bin_size = conf['bin_size']
    starting_uv = conf['starting_uv']
    min_intensity = conf['min_intensity']
    normalization = conf['normalization']
    postprocess = conf['postprocess']

    r = im[:, :, 0].astype(np.float64)
    g = im[:, :, 1].astype(np.float64)
    b = im[:, :, 2].astype(np.float64)

    max_value = np.iinfo(im.dtype).max
    min_value = min_intensity * max_value

    # ignore black pixels
    bigger_zero = np.logical_and(np.logical_and(r >= 1, g >= 1), b >= 1)
    # ignore pixels smaller than minimum
    bigger_min = np.logical_or(np.logical_or(r >= min_value, g >= min_value),
                               b >= min_value)
    # ignore saturated pixels
    not_saturated = np.logical_and(
        np.logical_and(r < max_value, g < max_value), b < max_value)

    invalid = np.logical_not(
        np.logical_and(np.logical_and(bigger_zero, bigger_min),
                       np.logical_and(not_saturated, mask > 0)))

    r[invalid] = 1
    g[invalid] = 1
    b[invalid] = 1

    log_r = np.log(r)
    log_g = np.log(g)
    log_b = np.log(b)

    # ignore invalid pixels
    invalid_log = np.logical_not(
        np.logical_and(np.logical_and(np.isfinite(log_r), np.isfinite(log_g)),
                       np.isfinite(log_b)))

    invalid = np.logical_or(invalid, invalid_log)
    valid = np.logical_not(invalid)

    log_r[invalid] = 0
    log_g[invalid] = 0
    log_b[invalid] = 0

    u = log_g - log_r
    v = log_g - log_b

    weight = np.ones(u.shape)

    # set invalid pixels weight to 0
    weight[invalid] = 0

    weight_flat = weight[valid].flatten()
    u_flat = u[valid].flatten()
    v_flat = v[valid].flatten()

    # FFCC: wrap log uv!
    wrapped_u = np.mod(np.round((u_flat - starting_uv) / bin_size), num_bins)
    wrapped_v = np.mod(np.round((v_flat - starting_uv) / bin_size), num_bins)

    hist, xedges, yedges = np.histogram2d(wrapped_u,
                                          wrapped_v,
                                          num_bins,
                                          [[0, num_bins], [0, num_bins]],
                                          weights=weight_flat)

    hist = hist.astype(np.float64)
    if normalization is None:
        div_hist = 1.0
    elif normalization == 'sum':
        div_hist = float(hist.sum())
    elif normalization == 'max':
        div_hist = float(hist.max())
    else:
        raise Exception('not a valid histogram normalization: ' +
                        normalization)

    hist = hist / max(div_hist, 0.00001)

    if postprocess is not None:
        if postprocess == 'sqrt':
            hist = np.sqrt(hist)
        else:
            raise Exception('not a valid histogram postprocess: ' +
                            postprocess)

    hist = hist.astype(np.float32)

    return hist, weight
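
# A small usage sketch with a synthetic uint8 image; the `conf` values below
# are illustrative (an FFCC-style 64-bin wrapped histogram), not taken from
# the original source.
import numpy as np

conf = {
    'num_bins': 64,
    'bin_size': 1.0 / 32.0,
    'starting_uv': -1.0,
    'min_intensity': 0.05,
    'normalization': 'sum',
    'postprocess': 'sqrt',
}
im = (np.random.rand(64, 64, 3) * 255).astype(np.uint8)
mask = np.ones(im.shape[:2], dtype=np.uint8)
hist, weight = log_uv_histogram_wrapped(im, mask, conf)
print(hist.shape, hist.dtype)  # (64, 64) float32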
Esempio n. 51
def __get_tumor_core__(data):
    return np.logical_or(data == 1, data == 3)
Esempio n. 52
def pampt(sps, ptype, pparms=[]):

    #Code used from ECEN4242 last semester
    """
	PAM pulse p(t) = p(n*TB/sps) generation
	>>>>> pt = pampt(sps, ptype, pparms) <<<<<
	where sps:
		ptype: pulse type ('man', 'rcf, 'rect', 'rrcf' 'sinc', 'tri')
		pparms not used for 'man', 'rect', 'tri'
		pparms = [k,alpha] for 'rcf', 'rrcf'
		pparms = [k, beta] for sinc
		k: "tail" truncation parameter for 'rcf', 'rrcf', 'sinc'
			(truncates p(t) to -k*sps <= n < k*sps)
		alpha: Rolloff parameter for 'rcf', 'rrcf', 0 <= alpha <= 1
		beta: Kaiser window parameter for 'sinc'
		pt: pulse p(t) at t=n*TB/sps
	Note: In terms of sampling rate Fs and baud rate FB,
		sps = Fs/FB
	"""
    if ptype.lower() == 'rcf':
        nk = round(pparms[0] * sps)
        nn = np.arange(-nk, nk)
        pt = np.sinc(nn / float(sps))  # sinc pulse
        if len(pparms) > 1:
            p2t = 0.25 * np.pi * np.ones(len(nn))
            ix = np.where(
                np.power(2 * pparms[1] * nn / float(sps), 2.0) != 1)[0]
            p2t[ix] = np.cos(np.pi * pparms[1] * nn[ix] / float(sps))
            p2t[ix] = p2t[ix] / (
                1 - np.power(2 * pparms[1] * nn[ix] / float(sps), 2.0))
            pt = pt * p2t
    elif ptype.lower() == 'rrcf':
        nk = round(pparms[0] * sps)
        nn = np.arange(-nk, nk)
        pt = np.zeros(len(nn))
        ix1 = np.where(nn == 0)[0]
        ix2 = np.where(
            np.logical_or(nn == -sps / (4 * pparms[1]),
                          nn == sps / (4 * pparms[1])))[0]
        ix3 = np.where(
            np.logical_and(
                np.logical_and(nn != -sps / (4 * pparms[1]),
                               nn != sps / (4 * pparms[1])), nn != 0))[0]
        pt[ix1] = 1 - pparms[1] + 4 * pparms[1] / np.pi
        pt[ix2] = pparms[1] / np.sqrt(2) * (
            (1 + 2 / np.pi) * np.sin(np.pi / (4 * pparms[1])) +
            (1 - 2 / np.pi) * np.cos(np.pi / (4 * pparms[1])))
        pt[ix3] = (np.sin((1 - pparms[1]) * np.pi * nn[ix3] / float(sps)) +
                   4 * pparms[1] * nn[ix3] / float(sps) * np.cos(
                       (1 + pparms[1]) * np.pi * nn[ix3] / float(sps))) / (
                           np.pi *
                           (1 - (4 * pparms[1] * nn[ix3] / float(sps))**2) *
                           nn[ix3] / float(sps))

    elif ptype.lower() == 'rect':
        nn = np.arange(-sps / 2, sps / 2)
        pt = np.ones(len(nn))
    elif ptype.lower() == 'sinc':
        nk = round(pparms[0] * sps)
        nn = np.arange(-nk, nk)
        pt = np.sinc(nn / float(sps))
        if len(pparms) > 1:
            pt = pt * np.kaiser(len(pt), pparms[1])
    elif ptype.lower() == 'tri':
        nn = np.arange(-sps, sps)
        pt = np.zeros(len(nn))
        ix = np.where(nn < 0)[0]
        pt[ix] = 1 + nn[ix] / float(sps)
        ix = np.where(nn >= 0)[0]
        pt[ix] = 1 - nn[ix] / float(sps)
    elif ptype.lower() == 'man':
        nn = np.arange(-sps / 2, sps / 2)
        # Manchester pulse: -1 over the first half-symbol, +1 over the second
        pt = np.ones(len(nn))

        ix = np.where(nn < 0)[0]
        pt[ix] = -pt[ix]
        print("transmitter")
        print(nn)
        print(pt)
    else:
        pt = np.ones(1)
    return pt
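# Illustrative usage of pampt() (sps and the pulse parameters below are made-up
# example values, not taken from the surrounding project):
sps = 8                                   # e.g. Fs = 8 kHz, FB = 1 kbaud -> sps = Fs/FB
pt_rect = pampt(sps, 'rect')              # rectangular pulse, sps samples long
pt_rcf = pampt(sps, 'rcf', [10, 0.25])    # raised cosine: k = 10 tails, alpha = 0.25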
    def run(self, workspace):
        """Run the module on the current data set

        workspace - has the current image set, object set, measurements
                    and the parent frame for the application if the module
                    is allowed to display. If the module should not display,
                    workspace.frame is None.
        """
        #
        # The object set holds "objects". Each of these is a container
        # for holding up to three kinds of image labels.
        #
        object_set = workspace.object_set
        #
        # Get the primary objects (the centers to be removed).
        # Get the string value out of primary_object_name.
        #
        primary_objects = object_set.get_objects(
            self.primary_objects_name.value)
        #
        # Get the cleaned-up labels image
        #
        primary_labels = primary_objects.segmented
        #
        # Do the same with the secondary object
        secondary_objects = object_set.get_objects(
            self.secondary_objects_name.value)
        secondary_labels = secondary_objects.segmented
        #
        # If one of the two label images is smaller than the other, we
        # try to find the cropping mask and we apply that mask to the larger
        #
        try:
            if any([
                    p_size < s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                #
                # Look for a cropping mask associated with the primary_labels
                # and apply that mask to resize the secondary labels
                #
                secondary_labels = primary_objects.crop_image_similarly(
                    secondary_labels)
                tertiary_image = primary_objects.parent_image
            elif any([
                    p_size > s_size for p_size, s_size in zip(
                        primary_labels.shape, secondary_labels.shape)
            ]):
                primary_labels = secondary_objects.crop_image_similarly(
                    primary_labels)
                tertiary_image = secondary_objects.parent_image
            elif secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
        except ValueError:
            # No suitable cropping - resize all to fit the secondary
            # labels which are the most critical.
            #
            primary_labels, _ = cpo.size_similarly(secondary_labels,
                                                   primary_labels)
            if secondary_objects.parent_image is not None:
                tertiary_image = secondary_objects.parent_image
            else:
                tertiary_image = primary_objects.parent_image
                if tertiary_image is not None:
                    tertiary_image, _ = cpo.size_similarly(
                        secondary_labels, tertiary_image)
        #
        # Find the outlines of the primary image and use this to shrink the
        # primary image by one. This guarantees that there is something left
        # of the secondary image after subtraction
        #
        primary_outline = outline(primary_labels)
        tertiary_labels = secondary_labels.copy()
        if self.shrink_primary:
            primary_mask = np.logical_or(primary_labels == 0, primary_outline)
        else:
            primary_mask = primary_labels == 0
        tertiary_labels[~primary_mask] = 0
        #
        # Get the outlines of the tertiary image
        #
        tertiary_outlines = outline(tertiary_labels) != 0
        #
        # Make the tertiary objects container
        #
        tertiary_objects = cpo.Objects()
        tertiary_objects.segmented = tertiary_labels
        tertiary_objects.parent_image = tertiary_image
        #
        # Relate tertiary objects to their parents & record
        #
        child_count_of_secondary, secondary_parents = secondary_objects.relate_children(
            tertiary_objects)
        if self.shrink_primary:
            child_count_of_primary, primary_parents = primary_objects.relate_children(
                tertiary_objects)
        else:
            # Primary and tertiary don't overlap.
            # Establish overlap between primary and secondary and commute
            _, secondary_of_primary = secondary_objects.relate_children(
                primary_objects)
            mask = secondary_of_primary != 0
            child_count_of_primary = np.zeros(mask.shape, int)
            child_count_of_primary[mask] = child_count_of_secondary[
                secondary_of_primary[mask] - 1]
            primary_parents = np.zeros(secondary_parents.shape,
                                       secondary_parents.dtype)
            primary_of_secondary = np.zeros(secondary_objects.count + 1, int)
            primary_of_secondary[secondary_of_primary] = np.arange(
                1,
                len(secondary_of_primary) + 1)
            primary_of_secondary[0] = 0
            primary_parents = primary_of_secondary[secondary_parents]
        #
        # Write out the objects
        #
        workspace.object_set.add_objects(tertiary_objects,
                                         self.subregion_objects_name.value)
        #
        # Write out the measurements
        #
        m = workspace.measurements
        #
        # The parent/child associations
        #
        for parent_objects_name, parents_of, child_count, relationship in (
            (
                self.primary_objects_name,
                primary_parents,
                child_count_of_primary,
                R_REMOVED,
            ),
            (
                self.secondary_objects_name,
                secondary_parents,
                child_count_of_secondary,
                R_PARENT,
            ),
        ):
            m.add_measurement(
                self.subregion_objects_name.value,
                cellprofiler.measurement.FF_PARENT % parent_objects_name.value,
                parents_of,
            )
            m.add_measurement(
                parent_objects_name.value,
                cellprofiler.measurement.FF_CHILDREN_COUNT %
                self.subregion_objects_name.value,
                child_count,
            )
            mask = parents_of != 0
            image_number = np.ones(np.sum(mask), int) * m.image_set_number
            child_object_number = np.argwhere(mask).flatten() + 1
            parent_object_number = parents_of[mask]
            m.add_relate_measurement(
                self.module_num,
                relationship,
                parent_objects_name.value,
                self.subregion_objects_name.value,
                image_number,
                parent_object_number,
                image_number,
                child_object_number,
            )

        object_count = tertiary_objects.count
        #
        # The object count
        #
        cpmi.add_object_count_measurements(workspace.measurements,
                                           self.subregion_objects_name.value,
                                           object_count)
        #
        # The object locations
        #
        cpmi.add_object_location_measurements(
            workspace.measurements, self.subregion_objects_name.value,
            tertiary_labels)

        if self.show_window:
            workspace.display_data.primary_labels = primary_labels
            workspace.display_data.secondary_labels = secondary_labels
            workspace.display_data.tertiary_labels = tertiary_labels
            workspace.display_data.tertiary_outlines = tertiary_outlines
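# Standalone sketch of the tertiary-region idea used in run() above: the
# tertiary labels are the secondary labels with the primary objects punched out
# (optionally shrunk by one pixel via their outline so a ring always survives).
# The label arrays below are illustrative only.
import numpy as np

secondary = np.ones((4, 4), int)          # one secondary object covering everything
primary = np.zeros((4, 4), int)
primary[1:3, 1:3] = 1                     # one primary object in the middle
tertiary = secondary.copy()
tertiary[primary > 0] = 0                 # non-shrunken case: subtract the full primary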
    def load_frame(self, seq, frame, mask1_cropped, mask1_crop_param):
        cprint('FRAME = ' + str(frame), bcolors.WARNING)

        #reading first frame
        fresh_mask = True
        frame1_dict = read_davis_frame(self.sequences, seq, frame)
        image1 = frame1_dict['image']
        mask1 = frame1_dict['mask']
        if (mask1 > .5).sum() < 500:
            return None
        if mask1_cropped is not None and mask1_crop_param is not None:
            #convert mask1 to its original shape using mask1_crop_param
            uncrop_mask1 = crop_undo(mask1_cropped, **mask1_crop_param)
            inter = np.logical_and((mask1 > .5), uncrop_mask1 > .5).sum()
            union = np.logical_or((mask1 > .5), uncrop_mask1 > .5).sum()

            if float(inter) / union > .1:
                mask1 = uncrop_mask1
                fresh_mask = False

        # reading second frame
        frame2_dict = read_davis_frame(self.sequences, seq, frame + 1,
                                       self.flow_method)
        image2 = frame2_dict['image']
        mask2 = frame2_dict['mask']
        if 'iflow' not in frame2_dict:
            frame2_dict['iflow'] = np.zeros(
                (image2.shape[0], image2.shape[1], 2))

        # Cropping and resizing
        mask1[mask1 < .2] = 0
        mask1_bbox = bbox(mask1)
        cimg = crop(image1,
                    mask1_bbox,
                    bbox_enargement_factor=self.bb1_enlargment,
                    output_shape=self.resizeShape1,
                    resize_order=3) - self.mean
        cmask = crop(mask1.astype('float32'),
                     mask1_bbox,
                     bbox_enargement_factor=self.bb1_enlargment,
                     output_shape=self.resizeShape1)
        if self.noisy_mask and fresh_mask:
            #print 'Adding Noise to the mask...'
            cmask = add_noise_to_mask(cmask)
        cimg_masked = cimg * (cmask[:, :, np.newaxis] > self.mask_threshold)
        cimg_bg = cimg * (cmask[:, :, np.newaxis] <= self.mask_threshold)
        nimg = crop(image2,
                    mask1_bbox,
                    bbox_enargement_factor=self.bb2_enlargment,
                    output_shape=self.resizeShape2,
                    resize_order=3) - self.mean
        label = crop(mask2.astype('float32'),
                     mask1_bbox,
                     bbox_enargement_factor=self.bb2_enlargment,
                     output_shape=self.resizeShape2,
                     resize_order=0)
        label_crop_param = dict(bbox=mask1_bbox,
                                bbox_enargement_factor=self.bb2_enlargment,
                                output_shape=image1.shape[0:2])

        cmask -= self.mask_mean
        if self.bgr:
            cimg = cimg[:, :, ::-1]
            cimg_masked = cimg_masked[:, :, ::-1]
            cimg_bg = cimg_bg[:, :, ::-1]
            nimg = nimg[:, :, ::-1]

        if self.scale_256:
            cimg *= 255
            cimg_masked *= 255
            cimg_bg *= 255
            nimg *= 255
            cmask *= 255

        item = {
            'current_image': cimg.transpose((2, 0, 1)),
            'fg_image': cimg_masked.transpose((2, 0, 1)),
            'bg_image': cimg_bg.transpose((2, 0, 1)),
            'current_mask': cmask,
            'next_image': nimg.transpose((2, 0, 1)),
            'label': label,
            'label_crop_param': label_crop_param
        }

        #crop inv_flow
        if len(self.flow_params) > 0:
            inv_flow = frame2_dict['iflow']
            max_val = np.abs(inv_flow).max()
            if max_val != 0:
                inv_flow /= max_val
            iflow = crop(inv_flow,
                         mask1_bbox,
                         bbox_enargement_factor=self.bb2_enlargment,
                         resize_order=1,
                         output_shape=self.resizeShape2,
                         clip=False,
                         constant_pad=0)

            x_scale = float(iflow.shape[1]) / (mask1_bbox[3] - mask1_bbox[2] +
                                               1) / self.bb2_enlargment
            y_scale = float(iflow.shape[0]) / (mask1_bbox[1] - mask1_bbox[0] +
                                               1) / self.bb2_enlargment

            for i in range(len(self.flow_params)):
                name, down_scale, offset, flow_scale = self.flow_params[i]
                pad = int(-offset + (down_scale - 1) / 2)
                h = np.floor(float(iflow.shape[0] + 2 * pad) / down_scale)
                w = np.floor(float(iflow.shape[1] + 2 * pad) / down_scale)

                n_flow = np.pad(
                    iflow, ((pad, int(h * down_scale - iflow.shape[0] - pad)),
                            (pad, int(w * down_scale - iflow.shape[1] - pad)),
                            (0, 0)), 'constant')
                n_flow = resize(n_flow, (h, w),
                                order=1,
                                mode='nearest',
                                clip=False)
                n_flow[:, :, 0] *= max_val * flow_scale * x_scale / down_scale
                n_flow[:, :, 1] *= max_val * flow_scale * y_scale / down_scale

                n_flow = n_flow.transpose((2, 0, 1))[::-1, :, :]
                item[name] = n_flow
        return item
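# Small illustrative sketch (not part of the dataset class above) of why the
# flow components are multiplied by x_scale / y_scale / down_scale in
# load_frame(): when a flow field is resized, each displacement component must
# be rescaled by the same factor as its spatial axis. skimage is assumed here.
import numpy as np
from skimage.transform import resize

flow = np.random.randn(100, 80, 2)                # (H, W, 2) displacements in pixels
new_h, new_w = 50, 40
scaled = resize(flow, (new_h, new_w), order=1, mode='edge', clip=False)
scaled[:, :, 0] *= float(new_w) / flow.shape[1]   # x-displacements follow the width scaling
scaled[:, :, 1] *= float(new_h) / flow.shape[0]   # y-displacements follow the height scaling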
Example 55
def _optimal_substemma(ms_id, explain_matrix, combinations, mode):
    """Do an exhaustive search for the combination among a given set of ancestors
    that best explains a given manuscript.

    """

    ms_id = ms_id - 1  # numpy indices start at 0
    val = current_app.config.val

    b_defined = val.def_matrix[ms_id]
    # remove variants where the inspected ms is undefined
    b_common = np.logical_and(val.def_matrix, b_defined)

    explain_equal_matrix = val.mask_matrix[ms_id]

    # The mss x passages boolean matrix that is TRUE whenever the inspected ms.
    # agrees with the potential source ms.
    b_equal = np.bitwise_and(val.mask_matrix, explain_equal_matrix) > 0
    b_equal = np.logical_and(b_equal, b_common)

    # The mss x passages boolean matrix that is TRUE whenever the inspected ms.
    # agrees with the potential source ms. or is posterior to it.
    b_post = np.bitwise_and(val.mask_matrix, explain_matrix) > 0
    b_post = np.logical_and(b_post, b_common)

    for comb in combinations:
        # how many passages does this combination explain?
        # pylint: disable=no-member
        b_explained_equal = np.logical_or.reduce(b_equal[comb.vec])
        b_explained_post = np.logical_or.reduce(b_post[comb.vec])
        b_explained_post = np.logical_and(b_explained_post,
                                          np.logical_not(b_explained_equal))
        b_explained = np.logical_or(b_explained_equal, b_explained_post)

        comb.n_explained_equal = np.count_nonzero(b_explained_equal)
        comb.n_explained_post = np.count_nonzero(b_explained_post)

        unexplained_matrix = np.copy(explain_matrix)
        unexplained_matrix[np.logical_not(b_defined)] = 0
        unexplained_matrix[b_explained] = 0
        b_unknown = np.bitwise_and(unexplained_matrix, 0x1) > 0
        unexplained_matrix[b_unknown] = 0
        b_open = unexplained_matrix > 0

        comb.n_unknown = np.count_nonzero(b_unknown)
        comb.n_open = np.count_nonzero(b_open)

        if mode == 'detail':
            comb.open_indices = tuple(
                int(n + 1) for n in np.nonzero(b_open)[0])
            comb.unknown_indices = tuple(
                int(n + 1) for n in np.nonzero(b_unknown)[0])

    if mode == 'search':
        # add the 'hint' column
        def key_len(c):
            return c.len

        def key_explained(c):
            return -c.explained()

        for _k, g in itertools.groupby(sorted(combinations, key=key_len),
                                       key=key_len):
            sorted(g, key=key_explained)[0].hint = True
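# Toy sketch of the "explained passages" bookkeeping above (shapes and values
# are made up; the real matrices come from current_app.config.val):
import numpy as np

# 3 candidate ancestors x 5 passages: True where the ancestor agrees with the ms.
b_equal = np.array([[True, False, False, True, False],
                    [False, True, False, False, False],
                    [False, False, False, False, True]])
comb_vec = [0, 2]                                         # combination of ancestors 0 and 2
b_explained_equal = np.logical_or.reduce(b_equal[comb_vec])
n_explained_equal = np.count_nonzero(b_explained_equal)   # -> 3 passages explained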
Example 56
def remove_outliers(df, column, min_val, max_val):
    col_values = df[column].values
    df[column] = np.where(
        np.logical_or(col_values <= min_val, col_values >= max_val), np.nan,
        col_values)
    return df
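# Hypothetical usage of remove_outliers (the DataFrame and bounds are made-up
# example values):
import pandas as pd

df = pd.DataFrame({'age': [25, 31, -4, 130, 58]})
df = remove_outliers(df, 'age', min_val=0, max_val=120)   # -4 and 130 become NaN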
def Coeff_PML(Type, i, j, h, Nx, Ny, k2_eau,v_eau,N_PML):
    k = np.sqrt(k2_eau)  # maybe only the real part of k2_eau?
    beta = 1

    x = i * h 
    y = j * h   # To avoid divisions by zero?

    if i == 0 or i == Ny - 1 or j == 0 or j == Nx - 1:

        # Layer on the outer edges: identity row for every PML type (14-21)
        if 14 <= Type <= 21:
            Coeff = [0, 0, 0, 0, 1, 0, 0, 0, 0]
            
            
    else:
        if Type == 14:
            x0 = 0
            y0 = h * (Ny)  # Ny or Ny-1 ???
            Beta_x = 1j *beta *(x0-x) / (np.abs(x0 - x)**2 * (k * np.abs(x0 - x) + 1j*beta))
            Beta_y = 1j *beta *(y0-y) / (np.abs(y0 - y)**2 * (k * np.abs(y0 - y) + 1j*beta))
            Gamma_x = 1 + 1j / k / (np.abs(x0 - x)) *beta
            Gamma_y = 1 + 1j / k / (np.abs(y0 - y)) *beta
            Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                     ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]

        if Type == 15:
            y0 = h * (Ny)  # Ny or Ny-1 ???
            Beta_x = 0
            Beta_y = 1j *beta *(y0-y) / (np.abs(y0 - y)**2 * (k * np.abs(y0 - y) + 1j*beta))
            Gamma_x = 1
            Gamma_y = 1 + 1j / k / (np.abs(y0 - y)) *beta
            if j == Nx-N_PML:
                Coeff = [0,0,-Gamma_y,4*Gamma_y,(-3*Gamma_y-3),0,0,4,-1]
            else:    
                Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                         ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]


        if Type == 16:
            x0 = h * (Nx)  # Nx or Nx-1 ???
            y0 = h * (Ny)  # Ny or Ny-1 ???
            Beta_x = 1j *beta *(x0-x) / (np.abs(x0 - x)**2 * (k * np.abs(x0 - x) + 1j*beta))
            Beta_y = 1j *beta *(y0-y) / (np.abs(y0 - y)**2 * (k * np.abs(y0 - y) + 1j*beta))
            Gamma_x = 1 + 1j / k / (np.abs(x0 - x)) *beta
            Gamma_y = 1 + 1j / k / (np.abs(y0 - y)) *beta
            Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                     ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]

        if Type == 17:
            x0 = h * (Nx)  # Nx or Nx-1 ???
            Beta_x = 1j *beta *(x0-x) / (np.abs(x0 - x)**2 * (k * np.abs(x0 - x) + 1j*beta))
            Beta_y = 0
            Gamma_x = 1 + 1j / k / (np.abs(x0 - x)) *beta
            Gamma_y = 1
            if i == Ny-N_PML:
                Coeff = [-Gamma_x,4*Gamma_x,0,0,(-3*Gamma_x-3),4,-1,0,0]
            else:    
                Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                         ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]


        if Type == 18:
            x0 = h * (Nx)  # Nx or Nx-1 ???
            y0 = 0
            Beta_x = 1j *beta *(x0-x) / (np.abs(x0 - x)**2 * (k * np.abs(x0 - x) + 1j*beta))
            Beta_y = 1j *beta *(y0-y) / (np.abs(y0 - y)**2 * (k * np.abs(y0 - y) + 1j*beta))
            Gamma_x = 1 + 1j / k / (np.abs(x0 - x)) *beta
            Gamma_y = 1 + 1j / k / (np.abs(y0 - y)) *beta
            Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                     ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]

        if Type == 19:
            y0 = 0
            Beta_x = 0
            Beta_y = 1j *beta *(y0-y) / (np.abs(y0 - y)**2 * (k * np.abs(y0 - y) + 1j*beta))
            Gamma_x = 1
            Gamma_y = 1 + 1j / k / (np.abs(y0 - y)) *beta
            
            if j == N_PML-1:
                Coeff = [0,0,-1,4,(-3*Gamma_y-3),0,0,4*Gamma_y,-Gamma_y]
            else:    
                Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                         ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]


        if Type == 20:
            x0 = 0
            y0 = 0
            Beta_x = 1j *beta *(x0-x) / (np.abs(x0 - x)**2 * (k * np.abs(x0 - x) + 1j*beta))
            Beta_y = 1j *beta *(y0-y) / (np.abs(y0 - y)**2 * (k * np.abs(y0 - y) + 1j*beta))
            Gamma_x = 1 + 1j / k / (np.abs(x0 - x)) *beta
            Gamma_y = 1 + 1j / k / (np.abs(y0 - y)) *beta
            Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                     ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]

        if Type == 21:
            x0 = 0
            Beta_x = 1j *beta *(x0-x) / (np.abs(x0 - x)**2 * (k * np.abs(x0 - x) + 1j*beta))
            Beta_y = 0
            Gamma_x = 1 + 1j / k / (np.abs(x0 - x)) *beta
            Gamma_y = 1 
            if i == N_PML-1:
                Coeff = [-1,4,0,0,(-3*Gamma_x-3),4*Gamma_x,-Gamma_x,0,0]
            else:    
                Coeff = [0,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
                         ,-2/((Gamma_y)**2)-2/((Gamma_x)**2)+k**2*h**2,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]



#        Coeff = [0,-Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2) ,0,-Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2)\
#            ,-2/((Gamma_x)**2)-2/((Gamma_y)**2)+k**2*h**2,Beta_x/2*h/(Gamma_x**2)+1/(Gamma_x**2),0,Beta_y/2*h/(Gamma_y**2)+1/(Gamma_y**2),0]
        
        
    return Coeff
Example 58
for pair_idx in xrange(1, len(desired_sample_pairs)):

    initial_sample, final_sample = desired_sample_pairs[pair_idx]

    f0s = initial_freqs[pair_idx]
    f1s = final_freqs[pair_idx]

    dfs = (f1s - f0s)

    abs_dfs = numpy.fabs(dfs)

    major_freqs = numpy.fmax(f1s, 1 - f1s)

    # Skip sites where both the initial and final frequencies are near 0
    good_sites = numpy.logical_or(f0s > 0.05, f1s > 0.05)


    sfs_axes[pair_idx - 1].plot(f0s[good_sites],
                                f1s[good_sites],
                                '.',
                                alpha=0.1,
                                markersize=2,
                                markeredgewidth=0,
                                color='0.7')

    # Now plot ones in fixed gene set with colors
    for gene_name in sorted(highlighted_gene_names[pair_idx]):

        in_gene = (gene_names[pair_idx] == gene_name) * good_sites
Example 59
    def plot_attributes(self,
                        name_attribute_x,
                        name_attribute_y=None,
                        outlier_threshold_std_from_median=2,
                        axes: plt.Axes = None,
                        annotate=True,
                        marker_size=2):

        x = self.get_attribute(name_attribute_x)

        show_plot_at_end = False
        if axes is None:
            figure = plt.figure()
            axes = plt.axes()
            figure.set_facecolor('white')
            axes.set_facecolor('white')
            show_plot_at_end = True

        plot_options = \
            {'marker': 'o', 'markersize': marker_size,
             'linestyle': 'None', 'linewidth': 0.5}

        if name_attribute_y is None:
            # Statistics
            N = len(x)
            mean = np.mean(x)
            median = np.median(x)
            std = np.std(x)

            # Find outliers (threshold is a number of std's away from the median,
            # matching the annotation printed in the axes title below)
            outliers_threshold_low = \
                median - (outlier_threshold_std_from_median * std)
            outliers_threshold_high = \
                median + (outlier_threshold_std_from_median * std)
            number_of_outliers = np.sum(
                np.logical_or(x > outliers_threshold_high,
                              x < outliers_threshold_low))

            # Plot
            if annotate:
                axes.set_title(
                    f"{name_attribute_x}\n"
                    f"N: {N}, "
                    f"$ \mu $: {mean:.1e}, "
                    f"median: {median:.1e}, "
                    f"$ \sigma $: {std:.1e}\n"
                    f"outliers (> {outlier_threshold_std_from_median:.1f} "
                    f"$ \sigma $ from median): {number_of_outliers}")
                axes.set_xlabel('Element index')
                axes.set_ylabel('Attribute value')

            legend_entries = list()
            for class_index in range(len(self.classNames)):
                legend_entries.append(self.classNames[class_index])
                logical_index_in_class = self.y == class_index
                index_in_class = np.arange(len(self.y))[logical_index_in_class]
                x_in_class = x[index_in_class]
                axes.plot(index_in_class, x_in_class, **plot_options)

            # Median line
            if annotate:
                axes.plot(
                    [1, len(x)],
                    [median, median],  # median
                    'k-',
                    linewidth=1)

            axes.set_xlim(0)

            legend_entries.append('Median')

        else:
            y = self.get_attribute(name_attribute_y)

            if annotate:
                axes.set_xlabel(name_attribute_x)
                axes.set_ylabel(name_attribute_y)
            legend_entries = list()
            for class_index in range(len(self.classNames)):
                legend_entries.append(self.classNames[class_index])
                logical_index_in_class = self.y == class_index
                x_in_class = x[logical_index_in_class]
                y_in_class = y[logical_index_in_class]
                axes.plot(x_in_class, y_in_class, **plot_options)

        if annotate:
            axes.legend(legend_entries)

        if show_plot_at_end:
            plt.show()
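# Standalone sketch of the outlier rule used in plot_attributes() above
# (illustrative data; n_std plays the role of outlier_threshold_std_from_median):
import numpy as np

x = np.array([1.0, 1.2, 0.9, 1.1, 9.0])
median, std = np.median(x), np.std(x)
n_std = 2
low, high = median - n_std * std, median + n_std * std
n_outliers = np.sum(np.logical_or(x > high, x < low))   # -> 1 (the value 9.0)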
def Construction_A(Nx, Ny, dx, Neuf_points, k2_eau, k2_bois, gamma_eau, gamma_bois, rho_eau, v_eau, p_source,
                   SourceCylindrique, Source_Lineaire, Source_Ponctuelle, Map, \
                   N_PML, Source_Map, Q_map, coeff, centre_bois_x, centre_bois_y, Nx_Bois, Ny_Bois, alpha_Map, omega,
                   B_eau, PML_mode=1, TF_SF=True):
    h = dx
    # ********************** Building matrix A ************************

    # The coefficient ordering is always
    # [p(i-2,j),p(i-1,j) ,p(i,j-2),p(i,j-1),p(i,j),p(i+1,j),p(i+2,j),p(i,j+1),p(i,j+2)]

    # Case 1:
    if Neuf_points == True:
        Coeff1 = [0, 1, 0, 1, -(4 - k2_eau * h ** 2), 1, 0, 1, 0]
    else:
        # 9-point version
        # [p(i-1,j-1),p(i-1,j) ,p(i-1,j+1),p(i,j-1),p(i,j),p(i,j+1),p(i+1,j-1),p(i+1,j),p(i+1,j+1)]
        Coeff1 = [1, 4, 1, 4, -11 + 6 * h ** 2 * k2_eau, 4, 1, 4, 1]

    # Case 2:
    if Neuf_points == True:
        Coeff2 = [0, 1, 0, 1, -(4 - k2_bois * h ** 2), 1, 0, 1, 0]
    else:

        # 9-point version
        # [p(i-1,j-1),p(i-1,j) ,p(i-1,j+1),p(i,j-1),p(i,j),p(i,j+1),p(i+1,j-1),p(i+1,j),p(i+1,j+1)]
        Coeff2 = [1, 4, 1, 4, -11 + 6 * h ** 2 * k2_bois, 4, 1, 4, 1]

    # Cases 3 to 10:

    Coeff3 = Coeff_Frontiere(gamma_eau, gamma_bois, -1 / np.sqrt(2), -1 / np.sqrt(2))
    Coeff4 = Coeff_Frontiere(gamma_eau, gamma_bois, 0, -1)
    Coeff5 = Coeff_Frontiere(gamma_bois, gamma_eau, 1 / np.sqrt(2), -1 / np.sqrt(2))  # -ny
    Coeff6 = Coeff_Frontiere(gamma_bois, gamma_eau, 1, 0)
    Coeff7 = Coeff_Frontiere(gamma_bois, gamma_eau, 1 / np.sqrt(2), 1 / np.sqrt(2))
    Coeff8 = Coeff_Frontiere(gamma_bois, gamma_eau, 0, 1)
    Coeff9 = Coeff_Frontiere(gamma_eau, gamma_bois, -1 / np.sqrt(2), 1 / np.sqrt(2))
    Coeff10 = Coeff_Frontiere(gamma_eau, gamma_bois, -1, 0)

    # Cases 11 and 12 (triangle)
    # Case 11
    Nx11 = -np.cos(coeff / 2)  # -
    Ny11 = -np.sin(coeff / 2)  # -
    Coeff11 = Coeff_Frontiere(gamma_eau, gamma_bois, Nx11, Ny11)
    # Case 12
    Nx12 = np.cos(coeff / 2)
    Ny12 = -np.sin(coeff / 2)
    Coeff12 = Coeff_Frontiere(gamma_eau, gamma_bois, Nx12, Ny12)

    # Case 13 (circle)
    # See the loop further below

    # Cases 14 to 21 (PML): handled in the PML coefficient functions below

    # Case 22 (source): option 2
    # Coeff22 = [0, 1, 0, 1, -(4 - k2_eau * h ** 2), 1, 0, 1, 0]

    Dict_Coeff = {1: Coeff1, 2: Coeff2, 3: Coeff3, 4: Coeff4, 5: Coeff5, 6: Coeff6, 7: Coeff7, 8: Coeff8, 9: Coeff9,
                  10: Coeff10, 11: Coeff11, 12: Coeff12}

    # A = np.zeros([Nx * Ny, Nx * Ny], dtype=complex)
    b = np.zeros([Nx * Ny], dtype=complex)
    b_TFSF = np.zeros([Nx * Ny], dtype=complex)

    data_A = []
    ligne_A = []
    colonne_A = []

    # Matrix without the wood obstacle
    data_Q = []
    ligne_Q = []
    colonne_Q = []

    # Q = np.zeros([Nx * Ny, Nx * Ny], dtype=int)

    if PML_mode == 2:
        PML_Range = 22
    elif PML_mode == 1:
        PML_Range = 21

    Source_mask = np.ones([Ny, Nx], dtype=complex) * np.finfo(float).eps
    Source_mask[1:-1, 1:-1] = 0
    Source_mask[N_PML + 2:Nx - N_PML - 2, N_PML + 2:Nx - N_PML - 2] = 1
    
    #    Source_mask[N_PML-1,N_PML-1:Nx-N_PML] = 0
    #    Source_mask[N_PML-1:Nx-N_PML,N_PML-1] = 0
    #    Source_mask[Nx-N_PML,N_PML-1:Nx-N_PML] = 0
    #    Source_mask[N_PML-1:Nx-N_PML+1,Nx-N_PML] = 0

    for i in range(Nx):
        for j in range(Ny):
            L = p(i, j, Nx)

            Type = int(Map[i, j])

            if 14 <= Type <= PML_Range:
                if PML_mode == 1:
                    Coefficient = Coeff_PML(Type, i, j, h, Nx, Ny, k2_eau, v_eau, N_PML)
                if PML_mode == 2:
                    alpha = alpha_Map[i, j]
                    Coefficient = Coeff_PML2(Type, h, Nx, Ny, omega, B_eau, alpha, rho_eau)

            elif Type == 13:
                Nx13 = (i - centre_bois_x) / coeff
                # y-coordinate of the circle center
                centre_y = centre_bois_y - Ny_Bois / 2 + np.sqrt(coeff ** 2 - (Nx_Bois / 2) ** 2)
                Ny13 = (j - centre_y) / coeff
                Coefficient = Coeff_Frontiere(gamma_eau, gamma_bois, Nx13, Ny13)
            else:
                if Type != 0:
                    Coefficient = Dict_Coeff[Type]

            if Type in (1, 2) and Neuf_points == True:
                Position = [p(i - 1, j - 1, Nx), p(i - 1, j, Nx), p(i - 1, j + 1, Nx), p(i, j - 1, Nx), p(i, j, Nx),
                            p(i, j + 1, Nx),
                            p(i + 1, j - 1, Nx), p(i + 1, j, Nx), p(i + 1, j + 1, Nx)]
            else:
                Position = [p(i - 2, j, Nx), p(i - 1, j, Nx), p(i, j - 2, Nx), p(i, j - 1, Nx), p(i, j, Nx),
                            p(i + 1, j, Nx), p(i + 2, j, Nx),
                            p(i, j + 1, Nx), p(i, j + 2, Nx)]
            if TF_SF == True:
                data_Q.append(Q_map[i, j])
                ligne_Q.append(L)
                colonne_Q.append(L)

            for k, pos in enumerate(Position):
                # if np.logical_and(pos >= 0, pos < (Nx * Ny)):
                if Coefficient[k] != 0:
                    data_A.append(Coefficient[k])
                    ligne_A.append(L)
                    colonne_A.append(pos)
                    # A[L, int(pos)] = Coefficient[k]
            b[L] = Source_Map[i, j] * Source_mask[i, j] * h ** 2 * rho_eau * p_source

    A_sp = scipy.sparse.coo_matrix((data_A, (ligne_A, colonne_A)), shape=(Nx ** 2, Nx ** 2), dtype=complex)
    A_sp = A_sp.tocsc()  # scipy.sparse.csc_matrix(A)
    if TF_SF == True:
        Q_sp = scipy.sparse.coo_matrix((data_Q, (ligne_Q, colonne_Q)), shape=(Nx ** 2, Nx ** 2), dtype=complex)
        Q_sp = Q_sp.tocsc()  # scipy.sparse.csc_matrix(A)
        b_TFSF = (Q_sp.dot(A_sp) - A_sp.dot(Q_sp)).dot(b)
    else:
        b_TFSF = b

    return A_sp, b_TFSF
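# Hypothetical follow-up (not part of the original function): once A_sp and the
# TF/SF right-hand side are assembled, the complex pressure field can be
# obtained with a sparse direct solve; the reshape assumes the row-major
# indexing L = p(i, j, Nx) used in the assembly loop above and a square grid.
from scipy.sparse.linalg import spsolve

# A_sp, b_TFSF = Construction_A(Nx, Ny, dx, ...)    # assemble the Helmholtz system
# p_field = spsolve(A_sp, b_TFSF).reshape((Nx, Ny))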