Example #1
0
def training_stage3(dftrain,dfvalid,cat1,i):
    fname = ddir + 'joblib/stage3_'+str(cat1)+ext
    df = dftrain[dftrain.Categorie1 == cat1].reset_index(drop=True)
    dfv = dfvalid[dfvalid.Categorie1 == cat1].reset_index(drop=True)
    labels = np.unique(df.Categorie3)
    if len(labels)==1:
        joblib.dump((labels,None,None),fname)
        scv = -1
        sct = -1
        print('training', cat1, '\t\t(', i, ') : N=', len(df), 'K=', len(labels))
        print('training', cat1, '\t\t(', i, ') : training=', sct, 'validation=', scv)
        return (sct,scv)
    vec,X = vectorizer_stage3(df.txt)
    Y = df['Categorie3'].values
    cla = LogisticRegression(C=best_regularisation.get(cat1,100))
    cla.fit(X,Y)
    sct = cla.score(X[:min(10000,len(df))],Y[:min(10000,len(df))])
    if len(dfv)==0:
        scv = -1
    else:
        Xv = vec.transform(dfv.txt)
        Yv = dfv['Categorie3'].values
        scv = cla.score(Xv,Yv)
    print('training', cat1, '\t\t(', i, ') : N=', len(df), 'K=', len(labels))
    print('training', cat1, '\t\t(', i, ') : training=', sct, 'validation=', scv)
    joblib.dump((labels,vec,cla),fname)
    del vec,cla
    return (sct,scv)
Example #2
0
            def propagateConstraints_int(x, y):
                if x is None or y is None:
                    return None, None
                x1, x2, y1, y2 = x.min, x.max, y.min, y.max
                if cmp_t == 'ge' or cmp_t == 'gt':
                    x1, x2, y1, y2 = y1, y2, x1, x2 

                # treat 'greater' like 'less than': swap the operands before and after
                if cmp_t == 'lt' or cmp_t == 'gt':
                    x2 = min(x2, y2-1)
                    y1 = max(x1+1, y1)
                elif cmp_t == 'le' or cmp_t == 'ge':
                    x2 = min(x2, y2)
                    y1 = max(x1, y1)
                elif cmp_t == 'eq':
                    x1 = y1 = max(x1, y1)
                    x2 = y2 = min(x2, y2)
                elif cmp_t == 'ne':
                    if x1 == x2 == y1 == y2:
                        return None, None
                    if x1 == x2:
                        y1 = y1 if y1 != x1 else y1+1
                        y2 = y2 if y2 != x2 else y2-1               
                    if y1 == y2:
                        x1 = x1 if x1 != y1 else x1+1
                        x2 = x2 if x2 != y2 else x2-1

                if cmp_t == 'ge' or cmp_t == 'gt':
                    x1, x2, y1, y2 = y1, y2, x1, x2 
                con1 = IntConstraint.range(x.width, x1, x2) if x1 <= x2 else None   
                con2 = IntConstraint.range(y.width, y1, y2) if y1 <= y2 else None   
                return con1, con2
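
A minimal standalone sketch of the 'lt' narrowing rule above, on plain (min, max) tuples instead of IntConstraint objects (narrow_lt is a hypothetical helper, not part of the original code):

def narrow_lt(x, y):
    # x < y: cap x's maximum strictly below y's, raise y's minimum strictly above x's
    x1, x2 = x
    y1, y2 = y
    x2 = min(x2, y2 - 1)
    y1 = max(x1 + 1, y1)
    cx = (x1, x2) if x1 <= x2 else None
    cy = (y1, y2) if y1 <= y2 else None
    return cx, cy

print(narrow_lt((0, 10), (5, 7)))   # ((0, 6), (5, 7))
print(narrow_lt((8, 10), (0, 5)))   # (None, None): no assignment satisfies x < y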
Example #3
0
def plot_cache(data_match):
    
    data_list = glob.glob(data_match+"*")

    master_list = []

    for data_file in data_list:

        data = performance.load_score_dict(data_file)
    
        total = 0
        cum_list = []
        for entry in data:
            total += entry
            cum_list.append(total)  # store the running (cumulative) total, not the raw entry

        master_list.append(cum_list)

        print(len(cum_list))

    avg_list = []
    std_list = []

    cum_len = min([len(x) for x in master_list])
    cum_len = min(200, cum_len)
    

    for i in range(cum_len):
        avg_list.append(np.mean([x[i] for x in master_list]))
        std_list.append(np.std([x[i] for x in master_list]))
    print(avg_list)

    plotting.query_cache(avg_list, std_list) 
Example #4
0
	def slider_dialog(self, title, minimum = 0, maximum = 100, step = 1, big_step = 10, align = 'left', value = 0, char = "*", fill_char = "-", style = 'slider', onchange = None, onchange_args = (), onchange_kwargs = {}):
		assert value >= minimum
		assert value <= maximum
		done = False
		while not done:
			title = self._align(title, align)
			slider_row = self.format_slider(minimum, maximum, value, align = align, char = char, fill_char = fill_char, style = style)
			self.update((title, slider_row))
			self.redraw()
			key = None
			while key is None:
				key = self.input.read_key()
			if key == self.KEY_LEFT:
				value = max(value - step, minimum)
			elif key == self.KEY_RIGHT:
				value = min(value + step, maximum)
			elif key == self.KEY_UP:
				value = min(value + big_step, maximum)
			elif key == self.KEY_DOWN:
				value = max(value - big_step, minimum)
			elif key == self.KEY_ENTER:
				done = True
			if onchange:
				try:
					onchange(value, *onchange_args, **onchange_kwargs)
				except Exception:
					warnings.warn("On-Change function of slider element failed", RuntimeWarning)
		return value
Example #5
0
def offset_slice(pixels1, pixels2, i, j):
    '''Return two sliced arrays where the first slice is offset by i,j
    relative to the second slice.
    
    '''
    if i < 0:
        height = min(pixels1.shape[0] + i, pixels2.shape[0])
        p1_imin = -i
        p2_imin = 0
    else:
        height = min(pixels1.shape[0], pixels2.shape[0] - i)
        p1_imin = 0
        p2_imin = i
    p1_imax = p1_imin + height
    p2_imax = p2_imin + height
    if j < 0:
        width = min(pixels1.shape[1] + j, pixels2.shape[1])
        p1_jmin = -j
        p2_jmin = 0
    else:
        width = min(pixels1.shape[1], pixels2.shape[1] - j)
        p1_jmin = 0
        p2_jmin = j
    p1_jmax = p1_jmin + width
    p2_jmax = p2_jmin + width
    
    p1 = pixels1[p1_imin:p1_imax,p1_jmin:p1_jmax]
    p2 = pixels2[p2_imin:p2_imax,p2_jmin:p2_jmax]
    return (p1,p2)
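
A small usage sketch (illustrative values): both returned views always share the same shape, whatever the signs of i and j:

import numpy as np

a = np.arange(36).reshape(6, 6)
b = np.arange(25).reshape(5, 5)
p1, p2 = offset_slice(a, b, 2, -1)
print(p1.shape, p2.shape)  # (3, 5) (3, 5): a[0:3, 1:6] aligned against b[2:5, 0:5]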
Example #6
0
 def evaluate(self,  u):
 
     eps = 1e-10
     
     if u<min(self.knots)+eps:
         u = min(self.knots)+eps
         
     if u>=max(self.knots)-eps:
         u=max(self.knots)-eps
         
     # scale each control point by its weight
     weighted_pts = [vscale(pt[0:-1], pt[-1]) + [pt[-1]] for pt in self.pts]

     pts = []
     for i in range(len(self.pts)):
         pts.append(vscale(weighted_pts[i], self.N(i, self.order, u)))

     eval_pt = reduce(vadd, pts)  # functools.reduce on Python 3
     
     
     # unscale
     if eval_pt[-1]!=0:
         unweighted_pt = vscale(eval_pt[0:-1], 1.0/eval_pt[-1]) 
     else:
         unweighted_pt = eval_pt[0:-1]
         
     
     return unweighted_pt
Example #7
0
    def updateValue(self, delta_x, delta_y):
        newTs = self.tsStart + Zoomable.pixelToNs(delta_x)
        newValue = self.valueStart - (delta_y / EXPANDED_SIZE)

        # Don't overlap first and last keyframes.
        newTs = min(max(newTs, self.inpoint + 1),
                    self.duration + self.inpoint - 1)

        newValue = min(max(newValue, 0.0), 1.0)

        if not self.has_changeable_time:
            newTs = self.lastTs

        updating = self.timelineElement.updating_keyframes
        self.timelineElement.updating_keyframes = True
        self.timelineElement.source.unset(self.lastTs)
        if (self.timelineElement.source.set(newTs, newValue)):
            self.value = Gst.TimedValue()
            self.value.timestamp = newTs
            self.value.value = newValue
            self.lastTs = newTs

            self.timelineElement.setKeyframePosition(self, self.value)
            # Re-sort the keyframes list each time. This should be cheap, as there
            # should never be too many keyframes; if optimization is needed, first
            # check whether re-sorting is needed at all (it isn't in 99% of cases).
            self.timelineElement.keyframes = sorted(
                self.timelineElement.keyframes, key=lambda keyframe: keyframe.value.timestamp)
            self.timelineElement.drawLines(self.line)
            # This will update the viewer. nifty.
            if not self.line:
                self.timelineElement.timeline._container.seekInPosition(
                    newTs + self.start)

        self.timelineElement.updating_keyframes = updating
Example #8
0
def show_level(level, path=[]):
    """ Displays a level via a print statement.

    Args:
        level: The level to be displayed.
        path: A continuous path to be displayed over the level, if provided.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    path_cells = set(path)

    chars = []
    inverted_waypoints = {point: char for char, point in level['waypoints'].items()}

    for j in range(y_lo, y_hi + 1):
        for i in range(x_lo, x_hi + 1):

            cell = (i, j)
            if cell in path_cells:
                chars.append('*')
            elif cell in level['walls']:
                chars.append('X')
            elif cell in inverted_waypoints:
                chars.append(inverted_waypoints[cell])
            elif cell in level['spaces']:
                chars.append(str(int(level['spaces'][cell])))
            else:
                chars.append(' ')

        chars.append('\n')

    print(''.join(chars))
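
A usage sketch with a hypothetical 3x3 level dict matching the keys the function reads ('spaces', 'walls', 'waypoints'):

tiny_level = {
    'spaces': {(0, 0): 1, (1, 0): 1, (2, 0): 1,
               (0, 1): 1, (2, 1): 1,
               (0, 2): 1, (1, 2): 1, (2, 2): 1},
    'walls': {(1, 1)},
    'waypoints': {'a': (2, 2)},
}
show_level(tiny_level, path=[(0, 0), (1, 0)])
# prints:
# **1
# 1X1
# 11a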
Example #9
0
from csv import writer
from math import inf

def save_level_costs(level, costs, filename='distance_map.csv'):
    """ Displays cell costs from an origin point over the given level.

    Args:
        level: The level to be displayed.
        costs: A dictionary containing a mapping of cells to costs from an origin point.
        filename: The name of the csv file to be created.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    rows = []
    for j in range(y_lo, y_hi + 1):
        row = []

        for i in range(x_lo, x_hi + 1):
            cell = (i, j)
            if cell not in costs:
                row.append(inf)
            else:
                row.append(costs[cell])

        rows.append(row)

    assert '.csv' in filename, 'Error: filename does not contain file type.'
    with open(filename, 'w', newline='') as f:
        csv_writer = writer(f)
        for row in rows:
            csv_writer.writerow(row)

    print("Saved file:", filename)
Example #10
0
    def _label_iterations(self, lod):
        stroke_gradient = self.get_stroke_gradient()
        if lod == LOD.FULL and \
           self.get_style() != "flat" and stroke_gradient:
            root = self.get_layout_root()
            d = 0.4  # fake-emboss distance
            #d = max(src_size[1] * 0.02, 0.0)
            max_offset = 2

            alpha = self.get_gradient_angle()
            xo = root.context.scale_log_to_canvas_x(d * cos(alpha))
            yo = root.context.scale_log_to_canvas_y(d * sin(alpha))
            xo = min(int(round(xo)), max_offset)
            yo = min(int(round(yo)), max_offset)

            luminosity_factor = stroke_gradient * 0.25

            # shadow
            yield xo, yo, -luminosity_factor, False

            # highlight
            yield -xo, -yo, luminosity_factor, False

        # normal
        yield 0, 0, 0, True
Example #11
0
def get_probability(xyz1s,xyz2s,sigma1s,sigma2s,psis,length,slope):
    onemprob = 1.0

    for n in range(len(xyz1s)):
        xyz1=xyz1s[n]
        xyz2=xyz2s[n]
        sigma1=sigma1s[n]
        sigma2=sigma2s[n]
        psi = psis[n]
        psi = psi.get_scale()
        dist=IMP.core.get_distance(xyz1, xyz2)

        sigmai = sigma1.get_scale()
        sigmaj = sigma2.get_scale()
        voli = 4.0 / 3.0 * pi * sigmai * sigmai * sigmai
        volj = 4.0 / 3.0 * pi * sigmaj * sigmaj * sigmaj
        fi = 0
        fj = 0
        if dist < sigmai + sigmaj :
            xlvol = 4.0 / 3.0 * pi * (length / 2) * (length / 2) * \
                           (length / 2)
            fi = min(voli, xlvol)
            fj = min(volj, xlvol)
        else:
            di = dist - sigmaj - length / 2
            dj = dist - sigmai - length / 2
            fi = sphere_cap(sigmai, length / 2, abs(di))
            fj = sphere_cap(sigmaj, length / 2, abs(dj))
        pofr = fi * fj / voli / volj

        factor = (1.0 - (psi * (1.0 - pofr) + pofr * (1 - psi))*exp(-slope*dist))
        onemprob = onemprob * factor
    prob = 1.0 - onemprob
    return prob
Example #12
0
def dameraulevenshtein(seq1, seq2):
    """
    Calculate the Damerau-Levenshtein distance between sequences.

    codesnippet:D0DE4716-B6E6-4161-9219-2903BF8F547F
    Conceptually, this is based on a (len(seq1) + 1) * (len(seq2) + 1)
    matrix. However, only the current and two previous rows are
    needed at once, so we only store those.

    Same code as word-level checking.
    """

    oneago = None
    thisrow = list(range(1, len(seq2) + 1)) + [0]

    for x in range(len(seq1)):

        twoago, oneago, thisrow = oneago, thisrow, [0] * len(seq2) + [x + 1]

        for y in range(len(seq2)):
            delcost = oneago[y] + 1
            addcost = thisrow[y - 1] + 1
            subcost = oneago[y - 1] + (seq1[x] != seq2[y])
            thisrow[y] = min(delcost, addcost, subcost)

            if x > 0 and y > 0 and seq1[x] == seq2[y - 1] and seq1[x - 1] == seq2[y] and seq1[x] != seq2[y]:
                thisrow[y] = min(thisrow[y], twoago[y - 2] + 1)

    return thisrow[len(seq2) - 1]
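
Usage sketch; the second call exercises the twoago row, which is what distinguishes Damerau-Levenshtein from plain Levenshtein:

print(dameraulevenshtein("fee", "deed"))  # 2: substitute f->d, insert d
print(dameraulevenshtein("ba", "ab"))     # 1: a single transposition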
Example #13
0
def cost_config(pos_,Qnet,Pnet,counts):
    p_wind=pos_[0]*WINDS_PRICE_DEVICE
    p_pv=pos_[1]*PV_PRICE_BAT1
    counts.append(int(max(Pnet)))
    counts.append(abs(int(min(Pnet))))
    Qmin=min(Qnet)
    print('Qmin:%f   ' % Qmin)
    Qmax=max(max(Qnet),abs(min(Qnet)))
    Pmax=max(max(Pnet),abs(min(Pnet)))
    maxVol=recyclemodule.tank_svgas(recyclemodule.ele_mkgas(Qmax))
    max_tank_Volumn.append(abs(maxVol)) 
    print('maxVol :%f' % maxVol)
    counts.append(abs(maxVol/100.0))
    q=(Qmax/BAT_POWER)*BAT_PRICE_DEVICE
    if abs(q)>ELE_PRICE_DEVICE+TANK_PRICE_DEVICE+FC_PRICE_DEVICE:
        p_cycle=ELE_PRICE_DEVICE*counts[0]+TANK_PRICE_DEVICE*counts[2]+FC_PRICE_DEVICE*counts[1]
        counts.append(round(Pmax))            
        p_bat=(Pmax)/BAT_POWER*BAT_PRICE_DEVICE
        #print 'cycle price :%f'%p_cycle
        #print 'bat   price :%f'%p_bat
           
    else:
        counts.append(0.0)
        p_bat=q
        p_cycle=0
        #print 'cycle not use'
        #print q

    print('ele: %d fc : %d tank : %d bat : %d' % (counts[0], counts[1], counts[2], counts[3]))
    return p_wind+p_pv+p_bat+p_cycle
Example #14
0
    def onMouse( self, event, x, y, flags, param ):
        """
        Mouse interactions with Main window:
            - Left mouse click gives pixel data under cursor
            - Left mouse drag selects rectangle

            - Right mouse button switches view mode
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            if self.gui_frame is not None:
                self.drag_start = (x, y)

        if event == cv2.EVENT_LBUTTONUP:
                self.drag_start = None

                if self.selection is None:
                    pixel = self.gui_frame[y, x]
                    print("[X,Y][B G R](H, S, V):", [x, y], pixel, utils.BGRpix2HSV(pixel))
                else:
                    #self.track_window = self.selection
                    print(self.selection)    #self.track_window

        if self.drag_start:
            xmin = min( x, self.drag_start[0] )
            ymin = min( y, self.drag_start[1] )
            xmax = max( x, self.drag_start[0] )
            ymax = max( y, self.drag_start[1] )

            if xmax - xmin < 2 and ymax - ymin < 2:
                self.selection = None
            else:
                self.selection = ( xmin, ymin, xmax - xmin, ymax - ymin )

        if event == cv2.EVENT_RBUTTONDOWN:
            pass
Example #15
0
    def plotResult(self, nn):
        cmask = np.where(self.y==1)
        plot(self.X[cmask,0], self.X[cmask,1], 'or', markersize=4)
        cmask = np.where(self.y==2)
        plot(self.X[cmask,0], self.X[cmask,1], 'ob', markersize=4)
        cmask = np.where(self.y==3)
        plot(self.X[cmask,0], self.X[cmask,1], 'og', markersize=4)

        minX = min(self.X[:,0])
        minY = min(self.X[:,1])
        maxX = max(self.X[:,0])
        maxY = max(self.X[:,1])

        grid_range = [minX, maxX, minY, maxY]
        delta = 0.05
        levels = 100
        a = arange(grid_range[0],grid_range[1],delta)
        b = arange(grid_range[2],grid_range[3],delta)
        A, B = meshgrid(a, b)
        values = np.zeros(A.shape)

        for i in range(len(a)):
            for j in range(len(b)):
                values[j,i] = nn.getNetworkOutput( [ a[i], b[j] ] )
        contour(A, B, values, levels=[1], colors=['k'], linestyles='dashed')
        contourf(A, B, values, levels=linspace(values.min(),values.max(),levels), cmap=cm.RdBu)
Example #16
0
 def __handle_select_button_ccs(self, cc_no, cc_value):
     if cc_no == FX_SELECT_FIRST_BUTTON_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             self.__parent.toggle_lock()
     elif cc_no == FX_SELECT_ENCODER_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             new_index = min(
                 len(self.song().scenes) - 1,
                 max(0, list(self.song().scenes).index(self.song().view.selected_scene) - 1),
             )
             self.song().view.selected_scene = self.song().scenes[new_index]
     elif cc_no == FX_SELECT_SECOND_BUTTON_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             new_index = min(
                 len(self.song().scenes) - 1,
                 max(0, list(self.song().scenes).index(self.song().view.selected_scene) + 1),
             )
             self.song().view.selected_scene = self.song().scenes[new_index]
     elif cc_no == FX_SELECT_POTIE_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             self.song().view.selected_scene.fire_as_selected()
     elif cc_no == FX_SELECT_DRUM_PAD_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             self.song().stop_all_clips()
     else:
         raise AssertionError("unknown select row midi message")
Example #17
0
def getPartInfo( part ):
        points = part['points']
        n = len(points)
        area = cx = cy = 0
        xmin = ymin = 360
        xmax = ymax = -360
        pt = points[n-1];  xx = pt[0];  yy = pt[1]
        for pt in points:
                x = pt[0];  y = pt[1]
                # bounds
                xmin = min( x, xmin );  ymin = min( y, ymin )
                xmax = max( x, xmax );  ymax = max( y, ymax )
                # area and centroid
                a = xx * y - x * yy
                area += a
                cx += ( x + xx ) * a
                cy += ( y + yy ) * a
                # next
                xx = x;  yy = y
        area /= 2
        if area:
                centroid = [ cx / area / 6, cy / area / 6 ]
        else:
                centroid = None
        part.update({
                'area': abs(area),
                'bounds': [ [ xmin, ymin ], [ xmax, ymax ] ],
                'center': [ ( xmin + xmax ) / 2, ( ymin + ymax ) / 2 ],
                'centroid': centroid,
                'extent': [ abs( xmax - xmin ), abs( ymax - ymin ) ]
        })
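
Usage sketch on a unit square (hypothetical part dict); the shoelace loop yields area 1 and centroid (0.5, 0.5):

part = {'points': [[0, 0], [1, 0], [1, 1], [0, 1]]}
getPartInfo(part)
print(part['area'], part['centroid'])  # 1.0 [0.5, 0.5]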
Example #18
0
    def getPixelData(self, event):
        global data

        x = event.pos().x()
        y = event.pos().y()
        self.area.append([x, y])

        value = qRgb(flag[self.color][0], flag[self.color][1], flag[self.color][2])

        # if there's 2 points, then add the whole zone to the data
        if len(self.area) == 2:
            # take min/max first, then widen by 1 so the range is inclusive
            # regardless of the order the two corners were clicked
            xi = min(self.area[0][0], self.area[1][0])
            xf = max(self.area[0][0], self.area[1][0]) + 1
            yi = min(self.area[0][1], self.area[1][1])
            yf = max(self.area[0][1], self.area[1][1]) + 1

            print('zone [' + str(xi) + ',' + str(yi) + '][' + str(xf) + ',' + str(yf) + ']  => ' + self.color)

            for i in range(xi, xf):
                for j in range(yi, yf):
                    rgb = QColor(self.image.pixel(i, j)).toRgb()
                    r = rgb.red()
                    g = rgb.green()
                    b = rgb.blue()
                    data[self.color].append([i, j, r, g, b])

                    self.image.setPixel(i, j, value)

            # update image to show the already selected pixels
            self.label.setPixmap(QPixmap.fromImage(self.image))
            # reset list with the zone limits
            self.area = []
Example #19
0
    def get_box_intersection(self, bounding_region):
        """
        Given a bounding_region object computes and returns a new BoundingRegion that
        corresponds to the intersection of the bounding box of the current object with the
        box of the region given as argument. Returns an empty BoundingRegion if the intersection
        is empty.

        :param bounding_region: A BoundingRegion object to compute intersection with
        :type bounding_region: BoundingRegion
        :return: Bounding region of the intersection of the boxes
        :rtype: BoundingRegion
        """
        x1_1 = self.box[0]
        y1_1 = self.box[1]
        x1_2 = self.box[0] + self.box[2]
        y1_2 = self.box[1] + self.box[3]
        box2 = bounding_region.get_box_pixels()
        x2_1 = box2[0]
        y2_1 = box2[1]
        x2_2 = box2[0] + box2[2]
        y2_2 = box2[1] + box2[3]

        x3_1 = max(x1_1, x2_1)
        y3_1 = max(y1_1, y2_1)
        width = max(-1, min(x1_2, x2_2) - x3_1)
        height = max(-1, min(y1_2, y2_2) - y3_1)
        if width * height >= 0:
            return BoundingRegion(image_shape=self.image_shape, box=(x3_1, y3_1, width, height))
        else:
            return BoundingRegion()
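
The same box arithmetic on plain (x, y, w, h) tuples, as a standalone sketch (box_intersection is hypothetical, not the library API):

def box_intersection(b1, b2):
    x1 = max(b1[0], b2[0])
    y1 = max(b1[1], b2[1])
    w = max(-1, min(b1[0] + b1[2], b2[0] + b2[2]) - x1)
    h = max(-1, min(b1[1] + b1[3], b2[1] + b2[3]) - y1)
    return (x1, y1, w, h) if w * h >= 0 else None

print(box_intersection((0, 0, 10, 10), (5, 5, 10, 10)))  # (5, 5, 5, 5)
print(box_intersection((0, 0, 2, 2), (5, 0, 2, 2)))      # None: disjoint along x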
Example #20
0
def compare_chebhist(dname, mylambda, c, Nbin = 25):


    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, cannot display comparison histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda),  1)

        # print c
        cheb_file_content = '\n'.join([str(st) for st in c])
        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v =  y[1:] - y[:-1]

        plt.clf()
        plt.hist(mylambda, Nbin)  # plt.hold() was removed in matplotlib 3.0; axes hold by default
        plt.plot(u, v, "r.", markersize=10)
        plt.show()
        filename = 'data/' + dname + '.png'
        plt.savefig(filename)

        cheb_filename = 'data/' + dname + '.cheb'
        with open(cheb_filename, 'w+') as f:
            f.write(cheb_file_content)
Example #21
0
def fit_CSU_edges(profile):
    fitter = fitting.LevMarLSQFitter()

    amp1_est = profile[profile == min(profile)][0]
    mean1_est = np.argmin(profile)
    amp2_est = profile[profile == max(profile)][0]
    mean2_est = np.argmax(profile)
    
    g_init1 = models.Gaussian1D(amplitude=amp1_est, mean=mean1_est, stddev=2.)
    g_init1.amplitude.max = 0
    g_init1.amplitude.min = amp1_est*0.9
    g_init1.stddev.max = 3
    g_init2 = models.Gaussian1D(amplitude=amp2_est, mean=mean2_est, stddev=2.)
    g_init2.amplitude.min = 0
    g_init2.amplitude.max = amp2_est*0.9
    g_init2.stddev.max = 3

    model = g_init1 + g_init2
    fit = fitter(model, range(0,profile.shape[0]), profile)
    
    # Check Validity of Fit
    if abs(fit.stddev_0.value) <= 3 and abs(fit.stddev_1.value) <= 3\
       and fit.amplitude_0.value < -1 and fit.amplitude_1.value > 1\
       and fit.mean_0.value > fit.mean_1.value:
        x = [fit.mean_0.value, fit.mean_1.value]
        x1 = int(np.floor(min(x)-1))
        x2 = int(np.ceil(max(x)+1))
    else:
        x1 = None
        x2 = None

    return x1, x2
Example #22
0
    def update(self, iterations=10):

        """ Iterates the graph layout and updates node positions.
        """

        # The graph fades in when initially constructed.
        self.alpha += 0.05
        self.alpha = min(self.alpha, 1.0)

        # Iterates over the graph's layout.
        # Each step the graph's bounds are recalculated
        # and a number of iterations are processed,
        # more and more as the layout progresses.
        if self.layout.i == 0:
            self.layout.prepare()
            self.layout.i += 1
        elif self.layout.i == 1:
            self.layout.iterate()
        elif self.layout.i < self.layout.n:
            n = min(iterations, self.layout.i // 10 + 1)  # integer division so range() gets an int
            for i in range(n):
                self.layout.iterate()

        # Calculate the absolute center of the graph.
        min_, max_ = self.layout.bounds  # avoid shadowing the built-in max()
        print("w/h", _ctx)  #, _ctx.WIDTH, _ctx.HEIGHT
        self.x = _ctx.WIDTH - max_.x*self.d - min_.x*self.d
        self.y = _ctx.HEIGHT - max_.y*self.d - min_.y*self.d
        self.x /= 2
        self.y /= 2

        return not self.layout.done
Example #23
0
def cornersHeuristic(state, problem):
    """
    A heuristic for the CornersProblem that you defined.

      state:   The current search state
               (a data structure you chose in your search problem)

      problem: The CornersProblem instance for this layout.

    This function should always return a number that is a lower bound
    on the shortest path from the state to a goal of the problem; i.e.
    it should be admissible (as well as consistent).
    """
    corners = problem.corners # These are the corner coordinates
    walls = problem.walls # These are the walls of the maze, as a Grid (game.py)
    "*** YOUR CODE HERE ***"
    
    currentPosition = state[0]
    unvisitedCorners = []
    distance = 0

    for corner in corners:
        if corner not in state[1]:
            unvisitedCorners.append(corner)

    while unvisitedCorners:
        unvisitedCornersDistance = {}
        for corner in unvisitedCorners:
            subdistance = util.manhattanDistance(currentPosition, corner)
            unvisitedCornersDistance[corner] = subdistance
        # pick the corner at minimum distance; a bare min() over the dict would
        # compare the coordinate tuples, not the distances
        closestCorner = min(unvisitedCornersDistance, key=unvisitedCornersDistance.get)
        distance += unvisitedCornersDistance[closestCorner]
        currentPosition = closestCorner
        unvisitedCorners.remove(closestCorner)

    return distance
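
The greedy nearest-corner walk in isolation, with a plain Manhattan distance standing in for util.manhattanDistance (illustrative coordinates):

def manhattan(p, q):
    return abs(p[0] - q[0]) + abs(p[1] - q[1])

pos, remaining, total = (0, 0), {(0, 4), (4, 4)}, 0
while remaining:
    corner = min(remaining, key=lambda c: manhattan(pos, c))
    total += manhattan(pos, corner)
    pos = corner
    remaining.remove(corner)
print(total)  # 4 + 4 = 8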
Example #24
0
def plotErrorBars(dict_to_plot, x_lim, y_lim, xlabel, y_label, title, out_file, margin=[0.05, 0.05], loc=2):

    plt.title(title)
    plt.xlabel(xlabel)
    plt.ylabel(y_label)

    if y_lim is None:
        y_lim = [1 * float("Inf"), -1 * float("Inf")]

    max_val_seen_y = y_lim[1] - margin[1]
    min_val_seen_y = y_lim[0] + margin[1]
    print(min_val_seen_y, max_val_seen_y)
    max_val_seen_x = x_lim[1] - margin[0]
    min_val_seen_x = x_lim[0] + margin[0]
    handles = []
    for k in dict_to_plot:
        means, stds, x_vals = dict_to_plot[k]

        min_val_seen_y = min(min(np.array(means) - np.array(stds)), min_val_seen_y)
        max_val_seen_y = max(max(np.array(means) + np.array(stds)), max_val_seen_y)

        min_val_seen_x = min(min(x_vals), min_val_seen_x)
        max_val_seen_x = max(max(x_vals), max_val_seen_x)

        handle = plt.errorbar(x_vals, means, yerr=stds)
        handles.append(handle)
        print(max_val_seen_y)
    plt.xlim([min_val_seen_x - margin[0], max_val_seen_x + margin[0]])
    plt.ylim([min_val_seen_y - margin[1], max_val_seen_y + margin[1]])
    plt.legend(handles, dict_to_plot.keys(), loc=loc)
    plt.savefig(out_file)
Example #25
0
    def perform(self, node, inputs, output_storage):
        a = inputs[0].copy()
        val = inputs[1]
        offset = inputs[2]
        height, width = a.shape

        """
        Notes
        -----
        The fill_diagonal only support rectangular matrix. The output
        of tall matrix is "wrapped", which is an option in numpy 1.9.0
        but was regarded as a bug in numpy 1.6.2. Here I implement the
        fill_diagonal_offset with unwrapped output, so fill_diagonal_offset
        supports tall matrix.(This make a little difference between the output
        of fill_diagonal and fill_diagonal_offset only in the case of tall
        matrix)

        """
        if offset >= 0:
            start = offset
            num_of_step = min(min(width, height), width - offset)
        else:
            start = - offset * a.shape[1]
            num_of_step = min(min(width, height), height + offset)
        step = a.shape[1] + 1
        end = start + step * num_of_step
        # Write the value out into the diagonal.
        a.flat[start:end:step] = val

        output_storage[0][0] = a
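
The flat-index arithmetic above on a concrete 4x3 array (negative offset case), as a sketch:

import numpy as np

a = np.zeros((4, 3))
offset, val = -1, 9
start = -offset * a.shape[1]               # 3: flat index of (1, 0)
step = a.shape[1] + 1                      # 4: one row down, one column right
num_of_step = min(min(3, 4), 4 + offset)   # 3 diagonal cells fit
a.flat[start:start + step * num_of_step:step] = val
print(a)  # 9s at (1,0), (2,1), (3,2)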
Example #26
0
 def visit(v):
     "Mark a state as visited"
     call_stack = [(1, v, graph.getrow(v).nonzero()[1], None)]
     while call_stack:
         tovisit, v, iterator, w = call_stack.pop()
         if tovisit:
             NodeVisited[v] = True
             nodes_visit_order.append(v)
             NodeNums[v] = graph.next_visit_num
             graph.next_visit_num += 1
             stack.append(v)
         if w is not None and not NodeInComponent[v]:
             NodeRoots[v] = nodes_visit_order[min(NodeNums[NodeRoots[v]],
                                                  NodeNums[NodeRoots[w]])]
         cont = 0
         for w in iterator:
             if not NodeVisited[w]:
                 cont = 1
                 call_stack.append((0, v, iterator, w))
                 call_stack.append((1, w, graph.getrow(w).nonzero()[1], None))
                 break
             if not NodeInComponent[w]:
                 NodeRoots[v] = nodes_visit_order[min(NodeNums[NodeRoots[v]],
                                                      NodeNums[NodeRoots[w]])]
         if cont:
             continue
         if NodeRoots[v] == v:
             c = []
             while 1:
                 w = stack.pop()
                 NodeInComponent[w] = c
                 c.append(w)
                 if w == v:
                     break
             components.append(c)
Example #27
0
def query_fits_map(header, data, x_deg, y_deg, doStrip=True, doApp=False,
                   r_deg=10.0/60.0):
    """Query the value in a 2D FITS map at a given coordinate in deg"""

    dataValue = None
    ms = None  # ensure `ms` is bound even if the query below fails
    
    # Strip unused dimensions from the array
    if doStrip:
        data, header = strip_fits_dims(data, header, 2, 5)
    
    # Extract the data
    try:
        w = mkWCSDict(header)
        wcs = pw.WCS(w['header2D'])
        [[x_pix, y_pix]] =  wcs.wcs_world2pix([(x_deg, y_deg)], 0)
        dataValue = data[int(round(y_pix)), int(round(x_pix))]
        r_pix = r_deg / w['pixscale']
        if doApp:
            xMax_pix = min(int(round(x_pix + r_pix)), w['xnaxis'])
            xMin_pix = max(int(round(x_pix - r_pix)), 0)
            yMax_pix = min(int(round(y_pix + r_pix)), w['ynaxis'])
            yMin_pix = max(int(round(y_pix - r_pix)), 0)        
            dataSub = data[yMin_pix:yMax_pix, xMin_pix:xMax_pix ]
            ms = calc_stats(dataSub)
        
    except Exception:
        print("Failed to query FITS pixel.")

    if doApp:
        return dataValue, ms
    else:        
        return dataValue
Example #28
0
    def run_periodic_tasks(self, context, raise_on_error=False):
        """Tasks to be run at a periodic interval."""
        idle_for = DEFAULT_INTERVAL
        for task_name, task in self._periodic_tasks:
            full_task_name = '.'.join([self.__class__.__name__, task_name])

            spacing = self._periodic_spacing[task_name]
            last_run = self._periodic_last_run[task_name]

            # Check if due, if not skip
            idle_for = min(idle_for, spacing)
            if last_run is not None:
                delta = last_run + spacing - time.time()
                if delta > 0:
                    idle_for = min(idle_for, delta)
                    continue

            LOG.debug("Running periodic task %(full_task_name)s",
                      {"full_task_name": full_task_name})
            self._periodic_last_run[task_name] = _nearest_boundary(
                last_run, spacing)

            try:
                task(self, context)
            except Exception as e:
                if raise_on_error:
                    raise
                LOG.exception(_LE("Error during %(full_task_name)s: %(e)s"),
                              {"full_task_name": full_task_name, "e": e})
            time.sleep(0)

        return idle_for
Example #29
0
 def minValue(gameState, depth, agentIndex, numGhosts, alpha, beta):
     assert agentIndex > 0, "Pacman uses minValue function. BAD!!!"
     assert depth > 0, "Ghost shouldn't be the terminal agent."
     legalMoves = gameState.getLegalActions(agentIndex)
     # if no legal moves
     if len(legalMoves) == 0:
         return self.evaluationFunction(gameState)
     else:
         v = float("infinity")
         # the last ghost
         if agentIndex == numGhosts:
             for action in legalMoves:
                 v = min(v, maxValue(gameState.generateSuccessor(agentIndex, action), depth-1, 0,
                                     numGhosts, alpha, beta))
                 if v < alpha:
                     return v
                 beta = min(beta, v)
         else:
             for action in legalMoves:
                 v = min(v, minValue(gameState.generateSuccessor(agentIndex, action), depth, agentIndex+1,
                                     numGhosts, alpha, beta))
                 if v < alpha:
                     return v
                 beta = min(beta, v)
         return v
Example #30
0
    def _get_icon_rect(self, opt, text_rect):
        """Get a QRect for the icon to draw.

        Args:
            opt: QStyleOptionTab
            text_rect: The QRect for the text.

        Return:
            A QRect.
        """
        icon_size = opt.iconSize
        if not icon_size.isValid():
            icon_extent = self.pixelMetric(QStyle.PM_SmallIconSize)
            icon_size = QSize(icon_extent, icon_extent)
        icon_mode = (QIcon.Normal if opt.state & QStyle.State_Enabled
                     else QIcon.Disabled)
        icon_state = (QIcon.On if opt.state & QStyle.State_Selected
                      else QIcon.Off)
        # reserve space for favicon when tab bar is vertical (issue #1968)
        position = config.val.tabs.position
        if (position in [QTabWidget.East, QTabWidget.West] and
                config.val.tabs.favicons.show):
            tab_icon_size = icon_size
        else:
            actual_size = opt.icon.actualSize(icon_size, icon_mode, icon_state)
            tab_icon_size = QSize(
                min(actual_size.width(), icon_size.width()),
                min(actual_size.height(), icon_size.height()))

        icon_top = text_rect.center().y() + 1 - tab_icon_size.height() / 2
        icon_rect = QRect(QPoint(text_rect.left(), icon_top), tab_icon_size)
        icon_rect = self._style.visualRect(opt.direction, opt.rect, icon_rect)
        return icon_rect
Example #31
0
# CodeEval solution in Python 2, by Steven A Dunn

import sys

f = open(sys.argv[1], 'r')
for line in f:
    lines = line.strip().split(",")
    dots = []
    for entry in lines:
        if entry == "XYYYY.Y":
            entry = "XYYYYYY"
        dots.append(entry.count("."))
    print(min(dots))
f.close()
Example #32
0
def _ttc_by_path(env_observation):
    ego = env_observation.ego_vehicle_state
    waypoint_paths = env_observation.waypoint_paths
    neighborhood_vehicle_states = env_observation.neighborhood_vehicle_states

    # first sum up the distance between waypoints along a path
    # ie. [(wp1, path1, 0),
    #      (wp2, path1, 0 + dist(wp1, wp2)),
    #      (wp3, path1, 0 + dist(wp1, wp2) + dist(wp2, wp3))]

    wps_with_lane_dist = []
    for path_idx, path in enumerate(waypoint_paths):
        lane_dist = 0.0
        for w1, w2 in zip(path, path[1:]):
            wps_with_lane_dist.append((w1, path_idx, lane_dist))
            lane_dist += np.linalg.norm(w2.pos - w1.pos)
        wps_with_lane_dist.append((path[-1], path_idx, lane_dist))

    # next we compute the TTC along each of the paths
    ttc_by_path_index = [1000] * len(waypoint_paths)
    lane_dist_by_path_index = [1] * len(waypoint_paths)

    for v in neighborhood_vehicle_states:
        # find all waypoints that are on the same lane as this vehicle
        wps_on_lane = [
            (wp, path_idx, dist)
            for wp, path_idx, dist in wps_with_lane_dist
            if wp.lane_id == v.lane_id
        ]

        if not wps_on_lane:
            # this vehicle is not on a nearby lane
            continue

        # find the closest waypoint on this lane to this vehicle
        nearest_wp, path_idx, lane_dist = min(
            wps_on_lane, key=lambda tup: np.linalg.norm(tup[0].pos - vec_2d(v.position))
        )

        if np.linalg.norm(nearest_wp.pos - vec_2d(v.position)) > 2:
            # this vehicle is not close enough to the path, this can happen
            # if the vehicle is behind the ego, or ahead past the end of
            # the waypoints
            continue

        relative_speed_m_per_s = (ego.speed - v.speed) * 1000 / 3600
        if abs(relative_speed_m_per_s) < 1e-5:
            relative_speed_m_per_s = 1e-5

        ttc = lane_dist / relative_speed_m_per_s
        ttc /= 10
        if ttc <= 0:
            # discard collisions that would have happened in the past
            continue

        lane_dist /= 100
        lane_dist_by_path_index[path_idx] = min(
            lane_dist_by_path_index[path_idx], lane_dist
        )
        ttc_by_path_index[path_idx] = min(ttc_by_path_index[path_idx], ttc)

    return ttc_by_path_index, lane_dist_by_path_index
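
The core time-to-collision arithmetic above, in isolation (illustrative values); note the km/h to m/s conversion:

ego_speed_kmh, lead_speed_kmh, lane_dist_m = 50.0, 30.0, 40.0
relative_speed_m_per_s = (ego_speed_kmh - lead_speed_kmh) * 1000 / 3600  # ~5.56 m/s
ttc_s = lane_dist_m / relative_speed_m_per_s                             # ~7.2 s
print(round(ttc_s, 1))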
Example #33
0
		total=0
		t=0
		for i in seqs:
			t=t+1
			l1=len(seqs[i])
			
			total=total+l1
			data[i]=l1


		keylist=sorted(data,key=data.__getitem__)
		sizes=[]
		for p in keylist:
			sizes.append(data[p])
		l2=0
		min1= int(min(sizes))
		max1= int(max(sizes))
		ave=int(numpy.mean(sizes))
		for k in reversed(keylist):
			#print data[k]
			l2=l2+int(data[k])
			
			if l2>=int(total)/2:
				
				n50=int(data[k])
				break
	else:
		t=0
		total=0
		max1=0
		min1=0
Example #34
0
def MOEAD(data=None, pop_size=100, fitness=None, CR=0.95, F=0.7, MAX_EVAL=100, gamma=0.1, upper_bound=1):
	"""
	Algoritmo evolutivo multiobjetivo basado en descomposición
	Parámetros:
		data: conjunto de datos
		pop_size: tamaño de la población
		fitness: función multiobjetivo a evaluar
		CR: probabilidad de cruce
		F: factor de escala
		MAX_EVAL: nº máximo de iteraciones.
		gamma: valor umbral a partir del cual una característica se considera 0
		upper_bound: límite superior del espacio de búsqueda
	"""
	n_eval = 1
	n_neighbors = 20
	# Randomly initialize the population
	population = pop.Population(pop_size = pop_size, n_gens = data.n_col, upper_bound = upper_bound)
	child = [k[:] for k in population.population[:]]
	# Initialize the ideal point
	ideal_point = [math.inf, math.inf]

	# Evaluate the initial population and update the ideal point
	parent_fit = [fitness(data, population.population[i]) for i in range(population.pop_size)]
	child_fit = [k[:] for k in parent_fit[:]]

	ideal_point = [min(min(np.transpose(parent_fit)[i]),ideal_point[i]) for i in range(2)]

	# Initialize weights for the objective functions
	weight = np.zeros((population.pop_size, 2))

	for i in range(population.pop_size):
		weight[i][0] = i/(population.pop_size-1)
		weight[i][1] = 1-i/(population.pop_size-1)


	# Compute the distance matrix and the neighbours of the MOEA/D weight vectors
	distance = np.zeros((population.pop_size, population.pop_size))	# distance matrix between weight vectors
	neighbour_index = np.zeros((population.pop_size, n_neighbors))	# indices of the neighbours of each weight vector

	for i in range(population.pop_size):
		for j in range(population.pop_size):
			distance[i][j] = np.linalg.norm(weight[i,:] - weight[j,:])
		# Sort the distances and keep the indices
		indexes = np.argsort(distance[i,:])
		# Keep the nearest ones
		neighbour_index[i,:]=indexes[0:n_neighbors]

	while(n_eval < MAX_EVAL):

		for i in range(population.pop_size):
			child[i] = diffEvolution(data, population.population, neighbour_index[i], i, CR, F, gamma, upper_bound)
			child_fit[i] = fitness(data, child[i])

			# Update the ideal point
			ideal_point = [min(child_fit[i][j],ideal_point[j]) for j in range(2)]

			# Update the neighbourhood
			for j in range(n_neighbors):
				index = int(neighbour_index[i][j])
				new1 = weight[index,0] * abs(child_fit[i][0] - ideal_point[0])
				new2 = weight[index,1] * abs(child_fit[i][1] - ideal_point[1])
				new_te = max(new1, new2)
				old1 = weight[index,0] * abs(parent_fit[index][0]-ideal_point[0])
				old2 = weight[index,1] * abs(parent_fit[index][1]-ideal_point[1])
				old_te= max(old1, old2)

				if(new_te <= old_te):
					population.setIndiv(child[i], index)
					parent_fit[index][:]  = child_fit[i][:]

		n_eval+=1
	

	# Find the best compromise solution
	max_fitness = [max(np.transpose(parent_fit)[0]), max(np.transpose(parent_fit)[1])]
	min_fitness = [min(np.transpose(parent_fit)[0]), min(np.transpose(parent_fit)[1])]

	fuzzy_matrix = np.zeros((population.pop_size,2))
	# For each member of the Pareto-optimal set
	for i in range(population.pop_size):
		# For each objective function
		for j in range(2):
			if parent_fit[i][j] <= min_fitness[j]:
				fuzzy_matrix[i][j] = 1
			elif parent_fit[i][j] >= max_fitness[j]:
				fuzzy_matrix[i][j] = 0
			else:
				fuzzy_matrix[i][j] = (max_fitness[j] - parent_fit[i][j]) / (max_fitness[j] - min_fitness[j])
		
	
	# Compute the achievement degree for each k
	total = np.matrix(fuzzy_matrix).sum()	
	achievement_deg = []
	for k in range(population.pop_size):			
		achievement_deg.append((fuzzy_matrix[k][0] + fuzzy_matrix[k][1]) / total)

	# Keep the member with the highest achievement degree
	best_index = achievement_deg.index(max(achievement_deg))
	best_sol = population.population[best_index]

	# Plot the Pareto front and the best compromise solution
	#plt.figure(figsize = (8, 6))
	#plt.plot(np.transpose(parent_fit)[0], np.transpose(parent_fit)[1],'r+')
	#plt.plot(parent_fit[best_index][0], parent_fit[best_index][1], 'bo')
	#plt.plot(ideal_point[0], ideal_point[1],'go')
	#plt.legend(('Pareto-optimal solutions', 'Best compromise solution', 'Ideal point'), loc = 'upper right')
	#plt.xlabel('Intra-class distance')
	#plt.ylabel('Inter-class distance')
	#plt.savefig("../output/pareto.png")

	return best_sol
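
The Tchebycheff comparison from the neighbourhood-update step, in isolation (illustrative values): a child replaces a neighbour when its weighted distance to the ideal point is no worse:

import numpy as np

weight = np.array([0.3, 0.7])
ideal = np.array([0.0, 0.0])
child_fit = np.array([1.0, 1.0])
parent_fit = np.array([2.0, 1.5])
new_te = max(weight * np.abs(child_fit - ideal))   # 0.7
old_te = max(weight * np.abs(parent_fit - ideal))  # 1.05
print(new_te <= old_te)  # True: the child replaces the neighbour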
Example #35
0
    select_timestamp

with open("user_took_action.json", 'rb') as lc:
    raw = json.load(lc)

with open("domain_to_productivity.json", 'rb') as lc:
    d2productivity = json.load(lc)

with open("interventionDifficulty", 'rb') as lc:
    intervention_to_difficulty = json.load(lc)

with open("log_data\\users_to_conditions_in_experiment_by_name", 'rb') as lc:
    users_to_conditions_in_experiment_by_name = json.load(lc)

user_to_installtime = parse_url_as_json("http://localhost:5000/get_user_to_all_install_times")
user_to_installtime = {k: min(user_to_installtime[k]) for k in user_to_installtime}
user_to_installtime_multiple = parse_url_as_json("http://localhost:5000/get_user_to_all_install_times")
# filter Geza
def is_blacklisted(item):
  if 'developer_mode' in item:
    return True
  if 'unofficial_version' in item:
    return True
  if item['userid'] == 'd8ae5727ab27f2ca11e331fe':
    return True
  return False


raw = [x for x in raw if not is_blacklisted(x)]

Example #36
0
    def minimax(self,
                depth,
                maxPlayer,
                maxTime,
                originalPlayer,
                alpha=float("-inf"),
                beta=float("inf"),
                maxNode=True,
                prunes=0,
                boards=0):
        if maxPlayer == Square.pBlack:
            ai = Square.pWhite
        else:
            ai = Square.pBlack

        # Base case: we hit the last node, we have a winner, or the time limit is exceeded
        if depth == 0 or self.determineWinner(
                maxPlayer) or time.time() > maxTime:
            return self.evaluationCriteria(maxPlayer, ai), None, prunes, boards

        # Initial val, alpha and beta variables are setup
        bestMove = None
        if maxNode:
            bestVal = float("-inf")
            moves = self.getNextMoves(maxPlayer, originalPlayer)
        else:
            bestVal = float("inf")
            moves = self.getNextMoves((Square.pBlack if maxPlayer
                                       == Square.pWhite else Square.pWhite),
                                      originalPlayer)

        # For each move
        for move in moves:
            for to in move["to"]:

                # Stop if the time limit is exceeded
                if time.time() > maxTime:
                    return bestVal, bestMove, prunes, boards

                # Move pawn and make the previous pawn location as blank
                fromPawn = move["from"].pawn
                move["from"].pawn = Square.pBlank
                to.pawn = fromPawn
                boards += 1
                # Recursively call self
                val, _, newPrunes, newBoards = self.minimax(
                    depth - 1, maxPlayer, maxTime, originalPlayer, alpha, beta,
                    not maxNode, prunes, boards)
                prunes = newPrunes
                boards = newBoards

                # Move the pawn back to original position -> Refactor the board
                to.pawn = Square.pBlank
                move["from"].pawn = fromPawn

                if maxNode and val > bestVal:
                    bestVal = val
                    bestMove = (move["from"].location, to.location)
                    alpha = max(alpha, val)

                if not maxNode and val < bestVal:
                    bestVal = val
                    bestMove = (move["from"].location, to.location)
                    beta = min(beta, val)

                if self.alphaBetaEnabled and beta <= alpha:
                    return bestVal, bestMove, prunes + 1, boards

        return bestVal, bestMove, prunes, boards
Example #37
0
        playerTurn = Square.pWhite
    else:
        playerTurn = Square.pBlack

    h = Halma(16, playerTurn)
    h.updateBoard(currentBoardConfig)
    if h.numPiecesTarget(playerTurn) > 8 or h.numPiecesBase(playerTurn) > 0:
        treeDepthSearch = 3
    else:
        treeDepthSearch = 2

    if h.numPiecesTarget(playerTurn) > 10:
        h.changeEvaluationCriteria()
    # print("Tree depth - ", str(treeDepthSearch))
    maxAvailableTime = time.time() + timeRemaining
    maxAvailableTimeTurn = min(time.time() + 20, maxAvailableTime)
    _, move, prunes, boards = h.minimax(treeDepthSearch, playerTurn,
                                        maxAvailableTimeTurn, playerTurn)
    # prevMove, finalMove = None, None
    if move:
        # print("Total number of boards/nodes generated : ", boards)
        # print("Moves:", move)
        # print("Total number of nodes pruned :", prunes)
        output = ""
        if (abs(move[0][0] - move[1][0]) == 1
                or abs(move[0][1] - move[1][1])) == 1:
            output = "E " + str(move[0][1]) + "," + str(
                move[0][0]) + " " + str(move[1][1]) + "," + str(move[1][0])
            # prevMove = (move[0][1], move[0][0])
            # finalMove = (move[1][1], move[1][0])
        else:
Example #38
0
    def accuracy(self, dataset, topk=10, neg_num=1000):
        all_set = set(list(np.arange(neg_num)))
        sum_pre = 0.0
        sum_recall = 0.0
        sum_ndcg = 0.0
        sum_item = 0
        bar = tqdm(total=len(dataset))

        for data in dataset:
            bar.update(1)
            if len(data) < 1003:
                continue

            sum_item += 1
            user = torch.tensor(data[0], dtype=torch.long)
            video = torch.tensor(data[1], dtype=torch.long)
            neg_hashtag = data[2:1002]
            pos_hashtag = data[1002:]

            pos_hashtags_tensor = torch.tensor(pos_hashtag,
                                               dtype=torch.long).cuda()
            neg_hashtags_tensor = torch.tensor(neg_hashtag,
                                               dtype=torch.long).cuda()

            user_tensor = self.result_embed[user]
            pos_hashtags_tensor = self.result_embed[pos_hashtags_tensor]
            neg_hashtags_tensor = self.result_embed[neg_hashtags_tensor]
            video_tensor = self.video_features[video - self.num_user -
                                               self.num_hashtag]
            video_tensor = F.leaky_relu(self.trans_video_layer(video_tensor))
            # user_specific_video = F.leaky_relu(self.user_video_layer(torch.cat((video_tensor, user_tensor))))
            # user_specific_pos_h = F.leaky_relu(self.user_hashtag_layer(torch.cat((pos_hashtags_tensor, user_tensor.unsqueeze(0).repeat(pos_hashtags_tensor.size(0),1)), dim=1)))
            # user_specific_neg_h = F.leaky_relu(self.user_hashtag_layer(torch.cat((neg_hashtags_tensor, user_tensor.unsqueeze(0).repeat(neg_hashtags_tensor.size(0),1)), dim=1)))
            user_specific_video = F.leaky_relu(
                torch.matmul(video_tensor, self.weight_v) +
                torch.matmul(user_tensor, self.weight_v_u) + self.bias_v)
            user_specific_pos_h = F.leaky_relu(
                torch.matmul(pos_hashtags_tensor, self.weight_h) +
                torch.matmul(user_tensor, self.weight_h_u) + self.bias_h)
            user_specific_neg_h = F.leaky_relu(
                torch.matmul(neg_hashtags_tensor, self.weight_h) +
                torch.matmul(user_tensor, self.weight_h_u) + self.bias_h)

            num_pos = len(pos_hashtag)
            pos_scores = torch.sum(user_specific_video * user_specific_pos_h,
                                   dim=1)
            neg_scores = torch.sum(user_specific_video * user_specific_neg_h,
                                   dim=1)

            _, index_of_rank_list = torch.topk(
                torch.cat((neg_scores, pos_scores)), topk)
            index_set = set([iofr.cpu().item() for iofr in index_of_rank_list])
            num_hit = len(index_set.difference(all_set))
            sum_pre += float(num_hit / topk)
            sum_recall += float(num_hit / num_pos)
            idcg = np.sum(1 / np.log2(np.arange(min(num_pos, topk)) + 2))
            ndcg_score = 0.0
            for i in range(num_pos):
                label_pos = neg_num + i
                if label_pos in index_of_rank_list:
                    index = list(
                        index_of_rank_list.cpu().numpy()).index(label_pos)
                    ndcg_score += 1 / math.log2(index + 2)  # base-2 log to match the idcg computation

            sum_ndcg += ndcg_score / idcg
        bar.close()

        return sum_pre / sum_item, sum_recall / sum_item
Example #39
0
import datetime 

def printTimeStamp(name):
  print('Program author: ' + name)
  print('Compilation time: ' + str(datetime.datetime.now()))

a = int(input("a: ")) 
b = int(input("b: ")) 
c = int(input("c: ")) 

max_ = max((a, b, c)) 
min_ = min((a, b, c)) 

print("Max={0} \n Min={1}".format(max_, min_)) 
print(a+b+c-min_-max_) 

printTimeStamp("Alexey.")
Example #40
0
def main():
    runlength_path = "/home/ryan/data/Nanopore/ecoli/runnie/out/rad2_pass_runnie_0.out"

    handler = RunlengthHandler(runlength_path)

    reads = handler.iterate_file(sequence_cutoff=200, print_status=True)

    n_distributions = 0

    x = numpy.arange(0, 10)

    sys.stderr.write("Binning distributions...\n")
    distribution_bins = [list() for i in range(60)]

    for r,read in enumerate(reads):
        data = read.data
        read_id = read.id

        for i,item in enumerate(data):
            if item.shape < 1:
                sys.stderr.write("WARNING: beta less than 1: %s\n" % item.shape)

            y = evaluate_discrete_weibull(shape=item.shape, scale=item.scale, x=x)

            # Get analytical mode for the continuous weibull using parameters
            mode = calculate_mode(scale=item.scale, shape=item.shape)

            # Generate window of +1 -1 around analytical mode
            min_index = max(0, round(mode) - 1)
            max_index = min_index + 2

            # Find numerical mode within window
            mode_numerical = min_index + numpy.argmax(y[min_index:max_index])

            # true_mode = numpy.argmax(y)
            #
            # if true_mode != mode_numerical:
            #     print(true_mode, mode_numerical)

            if mode_numerical < len(distribution_bins):
                distribution_bins[mode_numerical].append([item.scale, item.shape])
                n_distributions += 1

            # print(item.scale, item.shape)
            # print(sum)
            # print(mode)

            # axes = pyplot.axes()
            #
            # plot_distribution(axes, x[:60], y[:60])
            #
            # pyplot.show()
            # pyplot.close()

            # if i == 1000:
            #     break

    n_rows = 8

    figure, axes = pyplot.subplots(nrows=n_rows)

    sys.stderr.write("Plotting...\n")

    sample_size = 1000

    for b,bin in enumerate(distribution_bins[:n_rows]):
        alpha = 1/(sample_size/3)

        bin_sample = list()

        if len(bin) > 0:
            # print(b)
            n_items = min(sample_size, len(bin))

            while len(bin_sample) < n_items:
                bin_sample.append(random.choice(bin)[:n_rows+5])

            for scale, shape in bin_sample:
                print(",".join(list(map(str,[b+1, scale, shape]))))

                y = evaluate_discrete_weibull(scale=scale, shape=shape, x=x[:n_rows+5])

                axes[b].plot(x[:n_rows+5], y, color=[0.1,0.4,1.0], alpha=alpha, linewidth=0.8)

                label = "%d" % (b+1)

                axes[b].set_ylabel(label)

    axes[n_rows-1].set_xlabel("Run length")
    axes[0].set_title("Binned length distributions")

    pyplot.show()
    pyplot.close()
Example #41
0
def genIsometric(width, height, img, outl=None, pic=False, per=1):

    per = 11 - per
    x = y = 0

    radius = int(per / 100.0 * min(height, width))

    idata = img.load()  # load pixel data
    draw = ImageDraw.Draw(img)

    ang = 2 * math.pi / 6  # angle inside a hexagon
    apothem = radius * math.cos(math.pi / 6)  # radius of inner circle
    side = 2 * apothem * math.tan(math.pi / 6)  # length of each side
    hexwidth = 2 * apothem  # horizontal width of a hexagon
    wboxes = width // int(hexwidth)  # adj
    hboxes = height // int((side + radius) * 0.75)  # adj

    xback = 0  # backup of x
    x, y = xback + apothem, -(side / 2)  # start here

    if pic:
        hboxes += 1

    for i in range(-1, hboxes + 1):
        for j in range(wboxes + 2):
            points = [((x + radius * math.sin(k * ang)),
                       (y + radius * math.cos(k * ang))) for k in range(6)]
            triangle_points = [
            ]  #to store the vertices of the individual equilateral triangles that make up a hexagon
            c = [
            ]  #to store the colors of centres of each individual equilateral triangle
            for k in range(-5, 1):
                triangle_points.append([
                    (x, y), points[k], points[k + 1]
                ])  #storing vertices of individual triangles
                a, b = calcCenter([
                    (x, y), points[k], points[k + 1]
                ])  #calculating centre of individual triangles
                try:  # adj to not overflow
                    b = height - 1 if b >= height else b
                    b = 1 if b <= 0 else b

                    a = width - 1 if a >= width else a
                    a = 1 if a <= 0 else a

                    c.append(idata[a, b])  #setting the color of the triangle

                except Exception as e:
                    #print(a,b)
                    c.append("#00ff00")  # backup

            if outl:
                for k in range(6):
                    draw.polygon(
                        (triangle_points[k]), fill=c[k],
                        outline=outl)  # draw 6 triangles that form a hexagon
            else:
                for k in range(6):
                    draw.polygon(
                        (triangle_points[k]),
                        fill=c[k])  # draw 6 triangles that form a hexagon
            x += hexwidth

        y += radius + (side / 2)  # shift cursor vertically
        if i % 2 == 0:
            x = xback + apothem  # restore horizontal starting point
        else:
            x = xback  # restore horizontal starting point, but for honeycombing

    return img  # return final image
Example #42
0
def TemperatureHumidityGraph(source, days, delay):

	print("TemperatureHumidityGraph source:%s days:%s" % (source, days))
	print("sleeping seconds:", delay)
	time.sleep(delay)
	print("TemperatureHumidityGraph running now")

	# blink GPIO LED when it's run
	GPIO.setup(18, GPIO.OUT)
	GPIO.output(18, True)
	time.sleep(0.2)
	GPIO.output(18, False)

	# now that we have the data, stuff it into the graph

	try:
		print("trying database")
		db = mdb.connect('localhost', 'root', config.MySQL_Password, 'GroveWeatherPi')

		cursor = db.cursor()

		query = "SELECT TimeStamp, bmp180Temperature,  outsideTemperature, outsideHumidity, insideHumidity FROM WeatherData where  now() - interval %i hour < TimeStamp" % (days*24)

		print("query=", query)
		cursor.execute(query)
		result = cursor.fetchall()

		t = []
		u = []
		v = []
	        x = []	
	        z = []	

		fig = pyplot.figure()



		for record in result:
  			t.append(record[0])
  			u.append(record[1])
  			v.append(record[2])
  			x.append(record[3])
  			z.append(record[4])

                print ("count of t=",len(t))
		if (len(t) == 0):
			return	

		#dts = map(datetime.datetime.fromtimestamp, s)
		#fds = dates.date2num(dts) # converted
		# matplotlib date format object
		hfmt = dates.DateFormatter('%m/%d-%H')

		
		ax = fig.add_subplot(111)
		ax.xaxis.set_major_locator(dates.HourLocator(interval=6))
		ax.xaxis.set_major_formatter(hfmt)
		pylab.xticks(rotation='vertical')

		pyplot.subplots_adjust(bottom=.3)
		pylab.plot(t, v, color='g',label="Outside Temp (C)",linestyle="-",marker=".")
		pylab.plot(t, u, color='r',label="Inside Temp (C)",linestyle="-",marker=".")
		pylab.xlabel("Hours")
		pylab.ylabel("degrees C")
		pylab.legend(loc='upper left')
		pylab.axis([min(t), max(t), 0, 40])
		ax2 = pylab.twinx()
		pylab.ylabel("% ")
		pylab.plot(t, x, color='y',label="Outside Hum %",linestyle="-",marker=".")
		pylab.plot(t, z, color='b',label="Inside Hum %",linestyle="-",marker=".")
		pylab.axis([min(t), max(t), 0, 100])
		pylab.legend(loc='lower left')
		pylab.figtext(.5, .05, ("Environmental Statistics Last %i Days" % days),fontsize=18,ha='center')

		#pylab.grid(True)

		pyplot.setp( ax.xaxis.get_majorticklabels(), rotation=70)
		ax.xaxis.set_major_formatter(dates.DateFormatter('%m/%d-%H'))
		pyplot.show()
		try:
			pyplot.savefig("/home/pi/RasPiConnectServer/static/TemperatureHumidityGraph.png")	
		except:
			pyplot.savefig("/home/pi/SDL_Pi_GroveWeatherPi/static/TemperatureHumidityGraph.png")	

		
	except mdb.Error, e:
  
    		print "Error %d: %s" % (e.args[0],e.args[1])
Beispiel #43
0
    tmp = n - 1
    while (tmp >= l):
        a = a * tmp % mod
        tmp -= 1
    return a


inf = 10 ** 18
mod = 10 ** 9 + 7

h,w = lr()
s = [input() for i in range(h)]
dis = [[0 for i in range(w+1)] for j in range(h+1)]  # minimum number of times an obstacle is hit
for i in range(2,w+1):
    dis[0][i] = inf
for i in range(2,h+1):
    dis[i][0] = inf
for i in range(h):
    for j in range(w):
        if s[i][j] == '.':
            dis[i+1][j+1] = min(dis[i][j+1], dis[i+1][j])
        else:
            if i>0 and j>0 and s[i-1][j]=='#' and s[i][j-1]=='#':
                dis[i+1][j+1] = min(dis[i][j+1], dis[i+1][j])
            elif i>0 and s[i-1][j]=='#':
                dis[i+1][j+1] = min(dis[i][j+1], dis[i+1][j]+1)
            elif j>0 and s[i][j-1]=='#':
                dis[i+1][j+1] = min(dis[i][j+1]+1, dis[i+1][j])
            else:
                dis[i+1][j+1] = min(dis[i][j+1]+1, dis[i+1][j]+1)
print(dis[h][w])
Beispiel #44
0
import queue

T = int(input())


def solve(N, K):
    que = queue.PriorityQueue()
    a = N // 2
    b = N - 1 - a
    que.put((-a, -b))
    res = []
    while not que.empty():
        a, b = que.get()
        a = -a
        b = -b
        res.append((a, b))
        if a != 0:
            naa = a // 2
            nab = a - 1 - naa
            que.put((-naa, -nab))
        if b != 0:
            nba = b // 2
            nbb = b - 1 - nba
            que.put((-nba, -nbb))
    return res[K - 1]


for t in range(T):
    N, K = map(int, input().split())
    ans = solve(N, K)
    print("Case #{}: {} {}".format(t + 1, max(ans), min(ans)))
Beispiel #45
0
def detailed_map(backend):
    """Widget for displaying detailed noise map.

    Args:
        backend (IBMQBackend): The backend.

    Returns:
        GridBox: Widget holding noise map images.
    """
    props = backend.properties().to_dict()
    config = backend.configuration().to_dict()
    single_gate_errors = [q['parameters'][0]['value']
                          for q in props['gates'][2:3*config['n_qubits']:3]]
    single_norm = matplotlib.colors.Normalize(
        vmin=min(single_gate_errors), vmax=max(single_gate_errors))
    q_colors = [cm.viridis(single_norm(err)) for err in single_gate_errors]

    cmap = config['coupling_map']

    cx_errors = []
    for line in cmap:
        for item in props['gates'][3*config['n_qubits']:]:
            if item['qubits'] == line:
                cx_errors.append(item['parameters'][0]['value'])
                break
        else:
            continue

    cx_norm = matplotlib.colors.Normalize(
        vmin=min(cx_errors), vmax=max(cx_errors))
    line_colors = [cm.viridis(cx_norm(err)) for err in cx_errors]

    single_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='left',
                                                         align_items='center'))

    cmap_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='top',
                                                       width='auto', height='auto',
                                                       align_items='center'))

    cx_widget = widgets.Output(layout=widgets.Layout(display='flex-inline', grid_area='right',
                                                     align_items='center'))

    tick_locator = mpl.ticker.MaxNLocator(nbins=5)
    with cmap_widget:
        noise_map = plot_gate_map(backend, qubit_color=q_colors,
                                  line_color=line_colors,
                                  qubit_size=28,
                                  plot_directed=True)
        width, height = noise_map.get_size_inches()

        noise_map.set_size_inches(1.25*width, 1.25*height)

        display(noise_map)
        plt.close(noise_map)

    with single_widget:
        cbl_fig = plt.figure(figsize=(3, 1))
        ax1 = cbl_fig.add_axes([0.05, 0.80, 0.9, 0.15])
        single_cb = mpl.colorbar.ColorbarBase(ax1, cmap=cm.viridis,
                                              norm=single_norm,
                                              orientation='horizontal')
        single_cb.locator = tick_locator
        single_cb.update_ticks()
        ax1.set_title('Single-qubit U3 error rate')
        display(cbl_fig)
        plt.close(cbl_fig)

    with cx_widget:
        cx_fig = plt.figure(figsize=(3, 1))
        ax2 = cx_fig.add_axes([0.05, 0.80, 0.9, 0.15])
        cx_cb = mpl.colorbar.ColorbarBase(ax2, cmap=cm.viridis,
                                          norm=cx_norm,
                                          orientation='horizontal')
        cx_cb.locator = tick_locator
        cx_cb.update_ticks()
        ax2.set_title('CNOT error rate')
        display(cx_fig)
        plt.close(cx_fig)

    out_box = widgets.GridBox([single_widget, cmap_widget, cx_widget],
                              layout=widgets.Layout(
                                  grid_template_rows='auto auto',
                                  grid_template_columns='33% 33% 33%',
                                  grid_template_areas='''
                                                "top top top"
                                                "left . right"
                                                ''',
                                  grid_gap='0px 0px'))
    return out_box
Beispiel #46
0
    def __Logger_appendconfigfiles(self, filenames):
        """ **(private)** Prepend the given filenames to the list of configuration files. """
        filenames.reverse()
        for i in range(len(filenames)):
            keys = CONFIGURATION_FILES.keys()
            CONFIGURATION_FILES[min(keys) - 1] = filenames[i]
Beispiel #47
0
    def __init__(self, train_data, model, loss=None, metrics=None, n_epochs=3, batch_size=32, print_every=50,
                 validate_every=-1, dev_data=None, use_cuda=False, save_path=None,
                 optimizer=Adam(lr=0.01, weight_decay=0), check_code_level=0,
                 metric_key=None, sampler=RandomSampler(), use_tqdm=True):
        super(Trainer, self).__init__()

        if not isinstance(train_data, DataSet):
            raise TypeError(f"The type of train_data must be fastNLP.DataSet, got {type(train_data)}.")
        if not isinstance(model, nn.Module):
            raise TypeError(f"The type of model must be torch.nn.Module, got {type(model)}.")

        # check metrics and dev_data
        if (not metrics) and dev_data is not None:
            raise ValueError("No metric for dev_data evaluation.")
        if metrics and (dev_data is None):
            raise ValueError("No dev_data for evaluations, pass dev_data or set metrics to None. ")

        # check save_path
        if not (save_path is None or isinstance(save_path, str)):
            raise ValueError("save_path can only be None or `str`.")
        # prepare evaluate
        metrics = _prepare_metrics(metrics)

        # parse metric_key
        # increase_better is True. It means the exp result gets better if the indicator increases.
        # It is true by default.
        self.increase_better = True
        if metric_key is not None:
            self.increase_better = False if metric_key[0] == "-" else True
            self.metric_key = metric_key[1:] if metric_key[0] == "+" or metric_key[0] == "-" else metric_key
        elif len(metrics) > 0:
            self.metric_key = metrics[0].__class__.__name__.lower().strip('metric')

        # prepare loss
        losser = _prepare_losser(loss)

        # sampler check
        if not isinstance(sampler, BaseSampler):
            raise ValueError("The type of sampler should be fastNLP.BaseSampler, got {}.".format(type(sampler)))

        if check_code_level > -1:
            _check_code(dataset=train_data, model=model, losser=losser, metrics=metrics, dev_data=dev_data,
                        metric_key=metric_key, check_level=check_code_level,
                        batch_size=min(batch_size, DEFAULT_CHECK_BATCH_SIZE))

        self.train_data = train_data
        self.dev_data = dev_data  # If None, No validation.
        self.model = model
        self.losser = losser
        self.metrics = metrics
        self.n_epochs = int(n_epochs)
        self.batch_size = int(batch_size)
        self.use_cuda = bool(use_cuda)
        self.save_path = save_path
        self.print_every = int(print_every)
        self.validate_every = int(validate_every)
        self.best_metric_indicator = None
        self.sampler = sampler

        if isinstance(optimizer, torch.optim.Optimizer):
            self.optimizer = optimizer
        else:
            self.optimizer = optimizer.construct_from_pytorch(self.model.parameters())

        self.use_tqdm = use_tqdm
        if self.use_tqdm:
            tester_verbose = 0
            self.print_every = abs(self.print_every)
        else:
            tester_verbose = 1

        if self.dev_data is not None:
            self.tester = Tester(model=self.model,
                                 data=self.dev_data,
                                 metrics=self.metrics,
                                 batch_size=self.batch_size,
                                 use_cuda=self.use_cuda,
                                 verbose=tester_verbose)

        self.step = 0
        self.start_time = None  # start timestamp
Beispiel #48
0
def main(_):
    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default():
        # Loads content images.
        eval_content_inputs_, _ = image_utils.imagenet_inputs(
            FLAGS.batch_size, FLAGS.image_size)

        # Process style and content weight flags.
        content_weights = ast.literal_eval(FLAGS.content_weights)
        style_weights = ast.literal_eval(FLAGS.style_weights)

        # Loads evaluation style images.
        eval_style_inputs_, _, _ = image_utils.arbitrary_style_image_inputs(
            FLAGS.eval_style_dataset_file,
            batch_size=FLAGS.batch_size,
            image_size=FLAGS.image_size,
            center_crop=True,
            shuffle=True,
            augment_style_images=False,
            random_style_image_size=False)

        # Computes stylized noise.
        stylized_noise, _, _, _ = build_model.build_model(
            tf.random_uniform([
                min(4, FLAGS.batch_size), FLAGS.image_size, FLAGS.image_size, 3
            ]),
            tf.slice(eval_style_inputs_, [0, 0, 0, 0],
                     [min(4, FLAGS.batch_size), -1, -1, -1]),
            trainable=False,
            is_training=False,
            reuse=None,
            inception_end_point='Mixed_6e',
            style_prediction_bottleneck=100,
            adds_losses=False)

        # Computes stylized images.
        stylized_images, _, loss_dict, _ = build_model.build_model(
            eval_content_inputs_,
            eval_style_inputs_,
            trainable=False,
            is_training=False,
            reuse=True,
            inception_end_point='Mixed_6e',
            style_prediction_bottleneck=100,
            adds_losses=True,
            content_weights=content_weights,
            style_weights=style_weights,
            total_variation_weight=FLAGS.total_variation_weight)

        # Adds Image summaries to the tensorboard.
        tf.summary.image(
            'image/{}/0_eval_content_inputs'.format(FLAGS.eval_name),
            eval_content_inputs_, 4)
        tf.summary.image(
            'image/{}/1_eval_style_inputs'.format(FLAGS.eval_name),
            eval_style_inputs_, 4)
        tf.summary.image(
            'image/{}/2_eval_stylized_images'.format(FLAGS.eval_name),
            stylized_images, 4)
        tf.summary.image('image/{}/3_stylized_noise'.format(FLAGS.eval_name),
                         stylized_noise, 4)

        metrics = {}
        for key, value in loss_dict.iteritems():
            metrics[key] = tf.metrics.mean(value)

        names_values, names_updates = slim.metrics.aggregate_metric_map(
            metrics)
        for name, value in names_values.iteritems():
            slim.summaries.add_scalar_summary(value, name, print_summary=True)
        eval_op = names_updates.values()
        num_evals = FLAGS.num_evaluation_styles / FLAGS.batch_size

        slim.evaluation.evaluation_loop(
            master=FLAGS.master,
            checkpoint_dir=FLAGS.checkpoint_dir,
            logdir=FLAGS.eval_dir,
            eval_op=eval_op,
            num_evals=num_evals,
            eval_interval_secs=FLAGS.eval_interval_secs)
Beispiel #49
0
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """
    简化版svm算法
    :param dataMatIn: 输入矩阵
    :param classLabels: 输入标签数组
    :param C: 松弛变量,用于控制“最大化间隔”和“保证大多数点的函数间隔小于1.0”这两个目标的权重
    :param toler: 容错率
    :param maxIter: 调整循环次数
    :return b: 调整完毕的b值
    :return alphas: 调整完毕的alphas值
    """
    dataMat = mat(dataMatIn)
    labelMat = mat(classLabels).transpose()
    m, n = shape(dataMat)

    # initialize alphas and b
    alphas = mat(zeros((m, 1)))
    b = 0

    # loop, adjusting alphas and b
    iter = 0
    while iter < maxIter:
        alphaPairsChanged = 0
        for i in range(m):
            # compute fx = wx + b, the classification value; w is a row vector,
            # x a column vector, and Lagrangian optimization gives w = sum_i alphai*yi*xi
            fXi = float(multiply(alphas, labelMat).T * \
                        (dataMat * dataMat[i, :].T)) + b
            # Ei is the gap between the computed and the expected classification value
            Ei = fXi - float(labelMat[i])

            # From the KKT conditions, alpha must be adjusted when any of the
            # following is violated:
            # 1) yi * fxi > 1 and ai > 0: point i is inside the margin, ai should be 0
            # 2) yi * fxi < 1 and ai < C: point i is between the margins, ai should be C
            # 3) yi * fxi == 1 and ai == 0 or ai == C: point i is on the margin, ai should lie between 0 and C
            # the check below adds the toler error tolerance
            if ((labelMat[i] * Ei < -toler and alphas[i] < C) or \
                    (labelMat[i] * Ei > toler and alphas[i] > 0)):
                j = selectJRand(i, m)  # randomly pick another point j
                print("\n")
                print("\n")
                print(
                    "########### alpha and b adjust begin for [%d %d] dot ###########" % (
                    i, j))
                # classification value for point j
                fXj = float(multiply(alphas, labelMat).T * \
                            (dataMat * dataMat[j, :].T)) + b
                # Ej is the gap between the computed and the expected value for j
                Ej = fXj - float(labelMat[j])

                # save the old alphas; plain "=" only binds a reference, so copy them
                alphaIold = alphas[i].copy()
                alphaJold = alphas[j].copy()
                print(
                    "alphaIold:%f alphaJold:%f labelMat[i]:%d labelMat[j]:%d" % (
                    alphaIold, alphaJold, labelMat[i], labelMat[j]))

                # compute the lower and upper clipping bounds for alpha
                if labelMat[i] != labelMat[j]:
                    L = max(0, alphaJold - alphaIold)
                    H = min(C, C + alphaJold - alphaIold)
                else:
                    L = max(0, alphaJold + alphaIold - C)
                    H = min(C, alphaJold + alphaIold)
                print("L~H:[%f~%f]" % (L, H))

                if L == H:
                    print("L==H")
                    continue

                # compute eta
                # η = 2 * xi * xj.T - xi * xi.T - xj * xj.T
                eta = 2.0 * dataMat[i, :] * dataMat[j, :].T - \
                      dataMat[i, :] * dataMat[i, :].T - \
                      dataMat[j, :] * dataMat[j, :].T
                print("eta:%f" % eta)
                if eta >= 0:
                    print("eta>=0")
                    continue

                # compute the new alphas[j]
                # alphaJnew = alphaJold - yj(Ei - Ej) / η
                alphas[j] = alphaJold - labelMat[j] * (Ei - Ej) / eta
                # clip alphaJnew into [L, H]
                alphas[j] = clipAlpha(alphas[j], H, L)
                print("alphaJnew:%f alphaJold:%f" % (alphas[j], alphaJold))
                if abs(alphas[j] - alphaJold) < 0.00001:
                    print("j not move enough")
                    continue

                # compute the new alphas[i]
                # alphaInew = alphaIold + yi*yj*(alphaJold - alphaJnewClipped)
                # alphaI changes by the same amount as alphaJ, possibly in the opposite direction
                alphas[i] = alphaIold + labelMat[i] * labelMat[j] * (
                            alphaJold - alphas[j])

                print("after adjust [alphaInew:%f alphaJnew:%f]" % (
                alphas[i], alphas[j]))

                # compute b1
                # b1New = bOld - Ei - yj * (alphaInew - alphaIold) * xi.T * xi - yj * (alphaJnew - alphaJold) * xj.T *xi
                b1 = b - Ei - labelMat[j] * (alphas[i] - alphaIold) * dataMat[i,
                                                                      :] * dataMat[
                                                                           i,
                                                                           :].T - \
                     labelMat[j] * (alphas[j] - alphaJold) * dataMat[i,
                                                             :] * dataMat[j,
                                                                  :].T
                # b2New = bOld - Ej - yi * (alphaInew - alphaIold) * xi.T * xj - yj * (alphaJnew - alphaJold) * xj.T *xj
                b2 = b - Ej - labelMat[i] * (alphas[i] - alphaIold) * dataMat[i,
                                                                      :] * dataMat[
                                                                           j,
                                                                           :].T - \
                     labelMat[j] * (alphas[j] - alphaJold) * dataMat[j,
                                                             :] * dataMat[j,
                                                                  :].T

                # choose the new b according to where alphaInew and alphaJnew fall
                if (0 < alphas[i] and alphas[i] < C):
                    b = b1
                elif (0 < alphas[j] and alphas[j] < C):
                    b = b2
                else:
                    b = (b1 + b2) / 2.0

                alphaPairsChanged += 1.0
                print("after adjust [b:%f]" % b)
                # print("########### alpha and b adjust end for [%d %d] dot ###########" % (i, j))
        # the stable end state: maxIter consecutive full passes over all points with no alpha adjustment
        print("alphaPairsChanged:%d" % alphaPairsChanged)
        if (alphaPairsChanged == 0):
            print("alpha pairs not changed")
            iter += 1
        else:
            iter = 0
        print("iter:", str(iter))
    print("########### alpha and b adjust end ###########")
    print("b:", str(b))
    print("alphas:", str(alphas))
    return b, alphas
Beispiel #50
0
N, K = map(int, input().split())
INF = int(1e9)
dp = [INF for _ in range(max(K + 2, N + 2))]
dp[N] = 0
if 2 * N < K + 2 and N != 0:
    dp[2 * N] = 1
# print(dp)
for i in range(N - 1, -1, -1):
    dp[i] = dp[i + 1] + 1
    if 2 * i < K + 2:
        dp[2 * i] = min(dp[i] + 1, dp[2 * i])
# print(dp)
for i in range(N + 1, K + 1):
    dp[i] = min(dp[i - 1] + 1, dp[i + 1] + 1, dp[i])
    if 2 * i < K + 2:
        dp[2 * i] = min(dp[i] + 1, dp[2 * i])
# print(dp)
print(dp[K])
Beispiel #51
0
def add_to_Out(H_o,
               pts,
               H,
               gen_specs,
               c_flag,
               persis_info,
               local_flag=0,
               sorted_run_inds=[],
               run=[],
               on_cube=True):
    """
    Adds points to H_o, the numpy structured array to be sent back to the manager
    """

    assert not local_flag or len(pts) == 1, "Can't add more than one local point at a time"

    original_len_O = len(H_o)

    len_H = len(H)
    ub = gen_specs['user']['ub']
    lb = gen_specs['user']['lb']
    if c_flag:
        m = gen_specs['user']['components']

        assert len_H % m == 0, "Number of points in H is not a multiple of 'components'"
        pt_ids = np.sort(
            np.tile(
                np.arange((len_H + original_len_O) / m,
                          (len_H + original_len_O) / m + len(pts)), (1, m)))
        pts = np.tile(pts, (m, 1))

    num_pts = len(pts)

    H_o.resize(len(H_o) + num_pts,
               refcheck=False)  # Adds num_pts rows of zeros to H_o

    if on_cube:
        H_o['x_on_cube'][-num_pts:] = pts
        H_o['x'][-num_pts:] = pts * (ub - lb) + lb
    else:
        H_o['x_on_cube'][-num_pts:] = (pts - lb) / (ub - lb)
        H_o['x'][-num_pts:] = pts

    H_o['sim_id'][-num_pts:] = np.arange(len_H + original_len_O,
                                         len_H + original_len_O + num_pts)
    H_o['local_pt'][-num_pts:] = local_flag

    H_o['dist_to_unit_bounds'][-num_pts:] = np.inf
    H_o['dist_to_better_l'][-num_pts:] = np.inf
    H_o['dist_to_better_s'][-num_pts:] = np.inf
    H_o['ind_of_better_l'][-num_pts:] = -1
    H_o['ind_of_better_s'][-num_pts:] = -1

    if c_flag:
        H_o['obj_component'][-num_pts:] = np.tile(range(0, m),
                                                  (1, num_pts // m))
        H_o['pt_id'][-num_pts:] = pt_ids

    if local_flag:
        H_o['num_active_runs'][-num_pts] += 1
        # H_o['priority'][-num_pts:] = 1
        # H_o['priority'][-num_pts:] = np.random.uniform(0,1,num_pts)
        if 'high_priority_to_best_localopt_runs' in gen_specs[
                'user'] and gen_specs['user'][
                    'high_priority_to_best_localopt_runs']:
            H_o['priority'][-num_pts:] = -min(
                H['f'][persis_info['run_order'][run]]
            )  # Give highest priority to run with lowest function value
        else:
            H_o['priority'][-num_pts:] = persis_info['rand_stream'].uniform(
                0, 1, num_pts)
        persis_info['run_order'][run].append(H_o[-num_pts]['sim_id'])
    else:
        if c_flag:
            # p_tmp = np.sort(np.tile(np.random.uniform(0,1,num_pts/m),(m,1))) # If you want all "duplicate points" to have the same priority (meaning libEnsemble gives them all at once)
            # p_tmp = np.random.uniform(0,1,num_pts)
            p_tmp = persis_info['rand_stream'].uniform(0, 1, num_pts)
        else:
            # p_tmp = np.random.uniform(0,1,num_pts)
            # persis_info['rand_stream'].uniform(lb,ub,(1,n))
            if 'high_priority_to_best_localopt_runs' in gen_specs[
                    'user'] and gen_specs['user'][
                        'high_priority_to_best_localopt_runs']:
                p_tmp = -np.inf * np.ones(num_pts)
            else:
                p_tmp = persis_info['rand_stream'].uniform(0, 1, num_pts)
        H_o['priority'][-num_pts:] = p_tmp
        # H_o['priority'][-num_pts:] = 1

    return persis_info
Beispiel #52
0
def innerLwithK(i, oS):
    """
    完整platt SMO算法中的优化例程,已知i点,调整alphas和b值, 和innerL函数的不同之处在于:
    使用核函数计算η:η = 2 * xi * xj.T - xi * xi.T - xj * xj.T => η = 2 * K[i, j]- K[i, i] - K[j, j]
    使用核函数计算b1: b1New = bOld - Ei - yj * (alphaInew - alphaIold) * xi.T * xi - yj * (alphaJnew - alphaJold) * xj.T *xi
    => b1New = bOld - Ei - yj * (alphaInew - alphaIold) * K[i, i] - yj * (alphaJnew - alphaJold) * K[j, i]
    使用核函数计算b2: b2New = bOld - Ej - yi * (alphaInew - alphaIold) * xi.T * xj - yj * (alphaJnew - alphaJold) * xj.T *xj
    => b2New = bOld - Ej - yi * (alphaInew - alphaIold) * K[i, j] - yj * (alphaJnew - alphaJold) * K[j, j]
    :param i: i点
    :param oS: 当前环境变量
    :return alphaPairsChanged: 代表alphas矩阵是否更新过
    """

    Ei = calcEkWithK(oS, i)  # refresh the Ei value for point i

    # From the KKT conditions, alpha must be adjusted when any of the following is violated:
    # 1) yi * fxi > 1 and ai > 0: point i is inside the margin, ai should be 0
    # 2) yi * fxi < 1 and ai < C: point i is between the margins, ai should be C
    # 3) yi * fxi == 1 and ai == 0 or ai == C: point i is on the margin, ai should lie between 0 and C
    # the check below adds the toler error tolerance
    if ((oS.labelMat[i] * Ei < -oS.toler and oS.alphas[i] < oS.C) or \
            (oS.labelMat[i] * Ei > oS.toler and oS.alphas[i] > 0)):
        j, Ej = selectJwithK(i, oS, Ei)  # select the second point j
        print("\n")
        print("\n")
        print(
            "########### alpha and b adjust begin for [%d %d] dot ###########" % (
            i, j))

        # save the old alphas; plain "=" only binds a reference, so copy them
        alphaIold = oS.alphas[i].copy()
        alphaJold = oS.alphas[j].copy()
        print("alphaIold:%f alphaJold:%f labelMat[i]:%d labelMat[j]:%d" % (
            alphaIold, alphaJold, oS.labelMat[i], oS.labelMat[j]))

        # compute the lower and upper clipping bounds for alpha
        if oS.labelMat[i] != oS.labelMat[j]:
            L = max(0, alphaJold - alphaIold)
            H = min(oS.C, oS.C + alphaJold - alphaIold)
        else:
            L = max(0, alphaJold + alphaIold - oS.C)
            H = min(oS.C, alphaJold + alphaIold)
        print("L~H:[%f~%f]" % (L, H))

        if L == H:
            print("L==H")
            return 0

        # compute eta
        # η = 2 * K[i, j]- K[i, i] - K[j, j]
        eta = 2.0 * oS.K[i, j] - oS.K[i, i] - oS.K[j, j]
        print("eta:%f" % eta)
        if eta >= 0:
            print("eta>=0")
            return 0

        # compute the new alphas[j]
        # alphaJnew = alphaJold - yj(Ei - Ej) / η
        oS.alphas[j] = alphaJold - oS.labelMat[j] * (Ei - Ej) / eta
        # clip alphaJnew into [L, H]
        oS.alphas[j] = clipAlpha(oS.alphas[j], H, L)
        updateEkWithK(oS, j)
        print("alphaJnew:%f alphaJold:%f" % (oS.alphas[j], alphaJold))
        if abs(oS.alphas[j] - alphaJold) < 0.00001:
            print("j not move enough")
            return 0

        # compute the new alphas[i]
        # alphaInew = alphaIold + yi*yj*(alphaJold - alphaJnewClipped)
        # alphaI changes by the same amount as alphaJ, possibly in the opposite direction
        oS.alphas[i] = alphaIold + oS.labelMat[i] * oS.labelMat[j] * (
                    alphaJold - oS.alphas[j])
        updateEkWithK(oS, i)

        print("after adjust [alphaInew:%f alphaJnew:%f]" % (
        oS.alphas[i], oS.alphas[j]))

        # compute b1
        # b1New = bOld - Ei - yj * (alphaInew - alphaIold) * K[i, i] - yj * (alphaJnew - alphaJold) * K[j, i]
        b1 = oS.b - Ei - oS.labelMat[j] * (oS.alphas[i] - alphaIold) * oS.K[
            i, i] - \
             oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j, i]
        # b2New = bOld - Ej - yi * (alphaInew - alphaIold) * K[i, j] - yj * (alphaJnew - alphaJold) * K[j, j]
        b2 = oS.b - Ej - oS.labelMat[i] * (oS.alphas[i] - alphaIold) * oS.K[
            i, j] - \
             oS.labelMat[j] * (oS.alphas[j] - alphaJold) * oS.K[j, j]

        # choose the new b according to where alphaInew and alphaJnew fall
        if (0 < oS.alphas[i] and oS.alphas[i] < oS.C):
            oS.b = b1
        elif (0 < oS.alphas[j] and oS.alphas[j] < oS.C):
            oS.b = b2
        else:
            oS.b = (b1 + b2) / 2.0

        print("after adjust [b:%f]" % oS.b)
        return 1
    else:
        return 0
Beispiel #53
0
    def closestValue(self, root: TreeNode, target: float) -> int:
        closest = root.val
        while root:
            closest = min(closest, root.val, key=lambda x: abs(target - x))
            root = root.left if target < root.val else root.right
        return closest
Beispiel #54
0
def update_history_dist(H, n, gen_specs, c_flag):
    """
    Updates distances/indices after new points have been evaluated.

    .. seealso::
        `start_persistent_local_opt_gens.py <https://github.com/Libensemble/libensemble/blob/develop/libensemble/alloc_funcs/start_persistent_local_opt_gens.py>`_
    """

    updated_inds = set()

    new_inds = np.where(~H['known_to_aposmm'])[0]

    if c_flag:
        for v in np.unique(H['pt_id'][new_inds]):
            inds = H['pt_id'] == v
            H['f'][inds] = np.inf
            H['f'][np.where(inds)[0]
                   [0]] = gen_specs['user']['combine_component_func'](
                       H['f_i'][inds])

        p = np.logical_and.reduce(
            (H['returned'], H['obj_component'] == 0, ~np.isnan(H['f'])))
    else:
        p = np.logical_and.reduce((H['returned'], ~np.isnan(H['f'])))

    for new_ind in new_inds:
        # Loop over new returned points and update their distances
        if p[new_ind]:
            H['known_to_aposmm'][new_ind] = True

            # Compute distance to boundary
            H['dist_to_unit_bounds'][new_ind] = min(
                min(np.ones(n) - H['x_on_cube'][new_ind]),
                min(H['x_on_cube'][new_ind] - np.zeros(n)))

            dist_to_all = cdist(H['x_on_cube'][[new_ind]], H['x_on_cube'][p],
                                'euclidean').flatten()
            new_better_than = H['f'][new_ind] < H['f'][p]

            # Update any other points if new_ind is closer and better
            if H['local_pt'][new_ind]:
                inds_of_p = np.logical_and(
                    dist_to_all < H['dist_to_better_l'][p], new_better_than)
                updates = np.where(p)[0][inds_of_p]
                H['dist_to_better_l'][updates] = dist_to_all[inds_of_p]
                H['ind_of_better_l'][updates] = new_ind
            else:
                inds_of_p = np.logical_and(
                    dist_to_all < H['dist_to_better_s'][p], new_better_than)
                updates = np.where(p)[0][inds_of_p]
                H['dist_to_better_s'][updates] = dist_to_all[inds_of_p]
                H['ind_of_better_s'][updates] = new_ind
            updated_inds.update(updates)

            # Since we allow equality when deciding better_than_new_l and
            # better_than_new_s, we have to prevent new_ind from being its own
            # better point.
            better_than_new_l = np.logical_and.reduce(
                (~new_better_than, H['local_pt'][p],
                 H['sim_id'][p] != new_ind))
            better_than_new_s = np.logical_and.reduce(
                (~new_better_than, ~H['local_pt'][p],
                 H['sim_id'][p] != new_ind))

            # Who is closest to ind and better
            if np.any(better_than_new_l):
                ind = dist_to_all[better_than_new_l].argmin()
                H['ind_of_better_l'][new_ind] = H['sim_id'][p][np.nonzero(
                    better_than_new_l)[0][ind]]
                H['dist_to_better_l'][new_ind] = dist_to_all[
                    better_than_new_l][ind]

            if np.any(better_than_new_s):
                ind = dist_to_all[better_than_new_s].argmin()
                H['ind_of_better_s'][new_ind] = H['sim_id'][p][np.nonzero(
                    better_than_new_s)[0][ind]]
                H['dist_to_better_s'][new_ind] = dist_to_all[
                    better_than_new_s][ind]

            # if not ignore_L8:
            #     r_k = calc_rk(len(H['x_on_cube'][0]), n_s, rk_const, lhs_divisions)
            #     H['worse_within_rk'][new_ind][p] = np.logical_and.reduce((H['f'][new_ind] <= H['f'][p], dist_to_all <= r_k))

            #     # Add trues if new point is 'worse_within_rk'
            #     inds_to_change = np.logical_and.reduce((H['dist_to_all'][p,new_ind] <= r_k, H['f'][new_ind] >= H['f'][p], H['sim_id'][p] != new_ind))
            #     H['worse_within_rk'][inds_to_change,new_ind] = True

            #     if not H['local_pt'][new_ind]:
            #         H['worse_within_rk'][H['dist_to_all'] > r_k] = False

    updated_inds.update(new_inds)
    return updated_inds
Beispiel #55
0
            prices[stk.pop()] -= p
        stk.append(i)
    return prices


# print(finalPrices([8, 4, 6, 2, 3]))
# print('True or false')
# print(n and (p[n[-1]] >= 8))
# print(p[n.pop()])
import collections

moves = 'UUUDDDLLL'
c = collections.Counter(moves)
print(c.items())
# for k, v in c.items():
#     print(k, v)
print(round(4.42))
print(min(1, 2))

ar = [[1], [2, 3]]
print(ar.index([1]))
name = "Gonzalo"
print(name.find("w"))
ar2 = [[1], [2, 3, 4]]
ar3 = [1, 2, 3, 4]
for n in ar2:
    print(ar3[1:len(n)] == n)
art = [1, 2, 3, 4]
# art2 = [6, 2, 6, 5, 1, 2]
# art2.sort()
# print(art[::2])
# print(art2[::2])
print(art[-1:-4:-1])
Beispiel #56
0
def main(arguments = None):
    """ main() function, encapsulated in a method to allow for easy invokation.

    This method follows Guido van Rossum's suggestions on how to write Python
    main() functions in order to make them more flexible. By encapsulating the
    main code of the script in a function and making it take an optional
    argument the script can be called not only from other modules, but also
    from the interactive Python prompt.

    Guido van Rossum - Python main() functions:
    http://www.artima.com/weblogs/viewpost.jsp?thread=4829

    Keyword arguments:
    arguments - the list of command line arguments passed to the script.

    """

    if arguments is None:
        arguments = sys.argv[1:]  # ignore argv[0], the script name
    (options, args) = parser.parse_args(args = arguments)

    # Adjust the logger level to WARNING, INFO or DEBUG, depending on the
    # given number of -v options (none, one or two or more, respectively)
    logging_level = logging.WARNING
    if options.verbose == 1:
        logging_level = logging.INFO
    elif options.verbose >= 2:
        logging_level = logging.DEBUG
    logging.basicConfig(format = style.LOG_FORMAT, level = logging_level)

    # Print the help and abort the execution if there are fewer than two
    # positional arguments left after parsing the options, as the user must
    # specify the sources image, the input FITS file(s) and the output JSON file.
    if len(args) < 2:
        parser.print_help()
        return 2     # 2 is generally used for command line syntax errors
    else:
        sources_img_path = args[0]
        input_paths = list(set(args[1:-1]))
        output_json_path = args[-1]

    # The execution of this module, especially when doing long-term monitoring
    # of reasonably crowded fields, may easily take several *days*. The least
    # we can do, in order to spare the end-user from insufferable grief because
    # of the waste of billions of valuable CPU cycles, is to avoid to have the
    # output file accidentally overwritten.

    if os.path.exists(output_json_path):
        if not options.overwrite:
            msg = "%sError. The output file '%s' already exists."
            print msg % (style.prefix, output_json_path)
            print style.error_exit_message
            return 1

    msg = "%sExamining the headers of the %s FITS files given as input..."
    print msg % (style.prefix, len(input_paths))

    files = fitsimage.InputFITSFiles()
    for index, img_path in enumerate(input_paths):
        img = fitsimage.FITSImage(img_path)
        pfilter = img.pfilter(options.filterk)
        files[pfilter].append(img)

        percentage = (index + 1) / len(input_paths) * 100
        methods.show_progress(percentage)

    print # progress bar doesn't include newline
    print style.prefix

    # To begin with, we need to identify the most constant stars, something for
    # which we have to do photometry on all the stars and for all the images of
    # the campaign. But fret not, as this has to be done only this time: once
    # we get the light curves of all the stars and for all the images, we will
    # be able to determine which are the most constant among them and work
    # always with this subset in order to determine which aperture and sky
    # annulus are the optimal.

    msg = "%sDoing initial photometry with FWHM-derived apertures..."
    print msg % style.prefix
    print style.prefix

    # mkstemp() returns a tuple containing an OS-level handle to an open file
    # and its absolute pathname. Thus, we need to close the file right after
    # creating it, and tell the photometry module to overwrite (-w) it.

    kwargs = dict(prefix = 'photometry_', suffix = '.LEMONdB')
    phot_db_handle, phot_db_path = tempfile.mkstemp(**kwargs)
    atexit.register(methods.clean_tmp_files, phot_db_path)
    os.close(phot_db_handle)

    basic_args = [sources_img_path] + input_paths + \
                 [phot_db_path, '--overwrite']

    phot_args = ['--maximum', options.maximum,
                 '--margin', options.margin,
                 '--cores', options.ncores,
                 '--min-sky', options.min,
                 '--objectk', options.objectk,
                 '--filterk', options.filterk,
                 '--datek', options.datek,
                 '--timek', options.timek,
                 '--expk', options.exptimek,
                 '--coaddk', options.coaddk,
                 '--gaink', options.gaink,
                 '--fwhmk', options.fwhmk,
                 '--airmk', options.airmassk]

    # The --gain and --uik options default to None, so add them to the list of
    # arguments only if they were given. Otherwise, (a) --gaink would be given
    # a value of 'None', a string, which would result in an error when optparse
    # attempted to convert it to float, and (b) --uik would understand 'None'
    # as the name of the keyword storing the path to the uncalibrated image.

    if options.gain:
        phot_args += ['--gain', options.gain]

    if options.uncimgk:
        phot_args += ['--uncimgk', options.uncimgk]

    # Pass as many '-v' options as we have received here
    phot_args.extend(['-v'] * options.verbose)

    extra_args = ['--aperture', options.aperture,
                  '--annulus', options.annulus,
                  '--dannulus', options.dannulus]

    # Non-zero return codes raise subprocess.CalledProcessError
    args = basic_args + phot_args + extra_args
    check_run(photometry.main, [str(a) for a in args])

    # Now we need to compute the light curves and find those that are most
    # constant. This, of course, has to be done for each filter, as a star
    # identified as constant in Johnson I may be too faint in Johnson B, for
    # example. In other words: we need to calculate the light curve of each
    # star and for each filter, and then determine which are the
    # options.nconstant stars with the lowest standard deviation.

    print style.prefix
    msg = "%sGenerating light curves for initial photometry."
    print msg % style.prefix
    print style.prefix

    kwargs = dict(prefix = 'diffphot_', suffix = '.LEMONdB')
    diffphot_db_handle, diffphot_db_path = tempfile.mkstemp(**kwargs)
    atexit.register(methods.clean_tmp_files, diffphot_db_path)
    os.close(diffphot_db_handle)

    diff_args = [phot_db_path,
                 '--output', diffphot_db_path, '--overwrite',
                 '--cores', options.ncores,
                 '--minimum-images', options.min_images,
                 '--stars', options.nconstant,
                 '--minimum-stars', options.min_cstars,
                 '--pct', options.pct,
                 '--weights-threshold', options.wminimum,
                 '--max-iters', options.max_iters,
                 '--worst-fraction', options.worst_fraction]

    diff_args.extend(['-v'] * options.verbose)

    check_run(diffphot.main, [str(a) for a in diff_args])
    print style.prefix

    # Map each photometric filter to the path of the temporary file where the
    # right ascension and declination of each constant star, one per line, will
    # be saved. This file is from now on passed, along with the --coordinates
    # option, to photometry.main(), so that photometry is not done on all the
    # astronomical objects, but instead exclusively on these ones.

    coordinates_files = {}

    miner = mining.LEMONdBMiner(diffphot_db_path)
    for pfilter in miner.pfilters:

        # LEMONdBMiner.sort_by_curve() returns a list of two-element tuples,
        # mapping the ID of each star to the standard deviation of its light
        # curve in this photometric filter. The list is sorted in increasing
        # order by the standard deviation. We are only interested in the first
        # 'options.nconstant', needing at least 'options.pminimum'.

        msg = "%sIdentifying the %d most constant stars for the %s filter..."
        args = style.prefix, options.nconstant, pfilter
        print msg % args ,
        sys.stdout.flush()

        kwargs = dict(minimum = options.min_images)
        stars_stdevs = miner.sort_by_curve_stdev(pfilter, **kwargs)
        cstars = stars_stdevs[:options.nconstant]

        if len(cstars) < options.pminimum:
            msg = ("fewer than %d stars identified as constant in the "
                   "initial photometry for the %s filter")
            args = options.pminimum, pfilter
            raise NotEnoughConstantStars(msg % args)
        else:
            print 'done.'

        if len(cstars) < options.nconstant:
            msg = "%sBut only %d stars were available. Using them all, anyway."
            print msg % (style.prefix, len(cstars))

        # Replacing whitespaces with underscores is easier than having to quote
        # the path to the --coordinates file if the name of the filter contains
        # them (otherwise, optparse would only see up to the first whitespace).
        prefix = '%s_' % str(pfilter).replace(' ', '_')
        kwargs = dict(prefix = prefix, suffix = '.coordinates')
        coords_fd, coordinates_files[pfilter] = tempfile.mkstemp(**kwargs)
        atexit.register(methods.clean_tmp_files, coordinates_files[pfilter])

        # LEMONdBMiner.get_star() returns a five-element tuple with the x and y
        # coordinates, right ascension, declination and instrumental magnitude
        # of the astronomical object in the sources image.
        for star_id, _ in cstars:
            ra, dec = miner.get_star(star_id)[2:4]
            os.write(coords_fd, "%.10f\t%.10f\n" % (ra, dec))
        os.close(coords_fd)

        msg = "%sStar coordinates for %s temporarily saved to %s"
        print msg % (style.prefix, pfilter, coordinates_files[pfilter])

    # The constant astronomical objects, the only ones to which we will pay
    # attention from now on, have been identified. So far, so good. Now we
    # generate the light curves of these objects for each candidate set of
    # photometric parameters. We store the evaluated values in a dictionary in
    # which each filter maps to a list of json_parse.CandidateAnnuli objects.

    evaluated_annuli = collections.defaultdict(list)

    for pfilter, coords_path in coordinates_files.iteritems():

        print style.prefix
        msg = "%sFinding the optimal photometric parameters for the %s filter."
        print msg % (style.prefix, pfilter)

        if len(files[pfilter]) < options.min_images:
            msg = "fewer than %d images (--minimum-images option) for %s"
            args = options.min_images, pfilter
            raise NotEnoughConstantStars(msg % args)

        # The median FWHM of the images is needed in order to calculate the
        # range of apertures that we need to evaluate for this filter.

        msg = "%sCalculating the median FWHM for this filter..."
        print msg % style.prefix ,

        pfilter_fwhms = []
        for img in files[pfilter]:
            img_fwhm = photometry.get_fwhm(img, options)
            logging.debug("%s: FWHM = %.3f" % (img.path, img_fwhm))
            pfilter_fwhms.append(img_fwhm)

        fwhm = numpy.median(pfilter_fwhms)
        print ' done.'

        # FWHM to range of pixels conversion
        min_aperture = fwhm * options.lower
        max_aperture = fwhm * options.upper
        annulus      = fwhm * options.sky
        dannulus     = fwhm * options.width

        # The dimensions of the sky annulus remain fixed, while the
        # aperture is in the range [lower * FWHM, upper * FWHM], with
        # increments of options.step pixels.
        filter_apertures = numpy.arange(min_aperture, max_aperture, options.step)
        assert filter_apertures[0] == min_aperture

        msg = "%sFWHM (%s passband) = %.3f pixels, therefore:"
        print msg % (style.prefix, pfilter, fwhm)
        msg = "%sAperture radius, minimum = %.3f x %.2f = %.3f pixels "
        print msg % (style.prefix, fwhm, options.lower, min_aperture)
        msg = "%sAperture radius, maximum = %.3f x %.2f = %.3f pixels "
        print msg % (style.prefix, fwhm, options.upper, max_aperture)
        msg = "%sAperture radius, step = %.2f pixels, which means that:"
        print msg % (style.prefix, options.step)

        msg = "%sAperture radius, actual maximum = %.3f + %d x %.2f = %.3f pixels"
        args = (style.prefix, min_aperture, len(filter_apertures),
                options.step, max(filter_apertures))
        print msg % args

        msg = "%sSky annulus, inner radius = %.3f x %.2f = %.3f pixels"
        print msg % (style.prefix, fwhm, options.sky, annulus)
        msg = "%sSky annulus, width = %.3f x %.2f = %.3f pixels"
        print msg % (style.prefix, fwhm, options.width, dannulus)

        msg = "%s%d different apertures in the range [%.2f, %.2f] to be evaluated:"
        args = (style.prefix, len(filter_apertures),
                filter_apertures[0], filter_apertures[-1])
        print msg % args

        # For each candidate aperture, and only with the images taken in
        # this filter, do photometry on the constant stars and compute the
        # median of the standard deviation of their light curves as a means
        # of evaluating the suitability of this combination of parameters.
        for index, aperture in enumerate(filter_apertures):

            print style.prefix

            kwargs = dict(prefix = 'photometry_', suffix = '.LEMONdB')
            fd, aper_phot_db_path = tempfile.mkstemp(**kwargs)
            atexit.register(methods.clean_tmp_files, aper_phot_db_path)
            os.close(fd)

            paths = [img.path for img in files[pfilter]]
            basic_args = [sources_img_path] + paths + \
                         [aper_phot_db_path, '--overwrite']

            extra_args = ['--filter', str(pfilter),
                          '--coordinates', coords_path,
                          '--aperture-pix', aperture,
                          '--annulus-pix', annulus,
                          '--dannulus-pix', dannulus]

            args = basic_args + phot_args + extra_args
            check_run(photometry.main, [str(a) for a in args])

            kwargs = dict(prefix = 'diffphot_', suffix = '.LEMONdB')
            fd, aper_diff_db_path = tempfile.mkstemp(**kwargs)
            atexit.register(methods.clean_tmp_files, aper_diff_db_path)
            os.close(fd)

            # Reuse the arguments used earlier for diffphot.main(). We only
            # need to change the first argument (path to the input LEMONdB)
            # and the third one (path to the output LEMONdB)
            diff_args[0] = aper_phot_db_path
            diff_args[2] = aper_diff_db_path
            check_run(diffphot.main, [str(a) for a in diff_args])

            miner = mining.LEMONdBMiner(aper_diff_db_path)

            try:
                kwargs = dict(minimum = options.min_images)
                cstars = miner.sort_by_curve_stdev(pfilter, **kwargs)
            except mining.NoStarsSelectedError:
                # There are no light curves with at least options.min_images points.
                # Therefore, much to our sorrow, we cannot evaluate this aperture.
                msg = "%sNo constant stars for this aperture. Ignoring it..."
                print msg % style.prefix
                continue

            # There must be at most 'nconstant' stars, but there may be fewer
            # if this aperture causes one or more of the constant stars to be
            # too faint (INDEF) in so many images as to prevent their light
            # curves from being computed.
            assert len(cstars) <= options.nconstant

            if len(cstars) < options.pminimum:
                msg = ("%sJust %d constant stars, fewer than the allowed "
                       "minimum of %d, had their light curves calculated "
                       "for this aperture. Ignoring it...")
                args = style.prefix, len(cstars), options.pminimum
                print msg % args
                continue

            # 'cstars' contains two-element tuples: (ID, stdev)
            stdevs_median = numpy.median([x[1] for x in cstars])
            params = (aperture, annulus, dannulus, stdevs_median)
            # NumPy floating-point data types are not JSON serializable
            args = (float(x) for x in params)
            candidate = json_parse.CandidateAnnuli(*args)
            evaluated_annuli[pfilter].append(candidate)

            msg = "%sAperture = %.3f, median stdev (%d stars) = %.4f"
            args = style.prefix, aperture, len(cstars), stdevs_median
            print msg % args

            percentage = (index + 1) / len(filter_apertures) * 100
            msg = "%s%s progress: %.2f %%"
            args = style.prefix, pfilter, percentage
            print msg % args

        # Let the user know of the best 'annuli', that is, the one for
        # which the standard deviation of the constant stars is minimal
        kwargs = dict(key = operator.attrgetter('stdev'))
        best_candidate = min(evaluated_annuli[pfilter], **kwargs)

        msg = "%sBest aperture found at %.3f pixels with stdev = %.4f"
        args = style.prefix, best_candidate.aperture, best_candidate.stdev
        print msg % args

    print style.prefix
    msg = "%sSaving the evaluated apertures to the '%s' JSON file ..."
    print msg % (style.prefix, output_json_path) ,
    json_parse.CandidateAnnuli.dump(evaluated_annuli, output_json_path)
    print ' done.'

    print "%sYou're done ^_^" % style.prefix
    return 0
Beispiel #57
0
def solve_btfch(puzzle):
    # get a list of the empty squares (remaining variables)
    empty_squares = get_empty_squares(puzzle)

    # if there are no remaining empty squares we're done
    if len(empty_squares) == 0:
        print("Woohoo, success! Check it out:")
        print_puzzle(puzzle)
        return 1

    # find the most constrained square (one with least remaining values)
    remaining_values = get_remaining_values(puzzle)
    # number of remaining candidate values for each empty square
    mrv_list = [len(remaining_values[square[0] * 9 + square[1]])
                for square in empty_squares]
    # make a list of the squares with the minimum remaining values (mrv)
    mrv_squares = []
    minimum = min(mrv_list)
    for i in range(len(mrv_list)):
        value = mrv_list[i]
        if value == minimum:
            mrv_squares.append(empty_squares[i])

    # if there are no ties, take the square with the MRV
    if len(mrv_squares) == 1:
        square = mrv_squares[0]
    else:
        # otherwise, find the most constraining variable (variable with highest degree)
        degree_list = []
        for cell in mrv_squares:
            degree = get_degree(cell, puzzle)
            degree_list.append(degree)

        max_degree = max(degree_list)
        max_degree_squares = []
        for i in range(len(degree_list)):
            value = degree_list[i]
            if value == max_degree:
                max_degree_squares.append(mrv_squares[i])
        # just take the first square as a tie-breaker
        square = max_degree_squares[0]

    row = square[0]
    col = square[1]

    values = list(remaining_values[col + row * 9])

    while len(values) != 0:

        lcv_list = get_lcv(values, row, col, remaining_values)
        # take the least constraining value
        value = values[lcv_list.index(min(lcv_list))]
        values.remove(value)
        if forward_check(remaining_values, value, row, col):
            puzzle[row][col] = value
            if solve_btfch(puzzle):
                return 1
            else:
                puzzle[row][col] = 0

    return 0
Beispiel #58
0
def evaluate_box_proposals(json_dataset,
                           roidb,
                           thresholds=None,
                           area='all',
                           limit=None):
    """Evaluate detection proposal recall metrics. This function is a much
    faster alternative to the official COCO API recall evaluation code. However,
    it produces slightly different results.
    """
    # Record max overlap value for each gt box
    # Return vector of overlap values
    areas = {
        'all': 0,
        'small': 1,
        'medium': 2,
        'large': 3,
        '96-128': 4,
        '128-256': 5,
        '256-512': 6,
        '512-inf': 7
    }
    area_ranges = [
        [0**2, 1e5**2],  # all
        [0**2, 32**2],  # small
        [32**2, 96**2],  # medium
        [96**2, 1e5**2],  # large
        [96**2, 128**2],  # 96-128
        [128**2, 256**2],  # 128-256
        [256**2, 512**2],  # 256-512
        [512**2, 1e5**2]
    ]  # 512-inf
    assert area in areas, 'Unknown area range: {}'.format(area)
    area_range = area_ranges[areas[area]]
    gt_overlaps = np.zeros(0)
    num_pos = 0
    for entry in roidb:
        gt_inds = np.where((entry['gt_classes'] > 0)
                           & (entry['is_crowd'] == 0))[0]
        gt_boxes = entry['boxes'][gt_inds, :]
        gt_areas = entry['seg_areas'][gt_inds]
        valid_gt_inds = np.where((gt_areas >= area_range[0])
                                 & (gt_areas <= area_range[1]))[0]
        gt_boxes = gt_boxes[valid_gt_inds, :]
        num_pos += len(valid_gt_inds)
        non_gt_inds = np.where(entry['gt_classes'] == 0)[0]
        boxes = entry['boxes'][non_gt_inds, :]
        if boxes.shape[0] == 0:
            continue
        if limit is not None and boxes.shape[0] > limit:
            boxes = boxes[:limit, :]
        overlaps = box_utils.bbox_overlaps(
            boxes.astype(dtype=np.float32, copy=False),
            gt_boxes.astype(dtype=np.float32, copy=False))
        _gt_overlaps = np.zeros((gt_boxes.shape[0]))
        for j in range(min(boxes.shape[0], gt_boxes.shape[0])):
            # find which proposal box maximally covers each gt box
            argmax_overlaps = overlaps.argmax(axis=0)
            # and get the iou amount of coverage for each gt box
            max_overlaps = overlaps.max(axis=0)
            # find which gt box is 'best' covered (i.e. 'best' = most iou)
            gt_ind = max_overlaps.argmax()
            gt_ovr = max_overlaps.max()
            assert gt_ovr >= 0
            # find the proposal box that covers the best covered gt box
            box_ind = argmax_overlaps[gt_ind]
            # record the iou coverage of this gt box
            _gt_overlaps[j] = overlaps[box_ind, gt_ind]
            assert _gt_overlaps[j] == gt_ovr
            # mark the proposal box and the gt box as used
            overlaps[box_ind, :] = -1
            overlaps[:, gt_ind] = -1
        # append recorded iou coverage level
        gt_overlaps = np.hstack((gt_overlaps, _gt_overlaps))

    gt_overlaps = np.sort(gt_overlaps)
    if thresholds is None:
        step = 0.05
        thresholds = np.arange(0.5, 0.95 + 1e-5, step)
    recalls = np.zeros_like(thresholds)
    # compute recall for each iou threshold
    for i, t in enumerate(thresholds):
        recalls[i] = (gt_overlaps >= t).sum() / float(num_pos)
    # ar = 2 * np.trapz(recalls, thresholds)
    ar = recalls.mean()
    return {
        'ar': ar,
        'recalls': recalls,
        'thresholds': thresholds,
        'gt_overlaps': gt_overlaps,
        'num_pos': num_pos
    }
Beispiel #59
0
def XvmScaleToSup(x=None):
    if x is None:
        return None
    return xvm_scale_data.xvm2sup[min(100, x)-1] if x > 0 else 0.0
Beispiel #60
0
alpha = float(input('Enter the step size: '))
l = 60
n = int(abs(b - a) / abs(alpha) + 1)
y = n * [0]
x = n * [0]

for i in range(n):
    f2 = a * a - 36
    y[i] = round(f2, 2)
    x[i] = round(a, 2)
    a += alpha

print('Plot of f2')
l = 60
maxy = max(y)
miny = min(y)
if max(y) < 0:
    maxy = 0
if min(y) > 0:
    miny = 0
mo = int(abs((-miny) * l / (maxy - miny)))
if 0 in x:
    for i in range(len(x)):
        m = int(abs((y[i] - miny) * l / (maxy - miny)))  # column position scaled to plot width l, matching mo

        if x[i] == 0:
            if m == mo:
                print('{:5.10f}'.format(x[i]),
                      '  ',
                      m * '\u2500',
                      '*', (l - m - 1) * '\u2500',