Example #1
def compare_chebhist(dname, mylambda, c, Nbin=25):
    if mylambda == 'Do not exist':
        print('--!!Warning: eig file does not exist, cannot display comparison histogram')
    else:
        mylambda = 1 - mylambda
        lmin = max(min(mylambda), -1)
        lmax = min(max(mylambda),  1)

        # print c
        cheb_file_content = '\n'.join([str(st) for st in c])
        x = np.linspace(lmin, lmax, Nbin + 1)
        y = plot_chebint(c, x)
        u = (x[1:] + x[:-1]) / 2
        v =  y[1:] - y[:-1]

        plt.clf()
        plt.hold(True)
        plt.hist(mylambda,Nbin)
        plt.plot(u, v, "r.", markersize=10)
        plt.hold(False)
        plt.show()
        filename = 'data/' + dname + '.png'
        plt.savefig(filename)

        cheb_filename = 'data/' + dname + '.cheb'
        f = open(cheb_filename, 'w+')
        f.write(cheb_file_content)
        f.close()
Example #2
            def propagateConstraints_int(x, y):
                if x is None or y is None:
                    return None, None
                x1, x2, y1, y2 = x.min, x.max, y.min, y.max
                if cmp_t == 'ge' or cmp_t == 'gt':
                    x1, x2, y1, y2 = y1, y2, x1, x2 

                # Treat greater-than like less-than: swap the operands before and after.
                if cmp_t == 'lt' or cmp_t == 'gt':
                    x2 = min(x2, y2-1)
                    y1 = max(x1+1, y1)
                elif cmp_t == 'le' or cmp_t == 'ge':
                    x2 = min(x2, y2)
                    y1 = max(x1, y1)
                elif cmp_t == 'eq':
                    x1 = y1 = max(x1, y1)
                    x2 = y2 = min(x2, y2)
                elif cmp_t == 'ne':
                    if x1 == x2 == y1 == y2:
                        return None, None
                    if x1 == x2:
                        y1 = y1 if y1 != x1 else y1+1
                        y2 = y2 if y2 != x2 else y2-1               
                    if y1 == y2:
                        x1 = x1 if x1 != y1 else x1+1
                        x2 = x2 if x2 != y2 else x2-1

                if cmp_t == 'ge' or cmp_t == 'gt':
                    x1, x2, y1, y2 = y1, y2, x1, x2 
                con1 = IntConstraint.range(x.width, x1, x2) if x1 <= x2 else None   
                con2 = IntConstraint.range(y.width, y1, y2) if y1 <= y2 else None   
                return con1, con2
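A minimal stand-alone sketch of the same interval-narrowing idea for the 'lt' case, using plain (lo, hi) tuples instead of the IntConstraint objects the snippet relies on; the helper name narrow_lt is hypothetical.

def narrow_lt(x, y):
    """Narrow integer ranges x = (lo, hi) and y = (lo, hi) under the constraint x < y."""
    x1, x2 = x
    y1, y2 = y
    x2 = min(x2, y2 - 1)   # x must stay strictly below y's maximum
    y1 = max(x1 + 1, y1)   # y must stay strictly above x's minimum
    if x1 > x2 or y1 > y2:
        return None, None  # the constraint cannot be satisfied
    return (x1, x2), (y1, y2)

print(narrow_lt((0, 10), (3, 5)))  # -> ((0, 4), (3, 5))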
Example #3
    def updateValue(self, delta_x, delta_y):
        newTs = self.tsStart + Zoomable.pixelToNs(delta_x)
        newValue = self.valueStart - (delta_y / EXPANDED_SIZE)

        # Don't overlap first and last keyframes.
        newTs = min(max(newTs, self.inpoint + 1),
                    self.duration + self.inpoint - 1)

        newValue = min(max(newValue, 0.0), 1.0)

        if not self.has_changeable_time:
            newTs = self.lastTs

        updating = self.timelineElement.updating_keyframes
        self.timelineElement.updating_keyframes = True
        self.timelineElement.source.unset(self.lastTs)
        if (self.timelineElement.source.set(newTs, newValue)):
            self.value = Gst.TimedValue()
            self.value.timestamp = newTs
            self.value.value = newValue
            self.lastTs = newTs

            self.timelineElement.setKeyframePosition(self, self.value)
            # Re-sort the keyframes list each time. This should be cheap since there
            # should never be too many keyframes; if optimization is needed, first check
            # whether re-sorting is actually required (it should not be in 99% of cases).
            self.timelineElement.keyframes = sorted(
                self.timelineElement.keyframes, key=lambda keyframe: keyframe.value.timestamp)
            self.timelineElement.drawLines(self.line)
            # This will update the viewer. nifty.
            if not self.line:
                self.timelineElement.timeline._container.seekInPosition(
                    newTs + self.start)

        self.timelineElement.updating_keyframes = updating
Example #4
    def LoadTabs(self, tabs, autoselecttab = 1, settingsID = None, iconOnly = False, silently = False):
        self._iconOnly = iconOnly
        self.sr.tabs = []
        self.sr.mytabs = []
        self.sr.tabsmenu = None
        self.Flush()
        self.Prepare_LeftSide_()
        maxTextHeight = 0
        for data in tabs:
            newtab = self.GetTabClass()(parent=self)
            self.sr.mytabs.append(newtab)
            newtab.Startup(self, data)
            newtab.align = uiconst.TOLEFT
            self.sr.Set('%s_tab' % data.label, newtab)
            self.sr.tabs.append(newtab)
            maxTextHeight = max(maxTextHeight, newtab.sr.label.textheight)
            if newtab.sr.icon:
                maxTextHeight = max(maxTextHeight, newtab.sr.icon.height)

        self.Prepare_RightSide_()
        self.height = max(MINTABGROUPHEIGHT, int(maxTextHeight * 1.7))
        self._inited = 1
        self._settingsID = settingsID
        self.UpdateSizes()
        if autoselecttab:
            self.AutoSelect(silently)
Example #5
 def computeSourceRevision(self, changes):
     if not changes:
         return None
     lastChange = max([c.when for c in changes])
     lastSubmit = max([br.submittedAt for br in self.build.requests])
     when = (lastChange + lastSubmit) / 2
     return formatdate(when)
Example #6
    def Slow(self):
        """Set slow timing values

        Currently this changes the timing in the following ways:
        timeouts = default timeouts * 10
        waits = default waits * 3
        retries = default retries * 3

        (if existing times are already slower, the existing times are kept)
        """
        for setting in TimeConfig.__default_timing:
            if "_timeout" in setting:
                TimeConfig._timings[setting] = max(
                    TimeConfig.__default_timing[setting] * 10,
                    TimeConfig._timings[setting])

            if "_wait" in setting:
                TimeConfig._timings[setting] = max(
                    TimeConfig.__default_timing[setting] * 3,
                    TimeConfig._timings[setting])

            elif setting.endswith("_retry"):
                TimeConfig._timings[setting] = max(
                    TimeConfig.__default_timing[setting] * 3,
                    TimeConfig._timings[setting])

            if TimeConfig._timings[setting] < .2:
                TimeConfig._timings[setting]= .2
Example #7
    def set_size(self, width, height, ease):
        if ease:
            self.save_easing_state()
            self.set_easing_duration(600)
            self.background.save_easing_state()
            self.background.set_easing_duration(600)
            self.border.save_easing_state()
            self.border.set_easing_duration(600)
            self.preview.save_easing_state()
            self.preview.set_easing_duration(600)
            if self.rightHandle:
                self.rightHandle.save_easing_state()
                self.rightHandle.set_easing_duration(600)

        self.marquee.set_size(width, height)
        self.background.props.width = max(width - 2, 1)
        self.background.props.height = max(height - 2, 1)
        self.border.props.width = width
        self.border.props.height = height
        self.props.width = width
        self.props.height = height
        self.preview.set_size(max(width - 2, 1), max(height - 2, 1))
        if self.rightHandle:
            self.rightHandle.set_position(
                width - self.rightHandle.props.width, 0)

        if ease:
            self.background.restore_easing_state()
            self.border.restore_easing_state()
            self.preview.restore_easing_state()
            if self.rightHandle:
                self.rightHandle.restore_easing_state()
            self.restore_easing_state()
Example #8
    def get_budgets(self):  # {{{
        # Get categories
        categories = self.get_categories()

        # Issue request for budget utilization
        first_of_this_month = date.today().replace(day=1)
        eleven_months_ago = (first_of_this_month - timedelta(days=330)).replace(day=1)
        url = "{}/getBudget.xevent".format(MINT_ROOT_URL)
        params = {
            'startDate': eleven_months_ago.strftime('%m/%d/%Y'),
            'endDate': first_of_this_month.strftime('%m/%d/%Y'),
            'rnd': Mint.get_rnd(),
        }
        response = json.loads(self.get(url, params=params, headers=JSON_HEADER).text)

        # Make the skeleton return structure
        budgets = {
            'income': response['data']['income'][
                str(max(map(int, response['data']['income'].keys())))
            ]['bu'],
            'spend': response['data']['spending'][
                str(max(map(int, response['data']['income'].keys())))
            ]['bu']
        }

        # Fill in the return structure
        for direction in budgets.keys():
            for budget in budgets[direction]:
                budget['cat'] = self.get_category_from_id(
                    budget['cat'],
                    categories
                )

        return budgets
Example #9
 def evaluate(self,  u):
 
     eps = 1e-10
     
     if u<min(self.knots)+eps:
         u = min(self.knots)+eps
         
     if u>=max(self.knots)-eps:
         u=max(self.knots)-eps
         
     # scale
     weighted_pts = [vscale(pt[0:-1],pt[-1])+[pt[-1]] for pt in self.pts]
     
             
     pts = []
     for i in range(len(self.pts)):
         pts.append(vscale(weighted_pts[i], self.N(i, self.order, u)))
         
     eval_pt = reduce(vadd, pts)
     
     
     # unscale
     if eval_pt[-1]!=0:
         unweighted_pt = vscale(eval_pt[0:-1], 1.0/eval_pt[-1]) 
     else:
         unweighted_pt = eval_pt[0:-1]
         
     
     return unweighted_pt
Example #10
def save_level_costs(level, costs, filename='distance_map.csv'):
    """ Displays cell costs from an origin point over the given level.

    Args:
        level: The level to be displayed.
        costs: A dictionary containing a mapping of cells to costs from an origin point.
        filename: The name of the csv file to be created.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    rows = []
    for j in range(y_lo, y_hi + 1):
        row = []

        for i in range(x_lo, x_hi + 1):
            cell = (i, j)
            if cell not in costs:
                row.append(inf)
            else:
                row.append(costs[cell])

        rows.append(row)

    assert '.csv' in filename, 'Error: filename does not contain file type.'
    with open(filename, 'w', newline='') as f:
        csv_writer = writer(f)
        for row in rows:
            csv_writer.writerow(row)
            
    
    print("Saved file:", filename)
Example #11
def sentence_similarity(idx, ob, mode):

    s_list = list()
    pbar = ProgressBar(widgets=['%s: image ' % mode, SimpleProgress()],
                       maxval=len(sentences)).start()

    for im_idx, sentence_group in enumerate(np.array(sentences)[idx, :]):

        pbar.update(im_idx + 1)
        for sent in sentence_group:

            words = analyze(sent)

            sim = list()
            for w in words:

                syn1 = wn.synsets(w)
                syn2 = wn.synsets(ob)

                if syn1 and syn2:
                    sim.append(max(s1.path_similarity(s2) for (s1, s2)
                                   in product(syn1, syn2)))
                else:
                    # ignore word if no synset combination was found on wordnet
                    sim.append(None)

            if max(sim):
                s_list.append(max(sim))
            else:
                # ignore sentence if no word was similar enough
                s_list.append(float('nan'))

    pbar.finish()
    return s_list
Example #12
def getframeinfo(frame, context=1):
    """Get information about a frame or traceback object.

    A tuple of five things is returned: the filename, the line number of
    the current line, the function name, a list of lines of context from
    the source code, and the index of the current line within that list.
    The optional second argument specifies the number of lines of context
    to return, which are centered around the current line."""
    if istraceback(frame):
        lineno = frame.tb_lineno
        frame = frame.tb_frame
    else:
        lineno = frame.f_lineno
    if not isframe(frame):
        raise TypeError('{!r} is not a frame or traceback object'.format(frame))

    filename = getsourcefile(frame) or getfile(frame)
    if context > 0:
        start = lineno - 1 - context//2
        try:
            lines, lnum = findsource(frame)
        except IOError:
            lines = index = None
        else:
            start = max(start, 1)
            start = max(0, min(start, len(lines) - context))
            lines = lines[start:start+context]
            index = lineno - 1 - start
    else:
        lines = index = None

    return Traceback(filename, lineno, frame.f_code.co_name, lines, index)
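This appears to mirror the standard library's inspect.getframeinfo, so a quick usage sketch with the stock inspect module:

import inspect

def where_am_i():
    # getframeinfo returns a Traceback namedtuple:
    # (filename, lineno, function, code_context, index)
    info = inspect.getframeinfo(inspect.currentframe(), context=3)
    print(info.filename, info.lineno, info.function)
    print(info.code_context)

where_am_i()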
Example #13
def show_level(level, path=[]):
    """ Displays a level via a print statement.

    Args:
        level: The level to be displayed.
        path: A continuous path to be displayed over the level, if provided.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    path_cells = set(path)

    chars = []
    inverted_waypoints = {point: char for char, point in level['waypoints'].items()}

    for j in range(y_lo, y_hi + 1):
        for i in range(x_lo, x_hi + 1):

            cell = (i, j)
            if cell in path_cells:
                chars.append('*')
            elif cell in level['walls']:
                chars.append('X')
            elif cell in inverted_waypoints:
                chars.append(inverted_waypoints[cell])
            elif cell in level['spaces']:
                chars.append(str(int(level['spaces'][cell])))
            else:
                chars.append(' ')

        chars.append('\n')

    print(''.join(chars))
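A hedged usage sketch with a tiny hand-built level; the dictionary layout (spaces mapping cells to costs, walls as a set of cells, waypoints mapping names to cells) is inferred from the function body.

level = {
    'spaces': {(0, 0): 1, (1, 0): 1, (0, 1): 1, (1, 1): 1},
    'walls': {(2, 0), (2, 1)},
    'waypoints': {'a': (0, 0), 'b': (1, 1)},
}
show_level(level, path=[(0, 0), (1, 0)])  # prints the path as '*', walls as 'X', waypoints by name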
Example #14
def group_by(a, b):
    '''Return a hash table of index lists over 0..len(a)-1, keyed by (a, b) value.'''
    a_max, b_max = max(a), max(b)
    g = np.zeros((a_max + 1, b_max + 1), dtype=object)
    for i, j in it.product(range(a_max + 1), range(b_max + 1)):
        g[i, j] = list()
    for k in range(len(a)):  # the original looped over an undefined 'n'; use len(a)
        g[a[k], b[k]].append(k)
    return g
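A short usage sketch with hypothetical inputs, assuming numpy and itertools are imported as np and it the way the snippet expects:

import itertools as it
import numpy as np

a = np.array([0, 1, 0, 1])
b = np.array([0, 0, 1, 1])
g = group_by(a, b)
print(g[0, 0], g[1, 1])  # -> [0] [3]: the indices k whose (a[k], b[k]) equals each key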
Example #15
    def createContext(self, item, plot, onlimits):
        self.origin = item.getOrigin()
        self.scale = item.getScale()

        self.data = item.getData(copy=True)

        if onlimits:
            minX, maxX = plot.getXAxis().getLimits()
            minY, maxY = plot.getYAxis().getLimits()

            XMinBound = int((minX - self.origin[0]) / self.scale[0])
            YMinBound = int((minY - self.origin[1]) / self.scale[1])
            XMaxBound = int((maxX - self.origin[0]) / self.scale[0])
            YMaxBound = int((maxY - self.origin[1]) / self.scale[1])

            XMinBound = max(XMinBound, 0)
            YMinBound = max(YMinBound, 0)

            if XMaxBound <= XMinBound or YMaxBound <= YMinBound:
                self.data = None
            else:
                self.data = self.data[YMinBound:YMaxBound + 1,
                                      XMinBound:XMaxBound + 1]
        if self.data.size > 0:
            self.min, self.max = min_max(self.data)
        else:
            self.min, self.max = None, None
        self.values = self.data

        if self.values is not None:
            self.axes = (self.origin[1] + self.scale[1] * numpy.arange(self.data.shape[0]),
                         self.origin[0] + self.scale[0] * numpy.arange(self.data.shape[1]))
Example #16
def mostCommonColor(img):
    colorsDic = {}
    h,w,_ = img.shape
    for i in range(0, h):
        for j in range(0, w):
            bgr = tuple(img[i,j,:])
            if bgr not in colorsDic:
                colorsDic[bgr] = 1
            else:
                colorsDic[bgr] += 1
    values = list(colorsDic.values())
    keys = list(colorsDic.keys())
    if keys:
        v1 = max(values)
        c1 = keys[values.index(v1)]
        frac1 = v1 / float((h*w))
        values.remove(max(values))
        keys.remove(c1)
        if len(keys) > 0:
            v2 = max(values)
            c2 = keys[values.index(v2)]
            frac2 = v2 / float((h*w))
            return c1, frac1, c2, frac2
        else:
            return c1, frac1, None, 0
    return None, 0, None, 0
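A quick usage sketch with a hypothetical 2x2 BGR image; it assumes numpy is imported as np.

import numpy as np

img = np.array([[[0, 0, 255], [0, 0, 255]],
                [[0, 0, 255], [255, 0, 0]]], dtype=np.uint8)
c1, frac1, c2, frac2 = mostCommonColor(img)
print(c1, frac1)  # most common colour, (0, 0, 255), covering 0.75 of the pixels
print(c2, frac2)  # runner-up, (255, 0, 0), covering 0.25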
Example #17
 def _find_common(self, lineset1, lineset2):
     """find similarities in the two given linesets"""
     lines1 = lineset1.enumerate_stripped
     lines2 = lineset2.enumerate_stripped
     find = lineset2.find
     index1 = 0
     min_lines = self.min_lines
     while index1 < len(lineset1):
         skip = 1
         num = 0
         for index2 in find(lineset1[index1]):
             non_blank = 0
             for num, ((_, line1), (_, line2)) in enumerate(
                 izip(lines1(index1), lines2(index2))):
                 if line1 != line2:
                     if non_blank > min_lines:
                         yield num, lineset1, index1, lineset2, index2
                     skip = max(skip, num)
                     break
                 if line1:
                     non_blank += 1
             else:
                  # we may have reached the end
                 num += 1
                 if non_blank > min_lines:
                     yield num, lineset1, index1, lineset2, index2
                 skip = max(skip, num)
         index1 += skip
Example #18
def twopad(attrlist):
    width = findattr(attrlist, "width")
    padwidth = findattr(attrlist, "padwidth")
    padheight = findattr(attrlist, "padheight")
    polyclear = findattr(attrlist, "polyclear")
    maskclear = findattr(attrlist, "maskclear")
    silkwidth = findattr(attrlist, "silkwidth")
    silkboxwidth = findattr(attrlist, "silkboxwidth")
    silkboxheight = findattr(attrlist, "silkboxheight")
    silkoffset = findattr(attrlist, "silkoffset")
    silkpolarity = findattr(attrlist, "silkpolarity")
    silkcustom = findattr(attrlist, "silkcustom")
    
    twopadelt = element(attrlist)
    twopadelt = twopadelt + rowofpads([0,0], width+padwidth, "right", padwidth, padheight, 1, 2, maskclear, polyclear)
    silkx = max((width+2*padwidth)/2 + silkoffset,silkboxwidth/2)
    silky = max(padheight/2 + silkoffset, silkboxheight/2)
    twopadelt = twopadelt + box(silkx,silky,-silkx,-silky,silkwidth)
    if (silkpolarity == "yes"):
        polx = silkx + 2*silkoffset
        twopadelt = twopadelt + silk(silkx, silky, polx, silky, silkwidth)
        twopadelt = twopadelt + silk(silkx, -silky, polx, -silky, silkwidth)
        twopadelt = twopadelt + silk(polx, -silky, polx, silky, silkwidth)
    for line in silkcustom:
        twopadelt += "\t" + str(line) + "\n"
    return twopadelt+")\n"
Example #19
def standardMC_european_option(K, T, R, V, S0, N, option_type, path_num=10000):
    dt = T / N
    sigma = V
    drift = math.exp((R - 0.5 * sigma * sigma) * dt)
    sigma_sqrt = sigma * math.sqrt(dt)
    exp_RT = math.exp(-R * T)
    european_payoff = []
    for i in xrange(path_num):
        former = S0
        for j in xrange(int(N)):
            former = former * drift * math.exp(sigma_sqrt * numpy.random.normal(0, 1))
        european_option = former

        if option_type == 1.0:
            european_payoff_call = exp_RT * max(european_option - K, 0)
            european_payoff.append(european_payoff_call)
        elif option_type == 2.0:
            european_payoff_put = exp_RT * max(K - european_option, 0)
            european_payoff.append(european_payoff_put)

    # Standard Monte Carlo
    p_mean = numpy.mean(european_payoff)
    p_std = numpy.std(european_payoff)
    p_confmc = (p_mean - 1.96 * p_std / math.sqrt(path_num), p_mean + 1.96 * p_std / math.sqrt(path_num))
    return p_mean, p_std, p_confmc
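A usage sketch with hypothetical market parameters (an at-the-money call over one year, 5% rate, 20% volatility); the snippet itself assumes math and numpy are imported and uses the Python 2 xrange.

mean_price, std_dev, conf_interval = standardMC_european_option(
    K=100.0, T=1.0, R=0.05, V=0.2, S0=100.0, N=50, option_type=1.0,
    path_num=20000)
print(mean_price)     # Monte Carlo estimate of the call price
print(conf_interval)  # 95% confidence interval around the estimate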
Example #20
def cost_config(pos_,Qnet,Pnet,counts):
    p_wind=pos_[0]*WINDS_PRICE_DEVICE
    p_pv=pos_[1]*PV_PRICE_BAT1
    counts.append(int(max(Pnet)))
    counts.append(abs(int(min(Pnet))))
    Qmin=min(Qnet)
    print 'Qmin:%f   '%Qmin
    Qmax=max(max(Qnet),abs(min(Qnet)))
    Pmax=max(max(Pnet),abs(min(Pnet)))
    maxVol=recyclemodule.tank_svgas(recyclemodule.ele_mkgas(Qmax))
    max_tank_Volumn.append(abs(maxVol)) 
    print 'maxVol :%f'%maxVol
    counts.append(abs(maxVol/100.0))
    q=(Qmax/BAT_POWER)*BAT_PRICE_DEVICE
    if abs(q)>ELE_PRICE_DEVICE+TANK_PRICE_DEVICE+FC_PRICE_DEVICE:
        p_cycle=ELE_PRICE_DEVICE*counts[0]+TANK_PRICE_DEVICE*counts[2]+FC_PRICE_DEVICE*counts[1]
        counts.append(round(Pmax))            
        p_bat=(Pmax)/BAT_POWER*BAT_PRICE_DEVICE
        #print 'cycle price :%f'%p_cycle
        #print 'bat   price :%f'%p_bat
           
    else:
        counts.append(0.0)
        p_bat=q
        p_cycle=0
        #print 'cycle not use'
        #print q

    print 'ele: %d fc : %d tank : %d bat : %d'%(counts[0],counts[1],counts[2],counts[3])       
    return p_wind+p_pv+p_bat+p_cycle
Example #21
def calculate_hand_rank(hand):
    ranks = card_ranks(hand)
    suits = [s for r,s in hand]

    if flush(suits) and straight(ranks): 
        return ((8, max(ranks)), 'Straight Flush')
    elif n_of_kind(ranks, 4):
        return ((7, n_of_kind(ranks, 4), n_of_kind(ranks, 1)), 'Four of a Kind')
    elif n_of_kind(ranks, 3) and n_of_kind(ranks, 2):
        return ((6, n_of_kind(ranks, 3), n_of_kind(ranks, 2)), 'Full House')
    elif flush(suits):
        return ((5, ranks), 'Flush')
    elif straight(ranks):
        return ((4, max(ranks)), 'Straight')
    elif n_of_kind(ranks, 3):
        return ((3, n_of_kind(ranks, 3), single_cards(ranks)[0], single_cards(ranks)[1]), 
                 'Three of a Kind')
    elif two_pair(ranks):
        return ((2, two_pair(ranks)[0], two_pair(ranks)[1], n_of_kind(ranks, 1)), 
                 'Two Pair')
    elif n_of_kind(ranks, 2):
        return ((1, n_of_kind(ranks, 2), single_cards(ranks)[0], single_cards(ranks)[1], 
                 single_cards(ranks)[2]), 'A Pair')
    else:
        return ((0, ranks), 'High Card')
Example #22
def point_inside_polygon(x,y,polygons):
    
    idxPolygons = []
    
    if polygons != None:
        for l in range(len(polygons)):
            inside = False
            poly = polygons[l]
            #print "poly: ",  poly
            if poly != None:
                n = len(poly)
                p1x,p1y = poly[0]
                for i in range(n+1):
                    p2x,p2y = poly[i % n]
                    if y > min(p1y,p2y):
                        if y <= max(p1y,p2y):
                            if x <= max(p1x,p2x):
                                if p1y != p2y:
                                    xinters = (y-p1y)*(p2x-p1x)/(p2y-p1y)+p1x
                                if p1x == p2x or x <= xinters:
                                    inside = not inside
                    p1x,p1y = p2x,p2y
                if inside == True:
                    idxPolygons.append(l)
            
    return idxPolygons
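A small usage sketch with two hypothetical polygons; the function returns the indices of the polygons that contain the point, using standard ray casting.

polygons = [
    [(0, 0), (1, 0), (1, 1), (0, 1)],  # unit square
    [(5, 5), (6, 5), (6, 6)],          # far-away triangle
]
print(point_inside_polygon(0.5, 0.5, polygons))  # -> [0]
print(point_inside_polygon(9.0, 9.0, polygons))  # -> []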
Example #23
    def onMouse( self, event, x, y, flags, param ):
        """
        Mouse interactions with Main window:
            - Left mouse click gives pixel data under cursor
            - Left mouse drag selects rectangle

            - Right mouse button switches view mode
        """
        if event == cv2.EVENT_LBUTTONDOWN:
            if not self.gui_frame == None:
                self.drag_start = (x, y)

        if event == cv2.EVENT_LBUTTONUP:
                self.drag_start = None

                if self.selection == None:
                    pixel = self.gui_frame[y, x]
                    print "[X,Y][B G R](H, S, V):", [x, y], pixel, utils.BGRpix2HSV(pixel)
                else:
                    #self.track_window = self.selection
                    print self.selection    #self.track_window

        if self.drag_start:
            xmin = min( x, self.drag_start[0] )
            ymin = min( y, self.drag_start[1] )
            xmax = max( x, self.drag_start[0] )
            ymax = max( y, self.drag_start[1] )

            if xmax - xmin < 2 and ymax - ymin < 2:
                self.selection = None
            else:
                self.selection = ( xmin, ymin, xmax - xmin, ymax - ymin )

        if event == cv2.EVENT_RBUTTONDOWN:
            pass
Example #24
 def __handle_select_button_ccs(self, cc_no, cc_value):
     if cc_no == FX_SELECT_FIRST_BUTTON_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             self.__parent.toggle_lock()
     elif cc_no == FX_SELECT_ENCODER_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             new_index = min(
                 len(self.song().scenes) - 1,
                 max(0, list(self.song().scenes).index(self.song().view.selected_scene) - 1),
             )
             self.song().view.selected_scene = self.song().scenes[new_index]
     elif cc_no == FX_SELECT_SECOND_BUTTON_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             new_index = min(
                 len(self.song().scenes) - 1,
                 max(0, list(self.song().scenes).index(self.song().view.selected_scene) + 1),
             )
             self.song().view.selected_scene = self.song().scenes[new_index]
     elif cc_no == FX_SELECT_POTIE_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             self.song().view.selected_scene.fire_as_selected()
     elif cc_no == FX_SELECT_DRUM_PAD_ROW:
         if cc_value == CC_VAL_BUTTON_PRESSED:
             self.song().stop_all_clips()
     else:
         raise AssertionError("unknown select row midi message")
Example #25
def getPartInfo( part ):
        points = part['points']
        n = len(points)
        area = cx = cy = 0
        xmin = ymin = 360
        xmax = ymax = -360
        pt = points[n-1];  xx = pt[0];  yy = pt[1]
        for pt in points:
                x = pt[0];  y = pt[1]
                # bounds
                xmin = min( x, xmin );  ymin = min( y, ymin )
                xmax = max( x, xmax );  ymax = max( y, ymax )
                # area and centroid
                a = xx * y - x * yy
                area += a
                cx += ( x + xx ) * a
                cy += ( y + yy ) * a
                # next
                xx = x;  yy = y
        area /= 2
        if area:
                centroid = [ cx / area / 6, cy / area / 6 ]
        else:
                centroid = None
        part.update({
                'area': abs(area),
                'bounds': [ [ xmin, ymin ], [ xmax, ymax ] ],
                'center': [ ( xmin + xmax ) / 2, ( ymin + ymax ) / 2 ],
                'centroid': centroid,
                'extent': [ abs( xmax - xmin ), abs( ymax - ymin ) ]
        })
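A worked usage sketch with a hypothetical 2-by-2 square part; getPartInfo mutates the dictionary in place, adding the shoelace area, bounds, center and centroid.

part = {'points': [[0, 0], [2, 0], [2, 2], [0, 2]]}
getPartInfo(part)
print(part['area'])      # shoelace area: 4 for this square
print(part['centroid'])  # geometric centroid at the middle of the square
print(part['bounds'])    # [[0, 0], [2, 2]]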
Example #26
    def getPixelData(self, event):
        global data

        x = event.pos().x()
        y = event.pos().y()
        self.area.append([x, y])

        value = qRgb(flag[self.color][0], flag[self.color][1], flag[self.color][2])

        # if there are 2 points, add the whole zone to the data
        if len(self.area) == 2:
            xi = min(self.area[0][0], self.area[1][0] + 1)
            xf = max(self.area[0][0], self.area[1][0] + 1)
            yi = min(self.area[0][1], self.area[1][1] + 1)
            yf = max(self.area[0][1], self.area[1][1] + 1)

            print('zone [' + str(xi) + ',' + str(yi) + '][' + str(xf) + ',' + str(yf) + ']  => ' + self.color)

            for i in range(xi, xf):
                for j in range(yi, yf):
                    rgb = QColor(self.image.pixel(i, j)).toRgb()
                    r = rgb.red()
                    g = rgb.green()
                    b = rgb.blue()
                    data[self.color].append([i, j, r, g, b])

                    self.image.setPixel(i, j, value)

            # update image to show the already selected pixels
            self.label.setPixmap(QPixmap.fromImage(self.image))
            # reset list with the zone limits
            self.area = []
Example #27
    def get_box_intersection(self, bounding_region):
        """
        Given a bounding_region object, computes and returns a new BoundingRegion that
        corresponds to the intersection of the bounding box of the current object with the
        box of the region given as argument. Returns an empty BoundingRegion if the intersection
        is empty.

        :param bounding_region: A BoundingRegion object to compute intersection with
        :type bounding_region: BoundingRegion
        :return: Bounding region of the intersection of the boxes
        :rtype: BoundingRegion
        """
        x1_1 = self.box[0]
        y1_1 = self.box[1]
        x1_2 = self.box[0] + self.box[2]
        y1_2 = self.box[1] + self.box[3]
        box2 = bounding_region.get_box_pixels()
        x2_1 = box2[0]
        y2_1 = box2[1]
        x2_2 = box2[0] + box2[2]
        y2_2 = box2[1] + box2[3]

        x3_1 = max(x1_1, x2_1)
        y3_1 = max(y1_1, y2_1)
        width = max(-1, min(x1_2, x2_2) - x3_1)
        height = max(-1, min(y1_2, y2_2) - y3_1)
        if width * height >= 0:
            return BoundingRegion(image_shape=self.image_shape, box=(x3_1, y3_1, width, height))
        else:
            return BoundingRegion()
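A minimal stand-alone sketch of the same box-intersection arithmetic, using plain (x, y, width, height) tuples instead of BoundingRegion objects; the helper name box_intersection is hypothetical.

def box_intersection(box_a, box_b):
    ax1, ay1 = box_a[0], box_a[1]
    ax2, ay2 = box_a[0] + box_a[2], box_a[1] + box_a[3]
    bx1, by1 = box_b[0], box_b[1]
    bx2, by2 = box_b[0] + box_b[2], box_b[1] + box_b[3]
    x1, y1 = max(ax1, bx1), max(ay1, by1)
    width = max(-1, min(ax2, bx2) - x1)
    height = max(-1, min(ay2, by2) - y1)
    # Same acceptance test as the snippet above.
    return (x1, y1, width, height) if width * height >= 0 else None

print(box_intersection((0, 0, 10, 10), (5, 5, 10, 10)))  # -> (5, 5, 5, 5)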
Example #28
def upgradeTier (card1, card2):
	defaultHealth = cards[card1.id][1]
	defaultAttack = cards[card1.id][2]
	if card1.isMaxLevel():
		delta1hp = card1.stats[0]*0.1
		delta1attack = card1.stats[1]*0.1
	else:
		delta1hp = card1.stats[0]*0.05
		delta1attack = card1.stats[1]*0.05

	if card2.isMaxLevel():
		delta2hp = card2.stats[0]*0.1
		delta2attack = card2.stats[1]*0.1
	else:
		delta2hp = card2.stats[0]*0.05
		delta2attack = card2.stats[1]*0.05     

	deltaHP = delta1hp + delta2hp
	print 
	deltaAttack = delta1attack + delta2attack
	
	if max(card1.tier,card2.tier)==1:
		koeff = 1.2
	elif max(card1.tier,card2.tier)==2:
		koeff = 1.5
	elif max(card1.tier,card2.tier)==3:
		koeff = 2
	newstats = [defaultHealth*koeff + deltaHP, defaultAttack*koeff + deltaAttack]
	newCard = Card (card1.id)
	newCard.stars = newstats
	newCard.tier = max(card1.tier,card2.tier)+1
	return newCard
Example #29
def fit_CSU_edges(profile):
    fitter = fitting.LevMarLSQFitter()

    amp1_est = profile[profile == min(profile)][0]
    mean1_est = np.argmin(profile)
    amp2_est = profile[profile == max(profile)][0]
    mean2_est = np.argmax(profile)
    
    g_init1 = models.Gaussian1D(amplitude=amp1_est, mean=mean1_est, stddev=2.)
    g_init1.amplitude.max = 0
    g_init1.amplitude.min = amp1_est*0.9
    g_init1.stddev.max = 3
    g_init2 = models.Gaussian1D(amplitude=amp2_est, mean=mean2_est, stddev=2.)
    g_init2.amplitude.min = 0
    g_init2.amplitude.min = amp2_est*0.9
    g_init2.stddev.max = 3

    model = g_init1 + g_init2
    fit = fitter(model, range(0,profile.shape[0]), profile)
    
    # Check Validity of Fit
    if abs(fit.stddev_0.value) <= 3 and abs(fit.stddev_1.value) <= 3\
       and fit.amplitude_0.value < -1 and fit.amplitude_1.value > 1\
       and fit.mean_0.value > fit.mean_1.value:
        x = [fit.mean_0.value, fit.mean_1.value]
        x1 = int(np.floor(min(x)-1))
        x2 = int(np.ceil(max(x)+1))
    else:
        x1 = None
        x2 = None

    return x1, x2
Example #30
    def plotResult(self, nn):
        cmask = np.where(self.y==1);
        plot(self.X[cmask,0], self.X[cmask,1], 'or', markersize=4)
        cmask = np.where(self.y==2);
        plot(self.X[cmask,0], self.X[cmask,1], 'ob', markersize=4)
        cmask = np.where(self.y==3);
        plot(self.X[cmask,0], self.X[cmask,1], 'og', markersize=4)

        minX = min(self.X[:,0])
        minY = min(self.X[:,1])
        maxX = max(self.X[:,0])
        maxY = max(self.X[:,1])

        grid_range = [minX, maxX, minY, maxY];
        delta = 0.05; levels = 100
        a = arange(grid_range[0],grid_range[1],delta)
        b = arange(grid_range[2],grid_range[3],delta)
        A, B = meshgrid(a, b)
        values = np.zeros(A.shape)

        for i in range(len(a)):
            for j in range(len(b)):
                values[j,i] = nn.getNetworkOutput( [ a[i], b[j] ] )
        contour(A, B, values, levels=[1], colors=['k'], linestyles='dashed')
        contourf(A, B, values, levels=linspace(values.min(),values.max(),levels), cmap=cm.RdBu)
Example #31
def normalize(numbers):
    max_number = max(numbers)
    for i in range(len(numbers)):
        numbers[i] /= float(max_number)
    return numbers  
Example #32
                                    label_scores.argmax().item())
                                label_confidence_list.append(
                                    label_scores[label_index_list[-1]].item())
                            else:
                                label_index_list.append(1)
                                label_confidence_list.append(1)
                        else:
                            label_index_list.append(
                                label_scores.argmax().item())
                            label_confidence_list.append(
                                label_scores[label_index_list[-1]].item())

                        logits_list.append(model_output)

                    # voting
                    label_index = max(label_index_list,
                                      key=label_index_list.count)
                    label_confidence = label_confidence_list[
                        label_index_list.index(label_index)]

                    logits = logits_list[label_index_list.index(label_index)]
                    sup_refu_indices = torch.tensor(
                        [0, 2]).cuda()  # index of support and refute
                    probs = torch.softmax(torch.index_select(
                        logits, 1, sup_refu_indices),
                                          dim=1)[0]

                    results[doc_id] = {
                        'label':
                        LABELS[label_index],
                        'confidence':
                        round(label_confidence, 4),
Example #33
 def update_progress(self, progress):
     progress = max(0.0, min(100.0, progress))
     progressbc.send_update(self.previous_stages_progress() +
                            (self.delta_progress() / 100.0) *
                            float(progress))
Example #34
 def previous_stages_progress(self):
     if self.prev_stage:
         return max(0.0, self.prev_stage.progress)
     else:
         return 0.0
Example #35
 def delta_progress(self):
     if self.prev_stage:
         return max(0.0, self.progress - self.prev_stage.progress)
     else:
         return max(0.0, self.progress)
Example #36
 def rightward(prev, next):
     nonlocal ans
     next = [next[0], min(next[1], prev[1] + prev[0] - next[0])]
     high, low = sorted([prev[1], next[1]])
     ans = max(ans, high + (prev[0] - next[0] - high + low) // 2)
     return next
Example #37
squares = [value**2 for value in range(1, 11)]
# for value in range(1, 11) :
#     squares.append(value ** 2)
print(squares)

print(min(squares))
print(max(squares))
print(sum(squares))
Example #38
    def check(self, instance):
        name = instance.get('name', None)
        tags = instance.get('tags', [])
        exact_match = _is_affirmative(instance.get('exact_match', True))
        search_string = instance.get('search_string', None)
        ignore_ad = _is_affirmative(instance.get('ignore_denied_access', True))
        pid = instance.get('pid')
        pid_file = instance.get('pid_file')
        collect_children = _is_affirmative(
            instance.get('collect_children', False))
        user = instance.get('user', False)
        try_sudo = instance.get('try_sudo', False)

        if self._conflicting_procfs:
            self.warning(
                'The `procfs_path` defined in `process.yaml` is different from the one defined in '
                '`datadog.conf`. This is currently not supported by the Agent. Defaulting to the '
                'value defined in `datadog.conf`: {}'.format(
                    psutil.PROCFS_PATH))
        elif self._deprecated_init_procfs:
            self.warning(
                'DEPRECATION NOTICE: Specifying `procfs_path` in `process.yaml` is deprecated. '
                'Please specify it in `datadog.conf` instead')

        if not isinstance(search_string,
                          list) and pid is None and pid_file is None:
            raise ValueError(
                '"search_string" or "pid" or "pid_file" parameter is required')

        # FIXME 8.x remove me
        if search_string is not None:
            if "All" in search_string:
                self.warning(
                    'Deprecated: Having "All" in your search_string will greatly reduce the '
                    'performance of the check and will be removed in a future version of the agent.'
                )

        if name is None:
            raise KeyError('The "name" of process groups is mandatory')

        if search_string is not None:
            pids = self.find_pids(name,
                                  search_string,
                                  exact_match,
                                  ignore_ad=ignore_ad)
        elif pid is not None:
            # we use Process(pid) as a means to search, if pid not found
            # psutil.NoSuchProcess is raised.
            pids = self._get_pid_set(pid)
        elif pid_file is not None:
            try:
                with open(pid_file, 'r') as file_pid:
                    pid_line = file_pid.readline().strip()
                    pids = self._get_pid_set(int(pid_line))
            except IOError as e:
                # pid file doesn't exist, assuming the process is not running
                self.log.debug('Unable to find pid file: {}'.format(e))
                pids = set()
        else:
            raise ValueError(
                'The "search_string" or "pid" options are required for process identification'
            )

        if collect_children:
            pids.update(self._get_child_processes(pids))

        if user:
            pids = self._filter_by_user(user, pids)

        proc_state = self.get_process_state(name, pids, try_sudo)

        # FIXME 8.x remove the `name` tag
        tags.extend(['process_name:{}'.format(name), name])

        self.log.debug('ProcessCheck: process {} analysed'.format(name))
        self.gauge('system.processes.number', len(pids), tags=tags)

        if len(pids) == 0:
            self.warning("No matching process '{}' was found".format(name))

        for attr, mname in iteritems(ATTR_TO_METRIC):
            vals = [x for x in proc_state[attr] if x is not None]
            # skip []
            if vals:
                if attr == 'run_time':
                    self.gauge('system.processes.{}.avg'.format(mname),
                               sum(vals) / len(vals),
                               tags=tags)
                    self.gauge('system.processes.{}.max'.format(mname),
                               max(vals),
                               tags=tags)
                    self.gauge('system.processes.{}.min'.format(mname),
                               min(vals),
                               tags=tags)

                # FIXME 8.x: change this prefix?
                else:
                    self.gauge('system.processes.{}'.format(mname),
                               sum(vals),
                               tags=tags)

        for attr, mname in iteritems(ATTR_TO_METRIC_RATE):
            vals = [x for x in proc_state[attr] if x is not None]
            if vals:
                self.rate('system.processes.{}'.format(mname),
                          sum(vals),
                          tags=tags)

        self._process_service_check(name, len(pids),
                                    instance.get('thresholds', None), tags)
Example #39
    def build(self, config):
        '''
            build index from scratch
        '''
        operation_method = config.get("index_operation", "new").lower()

        gallery_images, gallery_docs = split_datafile(
            config['data_file'], config['image_root'], config['delimiter'])

        # when removing data from the index, there is no need to extract features
        if operation_method != "remove":
            gallery_features = self._extract_features(gallery_images, config)
        assert operation_method in [
            "new", "remove", "append"
        ], "Only append, remove and new operation are supported"

        # vector.index: faiss index file
        # id_map.pkl: use this file to map id to image_doc
        if operation_method in ["remove", "append"]:
            # if remove or append, vector.index and id_map.pkl must exist
            assert os.path.exists(
                os.path.join(config["index_dir"], "vector.index")
            ), "The vector.index does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            assert os.path.exists(
                os.path.join(config["index_dir"], "id_map.pkl")
            ), "The id_map.pkl does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            index = faiss.read_index(
                os.path.join(config["index_dir"], "vector.index"))
            with open(os.path.join(config["index_dir"], "id_map.pkl"),
                      'rb') as fd:
                ids = pickle.load(fd)
            assert index.ntotal == len(ids), \
                "data number in index is not equal to that in id_map"
        else:
            if not os.path.exists(config["index_dir"]):
                os.makedirs(config["index_dir"], exist_ok=True)
            index_method = config.get("index_method", "HNSW32")

            # if IVF method, calculate the IVF list number automatically
            if index_method == "IVF":
                index_method = index_method + str(
                    min(int(len(gallery_images) // 8), 65536)) + ",Flat"

            # for binary index, add B at head of index_method
            if config["dist_type"] == "hamming":
                index_method = "B" + index_method

            #dist_type
            dist_type = faiss.METRIC_INNER_PRODUCT if config[
                "dist_type"] == "IP" else faiss.METRIC_L2

            #build index
            if config["dist_type"] == "hamming":
                index = faiss.index_binary_factory(config["embedding_size"],
                                                   index_method)
            else:
                index = faiss.index_factory(config["embedding_size"],
                                            index_method, dist_type)
                index = faiss.IndexIDMap2(index)
            ids = {}

        if config["index_method"] == "HNSW32":
            logger.warning(
                "The HNSW32 method dose not support 'remove' operation")

        if operation_method != "remove":
            # calculate id for new data
            start_id = max(ids.keys()) + 1 if ids else 0
            ids_now = (
                np.arange(0, len(gallery_images)) + start_id).astype(np.int64)

            # only train when new index file
            if operation_method == "new":
                if config["dist_type"] == "hamming":
                    index.add(gallery_features)
                else:
                    index.train(gallery_features)

            if not config["dist_type"] == "hamming":
                index.add_with_ids(gallery_features, ids_now)

            for i, d in zip(list(ids_now), gallery_docs):
                ids[i] = d
        else:
            if config["index_method"] == "HNSW32":
                raise RuntimeError(
                    "The index_method: HNSW32 dose not support 'remove' operation"
                )
            # remove ids in id_map, remove index data in faiss index
            remove_ids = list(
                filter(lambda k: ids.get(k) in gallery_docs, ids.keys()))
            remove_ids = np.asarray(remove_ids)
            index.remove_ids(remove_ids)
            for k in remove_ids:
                del ids[k]

        # store faiss index file and id_map file
        if config["dist_type"] == "hamming":
            faiss.write_index_binary(
                index, os.path.join(config["index_dir"], "vector.index"))
        else:
            faiss.write_index(
                index, os.path.join(config["index_dir"], "vector.index"))

        with open(os.path.join(config["index_dir"], "id_map.pkl"), 'wb') as fd:
            pickle.dump(ids, fd)
Example #40
def main():
    progname = os.path.basename(sys.argv[0])
    usage = """Usage:\nproclst.py [options] <lst 1> <lst 2> ... \nSimple manipulations of LST files. If your goal is to produce an actual image file rather than the
sort of virtual stack represented by .lst files, use e2proc2d.py or e2proc3d.py instead. Those other programs will treat LST files as normal image files for input.\n."""

    parser = EMArgumentParser(usage=usage, version=EMANVERSION)
    ####################
    #	parser.add_argument("--average", action="store_true", help="Averages all input images (without alignment) and writes a single output image")

    parser.add_argument(
        "--create",
        type=str,
        default=None,
        help=
        "The input file(s) should be image files. To combine .lst files use --merge. Specify an .lst or .lsx file to create here (e.g., --create mylst.lst) with references to all of the images in the inputs."
    )
    parser.add_argument(
        "--eosplit",
        action="store_true",
        help=
        "Will generate _even and _odd .lst files for each specified input .lst file"
    )
    parser.add_argument(
        "--split",
        type=int,
        default=0,
        help=
        "Will put every nth particle in a separate numbered .lst file based on --create name. Ignores other subset selection options! Single input only!"
    )

    parser.add_argument(
        "--dereforig",
        type=str,
        default=None,
        help=
        "Extract the data_source and data_n parameters from each image in the file and create a new .lst file referencing the original image(s)"
    )

    parser.add_argument(
        "--exclude",
        type=str,
        default=None,
        help=
        "only works if --create is supplied. comma-separated list of indexes from the input file(s) to EXCLUDE from the created .lst file."
    )

    #parser.add_argument("--first", type=int, default=0, help="Default=0 (first image index in input(s)). This will be the first particle index in the input images to put in the output lsx/lst file.")

    parser.add_argument(
        "--include",
        type=str,
        default=None,
        help=
        "only works if --create is supplied. comma-separated list of indexes to take from the input file(s) to INCLUDE in the created .lst file. if you have the list of indexes to include in a .txt file, you can provide it through --list."
    )
    parser.add_argument(
        "--inplace",
        action="store_true",
        default=False,
        help=
        "only works with --create. if the stack specified in --create already exists, this will prevent appending to it. rather, the file will be modified in place."
    )
    #	parser.add_argument("--force", action="store_true", default=False, help="only works with --create. if the stack specified in --create already exists, it will be removed and rewritten.")

    #parser.add_argument("--last", type=str, default=-1, help="Default=-1 (last image index in input (s)). This will be the first particle index in the input images to put in the output lsx/lst file.")
    parser.add_argument(
        "--list",
        type=str,
        default=None,
        help=
        "only works if --create is supplied. .txt file with a list of indexes (one per line/row) to take from the input file(s) to INCLUDE in the created .lst file."
    )

    parser.add_argument(
        "--merge",
        type=str,
        default=None,
        help=
        "Specify the output name here. This will concatenate all of the input .lst files into a single output"
    )
    parser.add_argument(
        "--mergesort",
        type=str,
        default=None,
        help=
        "Specify the output name here. This will merge all of the input .lst files into a single (resorted) output"
    )
    parser.add_argument(
        "--mergeinterleave",
        type=str,
        default=None,
        help=
        "Specify the output name here. Interleaves images from input .lst files, eg - A0,B0,C0,A1,B1,C1,... truncates based on size of smallest input, eg- 1000,500,300 -> 900"
    )
    parser.add_argument("--mergeeo",
                        action="store_true",
                        default=False,
                        help="Merge even odd lst.")
    parser.add_argument(
        "--minhisnr",
        type=float,
        help="Integrated SNR from 1/10-1/4 1/A must be larger than this",
        default=-1,
        guitype='floatbox',
        row=8,
        col=1)
    parser.add_argument(
        "--minlosnr",
        type=float,
        help="Integrated SNR from 1/200-1/20 1/A must be larger than this",
        default=-1,
        guitype='floatbox',
        row=8,
        col=0)
    parser.add_argument("--mindf",
                        type=float,
                        help="Minimum defocus",
                        default=-1,
                        guitype='floatbox',
                        row=8,
                        col=1)
    parser.add_argument("--maxdf",
                        type=float,
                        help="Maximum defocus",
                        default=-1,
                        guitype='floatbox',
                        row=8,
                        col=0)

    parser.add_argument(
        "--numaslist",
        type=str,
        default=None,
        help=
        "extract the particle indexes (numbers) only from an lst file into a text file (one number per line)."
    )

    parser.add_argument(
        "--ppid",
        type=int,
        help="Set the PID of the parent process, used for cross platform PPID",
        default=-1)

    parser.add_argument(
        "--range",
        type=str,
        default=None,
        help=
        "Range of particles to use. Works only with --create option. Input of 0,10,2 means range(0,10, step=2)."
    )
    parser.add_argument(
        "--retype",
        type=str,
        default=None,
        help=
        "If a lst file is referencing a set of particles from particles/imgname__oldtype.hdf, this will change oldtype to the specified string in-place (modifies input files)"
    )
    parser.add_argument(
        "--refile",
        type=str,
        default=None,
        help=
        "similar to retype, but replaces the full filename of the source image file with the provided string"
    )
    parser.add_argument("--shuffle",
                        action="store_true",
                        default=False,
                        help="shuffle list inplace.")
    parser.add_argument(
        "--sym",
        type=str,
        default=None,
        help=
        "apply symmetry to a list of particles with xform.projection by duplicating each particle N time. only used along with a .lst input"
    )
    parser.add_argument(
        "--extractattr",
        type=str,
        default=None,
        help="extract an attribute from particle header as an entry in the list"
    )

    parser.add_argument(
        "--nocomments",
        action="store_true",
        default=False,
        help="Removes the comments from each line of the lst file.")

    parser.add_argument(
        "--verbose",
        "-v",
        dest="verbose",
        action="store",
        metavar="n",
        type=int,
        help=
        "verbose level [0-9], higher number means higher level of verboseness",
        default=1)

    (options, args) = parser.parse_args()

    if len(args) < 1:
        parser.error("At least one lst file required")
        sys.exit(1)

    logid = E2init(sys.argv, options.ppid)

    #if options.numaslist != None:
    if options.eosplit:
        for inp in args:
            if inp[-4:].lower() != ".lst": continue
            lin = LSXFile(inp, True)
            ename = "{}_even.lst".format(inp[:-4])
            oname = "{}_odd.lst".format(inp[:-4])
            try:
                os.unlink(ename)
            except:
                pass
            try:
                os.unlink(oname)
            except:
                pass
            loute = LSXFile(ename, False)
            louto = LSXFile(oname, False)
            for i in range(len(lin)):
                imt = lin.read(i)
                if i % 2: louto.write(-1, imt[0], imt[1], imt[2])
                else: loute.write(-1, imt[0], imt[1], imt[2])
        print("Generated: ", ename, oname)
        sys.exit(0)

    if options.numaslist:
        out = open(options.numaslist, "w")

        for f in args:
            lst = LSXFile(f, True)
            for i in range(len(lst)):
                out.write("{}\n".format(lst[i][0]))

    if options.dereforig:
        newlst = LSXFile(options.dereforig)

        for f in args:
            n = EMUtil.get_image_count(f)
            for i in range(n):
                im = EMData(f, i, True)
                # It shouldn't be possible for this to go infinitely, or there would have been a problem on the previous line
                while im["data_source"][-4:] == ".lst":
                    im = EMData(im["data_source"], im["data_n"], True)
                newlst.write(-1, im["data_n"], im["data_source"])
                if options.verbose > 1:
                    print("{},{} -> {},{}".format(f, i, im["data_source"],
                                                  im["data_n"]))

        print("exiting after --dereforig")
        sys.exit(0)

    if options.create:

        if '.lst' not in options.create and '.lsx' not in options.create:
            print(
                "\nERROR: the extension of the output file in --create must be .lst or .lsx"
            )
            sys.exit(1)

        ### use the file comment of the first list if exist
        if args[0].endswith(".lst"):
            l = LSXFile(args[0], True)
            cmt = l.filecomment.strip()
            if cmt.startswith("#keys:"):
                print("Converting lst file to the new style...")
                cmt = ""

        else:
            cmt = ""

        try:
            os.remove(options.create)
        except:
            pass

        lst = LSXFile(options.create, False, cmt)

        if options.split > 1:
            if len(args) > 1:
                print(
                    "Error: single input only. For multiple inputs create a single .lst first, then split it."
                )
                exit()
            if not args[0].endswith(".lst"):
                print(
                    "Error: only lst files can be used as input with --split")
                exit()

            lsin = LSXFile(args[0])
            #remove existing outputs
            for i in range(options.split):
                try:
                    os.unlink(f"{options.create[:-4]}_{i}.lst")
                except:
                    pass
            #create new outputs
            lsout = [
                LSXFile(f"{options.create[:-4]}_{i}.lst")
                for i in range(options.split)
            ]

            #split
            for i in range(len(lsin)):
                lsout[i % options.split][i // options.split] = lsin[i]

            exit()
        elif options.mergeeo:
            print("Merging two image stacks...")
            if len(args) != 2:
                print("Error: Need two inputs...")
                exit()
            n0 = EMUtil.get_image_count(args[0])
            n1 = EMUtil.get_image_count(args[1])
            n = max(n0, n1)

            if args[0].endswith(".lst"):
                lste = LSXFile(args[0], True)
                lsto = LSXFile(args[1], True)
                fromlst = True
            else:
                fromlst = False

            for i in range(n):
                if fromlst:
                    if i < n0:
                        ln = lste.read(i)
                        lst.write(-1, ln[0], ln[1], ln[2])
                    if i < n1:
                        ln = lsto.read(i)
                        lst.write(-1, ln[0], ln[1], ln[2])
                else:
                    if i < n0:
                        lst.write(-1, i, args[0])
                    if i < n1:
                        lst.write(-1, i, args[1])
            lst = None
            sys.exit(1)

        else:
            for f in args:
                n = EMUtil.get_image_count(f)
                if f.endswith(".lst"):
                    lstin = LSXFile(f, True)
                    fromlst = True
                else:
                    fromlst = False

                # By default assume all particles in the input file will be part of
                # the output lsx; the options below modify the indexes to include.
                indxsinclude = list(range(n))

                if options.range:
                    # options.range holds the arguments to range(), e.g. "0,1000,2"
                    indxsinclude = eval("range({})".format(options.range))

                elif options.exclude:
                    indxs = set(range(n))
                    indxsexclude = set(
                        [int(i) for i in options.exclude.split(',')])
                    indxsinclude = list(indxs - indxsexclude)
                    if len(indxsinclude) < 2000:
                        print(
                            f"including ({len(indxsinclude)}): {indxsinclude}")

                elif options.include:
                    indxsinclude = [int(j) for j in options.include.split(',')]

                elif options.list:
                    ff = open(options.list)
                    lines = ff.readlines()
                    ff.close()

                    indxsinclude = []
                    k = 0
                    for line in lines:
                        if line:  #check that the line is not empty
                            indxsinclude.append(int(line.replace('\n', '')))
                        else:
                            print(
                                "\nWARNING, line {} in {} seems to be empty!".
                                format(k, options.list))
                        k += 1

                if options.verbose:
                    print("Processing {} images in {}".format(
                        len(indxsinclude), f))

                kk = 0
                for i in indxsinclude:

                    if options.range and i >= n:
                        break

                    if fromlst:
                        ln = lstin.read(i)
                        if options.inplace:
                            lst.write(kk, ln[0], ln[1], ln[2])
                        else:
                            lst.write(-1, ln[0], ln[1], ln[2])
                    else:
                        if options.inplace:
                            lst.write(kk, i, f)
                        else:
                            lst.write(-1, i, f)
                    kk += 1

        sys.exit(0)

    if options.retype != None:
        if options.minlosnr > 0 or options.minhisnr > 0 or options.mindf > 0 or options.maxdf > 0:
            print(
                "ERROR: --minlosnr, --minhisnr, --mindf and --maxdf are not compatible with --retype"
            )
            sys.exit(1)

        # if the user provided the leading __ for us, we strip it off and add it back later
        if options.retype[:2] == "__":
            options.retype = options.retype[2:]

        for f in args:
            if options.verbose: print("Processing ", f)
            lst = LSXFile(f, True)

            a = lst.read(0)
            if a[1][:10] != "particles/" and a[1][:12] != "particles3d/":
                print(
                    "To use the --retype option, the .lst file must reference image files in particles/*"
                )

            if options.verbose > 1:
                b = base_name(a[1])
                print("{} -> {}".format(a[1],
                                        b + "__" + options.retype + ".hdf"))

            # loop over the images in the lst file
            for i in range(len(lst)):
                im = lst.read(i)
                if "3d" in a[1]:
                    outname = "particles3d/{}__{}.hdf".format(
                        base_name(im[1]), options.retype)
                else:
                    outname = "particles/{}__{}.hdf".format(
                        base_name(im[1]), options.retype)
                lst.write(i, im[0], outname, im[2])

            lst.normalize()  # clean up at the end

            if options.verbose > 1: print(len(lst), " particles adjusted")

        if options.verbose: print("Done processing {} files".format(len(args)))

    if options.refile != None:
        if options.minlosnr > 0 or options.minhisnr > 0 or options.mindf > 0 or options.maxdf > 0:
            print(
                "ERROR: --minlosnr, --minhisnr, --mindf and --maxdf are not compatible with --refile"
            )
            sys.exit(1)

        for f in args:
            if options.verbose: print("Processing ", f)
            lst = LSXFile(f, True)

            # loop over the images in the lst file
            for i in range(len(lst)):
                im = lst.read(i)
                lst.write(i, im[0], options.refile, im[2])

            lst.normalize()  # clean up at the end

            if options.verbose > 1: print(len(lst), " particles adjusted")

        if options.verbose: print("Done processing {} files".format(len(args)))

    if options.merge != None:

        if options.minlosnr > 0 or options.minhisnr > 0 or options.mindf > 0 or options.maxdf > 0:
            print(
                "ERROR: --minlosnr, --minhisnr, --mindf and --maxdf are not compatible with --merge. Please use --mergesort instead."
            )
            sys.exit(1)

        # create/update output lst
        lsto = LSXFile(options.merge)
        ntot = 0

        # loop over input files
        for f in args:
            lst = LSXFile(f, True)
            ntot += len(lst)

            for i in range(len(lst)):
                im = lst.read(i)
                lsto.write(-1, im[0], im[1], im[2])

        if options.verbose:
            print("{} particles added to {}".format(ntot, options.merge))

    if options.mergeinterleave != None:

        if options.minlosnr > 0 or options.minhisnr > 0 or options.mindf > 0 or options.maxdf > 0:
            print(
                "ERROR: --minlosnr, --minhisnr, --mindf and --maxdf are not compatible with --mergeinterleave. Please use --mergesort instead."
            )
            sys.exit(1)

        # with this option we need to start from scratch
        try:
            os.unlink(options.mergeinterleave)
        except OSError:
            pass

        # create output file
        lsto = LSXFile(options.mergeinterleave)
        ntot = 0

        lstsin = [LSXFile(f, True) for f in args]
        n = min([len(x) for x in lstsin])
        nl = len(lstsin)

        for i in range(n):
            for j, lst in enumerate(lstsin):
                lsto[i * nl + j] = lst[i]

        if options.verbose:
            print("{} particles added to {}".format(n * nl,
                                                    options.mergeinterleave))

    if options.mergesort != None:
        # create/update output lst
        lsto = LSXFile(options.mergesort)
        ntot = 0

        # loop over input files
        ptcls = []
        pfiles = set()
        for f in args:
            lst = LSXFile(f, True)
            ntot += len(lst)

            for i in range(len(lst)):
                im = lst.read(i)
                ptcls.append((im[1], im[0], im[2]))
                pfiles.add(im[1])

        ptcls.sort()

        # remove particles in files not meeting our criteria
        if options.minlosnr > 0 or options.minhisnr > 0 or options.mindf > 0 or options.maxdf > 0:
            # the list conversion here is so we are iterating over a copy and not modifying the set while we iterate over it
            for pfile in list(pfiles):
                js = js_open_dict(info_name(pfile))
                ctf = js["ctf"][0]
                js.close()
                r1 = int(floor(old_div(
                    1.0, (200.0 * ctf.dsbg))))  # lowsnr is 200-20 A
                r2 = int(ceil(old_div(1.0, (20.0 * ctf.dsbg))))
                r3 = int(floor(old_div(
                    1.0, (10.0 * ctf.dsbg))))  # hisnr is 10 to 4 A
                r4 = int(ceil(old_div(1.0, (4.0 * ctf.dsbg))))
                losnr = old_div(sum(ctf.snr[r1:r2]), (r2 - r1))
                hisnr = old_div(sum(ctf.snr[r3:r4]), (r4 - r3))
                if losnr < options.minlosnr or hisnr < options.minhisnr or (
                        options.mindf > 0 and ctf.defocus < options.mindf) or (
                            options.maxdf > 0 and ctf.defocus > options.maxdf):
                    pfiles.remove(pfile)
                    if options.verbose:
                        print(pfile, " removed due to SNR or defocus limits")

        nwrt = 0
        for i in ptcls:
            if i[0] in pfiles:
                lsto.write(-1, i[1], i[0], i[2])
                nwrt += 1

        if options.verbose:
            if nwrt == ntot:
                print("{} particles in {}".format(ntot, options.mergesort))
            else:
                print("{} of {} particles written to {}".format(
                    nwrt, ntot, options.mergesort))

    if options.nocomments:
        for f in args:
            lst = LSXFile(f, True)
            for i in range(len(lst)):
                im = lst.read(i)
                lst.write(i, im[0], im[1])
            lst.normalize()

    if options.sym:
        for f in args:
            lst = load_lst_params(f)
            lout = []
            x = Transform()
            nsym = x.get_nsym(options.sym)
            for l in lst:
                x = l["xform.projection"]
                for i in range(nsym):
                    xt = x.get_sym(options.sym, i)
                    q = l.copy()
                    q["xform.projection"] = xt
                    lout.append(q)

            save_lst_params(lout, f)

    if options.shuffle:
        for f in args:
            lst = load_lst_params(f)
            np.random.shuffle(lst)
            save_lst_params(lst, f)

    if options.extractattr:
        for f in args:
            lst = load_lst_params(f)
            for l in lst:
                e = EMData(l["src"], l["idx"], True)
                if e.has_attr(options.extractattr):
                    l[options.extractattr] = e[options.extractattr]
                else:
                    print(
                        "error: not all particles have the specified attribute"
                    )
                    return
            save_lst_params(lst, f)

    E2end(logid)
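# The branches above all share the same LSXFile access pattern: lst.read(i)
# returns a per-image tuple (image_index, image_file, comment, ...) and
# lst.write(dest, image_index, image_file, comment) appends when dest is -1.
# A minimal copy loop in that style (an illustrative sketch, not part of the
# original script):
def copy_lst(src_path, dst_path):
    src = LSXFile(src_path, True)    # read-only
    dst = LSXFile(dst_path, False)   # writable
    for i in range(len(src)):
        ln = src.read(i)
        dst.write(-1, ln[0], ln[1], ln[2])   # -1 appends a new entry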
  def _predict(self, image_features, num_predictions_per_location_list):
    """Computes encoded object locations and corresponding confidences.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels_i] containing features for a batch of images.
      num_predictions_per_location_list: A list of integers representing the
        number of box predictions to be made per spatial location for each
        feature map.

    Returns:
      box_encodings: A list of float tensors of shape
        [batch_size, num_anchors_i, q, code_size] representing the location of
        the objects, where q is 1 or the number of classes. Each entry in the
        list corresponds to a feature map in the input `image_features` list.
      class_predictions_with_background: A list of float tensors of shape
        [batch_size, num_anchors_i, num_classes + 1] representing the class
        predictions for the proposals. Each entry in the list corresponds to a
        feature map in the input `image_features` list.
    """
    predictions = {
        BOX_ENCODINGS: [],
        CLASS_PREDICTIONS_WITH_BACKGROUND: [],
    }
    for head_name in self._other_heads.keys():
      predictions[head_name] = []
    # TODO(rathodv): Come up with a better way to generate scope names
    # in box predictor once we have time to retrain all models in the zoo.
    # The following lines create scope names to be backwards compatible with the
    # existing checkpoints.
    box_predictor_scopes = [_NoopVariableScope()]
    if len(image_features) > 1:
      box_predictor_scopes = [
          tf.variable_scope('BoxPredictor_{}'.format(i))
          for i in range(len(image_features))
      ]
    for (image_feature,
         num_predictions_per_location, box_predictor_scope) in zip(
             image_features, num_predictions_per_location_list,
             box_predictor_scopes):
      net = image_feature
      with box_predictor_scope:
        with slim.arg_scope(self._conv_hyperparams_fn()):
          with slim.arg_scope([slim.dropout], is_training=self._is_training):
            # Add additional conv layers before the class predictor.
            features_depth = static_shape.get_depth(image_feature.get_shape())
            depth = max(min(features_depth, self._max_depth), self._min_depth)
            tf.logging.info('depth of additional conv before box predictor: {}'.
                            format(depth))
            if depth > 0 and self._num_layers_before_predictor > 0:
              for i in range(self._num_layers_before_predictor):
                net = slim.conv2d(
                    net,
                    depth, [1, 1],
                    reuse=tf.AUTO_REUSE,
                    scope='Conv2d_%d_1x1_%d' % (i, depth))
            sorted_keys = sorted(self._other_heads.keys())
            sorted_keys.append(BOX_ENCODINGS)
            sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND)
            for head_name in sorted_keys:
              if head_name == BOX_ENCODINGS:
                head_obj = self._box_prediction_head
              elif head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
                head_obj = self._class_prediction_head
              else:
                head_obj = self._other_heads[head_name]
              prediction = head_obj.predict(
                  features=net,
                  num_predictions_per_location=num_predictions_per_location)
              predictions[head_name].append(prediction)
    return predictions
Exemple #42
0
def make_graph_from_hosts(hosts):
    #hosts = parser.get_root().search_children('host', deep=True)
    graph = Graph()
    nodes = list()
    node_cache = {}
    ancestor_node_cache = {}
    descendant_node_cache = {}

    # Setting initial reference host
    main_node = NetNode()
    nodes.append(main_node)

    localhost = TracerouteHostInfo()
    localhost.ip = {"addr": "127.0.0.1/8", "type": "ipv4"}
    localhost.hostname = "localhost"
    main_node.set_host(localhost)
    main_node.set_draw_info(
            {"valid": True, "color": (0, 0, 0), "radius": NONE_RADIUS})

    #Save endpoints for attaching scanned hosts to
    endpoints = {}
    # For each host in hosts just mount the graph
    for host in hosts:
        trace = host.trace
        endpoints[host] = nodes[0]
        hops = trace.get("hops")

        # If host has traceroute information mount graph
        if hops is not None and len(hops) > 0:
            prev_node = nodes[0]
            hops = trace.get("hops", [])
            ttls = [int(hop["ttl"]) for hop in hops]

            # Getting nodes of host by ttl
            for ttl in range(1, max(ttls) + 1):
                if ttl in ttls:
                    hop = find_hop_by_ttl(hops, ttl)
                    node = node_cache.get(hop["ipaddr"])
                    if node is None:
                        node = NetNode()
                        nodes.append(node)

                        hop_host = TracerouteHostInfo()
                        hop_host.ip = {
                                "addr": hop["ipaddr"],
                                "type": "",
                                "vendor": ""
                                }
                        node.set_draw_info({"valid": True})
                        node.set_draw_info({"color": (1, 1, 1),
                                            "radius": NONE_RADIUS})

                        if hop["host"] != "":
                            hop_host.hostname = hop["host"]

                        node.set_host(hop_host)

                        node_cache[node.get_info("ip")] = node

                    rtt = hop["rtt"]
                    if rtt != "--":
                        graph.set_connection(node, prev_node, float(rtt))
                    else:
                        graph.set_connection(node, prev_node)
                else:
                    # Add an "anonymous" node only if there isn't already a
                    # node equivalent to it (i.e. at same distance from the
                    # previous "real" node)

                    pre_hop = None
                    pre_hop_distance = 0
                    for i in range(1, ttl + 1):
                        pre_hop = find_hop_by_ttl(hops, ttl - i)
                        if pre_hop is not None:
                            pre_hop_distance = i
                            break

                    post_hop = None
                    post_hop_distance = 0
                    for i in range(1, max(ttls) - ttl):
                        post_hop = find_hop_by_ttl(hops, ttl + i)
                        if post_hop is not None:
                            post_hop_distance = i
                            break

                    assert pre_hop is not None, \
                            "pre_hop should have become localhost if nothing else"  # noqa

                    ancestor_key = (pre_hop["ipaddr"], pre_hop_distance)
                    descendant_key = None
                    if post_hop is not None:
                        descendant_key = \
                                (post_hop["ipaddr"], post_hop_distance)

                    if ancestor_key in ancestor_node_cache:
                        node = ancestor_node_cache[ancestor_key]
                    elif (descendant_key is not None and
                            descendant_key in descendant_node_cache):
                        node = descendant_node_cache[descendant_key]
                        graph.set_connection(node, prev_node)
                    else:
                        node = NetNode()
                        nodes.append(node)

                        node.set_draw_info({"valid": False})
                        node.set_draw_info(
                                {"color": (1, 1, 1), "radius": NONE_RADIUS})

                        graph.set_connection(node, prev_node)

                        ancestor_node_cache[ancestor_key] = node
                        if descendant_key is not None:
                            descendant_node_cache[descendant_key] = node

                prev_node = node
                endpoints[host] = node

    # For each fully scanned host
    for host in hosts:
        ip = host.ip
        if ip is None:
            ip = host.ipv6

        node = node_cache.get(ip["addr"])
        if node is None:
            node = NetNode()
            nodes.append(node)

            node.set_draw_info({"no_route": True})

            graph.set_connection(node, endpoints[host])

        node.set_draw_info({"valid": True})
        node.set_draw_info({"scanned": True})
        set_node_info(node, host)
        node_cache[node.get_info("ip")] = node

    graph.set_nodes(nodes)
    graph.set_main_node(main_node)

    return graph
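# make_graph_from_hosts relies on a find_hop_by_ttl helper that is not shown
# above. A minimal sketch of what it could look like, assuming hops is a list
# of dicts carrying a "ttl" key as used in the loop above (illustrative only):
def find_hop_by_ttl(hops, ttl):
    """Return the hop whose TTL matches, or None if no hop has that TTL."""
    for hop in hops:
        if int(hop["ttl"]) == ttl:
            return hop
    return None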
	def write(self, text):
		buf = ctypes.create_unicode_buffer(text)
		return self.writewchars(buf, max(len(buf) - 1, 0))

def wrap_windows_console_io(stream, is_output):
  def _predict(self, image_features, num_predictions_per_location_list):
    """Computes encoded object locations and corresponding confidences.

    Args:
      image_features: A list of float tensors of shape [batch_size, height_i,
        width_i, channels] containing features for a batch of images. Note that
        when not all tensors in the list have the same number of channels, an
        additional projection layer will be added on top of the tensor to
        generate a feature map with a number of channels consistent with the
        majority.
      num_predictions_per_location_list: A list of integers representing the
        number of box predictions to be made per spatial location for each
        feature map. Note that all values must be the same since the weights are
        shared.

    Returns:
      A dictionary containing:
        box_encodings: A list of float tensors of shape
          [batch_size, num_anchors_i, code_size] representing the location of
          the objects. Each entry in the list corresponds to a feature map in
          the input `image_features` list.
        class_predictions_with_background: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes + 1] representing the class
          predictions for the proposals. Each entry in the list corresponds to a
          feature map in the input `image_features` list.
        (optional) mask_predictions: A list of float tensors of shape
          [batch_size, num_anchors_i, num_classes, mask_height, mask_width].


    Raises:
      ValueError: If the image feature maps do not have the same number of
        channels or if the number of predictions per location differs between
        the feature maps.
    """
    if len(set(num_predictions_per_location_list)) > 1:
      raise ValueError('num predictions per location must be the same for all '
                       'feature maps, found: {}'.format(
                           num_predictions_per_location_list))
    feature_channels = [
        image_feature.shape[3].value for image_feature in image_features
    ]
    has_different_feature_channels = len(set(feature_channels)) > 1
    if has_different_feature_channels:
      inserted_layer_counter = 0
      target_channel = max(set(feature_channels), key=feature_channels.count)
      tf.logging.info('Not all feature maps have the same number of '
                      'channels, found: {}; adding projection layers '
                      'to bring all feature maps to uniform channels '
                      'of {}'.format(feature_channels, target_channel))
    else:
      # Place holder variables if has_different_feature_channels is False.
      target_channel = -1
      inserted_layer_counter = -1
    predictions = {
        BOX_ENCODINGS: [],
        CLASS_PREDICTIONS_WITH_BACKGROUND: [],
    }
    for head_name in self._other_heads.keys():
      predictions[head_name] = []
    for feature_index, (image_feature,
                        num_predictions_per_location) in enumerate(
                            zip(image_features,
                                num_predictions_per_location_list)):
      with tf.variable_scope('WeightSharedConvolutionalBoxPredictor',
                             reuse=tf.AUTO_REUSE):
        with slim.arg_scope(self._conv_hyperparams_fn()):
          (image_feature,
           inserted_layer_counter) = self._insert_additional_projection_layer(
               image_feature, inserted_layer_counter, target_channel)
          if self._share_prediction_tower:
            box_tower_scope = 'PredictionTower'
          else:
            box_tower_scope = 'BoxPredictionTower'
          box_tower_feature = self._compute_base_tower(
              tower_name_scope=box_tower_scope,
              image_feature=image_feature,
              feature_index=feature_index,
              has_different_feature_channels=has_different_feature_channels,
              target_channel=target_channel,
              inserted_layer_counter=inserted_layer_counter)
          box_encodings = self._box_prediction_head.predict(
              features=box_tower_feature,
              num_predictions_per_location=num_predictions_per_location)
          predictions[BOX_ENCODINGS].append(box_encodings)
          sorted_keys = sorted(self._other_heads.keys())
          sorted_keys.append(CLASS_PREDICTIONS_WITH_BACKGROUND)
          for head_name in sorted_keys:
            if head_name == CLASS_PREDICTIONS_WITH_BACKGROUND:
              head_obj = self._class_prediction_head
            else:
              head_obj = self._other_heads[head_name]
            prediction = self._predict_head(
                head_name=head_name,
                head_obj=head_obj,
                image_feature=image_feature,
                box_tower_feature=box_tower_feature,
                feature_index=feature_index,
                has_different_feature_channels=has_different_feature_channels,
                target_channel=target_channel,
                inserted_layer_counter=inserted_layer_counter,
                num_predictions_per_location=num_predictions_per_location)
            predictions[head_name].append(prediction)
    return predictions
Exemple #45
0
def largest_product_path(t):
    if is_leaf(t):
        return label(t)
    else:
        return label(t)*max([largest_product_path(b) for b in branches(t)])
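# largest_product_path assumes a tree ADT with label, branches and is_leaf
# helpers that are not shown above. A minimal list-based sketch of such
# helpers (hypothetical names/representation, for illustration only):
def tree(label, branches=()):
    return [label] + list(branches)

def label(t):
    return t[0]

def branches(t):
    return t[1:]

def is_leaf(t):
    return not branches(t)

# e.g. largest_product_path(tree(2, [tree(3), tree(5, [tree(4)])])) == 40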
def main():
    global best_acc1, start_epoch
    model = get_model(config.get_string('arch'))

    model.cuda()

    learning_rate = scale_lr(
        config.get_float('optimizer.lr'),
        config.get_int('dataloader.batch_size')
    )

    optimizer = optim.SGD(
        model.parameters(),
        lr=learning_rate,
        momentum=config.get_float('optimizer.momentum'),
        weight_decay=config.get_float('optimizer.weight_decay'),
        nesterov=config.get_bool('optimizer.nesterov')
    )
    criterion = nn.CrossEntropyLoss()
    scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer,
        config.get_list('scheduler.milestones')
    )

    if tpp.distributed:
        model = DistributedDataParallel(model, device_ids=[tpp.local_rank])

    normalize = T.Normalize(
        config.get_list('dataset.mean'),
        config.get_list('dataset.std')
    )
    train_transform = T.Compose([
        # UT.RandomCrop(32, padding=4),
        # UT.RandomHorizontalFlip(),
        T.RandomCrop(32, padding=4),
        T.RandomHorizontalFlip(),
        T.ToTensor(),
        normalize
    ])

    val_transform = T.Compose([
        T.ToTensor(),
        normalize
    ])

    train_set = CIFAR10(
        config.get_string('dataset.root'), train=True, transform=train_transform, download=True
    )
    val_set = CIFAR10(
        config.get_string('dataset.root'), train=False, transform=val_transform, download=False
    )

    train_sampler = None
    val_sampler = None
    if tpp.distributed:
        train_sampler = DistributedSampler(train_set)
        val_sampler = DistributedSampler(val_set)

    train_loader = DataLoader(
        train_set,
        batch_size=config.get_int('dataloader.batch_size'),
        pin_memory=True,
        shuffle=(train_sampler is None),
        num_workers=config.get_int('dataloader.num_workers'),
        sampler=train_sampler
    )
    val_loader = DataLoader(
        val_set,
        batch_size=config.get_int('dataloader.batch_size'),
        pin_memory=True,
        num_workers=config.get_int('dataloader.num_workers'),
        sampler=val_sampler
    )

    for epoch in range(start_epoch, config.get_int('strategy.num_epochs')):
        # for epoch in range(start_epoch, 1):

        if tpp.distributed:
            train_sampler.set_epoch(epoch)

        train(model, train_loader, criterion, optimizer, epoch)
        acc1 = validate(model, val_loader, criterion, epoch)
        scheduler.step()

        writer.add_scalar('lr', optimizer.param_groups[0]['lr'], epoch)

        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        save_checkpoint({
            'epoch': epoch + 1,
            'arch': config.get_string('arch'),
            'state_dict': model.module.state_dict() if tpp.distributed else model.state_dict(),
            'best_acc1': best_acc1,
            'optimizer': optimizer.state_dict(),
            'scheduler': scheduler.state_dict(),
        }, is_best=is_best, folder=experiment_path)
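# main() calls a scale_lr helper that is not defined in this snippet. A common
# choice is the linear scaling rule; a sketch under that assumption (the real
# implementation may differ):
def scale_lr(base_lr, batch_size, base_batch_size=256):
    # scale the learning rate proportionally to the global batch size
    return base_lr * batch_size / base_batch_size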
Exemple #47
0
def main():
	imgs = [ cv2.imread('data/%d.jpeg' % i) for i in range(1, 6) ]

	grays = [ cv2.cvtColor(i, cv2.COLOR_BGR2GRAY) for i in imgs ]

	orb = cv2.ORB_create()

	kp_des = [ orb.detectAndCompute(i, None) for i in grays ]

	# Find matches
	bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)
	matches = [ bf.match(kp_des[i][1], kp_des[i+1][1]) for i in range(len(kp_des) - 1) ]

	# Homographies
	Hs = [ None ] * len(matches)

	if len(matches) > 0:

		for i in range(len(matches)):

			src_pts = np.float32([ kp_des[i][0][m.queryIdx].pt for m in matches[i] ]).reshape(-1,1,2)
			dst_pts = np.float32([ kp_des[i+1][0][m.trainIdx].pt for m in matches[i] ]).reshape(-1,1,2)

			M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)

			
			if i < (len(imgs) // 2):
				Hs[i] = M
			else:
				Hs[i] = np.linalg.inv(M)
			# Hs[i] = np.linalg.inv(M)

		# first = len(imgs) - 1
		# first = 0
		first = len(imgs) // 2
		curr_M = np.identity(3)

		curr_img = imgs[first]

		# Panorama translation
		panorama_tr = np.identity(3)

		# Starting at right side
		for i in range(first-1, -1, -1):

			# Bounds of new image (transformed)
			(new_min_w, new_min_h, new_max_w, new_max_h) = get_size(imgs[i], curr_M @ Hs[i])

			# Translation matrix (shift the transformed image so it starts at the origin)
			tr = np.identity(3)
			tr[:, 2] = [ -new_min_w, -new_min_h, 1 ]

			panorama_tr = panorama_tr @ tr

			# Bounds of translated coordinates
			(new_min_w, new_min_h, new_max_w, new_max_h) = get_size(imgs[i], tr @ curr_M @ Hs[i])

			# Bounds of current panorama
			(curr_min_w, curr_min_h, curr_max_w, curr_max_h) = get_size(curr_img, tr)

			# New panorama bounds
			min_w = min(curr_min_w, new_min_w)
			max_w = max(curr_max_w, new_max_w)

			min_h = min(curr_min_h, new_min_h)
			max_h = max(curr_max_h, new_max_h)

			# Transform images
			new_image = cv2.warpPerspective(imgs[i], tr @ curr_M @ Hs[i], dsize=((max_w-min_w)+min_w, (max_h-min_h)+min_h))
			panorama = cv2.warpPerspective(curr_img, tr, dsize=((max_w-min_w)+min_w, (max_h-min_h)+min_h ))

			# Check overlap pixels
			mask1 = new_image.sum(axis=2).astype(bool)
			mask2 = panorama.sum(axis=2).astype(bool)

			mask = np.logical_and(mask1, mask2)

			# Set mean values for these pixels
			new_image[ mask ] //= 2
			panorama[ mask ] //= 2

			# Create new panorama
			new_panorama = new_image + panorama
			
			curr_img = new_panorama
			curr_M = tr @ curr_M @ Hs[i]

		curr_M = np.identity(3)

		# Starting at left side
		for i in range(first, len(Hs)):

			# Bounds of new image (transformed)
			(new_min_w, new_min_h, new_max_w, new_max_h) = get_size(imgs[i+1], curr_M @ Hs[i])

			# Translation matrix (vertical shift only)
			tr = np.identity(3)
			tr[:, 2] = [ 0, -new_min_h, 1 ]

			# Bounds of translated coordinates
			(new_min_w, new_min_h, new_max_w, new_max_h) = get_size(imgs[i+1], panorama_tr @ tr @ curr_M @ Hs[i])

			# Bounds of current panorama
			(curr_min_w, curr_min_h, curr_max_w, curr_max_h) = get_size(curr_img, tr)

			# New panorama bounds
			min_w = min(curr_min_w, new_min_w)
			max_w = max(curr_max_w, new_max_w)

			min_h = min(curr_min_h, new_min_h)
			max_h = max(curr_max_h, new_max_h)

			# Transform images
			new_image = cv2.warpPerspective(imgs[i+1], panorama_tr @ tr @ curr_M @ Hs[i], dsize=((max_w-min_w)+min_w, (max_h-min_h)+min_h))
			panorama = cv2.warpPerspective(curr_img, tr, dsize=((max_w-min_w)+min_w, (max_h-min_h)+min_h ))

			# Check overlap pixels
			mask1 = new_image.sum(axis=2).astype(bool)
			mask2 = panorama.sum(axis=2).astype(bool)

			mask = np.logical_and(mask1, mask2)

			# Set mean values for these pixels
			new_image[ mask ] //= 2
			panorama[ mask ] //= 2

			# Create new panorama
			new_panorama = new_image + panorama
			
			curr_img = new_panorama
			curr_M = tr @ curr_M @ Hs[i]

		cv2.imwrite("panorama.jpg", curr_img)
Exemple #48
0
 def max_finder(lst):
     nonlocal all_els
     all_els = all_els + lst
     return max(all_els)
Exemple #49
0
def result_analysis():
    data_path = '/Users/shichao/workding_dir/data'
    img_to_detect = cv2.imread()  # FIXME: cv2.imread() is missing its image path argument
    img_vol = 120
    img_start_num = 1
    USE_CROP = False
    USE_SKIMAGE = False
    arbitrary_vol = 3
    seq_len = 3
    diff_step = 1
    bbox_thresh = 500

    width = 150
    height = 100

    diff_result = frame_diff_analysis(data_path=data_path,
                                      img_vol=img_vol,
                                      img_start_num=img_start_num,
                                      use_crop=USE_CROP,
                                      arbitrary_vol=arbitrary_vol,
                                      diff_step=diff_step)
    std_result = std_analysis(data_path=data_path,
                              img_vol=img_vol,
                              img_start_num=img_start_num,
                              use_crop=USE_CROP,
                              arbitrary_vol=arbitrary_vol,
                              seq_len=seq_len)

    diff_bbox = np.zeros(diff_result.shape)
    std_bbox = np.zeros(std_result.shape)

    if USE_SKIMAGE:
        print('use scikit-image')
        props_diff = get_box_area(diff_result, use_skimage=USE_SKIMAGE)
        props_std = get_box_area(std_result, use_skimage=USE_SKIMAGE)

        for prop_diff, prop_std in zip(props_diff, props_std):
            if prop_diff.bbox_area > bbox_thresh:
                print('diff centroid {0}'.format(prop_diff.centroid))
                diff_cen_x, diff_cen_y = prop_diff.centroid
                diff_bbox[
                    max(0, int(diff_cen_x - height / 2)
                        ):min(diff_result.shape[0], int(diff_cen_x +
                                                        height / 2)),
                    max(0, int(diff_cen_y - width / 2)
                        ):min(diff_result.shape[1], int(diff_cen_y +
                                                        width / 2))] = 1

                # print('diff width and highth {0}'.format(prop_diff.coords))
            if prop_std.bbox_area > bbox_thresh:
                print('std centroid {0}'.format(prop_std.centroid))
                std_cen_x, std_cen_y = prop_std.centroid
                std_bbox[
                    max(0, int(std_cen_x - height / 2)
                        ):min(std_result.shape[0], int(std_cen_x +
                                                       height / 2)),
                    max(0, int(std_cen_y - width / 2)
                        ):min(std_result.shape[1], int(std_cen_y +
                                                       width / 2))] = 1
                # print('std width and highth {0}'.format(prop_diff.coords))
    else:
        print('use implementation')
        diff_centroids, diff_sizes = get_box_area(diff_result,
                                                  use_skimage=USE_SKIMAGE)
        std_centroids, std_sizes = get_box_area(std_result,
                                                use_skimage=USE_SKIMAGE)
        for diff_centroid, diff_size, std_centroid, std_size in zip(
                diff_centroids, diff_sizes, std_centroids, std_sizes):
            if diff_size > bbox_thresh:
                print('diff centroid {0}'.format(diff_centroid))
                diff_cen_x, diff_cen_y = diff_centroid
                diff_bbox[
                    max(0, int(diff_cen_x - height / 2)
                        ):min(diff_result.shape[0], int(diff_cen_x +
                                                        height / 2)),
                    max(0, int(diff_cen_y - width / 2)
                        ):min(diff_result.shape[1], int(diff_cen_y +
                                                        width / 2))] = 1

            if std_size > bbox_thresh:
                print('std centroid {0}'.format(std_centroid))
                std_cen_x, std_cen_y = std_centroid
                std_bbox[
                    max(0, int(std_cen_x - height / 2)
                        ):min(std_result.shape[0], int(std_cen_x +
                                                       height / 2)),
                    max(0, int(std_cen_y - width / 2)
                        ):min(std_result.shape[1], int(std_cen_y +
                                                       width / 2))] = 1

    lt_x = max(0, int(std_cen_x - height / 2))
    rb_x = min(std_result.shape[0], int(std_cen_x + height / 2))
    lt_y = max(0, int(std_cen_y - width / 2))
    rb_y = min(std_result.shape[1], int(std_cen_y + width / 2))
    bbox_coord = [lt_x, rb_x, lt_y, rb_y]
    resnet_keras(img_to_detect, bbox_coord)
Exemple #50
0
def load_mosaic(self, index):
    # loads images in a mosaic

    labels4 = []
    s = self.img_size
    xc, yc = [int(random.uniform(s * 0.5, s * 1.5))
              for _ in range(2)]  # mosaic center x, y
    indices = [index
               ] + [random.randint(0,
                                   len(self.labels) - 1)
                    for _ in range(3)]  # 3 additional image indices
    for i, index in enumerate(indices):
        # Load image
        img, _, (h, w) = load_image(self, index)

        # place img in img4
        if i == 0:  # top left
            img4 = np.full((s * 2, s * 2, img.shape[2]), 114,
                           dtype=np.uint8)  # base image with 4 tiles
            x1a, y1a, x2a, y2a = max(xc - w, 0), max(
                yc - h, 0), xc, yc  # xmin, ymin, xmax, ymax (large image)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), h - (
                y2a - y1a), w, h  # xmin, ymin, xmax, ymax (small image)
        elif i == 1:  # top right
            x1a, y1a, x2a, y2a = xc, max(yc - h, 0), min(xc + w, s * 2), yc
            x1b, y1b, x2b, y2b = 0, h - (y2a - y1a), min(w, x2a - x1a), h
        elif i == 2:  # bottom left
            x1a, y1a, x2a, y2a = max(xc - w, 0), yc, xc, min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = w - (x2a - x1a), 0, max(xc,
                                                         w), min(y2a - y1a, h)
        elif i == 3:  # bottom right
            x1a, y1a, x2a, y2a = xc, yc, min(xc + w, s * 2), min(s * 2, yc + h)
            x1b, y1b, x2b, y2b = 0, 0, min(w, x2a - x1a), min(y2a - y1a, h)

        img4[y1a:y2a, x1a:x2a] = img[y1b:y2b,
                                     x1b:x2b]  # img4[ymin:ymax, xmin:xmax]
        padw = x1a - x1b
        padh = y1a - y1b

        # Labels
        x = self.labels[index]
        labels = x.copy()
        if x.size > 0:  # Normalized xywh to pixel xyxy format
            labels[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw
            labels[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh
            labels[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw
            labels[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh
        labels4.append(labels)

    # Concat/clip labels
    if len(labels4):
        labels4 = np.concatenate(labels4, 0)
        # np.clip(labels4[:, 1:] - s / 2, 0, s, out=labels4[:, 1:])  # use with center crop
        np.clip(labels4[:, 1:], 0, 2 * s,
                out=labels4[:, 1:])  # use with random_affine

    # Augment
    # img4 = img4[s // 2: int(s * 1.5), s // 2:int(s * 1.5)]  # center crop (WARNING, requires box pruning)
    img4, labels4 = random_affine(img4,
                                  labels4,
                                  degrees=self.hyp['degrees'],
                                  translate=self.hyp['translate'],
                                  scale=self.hyp['scale'],
                                  shear=self.hyp['shear'],
                                  border=-s // 2)  # border to remove

    return img4, labels4
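# The label handling above maps normalized xywh boxes to pixel xyxy coordinates
# with the mosaic offsets padw/padh. The same transform as a stand-alone helper
# (names are illustrative, not from the source):
def xywhn_to_xyxy(x, w, h, padw=0, padh=0):
    # x: array of shape (n, 5) with columns [cls, xc, yc, bw, bh] normalized to [0, 1]
    y = x.copy()
    y[:, 1] = w * (x[:, 1] - x[:, 3] / 2) + padw  # top-left x
    y[:, 2] = h * (x[:, 2] - x[:, 4] / 2) + padh  # top-left y
    y[:, 3] = w * (x[:, 1] + x[:, 3] / 2) + padw  # bottom-right x
    y[:, 4] = h * (x[:, 2] + x[:, 4] / 2) + padh  # bottom-right y
    return y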
obj_df["state_cat"] = obj_df["state"].cat.codes

obj_df["proto"] = obj_df["proto_cat"]
obj_df["service"] = obj_df["service_cat"]
obj_df["state"] = obj_df["state_cat"]

obj_df.drop('proto_cat', axis=1, inplace=True)
obj_df.drop('service_cat', axis=1, inplace=True)
obj_df.drop('state_cat', axis=1, inplace=True)

obj_df = pd.get_dummies(obj_df, columns=["attack_cat"])

X_train = obj_df.values[:, :-10]

for j in range(0, 43):
    maximum = max(X_train[:, j])
    for i in range(0, len(X_train)):
        X_train[i, j] = round(X_train[i, j] / maximum, 3)

# Read in the testing CSV file
print "Reading Testing csv file."
df2 = pd.read_csv("UNSW_NB15_testing-set.csv")
df2.drop('label', axis=1, inplace=True)

obj_df2 = df2

obj_df2["proto"] = obj_df2["proto"].astype('category')
obj_df2["service"] = obj_df2["service"].astype('category')
obj_df2["state"] = obj_df2["state"].astype('category')
obj_df2["proto_cat"] = obj_df2["proto"].cat.codes
obj_df2["service_cat"] = obj_df2["service"].cat.codes
def plot_trial_measures(group_cycle,group_order,trial_measures,bracket_offset=0.5,bracket_tickheight=1,ylims=None,xticklabels=None):
    scale = 46
    offsetscale = 2
    num_sessions = len(trial_measures[0])
    ax = plt.gca()
    ax.set_color_cycle(group_cycle)
    measures = [[trial_measures[group_order[i]][s] for s in (range(num_sessions) if i != 7 else [0])] for i in range(len(trial_measures))]
    [plot_epoch_average(measures[i],label='lesion' if group_cycle[i] == 'r' else 'control',offset=i*offsetscale,scale=scale) for i in range(len(trial_measures))]
    plt.ylabel('time to cross obstacles (s)')
    plt.xlabel('x')
    
    offset1 = 3*offsetscale
    center1 = [scale*i+offset1 for i in range(num_sessions)]
    group1_times = [[np.mean(trial_measures[i][s]) for i in group_order[0:7]] for s in range(num_sessions)]
    group1_mean = [np.mean(x) for x in group1_times]
    group1_std = [np.std(x) for x in group1_times]
    plt.errorbar(center1,group1_mean,group1_std,fmt='o',color=group_cycle[0],ecolor='k',linewidth=2,capthick=2,markersize=0)
    
    offset2 = 10*offsetscale
    center2 = [scale*i+offset2 for i in range(num_sessions)]
    group2_times = [[np.mean(trial_measures[i][s]) for i in (group_order[7:14] if s < 1 else group_order[8:14])] for s in range(num_sessions)]
    group2_mean = [np.mean(x) for x in group2_times]
    group2_std = [np.std(x) for x in group2_times]
    plt.errorbar(center2,group2_mean,group2_std,fmt='o',color=group_cycle[7],ecolor='k',linewidth=2,capthick=2,markersize=0)
    
    pltutils.fix_font_size()
    #handles, labels = ax.get_legend_handles_labels()
    #plt.legend((handles[0],handles[7]),(labels[0],labels[7]))
    tickoffset = 6.5*offsetscale
    xticks = [scale*i+tickoffset for i in range(num_sessions)]
    ax.set_xticks(xticks)
    ax.set_xticklabels(xticklabels)
    
    
    ### BRACKETS (BETWEEN GROUPS) ###
    maxstd = []
    minstd = []
    significance = 0.01
    maxrange = len(measures)
    for i in range(len(xticks)):
        sigtest = stats.ttest_ind(group1_times[i],group2_times[i])[1]
        print sigtest,"groups"
        testlabel = str.format("*",sigtest) if sigtest < significance else 'n.s.'
        maxstd.append(get_bracket_y(measures,range(maxrange) if i < 1 else range(0,7)+range(8,14),i))
        minstd.append(get_negative_bracket_y(measures,range(maxrange) if i < 1 else range(0,7)+range(8,14),i))
        
    maxstd = max(maxstd)
    for i in range(len(xticks)):
        pltutils.hbracket(xticks[i],maxstd+bracket_offset,2,label=testlabel,tickheight=bracket_tickheight)
    ##################################
        
    ### BRACKETS (BETWEEN CONDITIONS) ###
    minstd = min(minstd)
    sigtest = stats.ttest_ind(group1_times[0]+group2_times[0],group1_times[-1]+group2_times[-1])[1]
    print sigtest,"conditions"
    testlabel = str.format("*",sigtest) if sigtest < significance else 'n.s.'
    #pltutils.hbracket(xticks[1],minstd-bracket_offset,5.5,label=testlabel,tickheight=-1.5*bracket_tickheight)
    pltutils.hbracket((xticks[-1]-xticks[0])/2+xticks[0],minstd-bracket_offset,9.5,label=testlabel,tickheight=-1.5*bracket_tickheight)
    #####################################
    
    ### SEPARATORS ###
    ylims = plt.ylim() if ylims is None else ylims
    for i in range(len(xticks)-1):
        separatorxi = (xticks[i]+xticks[i+1])/2
        ax.plot((separatorxi,separatorxi),ylims,'k--')
    plt.ylim(ylims)
    ##################
    
    plt.xlabel('')
    plt.draw()

############# FIGURE 3 ##############################

#### Get contact step activation ###
#def get_contact_step_activity(session,i):
#    activity = session.steps[i]
#    step_index = 3 if session.labels[i]['direction'] == 'right' else 4
#    return activity[:,step_index]
#    
#### Plot contact maximum activation distribution ###
#def plot_contact_distribution(session):
#    max_contacts = [np.max(get_contact_step_activity(session,i)) for i in range(len(session.steps))]
#    maxStContacts = [mc for state,mc in zip(session.labels,max_contacts) if state['state'] == 'stable']
#    maxUnContacts = [mc for state,mc in zip(session.labels,max_contacts) if state['state'] == 'unstable']
#    plt.plot(maxStContacts,'b.')
#    plt.plot(maxUnContacts,'r.')
#    
## Time Difference in microseconds
#def time_diff(t):
#    numTimes = np.size(t)    
#    dT = np.zeros(numTimes-1)    
#    for i in range(numTimes-1):
#        dT[i] = (t[i+1]-t[i]).microseconds
#
#    return dT
#    
#def get_randomized_speed_profiles(experiment,a,sessions,filterpath):    
#    avgSpeeds_allsess = []
#    trialTypes_allsess = []
#
#    # Select Session
#    for s in sessions:
#
#        # Load Valid Trial Filter (manually sorted)
#        validFilename = filterpath +  r'\valid_a' + str(a) + '_s' + str(s) + '.pickle'
#        valid_trials = load_data.load_pickle(validFilename)
#        numTrials = np.size(valid_trials)
#    
#        # There is some misalignment for sessions on the last 4 animals..there session 0 is other session 1
#        if a >= 10:
#            s = s-1
#        
#        # Set Trial Lables
#        labelfilter1 = {'state':'stable'}
#        labelfilter2 = {'state':'unstable'}
#        labelFilters = [labelfilter1, labelfilter2]
#        
#        # Look at all Trajectories
#        trajectories = experiment[a][s].trajectories
#        times = experiment[a][s].time    
#        slices = experiment[a][s].slices
#        labels = experiment[a][s].labels
#        steps = experiment[a][s].steps
#        speeds = experiment[a][s].speeds
#        
#        print str.format('a:s {0}:{1} {2} {3}', a, s, numTrials, len(slices))
#        
#        # Set Valid Trials (No exploration or tracking errors)
#        crossings = valid_trials
#    
#        # Set Binning and Range
#        avgSpeeds = np.zeros((numTrials, numBins))
#        trialTypes = np.zeros((numTrials, 1))
#        for t in range(0,numTrials):
#        
#            #label_indices = np.array(pt.get_labeled_indices(labels,labelFilters[l]))
#            c = crossings[t]
#            
#            # Load X Trajectories and flip all of 'Left'
#            trialX = trajectories[slices[c],0]
#            if utils.is_dict_subset({'direction':'left'},labels[c]):
#                # Align on 2 important rails (the center of rail 3 is 550)
#                # and the center of rail 4 is 737, therefore, the first encounter
#                # is at 550 going "right", and when flipped, (1280-737 = 543)
#                # going "left"...therefore, to correct for the shift, I subtract 1273
#                # and align the left and right trials
#                trialX = np.abs(trialX-1273)
#                
#            # Load Y Trajectories
#            trialY = trajectories[slices[c],1]
#            
#            # Load and Parse Times
#            trialTstrings = times[slices[c]]
#            trialT = np.array([dateutil.parser.parse(timeString) for timeString in trialTstrings])
#            
#            # Measure Progression Speed
#            diffX =  np.diff(trialX)
#            diffT = time_diff(trialT)/1000000 # Time interval in seconds
#            speedX = np.concatenate((np.zeros(1) , diffX/diffT))
#        
#            # Find enter/exit and crop trials
#            indR = np.where(trialX > 1200)
#            indL = np.where(trialX < 150)
#            if (np.size(indR) > 0) and (np.size(indL) > 0):
#                exitInd = indR[0][0]+1
#                enterInd = indL[0][-1]
#                
#            trialX = trialX[enterInd:exitInd]
#            trialY = trialY[enterInd:exitInd]
#            speedX = speedX[enterInd:exitInd]
#            
#            # Bin (progrssion - X) Speed Profiles (from position 200 to 1200)
#            for b in range(0,numBins):
#                bins = np.where((trialX >= (200+(b*binSize))) & (trialX < (200+(b*binSize)+binSize)))
#                if np.size(bins) > 0:
#                    avgSpeeds[t, b] = np.mean(speedX[bins])
#                else:
#                    avgSpeeds[t, b] = np.NaN
#            
#            # Correct for starting speed - - first Third of assay
#            baseSpeed = stats.nanmean(avgSpeeds[t, 0:14])
#            avgSpeeds[t,:] = avgSpeeds[t,:]/baseSpeed
#            
#            # Get Lables
#            label = labels[c]
#            
#            if utils.is_dict_subset({'state':'stable'},label):
#                trialTypes[t] = 0
#            else:
#                trialTypes[t] = 1
#        
#        # Pool All Average Speeds/TrialTypes Across Sessions        
#        avgSpeeds_allsess.append(avgSpeeds)
#        trialTypes_allsess.append(trialTypes)
#    
#    avgSpeeds = np.concatenate(avgSpeeds_allsess)
#    trialTypes = np.concatenate(trialTypes_allsess)
#    return avgSpeeds,trialTypes
#
#def plot_randomized_speed_profiles(avgSpeeds,trialTypes):
#    # Set Plotting Attributes
#    color1 = (0.0, 0.0, 0.0, 0.1)
#    color2 = (1.0, 0.6, 0.0, 0.1)
#    color1b = (0.0, 0.0, 0.0, 1.0)
#    color2b = (1.0, 0.6, 0.0, 1.0)
#    
#    traceColors = [color1, color2]
#    boldColors = [color1b, color2b]    
#    
#    # Plot Average Speeds in bins
#    plt.figure()
#    numTrials = np.size(trialTypes)
#    for t in range(0,numTrials):
#        if trialTypes[t] == 0:
#            plt.plot(avgSpeeds[t,:], color=color1)
#        else:
#            plt.plot(avgSpeeds[t,:], color=color2)
#
#    stableTrials = np.where(trialTypes == 0)
#    unstableTrials = np.where(trialTypes == 1)
#    mSt = stats.nanmean(avgSpeeds[stableTrials, :], 1)
#    mUn = stats.nanmean(avgSpeeds[unstableTrials, :], 1)
#    eSt = stats.nanstd(avgSpeeds[stableTrials, :], 1)/np.sqrt(np.size(stableTrials)-1)
#    eUn = stats.nanstd(avgSpeeds[unstableTrials, :], 1)/np.sqrt(np.size(unstableTrials)-1)
#    
##    eSt = stats.nanstd(avgSpeeds[stableTrials, :], 1)
##    eUn = stats.nanstd(avgSpeeds[unstableTrials, :], 1)
#
#
#    mSt = mSt[0];    
#    mUn = mUn[0];    
#    eSt = eSt[0];    
#    eUn = eUn[0];
#    
#    plt.plot(mUn, color=color2b, linewidth = 7)
#    plt.plot(mSt, color=color1b, linewidth = 7)
#
##    plt.plot(mSt + eSt, color=color1b, linewidth = 0.5)
##    plt.plot(mSt - eSt, color=color1b, linewidth = 0.5)
##    plt.plot(mUn + eUn, color=color2b, linewidth = 0.5)
##    plt.plot(mUn - eUn, color=color2b, linewidth = 0.5)
#    #pltutils.fix_font_size()
#    plt.xlabel('crossing extent (cm)')
#    plt.ylabel('normalized horizontal speed')
#    pltutils.fix_font_size()
#    plt.axis([0, 39, 0, 3])
#    
#    
##### Figure 3b ####
#    
#def get_randomized_group_average_speed_profiles(profiles):        
#    stAvg = []
#    unAvg = []
#    stErr = []
#    unErr = []
#    
#    for avgSpeeds,trialTypes in profiles:
#        # Plot Average Speeds in bins
#        stableTrials = np.where(trialTypes == 0)
#        unstableTrials = np.where(trialTypes == 1)
#        
#        mSt = stats.nanmean(avgSpeeds[stableTrials, :], 1)
#        mUn = stats.nanmean(avgSpeeds[unstableTrials, :], 1)
#        eSt = stats.nanstd(avgSpeeds[stableTrials, :], 1)/np.sqrt(np.size(stableTrials)-1)
#        eUn = stats.nanstd(avgSpeeds[unstableTrials, :], 1)/np.sqrt(np.size(unstableTrials)-1)
#    
#        mSt = mSt[0]
#        mUn = mUn[0]
#        eSt = eSt[0]
#        eUn = eUn[0]
#    
#        stAvg.append(mSt)
#        unAvg.append(mUn)
#        stErr.append(eSt)
#        unErr.append(eUn)
#    return (stAvg,stErr),(unAvg,unErr)
#    
#def get_randomized_group_speed_profile_difference(avgProfiles):    
#    diffs = []
#    errors = []
#
#    # Unpack average profile structure
#    (stAvg,stErr),(unAvg,unErr) = avgProfiles
#    
#    for mSt,eSt,mUn,eUn in zip(stAvg,stErr,unAvg,unErr):
#        # Compute Difference Speed between Stable and Unstable Trials
#        mDiff = mUn-mSt
#        eDiff = np.sqrt((eSt*eSt) + (eUn*eUn))
#        diffs.append(mDiff)
#        errors.append(eDiff)
#        
#    diffs = np.array(diffs)
#    errors = np.array(errors)
#    return diffs,errors,np.mean(diffs[:, 20:], 1)
#    
#def plot_randomized_group_average_speed_profiles(avgProfiles,labelx=True,labely=True,legend=True,title=None):
#    # Set Plotting Attributes
#    color1b = (0.0, 0.0, 0.0, 1.0)
#    color2b = (1.0, 0.6, 0.0, 1.0)
#    
#    # Unpack average profile structure
#    (stAvg,stErr),(unAvg,unErr) = avgProfiles
#    
#    # Prepare Bulk Arrays
#    stAvg = np.array(stAvg)
#    unAvg = np.array(unAvg)
#    stErr = np.array(stErr)
#    unErr = np.array(unErr)
#    
#    # Plot Averge Speed Profiles
#    a1 = plt.plot(np.mean(unAvg,0), color = color2b, linewidth = 3,label='unstable')
#    a2 = plt.plot(np.mean(stAvg,0), color = color1b, linewidth = 3,label='stable')
#    if labelx:
#        plt.xlabel('crossing extent (cm)')
#    if labely:
#        plt.ylabel('normalized horizontal speed')
#    pltutils.fix_font_size()
#    plt.axis([0, numBins, 0.5, 2.0])
#    plt.yticks([0.75,1.0,1.25,1.5,1.75])
#    if legend:
#        plt.legend((a2[0],a1[0]),('stable','unstable'),loc='upper left')
#        
#    if title is not None:
#        ax = plt.gca()
#        ax.text(0.5,0.9,title,horizontalalignment='center',transform=ax.transAxes)
#    
#def plot_randomized_speed_profile_difference_comparison(controls,lesions):
#    controlMeans = np.mean(controls, 0)
#    controlMeanAll = np.mean(controlMeans)
#    controlError = np.std(controlMeans)/np.sqrt(7-1)
#    
#    lesionMeans = np.mean(lesions, 0)
#    lesionMeanAll = np.mean(lesionMeans)
#    lesionlError = np.std(lesionMeans)/np.sqrt(6-1)
#    
#    significance = 0.05
#    sigtest = stats.ttest_ind(controlMeans, lesionMeans)[1]
#    testlabel = str.format("*",sigtest) if sigtest < significance else 'n.s.'
#    print sigtest
#    
#    cX = [1.1, 1.1, 1.1, 1.1, 1.1, 1.1, 1.1]
#    lX = [1.9, 1.9, 1.9, 1.9, 1.9, 1.9]
#    
#    plt.plot([0,3], [0,0], color=[0.25,0.25,0.25,1], linewidth = 1)
#    plt.plot(cX, controlMeans, 'bo')
#    plt.plot(lX, lesionMeans, 'ro')
#    plt.plot(1.9, lesionMeans[5], 'o', color = [1.0, 0.75, 0.75, 1.0])
#    
#    #plt.bar(1.2, controlMeanAll, 0.2, color=[1.0,1.0,1.0,0.0])
#    plt.errorbar(1.3, controlMeanAll, controlError, marker='s', mfc='blue', ecolor = 'black', mec='black', ms=1, mew=1, capsize=5, elinewidth=2)
#    plt.plot([1.0,1.4], [controlMeanAll,controlMeanAll], color=[0.25,0.25,1.0,1], linewidth = 2)
#    
#    #plt.bar(1.6, lesionMeanAll, 0.2, color=[1.0,1.0,1.0,0.0])
#    plt.errorbar(1.7, lesionMeanAll, lesionlError, marker='s', mfc='red', ecolor = 'black', mec='black', ms=1, mew=1, capsize=5, elinewidth=2)
#    plt.plot([1.6,2.0], [lesionMeanAll,lesionMeanAll], color=[1.00,0.25,0.25,1], linewidth = 2)
#    pltutils.hbracket(1.5,0.17,4.5,label=testlabel,tickheight=0.01)
#    
#    ax = plt.gca()
#    ax.set_xticks([1.1,1.9])
#    ax.set_xticklabels(['controls','lesions'])
#    plt.ylabel('normalized speed difference')
#    
#    pltutils.fix_font_size()
#    plt.axis([0.75,2.25,-0.2,0.2])
fig.savefig(str(filename) + '_profiles.pdf')  # save the figure to file
#plt.close()

# plot plume 3d

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
ax.scatter(x, y, z)

angle = np.linspace(0, 2 * math.pi, num=50)
angle = angle.reshape((-1, 1))

x_plume = np.cos(angle)
y_plume = np.sin(angle)

z_max = max(z)
z_min = min(z)

n_sect = 50

zeta_grid = np.linspace(z_min, z_max * 0.99, num=n_sect)

l_seg = []

for i in range(1, x.shape[0], 1):
    l_seg.append(((x[i, 0] - x[i - 1, 0])**2 + (y[i, 0] - y[i - 1, 0])**2 +
                  (z[i, 0] - z[i - 1, 0])**2)**0.5)

l_seg = np.asarray(l_seg)
l_seg = l_seg.reshape((-1, 1))
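# The per-segment lengths above can also be computed without the explicit loop;
# an equivalent vectorized sketch (assuming x, y, z are (n, 1) column arrays):
deltas = np.diff(np.hstack((x, y, z)), axis=0)            # (n-1, 3) step vectors
l_seg_vectorized = np.sqrt((deltas ** 2).sum(axis=1)).reshape((-1, 1))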
Exemple #54
0
def solving_logistic_regression(datapath, learning_rate = 0.54,batch = 500,n_epoch = 30):
    ##for MNIST DATA LOADING PROCESS
    print "loading data...."
    mnist_data = upload_data(datapath)
    train, valid, test = mnist_data
    
    ##creating theano buffer for python data
    
    print 'converting data to shared variables'
    train_x, train_y = to_shared(train)
    valid_x, valid_y = to_shared(valid)
    test_x, test_y = to_shared(test)
    
    n_train_batch =  train[0].shape[0] // batch 
    n_valid_batch =  valid[0].shape[0] // batch
    n_test_batch  =  test[0].shape[0]  // batch
   
    
    x = T.matrix('x')
    y = T.ivector('y')
    index = T.iscalar('index')
       
    logistic = LogisticRegression(input = x,
                                  n_in = 784,
                                  n_out = 10)
    
    
    fun_valid = function(inputs  = [index],
                         outputs = logistic.error(y),
                         givens  = [(x,valid_x[index*batch:(index+1)*batch,:]),
                                    (y,valid_y[index*batch:(index+1)*batch])]
                        )   
       
    fun_test = function(inputs  = [index],
                        outputs = logistic.y_pred,
                        givens  = [(x,test_x[index*batch:(index+1)*batch,:])],
                       )    
        
    print "calaculating cost function"                
    cost = logistic.negative_log_likelihood(y) 
    
    g_W = T.grad(cost = cost,wrt = logistic.W)                                  
    g_b = T.grad(cost = cost,wrt = logistic.b)
                        
    updates = [(logistic.W, logistic.W - g_W*learning_rate),
               (logistic.b, logistic.b - g_b*learning_rate)]
               
    fun_train = function(inputs =[index],
                         outputs = logistic.params,
                         updates = updates,
                         givens = [(x,train_x[index*batch:(index+1)*batch,:]),
                                   (y,train_y[index*batch:(index+1)*batch])]
                         )
                  
                      

    ################
    #TRAINING MODEL#
    ################
    print 'training starts now -->'
    patience = 5000
    patience_increase = 2
    
    improvement = 0.96
    validation_frequency = min(n_train_batch, patience//2)    
  
    least_error = np.Inf
    epoch = 0
    done_looping = False
    
    print 'EPOCH counting .....'
    start_time = timeit.default_timer()
    while epoch < n_epoch and (not done_looping):
        for current_batch in range(n_train_batch):            
            total_batches = (epoch*n_train_batch) + current_batch
            fun_train(current_batch) 
            
            if (total_batches+1) % validation_frequency == 0:                
                this_error = [fun_valid(n) for n in range(n_valid_batch)]
                this_error = np.mean(this_error)
                
                if this_error < least_error*improvement:
                    least_error = this_error
                    patience =  max(patience,total_batches * patience_increase)
                    with open('/home/sameer/best_model.pkl', 'wb') as f:
                        pickle.dump(logistic, f)
                    
        if total_batches > patience:
            done_looping = True
        epoch += 1
        if total_batches != 0:
            print least_error
            print 'the convergence ratio is %f' %(patience/float(total_batches))
    
    end_time = timeit.default_timer()
    net_time = end_time - start_time
    print 'total time %f' %net_time
    print 'time per epoch %f' %(net_time/epoch)
    print 'the error is %f' %least_error
    print 'the total number of epochs %d' %epoch
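
# A hedged usage sketch (not part of the original example): reload the model
# pickled above from '/home/sameer/best_model.pkl' and score it on raw test
# data. It assumes upload_data() is the same helper used above and that
# LogisticRegression stores its symbolic input as `.input`, as in the standard
# Theano tutorial; both are assumptions, not guarantees.
def evaluate_saved_model(datapath, model_path='/home/sameer/best_model.pkl'):
    import pickle
    import numpy as np
    import theano
    with open(model_path, 'rb') as f:
        logistic = pickle.load(f)
    _, _, test = upload_data(datapath)
    test_x, test_y = test
    predict = theano.function([logistic.input], logistic.y_pred)
    y_hat = predict(test_x)
    return np.mean(y_hat != test_y)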
Exemple #55
0
def proc_file(filepath):
    """
    proc_file - process one .xlsx file

    :param str filepath: path to file
    :return: list of lists, rows of info. as expected in main()
    """

    print(filepath)

    # get the first sheet
    book = load_workbook(filename=filepath, read_only=True, data_only=True)
    sheets = book.get_sheet_names()
    sheet = book[sheets[0]]
    row_source = sheet.rows
    row0 = next(row_source)
    # get field names from the first row
    fields = [i.value for i in row0]

    data = {
        'filepath': filepath,
        'fields':
        {field: AttrDict({f: 0
                          for f in FIELDS})
         for field in fields}
    }

    for field in fields:
        # init. mins/maxs with invalid value for later calc.
        data['fields'][field].update(
            dict(
                min=NAN,
                max=NAN,
                field=field,
                file=filepath,
            ))

    rows = 0
    for row in row_source:

        if rows % 1000 == 0:  # feedback every 1000 rows
            print(rows)
            # Much cleaner to exit by creating a file called "STOP" in the
            # local directory than to try and use Ctrl-C, when using
            # multiprocessing.  Save time by checking only every 1000 rows.
            if os.path.exists("STOP"):
                print("Process aborting because of './STOP' file.")
                return

        rows += 1

        for cell_n, cell in enumerate(row):
            d = data['fields'][fields[cell_n]]
            if cell.value is None or unicode(cell.value).strip() == '':
                d.blank += 1
            else:
                try:
                    x = float(cell.value)
                    d.sum += x
                    d.sumsq += x * x
                    d.n += 1
                    # min is x if no value seen yet, else min(prev-min, x)
                    if isnan(d.min):
                        d.min = x
                    else:
                        d.min = min(d.min, x)
                    # as for min
                    if isnan(d.max):
                        d.max = x
                    else:
                        d.max = max(d.max, x)
                except (ValueError, TypeError):
                    d.bad += 1
Exemple #56
0
 def _zip_lists(self, *args):
     max_len = len(max(*args, key=len))
     eq_it = map(lambda v: v + [v[-1]] * (max_len - len(v)), args)
     return zip(*eq_it)
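
# A self-contained sketch of the same pad-and-zip idea, for illustration only:
# every argument list is padded to the longest length by repeating its last
# element, then the lists are zipped position-wise.
def zip_lists(*args):
    max_len = len(max(*args, key=len))
    padded = [v + [v[-1]] * (max_len - len(v)) for v in args]
    return list(zip(*padded))

print(zip_lists([1, 2, 3], ['a'], ['x', 'y']))
# [(1, 'a', 'x'), (2, 'a', 'y'), (3, 'a', 'y')]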
Exemple #57
0
h,w = map(int,input().split())
C = [input() for i in range(h)]
M = h*w
ans = -1
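# For every possible start cell `id`, run a bitmask DP over sets of visited
# cells; whenever a walk returns to its start, record how many distinct cells
# it used, i.e. the length of that simple cycle through `id`.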
for id in range(M):
    dp = [[0]*M for i in range(1<<M)]
    dp[0][id] = 1
    for i in range(1<<M):
        for j in range(M):
            if dp[i][j] == 0:
                continue
            x,y = divmod(j,w)
            for dx,dy in ((1,0),(0,1),(-1,0),(0,-1)):
                nx = x+dx
                ny = y+dy
                nid = nx*w+ny
                if 0 <= nx < h and 0 <= ny < w and C[nx][ny] != "#":
                    if i >> nid & 1:
                        continue
                    dp[i|1<<nid][nid] = 1

            if j == id:
                ans = max(ans,bin(i).count("1"))
if ans < 3:
    ans = -1
print(ans)
Exemple #58
0
    def _plotTimeCounts(self, wksp):
        """ Plot time/counts
        """
        import datetime
        # Rebin events by pulse time
        try:
            # Get run start and run stop
            if wksp.getRun().hasProperty("run_start"):
                runstart = wksp.getRun().getProperty("run_start").value
            else:
                runstart = wksp.getRun().getProperty("proton_charge").times[0]
            runstop = wksp.getRun().getProperty("proton_charge").times[-1]

            runstart = str(runstart).split(".")[0].strip()
            runstop = str(runstop).split(".")[0].strip()

            t0 = datetime.datetime.strptime(runstart, "%Y-%m-%dT%H:%M:%S")
            tf = datetime.datetime.strptime(runstop, "%Y-%m-%dT%H:%M:%S")

            # Calculate run duration in seconds
            dt = tf-t0
            timeduration = dt.days*3600*24 + dt.seconds

            timeres = float(timeduration)/MAXTIMEBINSIZE
            if timeres < 1.0:
                timeres = 1.0

            sumwsname = '_Summed_{}'.format(wksp)
            if AnalysisDataService.doesExist(sumwsname) is False:
                sumws = api.SumSpectra(InputWorkspace=wksp, OutputWorkspace=sumwsname)
                sumws = api.RebinByPulseTimes(InputWorkspace=sumws, OutputWorkspace=sumwsname,
                                              Params='{}'.format(timeres))
                sumws = api.ConvertToPointData(InputWorkspace=sumws, OutputWorkspace=sumwsname)
            else:
                sumws = AnalysisDataService.retrieve(sumwsname)
        except RuntimeError as e:
            return str(e)

        vecx = sumws.readX(0)
        vecy = sumws.readY(0)

        xmin = min(vecx)
        xmax = max(vecx)
        ymin = min(vecy)
        ymax = max(vecy)

        # Reset graph
        self.ui.mainplot.set_xlim(xmin, xmax)
        self.ui.mainplot.set_ylim(ymin, ymax)

        self.ui.mainplot.set_xlabel('Time (seconds)', fontsize=13)
        self.ui.mainplot.set_ylabel('Counts', fontsize=13)

        # Set up main line
        setp(self.mainline, xdata=vecx, ydata=vecy)

        # Reset slide
        newslidery = [min(vecy), max(vecy)]

        newleftx = xmin + (xmax-xmin)*self._leftSlideValue*0.01
        setp(self.leftslideline, xdata=[newleftx, newleftx], ydata=newslidery)

        newrightx = xmin + (xmax-xmin)*self._rightSlideValue*0.01
        setp(self.rightslideline, xdata=[newrightx, newrightx], ydata=newslidery)
        self.canvas.draw()
Exemple #59
0
def vector(a,b,problem,story,target,feats=False):
    a = a[1]
    b = b[1]
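    # Build a feature vector comparing the two candidate quantities a and b:
    # syntactic roles, compound/subset flags, entity/container containment,
    # adjective and location matches, in-between-word cues, question-text cues,
    # and WordNet-based verb similarity.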

    vec = []
    features = []
    features.append(" a role d ")
    vec.append(int(a.role == 'do'))
    vec.append(int(a.role == 'subj'))
    vec.append(int(a.role == 'other'))
    vec.append(int(b.role == 'do'))
    vec.append(int(b.role == 'subj'))
    vec.append(int(b.role == 'other'))

    #subset
    vec.append(a.subset)
    vec.append(b.subset)

    features.append("a compound?")
    vec.append(int(a.compound))

    features.append("b compound?")
    vec.append(int(b.compound))

    features.append("a subtype of b")
    vec.append(int(a.entity in b.subtypes))

    features.append("b subtype of a")
    vec.append(int(b.entity in a.subtypes))

    features.append("a contians b entity match")
    if a.contains == None and b.entity == None: vec.append(0)
    elif a.contains == None or b.entity == None: vec.append(-1)
    elif b.entity in a.contains: vec.append(1)
    else: vec.append(-1)

    features.append("b contains a entity match")
    if b.contains == None and a.entity == None: vec.append(0)
    elif b.contains == None or a.entity == None: vec.append(-1)
    elif a.entity in b.contains: vec.append(1)
    else: vec.append(-1)


    features.append("acontainer bentity match")
    if a.container == None and b.entity == None: vec.append(0)
    elif a.container == None or b.entity == None: vec.append(-1)
    elif b.entity in a.container: vec.append(1)
    else: vec.append(-1)

    features.append("bcontainer aentity match")
    if b.container == None and a.entity == None: vec.append(0)
    elif b.container == None or a.entity == None: vec.append(-1)
    elif a.entity in b.container: vec.append(1)
    else: vec.append(-1)

    features.append("b container a entity match")
    if b.container == None and a.container == None: vec.append(0)
    elif b.container == None or a.container==None: vec.append(-1)
    else:
        #bcont = b.container.split(" ")[-1]
        #acont = a.container.split(" ")[-1]
        bcont = b.container
        acont = a.container
        if bcont in acont or acont in bcont: vec.append(1)
        else: vec.append(-1)

    features.append("entity match")
    if b.entity == None and a.entity == None: vec.append(0)
    elif b.entity == a.entity: vec.append(1)
    else: vec.append(-1)

    features.append("adj match")
    if b.adjs == None and a.adjs == None: vec.append(0)
    elif b.adjs == a.adjs: vec.append(1)
    else: vec.append(-1)

    features.append("loc match")
    if b.location == None and a.location == None: vec.append(0)
    elif b.location == a.location: vec.append(1)
    else: vec.append(-1)

    features.append('number distances')
    try:
        distance = abs(int(a.idx)-int(b.idx))
        distance = 1 / ( 10000 - distance )
    except: distance = 1
    vec.append(distance)

    features.append('x is operand')
    if a.num == 'x' or b.num=='x': vec.append(1)
    else: vec.append(0)
    features.append('x is not operand')
    if a.num =='x' or b.num == 'x': vec.append(0)
    else: vec.append(1)

    features.append('a target match')
    if a.entity==target: vec.append(1)
    else: vec.append(0)
    features.append('b target match')
    if b.entity==target: vec.append(1)
    else: vec.append(0)


    asidx = a.idx//1000
    bsidx = b.idx//1000
    story = story['sentences']
    asent = [x[0] for x in story[asidx]['words']]
    bsent = [x[0] for x in story[bsidx]['words']]
    #words inbetween features
    awidx = a.idx%1000
    bwidx = b.idx%1000
    allwords = []
    for j in range(len(story)):
        for i,x in enumerate(story[j]['words']):
            allwords.append((j*1000+i,x[0]))
    low = min(a.idx,b.idx)
    high = max(a.idx,b.idx)
    wordseg = [x[1] for x in allwords if x[0]>low and high>x[0]]
    for item in [',','and','but']:
        features.append(item)
        if item in wordseg:
            vec.append(1)
        else:
            vec.append(0)

    features.extend(["a times",'b times',"a total",'b total',"a together",'b together',"a more", 'b more' ,"a less",'b less',"a add",'b add',"a divide",'b divide',"a split",'b split',"a equal",'b equal',"a equally",'b equally'])
    for li in ["times","total","together","more","less","add","divide","split","equal","equally"]:
        if li in asent:
            vec.append(1)
        else:
            vec.append(0)
        if li in bsent: vec.append(1)
        else: vec.append(0)

    #target features
    problem = story[-1]['text'].lower()
    if " how " in problem:
        problem = problem.split(" how ")[-1]
    elif " what " in problem:
        problem = problem.split(" what ")[-1]

    if " , " in problem:
        problem = problem.split(" , ")[0]
    features.append("in all")
    if "in all" in problem: vec.append(1)
    else: vec.append(0)
    features.append("end with")
    if "end with" in problem: vec.append(1)
    else: vec.append(0)
    problem = problem.split()
    features.extend(["times","total","together","more","less","add","divide","split","left","equal","equally","now",'left','start'])
    for li in ["times","total","together","more","less","add","divide","split","left","equal","equally","now",'left','start']:
        if li in problem:
            vec.append(1)
        else:
            vec.append(0)


    if a.verbs == None or b.verbs == None:
        dist = 1
    else:
        avl = a.verbs.split(" ")
        bvl = b.verbs.split(" ")

        if len([x for x in avl if x in bvl ])>0:
            dist = 0
        else:
            dist = 1
            for aw in avl:
                asyns = wn.synsets(aw)
                for asyn in asyns:
                    for bw in bvl:
                        bsyns = wn.synsets(bw)
                        for bsyn in bsyns:
                            if asyn._pos == bsyn._pos: 
                                try:
                                    sim = 1/(1+bsyn.res_similarity(asyn,brown_ic))
                                except:
                                    sim = 2
                                if sim < dist:
                                    dist = sim
    features.append("Verb distance")
    vec.append(dist)

    #verb similarity
    verbs = ['be', 'do', 'go', 'have', 'leave', 'keep', 'get', 'make', 'tell', 'place', 'lose', 'change', 'give', 'hand', 'take', 'buy', 'receive', 'put', 'set', 'like', 'want', 'call', 'divide', 'split']
    #verbs = pickle.load(open('data/predicates'+FOLD,'rb'))
    #verbs = ['add','multiply','divide','subtract']

    for v in verbs:
        features.append(v)
        vsyns = wn.synsets(v, pos='v')

        dist = 1
        if b.verbs is not None:
            for verb in b.verbs.split(' '):
                bsyns = wn.synsets(verb, pos='v')
                if verb == v:
                    dist = 0
                else:
                    for vsyn in vsyns:
                        for bsyn in bsyns:
                            try:
                                sim = 1/(1+vsyn.lin_similarity(bsyn,brown_ic))
                            except:
                                sim = 2
                            if sim < dist:
                                dist = sim
        vec.append(dist)
    if feats:
        return (features, vec)
    else:
        return vec
Exemple #60
0
 def getValue(self):
     """
     Compute and return my value
     """
     # compute and return my value
     return max(operand.getValue() for operand in self.operands)
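
# Illustrative only: a minimal operand hierarchy the getValue above could live
# in. The class and attribute names here are assumptions, not the original API.
class Literal(object):
    def __init__(self, value):
        self.value = value

    def getValue(self):
        return self.value


class Max(object):
    def __init__(self, *operands):
        self.operands = operands

    def getValue(self):
        # take the largest value among my operands
        return max(operand.getValue() for operand in self.operands)


print(Max(Literal(3), Literal(7), Literal(5)).getValue())  # 7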