Example No. 1
def cpuspec_to_hex(cpuspec):
    """convert a cpuspec to the hexadecimal string representation"""
    log.debug('cpuspec_to_hex(%s)', cpuspec)
    cpuspec_check(cpuspec, usemax=False)
    groups = cpuspec.split(',')
    number = 0
    for sub in groups:
        items = sub.split('-')
        if len(items) == 1:
            if not len(items[0]):
                # two consecutive commas in cpuspec
                continue
            # one cpu in this group
            log.debug(" adding cpu %s to result", items[0])
            number |= 1 << int(items[0])
        elif len(items) == 2:
            il = [int(ii) for ii in items]
            if il[1] >= il[0]: rng = lrange(il[0], il[1] + 1)
            else: rng = lrange(il[1], il[0] + 1)
            log.debug(' group=%s has cpu range of %s', sub, rng)
            for num in rng:
                number |= 1 << num
        else:
            raise CpusetException('CPUSPEC "%s" has bad group "%s"' %
                                  (cpuspec, sub))
    log.debug(' final int number=%s in hex=%x', number, number)
    return '%x' % number
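
Every snippet in this collection leans on an lrange helper that is never defined here; in Python 2/3 compatibility shims (for example the old pandas.compat module) it is simply a list-returning range. A minimal sketch of that helper, followed by a worked instance of the bitmask logic above (the '0-3,5' spec is an illustrative value, not taken from the source):

def lrange(*args):
    # Hypothetical stand-in: a range() that always returns a list.
    return list(range(*args))

# cpus 0-3 set bits 0..3 and cpu 5 sets bit 5, so '0-3,5' maps to hex '2f'.
number = 0
for cpu in lrange(0, 4) + [5]:
    number |= 1 << cpu
assert '%x' % number == '2f'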
Example No. 2
 def do_test_pagination(self, bareList):
     data = mklist('x', *lrange(101, 131))
     if not bareList:
         data = base.ListResult(data)
         data.offset = None
         data.total = len(data)
         data.limit = None
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=0).apply(data),
         base.ListResult(mklist('x', *lrange(101, 131)), offset=0,
                         total=30))
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=10).apply(data),
         base.ListResult(mklist('x', *lrange(111, 131)),
                         offset=10,
                         total=30))
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=10, limit=10).apply(data),
         base.ListResult(mklist('x', *lrange(111, 121)),
                         offset=10,
                         total=30,
                         limit=10))
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=20, limit=15).apply(data),
         base.ListResult(mklist('x', *lrange(121, 131)),
                         offset=20,
                         total=30,
                         limit=15))  # off the end
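
The expected slices above follow plain offset/limit arithmetic; a minimal sketch of the same bookkeeping with an ordinary list (mklist, ListResult and ResultSpec are Buildbot test helpers that are not reproduced here):

# Plain-list equivalent of the offset=10, limit=10 case tested above.
data = list(range(101, 131))          # 30 items, mirroring mklist('x', 101..130)
offset, limit = 10, 10
page = data[offset:offset + limit]
assert page == list(range(111, 121)) and len(data) == 30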
Example No. 3
def plot_performance(parser, args, pore_measure):
    """
    Plot the pore performance in terms of reads per pore
    """
    flowcell_layout = minion_flowcell_layout()

    pore_values = []
    for pore in flowcell_layout:
        if pore in pore_measure:
            pore_values.append(pore_measure[pore])
        else:
            pore_values.append(0)

    # make a data frame of the lists
    d = {
        'rownum': lrange(1, 17) * 32,
        'colnum': sorted(lrange(1, 33) * 16),
        'tot_reads': pore_values,
        'labels': flowcell_layout
    }
    df = pd.DataFrame(d)

    d = df.pivot("rownum", "colnum", "tot_reads")
    sns.heatmap(d, annot=True, fmt="d", linewidths=.5)

    if args.saveas is not None:
        plot_file = args.saveas
        plt.savefig(plot_file, figsize=(8.5, 8.5))
    else:
        plt.show()
Example No. 4
def map_along_axis(f, arr, axis):
    '''Apply a function to a specific axis of an array
       This is slightly different from np.apply_along_axis when used
       in more than 2 dimensions.
       apply_along_axis applies the function to the 1D arrays which are associated with that axis
       map_along_axis transposes the original array so that that dimension is first
       and then applies the function to each entire (N-1)D array

       Example:
       >>> arr = np.arange(8).reshape([2,2,2])
       >>> arr
       array([[[0, 1],
               [2, 3]],
              [[4, 5],
               [6, 7]]])
       >>> np.apply_along_axis(np.sum, 1, arr)
       array([[ 2,  4],
              [10, 12]])
       >>> map_along_axis(np.sum, arr, 1)
       array([10, 18])
    '''
    arr = np.asanyarray(arr)
    axis = axis + arr.ndim if axis < 0 else axis
    new_dim_order = [axis] + lrange(axis) + lrange(axis+1, arr.ndim)
    return np.array([f(a) for a in arr.transpose(new_dim_order)])
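
A runnable check of the docstring example above; NumPy is the only dependency, and the transpose mirrors what map_along_axis does internally for axis=1:

import numpy as np

arr = np.arange(8).reshape([2, 2, 2])
# Move axis 1 to the front, then reduce each remaining 2-D block, exactly as
# map_along_axis(np.sum, arr, 1) would.
out = np.array([np.sum(a) for a in arr.transpose([1, 0, 2])])
assert out.tolist() == [10, 18]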
Example No. 6
 def test_pagination_prepaginated(self):
     data = base.ListResult(mklist('x', *lrange(10, 20)))
     data.offset = 10
     data.total = 30
     data.limit = 10
     self.assertListResultEqual(
         # ResultSpec has its offset/limit fields cleared
         resultspec.ResultSpec().apply(data),
         base.ListResult(mklist('x', *lrange(10, 20)),
                         offset=10, total=30, limit=10))
Example No. 7
    def rgb_ll_rgb2xy_list(
        cls,
        rgb_ll,
        rgb_TARGET,
    ):
        n_ROW, n_COL = (len(rgb_ll), list2singleton(lmap(len, rgb_ll)))

        xy_list_ALL = lproduct(lrange(n_COL), lrange(n_ROW))
        xy_list_VALID = [(x, y) for x, y in xy_list_ALL
                         if rgb_ll[y][x] == rgb_TARGET]
        return xy_list_VALID
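
Like lrange, the lmap and lproduct helpers used in these snippets are presumably list-returning wrappers around map and itertools.product. A small self-contained sketch of the pixel search above, using a toy 2x2 image (all values are illustrative):

from itertools import product

def lmap(f, *seqs):
    # Hypothetical list-returning helper, mirroring lrange above.
    return list(map(f, *seqs))

def lproduct(*seqs):
    # Hypothetical list-returning wrapper around itertools.product.
    return list(product(*seqs))

rgb_ll = [[(255, 0, 0), (0, 0, 0)],     # toy 2x2 "image": rows of RGB tuples
          [(0, 0, 0), (255, 0, 0)]]
rgb_TARGET = (255, 0, 0)
n_ROW, n_COL = len(rgb_ll), lmap(len, rgb_ll)[0]
xy_all = lproduct(range(n_COL), range(n_ROW))
assert [(x, y) for x, y in xy_all if rgb_ll[y][x] == rgb_TARGET] == [(0, 0), (1, 1)]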
Example No. 8
def evaluateOfficial(annotations, results, relevant_labels, dataset, quiet):
    """
      Calculation of mAP and interpolated PR-curve based on the FIVR evaluation process.
      Args:
        annotations: the annotation labels for each query
        results: the similarities of each query with the videos in the dataset
        relevant_labels: labels that are considered positives
        dataset: video ids contained in the dataset
      Returns:
        mAP: the mean Average Precision
        pr_curve: the values of the PR-curve
    """
    pr, mAP = [], []
    iterations = viewitems(annotations) if not quiet else tqdm(viewitems(annotations))
    for query, gt_sets in iterations:
        query = str(query)
        if query not in results: print('WARNING: Query {} is missing from the result file'.format(query)); continue
        if query not in dataset: print('WARNING: Query {} is not in the dataset'.format(query)); continue

        # set of relevant videos
        query_gt = set(sum([gt_sets[label] for label in relevant_labels if label in gt_sets], []))
        query_gt = query_gt.intersection(dataset)
        if not query_gt: print('WARNING: Empty annotation set for query {}'.format(query)); continue

        # calculation of mean Average Precision (Eq. 6)
        i, ri, s = 0.0, 0, 0.0
        y_target, y_score = [], []
        for video, sim in sorted(viewitems(results[query]), key=lambda x: x[1], reverse=True):
            if video in dataset:
                y_score.append(sim)
                y_target.append(1.0 if video in query_gt else 0.0)
                ri += 1
                if video in query_gt:
                    i += 1.0
                    s += i / ri
        mAP.append(s / len(query_gt))
        #if not quiet:
        #    print('Query:{}\t\tAP={:.4f}'.format(query, s / len(query_gt)))

        # add the dataset videos that are missing from the result file
        missing = len(query_gt) - y_target.count(1)
        y_target += [1.0 for _ in lrange(missing)] # add 1. for the relevant videos
        y_target += [0.0 for _ in lrange(len(dataset) - len(y_target))] # add 0. for the irrelevant videos
        y_score += [0.0 for _ in lrange(len(dataset) - len(y_score))]

        # calculation of interpolate PR-curve (Eq. 5)
        precision, recall, thresholds = precision_recall_curve(y_target, y_score)
        p = []
        for i in lrange(20, -1, -1):
            idx = np.where((recall >= i * 0.05))[0]
            p.append(np.max(precision[idx]))
        pr.append(p)
    # return mAP
    return mAP, np.mean(pr, axis=0)[::-1]
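Example No. 9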
def feature_extraction_videos(model, cores, batch_sz, video_list, output_path):
    """
      Function that extracts the intermediate CNN features
      of each video in a provided video list.

      Args:
        model: CNN network
        cores: CPU cores for the parallel video loading
        batch_sz: batch size fed to the CNN network
        video_list: path to a file listing the videos to extract features from
        output_path: path to store video features
    """
    videos = {i: video.strip() for i, video in enumerate(open(video_list).readlines())}
    print('\nNumber of videos: ', len(videos))
    print('Storage directory: ', output_path)
    print('CPU cores: ', cores)
    print('Batch size: ', batch_sz)

    print('\nFeature Extraction Process')
    print('==========================')
    pool = Pool(cores)
    future_videos = dict()
    output_list = []
    pbar = tqdm(lrange(np.max(videos.keys()) + 1), mininterval=1.0, unit='videos')
    for video in pbar:
        if os.path.exists(videos[video]):
            video_name = os.path.splitext(os.path.basename(videos[video]))[0]
            if video not in future_videos:
                video_tensor = load_video(videos[video], model.desired_size)
            else:
                video_tensor = future_videos[video].get()
                del future_videos[video]

            # load videos in parallel
            for _ in lrange(cores - len(future_videos)):
                next_video = np.max(future_videos.keys()) + 1 \
                    if len(future_videos) else video + 1

                if next_video in videos and \
                    next_video not in future_videos and \
                        os.path.exists(videos[next_video]):
                    future_videos[next_video] = pool.apply_async(load_video,
                                                                 args=[videos[next_video], model.desired_size])

            # extract features
            features = model.extract(video_tensor, batch_sz)

            path = os.path.join(output_path, '{}_{}'.format(video_name, model.net_name))
            output_list += ['{}\t{}'.format(video_name, path)]
            pbar.set_postfix(video=video_name)

            # save features
            np.save(path, features)
    np.savetxt('{}/video_feature_list.txt'.format(output_path), output_list, fmt='%s')
Example No. 10
def getDetailsForBuilds(master, buildset, builds, wantProperties=False, wantSteps=False,
                        wantPreviousBuild=False, wantLogs=False):

    builderids = {build['builderid'] for build in builds}

    builders = yield defer.gatherResults([master.data.get(("builders", _id))
                                          for _id in builderids])

    buildersbyid = {builder['builderid']: builder
                    for builder in builders}

    if wantProperties:
        buildproperties = yield defer.gatherResults(
            [master.data.get(("builds", build['buildid'], 'properties'))
             for build in builds])
    else:  # we still need a list for the big zip
        buildproperties = lrange(len(builds))

    if wantPreviousBuild:
        prev_builds = yield defer.gatherResults(
            [getPreviousBuild(master, build) for build in builds])
    else:  # we still need a list for the big zip
        prev_builds = lrange(len(builds))

    if wantSteps:
        buildsteps = yield defer.gatherResults(
            [master.data.get(("builds", build['buildid'], 'steps'))
             for build in builds])
        if wantLogs:
            for s in flatten(buildsteps, types=(list, UserList)):
                logs = yield master.data.get(("steps", s['stepid'], 'logs'))
                s['logs'] = list(logs)
                for l in s['logs']:
                    l['content'] = yield master.data.get(("logs", l['logid'], 'contents'))

    else:  # we still need a list for the big zip
        buildsteps = lrange(len(builds))

    # a big zip to connect everything together
    for build, properties, steps, prev in zip(builds, buildproperties, buildsteps, prev_builds):
        build['builder'] = buildersbyid[build['builderid']]
        build['buildset'] = buildset
        build['url'] = getURLForBuild(
            master, build['builderid'], build['number'])

        if wantProperties:
            build['properties'] = properties

        if wantSteps:
            build['steps'] = list(steps)

        if wantPreviousBuild:
            build['prev_build'] = prev
Example No. 11
    def img2chunk(
        cls,
        img,
        ll_ij2is_valid,
    ):
        ll = cls.img2rgb_ll(img)
        n_ROW, n_COL = (img.height, img.width)

        f_valid = partial(ll_ij2is_valid, ll)
        ij_list_ALL = lproduct(lrange(n_COL), lrange(n_ROW))
        ij_list = lfilter(f_valid, ij_list_ALL)
        raise Exception(pformat(ij_list), )
Example No. 12
def getDetailsForBuilds(master, buildset, builds, wantProperties=False, wantSteps=False,
                        wantPreviousBuild=False, wantLogs=False):

    builderids = set([build['builderid'] for build in builds])

    builders = yield defer.gatherResults([master.data.get(("builders", _id))
                                          for _id in builderids])

    buildersbyid = dict([(builder['builderid'], builder)
                         for builder in builders])

    if wantProperties:
        buildproperties = yield defer.gatherResults(
            [master.data.get(("builds", build['buildid'], 'properties'))
             for build in builds])
    else:  # we still need a list for the big zip
        buildproperties = lrange(len(builds))

    if wantPreviousBuild:
        prev_builds = yield defer.gatherResults(
            [getPreviousBuild(master, build) for build in builds])
    else:  # we still need a list for the big zip
        prev_builds = lrange(len(builds))

    if wantSteps:
        buildsteps = yield defer.gatherResults(
            [master.data.get(("builds", build['buildid'], 'steps'))
             for build in builds])
        if wantLogs:
            for s in flatten(buildsteps, types=(list, UserList)):
                logs = yield master.data.get(("steps", s['stepid'], 'logs'))
                s['logs'] = list(logs)
                for l in s['logs']:
                    l['content'] = yield master.data.get(("logs", l['logid'], 'contents'))

    else:  # we still need a list for the big zip
        buildsteps = lrange(len(builds))

    # a big zip to connect everything together
    for build, properties, steps, prev in zip(builds, buildproperties, buildsteps, prev_builds):
        build['builder'] = buildersbyid[build['builderid']]
        build['buildset'] = buildset
        build['url'] = getURLForBuild(
            master, build['builderid'], build['number'])

        if wantProperties:
            build['properties'] = properties

        if wantSteps:
            build['steps'] = list(steps)

        if wantPreviousBuild:
            build['prev_build'] = prev
Example No. 13
    def img2xy_list(
        cls,
        img,
    ):
        ll = cls.img2rgb_ll(img)
        n_ROW, n_COL = (img.height, img.width)

        xy2is_valid = None

        xy_list_ALL = lproduct(lrange(n_COL), lrange(n_ROW))
        xy_list_VALID = lfilter(xy2is_valid, xy_list_ALL)
        return xy_list_VALID
Example No. 14
def adjoining_cuts(cuts, bit, board):
    '''
    Given the cuts on an edge, computes the cuts on the adjoining edge.

    cuts: An array of Cut objects
    bit: A Router_Bit object
    board: A Board object

    Returns an array of Cut objects
    '''
    nc = len(cuts)
    adjCuts = []
    # if the left-most input cut does not include the left edge, add an
    # adjoining cut that includes the left edge
    if cuts[0].xmin > 0:
        left = 0
        right = my_round(cuts[0].xmin + bit.offset) - board.dheight
        if right - left >= board.dheight:
            adjCuts.append(Cut(left, right))
    # loop through the input cuts and form an adjoining cut, formed
    # by looking where the previous cut ended and the current cut starts
    for i in lrange(1, nc):
        left = my_round(cuts[i-1].xmax - bit.offset + board.dheight)
        right = max(left + bit.width, my_round(cuts[i].xmin + bit.offset) - board.dheight)
        adjCuts.append(Cut(left, right))
    # if the right-most input cut does not include the right edge, add an
    # adjoining cut that includes this edge
    if cuts[-1].xmax < board.width:
        left = my_round(cuts[-1].xmax - bit.offset) + board.dheight
        right = board.width
        if right - left >= board.dheight:
            adjCuts.append(Cut(left, right))
    return adjCuts
Example No. 15
 def test_pagination_prepaginated_without_clearing_resultspec(self):
     data = base.ListResult(mklist("x", *lrange(10, 20)))
     data.offset = 10
     data.limit = 10
     # ResultSpec does not have its offset/limit fields cleared - this is
     # detected as an assertion failure
     self.assertRaises(AssertionError, lambda: resultspec.ResultSpec(offset=10, limit=20).apply(data))
Example No. 16
def lossmap(data, twiss, slim=None, merge=True, threshold=0.0001,
            extracted=None, save=None):
    # Kind of hacky with the twiss but ok
    mydata = lossstats(data, slim=slim, normalize=True, merge=merge)
    if extracted is not None:
        mydata[extracted[0]] -= extracted[1]
    mydata = mydata[mydata>=threshold]
    
    if not isinstance(twiss, pd.DataFrame):
        _, twiss = readtfs(twiss, usecols=['NAME', 'S'])
    
    mydata = mydata.to_frame(name='LOSS')
    mydata['S'] = twiss['S'].loc[mydata.index]
    mydata.sort_values('S', inplace=True)
    
    fig, ax = plt.subplots()
    xvals = lrange(1, len(mydata)+1)
    ax.bar(xvals, mydata['LOSS'].values, 0.05, color='k',bottom=threshold)
    ax.set_xlim(0,len(mydata)+1)
    ax.set_xticks(xvals)
    ax.set_xticklabels(mydata.index)
    fig.autofmt_xdate(bottom=0.2, rotation=30, ha='right')
    ax.set_yscale("log")
    plt.show()

    if save is not None:
        plt.savefig(save, bbox_inches='tight')
        plt.close()
        
    return fig, ax
Example No. 17
 def test_claimBuildRequests_stress(self):
     return self.do_test_claimBuildRequests(
         [fakedb.BuildRequest(id=id, buildsetid=self.BSID, builderid=self.BLDRID1) for id in range(1, 1000)],
         1300305713,
         lrange(1, 1000),
         [(id, epoch2datetime(1300305713), self.MASTER_ID) for id in range(1, 1000)],
     )
Example No. 18
    def test_addBuild_existing_race(self):
        clock = task.Clock()
        clock.advance(TIME1)
        yield self.insertTestData(self.backgroundData)

        # add new builds at *just* the wrong time, repeatedly
        numbers = lrange(1, 8)

        def raceHook(conn):
            if not numbers:
                return
            conn.execute(self.db.model.builds.insert(),
                         {'number': numbers.pop(0), 'buildrequestid': 41,
                          'masterid': 88, 'workerid': 13, 'builderid': 77,
                          'started_at': TIME1, 'state_string': "hi"})

        id, number = yield self.db.builds.addBuild(builderid=77,
                                                   buildrequestid=41, workerid=13, masterid=88,
                                                   state_string=u'test test2', _reactor=clock,
                                                   _race_hook=raceHook)
        bdict = yield self.db.builds.getBuild(id)
        validation.verifyDbDict(self, 'dbbuilddict', bdict)
        self.assertEqual(number, 8)
        self.assertEqual(bdict, {'buildrequestid': 41, 'builderid': 77,
                                 'id': id, 'masterid': 88, 'number': number, 'workerid': 13,
                                 'started_at': epoch2datetime(TIME1),
                                 'complete_at': None, 'state_string': u'test test2',
                                 'results': None})
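Example No. 19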
    def test_claimBuildRequests_other_master_claim_stress(self):
        d = self.do_test_claimBuildRequests(
            [
                fakedb.BuildRequest(
                    id=id, buildsetid=self.BSID, builderid=self.BLDRID1)
                for id in range(1, 1000)
            ] + [
                fakedb.BuildRequest(
                    id=1000, buildsetid=self.BSID, builderid=self.BLDRID1),
                # the fly in the ointment..
                fakedb.BuildRequestClaim(brid=1000,
                                         masterid=self.OTHER_MASTER_ID,
                                         claimed_at=1300103810),
            ],
            1300305712,
            lrange(1, 1001),
            expfailure=buildrequests.AlreadyClaimedError)
        d.addCallback(
            lambda _: self.db.buildrequests.getBuildRequests(claimed=True))

        @d.addCallback
        def check(results):
            # check that [1,1000) were not claimed, and 1000 is still claimed
            self.assertEqual([
                (r['buildrequestid'], r['claimed_by_masterid'],
                 r['claimed_at']) for r in results
            ][:10], [(1000, self.OTHER_MASTER_ID, epoch2datetime(1300103810))])

        return d
Example No. 20
 def test_endpoint_returns_total_without_applying_filters(self):
     data = base.ListResult(mklist('x', *lrange(10, 20)))
     data.total = 99
     # apply doesn't want to get a total with filters still outstanding
     f = resultspec.Filter(field='x', op='gt', values=[23])
     self.assertRaises(AssertionError, lambda:
                       resultspec.ResultSpec(filters=[f]).apply(data))
Example No. 21
def extrude(v2d, tri2d, order, z1, z2, units):
    if units.metric:
        scale = 1.0
    else:
        scale = 1.0 / units.increments_per_inch
    nv2d = len(v2d)
    ntri2d = len(tri2d)
    v3d = [[0, 0, 0]] * (nv2d * 2)
    i = 0
    i2 = nv2d
    for v in v2d:
        v1 = [v[0] * scale, v[1] * scale, z1 * scale]
        v2 = [v[0] * scale, v[1]* scale, z2 * scale]
        v3d[i] = [v1[order[0]], v1[order[1]], v1[order[2]]]
        v3d[i2] = [v2[order[0]], v2[order[1]], v2[order[2]]]
        i += 1
        i2 += 1
    tri3d = [[0, 0, 0]] * (2 * ntri2d)
    i = 0
    i2 = ntri2d
    for t in tri2d:
        tri3d[i] = [t[0], t[1], t[2]]
        tri3d[i2] = [t[0] + nv2d, t[1] + nv2d, t[2] + nv2d]
        i += 1
        i2 += 1
    for i in lrange(nv2d):
        ip = i + 1
        ie = i + nv2d
        iep = ie + 1
        if ip == nv2d:
            ip = 0
            iep = nv2d
        tri3d.append([i, ip, iep])
        tri3d.append([i, iep, ie])
    return (v3d, tri3d)
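
A minimal sketch of calling extrude on a unit square, assuming the function above and a list-returning lrange are in scope; the Units stand-in below is a hypothetical placeholder for the application's real units object:

from collections import namedtuple

Units = namedtuple('Units', ['metric', 'increments_per_inch'])  # hypothetical stand-in

v2d = [(0, 0), (1, 0), (1, 1), (0, 1)]    # unit square
tri2d = [[0, 1, 2], [0, 2, 3]]            # its two triangles
v3d, tri3d = extrude(v2d, tri2d, order=(0, 1, 2), z1=0, z2=1,
                     units=Units(metric=True, increments_per_inch=32))
assert len(v3d) == 8 and len(tri3d) == 12   # 2 top + 2 bottom + 8 side triangles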
Example No. 22
 def test_endpoint_returns_total_without_applying_filters(self):
     data = base.ListResult(mklist('x', *lrange(10, 20)))
     data.total = 99
     # apply doesn't want to get a total with filters still outstanding
     f = resultspec.Filter(field='x', op='gt', values=[23])
     with self.assertRaises(AssertionError):
         resultspec.ResultSpec(filters=[f]).apply(data)
Example No. 23
    def progress(self, count):
        if self.finished:
            return

        count = min(count, self.finalcount)

        if self.finalcount:
            percentcomplete = int(round(100 * count / self.finalcount))
            if percentcomplete < 1: percentcomplete = 1
        else:
            percentcomplete = 100
            self.finished = True
            return

        blockcount = percentcomplete // 2
        if not config.mread:
            if blockcount > self.blockcount:
                for i in lrange(self.blockcount, blockcount):
                    self.f.write(self.block)
                    self.f.flush()

            if percentcomplete == 100:
                self.f.write("]\n")
                self.finished = True
        self.blockcount = blockcount
Example No. 24
 def draw_passes(self, painter, blabel, cuts, y1, y2, flags, xMid):
     '''
     Draws and labels the router passes on a template or board.
     '''
     board_T = self.geom.board_T
     # brush = QtGui.QBrush(QtCore.Qt.white)
     brush = None
     ip = 0
     if y1 > y2:
         shift = (0, -2)
     else:
         shift = (0, 2)
     passMid = None
     self.set_font_size(painter, 'template')
     for c in cuts[::-1]:
         for p in lrange(len(c.passes) - 1, -1, -1):
             xp = c.passes[p] + board_T.xL()
             ip += 1
             label = '%d%s' % (ip, blabel)
             if xp == xMid:
                 passMid = label
             painter.drawLine(xp, y1, xp, y2)
             if p == 0 or c.passes[p] - c.passes[p-1] > self.sep_annotate:
                 paint_text(painter, label, (xp, y1), flags, shift, -90, fill=brush)
     return passMid
Example No. 25
    def permutations(self):
        "Generate all permutations of a _truth_table in lexicographical order"

        tt = self

        n = tt.nvars()
        a = lrange(n)

        while True:

            yield tt

            for j in xrange(1, n):
                if a[j] > a[j - 1]:
                    break
            else:
                return

            for l in xrange(n):
                if a[j] > a[l]:
                    a[j], a[l] = a[l], a[j]
                    tt = tt.permute(l, j)
                    break

            k = j - 1
            l = 0

            while k > l:

                a[k], a[l] = a[l], a[k]
                tt = tt.permute(l, k)

                k -= 1
                l += 1
Example No. 26
def extrude(v2d, tri2d, order, z1, z2, units):
    if units.metric:
        scale = 1.0
    else:
        scale = 1.0 / units.increments_per_inch
    nv2d = len(v2d)
    ntri2d = len(tri2d)
    v3d = [[0, 0, 0]] * (nv2d * 2)
    i = 0
    i2 = nv2d
    for v in v2d:
        v1 = [v[0] * scale, v[1] * scale, z1 * scale]
        v2 = [v[0] * scale, v[1] * scale, z2 * scale]
        v3d[i] = [v1[order[0]], v1[order[1]], v1[order[2]]]
        v3d[i2] = [v2[order[0]], v2[order[1]], v2[order[2]]]
        i += 1
        i2 += 1
    tri3d = [[0, 0, 0]] * (2 * ntri2d)
    i = 0
    i2 = ntri2d
    for t in tri2d:
        tri3d[i] = [t[0], t[1], t[2]]
        tri3d[i2] = [t[0] + nv2d, t[1] + nv2d, t[2] + nv2d]
        i += 1
        i2 += 1
    for i in lrange(nv2d):
        ip = i + 1
        ie = i + nv2d
        iep = ie + 1
        if ip == nv2d:
            ip = 0
            iep = nv2d
        tri3d.append([i, ip, iep])
        tri3d.append([i, iep, ie])
    return (v3d, tri3d)
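Example No. 27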
    def extract(self, image_tensor, batch_sz):
        """
          Function that extracts intermediate CNN features for
          each input image.

          Args:
            image_tensor: numpy tensor of input images
            batch_sz: batch size

          Returns:
            features: extracted features from each input image
        """
        preprocessed_images = self.preprocess(image_tensor)
        features = np.empty((preprocessed_images.shape[0], self.final_sz))
        for i in lrange(preprocessed_images.shape[0] // batch_sz + 1):
            batch = preprocessed_images[i * batch_sz:(i + 1) * batch_sz]
            if batch.size > 0:
                self.net.blobs['data'].reshape(*batch.shape)
                self.net.blobs['data'].data[...] = batch
                self.net.forward()
                start = 0
                for layer in self.layers:
                    activations = self.net.blobs[layer].data[...]
                    # normalize on channel dimension
                    activations /= np.linalg.norm(
                        activations, axis=1, keepdims=True) + 1e-15
                    # global max-pooling on channel dimension
                    activations = activations.max(axis=(2, 3))
                    # normalize feature vector
                    activations /= np.linalg.norm(
                        activations, axis=1, keepdims=True) + 1e-15
                    features[i * batch_sz:(i + 1) * batch_sz,
                             start:start + activations.shape[1]] = activations
                    start += activations.shape[1]
        return features
Example No. 28
def adjoining_cuts(cuts, bit, board):
    '''
    Given the cuts on an edge, computes the cuts on the adjoining edge.

    cuts: An array of Cut objects
    bit: A Router_Bit object
    board: A Board object

    Returns an array of Cut objects
    '''
    nc = len(cuts)
    adjCuts = []
    # if the left-most input cut does not include the left edge, add an
    # adjoining cut that includes the left edge
    if cuts[0].xmin > 0:
        left = 0
        right = my_round(cuts[0].xmin + bit.offset) - board.dheight
        if right - left >= board.dheight:
            adjCuts.append(Cut(left, right))
    # loop through the input cuts and form an adjoining cut, formed
    # by looking where the previous cut ended and the current cut starts
    for i in lrange(1, nc):
        left = my_round(cuts[i - 1].xmax - bit.offset + board.dheight)
        right = max(left + bit.width,
                    my_round(cuts[i].xmin + bit.offset) - board.dheight)
        adjCuts.append(Cut(left, right))
    # if the right-most input cut does not include the right edge, add an
    # adjoining cut that includes this edge
    if cuts[-1].xmax < board.width:
        left = my_round(cuts[-1].xmax - bit.offset) + board.dheight
        right = board.width
        if right - left >= board.dheight:
            adjCuts.append(Cut(left, right))
    return adjCuts
Example No. 29
 def draw_one_board(self, painter, board, bit):
     '''
     Draws a single board
     '''
     if not board.active:
         return
     (x, y) = board.perimeter(bit)
     painter.save()
     pen = QtGui.QPen(QtCore.Qt.black)
     pen.setWidthF(0)
     painter.setPen(pen)
     icon = self.woods[board.wood]
     if isinstance(icon, str):
         brush = QtGui.QBrush(QtGui.QPixmap(icon))
     else:
         brush = QtGui.QBrush(QtCore.Qt.black, icon)
     (inverted, invertable) = self.transform.inverted()
     brush.setMatrix(inverted.toAffine())
     painter.setBrush(brush)
     n = len(x)
     poly = QtGui.QPolygonF()
     for i in lrange(n):
         poly.append(QtCore.QPointF(x[i], y[i]))
     painter.drawPolygon(poly)
     painter.restore()
Example No. 30
 def testAverages(self):
     data = lrange(10)
     for i in data:
         metrics.MetricTimeEvent.log('foo_time', i)
     report = self.observer.asDict()
     self.assertEqual(
         report['timers']['foo_time'], sum(data) / float(len(data)))
Example No. 31
 def testAverages(self):
     data = lrange(10)
     for i in data:
         metrics.MetricTimeEvent.log('foo_time', i)
     report = self.observer.asDict()
     self.assertEqual(report['timers']['foo_time'],
                      sum(data) / float(len(data)))
Example No. 32
 def draw_passes(self, painter, blabel, cuts, y1, y2, flags, xMid):
     '''
     Draws and labels the router passes on a template or board.
     '''
     board_T = self.geom.board_T
     # brush = QtGui.QBrush(QtCore.Qt.white)
     brush = None
     ip = 0
     if y1 > y2:
         shift = (0, -2)
     else:
         shift = (0, 2)
     passMid = None
     self.set_font_size(painter, 'template')
     for c in cuts[::-1]:
         for p in lrange(len(c.passes) - 1, -1, -1):
             xp = c.passes[p] + board_T.xL()
             ip += 1
             label = '%d%s' % (ip, blabel)
             if xp == xMid:
                 passMid = label
             painter.drawLine(xp, y1, xp, y2)
             if p == 0 or c.passes[p] - c.passes[p - 1] > self.sep_annotate:
                 paint_text(painter,
                            label, (xp, y1),
                            flags,
                            shift,
                            -90,
                            fill=brush)
     return passMid
Example No. 33
 def _fallbackFDImplementation(self):
     """
     Fallback-fallback implementation where we just assume that we need to
     close 256 FDs.
     """
     maxfds = 256
     return lrange(maxfds)
Example No. 34
 def draw_one_board(self, painter, board, bit):
     '''
     Draws a single board
     '''
     if not board.active:
         return
     (x, y) = board.perimeter(bit)
     painter.save()
     pen = QtGui.QPen(QtCore.Qt.black)
     pen.setWidthF(0)
     painter.setPen(pen)
     icon = self.woods[board.wood]
     if isinstance(icon, str):
         brush = QtGui.QBrush(QtGui.QPixmap(icon))
     else:
         brush = QtGui.QBrush(QtCore.Qt.black, icon)
     (inverted, invertable) = self.transform.inverted()
     brush.setMatrix(inverted.toAffine())
     painter.setBrush(brush)
     n = len(x)
     poly = QtGui.QPolygonF()
     for i in lrange(n):
         poly.append(QtCore.QPointF(x[i], y[i]))
     painter.drawPolygon(poly)
     painter.restore()
Example No. 35
    def test_claimBuildRequests_other_master_claim_stress(self):
        d = self.do_test_claimBuildRequests(
            [fakedb.BuildRequest(id=id, buildsetid=self.BSID, builderid=self.BLDRID1)
             for id in range(1, 1000)] +
            [
                fakedb.BuildRequest(
                    id=1000, buildsetid=self.BSID, builderid=self.BLDRID1),
                # the fly in the ointment..
                fakedb.BuildRequestClaim(brid=1000,
                                         masterid=self.OTHER_MASTER_ID, claimed_at=1300103810),
            ], 1300305712, lrange(1, 1001),
            expfailure=buildrequests.AlreadyClaimedError)
        d.addCallback(lambda _:
                      self.db.buildrequests.getBuildRequests(claimed=True))

        @d.addCallback
        def check(results):
            # check that [1,1000) were not claimed, and 1000 is still claimed
            self.assertEqual([
                (r['buildrequestid'], r[
                 'claimed_by_masterid'], r['claimed_at'])
                for r in results
            ][:10], [
                (1000, self.OTHER_MASTER_ID, epoch2datetime(1300103810))
            ])
        return d
Example No. 36
    def test_addBuild_existing_race(self):
        clock = task.Clock()
        clock.advance(TIME1)
        yield self.insertTestData(self.backgroundData)

        # add new builds at *just* the wrong time, repeatedly
        numbers = lrange(1, 8)

        def raceHook(conn):
            if not numbers:
                return
            conn.execute(self.db.model.builds.insert(),
                         {'number': numbers.pop(0), 'buildrequestid': 41,
                          'masterid': 88, 'workerid': 13, 'builderid': 77,
                          'started_at': TIME1, 'state_string': "hi"})

        id, number = yield self.db.builds.addBuild(builderid=77,
                                                   buildrequestid=41, workerid=13, masterid=88,
                                                   state_string=u'test test2', _reactor=clock,
                                                   _race_hook=raceHook)
        bdict = yield self.db.builds.getBuild(id)
        validation.verifyDbDict(self, 'dbbuilddict', bdict)
        self.assertEqual(number, 8)
        self.assertEqual(bdict, {'buildrequestid': 41, 'builderid': 77,
                                 'id': id, 'masterid': 88, 'number': number, 'workerid': 13,
                                 'started_at': epoch2datetime(TIME1),
                                 'complete_at': None, 'state_string': u'test test2',
                                 'results': None})
Example No. 37
 def _fallbackFDImplementation(self):
     """
     Fallback-fallback implementation where we just assume that we need to
     close 256 FDs.
     """
     maxfds = 256
     return lrange(maxfds)
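Example No. 38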
 def test_claimBuildRequests_stress(self):
     return self.do_test_claimBuildRequests([
         fakedb.BuildRequest(
             id=id, buildsetid=self.BSID, builderid=self.BLDRID1)
         for id in range(1, 1000)
     ], 1300305713, lrange(
         1, 1000), [(id, epoch2datetime(1300305713), self.MASTER_ID)
                    for id in range(1, 1000)])
Example No. 39
 def test_pagination_prepaginated_without_clearing_resultspec(self):
     data = base.ListResult(mklist('x', *lrange(10, 20)))
     data.offset = 10
     data.limit = 10
     # ResultSpec does not have its offset/limit fields cleared - this is
     # detected as an assertion failure
     with self.assertRaises(AssertionError):
         resultspec.ResultSpec(offset=10, limit=20).apply(data)
Example No. 40
def cpuspec_inverse(cpuspec):
    """calculate inverse of cpu specification"""
    cpus = [0 for x in lrange(maxcpu + 1)]
    groups = cpuspec.split(',')
    log.debug("cpuspec_inverse(%s) maxcpu=%d groups=%d", cpuspec, maxcpu,
              len(groups))
    for set in groups:
        items = set.split('-')
        if len(items) == 1:
            if not len(items[0]):
                # common error of two consecutive commas in cpuspec,
                # just ignore it and keep going
                continue
            cpus[int(items[0])] = 1
        elif len(items) == 2:
            for x in lrange(int(items[0]), int(items[1]) + 1):
                cpus[x] = 1
        else:
            raise CpusetException("cpuspec(%s) has bad group %s" %
                                  (cpuspec, set))
    log.debug("cpuspec array: %s", cpus)
    # calculate inverse of array
    for x in lrange(0, len(cpus)):
        cpus[x] = int(cpus[x] == 0)
    log.debug("      inverse: %s", cpus)
    # build cpuspec expression
    nspec = ""
    ingrp = False
    for x in lrange(0, len(cpus)):
        if cpus[x] == 0 and ingrp:
            nspec += str(begin)
            if x > begin + 1:
                nspec += '-' + str(x if cpus[x] else x - 1)
            ingrp = False
        if cpus[x] == 1:
            if not ingrp:
                if len(nspec): nspec += ','
                begin = x
            ingrp = True
            if x == len(cpus) - 1:
                nspec += str(begin)
                if x > begin:
                    nspec += '-' + str(x)
    log.debug("inverse cpuspec: %s", nspec)
    return nspec
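
A worked instance of the inversion above, assuming the module-level maxcpu is 7 (an illustrative value): the spec '0-2,5' marks cpus 0, 1, 2 and 5, so the inverse covers cpus 3-4 and 6-7.

# Sketch of the mask arithmetic for maxcpu = 7 and cpuspec '0-2,5'.
maxcpu = 7
marked = {0, 1, 2, 5}
cpus = [int(x not in marked) for x in range(maxcpu + 1)]   # the inverted mask
assert cpus == [0, 0, 0, 1, 1, 0, 1, 1]
# cpuspec_inverse would render this mask as the group string '3-4,6-7'.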
Example No. 41
 def test_01(self):
     l = lrange(5)
     hyp = list(IterTool.iter2sliding_window(l, 3))
     ref = [
         (0, 1, 2),
         (1, 2, 3),
         (2, 3, 4),
     ]
     self.assertEqual(hyp, ref)
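
For reference, a minimal sliding-window generator with the behaviour this test expects; iter2sliding_window itself is not shown, so this is only an assumed equivalent:

def sliding_window(seq, size):
    # Yield consecutive overlapping tuples of length `size`.
    items = list(seq)
    for i in range(len(items) - size + 1):
        yield tuple(items[i:i + size])

assert list(sliding_window(range(5), 3)) == [(0, 1, 2), (1, 2, 3), (2, 3, 4)]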
Example No. 42
 def __init__(self, finalcount, progresschar=None):
     self.finalcount = finalcount
     self.blockcount = 0
     self.finished = False
     # Use dark shade (U+2593) char for progress if none passed
     if not progresschar:
         self.block = '\u2593'
     else:
         self.block = progresschar
     if config.mread:
         return
     self.f = sys.stdout
     if not self.finalcount: return
     self.f.write('[')
     for i in lrange(50):
         self.f.write(' ')
     self.f.write(']%')
     for i in lrange(52):
         self.f.write('\b')
Example No. 43
def triplet_generator_cc(dataset, cc_web_features):
    """
      Function that generates video triplets from CC_WEB_VIDEO.

      Args:
        dataset: dataset object that contains the CC_WEB_VIDEO queries and ground truth
        cc_web_features: global features of the videos in CC_WEB_VIDEO
      Returns:
        triplets: the list of triplets with video indexes
    """
    print('\nCC_WEB_VIDEO Triplet Generation')
    print('===============================')
    triplets = []

    # generate triplets from each query set
    for i, ground_truth in enumerate(dataset['ground_truth']):
        pos = [
            k for k, v in viewitems(ground_truth)
            if v in ['E', 'L', 'V', 'S', 'M']
        ]
        neg = [k for k, v in viewitems(ground_truth) if v in ['X', '-1']]
        for q in tqdm(lrange(len(pos)), desc='Query {}'.format(i)):
            for p in lrange(q + 1, len(pos)):
                video1 = cc_web_features[pos[q]]
                video2 = cc_web_features[pos[p]]

                # calculate distances
                pair_distance = euclidean(video1, video2)
                if pair_distance > 0.1:
                    negative_distances = cdist(np.array([video1, video2]),
                                               cc_web_features[neg],
                                               metric='euclidean')

                    hard_negatives = np.where(
                        negative_distances[0] < pair_distance)[0]
                    triplets += [[pos[q], pos[p], neg[e]]
                                 for e in hard_negatives]

                    hard_negatives = np.where(
                        negative_distances[1] < pair_distance)[0]
                    triplets += [[pos[p], pos[q], neg[e]]
                                 for e in hard_negatives]
    return triplets
Example No. 44
 def test_completeBuildRequests_stress(self):
     return self.do_test_completeBuildRequests(
         [fakedb.BuildRequest(id=id, buildsetid=self.BSID, builderid=self.BLDRID1) for id in range(1, 280)]
         + [
             fakedb.BuildRequestClaim(brid=id, masterid=self.MASTER_ID, claimed_at=1300103810)
             for id in range(1, 280)
         ],
         1300305712,
         [(id, True, 7, epoch2datetime(1300305712)) for id in range(1, 280)],
         brids=lrange(1, 280),
     )
Example No. 45
    def __init__(self, bit, boards, config):
        Base_Spacing.__init__(self, bit, boards, config)

        dh2 = 2 * self.dhtot
        t = [Spacing_Param(0, self.boards[0].width // 4 + dh2, 0),\
             Spacing_Param(self.bit.width + dh2, self.boards[0].width // 2 + dh2,\
                           self.bit.width + dh2),\
             Spacing_Param(None, None, True)]
        self.params = {}
        for i in lrange(len(t)):
            self.params[self.keys[i]] = t[i]
Example No. 46
    def draw_boards(self, painter):
        '''
        Draws all the boards
        '''

        # Draw all of the boards
        for i in lrange(4):
            self.draw_one_board(painter, self.geom.boards[i], self.geom.bit,
                                self.colors['board_background'])

        # Label the boards
        if self.config.show_router_pass_identifiers or self.config.show_router_pass_locations:
            self.set_font_size(painter, 'boards')
            pen = QtGui.QPen(QtCore.Qt.SolidLine)
            pen.setColor(self.colors['canvas_foreground'])
            painter.setPen(pen)

            x1 = self.geom.boards[0].xL() - self.geom.bit.width // 2
            x2 = self.geom.boards[0].xL() - self.geom.bit.width // 4
            flags = QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter

            y = self.geom.boards[0].yB()
            p = (x1, y)
            paint_text(painter, 'A', p, flags, (-3, 0))
            painter.drawLine(x1, y, x2, y)

            i = 0 # index in self.labels

            if self.geom.boards[3].active:
                y = self.geom.boards[3].yT()
                p = (x1, y)
                paint_text(painter, 'B', p, flags, (-3, 0))
                painter.drawLine(x1, y, x2, y)
                y = self.geom.boards[3].yB()
                p = (x1, y)
                paint_text(painter, 'C', p, flags, (-3, 0))
                painter.drawLine(x1, y, x2, y)
                i = 2
            if self.geom.boards[2].active:
                y = self.geom.boards[2].yT()
                p = (x1, y)
                paint_text(painter, self.labels[i], p, flags, (-3, 0))
                painter.drawLine(x1, y, x2, y)
                y = self.geom.boards[2].yB()
                p = (x1, y)
                paint_text(painter, self.labels[i + 1], p, flags, (-3, 0))
                painter.drawLine(x1, y, x2, y)
                i += 2

            y = self.geom.boards[1].yT()
            p = (x1, y)
            paint_text(painter, self.labels[i], p, flags, (-3, 0))
            painter.drawLine(x1, y, x2, y)
Example No. 47
 def test_worst_status(self):
     res = lrange(len(results.Results))
     res.sort(
         cmp=lambda a, b: 1 if (results.worst_status(a, b) == a) else -1)
     self.assertEqual(res, [
         results.SKIPPED,
         results.SUCCESS,
         results.WARNINGS,
         results.FAILURE,
         results.EXCEPTION,
         results.RETRY,
         results.CANCELLED,
     ])
Example No. 48
def test_apply_at_depth_1():
    a = np.arange(24).reshape([2, 3, 4])

    np.array_equal(apply_at_depth(np.sum, a),
                   apply_at_depth_ravel(np.sum, a))

    for depth in [0] + lrange(-4,4):
        assert np.array_equal(apply_at_depth(np.sum, a, depth=depth),
                              apply_at_depth_ravel(np.sum, a, depth=depth))
    assert apply_at_depth(np.sum, a, depth=0) == np.sum(a)
    assert np.array_equal(apply_at_depth(np.sum, a, depth=-1),
                          np.sum(a, axis=-1))
    assert np.array_equal(apply_at_depth(np.subtract, a, np.array([1]), depth=0),
                          a - 1)
    assert np.array_equal(apply_at_depth(np.subtract, a, np.array([1]), depth=0),
                          [i - 1 for i in a])
    assert np.array_equal(apply_at_depth(np.subtract, a, np.array([1]), depth=0),
                          [[j - 1 for j in i]
                           for i in a])
    for depth in [0] + lrange(-4,4):
        assert np.array_equal(apply_at_depth(np.subtract, a, np.array([1]), depth=depth),
                              a - 1)
Example No. 49
 def test_sort_worst_status(self):
     res = lrange(len(results.Results))
     res.sort(
         key=lambda a: a if a != results.SKIPPED else -1)
     self.assertEqual(res, [
         results.SKIPPED,
         results.SUCCESS,
         results.WARNINGS,
         results.FAILURE,
         results.EXCEPTION,
         results.RETRY,
         results.CANCELLED,
     ])
Example No. 50
def unserialize(s, config):
    '''
    Unserializes the string s, and returns the tuple (bit, boards, spacing)
    '''
    inp = StringIO.StringIO(s)
    u = pickle.Unpickler(inp)
    version = u.load()
    if config.debug:
        print('unserialized version:', version)
    # form the units
    metric = u.load()
    if metric:
        units = utils.Units(metric=True)
    else:
        ipi = u.load()
        units = utils.Units(ipi)
    # form the bit
    width = u.load()
    depth = u.load()
    angle = u.load()
    bit = router.Router_Bit(units, width, depth, angle)
    # form the boards
    nb = u.load()
    boards = []
    for i in lrange(nb):
        boards.append(router.Board(bit, 10)) # dummy width argument, for now
    for b in boards:
        b.width = u.load()
        b.height = u.load()
        b.wood = u.load()
        b.active = u.load()
        b.dheight = u.load()
    # form the spacing
    sp_type = u.load()
    if sp_type == 'Edit':
        if config.debug:
            print('unserialized edit spacing')
        cuts = u.load()
        sp = spacing.Edit_Spaced(bit, boards, config)
        sp.set_cuts(cuts)
    else:
        if sp_type == 'Equa':
            sp = spacing.Equally_Spaced(bit, boards, config)
        else:
            sp = spacing.Variable_Spaced(bit, boards, config)
        sp.params = u.load()
        if config.debug:
            print('unserialized ', sp_type, `sp.params`)
        sp.set_cuts()
    return (bit, boards, sp, sp_type)
Example No. 51
    def _resourceFDImplementation(self):
        """
        Fallback implementation where the resource module can inform us about
        how many FDs we can expect.

        Note that on OS-X we expect to be using the /dev/fd implementation.
        """
        import resource
        maxfds = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + 1
        # OS-X reports 9223372036854775808. That's a lot of fds
        # to close
        if maxfds > 1024:
            maxfds = 1024
        return lrange(maxfds)
Example No. 52
 def do_test_pagination(self, bareList):
     data = mklist('x', *lrange(101, 131))
     if not bareList:
         data = base.ListResult(data)
         data.offset = None
         data.total = len(data)
         data.limit = None
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=0).apply(data),
         base.ListResult(mklist('x', *lrange(101, 131)),
                         offset=0, total=30))
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=10).apply(data),
         base.ListResult(mklist('x', *lrange(111, 131)),
                         offset=10, total=30))
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=10, limit=10).apply(data),
         base.ListResult(mklist('x', *lrange(111, 121)),
                         offset=10, total=30, limit=10))
     self.assertListResultEqual(
         resultspec.ResultSpec(offset=20, limit=15).apply(data),
         base.ListResult(mklist('x', *lrange(121, 131)),
                         offset=20, total=30, limit=15))  # off the end
Example No. 53
    def set_fig_dimensions(self, template, boards):
        '''
        Computes the figure dimension attributes, fig_width and fig_height, in
        increments.
        Returns True if the dimensions changed.
        '''
        # Try default margins, but reset if the template is too small for margins
        units = boards[0].units
        top_margin = units.abstract_to_increments(self.config.top_margin)
        bottom_margin = units.abstract_to_increments(self.config.bottom_margin)
        left_margin = units.abstract_to_increments(self.config.left_margin)
        right_margin = units.abstract_to_increments(self.config.right_margin)
        separation = units.abstract_to_increments(self.config.separation)
        self.margins = utils.Margins(separation, separation, left_margin, right_margin,
                                     bottom_margin, top_margin)

        # Set the figure dimensions
        fig_width = template.length + self.margins.left + self.margins.right
        fig_height = template.height + self.margins.bottom + self.margins.top
        for i in lrange(4):
            if boards[i].active:
                fig_height += boards[i].height + self.margins.sep

        if boards[3].active:
            fig_height += template.height + self.margins.sep

        if self.config.show_caul:
            fig_height += template.height + self.margins.sep

        min_width = 64
        if fig_width < min_width:
            fig_width = min_width
            self.margins.left = (fig_width - template.length) // 2
            self.margins.right = self.margins.left
            fig_width = template.length + self.margins.left + self.margins.right

        dimensions_changed = False
        if fig_width != self.fig_width:
            self.fig_width = fig_width
            dimensions_changed = True
        if fig_height != self.fig_height:
            self.fig_height = fig_height
            dimensions_changed = True

        # The 1200 here is effectively a dpi for the screen
        scale = 1200 * boards[0].units.increments_to_inches(1)
        self.window_width = int(scale * fig_width)
        self.window_height = int(scale * fig_height)

        return dimensions_changed
Example No. 54
 def cut_add(self):
     '''
     Adds a cut to the first location possible, searching from the left.
     The active cut is set to the new cut.
     '''
     neck_width = utils.my_round(self.bit.neck)
     index = None
     cuts_save = copy.deepcopy(self.cuts)
     if self.cuts[0].xmin > self.bit.neck:
         if self.config.debug:
             print('add at left')
         index = 0
         xmin = 0
         xmax = self.cuts[0].xmin - neck_width
     wadd = 2 * self.bit.width + neck_width
     wdelta = self.bit.width - neck_width
     for i in lrange(1, len(self.cuts)):
         if self.cuts[i].xmin - self.cuts[i - 1].xmax + wdelta >= wadd:
             if self.config.debug:
                 print('add in cut')
             index = i
             xmin = self.cuts[i - 1].xmax + neck_width
             xmax = xmin + self.bit.width
             break
         elif self.cuts[i].xmax - self.cuts[i].xmin >= wadd:
             if self.config.debug:
                 print('add in cut')
             index = i + 1
             xmin = self.cuts[i].xmax - self.bit.width
             xmax = self.cuts[i].xmax
             self.cuts[i].xmax = self.cuts[i].xmin + self.bit.width
             break
     if index is None and \
        self.cuts[-1].xmax < self.boards[0].width - self.bit.neck:
         if self.config.debug:
             print('add at right')
         index = len(self.cuts)
         xmax = self.boards[0].width
         xmin = self.cuts[-1].xmax + neck_width
     if index is None:
         return 'Unable to add cut'
     self.undo_cuts.append(cuts_save)
     c = self.cuts[0:index]
     c.append(router.Cut(xmin, xmax))
     c.extend(self.cuts[index:])
     self.cuts = c
     self.cursor_cut = index
     self.active_cuts = [index]
     return 'Added cut'
Example No. 55
    def draw_boards(self, painter):
        '''
        Draws all the boards
        '''

        # Draw the A and B boards
        for i in lrange(4):
            self.draw_one_board(painter, self.geom.boards[i], self.geom.bit)

        # Label the boards
        painter.setPen(QtCore.Qt.SolidLine)
        x1 = self.geom.boards[0].xL() - self.geom.bit.width // 2
        x2 = self.geom.boards[0].xL() - self.geom.bit.width // 4
        flags = QtCore.Qt.AlignRight | QtCore.Qt.AlignVCenter
        self.set_font_size(painter, 'boards')

        y = self.geom.boards[0].yB()
        p = (x1, y)
        paint_text(painter, 'A', p, flags, (-3, 0))
        painter.drawLine(x1, y, x2, y)

        i = 0 # index in self.labels

        if self.geom.boards[3].active:
            y = self.geom.boards[3].yT()
            p = (x1, y)
            paint_text(painter, 'B', p, flags, (-3, 0))
            painter.drawLine(x1, y, x2, y)
            y = self.geom.boards[3].yB()
            p = (x1, y)
            paint_text(painter, 'C', p, flags, (-3, 0))
            painter.drawLine(x1, y, x2, y)
            i = 2
        if self.geom.boards[2].active:
            y = self.geom.boards[2].yT()
            p = (x1, y)
            paint_text(painter, self.labels[i], p, flags, (-3, 0))
            painter.drawLine(x1, y, x2, y)
            y = self.geom.boards[2].yB()
            p = (x1, y)
            paint_text(painter, self.labels[i + 1], p, flags, (-3, 0))
            painter.drawLine(x1, y, x2, y)
            i += 2

        y = self.geom.boards[1].yT()
        p = (x1, y)
        paint_text(painter, self.labels[i], p, flags, (-3, 0))
        painter.drawLine(x1, y, x2, y)
Example No. 56
    def test_selects_for_each_class(self):
        df = pd.DataFrame()
        df['f1'] = [10] * 10 + lrange(10) + lrange(10)
        df['f2'] = lrange(10) + [10] * 10 + lrange(10)
        df['f3'] = lrange(10) + lrange(10) + [10] * 10
        df['y'] = [0] * 10 + [1] * 10 + [2] * 10

        y = df.y
        X = df.drop(['y'], axis=1)
        X_relevant = select_features(X, y, ml_task='classification')
        assert {'f1', 'f2', 'f3'} == set(X_relevant.columns)
Example No. 57
    def do_fuzz(self, endTime):
        lru.inv_failed = False

        def delayed_miss_fn(key):
            return deferUntilLater(random.uniform(0.001, 0.002), set([key + 1000]))

        self.lru = lru.AsyncLRUCache(delayed_miss_fn, 50)

        keys = lrange(250)
        errors = []  # bail out early in the event of an error
        results = []  # keep references to (most) results

        # fire off as many requests as we can in one second, with lots of
        # overlap.
        while not errors and reactor.seconds() < endTime:
            key = random.choice(keys)

            d = self.lru.get(key)

            def check(result, key):
                self.assertEqual(result, set([key + 1000]))
                if random.uniform(0, 1.0) < 0.9:
                    results.append(result)
                    results[:-100] = []

            d.addCallback(check, key)

            @d.addErrback
            def eb(f):
                errors.append(f)
                return f  # unhandled error -> in the logs

            # give the reactor some time to process pending events
            if random.uniform(0, 1.0) < 0.5:
                yield deferUntilLater(0)

        # now wait until all of the pending calls have cleared, noting that
        # this method will be counted as one delayed call, in the current
        # implementation
        while len(reactor.getDelayedCalls()) > 1:
            # give the reactor some time to process pending events
            yield deferUntilLater(0.001)

        self.assertFalse(lru.inv_failed, "invariant failed; see logs")
        log.msg("hits: %d; misses: %d; refhits: %d" % (self.lru.hits, self.lru.misses, self.lru.refhits))
Example No. 58
 def draw_one_board(self, painter, board, bit, fill_color):
     '''
     Draws a single board
     '''
     if not board.active:
         return
     # form the polygon to draw
     (x, y) = board.perimeter(bit)
     n = len(x)
     poly = QtGui.QPolygonF()
     for i in lrange(n):
         poly.append(QtCore.QPointF(x[i], y[i]))
     # paint it
     painter.save()
     pen = QtGui.QPen(self.colors['board_foreground'])
     pen.setWidthF(0)
     painter.setPen(pen)
     icon = self.woods[board.wood]
     if icon is not None:
         if isinstance(icon, str):
             # then it's an image file
             brush = QtGui.QBrush(QtGui.QPixmap(icon))
         else:
             # otherwise, it must be a pattern fill
             if icon == QtCore.Qt.SolidPattern:
                 color = fill_color
             else:
                 # It's not a solid fill, so draw the polygon with the background color first
                 color = self.colors['board_background']
                 brush = QtGui.QBrush(color)
                 painter.setBrush(brush)
                 painter.drawPolygon(poly)
                 color = self.colors['board_foreground']
             brush = QtGui.QBrush(color, icon)
         (inverted, invertable) = self.transform.inverted()
         brush.setMatrix(inverted.toAffine())
         painter.setBrush(brush)
     painter.drawPolygon(poly)
     painter.restore()
Example No. 59
 def cut_delete(self, f):
     """
     Deletes cut of index f.  Returns True if able to delete the cut,
     False otherwise.
     """
     if len(self.cuts) < 2:  # don't delete the last cut
         return False
     # delete from the cuts list
     c = self.cuts[0:f]
     c.extend(self.cuts[f + 1 :])
     self.cuts = c
     # adjust the cursor appropriately
     if self.cursor_cut >= f and self.cursor_cut > 0:
         self.cursor_cut -= 1
     # adjust the active cuts list
     id = self.active_cuts.index(f)
     c = self.active_cuts[0:id]
     c.extend(self.active_cuts[id + 1 :])
     self.active_cuts = c
     for i in lrange(len(self.active_cuts)):
         if self.active_cuts[i] > f:
             self.active_cuts[i] -= 1
     return True