def calculate_zernikes(self, workspace):
     """Measure Zernike moments of each object's intensity distribution.

     For every selected object set: fit the minimum enclosing circle per
     object, map each labeled pixel into that object's unit circle,
     evaluate the Zernike polynomials there, and record per-object
     magnitude (and optionally phase) features for every selected image.
     """
     zernike_indexes = cpmz.get_zernike_indexes(self.zernike_degree.value + 1)
     meas = workspace.measurements
     for o in self.objects:
         object_name = o.object_name.value
         objects = workspace.object_set.get_objects(object_name)
         #
         # First, get a table of centers and radii of minimum enclosing
         # circles per object
         #
         # Index 0 is unused so that label values can index directly.
         ij = np.zeros((objects.count + 1, 2))
         r = np.zeros(objects.count + 1)
         for labels, indexes in objects.get_labels():
             ij_, r_ = minimum_enclosing_circle(labels, indexes)
             ij[indexes] = ij_
             r[indexes] = r_
         #
         # Then compute x and y, the position of each labeled pixel
         # within a unit circle around the object
         #
         ijv = objects.ijv
         l = ijv[:, 2]
         yx = (ijv[:, :2] - ij[l, :]) / r[l, np.newaxis]
         z = cpmz.construct_zernike_polynomials(
                 yx[:, 1], yx[:, 0], zernike_indexes)
         for image_group in self.images:
             image_name = image_group.image_name.value
             image = workspace.image_set.get_image(
                     image_name, must_be_grayscale=True)
             pixels = image.pixel_data
             # Keep only object pixels that fall inside the image bounds
             # and inside the image's mask.
             mask = (ijv[:, 0] < pixels.shape[0]) & \
                    (ijv[:, 1] < pixels.shape[1])
             mask[mask] = image.mask[ijv[mask, 0], ijv[mask, 1]]
             yx_ = yx[mask, :]
             l_ = l[mask]
             z_ = z[mask, :]
             if len(l_) == 0:
                 # No in-bounds object pixels: record empty measurements
                 # so downstream code still finds the feature columns.
                 for i, (n, m) in enumerate(zernike_indexes):
                     ftr = self.get_zernike_magnitude_name(image_name, n, m)
                     meas[object_name, ftr] = np.zeros(0)
                     if self.wants_zernikes == Z_MAGNITUDES_AND_PHASE:
                         ftr = self.get_zernike_phase_name(image_name, n, m)
                         meas[object_name, ftr] = np.zeros(0)
                 continue
             # Per-object pixel counts, used to normalize the magnitudes.
             areas = scind.sum(
                     np.ones(l_.shape, int), labels=l_, index=objects.indices)
             for i, (n, m) in enumerate(zernike_indexes):
                 # Real and imaginary parts of the intensity-weighted
                 # Zernike moment, summed per object.
                 vr = scind.sum(
                         pixels[ijv[mask, 0], ijv[mask, 1]] * z_[:, i].real,
                         labels=l_, index=objects.indices)
                 vi = scind.sum(
                         pixels[ijv[mask, 0], ijv[mask, 1]] * z_[:, i].imag,
                         labels=l_, index=objects.indices)
                 magnitude = np.sqrt(vr * vr + vi * vi) / areas
                 ftr = self.get_zernike_magnitude_name(image_name, n, m)
                 meas[object_name, ftr] = magnitude
                 if self.wants_zernikes == Z_MAGNITUDES_AND_PHASE:
                     # NOTE(review): arctan2(vr, vi) is (real, imag) — the
                     # usual complex-phase convention is arctan2(imag, real).
                     # Confirm this ordering is the intended phase definition.
                     phase = np.arctan2(vr, vi)
                     ftr = self.get_zernike_phase_name(image_name, n, m)
                     meas[object_name, ftr] = phase
Example #2
0
 def _distance_to_W(self, ids=None):
     """Build W-style neighbor and weight dictionaries from self._nmat.

     Parameters
     ----------
     ids : sequence, optional
         External ids for the observations; defaults to 0..n-1.

     Returns
     -------
     (allneighbors, weights) : tuple of dict
         Maps each id to its neighbor ids and, per neighbor, either a
         binary weight (1) or distance ** self.alpha.

     Raises
     ------
     Exception
         If two elements share a location (distance 0) and alpha < 0.
     """
     allneighbors = {}
     weights = {}
     # Translate positional indices into caller-supplied ids when given.
     if ids:
         ids = np.array(ids)
     else:
         ids = np.arange(len(self._nmat))
     if self.binary:
         for i, neighbors in enumerate(self._nmat):
             # Drop self-neighbor entries.
             ns = [ni for ni in neighbors if ni != i]
             neigh = list(ids[ns])
             if len(neigh) == 0:
                 allneighbors[ids[i]] = []
                 weights[ids[i]] = []
             else:
                 allneighbors[ids[i]] = neigh
                 weights[ids[i]] = [1] * len(ns)
     else:
         # Pairwise distances within the threshold, for weighting.
         self.dmat = self.kd.sparse_distance_matrix(
             self.kd, max_distance=self.threshold)
         for i, neighbors in enumerate(self._nmat):
             ns = [ni for ni in neighbors if ni != i]
             neigh = list(ids[ns])
             if len(neigh) == 0:
                 allneighbors[ids[i]] = []
                 weights[ids[i]] = []
             else:
                 try:
                     allneighbors[ids[i]] = neigh
                     # 0.0 ** negative alpha raises ZeroDivisionError.
                     weights[ids[i]] = [self.dmat[(
                         i, j)] ** self.alpha for j in ns]
                 except ZeroDivisionError:
                     # Parenthesized raise: the original Python-2-only
                     # "raise Exception, msg" form is a SyntaxError on Python 3.
                     raise Exception("Cannot compute inverse distance for elements at same location (distance=0).")
     return allneighbors, weights
	def load_text(self):
		'''
		The text of instances is not stored in the prediction result file,
		so you need to call this function to load texts from testing data.

		>>> from libshorttext.analyzer import *
		>>> insts = InstanceSet('prediction_result_path')
		>>> insts.load_text()

		This method also loads the extra svm features if extra svm files
		are used when training.
		'''
		EMPTY_MESSAGE = '**None**'
		# Instances sorted by line index so the data file can be read once,
		# front to back, matching instances as their lines come up.
		sorted_insts = sorted(self.insts, key = lambda inst: inst.idx)
		i = 0
		# izip (Python 2 itertools) streams the main data file and every
		# extra svm file in lockstep, one line per file per iteration.
		for idx, lines in enumerate(izip(*([open(self.filepath, 'r')] + [open(f, 'r') for f in self.extra_svm_files]))):
			line = lines[0]
			extra_svm_feats = lines[1:]
			nr_extra_svm_feats = len(extra_svm_feats)
			# All remaining instances have been matched; stop reading.
			if idx > sorted_insts[-1].idx:
				break
			if idx == sorted_insts[i].idx:
				# Text is everything after the first tab (label column).
				try:
					sorted_insts[i].text = line.split('\t',1)[1].strip()
				except:
					sorted_insts[i].text = EMPTY_MESSAGE

				sorted_insts[i].extra_svm_feats = [None] * nr_extra_svm_feats
				for j, extra_svm_feat in enumerate(extra_svm_feats):
					# Parse "label idx:val idx:val ..." into {idx: val};
					# fall back to the sentinel on any malformed line.
					try:
						sorted_insts[i].extra_svm_feats[j] = dict(map(lambda t: (int(t[0]), float(t[1])), [feat.split(':') for feat in extra_svm_feat.split(None, 1)[1].split()]))
					except:
						sorted_insts[i].extra_svm_feats[j] = EMPTY_MESSAGE
				i += 1
Example #4
0
def recompute_unread(min_date = None):
    """Rebuild the cached unread message/comment queues for every account
    with inbox activity, optionally limited to relations newer than
    min_date.

    Python 2 maintenance script (print statements); runs against the live
    database via the r2 query layer.
    """
    from r2.models import Inbox, Account, Comment, Message
    from r2.lib.db import queries

    def load_accounts(inbox_rel):
        # Collect ids of every account on the receiving side of the given
        # inbox relation, scanning newest-first.
        accounts = set()
        q = inbox_rel._query(eager_load = False, data = False,
                             sort = desc("_date"))
        if min_date:
            q._filter(inbox_rel.c._date > min_date)

        for i in fetch_things2(q):
            accounts.add(i._thing1_id)

        return accounts

    # Message recipients get all three unread queues refreshed.
    accounts_m = load_accounts(Inbox.rel(Account, Message))
    for i, a in enumerate(accounts_m):
        a = Account._byID(a)
        print "%s / %s : %s" % (i, len(accounts_m), a)
        queries.get_unread_messages(a).update()
        queries.get_unread_comments(a).update()
        queries.get_unread_selfreply(a).update()

    # Accounts with comment-reply activity only (message recipients were
    # already handled above, hence the set difference).
    accounts = load_accounts(Inbox.rel(Account, Comment)) - accounts_m
    for i, a in enumerate(accounts):
        a = Account._byID(a)
        print "%s / %s : %s" % (i, len(accounts), a)
        queries.get_unread_comments(a).update()
        queries.get_unread_selfreply(a).update()
Example #5
0
def knapsack_unbounded_dp(items, C):
    """Solve the unbounded knapsack problem by dynamic programming.

    Parameters
    ----------
    items : sequence of (name, size, value) triples (indexed via the
        module-level NAME/SIZE/VALUE constants).
    C : int
        Knapsack capacity.

    Returns
    -------
    (value, size, numbagged, bagged) : best total value, total size used,
        total item count, and [(name, count)] pairs sorted by name.
    """
    # Order by max value per item size (affects only tie-breaking; the DP
    # itself is exact).
    items = sorted(items, key=lambda item: item[VALUE]/float(item[SIZE]), reverse=True)

    # sack[c] = (best value at capacity c, count of each item used).
    sack = [(0, [0 for i in items]) for i in range(0, C+1)]   # value, [item counts]
    for i, item in enumerate(items):
        name, size, value = item
        for c in range(size, C+1):
            sackwithout = sack[c-size]  # previous max sack to try adding this item to
            trial = sackwithout[0] + value
            if sack[c][0] < trial:
                # The sack without this item, plus this item, is better.
                # Copy the count list so sack[c-size] is not aliased.
                sack[c] = (trial, sackwithout[1][:])
                sack[c][1][i] += 1   # use one more

    value, bagged = sack[C]
    numbagged = sum(bagged)
    size = sum(items[i][1]*n for i, n in enumerate(bagged))
    # convert to (item, count) pairs in name order
    bagged = sorted((items[i][NAME], n) for i, n in enumerate(bagged) if n)

    return value, size, numbagged, bagged
Example #6
0
def rx_oversampled(frames, ref_frame, modulated_frame, x_preamble, data, rx_kernel, demapper, timeslots, fft_len, cp_len, cs_len):
    """Synchronize and demodulate received GFDM frames.

    NOTE(review): `frames` rows appear to be 2x-oversampled captures (the
    reference vectors are resampled to 2x below) — confirm against the
    capture pipeline.
    """
    # Upsample the references to match the assumed 2x capture rate.
    ref_frame_os = signal.resample(ref_frame, 2 * len(ref_frame))
    x_preamble_os = signal.resample(x_preamble, 2 * len(x_preamble))

    # Frame layout at Nyquist rate: CP + preamble (2*fft_len) + CS,
    # then CP + data (timeslots*fft_len) + CS.
    nyquist_frame_len = cp_len + 2 * fft_len + cs_len + cp_len + timeslots * fft_len + cs_len
    n_frames = np.shape(frames)[0]
    sync_frames = np.zeros((n_frames, nyquist_frame_len), dtype=np.complex)
    print('nyquist sampled frame len', nyquist_frame_len, 'with n_frames', n_frames)
    f_start = cp_len + 2 * fft_len + cs_len
    d_start = f_start + cp_len
    print('data start: ', d_start)
    # NOTE(review): only the first 2 frames are processed here even though
    # sync_frames has n_frames rows — looks like a debug limit; confirm.
    for i, f in enumerate(frames[0:2]):
        # Rotate one sample and zero the wrapped-around entry.
        tf = np.roll(f, 1)
        tf[0] = 0
        # NOTE(review): decimates to half length yet is matched against the
        # 2x references in synchronize_time — confirm the rate convention.
        ff = signal.resample(tf, len(f) // 2)
        sframe = synchronize_time(ff, ref_frame_os, x_preamble_os, 2 * fft_len, 2 * cp_len)
        # Back to Nyquist rate for frequency-offset correction.
        sframe = signal.resample(sframe, len(sframe) // 2)
        sframe = synchronize_freq_offsets(sframe, modulated_frame, x_preamble, fft_len, cp_len, samp_rate=3.125e6)
        print(len(sframe), len(ref_frame))
        rx_preamble = sframe[cp_len:cp_len + 2 * fft_len]
        avg_phase = calculate_avg_phase(rx_preamble, x_preamble)
        # m, c = calculate_avg_phase(rx_preamble, x_preamble)
        # avg_phase = calculate_avg_phase(sframe, ref_frame)
        # phase_eqs = m * np.arange(-cp_len, len(sframe) - cp_len) + c
        # sframe *= np.exp(-1j * phase_eqs)
        # sframe *= np.exp(-1j * avg_phase)
        sync_frames[i] = sframe
        rx_data_frame = sframe[d_start:d_start + fft_len * timeslots]
        # # rx_data_frame *= np.exp(-1j * avg_phase)
        #
        demodulate_frame(rx_data_frame, modulated_frame, rx_kernel, demapper, data, timeslots, fft_len)

    # NOTE(review): iterates sync_frames[0:3] although only rows 0-1 were
    # filled above (row 2 is all zeros) — confirm intended.
    for i, f in enumerate(sync_frames[0:3]):
        rx_data_frame = f[d_start:d_start + fft_len * timeslots]
        demodulate_frame(rx_data_frame, modulated_frame, rx_kernel, demapper, data, timeslots, fft_len)
Example #7
0
 def links(self, data_matrix):
     """Return, for each instance, the id of its closest denser neighbor.

     Density-peak style link assignment: every instance points to the
     nearest instance with strictly higher density; instances with no
     denser neighbor in range point to themselves.
     """
     data_size = data_matrix.shape[0]
     kernel_matrix = pairwise_kernels(data_matrix, metric=self.metric, **self.kwds)
     # compute instance density as average pairwise similarity
     density = np.sum(kernel_matrix, 0) / data_size
     # compute list of nearest neighbors; argsort of the negated kernel row
     # orders neighbors most- to least-similar (column 0 is the instance itself)
     kernel_matrix_sorted = np.argsort(-kernel_matrix)
     # make matrix of densities ordered by nearest neighbor
     density_matrix = density[kernel_matrix_sorted]
     # if a denser neighbor cannot be found then assign link to the instance itself
     link_ids = list(range(density_matrix.shape[0]))
     # for all instances determine link
     for i, row in enumerate(density_matrix):
         i_density = row[0]
         # for all neighbors from the closest to the furthest
         for jj, d in enumerate(row):
             # proceed until n_nearest_neighbors have been explored
             if self.n_nearest_neighbors is not None and jj > self.n_nearest_neighbors:
                 break
             j = kernel_matrix_sorted[i, jj]
             # jj == 0 is the instance itself, so skip it
             if jj > 0:
                 j_density = d
                 # if the density of the neighbor is higher than the density of the instance assign link
                 if j_density > i_density:
                     link_ids[i] = j
                     break
     return link_ids
    def WorkBook_writeSheet(self, filename):
        """Write self.sheet to a csv file, then convert every csv in the
        current directory to xlsx and terminate the process.

        NOTE(review): the *_COLUMN / *_WEIGHT indices are module-level
        constants not visible here — confirm they match self.sheet's row
        layout. Opening the csv in 'rb' for csv.reader is Python 2 style.
        """
        columns = ['Date', 'Month', 'ID #', 'Contact ID #', \
            'Talked to Person X?', 'Closeness/Trust with X', \
            'Connecting ID', 'Connector ID']

        # Writes to csv file
        with open(filename, 'w') as f:
            writer = csv.writer(f)
            writer.writerow(columns)
            for row in self.sheet:
                date = row[DATE_COLUMN]
                month = row[MONTH_COLUMN]
                connecting = row[CONNECTING_COLUMN]
                connector = row[CONNECTOR_COLUMN]
                talkVal = row[TALKED_WEIGHT]
                closeVal = row[CLOSENESS_WEIGHT]
                connectingID = row[CONNECTING_ID_COLUMN]
                connectorID = row[CONNECTOR_ID_COLUMN]

                # Re-order the fields to match the header row above.
                row = [date, month, connecting, connector, talkVal, \
                    closeVal, connectingID, connectorID]

                writer.writerow(row)

        # Converts from the written csv file to xlsx
        for csvfile in glob.glob(os.path.join('.', '*.csv')):
            workbook = Workbook(csvfile[0:-4] + '.xlsx')
            worksheet = workbook.add_worksheet()
            with open(csvfile, 'rb') as f:
                reader = csv.reader(f)
                for r, row in enumerate(reader):
                    for c, col in enumerate(row):
                        worksheet.write(r, c, col)
            workbook.close()
        # NOTE(review): exits the whole process after writing — confirm
        # this method is meant to be the program's final action.
        sys.exit()
    def plots_1d(self, roots, params=None, legend_labels=None, legend_ncol=None, nx=None,
                 paramList=None, roots_per_param=False, share_y=None, markers=None, xlims=None):
        """Draw a grid of 1D marginalized plots, one subplot per parameter.

        roots_per_param=True means roots is a list of root-lists, one per
        parameter; otherwise the same roots are plotted in every subplot.
        Returns (plot_col, plot_row), the grid shape used.
        """
        if roots_per_param:
            params = [self.check_param(roots[i][0], param) for i, param in enumerate(params)]
        else: params = self.get_param_array(roots[0], params)
        # Optionally restrict to the parameter names listed in paramList.
        if paramList is not None:
            wantedParams = self.paramNameListFromFile(paramList)
            params = [param for param in params if param.name in wantedParams]
        nparam = len(params)
        # Share the y axis only when densities are labeled and comparable.
        if share_y is None: share_y = self.settings.prob_label is not None and nparam > 1
        plot_col, plot_row = self.make_figure(nparam, nx=nx)
        plot_roots = roots
        for i, param in enumerate(params):
            subplot(plot_row, plot_col, i + 1)
            if roots_per_param: plot_roots = roots[i]
            # Per-parameter marker line, if one was supplied.
            if markers is not None and i < len(markers): marker = markers[i]
            else: marker = None
#            self.plot_1d(plot_roots, param, no_ylabel=share_y and  i % self.plot_col > 0, marker=marker, prune=(None, 'both')[share_y])
            self.plot_1d(plot_roots, param, no_ylabel=share_y and  i % self.plot_col > 0, marker=marker)
            if xlims is not None: xlim(xlims[i][0], xlims[i][1])
            if share_y: self.spaceTicks(gca().xaxis, expand=True)

        # [legend_labels, roots][...] picks roots when no labels were given.
        self.finish_plot([legend_labels, roots][legend_labels is None], legend_ncol=legend_ncol)
        if share_y: subplots_adjust(wspace=0)

        return plot_col, plot_row
Example #10
0
def categorize(data, colnum, missingvals, ranges=[]):
    """Replace column `colnum` of each row in `data` (mutated in place)
    with a numeric category code.

    Missing values become 0; with `ranges`, values are bucketed by string
    containment or numeric interval; otherwise codes come from the sorted
    order of distinct values (1-based). Returns the indices of rows that
    had missing values. Python 2 code (basestring/long).
    Note: the mutable default `ranges=[]` is safe here — it is only read.
    """
    # Collect the distinct non-missing values to define default categories.
    categories = set()
    for row in data:
        if row[colnum] not in missingvals:
            categories.add(row[colnum])
    catlist = list(categories)
    catlist.sort()
    # print(', '.join(['%i: %s' % (n, catlist[n]) for n in xrange(len(catlist))]), "(with missing vals:", missingvals, ")")
    
    missing_indices = []
    for index, row in enumerate(data):
        if row[colnum] in missingvals: # missing data
            row[colnum] = 0
            missing_indices.append(index)
        else: # this row doesn't have missing data.
            if len(ranges) > 0: # find val in ranges and use that index.
                found = False
                for i, r in enumerate(ranges):
                    if isinstance(r, basestring): # compare strings.
                        # Substring match assigns the range's index.
                        if r in row[colnum]:
                            row[colnum] = i
                            found = True
                            break
                    elif isinstance(r, ( int, long )) and not re.search('[a-zA-Z]', row[colnum]):
                        # ref : http://stackoverflow.com/questions/3501382/checking-whether-a-variable-is-an-integer-or-not
                        # Numeric bucketing: ranges[i] <= value < ranges[i+1].
                        if float(row[colnum]) >= r and len(ranges) > i+1 and isinstance(ranges[i+1], ( int, long )) and float(row[colnum]) < ranges[i+1]:
                            row[colnum] = i
                            found = True
                            break
                if not found:
                    # Value fell outside every range — surface it for debugging.
                    print(row[colnum]) # error here
            else: # no ranges given, so just set category of appearance.
                row[colnum] = catlist.index(row[colnum])+1
    return missing_indices
def validate_label_generation():
    """Cross-check sample and family labels between the two sorted label
    CSVs; print every incongruence and a final error count.

    Returns None; output goes to stdout.
    """
    mals1_df = pd.read_csv('data/sorted-train-labels-vs251-252.csv')
    mals2_df = pd.read_csv('data/sorted-train-labels-vs263-264-apt.csv')

    counter = 0
    m1_x = np.array(mals1_df['malware_type_x'])
    m1_sl = np.array(mals1_df['sample_label'])
    m1_fl = np.array(mals1_df['family_label'])
    m2_x = np.array(mals2_df['malware_type_x'])
    m2_sl = np.array(mals2_df['sample_label'])
    m2_fl = np.array(mals2_df['family_label'])
    # (The unused family_name columns from the original are no longer read.)

    # Index the second table by name once, replacing the original
    # O(len(m1) * len(m2)) rescan of m2_x for every name in m1_x.
    # Appending preserves idx2 order, so the printed output is unchanged.
    m2_index = {}
    for idx2, mname2 in enumerate(m2_x):
        m2_index.setdefault(mname2, []).append(idx2)

    for idx1, mname1 in enumerate(m1_x):
        for idx2 in m2_index.get(mname1, ()):
            if m1_sl[idx1] != m2_sl[idx2]:
                print("Sample label incongruence: {:d} {:d}".format(m1_sl[idx1], m2_sl[idx2]))
                counter += 1

            if (m1_fl[idx1] != m2_fl[idx2]):
                print("Family label incongruence: {:d} {:d}".format(m1_fl[idx1], m2_fl[idx2]))
                counter += 1

        if (idx1 % 1000) == 0:
            print("Processed {:d} malware names.".format(idx1))


    print("Total Incongruence Errors: {:d}".format(counter))

    return
Example #12
0
    def testPeriodsMonths(self):
        """ Test iteration over periods (months) """

        dt = datetime.datetime

        # (slots, start, end, expected period boundaries)
        cases = (
            ("months", dt(2011, 1, 5), dt(2011, 4, 28),
             [(dt(2011, 1, 5), dt(2011, 2, 5)),
              (dt(2011, 2, 5), dt(2011, 3, 5)),
              (dt(2011, 3, 5), dt(2011, 4, 5)),
              (dt(2011, 4, 5), dt(2011, 4, 28))]),
            ("3 months", dt(2011, 1, 5), dt(2011, 8, 16),
             [(dt(2011, 1, 5), dt(2011, 4, 5)),
              (dt(2011, 4, 5), dt(2011, 7, 5)),
              (dt(2011, 7, 5), dt(2011, 8, 16))]),
        )
        for slots, start, end, expected in cases:
            ef = S3TimePlotEventFrame(start, end, slots=slots)
            # Each yielded period must match the expected boundaries,
            # with the final period truncated at the frame end.
            for i, period in enumerate(ef):
                self.assertEqual(period.start, expected[i][0])
                self.assertEqual(period.end, expected[i][1])
Example #13
0
    def testPeriodsWeeks(self):
        """ Test iteration over periods (weeks) """

        dt = datetime.datetime

        # (slots, start, end, expected period boundaries)
        cases = (
            ("weeks", dt(2011, 1, 5), dt(2011, 1, 28),
             [(dt(2011, 1, 5), dt(2011, 1, 12)),
              (dt(2011, 1, 12), dt(2011, 1, 19)),
              (dt(2011, 1, 19), dt(2011, 1, 26)),
              (dt(2011, 1, 26), dt(2011, 1, 28))]),
            ("2 weeks", dt(2011, 1, 5), dt(2011, 2, 16),
             [(dt(2011, 1, 5), dt(2011, 1, 19)),
              (dt(2011, 1, 19), dt(2011, 2, 2)),
              (dt(2011, 2, 2), dt(2011, 2, 16))]),
        )
        for slots, start, end, expected in cases:
            ef = S3TimePlotEventFrame(start, end, slots=slots)
            # Each yielded period must match the expected boundaries,
            # with the final period truncated at the frame end.
            for i, period in enumerate(ef):
                self.assertEqual(period.start, expected[i][0])
                self.assertEqual(period.end, expected[i][1])
def lcs_dy_prog(s1, s2):
    """Return one longest common subsequence of s1 and s2 (not unique).

    Standard O(len(s1)*len(s2)) dynamic program: fill a length table,
    then backtrack from the bottom-right corner to recover a witness.
    Empty inputs yield "".
    """
    # np.int was deprecated in NumPy 1.20 and removed in 1.24; plain int
    # is the supported spelling for the same dtype.
    table = np.zeros((len(s1), len(s2)), dtype=int)
    def lookup(i, j):
        # Out-of-range cells correspond to an empty prefix: LCS length 0.
        if i < 0 or j < 0:
            return 0
        else:
            return table[i, j]
    # find length of the lcs: table[i, j] = LCS length of s1[:i+1], s2[:j+1]
    for i, c1 in enumerate(s1):
        for j, c2 in enumerate(s2):
            if c1 == c2:
                table[i, j] = lookup(i - 1, j - 1) + 1
            else:
                table[i, j] = max(lookup(i - 1, j), lookup(i, j - 1))
    # backtrack to find lcs (not unique)
    i = len(s1) - 1
    j = len(s2) - 1
    res = ""
    while i >= 0 and j >= 0:
        if s1[i] == s2[j]:
            res += s1[i]
            i -= 1
            j -= 1
        else:
            if lookup(i - 1, j) > lookup(i, j - 1):
                i -= 1
            else:
                j -= 1
    # Characters were collected back-to-front; reverse once at the end.
    res = res[::-1]
    return res
Example #15
0
    def _make_scalar_compound_controller(self, fcurves, keyframes, bez_chans, default_xform):
        """Build a plCompoundController with X/Y/Z plLeafControllers from
        Blender keyframes.

        bez_chans lists the channel indices that use bezier interpolation;
        default_xform supplies per-channel fallback values for channels
        with no keyframes at all.
        """
        ctrl = plCompoundController()
        subctrls = ("X", "Y", "Z")
        for i in subctrls:
            setattr(ctrl, i, plLeafController())
        # One keyframe list per channel, filled below.
        exported_frames = ([], [], [])
        ctrl_fcurves = { i.array_index: i for i in fcurves }

        for keyframe in keyframes:
            for i, subctrl in enumerate(subctrls):
                fval = keyframe.values.get(i, None)
                # A keyframe may only define a subset of the channels.
                if fval is not None:
                    keyframe_type = hsKeyFrame.kBezScalarKeyFrame if i in bez_chans else hsKeyFrame.kScalarKeyFrame
                    exported = hsScalarKey()
                    exported.frame = keyframe.frame_num
                    exported.frameTime = keyframe.frame_time
                    exported.inTan = keyframe.in_tans[i]
                    exported.outTan = keyframe.out_tans[i]
                    exported.type = keyframe_type
                    exported.value = fval
                    exported_frames[i].append(exported)
        for i, subctrl in enumerate(subctrls):
            my_keyframes = exported_frames[i]

            # ensure this controller has at least ONE keyframe
            if not my_keyframes:
                # Synthesize a constant key at frame 0 from the default
                # transform so the engine always has a value to sample.
                hack_frame = hsScalarKey()
                hack_frame.frame = 0
                hack_frame.frameTime = 0.0
                hack_frame.type = hsKeyFrame.kScalarKeyFrame
                hack_frame.value = default_xform[i]
                my_keyframes.append(hack_frame)
            getattr(ctrl, subctrl).keys = (my_keyframes, my_keyframes[0].type)
        return ctrl
Example #16
0
    def rectangle_plot(self, xparams, yparams, yroots, filled=True, ymarkers=None, xmarkers=None, **kwargs):
            """Draw a grid of 2D plots: one column per x parameter, one row
            per y parameter, sharing x axes down columns and y axes along
            rows. yroots supplies the chain roots for each row.
            """
            self.make_figure(nx=len(xparams), ny=len(yparams))
#            f, plots = subplots(len(yparams), len(xparams), sharex='col', sharey='row')
            sharey = None
            # Axes of the first column, reused as y-share anchors for the rest.
            yshares = []
            for x, xparam in enumerate(xparams):
                sharex = None
                for y, (yparam, roots) in enumerate(zip(yparams, yroots)):
#                    f.sca(plots[y, x])
                    if x > 0: sharey = yshares[y]
                    ax = self.subplot(x + 1, y + 1, sharex=sharex, sharey=sharey)
                    # The top subplot of each column anchors the shared x axis.
                    if y == 0: sharex = ax
                    # Labels only on the outer edges of the grid.
                    self.plot_2d(roots, param_pair=[xparam, yparam], filled=filled, do_xlabel=y == len(yparams) - 1,
                                 do_ylabel=x == 0, add_legend_proxy=x == 0 and y == 0)
                    if ymarkers is not None and ymarkers[y] is not None: self.add_y_marker(ymarkers[y], **kwargs)
                    if xmarkers is not None and xmarkers[x] is not None: self.add_x_marker(xmarkers[x], **kwargs)
                    # Accumulate the union of x-limits down the column.
                    if y == 0: lims = xlim()
                    else: lims = (min(xlim()[0], lims[0]), max(xlim()[1], lims[1]))
                    # Hide tick labels on interior subplots.
                    if y != len(yparams) - 1: setp(ax.get_xticklabels(), visible=False)
                    if x != 0: setp(ax.get_yticklabels(), visible=False)
                    if x == 0: yshares.append(ax)

                sharex.set_xlim(lims)
                self.spaceTicks(sharex.xaxis)
                sharex.set_xlim(sharex.xaxis.get_view_interval())
            for ax in yshares:
                self.spaceTicks(ax.yaxis)
                ax.set_ylim(ax.yaxis.get_view_interval())
            # Collapse inter-plot gaps so the grid reads as one panel.
            subplots_adjust(wspace=0, hspace=0)
            self.finish_plot(no_gap=True)
    def flux_matrix(self, geo):
        """Returns a sparse matrix which can be used to multiply a vector of connection table values for underground
        blocks, to give approximate average fluxes of those values at the block centres.

        The matrix has 3 rows (x, y, z components) per underground block
        and one column per connection.
        """
        natm = geo.num_atmosphere_blocks
        nele = geo.num_underground_blocks
        # Map each (block1, block2) name pair to its connection index.
        conindex = dict([((c.block[0].name, c.block[1].name), i) for i, c in enumerate(self.connectionlist)])
        from scipy import sparse

        A = sparse.lil_matrix((3 * nele, self.num_connections))
        if not self.block_centres_defined:
            self.calculate_block_centres(geo)
        for iblk, blk in enumerate(self.blocklist[natm:]):
            ncons = blk.num_connections
            for conname in blk.connection_name:
                # Orient the connection normal away from this block.
                otherindex, sgn = [(0, -1), (1, 1)][conname[0] == blk.name]
                blk2name = conname[otherindex]
                icon = conindex[conname]
                centre2 = self.block[blk2name].centre
                # "is not None" replaces the Python-2-only "<> None", which
                # is a SyntaxError on Python 3 (and an identity test is the
                # correct check for a possibly-array centre anyway).
                if centre2 is not None:
                    # Unit vector along the connection direction.
                    n = centre2 - blk.centre
                    n /= np.linalg.norm(n)
                else:
                    n = np.array([0, 0, 1])  # assumed connection to atmosphere
                for i, ni in enumerate(n):
                    A[3 * iblk + i, icon] = -sgn * ni / (ncons * self.connection[conname].area)
        return A
Example #18
0
	def classListBox2(self, id):
		"""Handle selection of a class in the class list box: mark the
		class selected and repopulate the method and label list boxes
		for the current notebook page.
		"""
		pageNumber = self.noteBook.GetCurrent()
		cname = self.classBox[pageNumber].GetEntry(id).GetText().Data()

		self.methodBox[pageNumber].RemoveAll()
		self.labelBox[pageNumber].RemoveAll()

		methods = self.cmap[cname]['methods']
		labels = self.cmap[cname]['labels']

		self.cmap[cname]['selected'] = True

		# List methods

		names = self.cmap[cname]['sortedmethods']
		for index, name in enumerate(names):
			if not methods[name]: continue
			self.methodBox[pageNumber].AddEntry(name, index)
		self.methodBox[pageNumber].Layout()

		# List getByLabels

		# sorted(labels) replaces "names = labels.keys(); names.sort()":
		# dict.keys() is a view on Python 3 and has no sort(); sorted()
		# yields the same ordering on both Python 2 and 3.
		names = sorted(labels)
		for index, name in enumerate(names):
			if not labels[name]: continue
			self.labelBox[pageNumber].AddEntry(name, index)
		self.labelBox[pageNumber].Layout()
Example #19
0
def load_level(filename):
    """ Loads a level from a given text file.

    Args:
        filename: The name of the txt file containing the maze.

    Returns:
        The loaded level (dict) containing the locations of walls (set), the locations of spaces (dict), and
        a mapping of locations to waypoints (dict).

    """
    walls = set()
    spaces = {}
    waypoints = {}
    # Grid coordinates are (column, row): col from the character position
    # within the line, row from the line number.
    with open(filename, "r") as level_file:
        for row, line in enumerate(level_file.readlines()):
            for col, char in enumerate(line):
                if char == '\n':
                    continue
                if char == WALL:
                    walls.add((col, row))
                elif char.isnumeric():
                    # Digit cells are spaces with an explicit cost.
                    spaces[(col, row)] = float(char)
                elif char.islower():
                    # Lowercase letters are unit-cost spaces that double
                    # as named waypoints.
                    spaces[(col, row)] = 1.
                    waypoints[char] = (col, row)

    return {'walls': walls,
            'spaces': spaces,
            'waypoints': waypoints}
Example #20
0
File: main.py Project: Enomiss/MSc
    def run_image_find(self):
        """Build a label image from the featured objects and hand it to the
        pixel-image widget for coloring.

        Each featured object gets a distinct integer color, painted onto
        every (superpixel row, pixel column) cell belonging to it.
        Does nothing until both label arrays have been computed.
        """
        if self.ss_labels is None or self.s_labels is None:
            return

        featured_labels = self.tree.get_featured( VARIABLE['edge_mode'] )

        # object name -> assigned color index (kept for parity with the
        # original; colors are assigned in featured_labels order).
        object_color = { }
        color = 1

        label_img = numpy.zeros( (len( self.ss_labels), len( self.s_labels[0])) )
        for object_name, object_featured in featured_labels.items():
            object_color[ object_name ] = color

            for i, ss_label in enumerate( self.ss_labels ):
                # "in dict" replaces "in dict.keys()" — same membership test.
                if ss_label in object_featured:
                    for j, label in enumerate( self.s_labels[i] ):
                        if label in object_featured[ ss_label ]:
                            label_img[i,j] = color

            color += 1
        # (Leftover debug printing of rows, labels and colors removed.)

        self.piximage.color_objects(label_img)
Example #21
0
 def load_records(self, records):
     """Load in a database and interpret it as a network

     First column must be unique keys which define the instance units.
     Each column is a pool (names, gangs, ages, etc).
     Every row is mutually excitory.
     """
     # Reset any previously loaded network in place.
     self.units[:] = []
     self.pools[:] = []
     for counter,record in enumerate(records):
         # One "pool:value" name per field, plus a synthetic "_:<row>" key
         # unit prepended so each record has a unique anchor.
         relatedunits = ['%s:%s' % (k,v) for (k,v) in record.items()]
         relatedunits.insert(0,'_:%s'% counter)
         # NOTE(review): after the insert this list is never empty, so this
         # guard looks unreachable — confirm before removing.
         if not len(relatedunits): continue
         # Index the key unit will occupy once appended below.
         key = len(self.units)
         for poolnum, name in enumerate(relatedunits):
             # Grow the pool list lazily, one pool per column position.
             if poolnum >= len(self.pools):
                 self.pools.append(Pool(self.unitbyname))
             pool = self.pools[poolnum]
             # Reuse an existing unit for a repeated value, else create one.
             if name in self.unitbyname:
                 unit = self.unitbyname[name]
             else:
                 unit = Unit(name, pool, self.unitbyname)
                 self.units.append(unit)
             pool.addmember(unit)
             # Mutually excite the row's key unit and each field unit.
             if poolnum > 0:
                 self.units[key].addexciter(unit)
                 unit.addexciter(self.units[key])
Example #22
0
def get_unigram_probs(vocab, counts, smooth_constant):
    """Turn raw word counts into smoothed unigram probabilities.

    Adds smooth_constant-scaled mass to every non-special word's count
    (mutating `counts` in place) and returns counts normalized by the
    total over non-special words. Exits if smoothing is disabled while
    some vocabulary words have zero counts.
    """
    special_ids = {vocab[symbol] for symbol in SPECIAL_SYMBOLS}
    vocab_size = len(vocab) - len(SPECIAL_SYMBOLS)
    # How many real (non-special) words actually occurred.
    num_words_with_non_zero_counts = sum(
        1 for word_id, count in enumerate(counts)
        if word_id not in special_ids and count > 0)

    if num_words_with_non_zero_counts < vocab_size and smooth_constant == 0.0:
        sys.exit(sys.argv[0] + ": --smooth-unigram-counts should not be zero, "
                               "since there are words with zero-counts")

    smooth_count = smooth_constant * num_words_with_non_zero_counts / vocab_size

    # Apply smoothing in place and accumulate the normalizer over
    # non-special words only.
    total_counts = 0.0
    for word_id in range(len(counts)):
        if word_id in special_ids:
            continue
        counts[word_id] += smooth_count
        total_counts += counts[word_id]

    # Special symbols keep their (unsmoothed) counts in the numerator,
    # exactly as in the per-element division of the original loop.
    return [count / total_counts for count in counts]
Example #23
0
def plot_overtime(data_file):
    """Plot how similar each time step's chains are to the previous step's.

    For every consecutive pair of time steps, computes per-pair the
    fraction of chain heads shared with the previous step, then plots the
    mean and standard deviation over pairs. Python 2 code (print
    statements).
    """
    data = performance.load_score_dict(data_file)

    avg_sim = []
    std_sim = []

    # Lets compute the average fraction of matching paths for each case
    for index, time_step in enumerate(data):
        # The first step has no predecessor to compare against.
        if index == 0:
            continue
        prev_step = data[index - 1] 


        sim_list = []

        
        for pair_index, pair in enumerate(time_step):

            # Compare only the chain heads (first element of each path).
            curr_chain = set([x[0] for x in pair])
            print curr_chain
            prev_chain = set([x[0] for x in prev_step[pair_index]])

            # Skip pairs with nothing to compare.
            if len(curr_chain) == 0 or len(prev_chain) == 0:
                continue

            # Fraction of the current chain heads also present previously.
            sim = float(len(curr_chain & prev_chain)) / len(curr_chain)
            
            sim_list.append(sim)

        avg_sim.append(np.mean(sim_list)) 
        std_sim.append(np.std(sim_list))

        print "Next Time Step!"

    plotting.overtime_plot(avg_sim, std_sim)  
def extractNames(li):
    """Extract multi-word proper names from a list of HTML page items.

    Each element of *li* is indexable with item [1] carried along as the
    source link — presumably (text, link) pairs; TODO confirm with caller.
    Returns a list of [name, link] pairs, where *name* is a run of
    consecutive NNP-tagged words (single-word names are discarded).
    """
    finList = []
##  Loop through the list that has the HTML page content
    for a in li:
##  Split the stringified item into sentences
        for send in nltk.sent_tokenize(str(a)):
            smLi = []
##  Tokenize each sentence into words and attach a Part-of-Speech (POS) tag
            for index, chunk in enumerate(nltk.pos_tag(nltk.word_tokenize(send))):
##  Keep only proper-noun tags (NNP, NNPS); chunk is (word, tag)
                if 'NNP' in chunk[1]:
##  Keep the word only if its alphanumeric characters number more than 2
                    if(len(' '.join(e for e in chunk[0] if e.isalnum())) > 2):
##  Record [word position, (word, tag), source link]
                        smLi.append([index, chunk, a[1]])
            finList.append(smLi)
    nameLi = []
##  Stitch adjacent NNP words (consecutive sentence positions) into one name
    for f in finList:
        if len(f) > 0:
            strName = ''
            for index, i in enumerate(f):
##  Start a new name with the current word
                if strName == '':
                    strName = i[1][0]
##  Peek at the next entry unless we are at the end of the list
                if (index + 1) < len(f):
##  Consecutive position: extend the current name with the next word
                    if i[0] + 1 == f[index + 1][0]:
                        strName = strName + ' ' + f[index + 1][1][0]
##  Gap: flush multi-word names (with the link) and reset
##  NOTE(review): a name still being built when the list ends is never
##  flushed — possible dropped final name; confirm intent
                    else:
                        if ' ' in strName:
                            nameLi.append([strName, i[2]])
                        strName = ''
    return nameLi
Example #25
0
    def _eval_kernel(self):
        """Evaluate per-point kernel weights over each point's neighborhood.

        Populates self.neigh (ids of neighbors within each point's
        bandwidth, computed once and cached) and self.kernel (one weight
        array per point). Kernel formulas follow Anselin and Rey (2010),
        table 5.4.
        """
        # get points within bandwidth distance of each point
        if not hasattr(self, 'neigh'):
            kdtq = self.kdt.query_ball_point
            neighbors = [kdtq(self.data[i], r=bwi[0]) for i,
                         bwi in enumerate(self.bandwidth)]
            self.neigh = neighbors
        # get distances for neighbors
        # NOTE(review): `data` is assigned but never used in this method.
        data = np.array(self.data)
        bw = self.bandwidth

        kdtq = self.kdt.query
        z = []
        for i, nids in enumerate(self.neigh):
            # query returns the k nearest (distances, ids); the dict built
            # from (id -> distance) re-orders distances to match `nids`.
            di, ni = kdtq(self.data[i], k=len(nids))
            # scale each neighbor distance by this point's bandwidth
            zi = np.array([dict(zip(ni, di))[nid] for nid in nids]) / bw[i]
            z.append(zi)
        zs = z
        # functions follow Anselin and Rey (2010) table 5.4
        if self.function == 'triangular':
            self.kernel = [1 - z for z in zs]
        elif self.function == 'uniform':
            self.kernel = [np.ones(z.shape) * 0.5 for z in zs]
        elif self.function == 'quadratic':
            self.kernel = [(3. / 4) * (1 - z ** 2) for z in zs]
        elif self.function == 'quartic':
            self.kernel = [(15. / 16) * (1 - z ** 2) ** 2 for z in zs]
        elif self.function == 'gaussian':
            # c = (2*pi)^(-1/2), the standard normal normalization constant
            c = np.pi * 2
            c = c ** (-0.5)
            self.kernel = [c * np.exp(-(z ** 2) / 2.) for z in zs]
        else:
            # NOTE(review): Python 2 print statement; an unsupported kernel
            # is only reported and leaves self.kernel unset.
            print 'Unsupported kernel function', self.function
Example #26
0
def loadTrajectoryData(inFile = UJILocDataFile):
    """Load one UJI localization trajectory file.

    Returns a dict with:
      waypoints:    np.array of {'lat', 'long', 'index'} dicts, where index
                    is the IMU sample the waypoint corresponds to
      mag / accel / orientSensed: Nx3 arrays of raw 10Hz IMU samples
      gyro:         Nx3 gyro rates derived from the sensed euler angles
      initOrient:   orientation matrix built from the first sample
      seqLen:       index of the last waypoint (0 if no waypoints present)
    """
    # BUG FIX: honor the inFile argument — previously the module-level
    # UJILocDataFile was always opened, silently ignoring the parameter.
    with open(inFile, 'r') as dataFile:
        data = dataFile.read()

    # File layout: an IMU sample section, then a '<'-delimited waypoint
    # section ("<count> ..." followed by one line per waypoint pair).
    segments = data.split("<", 2)

    # One IMU sample per line; fields after the first (presumably a
    # timestamp — TODO confirm) are mag XYZ, accel XYZ, orientation XYZ.
    magArr = []
    oriArr = []
    accelArr = []
    for lineStr in segments[0].split('\n')[:-1]:
        fields = [float(x) for x in lineStr.split(' ', 10)[:-1]]
        magArr.append(fields[1:4])    # xyz mag data for sample
        accelArr.append(fields[4:7])  # xyz accelerometer data for sample
        oriArr.append(fields[7:10])   # xyz orientation (euler angles)

    # Euler angles are awkward for IMU-type calculations, so convert them
    # to gyro rates and capture the initial orientation matrix.
    gyroArr = rawSensorStateProc.orientationToGyro(oriArr)
    initOrientationMatrix = rawSensorStateProc.calcInitialOrientation(oriArr[0])

    # Waypoint section: each line carries two waypoints
    # (lat0 long0 lat1 long1 idx0 idx1); the starting waypoint is only
    # taken from the first line.
    waypoints = []
    waypointStr = segments[1].split(">", 2)
    waypointLns = waypointStr[1].lstrip().split('\n')

    seqLen = 0  # robust default when the waypoint section is empty
    for i, lineStr in enumerate(waypointLns):
        line = [float(x) for x in lineStr.split(' ', WAYPOINTS_ELEMS_PER_LINE)]
        if i == 0:
            waypoints.append({'lat': line[0], 'long': line[1], 'index': line[4]})
        waypoints.append({'lat': line[2], 'long': line[3], 'index': line[5]})
        seqLen = line[5]

    return {'waypoints': np.array(waypoints), 'mag': np.array(magArr),
            'gyro': np.array(gyroArr), 'accel': np.array(accelArr),
            'orientSensed': np.array(oriArr),
            'initOrient': initOrientationMatrix, 'seqLen': seqLen}

# loadTrajectoryData()
Example #27
0
 def restore_sources(self, pubkey, tx):
     """
     Restore the sources of a cancelled tx: drop every source created by
     an output of *tx* paying *pubkey*, then re-insert the inputs *pubkey*
     had spent in it.
     :param sakia.entities.Transaction tx:
     """
     txdoc = TransactionDoc.from_signed_raw(tx.raw)
     # Outputs credited to this key had been registered as sources; remove them.
     for out_index, out in enumerate(txdoc.outputs):
         if out.conditions.left.pubkey != pubkey:
             continue
         self._sources_processor.drop(
             Source(currency=self.currency,
                    pubkey=pubkey,
                    identifier=txdoc.sha_hash,
                    type='T',
                    noffset=out_index,
                    amount=out.amount,
                    base=out.base))
     # Inputs consumed by the tx become spendable again for their owner.
     for tx_input in txdoc.inputs:
         restored = Source(currency=self.currency,
                           pubkey=txdoc.issuers[0],
                           identifier=tx_input.origin_id,
                           type=tx_input.source,
                           noffset=tx_input.index,
                           amount=tx_input.amount,
                           base=tx_input.base)
         if restored.pubkey == pubkey:
             self._sources_processor.insert(restored)
Example #28
0
    def model(self):
        """Build the page model: hostname, DNS/NTP servers, bond settings.

        The hostname falls back to the live network hostname when none is
        stored in the defaults; name- and timeservers are pulled from the
        config files (not the defaults). Entries from self._model_extra
        override everything else.
        """
        model = {"hostname": "",
                 "dns[0]": "",
                 "dns[1]": "",
                 "ntp[0]": "",
                 "ntp[1]": "",
                 "bond.name": "",
                 "bond.slaves.selected": "",
                 "bond.options": defaults.NicBonding.default_options}

        stored_hostname = defaults.Hostname().retrieve()["hostname"]
        model["hostname"] = stored_hostname or network.hostname()

        # Pull name-/timeservers from config files (not defaults); either
        # list may be empty or None.
        for idx, nameserver in enumerate(config.network.nameservers() or []):
            model["dns[%d]" % idx] = nameserver

        for idx, timeserver in enumerate(config.network.timeservers() or []):
            model["ntp[%d]" % idx] = timeserver

        model.update(self._model_extra)

        return model
Example #29
0
 def initializeGUI(self):
     """Build the switch/trigger button grid and wire up server callbacks.

     NOTE(review): the bare `yield self.server...` calls suggest this runs
     under an asynchronous framework (e.g. Twisted inlineCallbacks), each
     yield waiting for the server reply — confirm the decorator outside
     this view.
     """
     # name -> button maps, one per panel row
     self.d = {"Switches": {}, "Triggers": {}}
     # set layout
     layout = QtGui.QGridLayout()
     self.setLayout(layout)
     # get switch names and add them to the layout, and connect their function
     layout.addWidget(QtGui.QLabel("Switches"), 0, 0)
     switchNames = yield self.server.get_switching_channels()
     for order, name in enumerate(switchNames):
         # checkable toggle button, initialized from the server's state
         button = QtGui.QPushButton(name)
         self.d["Switches"][name] = button
         button.setCheckable(True)
         initstate = yield self.server.get_state(name)
         button.setChecked(initstate)
         self.setButtonText(button, name)
         button.clicked.connect(self.buttonConnection(name, button))
         layout.addWidget(button, 0, 1 + order)
     # do same for trigger channels
     layout.addWidget(QtGui.QLabel("Triggers"), 1, 0)
     triggerNames = yield self.server.get_trigger_channels()
     for order, name in enumerate(triggerNames):
         # plain push button: fires its trigger, no state to track
         button = QtGui.QPushButton(name)
         button.clicked.connect(self.triggerConnection(name))
         self.d["Triggers"][name] = button
         layout.addWidget(button, 1, 1 + order)
Example #30
0
    def generate_pdb(self, pdb_name, shift=0, traduction=None):
        """
        Generate a pdb file from the molecules attached to this LSimu object.

        :param pdb_name: output file path
        :param shift: offset applied to the per-molecule chain-letter
            assignment
        :param traduction: maps bead type (as a string key) to its pdb
            atom name (should be a 4-letter name); defaults to the
            standard bead-type table.
        """
        if traduction is None:
            # Default bead-type -> pdb-name table. Built per call instead of
            # living in the signature (mutable default argument).
            traduction = {"1": "bead", "2": "telo", "3": "ribo", "4": "cent",
                          "5": "spbb", "6": "rcut", "7": "scut"}

        letters = string.ascii_letters
        towrite = []
        atomid = 0
        for n, molecule in enumerate(self.molecules):
            # One chain letter per molecule, offset by `shift`, wrapping
            # around the alphabet (hoisted out of the per-bead loop).
            chain_id = letters[(n + shift) % len(letters)]
            for rn, ((x, y, z), type_bead) in enumerate(
                    zip(molecule.coords, molecule.types_beads)):
                atomid += 1
                # Bead type decides the atom name (the dead "bead" default
                # that was immediately overwritten has been removed).
                name = traduction["%i" % type_bead]
                residue_name = "bea"
                towrite.append(self.one_pdb_atom(
                    x, y, z, atomid, name, residue_name, rn, chain_id))

        with open(pdb_name, "w") as f:
            f.write("".join(towrite))