Code example #1
File: pod.py Project: JaimeLeal/projects
def plot_energy(S, filename):
    """Plot per-mode energy (S**2) and cumulative energy; save to <filename>.png."""
    cen = np_array(S**2).cumsum() / np_array(S**2).sum() * 100
    DPI = 100
    fig = plt.figure()

    # Energy subplot
    ax1 = fig.add_subplot(2, 1, 1)
    ax1.plot(S**2, "o-", linewidth=1)
    ax1.set_yscale("log")
    ax1.set_title("Basis vector vs Energy")
    ax1.set_xlabel("Basis vector number")
    ax1.set_ylabel("Energy")
    ax1.set_xlim(left=0)
    ax1.grid(True)

    # Cumulative energy subplot
    ax2 = fig.add_subplot(2, 1, 2)
    ax2.plot(cen, "o-", linewidth=1)
    ax2.axis([0, None, 90, 101])
    ax2.set_title("Cumulative Energy")
    ax2.set_xlabel("Basis vector number")
    ax2.set_ylabel("Cumulative Energy")
    ax2.grid(True)

    plt.tight_layout()
    plt.savefig('{}.png'.format(filename), bbox_inches='tight', dpi=DPI)
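A minimal usage sketch (not from the original project), assuming the module-level aliases the function relies on (np_array = numpy.array, plt = matplotlib.pyplot) and taking S from an SVD of a snapshot matrix, as is typical for POD energy plots:

import numpy as np
import matplotlib.pyplot as plt
from numpy import array as np_array

snapshots = np.random.rand(200, 30)        # rows: spatial points, cols: snapshots
_, S, _ = np.linalg.svd(snapshots, full_matrices=False)
plot_energy(S, "pod_energy")               # writes pod_energy.png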
Code example #2
File: helper_functions.py Project: erdc-cm/RAPIDpy
def compare_csv_decimal_files(file1, file2, header=True, timeseries=False):
    """
    Compare two csv files, asserting equal row counts, matching headers
    (when header=True), matching date columns (when timeseries=True), and
    numeric values equal to 2 decimal places.
    """
    # CHECK NUM LINES
    with open_csv(file1) as fh1, \
         open_csv(file2) as fh2:
        assert sum(1 for line1 in fh1) == sum(1 for line2 in fh2)

    with open_csv(file1) as fh1, \
         open_csv(file2) as fh2:
        csv1 = csv.reader(fh1)
        csv2 = csv.reader(fh2)

        if header:
            assert next(csv1) == next(csv2)  # header

        while True:
            try:
                row1 = next(csv1)
                row2 = next(csv2)
                compare_start_index = 0
                if timeseries:
                    assert row1[0] == row2[0]  # check dates
                    compare_start_index = 1

                assert_almost_equal(np_array(row1[compare_start_index:], dtype=np_float32),
                                    np_array(row2[compare_start_index:], dtype=np_float32),
                                    decimal=2)
            except StopIteration:
                break
    return True
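A hypothetical call (file names are illustrative); open_csv is the module's own file-opening helper, so this assumes the same module context:

# Both files must have the same row count; with timeseries=True the first
# column is compared as a date string and the rest to 2 decimal places.
assert compare_csv_decimal_files('run_a.csv', 'run_b.csv',
                                 header=True, timeseries=True)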
Code example #3
File: profileManager.py Project: jnesme/GroopM-1
    def __init__(self, dbFileName, force=False, scaleFactor=1000):
        # data
        self.dataManager = GMDataManager()  # most data is saved to hdf
        self.dbFileName = dbFileName        # db containing all the data we'd like to use
        self.condition = ""                 # condition will be supplied at loading time
        # --> NOTE: ALL of the arrays in this section are in sync
        # --> each one holds information for an individual contig 
        self.indices = np_array([])        # indices into the data structure based on condition
        self.covProfiles = np_array([])     # coverage based coordinates
        self.transformedCP = np_array([])   # the munged data points
        self.averageCoverages = np_array([]) # average coverage across all stoits
        self.kmerSigs = np_array([])        # raw kmer signatures
        self.kmerVals = np_array([])        # PCA'd kmer sigs

        self.contigNames = np_array([])
        self.contigLengths = np_array([])
        self.contigColours = np_array([])   # calculated from kmerVals
        
        self.binIds = np_array([])          # list of bin IDs
        # --> end section

        # meta                
        self.validBinIds = {}               # valid bin ids -> numMembers
        self.binnedRowIndicies = {}         # dictionary of those indices which belong to some bin
        self.restrictedRowIndicies = {}     # dictionary of those indices which can not be binned yet
        self.numContigs = 0                 # this depends on the condition given
        self.numStoits = 0                  # this depends on the data which was parsed

        # contig links
        self.links = {}
        
        # misc
        self.forceWriting = force           # overwrite existing values silently?
        self.scaleFactor = scaleFactor      # scale everything in the transformed data to this dimension
Code example #4
def real_imaginary_freq_domain(samples):
  """
  Apply fft on the samples and return the real and imaginary
  parts in separate arrays.
  """
  freq_domain = fft(samples)
  freq_domain_real = np_array([abs(x.real) for x in freq_domain])
  freq_domain_imag = np_array([abs(x.imag) for x in freq_domain])

  return freq_domain_real, freq_domain_imag
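A short sketch of calling it, assuming fft is numpy's (the snippet only shows the alias) and np_array = numpy.array in the same module:

import numpy as np
from numpy.fft import fft
from numpy import array as np_array

samples = np.sin(2 * np.pi * 5 * np.linspace(0, 1, 256))   # a 5 Hz tone
re, im = real_imaginary_freq_domain(samples)
assert re.shape == im.shape == samples.shape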
Code example #5
    def __init_statistics(self):
        stats = self.raw_stats
        if stats is not None:
            combined = np_array([[int(team), stats['oprs'][team], stats['dprs'][team],
                                  stats['ccwms'][team]] for team in stats['oprs'].keys()], np_object)
        else:
            teams = self.get_team()[:, 0]
            num_teams = len(teams)
            combined = np_rot90(
                np_array([teams, np_zeros(num_teams), np_zeros(num_teams), np_zeros(num_teams)], np_object))[::-1]
        self.stats = combined
Code example #6
File: featurecam.py Project: zeraholladay/pygames
def centroidfinder(cvimage, color, threshold):
    # NOTE: uses the legacy OpenCV 1.x 'cv' API; kmeans2 is seeded with the
    # four image corners as initial centroids.
    lo = [ c - t for c, t in zip(color, threshold) ]
    hi = [ c + t for c, t in zip(color, threshold) ]
    mat = cv.CreateMat(cvimage.height, cvimage.width, cv.CV_8U)
    cv.InRangeS(cvimage, lo, hi, mat)
    data = [ [x, y] for x, y in product(range(mat.height), range(mat.width))
             if int(mat[x, y]) ]
    np_data = np_array(data)
    np_centroids = np_array( [ [0, 0], [0, mat.width],
                            [mat.height, 0], [mat.height, mat.width] ])
    centroids, labels = kmeans2(np_data, np_centroids)
    return [ (x, y) for x, y in centroids.tolist() ]
Code example #7
    def _interp_line(self, line):
        # collapse the line's view handles into x/y coordinate lists
        (x, y) = self._handle2points(line)

        # interpolate y at every integer x between the first and last handle
        xi = range(int(x[0]), int(x[-1]) + 1)
        yi = np_interp(np_array(xi), np_array(x), np_array(y))

        return (xi, yi)
Code example #8
    def __init_matches(self):

        for match_type, var in [['qm', 'qualification_matches'], ['qf', 'quarter_final_matches'],
                                ['sf', 'semi_final_matches'], ['f', 'final_matches']]:
            num_matches = self.__count_matches(self.raw_matches, match_type)
            if num_matches != 0:
                red_teams = np_zeros((num_matches,), np_object)
                blue_teams = np_zeros((num_matches,), np_object)
                blue_scores = np_zeros((num_matches,), np_object)
                red_scores = np_zeros((num_matches,), np_object)
                match_code = np_zeros((num_matches,), np_object)
                match_numbers = np_arange(1, num_matches + 1, 1)

                for match in self.raw_matches:
                    if match['comp_level'] == match_type:
                        match_num = match['match_number'] - 1

                        red_teams[match_num] = [np_int(match['alliances']['red']['teams'][0][3:]),
                                                np_int(match['alliances']['red']['teams'][1][3:]),
                                                np_int(match['alliances']['red']['teams'][2][3:])]

                        red_scores[match_num] = [-1 if match['alliances']['red']['score'] is None
                                                 else match['alliances']['red']['score'],
                                                 -1 if match['score_breakdown']['red']['auto'] is None
                                                 else match['score_breakdown']['red']['auto'],
                                                 -1 if match['score_breakdown']['red']['foul'] is None
                                                 else match['score_breakdown']['red']['foul']]

                        blue_teams[match_num] = [np_int(match['alliances']['blue']['teams'][0][3:]),
                                                 np_int(match['alliances']['blue']['teams'][1][3:]),
                                                 np_int(match['alliances']['blue']['teams'][2][3:])]

                        blue_scores[match_num] = [-1 if match['alliances']['blue']['score'] is None
                                                  else match['alliances']['blue']['score'],
                                                  -1 if match['score_breakdown']['blue']['auto'] is None
                                                  else match['score_breakdown']['blue']['auto'],
                                                  -1 if match['score_breakdown']['blue']['foul'] is None
                                                  else match['score_breakdown']['blue']['foul']]
                        match_code[match_num] = match['key']

                red_win = np_array(red_scores.tolist())[:, 0] > np_array(blue_scores.tolist())[:, 0]
                winner = np_array(['blue'] * len(red_win))
                winner[red_win] = 'red'

                self.__setattr__(var,
                                 np_rot90(np_array([[match_type] * num_matches, match_numbers, red_teams, blue_teams,
                                                    red_scores, blue_scores, winner, match_code], np_object))[::-1])
Code example #9
File: offline.py Project: FomkaV/wifi-arsenal
def updateAlgoData():
    """
    Update from raw data into FPs directly used by location.fixPosWLAN() from WppDB(wpp_clusterid, wpp_cfps).
    1) Retrieve latest incremental rawdata(csv) from remote FTP server(hosted by FPP).
    2) Decompress bzip2, import CSV into wpp_uprecsinfo with its ver_uprecs, Update ver_uprecs in wpp_uprecsver.
    3) Incr clustering inserted rawdata for direct algo use.
    """
    # NOTE: Python 2 code throughout (print statements, xrange, 'except Error, e').
    dbips = DB_OFFLINE
    for dbip in dbips:
        dbsvr = dbsvrs[dbip]
        wppdb = WppDB(dsn=dbsvr['dsn'], dbtype=dbsvr['dbtype'])
        ver_wpp = wppdb.getRawdataVersion()
        # Sync rawdata into wpp_uprecsinfo from remote FTP server.
        print 'Probing rawdata version > [%s]' % ver_wpp
        vers_fpp,localbzs = syncFtpUprecs(FTPCFG, ver_wpp)
        if not vers_fpp: print 'Not found!'; continue
        else: print 'Found new vers: %s' % vers_fpp
        # Handle each bzip2 file.
        alerts = {'vers':[], 'details':''}
        tab_rd = 'wpp_uprecsinfo'
        for bzfile in localbzs:
            # Filter out the ver_uprecs info from the name of each bzip file.
            ver_bzfile = bzfile.split('_')[-1].split('.')[0]
            # Update ver_uprecs in wpp_uprecsver to ver_bzfile.
            wppdb.setRawdataVersion(ver_bzfile)
            print '%s\nUpdate ver_uprecs -> [%s]' % ('-'*40, ver_bzfile)
            # Decompress bzip2.
            sys.stdout.write('Decompress & append rawdata ... ')
            csvdat = csv.reader( BZ2File(bzfile) )
            try:
                indat = np_array([ line for line in csvdat ])
            except csv.Error, e:
                sys.exit('\n\nERROR: %s, line %d: %s!\n' % (bzfile, csvdat.line_num, e))
            # Append ver_uprecs(auto-incr),area_ok(0),area_try(0) to raw 16-col fp.
            append_info = np_array([ [ver_bzfile,0,0] for i in xrange(len(indat)) ])
            indat_withvers = np_append(indat, append_info, axis=1).tolist(); print 'Done'
            # Import csv into wpp_uprecsinfo.
            try:
                sys.stdout.write('Import rawdata: ')
                wppdb.insertMany(table_name=tab_rd, indat=indat_withvers, verb=True)
            except Exception, e:
                _lineno = sys._getframe().f_lineno
                _file = sys._getframe().f_code.co_filename
                alerts['details'] += '\n[ver:%s][%s:%s]: %s' % \
                        (ver_bzfile, _file, _lineno, str(e).replace('\n', ' '))
                alerts['vers'].append(ver_bzfile)
                print 'ERROR: Insert Rawdata Failed!'
                continue
Code example #10
File: png.py Project: zack-vii/archive
    def iterboxed(self, rows):
        """Iterator that yields each scanline in boxed row flat pixel
        format.  `rows` should be an iterator that yields the bytes of
        each row in turn.
        """

        def asvalues(raw):
            """Convert a row of raw bytes into a flat row.  Result will
            be a freshly allocated object, not shared with
            argument.
            """

            if self.bitdepth == 8:
                return np_array(raw, 'uint8')
            if self.bitdepth == 16:
                raw = tostring(raw)
                return np_array(struct.unpack('!%dH' % (len(raw)//2), raw), 'uint%d' % self.bitdepth)
            # bit depths below 8: unpack the samples packed several to a byte
            assert self.bitdepth < 8
            width = self.width
            # Samples per byte
            spb = 8//self.bitdepth
            out = array('B')
            mask = 2**self.bitdepth - 1
            shifts = [self.bitdepth * i
                for i in reversed(list(range(spb)))]
            for o in raw:
                out.extend([mask&(o>>i) for i in shifts])
            return out[:width]

        return np_array(list(map(asvalues, rows)))  # list() so this also works on Python 3
Code example #11
def get_zone_count_estimates(location_id, door_count_placement_view_pair, start_date, end_date, adjusted=False):
  """Iterates through .csv files to return a list of (datetime, zone_count)
  ARGS
    location_id: location_id of installation, eg '55'
    door_count_placement_view_pair: placement and view id pair, e.g. ('3333230','0')
    start_date: in format YYYY-MM-DD, <datetime>
    end_date: in format YYYY-MM-DD. range is exclusive '<'. <datetime>
    adjusted: to select between raw data or adjusted <bool>. if adjusted is chosen but not available, returns raw.
  RETURN
    array with (datetime, zone_count) tuples
  """
  datetime_zone_count_pairs = []
  day = timedelta(days = 1)

  curr_day = start_date

  while curr_day < end_date:
    date_str = date2str(curr_day, "%Y-%m-%d")
    fullpath = ANALYSIS_FOLDER_GLOBAL+str(location_id)+'/'+gtfilename(location_id,door_count_placement_view_pair,curr_day)
    if DEBUG:
      print 'get_zone_count_estimates: reading file:', fullpath
    data = read_csv(fullpath)
    for idx in range(len(data)):
      ts = utc.localize(get_datetime_from_csv_row(data[idx]), is_dst=None).astimezone(utc)
      if ts >= start_date and ts < end_date:
        datetime_zone_count_pairs.append(get_zone_count(data[idx], adjusted))
    curr_day += day
  datetime_zone_count_pairs = np_array(datetime_zone_count_pairs)
  return datetime_zone_count_pairs
Code example #12
def amplitude_regularization(signal, bits=16, factor=0.7):
  """
  ARGS: 
    signal: signal amplitudes, should be in the range [-1.0, 1.0], numpy array of numbers
    bits: bit-depth value, <int>
    factor: 0.7 by default, as suggested by Gerald Friedland @ ICSI
  RETURN: 
    regularized: amplitude regularized signal, <number> or numpy array of numbers
  """
  if isinstance(signal, list):
    signal = np_array(signal)
  elif isinstance(signal, (int, long, float, complex)):  # 'long' is Python 2; alias long = int on Python 3
    raise Exception("Invalid arg")
    
  # convert amplitude [-1.0, 1.0] to N-bit samples
  half_n_bits = 2**(bits-1)
  signal_scaled_to_n_bits = (signal + 1) * half_n_bits

  # regularize
  regularized = signal_scaled_to_n_bits ** factor

  # scale back to [-1.0,1.0]
  regularized -=  half_n_bits
  regularized /= half_n_bits
  return regularized
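A usage sketch under two assumptions: the call happens in the function's own module, and on Python 3 a long = int shim covers the py2-era isinstance check:

import numpy as np
from numpy import array as np_array

long = int   # Python 3 shim for the py2-era isinstance check above

signal = np.sin(2 * np.pi * np.linspace(0, 1, 1000))   # amplitudes in [-1, 1]
reg = amplitude_regularization(signal)                 # default bits=16, factor=0.7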
Code example #13
File: cluster.py Project: AlexRBaker/RefineM
    def pca(self, data_matrix):
        """Perform PCA.

        Principal components are given in self.pca,
        and the variance in self.variance.

        Parameters
        ----------
        data_matrix : list of lists
          List of tetranucleotide signatures
        """

        cols = len(data_matrix[0])
        data_matrix = np_reshape(np_array(data_matrix), (len(data_matrix), cols))

        pca = PCA()
        pc, variance = pca.pca_matrix(data_matrix, 3, bCenter=True, bScale=False)

        # ensure pc matrix has at least 3 dimensions
        if pc.shape[1] == 1:
            pc = np_append(pc, np_zeros((pc.shape[0], 2)), 1)
            variance = np_append(variance[0], np_ones(2))
        elif pc.shape[1] == 2:
            pc = np_append(pc, np_zeros((pc.shape[0], 1)), 1)
            variance = np_append(variance[0:2], np_ones(1))

        return pc, variance
Code example #14
File: _steffensen.py Project: chrislit/abydos
    def sim(self, src, tar):
        """Return the Steffensen similarity of two strings.

        Parameters
        ----------
        src : str
            Source string (or QGrams/Counter objects) for comparison
        tar : str
            Target string (or QGrams/Counter objects) for comparison

        Returns
        -------
        float
            Steffensen similarity

        Examples
        --------
        >>> cmp = Steffensen()
        >>> cmp.sim('cat', 'hat')
        0.24744247205786737
        >>> cmp.sim('Niall', 'Neil')
        0.1300991207720166
        >>> cmp.sim('aluminum', 'Catalan')
        0.011710186806836031
        >>> cmp.sim('ATCG', 'TAGC')
        4.1196952743871653e-05


        .. versionadded:: 0.4.0

        """
        if src == tar:
            return 1.0
        if not src or not tar:
            return 0.0

        self._tokenize(src, tar)

        a = self._intersection_card()
        b = self._src_only_card()
        c = self._tar_only_card()
        d = self._total_complement_card()
        n = a + b + c + d

        p = np_array([[a, b], [c, d]]) / n

        psisq = 0.0

        for i in range(len(p)):
            pi_star = p[i, :].sum()
            for j in range(len(p[i])):
                pj_star = p[:, j].sum()
                num = p[i, j] * (p[i, j] - pi_star * pj_star) ** 2
                if num:
                    psisq += num / (
                        pi_star * (1 - pi_star) * pj_star * (1 - pj_star)
                    )

        return psisq
Code example #15
def hz2mel(f):
  """ Convert a number or numpy numerical array of frequencies in Hz into mel
  ARGS: Frequency or array of frequencies,  <number> or numpy array of numbers
  RETURN: Mel frequency(ies), <number> or numpy array of numbers
  """
  if isinstance(f, list):
    f = np_array(f)
  return 1127.01048 * log(f / 700.0 + 1)
Code example #16
def get_sample_energy(samples):
  """
  ARGS:
    samples: samples of a signal
  """
  if isinstance(samples, list) or isinstance(samples, tuple):
    samples = np_array(samples)
  return sum(samples**2)
Code example #17
def mel2hz(m):
  """ Convert a number or numpy numerical array of frequency in mel into Hz
  ARGS: Mel Frequency or array of mel frequencies,  <number> or numpy array of numbers
  RETURN: frequency(ies) in Hz, <number> or numpy array of numbers
  """
  if isinstance(m, list):
    m = np_array(m)
  return (exp(m / 1127.01048) - 1) * 700
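mel2hz inverts hz2mel (code example #15); a quick round-trip check, assuming log/exp are numpy's, as the array support implies, and both helpers share a module:

import numpy as np
from numpy import array as np_array, log, exp

freqs = [440.0, 1000.0, 4000.0]
assert np.allclose(mel2hz(hz2mel(freqs)), freqs)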
Code example #18
File: chassis.py Project: BaseBot/triangula
    def __init__(self, wheels):
        """
        Create a new chassis, specifying a set of wheels.

        :param wheels:
            A sequence of :class:`triangula.chassis.HoloChassis.OmniWheel` objects defining the wheels for this chassis.
        """
        self.wheels = wheels
        self._matrix_coefficients = np_array([[wheel.co_x, wheel.co_y, wheel.co_theta] for wheel in self.wheels])
Code example #19
File: profileManager.py Project: jnesme/GroopM-1
    def makeColourProfile(self):
        """Make a colour profile based on ksig information"""
        working_data = np_array(self.kmerSigs, copy=True)
        Center(working_data, verbose=0)
        p = PCA(working_data)
        components = p.pc()

        # now make the colour profile based on PC1
        self.kmerVals = np_array([float(i) for i in components[:, 0]])

        # normalise to fit between 0 and 1
        self.kmerVals -= np_min(self.kmerVals)
        self.kmerVals /= np_max(self.kmerVals)
        if False:  # debug plot of the first two PCs (disabled)
            plt.figure(1)
            plt.subplot(111)
            plt.plot(components[:, 0], components[:, 1], 'r.')
            plt.show()
Code example #20
    def init_classifier_fn(self, **kwargs):
        cs_df = self._academic_clusterer.courses_features
        AcademicFailureEstimator.COURSES = cs_df['course'].values
        
        se_df = self._academic_clusterer.semesters_features
        sf_df = self._academic_clusterer.students_features
        gpa_df = self._academic_clusterer.ha_df.drop_duplicates(['student','GPA'])
        ss_df = pd_merge( se_df, sf_df, on='student' )
        ss_df = pd_merge( ss_df, gpa_df, on='student' )
        ss_df = pd_merge( ss_df, cs_df, on='course' )
        
        data = ss_df.apply( self.get_ss_features, axis=1 )
        data = np_array( data.tolist() )
        X = data
        y = ss_df['ha_reprobado'].apply(lambda x: 0 if x else 1).values

        # H = np_unique( X[:,0] )
        # H = np_array( [ H, np_zeros( len(H) ) ] ).T
        # l = np_ones( len( H ) )
        # X = np_append( X, H, axis=0)
        # y = np_append( y, l )

        X_train, X_test, y_train, y_test = train_test_split(X,
                                                            y,
                                                            test_size=0.30,
                                                            random_state=7)

        # logreg = LogisticRegression(random_state=7)
        logreg = AdaBoostClassifier(random_state=10)
        logreg = CalibratedClassifierCV( logreg, cv=2, method='sigmoid')
        # logreg = GaussianNB()
        logreg.fit(X, y)
        logreg_prob = logreg.predict_proba
        logreg_predict = logreg.predict

        y_pred = logreg.predict(X_test)
        recall = recall_score(y_test, y_pred)

        def quality(data):
            _z_ = logreg_predict(data)[0]
            sample = X[ y==_z_ ]
            sample_ = X[ y==(1-_z_) ]
            d = np_linalg_norm( [data] - sample )
            d_ = np_linalg_norm( [data] - sample_ )
            r = np_max( d_ )/np_max( d )
            # r = np_mean( d )/np_mean( d_ )
            # r = np_min( d )/np_min( d_ )
            # r = 0.5 * ( r + recall )
            if r > 1:
                r = abs( 1-r )
            r = 0.5 * ( r + recall )
            return str( r )
        
        clf = lambda data: [ logreg_prob( data ), quality(data) ]
        self._clf = clf
Code example #21
    def get_team_rankings(self, team='all'):
        # ['Rank' 'Team' 'Qual Avg' 'Auto' 'Container' 'Coopertition' 'Litter' 'Tote' 'Played']
        request = np.array(self.raw_rankings[1:]).astype(int)  # np.int (removed in numpy 1.24) was just the builtin int
        if team == 'all':        # '==' rather than 'is': identity checks against literals are unreliable
            return request
        getter = request[request[:, 1] == team]
        if len(getter) != 0:
            return getter[0]
        return np_array([0, team, 0, 0, 0, 0, 0, 0, 0])
Code example #22
File: classify.py Project: billyprice1/dump-scraper
    def run(self):
        dump_logger = getLogger('dumpscraper')
        # Let's invoke the getscore runner and tell him to work on training data
        dump_logger.info("Calculating dump score...")
        running = getscore.DumpScraperGetscore(self.settings, self.parentArgs)
        running.run()

        # First of all let's feed the classifier with the training data
        training = scipy_genfromtxt(self.settings['data_dir'] + "/" + "training/features.csv", delimiter=",", skip_header=1, usecols=(0, 1, 2))
        target = scipy_genfromtxt(self.settings['data_dir'] + "/" + "training/features.csv", delimiter=",", skip_header=1, usecols=(-2))

        clf = sklearn.neighbors.KNeighborsClassifier(10, weights='uniform')
        clf.fit(training, target)

        trash_count = hash_count = plain_count = 0
        cleared = []

        # 'rb' mode for csv.reader assumes Python 2
        with open(self.settings['data_dir'] + "/" + 'features.csv', 'rb') as csvfile:
            reader = csv_reader(csvfile)

            for line in reader:
                if line[0] == 'Trash score':
                    continue

                features = np_array(line[0:3])
                features = features.reshape(1, -1)
                label = clf.predict(features)

                if label == 0:
                    folder = 'trash'
                    trash_count += 1
                elif label == 1:
                    folder = 'hash'
                    hash_count += 1
                elif label == 2:
                    folder = 'plain'
                    plain_count += 1

                target_file = self.settings['data_dir'] + "/" + 'organized/' + folder + "/" + line[-1]
                target_dir = path.dirname(target_file)

                # If asked for a clean run, let's delete the entire folder before copying any file
                if self.parentArgs.clean and target_dir not in cleared and path.exists(target_dir):
                    cleared.append(target_dir)
                    shutil_rmtree(target_dir)

                if not path.exists(target_dir):
                    makedirs(target_dir)

                shutil_copyfile(self.settings['data_dir'] + "/" + 'raw/' + line[-1], target_file)

        dump_logger.info("Trash files: " + str(trash_count))
        dump_logger.info("Hash files: " + str(hash_count))
        dump_logger.info("Plain files: " + str(plain_count))
        dump_logger.info("Operation completed")
Code example #23
def ftom(f):
  """ convert frequency to midi note 
  ARGS 
    f: frequency(ies) in Hz > 0, <number> or numpy number array
  RETURN
    m: MIDI note with A 440 as reference, <float>
  """
  if isinstance(f, list):
    f = np_array(f)
  m = 17.3123405046 * log(.12231220585 * f) 
  return m
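Sanity check, assuming numpy's log in the same module: A440 should map to MIDI note 69.

from numpy import log, array as np_array

print(ftom(440.0))             # -> ~69.0  (A4 is MIDI 69)
print(ftom([261.63, 440.0]))   # -> approx [60., 69.]  (middle C, A4)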
Code example #24
    def _get_numeric_node_col_type_numeric(self, array):
        """array: input array, sorted by the numeric column"""
        split = self.get_numeric_col_split(array)
        if split.split_index is None:
            return None, None
        thr = (array[split.split_index - 1, 0] + array[split.split_index, 0]) / 2
        left_indices = array[:split.split_index, 3]
        right_indices = array[split.split_index:, 3]
        indices = {'left': left_indices, 'right': right_indices}
        indices = {k: np_array(v).astype(int) for k, v in indices.items()}
        return NumericBinaryNode(array.shape[0], split.impurity, self.col_name, thr), indices
Code example #25
def extractDAISY(images):
	featureVecs = []

	for image in images:
		featureVecs.append(feature.daisy(image,
										 step=8,
										 radius=8,
										 rings=3).flatten())

	featureVecs = np_array(featureVecs)
	return featureVecs
Code example #26
    def equilibrium_pts(self):

        # Finds equilibrium points as [M, N, Z]

        from numpy import array as np_array
        from numpy.linalg import solve as np_solve

        g = self.k2 * (self.r2 - self.d2) / self.r2

        A = np_array([[self.r1 / self.k1, self.a1, 0],
                      [-self.a2, 0, self.b],
                      [0, self.b, self.r2 / self.k2]])
        c = np_array([self.r1, self.d1, self.r2 - self.d2])

        self.E0, self.E1, self.E2, self.E3 = [3 * [0], [self.k1, 0, 0], [0, 0, g],
                                              [self.k1, 0, g]]

        self.E4 = [0, (self.b * self.r2 * g - self.r2 * self.d1) / (self.k2 * self.b ** 2),
                   self.d1 / self.b]

        self.E5 = np_solve(A, c).tolist()
Code example #27
File: bin.py Project: zhaoxia413/GroopM
def mungeCbar(cbar):
    '''Set dims of the colorbar by hacking the mainframe and coping with
    different matplotlib versions'''
    try:
        # older matplotlib
        cbar.outline.set_ydata([0.15] * 2 + [0.85] * 4 + [0.15] * 3)
    except AttributeError:
        # matplotlib 1.4.x
        tmp_xy = cbar.outline.get_xy()
        tmp_xy[:, 1] = np_array(([0.15] * 2 + [0.85] * 4 + [0.15] * 3))
        cbar.outline.set_xy(tmp_xy)
Code example #28
    def getBreakpointsByCardinality(self, cardinality):

        if cardinality not in self.breakpointsByCardinality:
            frac = 1.0 / cardinality
            list_percent = []
            for i_fl in np_arange(frac, 1.0, frac):
                list_percent.append(i_fl)
            self.breakpointsByCardinality[cardinality] = (
                np_array(norm.ppf(list_percent)) * self.std + self.mean)

        return self.breakpointsByCardinality[cardinality]
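The cached values are the SAX breakpoints: the cardinality-1 Gaussian quantiles of N(mean, std). A standalone sketch of the same computation (assuming scipy.stats.norm, which the norm.ppf call implies):

from numpy import arange as np_arange, array as np_array
from scipy.stats import norm

cardinality, mean, std = 4, 0.0, 1.0
percents = np_arange(1.0 / cardinality, 1.0, 1.0 / cardinality)
print(np_array(norm.ppf(percents)) * std + mean)   # approx [-0.674  0.  0.674]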
Code example #29
def numpy_full_list(array, desired_length):
    '''returns array with desired length by repeating the last item'''
    if not isinstance(array, ndarray):
        array = np_array(array)

    length_diff = desired_length - array.shape[0]

    if length_diff > 0:
        new_part = np_repeat(array[np_newaxis, -1], length_diff, axis=0)
        return np_concatenate((array, new_part))[:desired_length]
    return array[:desired_length]
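Behavior sketch (same-module assumption for the numpy aliases): pad by repeating the last item, truncate when too long.

from numpy import (array as np_array, ndarray, repeat as np_repeat,
                   newaxis as np_newaxis, concatenate as np_concatenate)

print(numpy_full_list([1, 2, 3], 5))   # -> [1 2 3 3 3]
print(numpy_full_list([1, 2, 3], 2))   # -> [1 2]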
Code example #30
File: bin.py Project: Ecogenomics/GroopM
def mungeCbar(cbar):
    '''Set dims of the colorbar by hacking the mainframe and coping with
    different matplotlib versions'''
    try:
        # older matplotlib
        cbar.outline.set_ydata([0.15] * 2 + [0.85] * 4 + [0.15] * 3)
    except AttributeError:
        # matplotlib 1.4.x
        tmp_xy = cbar.outline.get_xy()
        tmp_xy[:,1] = np_array(([0.15] * 2 + [0.85] * 4 + [0.15] * 3))
        cbar.outline.set_xy(tmp_xy)
Code example #31
    def read_input(self, sample_name: str, start_idx: int, end_idx: int):

        self.__check_indexes(start_idx, end_idx)

        full_transform = self.read_full_transform(sample_name)
        if full_transform.shape[0] < self.max_time_steps * self.time_size:
            return np_array([])

        x = self.normalize_input(full_transform[start_idx:end_idx, :])
        return np_reshape(
            x, (1, self.max_time_steps, self.time_size, self.freq_size))
Code example #32
    def get_hist_sub_image(self, offset, dimensions):
        x, y = offset
        width, height = dimensions
        sub = self.source[y:y + height, x:x + width]
        hist_values = histogram1d(sub,
                                  bins=self.no_of_colors,
                                  range=[0, self.no_of_colors])

        def to_hist_value(color):
            return hist_values[color]

        return np_array(to_hist_value(sub))
Code example #33
def pre_emphasis(signal, p):
  """ Apply pre-emphasis filter
  ARGS
    signal: signal amplitudes, should be in the range [-1.0,1.0], array of numbers
    p: pre-emphasis coefficient, <number>
  RETURN
    Filtered signal, array of numbers
  """
  if isinstance(signal, list):
    signal = np_array(signal)
  elif isinstance(signal, (int, long, float, complex)):  # 'long' is Python 2; alias long = int on Python 3
    raise Exception("Invalid arg")
  return scipy.signal.lfilter([1., -p], 1, signal)
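A sketch with a typical coefficient (p around 0.95-0.97 is conventional for speech), under the same-module and long-shim assumptions as above:

import numpy as np
import scipy.signal
from numpy import array as np_array

long = int   # Python 3 shim for the py2-era isinstance check

signal = np.sin(2 * np.pi * np.linspace(0, 1, 1000))
emphasized = pre_emphasis(signal, 0.97)   # y[n] = x[n] - 0.97 * x[n-1]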
Code example #34
    def log_transitions(self):
        if len(self.memory) > 0:
            basename = self.logdir + "/{}.{}".format(
                self.environment_name,
                datetime.now().strftime("%Y-%m-%d-%H-%M-%S"))  # '%S' = seconds; the original '%s' is a non-portable epoch-seconds extension
            print("Base Filename: ", basename, flush=True)
            state, action, reward, next_state, done = zip(*self.memory)
            np_save(basename + "-state.npy",
                    np_array(state),
                    allow_pickle=False)
            np_save(basename + "-action.npy",
                    np_array(action),
                    allow_pickle=False)
            np_save(basename + "-reward.npy",
                    np_array(reward),
                    allow_pickle=False)
            np_save(basename + "-nextstate.npy",
                    np_array(next_state),
                    allow_pickle=False)
            np_save(basename + "-done.npy", np_array(done), allow_pickle=False)
            self.memory.clear()
Code example #35
    def read_txt(self, txt_name: str, txt_dir='') -> np_ndarray:
        txt_path = self._standard_check(txt_name, txt_dir)

        with open(txt_path, 'r') as txt:
            lines = txt.readlines()

        lines_list = []
        for line in lines:
            data = [txt_name.replace('.gt.txt', '')] + line.split()
            lines_list.append(data)

        return np_array(lines_list)
Code example #36
    def set_data(self):
        data_len = self.col_mat.shape[0]*self.col_mat.shape[1]
        self.pos = np_empty((data_len, 3))
        self.size = np_ones((data_len))*.1
        self.color = np_empty((data_len, 4))

        jj = 0
        for ii in range(len(self.y)):
            for zz in range(len(self.z)):
                self.pos[jj,:] = np_array((self.x[ii]/self.kx,self.y[ii]/self.ky,zz/self.kz))
                self.color[jj,:] = self.col_mat[ii,zz]/255.
                jj = jj + 1
Code example #37
File: delay_cc.py Project: mbhatt1/QNET
    def _toSLH(self):

        # These numerically optimal solutions were obtained as outlined in
        # my blog post on the Mabuchi-Lab internal blog;
        # email me ([email protected]) for details.
        if self.N == 1:
            kappa0 = 9.28874141848 / self.tau
            kappas = np_array([7.35562929]) / self.tau
            Deltas = np_array([3.50876192]) / self.tau
        elif self.N == 3:
            kappa0 = 14.5869543803 / self.tau
            kappas = np_array([ 13.40782559, 9.29869721]) / self.tau
            Deltas = np_array([3.48532283, 7.14204585]) / self.tau
        elif self.N == 5:
            kappa0 = 19.8871474779 / self.tau
            kappas = np_array([19.03316217, 10.74270752, 16.28055664]) / self.tau
            Deltas = np_array([3.47857213, 10.84138821, 7.03434809]) / self.tau
        else:
            raise NotImplementedError("The number of cavities to realize the delay must be one of 1,3 or 5.")

        h0 = make_namespace_string(self.name, 'C0')
        hp = [make_namespace_string(self.name, "C{:d}p".format(n+1)) for n in range((self.N-1)//2)]  # // keeps integer division on Python 3
        hm = [make_namespace_string(self.name, "C{:d}m".format(n+1)) for n in range((self.N-1)//2)]


        S = Matrix([1.])
        slh0 = SLH(S, Matrix([[sqrt(kappa0) * Destroy(h0)]]), ZeroOperator)
        slhp = [SLH(S, Matrix([[sqrt(kj) * Destroy(hj)]]), Dj * Create(hj) * Destroy(hj)) for (kj, Dj, hj) in zip(kappas, Deltas, hp)]
        slhm = [SLH(S, Matrix([[sqrt(kj) * Destroy(hj)]]), -Dj * Create(hj) * Destroy(hj)) for (kj, Dj, hj) in zip(kappas, Deltas, hm)]

        return freduce(lambda a, b: a << b, slhp + slhm, slh0)
Code example #38
File: _node.py Project: luk-f/pyCFOFiSAX
    def __init__(self, tree, parent, sax, cardinality):
        """
        Initialization function of the rootnode class

        :returns: a root node
        :rtype: RootNode
        """

        self.iSAX_word = np_array([sax, cardinality]).T

        Node.__init__(self, parent=parent, name=str(self.iSAX_word))

        self.tree = tree
        self.sax = sax
        self.cardinality = cardinality  

        self.cardinality_next = np_copy(self.cardinality)
        self.cardinality_next = np_array([x*2 for x in self.cardinality_next])

        # Number of sequences contained in the node (or by its sons)
        self.nb_sequences = 0

        """ The incremental computing part for CFOF """
        self.mean = np_empty(shape=self.tree.size_word)
        # Allows the incremental calculation of self.mean
        self.sum = np_empty(shape=self.tree.size_word)

        self.std = np_empty(shape=self.tree.size_word)
        # Allows the incremental calculation of self.std
        self.sn = np_empty(shape=self.tree.size_word)

        # Specific to internal nodes
        self.nodes = []
        self.key_nodes = {}

        self.terminal = False
        self.level = 0

        self.id = RootNode.id_global
        RootNode.id_global += 1
Code example #39
        def asvalues(raw):
            """Convert a row of raw bytes into a flat row.  Result will
            be a freshly allocated object, not shared with
            argument.
            """

            if self.bitdepth == 8:
                return np_array(raw, 'uint8')
            if self.bitdepth == 16:
                raw = tostring(raw)
                return np_array(struct.unpack('!%dH' % (len(raw) // 2), raw),
                                'uint%d' % self.bitdepth)
            # bit depths below 8: unpack the samples packed several to a byte
            assert self.bitdepth < 8
            width = self.width
            # Samples per byte
            spb = 8 // self.bitdepth
            out = array('B')
            mask = 2**self.bitdepth - 1
            shifts = [self.bitdepth * i for i in reversed(list(range(spb)))]
            for o in raw:
                out.extend([mask & (o >> i) for i in shifts])
            return out[:width]
Code example #41
    def set_data(self):
        data_len = self.col_mat.shape[0] * self.col_mat.shape[1]
        self.pos = np_empty((data_len, 3))
        self.size = np_ones((data_len)) * .1
        self.color = np_empty((data_len, 4))

        jj = 0
        for ii in range(len(self.y)):
            for zz in range(len(self.z)):
                self.pos[jj, :] = np_array(
                    (self.x[ii] / self.kx, self.y[ii] / self.ky, zz / self.kz))
                self.color[jj, :] = self.col_mat[ii, zz] / 255.
                jj = jj + 1
Code example #42
File: bin.py Project: alneberg/GroopM
    def getCentroidStats(self, profile):
        """Calculate the centroids of the profile"""
        working_list = profile[self.rowIndices]

        # return the mean and stdev
        # we divide by std so we need to make sure it's never 0
        tmp_stds = np_std(working_list, axis=0)
        mean_std = np_mean(tmp_stds)
        try:
            std = np_array([x if x != 0 else mean_std for x in tmp_stds])
        except:
            std = mean_std
        return (np_median(working_list, axis=0), std)
Code example #43
File: bin.py Project: zhaoxia413/GroopM
    def getCentroidStats(self, profile):
        """Calculate the centroids of the profile"""
        working_list = profile[self.rowIndices]

        # return the mean and stdev
        # we divide by std so we need to make sure it's never 0
        tmp_stds = np_std(working_list, axis=0)
        mean_std = np_mean(tmp_stds)
        try:
            std = np_array([x if x != 0 else mean_std for x in tmp_stds])
        except:
            std = mean_std
        return (np_median(working_list, axis=0), std)
Code example #44
def de_mean(signal, axis=-1):
  """ De-mean signal (zero-center)
  ARGS:
    signal: signal amplitudes, should be in the range [-1.0,1.0], <number> or array of numbers
    axis: axis to apply mean, <int>
  RETURN:
    Zero centered signal, <number> or numpy array of numbers
  """
  if isinstance(signal, list):
    signal = np_array(signal)
  elif isinstance(signal, (int, long, float, complex)):  # 'long' is Python 2; alias long = int on Python 3
    raise Exception("Invalid arg")
  return signal - mean(signal, axis)
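A quick zero-centering check (same-module assumption: mean is numpy's, plus the long shim on Python 3):

import numpy as np
from numpy import array as np_array, mean

long = int   # Python 3 shim for the py2-era isinstance check

print(de_mean([1.0, 2.0, 3.0]))            # -> [-1.  0.  1.]
print(np.mean(de_mean([1.0, 2.0, 3.0])))   # -> 0.0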
Code example #45
def getCNNSamples(dir, mult=5):
    allVectors = getPreparedImagesCNN(dir)

    N = allVectors.shape[1]

    posPairs = []
    for i in range(N):
        posPairs.append(np.concatenate((allVectors[0][i], allVectors[1][i]), axis=2))

    OGRange = np_array(range(N), dtype=int)
    negPairs = []
    for i in range(mult):
        negIndex = np_array(range(N), dtype=int)

        # reshuffle until no image is paired with itself
        np.random.shuffle(negIndex)
        while 0 in (negIndex - OGRange):
            np.random.shuffle(negIndex)

        for j in range(N):  # renamed from 'i', which shadowed the outer loop variable
            negPairs.append(np.concatenate((allVectors[0][j], allVectors[1][negIndex[j]]), axis=2))

    return np_array(posPairs), np_array(negPairs)
Code example #46
def toTensor(X):
    if type(X) is typeTensor:
        return X
    if isDict(X):
        X = list(X)
    if isList(X):
        X = np_array(X)
    try:
        if X.dtype == np_bool:
            X = X.astype(np_uint8)
        return _toTensor(X)
    except:
        return X
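toTensor leans on module-level helpers the snippet does not show. A plausible reading with hypothetical shims; the assumption is that _toTensor wraps torch.from_numpy and typeTensor is torch.Tensor:

import numpy as np
import torch

# Hypothetical shims matching how the helpers appear to be used:
typeTensor = torch.Tensor
isDict = lambda X: isinstance(X, dict)
isList = lambda X: isinstance(X, (list, tuple))
_toTensor = torch.from_numpy
np_array, np_bool, np_uint8 = np.array, np.bool_, np.uint8

print(toTensor([True, False]))   # bool input -> uint8 tensor([1, 0])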
Code example #47
    def read_image_sequence(self, video_name: str):
        self.set_image_sequence_name(video_name)

        # Set video directory
        video_dir = os_path_join(self.im_seqs_dir, video_name)
        # List all the images included inside the video directory
        images_names = os_listdir(video_dir)

        # Sort the images with respect to their name
        sorted_images_names = sorted(images_names)

        # Read every image and store it in a list.
        # Turn this list to a numpy array.
        self.left_sequence = np_array([
            cv2_imread(os_path_join(video_dir, img_name))
            for img_name in sorted_images_names if img_name.endswith('L.jpg')
        ])
        self.right_sequence = np_array([
            cv2_imread(os_path_join(video_dir, img_name))
            for img_name in sorted_images_names if img_name.endswith('R.jpg')
        ])
        return self.left_sequence, self.right_sequence
Code example #48
    def getHeaderMid(self, listP, offset, size):
        vals = [0] * size  # renamed from 'list', which shadowed the builtin
        for i in range(0, size):
            vals[i] = listP[i + offset][2][0][0]

        midy = (listP[size - 1 + offset][1] + listP[offset][1]) >> 1
        src = np_array(vals)
        left = StripRegion._filteringAnomaly(src, StripRegion._modeCheck)

        src = StripRegion._filteringAnomaly(left, StripRegion._two_sigma)
        midx = np_average(src)
        self.points = [()] * 2
        self.points[0] = (midx, midy)
Code example #49
    def get_cct(self, methods="Hernandez 1999"):
        '''
        approximate CCT using CIE 1931 xy values
        '''
        x, y, z = self.get_xyz()

        if 0 in [x, y, z]:
            return 0.0

        logs.logger.debug(f"x = {x}, y = {y}, z = {z}")

        if isinstance(methods, str):
            methods = [methods]

        ccts = list()

        for curr_method in methods:
            if curr_method == 'me_mccamy':
                # McCamy's Approx
                small_x = x / (x + y + z)
                small_y = y / (x + y + z)

                n = (small_x - 0.3320) / (0.1858 - small_y)
                cct = 437 * (n**3) + 3601 * (n**2) + 6861 * n + 5517

                if DEBUG:
                    logs.logger.debug(
                        f"[me_mccamy] calc x = {small_x}, calc y = {small_y} | Calc CCT = {cct} K"
                    )
            elif curr_method in XY_TO_CCT_METHODS:
                xyz_arr = np_array([x, y, z])
                xy_arr = XYZ_to_xy(xyz_arr)
                cct = xy_to_CCT(xy_arr, curr_method)
                if DEBUG:
                    logs.logger.debug(
                        f"[{curr_method}] calc x,y = {xy_arr} | CCT = {cct}")
            else:
                options = ["me_mccamy"] + list(XY_TO_CCT_METHODS)

                logs.logger.error(
                    f"{curr_method} Not found!\nCCT calculation methods: \n {options}"
                )

                return

            ccts.append(int(cct))

        if len(ccts) == 1:
            return ccts[0]
        else:
            return ccts
Code example #50
    def getLastOrder(self):
        # if not live
        if not self.app.isLive():
            self.last_action = 'SELL'
            return

        orders = self.account.getOrders(self.app.getMarket(), '', 'done')
        if len(orders) > 0:
            last_order = orders[-1:]

            # if orders exist and last order is a buy
            if str(last_order.action.values[0]) == 'buy':
                self.last_buy_size = float(last_order[last_order.action == 'buy']['size'])
                self.last_buy_filled = float(last_order[last_order.action == 'buy']['filled'])
                self.last_buy_price = float(last_order[last_order.action == 'buy']['price'])

                # binance orders do not show fees
                if self.app.getExchange() == 'coinbasepro':
                    self.last_buy_fee = float(last_order[last_order.action == 'buy']['fees'])

                self.last_action = 'BUY'
                return
            else:
                self.minimumOrderQuote()
                self.last_action = 'SELL'
                self.last_buy_price = 0.0
                return
        else:
            base = float(self.account.getBalance(self.app.getBaseCurrency()))
            quote = float(self.account.getBalance(self.app.getQuoteCurrency()))

            # nil base or quote funds
            if base == 0.0 and quote == 0.0:
                sys.tracebacklimit = 0
                raise Exception(f'Insufficient Funds! ({self.app.getBaseCurrency()}={str(base)}, {self.app.getQuoteCurrency()}={str(quote)})')

            # determine last action by comparing normalised [0,1] base and quote balances 
            order_pairs = np_array([ base, quote ])
            order_pairs_normalised = (order_pairs - np_min(order_pairs)) / np_ptp(order_pairs)

            if order_pairs_normalised[0] < order_pairs_normalised[1]:
                self.minimumOrderQuote()
                self.last_action = 'SELL'
            elif order_pairs_normalised[0] > order_pairs_normalised[1]:
                self.minimumOrderBase()
                self.last_action = 'BUY'

            else:
                self.last_action = 'WAIT'

            return
Code example #51
    def plot_generated_nodes(partitioner):
        import matplotlib.cm as cm
        import matplotlib.pyplot as plt
        from matplotlib.collections import PatchCollection
        from matplotlib.patches import Rectangle
        from numpy import array as np_array

        areas = list()

        fig = plt.figure()
        fig.set_size_inches(15, 20)
        ax1 = fig.add_subplot(211)

        xs = [(s.llx, s.urx) for s in partitioner.sinks + [partitioner.source]]
        ys = [(s.lly, s.ury) for s in partitioner.sinks + [partitioner.source]]

        x_max, y_max = max([x[1] for x in xs]), max([y[1] for y in ys])
        x_min, y_min = min([x[0] for x in xs]), min([y[0] for y in ys])

        for s in partitioner.sinks:
            x, y = s.llx, s.lly
            w, h = s.width, s.height
            rectangle = Rectangle((x, y), w, h, alpha=0.3)
            ax1.add_patch(rectangle)

        x, y = partitioner.source.llx, partitioner.source.lly
        w, h = partitioner.source.width, partitioner.source.height
        rectangle = Rectangle((x, y), w, h, facecolor='red', alpha=0.3)
        ax1.add_patch(rectangle)

        ax1.set_xlim([x_min, x_max])
        ax1.set_ylim([y_min, y_max])

        patches = list()
        for n in partitioner.nodes:
            x, y = n.llx, n.lly
            w, h = n.width, n.height
            rectangle = Rectangle((x, y), w, h)
            patches.append(rectangle)
            areas.append(n.area)

        ax2 = fig.add_subplot(212)
        col = PatchCollection(patches, alpha=0.4)
        col.set(array=np_array(areas), cmap='jet')

        ax2.add_collection(col)
        ax2.set_xlim([x_min, x_max])
        ax2.set_ylim([y_min, y_max])
        # plt.colorbar(col)

        fig.savefig('test.png', dpi=200)
Code example #52
    def predict(self, data: list):
        """Загрузка модели из файла весов и совершение прогноза

        :param data: данные для совершения прогноза
        :return: список с результатом прогноза модели
        """
        try:
            prediction_data = np_array(data).reshape(-1, len(data))
            prediction_data = self.xscaler.transform(prediction_data)
            predicted = self.model.predict(prediction_data)
            return self.yscaler.inverse_transform(predicted)
        except TypeError as e:
            # TODO: Logger
            return None
Code example #53
def get_booked_bases(
    base_class, booked_bases_list
):  # runs on class init, saves a list of booked bases at the time of init to self.booked
    index_start = index_end = None
    gc = service_account(filename=_secret_file)
    sh = gc.open_by_key(cfg.database["jaeger_cal"])
    ws = sh.worksheet("Current")
    cal_export = np_array(ws.get_all_values())
    date_col = cal_export[:, 0]
    for index, value in enumerate(date_col):
        if not index_start and value == dt.now(tz.utc).strftime('%b-%d'):
            # gets us the header for the current date section in the google sheet
            index_start = index + 1
            continue
        if value == (dt.now(tz.utc) + td(days=1)).strftime('%b-%d'):
            # gets us the header for tomorrow's date in the sheet
            index_end = index  # now we know the range on the google sheet to look for base availability
            break
    if index_start is None or index_end is None:
        log.warning(
            f"Unable to find date range in Jaeger calendar for today's date. Returned: '{index_start}' "
            f"to '{index_end}'")
        return

    today_bookings = cal_export[index_start:index_end]

    for booking in today_bookings:
        try:
            start_time = date_parser(
                booking[10])  # 45 mins before start of reservation
            if booking[11] != "":
                end_time = date_parser(booking[11])
            else:
                end_time = date_parser(booking[9])
            if start_time <= dt.now(tz.utc) <= end_time:
                splitting_chars = ['/', ',', '&', '(', ')']
                booked_bases = booking[3]
                for sc in splitting_chars:
                    booked_bases = booked_bases.replace(sc, ';')
                booked_bases = [
                    _identify_base_from_name(base, base_class)
                    for base in booked_bases.split(";")
                ]
                for booked in booked_bases:
                    if booked is not None and booked not in booked_bases_list:
                        booked_bases_list.append(booked)
        except (ValueError, TypeError) as e:
            log.warning(
                f"Skipping invalid line in Jaeger Calendar:\n{booking}\nError: {e}"
            )
Code example #54
File: curve.py Project: sourlemon11/curve_synthesis
        def generate_points(_vars, const=None):
            logging.debug(f"Running generate_points")

            def lamb_const_linspace(var, const):
                logging.debug(f"Running lamb_const_linspace")
                if var.is_constant: return const
                else: return var.linspace

            if const is not None:
                logging.debug(f"const is not None")
                args = tuple(
                    lamb_const_linspace(v, const) for v in self.Vars.values())
            else:
                logging.debug(f"const is not not None")
                args = tuple(v.linspace for v in self.Vars.values())

            logging.debug(f"using args {args}")
            gpts = self.func(*args)
            # use [x][x] to exclude datatype element
            logging.debug(f"ASSIGNING POINTS with function {self.func}")
            # logging.debug(f"{ gpts[0][0] }")
            self.x.append(np_array(gpts[0][0]))
            self.y.append(np_array(gpts[1][0]))
Code example #55
def naive_search_with_np(n):
    prime_numbers = np_array([2])
    i = prime_numbers[0]
    while len(prime_numbers) != n:
        i += 1
        for d in np_arange(2, i):
            i_is_prime = True
            if i%d == 0:
                i_is_prime = False
                break
        if i_is_prime:
            prime_numbers = np_append(prime_numbers, i)

    print('Bytes:', getsizeof(prime_numbers))
Code example #56
File: pola_on_off.py Project: dishwasher-rfi/dw
def get_pola_data(dataset, on_off, clean=True):

    for key in list(on_off.keys()):  # list(): keys may be popped during iteration
        t0 = on_off[key].t_start.unix
        tf = on_off[key].t_stop.unix
        for dkey in DATA_KEYS:
            on_off[key].data[dkey] = np_array([
                x[dkey] for x in dataset.iterrows()
                if ((x['time'] + x['subtime'] * CLOCK) > t0) and (
                    (x['time'] + x['subtime'] * CLOCK) < tf)
            ])

        if clean and on_off[key].data[DATA_KEYS[0]].shape == (0, ):
            on_off.pop(key)