Example #1
def _init_unicode():
    """ Prepare unicode property tables and key pattern. """

    global _loaded
    global _unicode_properties
    global _unicode_key_pattern
    if _use_cache is not None:
        props = join(_use_cache, "%s_unicode_properties.cache" % _cache_prefix)
        if (not exists(join(_use_cache, "%s_unicode_properties.cache" % _cache_prefix))):
            _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))
            _unicode_key_pattern = _build_unicode_key_pattern()

            try:
                with open(props, 'wb') as f:
                    pickle.dump(_unicode_key_pattern, f)
                    pickle.dump(_unicode_properties, f)
            except Exception:
                if exists(props):
                    unlink(props)
        else:
            try:
                with open(props, 'rb') as f:
                    _unicode_key_pattern = pickle.load(f)
                    _unicode_properties = pickle.load(f)
            except Exception:
                if exists(props):
                    unlink(props)
                _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))
                _unicode_key_pattern = _build_unicode_key_pattern()
    else:
        _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))
        _unicode_key_pattern = _build_unicode_key_pattern()

    _loaded = True
Example #2
def _init_unicode():
    """
    Prepare unicode property tables and key pattern
    """
    global _loaded
    global _unicode_properties
    global _unicode_key_pattern
    if _use_cache is not None:
        props = join(_use_cache, "%s_unicode_properties.cache" % _cache_prefix)
        if (not exists(join(_use_cache, "%s_unicode_properties.cache" % _cache_prefix))):
            _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))
            _unicode_key_pattern = _build_unicode_key_pattern()

            try:
                with open(props, 'wb') as f:
                    pickle.dump(_unicode_key_pattern, f)
                    pickle.dump(_unicode_properties, f)
            except:
                if exists(props):
                    unlink(props)
        else:
            try:
                with open(props, 'rb') as f:
                    _unicode_key_pattern = pickle.load(f)
                    _unicode_properties = pickle.load(f)
            except:
                if exists(props):
                    unlink(props)
                _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))
                _unicode_key_pattern = _build_unicode_key_pattern()
    else:
        _unicode_properties = _build_unicode_property_table((0x0000, 0x10FFFF))
        _unicode_key_pattern = _build_unicode_key_pattern()

    _loaded = True
Example #3
    def plot():
        '''
        PLOT PARAMETERS CAN BE ADJUSTED IN THIS METHOD
        '''
        # initialize plotter-instance with pickled stationbox:
        stationBoxFile = open('stationBox.p','r')
        pplotter = plotter(pickle.load(stationBoxFile))
        cmt_lat = 37.10
        cmt_lon = -3.69

        # plot picks (Event coordinates needed to get relative distance):
        CMTevent=model.Event(cmt_lat, cmt_lon, str_to_time("2010-04-11 22:08:15.500"), 
           "Spain_Durcal" , 625500 ,6.3)
        pplotter.plotpicks(CMTevent)

        # plot misfits:
        try:
            pickledResults = open('numpy_results.p','r')
            resultArray = pickle.load(pickledResults)
        finally:
            pickledResults.close()
        
        #pplotter.plotMisfits(data, _depths)
        #.............................................................
        # plot contour misfits for each model:
        # 1st: load misfit array:
        for phaseKey in ['p', 's', 'Scs', 'Scss']:    
            pplotter.plotContourMisfit(resultArray, _lats, _lons, _depths, phaseKey, _models[3])
        

        #.............................................................
        # set time shift file:
        tShiftFiles = glob.glob('map/t_shift*.txt')
        
        if tShiftFiles==[] or (tShiftFiles!=[] and (os.stat(tShiftFiles[0])[8] < os.stat('numpy_results.p')[8])): 
            for phase in ['p','s']:
                for testmodel in _models:
                    print '(re)setting t-shift files for model: {0} and phase:{1}'.format(testmodel, phase)
                    depthfinder.setTshiftFile(CMTevent, np.searchsorted(_lats, CMTevent.lat, ), 
                                np.searchsorted(_lons, CMTevent.lon), resultArray, testmodel, phase, _depths, maxdist )               
        else:
            pass

        for shiftmodel in _models:
            pplotter.plotMisfitShiftMap('s',shiftmodel)

        #.............................................................
        #write horizontal layers to image files;
        #pplotter.saveMisfitLayersAsImages(resultArray, _lats, _lons, _depths, 'models/premlocal1_Stich.nd','p', 630000, 600000 )

        CMTeventIndexLongitude = getIndexOfValueInArray(_lons, cmt_lon)
        CMTeventIndexLatitude = getIndexOfValueInArray(_lats, cmt_lat)
        pplotter.plotMisfitsForLocation(_models[0], CMTeventIndexLatitude, CMTeventIndexLongitude, resultArray, _depths)


        #.............................................................
        # create map
        subprocess.call("map/durcal_stations.sh&", shell=True)
Example #4
 def from_iteration(cls, save_dir, i):
     """
     Loads the data from a previous run
     :param save_dir: directory for the data
     :param i: index of iteration
     :return:
     """
     iteration_folder = save_dir + "/%03d" % (i, )
     that = pickle.load(open(iteration_folder + "/bayesian_opt.pickle", "rb"))
     if not isinstance(that, cls):
         raise BayesianOptimizationError(BayesianOptimizationError.LOAD_ERROR, "not a robo instance")
     new_x, X, Y, best_guess = pickle.load(open(iteration_folder + "/observations.pickle", "rb"))
     return that, new_x, X, Y, best_guess
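A short usage sketch for the classmethod above (the class name BayesianOptimization and the run directory are hypothetical; from_iteration is called on whatever class defines it):

# Hypothetical usage sketch: resume the state saved under ./runs/exp1/007/.
opt, new_x, X, Y, best_guess = BayesianOptimization.from_iteration('./runs/exp1', 7)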
Example #5
def create_index(number):
    create_stop()
    file = open('stop.pkl', 'rb')
    stop = pickle.load(file)
    file.close()
    #inverse=defaultdict(lambda : defaultdict(int))
    inverse = {}
    col = {}
    for i in range(0, 3):
        doc = 'd' + str(i)
        col[doc] = defaultdict(int)
        for word in preprocess(i):
            if word not in stop:  # membership test uses the dictionary's hashing, unlike iterating over .keys()
                col[doc][word] += 1
                if word in inverse.keys():
                    if doc not in inverse[word].keys():
                        inverse[word][doc] += 1
                else:
                    inverse[word] = defaultdict(int)
                    inverse[word][doc] += 1

    file = open('structure.pkl', 'wb')
    pickle.dump(col, file)
    file.close()
    return col
Example #6
    def start_slices(self):
        """
        Second step: start previously conflictive (non-standard) slices at FlowVisor.
        """
        errors = []
        slice_ids = []
        # If 'conflictive_slice_ids' file exists, do the following.
        # Otherwise warn and skip.
        try:
            f = open("%s/conflictive_slice_ids" % path, "r")
            ids = pickle.load(f)
            f.close()
            os.remove("%s/conflictive_slice_ids" % path)
            slices = filter_own_slices(ids)
            for (counter, slice) in enumerate(slices):
                aggs = slice.aggregates.filter(leaf_name="OpenFlowAggregate")
                slice_id = str(settings.SITE_DOMAIN) + "_" + str(slice.id)
                slice_ids.append({
                    "id": slice_id,
                    "keywords": ids[counter]['keywords']
                })
                for agg in aggs:
                    try:
                        print "Starting", slice_id, "at aggregate", str(agg)
                        time.sleep(FLOWVISOR_SLEEP_TIME)
                        with Timeout(TIMEOUT):
                            agg.as_leaf_class().client.proxy.create_slice(
                                slice_id, slice.project.name,
                                slice.project.description, slice.name,
                                slice.description,
                                slice.openflowsliceinfo.controller_url,
                                slice.owner.email,
                                slice.openflowsliceinfo.password,
                                agg.as_leaf_class()._get_slivers(slice))
                        # Starting slice at Expedient
                        slice.started = True
                        slice.save()
                    except Exception as e:
                        message = """Could not fix the naming in Aggregate Manager %s for slice with the following details: name:\t\t %s
FlowVisor name:\t\t %s

The cause of the error is: %s. Please try to fix it manually""" % (str(
                            agg.as_leaf_class()), slice.name, slice_id, e)
                        send_mail(
                            "OCF: error while standardizing Flowvisor slices",
                            message, "*****@*****.**",
                            [settings.ROOT_EMAIL])
                        errors.append(message)
            # Save the slice IDs so flowspace can be granted regardless of other errors
            f = open("%s/slice_ids_to_grant_fs" % path, "w")
            pickle.dump(slice_ids, f)
            f.close()
            if errors:
                return "\033[93mFailure while starting previously non-standard slices at FlowVisor: %s\033[0m" % str(
                    errors)
            else:
                return "\033[92mSuccessfully started previously non-standard slices at FlowVisor\033[0m"
        except Exception as e:
            print e
            return "\033[93mCould not access file with slice IDs. Skipping...\033[0m\n"
Example #7
def random_h(G, stringie, dimension, num_iter, max_index):
    h = []
    ht = []
    for i in range(num_iter):
        namefile = stringie + str(i) + '.pck'
        gen_temp = pickle.load(open(namefile))
        if gen_temp.has_key(dimension):
            h.append(network_hollowness(gen_temp[dimension], max_index))
            ht.append(
                network_weighted_hollowness(G, gen_temp[dimension], max_index))
    print 'h=' + str(np.mean(h)) + '+-' + str(np.std(h))
    print 'ht=' + str(np.mean(ht)) + '+-' + str(np.std(ht))
    if len(h) >= 1:
        a = np.mean(h)
        b = np.std(h)
    else:
        a = 0
        b = 0
    if len(ht) > 1:
        c = np.mean(ht)
        d = np.std(ht)
    else:
        c = 0
        d = 0
    return a, b, c, d
Example #8
def test_pickle(obj):
    """Test if an object can successfully be pickled and loaded again
    Returns True if succeeds
            False if fails
    """
    import pickle
    # sys.setrecursionlimit(10000)
    path = 'test.p.tmp'
    if os.path.isfile(path):
        os.remove(path)  # remove temp file
    try:
        with open(path, 'wb') as f:
            pickle.dump(obj, f)
        print('Pickled object')
        with open(path, 'rb') as f:
            out = pickle.load(f)
        print('Loaded object')
    except Exception as e:
        print('{}'.format(e))
        return False
    if os.path.isfile(path):
        print('Pickled file size: {:g} Bytes'.format(os.path.getsize(path)))
        os.remove(path)  # remove temp file
    # import pdb; pdb.set_trace()  # debugger breakpoint disabled so the helper can run unattended
    print('In:\n{}\nOut:\n{}'.format(out, obj))
    if not isinstance(obj, dict):
        out = out.__dict__
        obj = obj.__dict__
    if compare_dict(out, obj):
        return True
    else:
        return False
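A brief usage sketch for test_pickle (hypothetical object; it assumes the compare_dict helper used above is importable and the debugger breakpoint stays commented out):

# Hypothetical usage sketch: round-trip a plain dict through test_pickle.
# A dict is compared directly, so the compare_dict(out, obj) branch is exercised.
sample = {'name': 'run_42', 'values': [1, 2, 3]}
if test_pickle(sample):
    print('sample survived the pickle round trip')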
Example #9
 def from_iteration(cls, save_dir, i):
     """
     Loads the data from a previous run
     :param save_dir: directory for the data
     :param i: index of iteration
     :return:
     """
     iteration_folder = save_dir + "/%03d" % (i, )
     that = pickle.load(
         open(iteration_folder + "/bayesian_opt.pickle", "rb"))
     if not isinstance(that, cls):
         raise BayesianOptimizationError(
             BayesianOptimizationError.LOAD_ERROR, "not a robo instance")
     new_x, X, Y, best_guess = pickle.load(
         open(iteration_folder + "/observations.pickle", "rb"))
     return that, new_x, X, Y, best_guess
Example #10
    def load(self, file_name):
        """Load the buffer from a file.

        @param file_name
        """
        self.buffer = pickle.load(open(file_name, 'rb'))
        self.size = len(self.buffer)
Example #11
def _load_data(datafile, datafilefmt):
    '''
    Load the data file from the last run, or if it does not exist, return an
    empty dictionary.  Backup any existing data in the process.
    '''
    if not os.path.isfile(datafile):
        data = {}
    else:
        if datafilefmt == 'json':
            with open(datafile, encoding='utf8') as f:
                data = json.load(f)
        elif datafilefmt == 'json.zip':
            with zipfile.ZipFile(datafile) as z:
                fname = os.path.split(datafile)[1].rsplit('.', 1)[0]
                with z.open(fname) as f:
                    data = json.loads(f.read().decode('utf8'))
        elif datafilefmt in ('pickle', 'pkl'):
            with open(datafile, 'rb') as f:
                data = pickle.load(f)
        else:
            raise ValueError(
                'Invalid data file format {0}'.format(datafilefmt))
        # Create 2 levels of backups--maybe a little paranoid, but safer
        # The data file is only written after everything is complete and there
        # are no errors, so copying it in this manner doesn't risk overwriting
        # valid backups with bad data
        if os.path.isfile(datafile + '.backup'):
            shutil.copy(datafile + '.backup', datafile + '.backup2')
        shutil.copy(datafile, datafile + '.backup')

    return data
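A minimal usage sketch for _load_data (hypothetical file name; it assumes the os, json, zipfile, pickle and shutil imports the function relies on are present in the module):

# Hypothetical usage sketch: load the previous run's data from a pickle file,
# getting an empty dict on the first run when no data file exists yet.
data = _load_data('run_state.pkl', 'pickle')
data['run_count'] = data.get('run_count', 0) + 1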
Example #12
def get_ngrams(N=3):
    '''Return doc- and corpus-level ngram counts for each word type.

    Counts are based on n-grams of size 1 through N (inclusive).
    Ngrams are stored and returned as a dictionary:

        ngrams = {
            'doc': {
                doc1.id : {
                    0: {
                        ngram1: <count>,
                        ngram2: <count>,
                        ...
                        },
                    ...
                    },
                ...
                },
            'corpus': {
                0: {
                    ngram1: <count>,
                    ngram2: <count>,
                    ...
                    },
                ...
                },
            }
    '''
    try:
        with open(NGRAM_FILE % N, 'rb') as f:
            return pickle.load(f)

    except (IOError, EOFError):
        return _get_ngrams(N)
Example #13
def getDates():
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    import datetime

    
    dateList=[]

    dateHistFile = "dateList.d"

    try:
        dateList=pickle.load( open( dateHistFile, "rb" ) )
    except IOError:

        #Change this to the min date instead of a fixed date
        #But then we need to consider if the holiday dates are in the same range

        dateList = map(okDate,[ datetime.datetime(2012, 12, 31, 0, 0) + datetime.timedelta(days=x) for x in range(0,365)])

        dateList = [dateVal for dateVal in dateList if dateVal is not None]
        #If this file is being run or loaded, then we need to recreate the pickled dateList
        pickle.dump( dateList, open( dateHistFile, "wb" ))

    return dateList
Example #14
def _load_data(datafile, datafilefmt):
    '''
    Load the data file from the last run, or if it does not exist, return an
    empty dictionary.  Backup any existing data in the process.
    '''
    if not os.path.isfile(datafile):
        data = {}
    else:
        if datafilefmt == 'json':
            with open(datafile, encoding='utf8') as f:
                data = json.load(f)
        elif datafilefmt == 'json.zip':
            with zipfile.ZipFile(datafile) as z:
                fname = os.path.split(datafile)[1].rsplit('.', 1)[0]
                with z.open(fname) as f:
                    data = json.loads(f.read().decode('utf8'))
        elif datafilefmt in ('pickle', 'pkl'):
            with open(datafile, 'rb') as f:
                data = pickle.load(f)
        else:
            raise ValueError('Invalid data file format {0}'.format(datafilefmt))
        # Create 2 levels of backups--maybe a little paranoid, but safer
        # The data file is only written after everything is complete and there
        # are no errors, so copying it in this manner doesn't risk overwriting
        # valid backups with bad data
        if os.path.isfile(datafile + '.backup'):
            shutil.copy(datafile + '.backup', datafile + '.backup2')
        shutil.copy(datafile, datafile + '.backup')

    return data
Example #15
def getDates():
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    import datetime

    dateList = []

    dateHistFile = "dateList.d"

    try:
        dateList = pickle.load(open(dateHistFile, "rb"))
    except IOError:

        #Change this to the min date instead of a fixed date
        #But then we need to consider if the holiday dates are in the same range

        dateList = map(okDate, [
            datetime.datetime(2012, 12, 31, 0, 0) + datetime.timedelta(days=x)
            for x in range(0, 365)
        ])

        dateList = [dateVal for dateVal in dateList if dateVal is not None]
        #If this file is being run or loaded, then we need to recreate the pickled dateList
        pickle.dump(dateList, open(dateHistFile, "wb"))

    return dateList
Example #16
def load_luma_map(luma_file=LUMA_FILE):
    print "LOADING LUMA MAP", luma_file
    try:
        with open(luma_file) as f:
            global LUMA_MAP
            LUMA_MAP = pickle.load(f)
            print_luma_completion()
    except Exception, e:
        print "WARNING: NOT LOADING LUMA MAP", e
Example #17
def load_luma_observations():
    global LUMA_OBS
    if os.path.exists(CHANGES_FILE):
        try:
            with open(CHANGES_FILE) as f:
                LUMA_OBS = pickle.load(f)
                print "LOADED %s LUMA OBSERVATIONS" % len(LUMA_OBS)
        except Exception, e:
            print "EXCEPT", e
Example #18
    def init_last_iteration(self):
        """
        Loads the last iteration from a previously stored run
        :return: the previous observations
        """
        max_iteration = self._get_last_iteration_number()

        iteration_folder = os.path.join(self.save_dir, "%03d" % (max_iteration, ))

        that = pickle.load(open(os.path.join(iteration_folder, "bayesian_opt.pickle"), "rb"))
        self.objective_fkt = that.objective_fkt
        self.acquisition_fkt = that.acquisition_fkt
        self.model = that.model
        self.maximize_fkt = that.maximize_fkt
        self.X_lower = that.X_lower
        self.X_upper = that.X_upper
        self.dims = that.dims
        return pickle.load(open(iteration_folder + "/observations.pickle", "rb"))
Example #19
def loadPickle(_fileName=PickleFileG):
    '''Load the result of the previous run so it can be compared with the latest data.'''
    if os.path.exists(_fileName):
        f=file(_fileName,'rb')
        _Data = pickle.load(f)
        f.close()
        return _Data
    else:
        return {'date':'','data':{}}
Example #20
def load_luma_map(luma_file=LUMA_FILE):
  print "LOADING LUMA MAP", luma_file
  try:
    with open(luma_file) as f:
      global LUMA_MAP
      LUMA_MAP = pickle.load(f)
      print_luma_completion()
  except Exception, e:
    print "WARNING: NOT LOADING LUMA MAP", e
Example #21
 def configure(self, options, conf):
     self.hash_file = join(conf.workingDir, self.hash_file)
     if isfile(self.hash_file):
         log.debug("Loading last known hashes and dependency graph")
         with open(self.hash_file, 'r') as f:
             data = load(f)
         self._known_hashes = data['hashes']
         self._known_graph = data['graph']
     Plugin.configure(self, options, conf)
Example #22
def load_luma_observations():
  global LUMA_OBS
  if os.path.exists(CHANGES_FILE):
    try:
      with open(CHANGES_FILE) as f:
        LUMA_OBS = pickle.load(f)
        print "LOADED %s LUMA OBSERVATIONS" % len(LUMA_OBS)
    except Exception, e:
      print "EXCEPT", e
Example #23
def loadData():
    Tkinter.Tk().withdraw()
    fp = tkFileDialog.askopenfilename()
    print("Opening " + fp + "...")
    with open(fp, 'r') as file:
        emgs = pickle.load(file)

    print "Loaded data file."

    return emgs
Example #24
def save_temps(filename):
    # Read the previously saved readings first (the original opened the file in
    # 'w' mode, which truncates it before the load), then append and rewrite.
    try:
        with open(filename, 'rb') as f:
            data = cpickle.load(f)
    except (IOError, EOFError):
        data = {}
    for i in range(1, 9):
        temp = read_temp(i)
        now = datetime.datetime.now()
        data[now] = [i, temp]
        time.sleep(0.5)
    with open(filename, 'wb') as f:
        cpickle.dump(data, f)
Example #25
 def configure(self, options, conf):
     self.hash_file = os.path.join(conf.workingDir, self.hash_file)
     if os.path.isfile(self.hash_file):
         log.debug("Loading last known hashes and dependency graph")
         f = open(self.hash_file, 'r')
         data = load(f)
         f.close()
         self._known_hashes = data['hashes']
         self._known_graph = data['graph']
     Plugin.configure(self, options, conf)
Example #26
    def init_last_iteration(self):
        """
        Loads the last iteration from a previously stored run
        :return: the previous observations
        """
        max_iteration = self._get_last_iteration_number()

        iteration_folder = os.path.join(self.save_dir,
                                        "%03d" % (max_iteration, ))

        that = pickle.load(
            open(os.path.join(iteration_folder, "bayesian_opt.pickle"), "rb"))
        self.objective_fkt = that.objective_fkt
        self.acquisition_fkt = that.acquisition_fkt
        self.model = that.model
        self.maximize_fkt = that.maximize_fkt
        self.X_lower = that.X_lower
        self.X_upper = that.X_upper
        self.dims = that.dims
        return pickle.load(
            open(iteration_folder + "/observations.pickle", "rb"))
Example #27
    def __init__(self):
        DIR = dirname(__file__)
        morfessor_file = join(DIR, 'data/finnsyll-morfessor.bin')
        ngram_file = join(DIR, 'data/finnsyll-ngrams.pickle')

        io = morfessor.MorfessorIO()
        self.model = io.read_binary_model_file(morfessor_file)
        self.constraints = CONSTRAINTS
        self.constraint_count = len(CONSTRAINTS)

        with open(ngram_file, 'rb') as f:
            self.ngrams, self.vocab, self.total = pickle.load(f)
Example #28
def getObj(dataFile=None):
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    dataObj={}
    try:
        dataObj=pickle.load( open( dataFile, "rb" ) )
    except IOError:
        return dataObj    
    return dataObj
Example #29
def pickle_load(path, base=None, **kwargs):
    if isinstance(path, Path):
        path = str(path)

    if base is not None:
        path = os.path.join(base, path)

    if isinstance(path, basestring):
        if path[-2:] != '.p':
            path += '.p'
        with open(path, 'rb') as f:
            try:
                out = pickle.load(f, **kwargs)
            except EOFError as e:
                logger.error('path "{}" is not a pickle file. {}'.format(path, e))
                raise e
    elif isinstance(path, file):
        out = pickle.load(path, **kwargs)
        path.close()
    else:
        raise ValueError('Unexpected path format')
    return out
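A short usage sketch for pickle_load (hypothetical paths): the '.p' suffix is appended automatically, and base, when given, is joined in front of the path.

# Hypothetical usage sketch: reads /tmp/myrun/settings.p
settings = pickle_load('settings', base='/tmp/myrun')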
Example #30
def run_job(f, *args, **kwargs):
    ''' Internal function, runs the function in a remote process.
    Uses fork() to perform it.

    Availability: Unix
    '''
    out_file = tempfile.mkstemp()
    os.close(out_file[0])
    pid = os.fork()
    if pid != 0:
        # parent: wait for the child
        pid, status = os.waitpid(pid, 0)
        # read output file
        #print('read from', os.getpid(), ':', out_file[1])
        if os.stat(out_file[1]).st_size == 0:
            # child did not write anything
            os.unlink(out_file[1])
            raise OSError('child did not output anything')
        if status != 0:
            os.unlink(out_file[1])
            raise RuntimeError('subprocess error: %d' % status)
        result = pickle.load(open(out_file[1], 'rb'))
        os.unlink(out_file[1])
        # traceback objects cannot be pickled...
        #if isinstance(result, tuple) and len(result) == 3 \
        #and isinstance(result[1], Exception):
        ## result is an exception with call stack: reraise it
        #raise result[0], result[1], result[2]
        if isinstance(result, Exception):
            raise result
        return result

    # child process
    try:
        try:
            #print('exec in', os.getpid(), ':', f, args, kwargs)
            result = f(*args, **kwargs)
            #print('OK')
        except Exception as e:
            # traceback objects cannot be pickled...
            #result = (type(e), e, sys.exc_info()[2])
            result = e
        #print('write:', out_file[1], ':', result)
        try:
            pickle.dump(result, open(out_file[1], 'wb'), protocol=2)
        except Exception as e:
            print('pickle failed:', e, '\nfor object:', type(result))
    finally:
        # sys.exit() is not enough
        os._exit(0)
Example #31
    def readFromFile(self, filename):
        """ Read contacts in from a file. """
        addresses = []
        try:
            with open(filename, "rb") as f:
                addresses = pickle.load(f)
        except IOError:
            QMessageBox.information(self, "Unable to open file: %s" % filename)

        if len(addresses) == 0:
            QMessageBox.information(self, "No contacts in file: %s" % filename)
        else:
            for address in addresses:
                self.addEntry(address["name"], address["address"])
Example #32
def readSession(filepath, helper=None):
    """
    Loads a dictionary session from file.

    :param filepath: path to load session file.
    :param helper: function to pos-process session file
    :return: session
    """
    # safely read session file
    with secure_open(filepath, 'rb') as logger:
        session = serializer.load(logger)  # get session
    if helper:
        return helper(session)
    else:
        return session
Example #33
    def readFromFile(self, filename):
        """ Read contacts in from a file. """
        addresses = []
        try:
            with open(filename, "rb") as f:
                addresses = pickle.load(f)
        except IOError:
            QMessageBox.information(self, "Unable to open file: %s" % filename)

        if len(addresses) == 0:
            QMessageBox.information(self, "No contacts in file: %s" % filename)
        else:
            for address in addresses:
                self.addEntry(address["name"], address["address"])
Example #34
    def load(cls, filename):
        """ Create an engine given metadata from a pickle file.

        Parameters
        ----------
        filename : str
        """
        with open(filename, 'rb') as f:
            dat = pkl.load(f)
            self = cls(**dat['init_args'])
            for key, val in dat['cls_attrs'].items():
                setattr(self, '_' + key, val)

            random.setstate(dat['rng_state']['py'])
            np.random.set_state(dat['rng_state']['np'])

        return self
Example #35
    def load(cls, filename):
        """ Create an engine given metadata from a pickle file.

        Parameters
        ----------
        filename : str
        """
        with open(filename, 'rb') as f:
            dat = pkl.load(f)
            self = cls(**dat['init_args'])
            for key, val in dat['cls_attrs'].items():
                setattr(self, '_' + key, val)

            random.setstate(dat['rng_state']['py'])
            np.random.set_state(dat['rng_state']['np'])

        return self
Example #36
    def start_slices(self):
        """
        Second step: start previously conflictive (non-standard) slices at FlowVisor.
        """
        errors = []
        slice_ids = []
        # If 'conflictive_slice_ids' file exists, do the following.
        # Otherwise warn and skip.
        try:
            f = open("%s/conflictive_slice_ids" % path,"r")
            ids = pickle.load(f)
            f.close()
            os.remove("%s/conflictive_slice_ids" % path)
            slices = filter_own_slices(ids)
            for (counter, slice) in enumerate(slices):
                aggs = slice.aggregates.filter(leaf_name="OpenFlowAggregate")
                slice_id = str(settings.SITE_DOMAIN) + "_" + str(slice.id)
                slice_ids.append({"id":slice_id, "keywords":ids[counter]['keywords']})
                for agg in aggs:
                    try:
                        print "Starting", slice_id, "at aggregate", str(agg)
                        time.sleep(FLOWVISOR_SLEEP_TIME)
                        with Timeout(TIMEOUT):
                            agg.as_leaf_class().client.proxy.create_slice(slice_id, slice.project.name,slice.project.description,slice.name, slice.description, slice.openflowsliceinfo.controller_url, slice.owner.email, slice.openflowsliceinfo.password, agg.as_leaf_class()._get_slivers(slice))
                        # Starting slice at Expedient
                        slice.started = True
                        slice.save()
                    except Exception as e:
                        message = """Could not fix the naming in Aggregate Manager %s for slice with the following details: name:\t\t %s
FlowVisor name:\t\t %s

The cause of the error is: %s. Please try to fix it manually""" % (str(agg.as_leaf_class()),slice.name, slice_id, e)
                        send_mail("OCF: error while standardizing Flowvisor slices", message, "*****@*****.**", [settings.ROOT_EMAIL])
                        errors.append(message)
            # Save the slice IDs so flowspace can be granted regardless of other errors
            f = open("%s/slice_ids_to_grant_fs" % path,"w")
            pickle.dump(slice_ids, f)
            f.close()
            if errors:
                return "\033[93mFailure while starting previously non-standard slices at FlowVisor: %s\033[0m" % str(errors)
            else:
                return "\033[92mSuccessfully started previously non-standard slices at FlowVisor\033[0m"
        except Exception as e:
            print e
            return "\033[93mCould not access file with slice IDs. Skipping...\033[0m\n"
Example #37
def getHistory():  #add the

    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    import datetime
    import time
    historyFile = "stockHist.d"
    try:
        histDict = pickle.load(open(historyFile, "rb"))
        # Need to check if the data files have changed and if so recreate
        # Check the max date in the database against the retrieved array
    except IOError:
        from sys import getsizeof
        from symbols import symbolList
        from processFuncs import writeLog

        histDict = {}
        recHistory = []

        dateSearch = datetime.datetime(
            2013, 1, 2, 0, 0
        )  #We know this is the starting date. Should probably use the tradingDates.py

        db = openData('stockData')
        useIndex(db, 'nasdaq', 'Date', 'Symbol')
        #db.nasdaq.ensure_index([('Date','Symbol')])
        writeLog("Creating Price History")

        #Symbol,Date,Open,High,Low,Close,Volume
        #prices={'AAPL':[('20130201',433.45,434.30,432.33,436.3,2030400),('20130202',433.55,434.50,432.33,436.3,2030400)]}
        #.strftime("%Y%m%d")
        for symbol in symbolList:
            recHistory = map(lambda x: (x['Date'].combine(x['Date'].date(),datetime.time(0,0,0)),x['Open'],x['High'],x['Low'],x['Close']),\
                db.nasdaq.find({'Symbol':symbol},{'Date':1,'Open': 1,'High':1,'Low':1,'Close': 1, '_id':0 }).sort('Date',1))
            #print recHistory
            #raw_input()
            histDict[symbol] = recHistory
            recHistory = []
        pickle.dump(histDict, open(historyFile, "wb"))

    return histDict
Example #38
def getHistory(): #add the

    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    import datetime
    import time
    historyFile = "stockHist.d"
    try:
        histDict=pickle.load( open( historyFile, "rb" ) )
        # Need to check if the data files have changed and if so recreate
        # Check the max date in the database against the retrieved array
    except IOError:
        from sys import getsizeof
        from symbols import symbolList
        from processFuncs import writeLog
        
        histDict={}
        recHistory=[]
        
        dateSearch = datetime.datetime(2013, 1, 2, 0, 0) #We know this is the starting date. Should probably use the tradingDates.py
        
        db=openData('stockData')
        useIndex(db,'nasdaq','Date','Symbol')
        #db.nasdaq.ensure_index([('Date','Symbol')])
        writeLog("Creating Price History")

        #Symbol,Date,Open,High,Low,Close,Volume
        #prices={'AAPL':[('20130201',433.45,434.30,432.33,436.3,2030400),('20130202',433.55,434.50,432.33,436.3,2030400)]}
        #.strftime("%Y%m%d")
        for symbol in symbolList:
            recHistory = map(lambda x: (x['Date'].combine(x['Date'].date(),datetime.time(0,0,0)),x['Open'],x['High'],x['Low'],x['Close']),\
                db.nasdaq.find({'Symbol':symbol},{'Date':1,'Open': 1,'High':1,'Low':1,'Close': 1, '_id':0 }).sort('Date',1))
            #print recHistory
            #raw_input()
            histDict[symbol]=recHistory
            recHistory=[]
        pickle.dump( histDict, open( historyFile, "wb" ))
        

    
    return histDict
Example #39
    def intensityMap(self, plot=False):
        path = self._gfile.split("/")[-1]
        nR = 100
        nphi = 200
        try:
            fieldlines = pickle.load(open("fieldline_store/" + path + "__fl.p", "rb"))
        except:
            fieldlines = []
            Rstart = 1.37
            phistart = 160.0
            dR = 0.001
            dphi = 0.2
            for i in np.arange(nR):
                R = Rstart + i * dR
                fline = self.tracer.trace(R, 0.0, phistart * 2.0 * np.pi / 360.0, mxstep=10000, ds=0.01)
                for j in np.arange(nphi):
                    fline.rotate_toroidal(dphi * 2.0 * np.pi / 360.0)
                    line = self.projectFieldline(fline)
                    indsR = np.where(np.array(fline.R) > 0.6)[0]
                    inds = np.where(np.abs(fline.Z)[indsR] < 1.0)[0]
                    fieldlines.append(line[inds])
            pickle.dump(fieldlines, open("fieldline_store/" + path + "__fl.p", "wb"))
        frame = self.bgsub.apply(self._currentframeData)
        intensity = []
        for i in np.arange(len(fieldlines)):
            line = fieldlines[i]
            temp = []
            points = np.zeros(frame.shape)
            for j in np.arange(line.shape[0]):
                yind, xind = int(line[j, 0]), int(line[j, 1])
                if xind > 0 and xind < frame.shape[0] and yind > 0 and yind < frame.shape[1]:
                    points[xind, yind] = 1.0
            intensity.append(np.sum(points * frame))

        intensity = np.array(intensity).reshape((nR, nphi))
        if plot:
            fig = plt.figure()
            levels = np.linspace(np.min(intensity), np.max(intensity), 200)
            plt.contourf(np.linspace(1.37, 1.47, 100), np.linspace(160, 200, 200), intensity.T, levels=levels)
            plt.show()
        else:
            return intensity
Example #40
    def new_f(*args, **kwargs):
        compressed = ''
        if len(args) > 0:
            compressed = '_' + '_'.join([str(arg)[:10] for arg in args])
        if len(kwargs) > 0:
            compressed += '_' + '_'.join([(str(k)+str(v))[:10] for k,v in kwargs.items()])

        filename = '%s%s.pickle' % (f.__name__, compressed)

        if os.path.exists(filename):
            pickled = open(filename, 'rb')
            result = pickle.load(pickled)
            pickled.close()
        else:
            result = f(*args, **kwargs)
            pickled = open(filename, 'wb')
            pickle.dump(result, pickled)
            pickled.close()

        return result
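new_f above is the inner wrapper of a pickle-based memoisation decorator; f and the cache file name come from the enclosing scope. A self-contained sketch of such a decorator, under the hypothetical name pickle_cache, might look like this:

import os
import pickle


def pickle_cache(f):
    # Cache each call's result in a pickle file keyed by the function name
    # and a truncated rendering of its arguments (mirrors new_f above).
    def new_f(*args, **kwargs):
        compressed = ''
        if args:
            compressed = '_' + '_'.join(str(arg)[:10] for arg in args)
        if kwargs:
            compressed += '_' + '_'.join((str(k) + str(v))[:10] for k, v in kwargs.items())
        filename = '%s%s.pickle' % (f.__name__, compressed)
        if os.path.exists(filename):
            with open(filename, 'rb') as cached:
                return pickle.load(cached)
        result = f(*args, **kwargs)
        with open(filename, 'wb') as cached:
            pickle.dump(result, cached)
        return result
    return new_f


@pickle_cache
def slow_sum(a, b):
    return a + b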
Example #41
    def new_f(*args, **kwargs):
        compressed = ''
        if len(args) > 0:
            compressed = '_' + '_'.join([str(arg)[:10] for arg in args])
        if len(kwargs) > 0:
            compressed += '_' + '_'.join([(str(k)+str(v))[:10] for k,v in kwargs.items()])

        filename = '%s%s.pickle' % (f.__name__, compressed)

        if os.path.exists(filename):
            pickled = open(filename, 'rb')
            result = pickle.load(pickled)
            pickled.close()
        else:
            result = f(*args, **kwargs)
            pickled = open(filename, 'wb')
            pickle.dump(result, pickled)
            pickled.close()

        return result
Example #42
    def new_f(*args, **kwargs):
        compressed = ''
        if len(args) > 0:
            compressed = '_' + '_'.join([str(arg)[:10] for arg in args])
        if len(kwargs) > 0:
            compressed += '_' + '_'.join([(str(k) + str(v))[:10] for k, v in kwargs.items()])

        logging.debug("Input File Name  " +args[1])
        filename = '%s.pickle' % (args[1])

        logging.debug("Pickling file name %s "% filename)

        if os.path.exists(filename):
            pickled = open(filename, 'rb')
            result = pickle.load(pickled)
            pickled.close()
        else:
            result = f(*args, **kwargs)
            pickled = open(filename, 'wb')
            pickle.dump(result, pickled)
            pickled.close()
        return result
Example #43
def get_posteriors(N=3):
    '''Return doc- and corpus-level posterior probabilities of each word type.

    Posteriors are based on n-grams of size 1 through N (inclusive).
    Posteriors are stored and returned as a dictionary:

        posteriors = {
            'doc': {
                doc1.id : {
                    0: {
                        tok1.id: <posterior>,
                        tok2.id: <posterior>,
                        ...
                        },
                    ...
                    },
                ...
                },
            'corpus': {
                0: {
                    tok1.id: <posterior>,
                    tok2.id: <posterior>,
                    ...
                    },
                ...
                },
            }
    '''
    try:
        with open(POSTERIOR_FILE % N, 'rb') as f1:

            with open(INFORMATIVITY_FILE % N, 'rb') as f2:
                assert f2

            return pickle.load(f1)

    except (IOError, EOFError, AssertionError):
        return _get_posteriors(N)
Example #44
    def __init__(self, **karg):
        super(Cache2, self).__init__()
        Cache2._CACHES.append(self)
        _karg = {'poolsize': 50, 'base': None}
        _karg.update(karg)

        self.poolsize = _karg['poolsize']
        self.pool = dict()
        self.ttl = dict()
        self.tick = 0
        self.base = _karg['base']

        if _karg['base']:
            if _karg['base'] not in Cache2._CACHEFILES:
                Cache2._CACHEFILES.add(_karg['base'])
            else:
                raise Exception, 'Basefiles conflicts [{0}]'.\
                        format(_karg['base'])
            if os.path.exists(_karg['base']):
                with open(_karg['base'],'rb') as basefile:
                    self.poolsize, self.pool, self.ttl, self.tick = pickle.load(
                        basefile)
                self.base = _karg['base']
Example #45
    def __init__(self, **karg):
        super(Cache2, self).__init__()
        Cache2._CACHES.append(self)
        _karg = {'poolsize': 50, 'base': None}
        _karg.update(karg)

        self.poolsize = _karg['poolsize']
        self.pool = dict()
        self.ttl = dict()
        self.tick = 0
        self.base = _karg['base']

        if _karg['base']:
            if _karg['base'] not in Cache2._CACHEFILES:
                Cache2._CACHEFILES.add(_karg['base'])
            else:
                raise Exception, 'Basefiles conflicts [{0}]'.\
                        format(_karg['base'])
            if os.path.exists(_karg['base']):
                with open(_karg['base'], 'rb') as basefile:
                    self.poolsize, self.pool, self.ttl, self.tick = pickle.load(
                        basefile)
                self.base = _karg['base']
Example #46
def get_informativity(N=3):
    '''Return doc- and corpus-level informativity of each word type.

    Informativity is based on n-grams of size 1 through N (inclusive).
    Informativity results are stored and returned as a dictionary:

        informativity = {
            'doc': {
                doc1.id : {
                    sigma1: {
                        1: <bigram_informativity>,
                        2: <trigram_informativity>,
                        ...
                        },
                    ...
                    },
                ...
                },
            'corpus': {
                sigma1: {
                    1: <bigram_informativity>,
                    2: <trigram_informativity>,
                    ...
                    },
                ...
                },
            }
    '''
    try:
        with open(INFORMATIVITY_FILE % N, 'rb') as f:
            return pickle.load(f)

    except (IOError, EOFError):
        # informativity is calculated at the same time as posteriors
        get_posteriors(N)

        return get_informativity(N)
Example #47
def random_h(G,stringie,dimension,num_iter,max_index):
	h=[];
	ht=[];
	for i in range(num_iter):
		namefile=stringie+str(i)+'.pck';
		gen_temp=pickle.load(open(namefile));
		if gen_temp.has_key(dimension):
			h.append(network_hollowness(gen_temp[dimension],max_index));
			ht.append(network_weighted_hollowness(G,gen_temp[dimension],max_index));
	print 'h='+str(np.mean(h))+'+-'+str(np.std(h));
	print 'ht='+str(np.mean(ht))+'+-'+str(np.std(ht));
	if len(h)>=1:
		a=np.mean(h);
		b=np.std(h);
	else:
		a=0;
		b=0;
	if len(ht)>1:
		c=np.mean(ht);
		d=np.std(ht);
	else:
		c=0;
		d=0;
	return a,b,c,d;
Example #48
def load_pyeq(equilibrium,filename):
	try:
		import cPickle as pickle
	except ImportError:
		import pickle
		
	#First try loading filename as given
	
	try:
		file = open(filename,'rb')
	except:
		#Next try append the .pyeq filename
		filename += '.pyeq'
		try:
			file = open(filename,'rb')
		except:
			raise IOError('Error: unable to open file --> '+filename)
	
	try:
		eq = pickle.load(file)
	except:
		raise IOError('Error: unable to unpickle data from file --> '+filename)
	
	return eq
Example #49
    def test_variant_ordering_no_stress(self):
        # ensure that the syllabifier returns variants in order from most
        # preferred to least preferred
        F = FinnSyll(split=True, variation=True, rules=False, stress=False)

        with open('tests/ranked_sylls.pickle', 'rb') as f:
            pairs = pickle.load(f)

        errors = 0

        for i, expected in pairs.items():

            try:
                test = F.syllabify(unicode(i, 'utf-8').lower())

            except (TypeError, NameError):
                test = F.syllabify(i.lower())

            try:
                self.assertEqual(test, expected)

            except AssertionError as e:
                errors += 1
                message = ''

                for line in e.message.split('\n'):

                    if line.startswith('-'):
                        message += line + '\n'
                    elif line.startswith('+'):
                        message += line

                print(message + '\n')

        if errors:
            raise AssertionError(errors)
Example #50
def getRatios():

    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    import datetime
    pairsFile = "ratioHistory.d"

    try:
        pairsDict = pickle.load(open(pairsFile, "rb"))
    except IOError:
        from sys import getsizeof
        from symbols import symbolList
        import numpy as np
        from processFuncs import writeLog
        from math import log
        #Then we need to create the file
        # Structure {Symbol:[12.12,13,13.2...],...]
        # In our program, we need to retrieve the history array of open,close values and then append the latest values for each
        # symbol
        # For testing, just retrieve the latest history
        from createHistory import getHistory

        # This is how we get the price ratio
        # This will get us the average price and then divide them
        # np.divide(np.average(histDict['AAPL']),np.average(histDict['AMZN']))

        # This will create an array that shows the history of the price ratios
        # np.divide(histDict['AAPL'],histDict['AMZN'])

        # Find the standard deviation of the price ratios
        # np.std(np.divide(histDict['AAPL'],histDict['AMZN']))
        pairsDict = {}
        pairsHistory = []
        symbol1Arry = []
        symbol2Arry = []
        savePair = 0
        dupeList = symbolList[:]
        stockHist = getHistory(
        )  #This will be the entire history in a dictionary with key=symbol the dates are based on order
        writeLog("Creating Pairs")
        for symbol in symbolList:
            # Don't need to do this since we already have a history function
            #symbolArry = stockHist[symbol]#map(lambda x: x['Close'],db.nasdaq.find({'Symbol':symbol},{'Close': 1, '_id':0 }))
            symbolArry = map(lambda x: (x[0], x[4]), stockHist[symbol])
            for symbol2 in dupeList:
                symbol2Arry = []
                if symbol2 == symbol:
                    savePair = 0
                    continue

                #symbol2Arry = stockHist[symbol2] #map(lambda x: x['Close'],db.nasdaq.find({'Symbol':symbol2},{'Close': 1, '_id':0 }))
                symbol2Arry = map(lambda x: (x[0], x[4]), stockHist[symbol2])

                minLen = min(len(symbolArry), len(symbol2Arry))

                if minLen > 0:
                    symbolArry = symbolArry[-minLen:]
                    symbol2Arry = symbol2Arry[-minLen:]
                    savePair = 1

                #if len(symbolArry)==len(symbol2Arry):
                #    savePair=1

                if savePair:
                    # Need to create another dictionary that stores the average and standard deviation
                    # Also, need to keep track of correlation
                    # For each day we need to divide
                    symbol2dict = dict(symbol2Arry)
                    doPairAppend = pairsHistory.append
                    for day in symbolArry:
                        dayDate = day[0]
                        dayClose = day[1]
                        if dayDate not in symbol2dict:
                            continue
                        day2Close = symbol2dict[dayDate]

                        doPairAppend(
                            (dayDate,
                             log(
                                 round(
                                     np.divide(float(dayClose),
                                               float(day2Close)), 5))))

                    #print pairsHistory
                    #print symbol,symbol2
                    #raw_input()
                    pairsDict[("%s %s") % (symbol, symbol2)] = pairsHistory

                    if False:
                        for day in symbolArry:
                            dayDate = day[0]
                            day2Close = day[1]
                            #The date is stored in GMT time, so sometimes it has standard time and sometimes it's daylight
                            #time. Have to remove the GMT portion. Not sure if in import or when creating history.
                            if dayDate not in symbol2dict:
                                continue
                            dayClose = symbol2dict[dayDate]
                            pairsHistory.append(
                                (dayDate,
                                 round(np.divide(day2Close, dayClose), 5)))

                        pairsDict[("%s %s") % (symbol2, symbol)] = pairsHistory
                    #pairsHistory=np.divide(symbol2Arry,symbolArry,)
                    #pairsDict[("%s %s")%(symbol2,symbol)]=pairsHistory

                pairsHistory = []
                symbol1Arry = []
                symbol2Arry = []
                savePair = 0

            #if len(dupeList):
            #    dupeList.pop(0)
            #else:
            #    break

        pickle.dump(pairsDict, open(pairsFile, "wb"))
    return pairsDict
Example #51
            updates=updates)

    def train(self, datas):
        total_cost = 0.0
        random.shuffle(datas)
        for i in range(len(datas)):
            #for i in range(1):
            batch_sentences = datas[i][0]
            batch_answers = datas[i][1]
            cost, pred = self.train_model(batch_sentences, batch_answers, 0)
            #print str(pred) +"\t"+ str(batch_answers)+"\t"+str(norm_weight)
            total_cost += cost
        print "total cost is " + str(total_cost / len(datas))


data_set = pickle.load(open("data/imdb.pkl"))
"""
train data  : 20000
test data   : 5000

train_set[0] : uni-gram word sequence
train_set[1] : class of data

test_set[0] : uni-gram word sequence
test_set[1] : class of data

Total word set (vocabulary) : 91189
Training data input words :
"""
train_set = data_set['train']
test_set = data_set['test']
Example #52
 def load(self, fileobject=None):
     fileobject, close = file_open(fileobject or self._cachefile, 'rb')
     self._symcache = pickle.load(fileobject)
     if close:
         fileobject.close()
Example #53
import pickle
from StringIO import StringIO


class SimpleObject(object):

    def __init__(self, name):
        self.name = name
        self.name_backwards = name[::-1]
        return

data = []
data.append(SimpleObject('pickle'))
data.append(SimpleObject('cpickle'))
data.append(SimpleObject('last'))

#Simulate a file with StringIO
out_s = StringIO()

#Write to the stream
for o in data:
    print 'WRITING : %s (%s)' %(o.name, o.name_backwards)
    pickle.dump(o, out_s)
    out_s.flush()

#Set up a read-able stream
in_s = StringIO(out_s.getvalue())

#Read the data
while True:
    try:
        o = pickle.load(in_s)
    except EOFError:
        break
    else:
        print 'READ : %s (%s)' % (o.name, o.name_backwards)
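The snippet above is Python 2 (print statements, StringIO holding byte strings). Under Python 3 the same in-memory round trip would use io.BytesIO, since pickle reads and writes bytes; a minimal sketch, reusing the data list built above:

# Python 3 variant of the same round trip (assumes SimpleObject and data as above).
import io
import pickle

out_s = io.BytesIO()
for o in data:
    pickle.dump(o, out_s)

in_s = io.BytesIO(out_s.getvalue())
while True:
    try:
        o = pickle.load(in_s)
    except EOFError:
        break
    print('READ : %s (%s)' % (o.name, o.name_backwards))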
Example #54
def getRatios():
    
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    import datetime
    pairsFile = "ratioHistory.d"

    try:
        pairsDict=pickle.load( open( pairsFile, "rb" ) )
    except IOError:
        from sys import getsizeof
        from symbols import symbolList
        import numpy as np
        from processFuncs import writeLog
        from math import log
        #Then we need to create the file
        # Structure {Symbol:[12.12,13,13.2...],...]
        # In our program, we need to retrieve the history array of open,close values and then append the latest values for each 
        # symbol
        # For testing, just retrieve the latest history
        from createHistory import getHistory
        
        # This is how we get the price ratio
        # This will get us the average price and then divide them
        # np.divide(np.average(histDict['AAPL']),np.average(histDict['AMZN']))
        
        # This will create an array that shows the history of the price ratios
        # np.divide(histDict['AAPL'],histDict['AMZN'])

        # Find the standard deviation of the price ratios
        # np.std(np.divide(histDict['AAPL'],histDict['AMZN']))
        pairsDict={}
        pairsHistory=[]
        symbol1Arry=[]
        symbol2Arry=[]
        savePair=0
        dupeList=symbolList[:]
        stockHist = getHistory() #This will be the entire history in a dictionary with key=symbol the dates are based on order
        writeLog("Creating Pairs")
        for symbol in symbolList:
            # Don't need to do this since we already have a history function
            #symbolArry = stockHist[symbol]#map(lambda x: x['Close'],db.nasdaq.find({'Symbol':symbol},{'Close': 1, '_id':0 }))
            symbolArry = map(lambda x:(x[0],x[4]),stockHist[symbol]) 
            for symbol2 in dupeList:
                symbol2Arry=[]
                if symbol2 == symbol:
                    savePair=0
                    continue

                #symbol2Arry = stockHist[symbol2] #map(lambda x: x['Close'],db.nasdaq.find({'Symbol':symbol2},{'Close': 1, '_id':0 }))
                symbol2Arry=map(lambda x:(x[0],x[4]),stockHist[symbol2])

                minLen=min(len(symbolArry),len(symbol2Arry))
                
                if minLen>0:
                    symbolArry=symbolArry[-minLen:]
                    symbol2Arry=symbol2Arry[-minLen:]
                    savePair=1

                #if len(symbolArry)==len(symbol2Arry):
                #    savePair=1
                
                if savePair:
                    # Need to create another dictionary that stores the average and standard deviation
                    # Also, need to keep track of correlation
                    # For each day we need to divide
                    symbol2dict=dict(symbol2Arry)
                    doPairAppend=pairsHistory.append
                    for day in symbolArry:
                        dayDate=day[0]
                        dayClose=day[1]
                        if dayDate not in symbol2dict:
                            continue
                        day2Close=symbol2dict[dayDate]
                        
                        doPairAppend((dayDate,log(round(np.divide(float(dayClose),float(day2Close)),5))))
                    
                    #print pairsHistory
                    #print symbol,symbol2
                    #raw_input()
                    pairsDict[("%s %s")%(symbol,symbol2)]=pairsHistory
                        
                    if False:
                        for day in symbolArry:
                            dayDate=day[0]
                            day2Close=day[1]
                            #The date is stored in GMT time, so sometimes it has standard time and sometimes it's daylight
                            #time. Have to remove the GMT portion. Not sure if in import or when creating history.
                            if dayDate not in symbol2dict:
                                continue
                            dayClose=symbol2dict[dayDate]
                            pairsHistory.append((dayDate,round(np.divide(day2Close,dayClose),5)))
                       
                        pairsDict[("%s %s")%(symbol2,symbol)]=pairsHistory
                    #pairsHistory=np.divide(symbol2Arry,symbolArry,)
                    #pairsDict[("%s %s")%(symbol2,symbol)]=pairsHistory

                
                pairsHistory=[]
                symbol1Arry=[]
                symbol2Arry=[]
                savePair=0
                
            #if len(dupeList):
            #    dupeList.pop(0) 
            #else:
            #    break
            
        pickle.dump( pairsDict, open(pairsFile, "wb" ))
    return pairsDict
Example #55
# Theunissen Lab dependencies.
from lasp.signal import lowpass_filter, highpass_filter
from lasp.sound import spectrogram, plot_spectrogram
from ipywidgets import *

# Specify the input file again. The segmented file is a pickle file that has a single block but
# multiple labelled segments corresponding to sections with song.  It was created with audiosegmentnix.py (in songephys).
experiment_dir = "/auto/tdrive/billewood/intan data/LbY6074"
pkl_file_in = "LbY6074__161215_132633_seg_c1.pkl"
#pkl_file_out = "LbY6074__161216_104808_seg_c1.pkl"  # c1 for check 1.  You could use the same name and it will overwrite
# Open the input pickle file
pklFileIn = open(os.path.join(experiment_dir, 'NIX', pkl_file_in), 'rb')

# Read the data
block = pickle.load(pklFileIn)

pklFileIn.close()

print 'Found', len(block.segments), 'segments:'
nstudent = 0
ntutor = 0
for i, seg in enumerate(block.segments):
    if seg.name.startswith('student'):
        nstudent += 1
    elif seg.name.startswith('tutor'):
        ntutor += 1
print '\t', nstudent, 'student songs and', ntutor, 'tutor songs.'

# Add a little time before and after each event.
# times before and after were already added before segmentation
Example #56
 def load_pickle(name):
     with open(name, 'rb') as p:  # must open for reading; the original 'wb' would truncate the file
         return P.load(p)
Example #57
 def load(self, fileobject=None):
     fileobject, close = file_open(fileobject or self._cachefile, 'rb')
     self._symcache = pickle.load(fileobject)
     if close:
         fileobject.close()
Example #58
#!/usr/bin/env python

try:
    import cPickle as pickle

except ImportError:
    import pickle

from collections import defaultdict
from nltk.corpus import brown, cmudict

FILENAME = 'dictionary.pickle'

try:
    Dictionary = pickle.load(open(FILENAME))

except IOError:
    cmudict = cmudict.dict()

    BROWN_CATEGORIES = [
        'lore',
        'learned',
        'fiction',
        'mystery',
        'science_fiction',
        'adventure',
        'romance',
        'humor',
        ]

    VOWELS = [
def load_table(filepath):
    with open(filepath, 'rb') as f:
        table_data = pickle.load(f)
    return Table(table_data[0], table_data[1], table_data[2], table_data[3])
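load_table expects the file to hold a four-element sequence matching the Table constructor's positional arguments. A hypothetical companion writer, for illustration only (the field names are assumptions, not part of the original API):

import pickle


def save_table(filepath, field0, field1, field2, field3):
    # Hypothetical helper: writes the 4-element layout that load_table above unpacks.
    with open(filepath, 'wb') as f:
        pickle.dump((field0, field1, field2, field3), f)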