Example #1
    def _save(self):
        """
        Saves configuration data to the specified .ncfg (YAML) file.

        """

        try:
            with open(self.filename, 'w') as f:
                settings = oDict([('format', self.format),
                                  ('analysis_mode', self.analysis_mode),
                                  ('mode_id', self.mode_id),
                                  ('graphic_format', self.graphic_format),
                                  ('unit_no', self.unit_no),
                                  ('lfp_file', self.lfp_file),
                                  ('cell_type', self.cell_type),
                                  ('spike_file', self.spike_file),
                                  ('spatial_file', self.spatial_file),
                                  ('nwb_file', self.nwb_file),
                                  ('excel_file', self.excel_file),
                                  ('data_directory', self.data_directory)])

                cfgData = oDict([('settings', settings),
                                 ('analyses', self.analyses),
                                 ('parameters', self.parameters)])

                yaml.dump(cfgData, f, default_flow_style=False)
        except Exception:
            logging.error(
                'Configuration cannot be saved in the specified file!')
Example #2
    def __init__(self, **kwargs):
        """
        Instantiate the `NBase` class.

        Parameters
        ----------
        **kwargs
            Keyword arguments

        """
        super().__init__(**kwargs)
        self._spikes = []
        self._spikes_by_name = oDict()
        self._lfp = []
        self._lfp_by_name = oDict()

        self._record_info = {'File version': '',
                             'Date': '',
                             'Time': '',
                             'Experimenter': '',
                             'Comments': '',
                             'Duration': 0,
                             'No of channels': 1,
                             'Bytes per timestamp': 1,
                             'Sampling rate': 1,
                             'Bytes per sample': 1,
                             'ADC Fullscale mv': 1,
                             'Format': 'Axona',
                             'Source': self._filename}
        self.__type = 'base'
Example #3
    def test_dynamic_float(self):
        r = Route("/floatAction/<f:value>", 'get', FooController, 'foo_action')
        self.assertEqual("^/floatAction/(\\d+(?:\\.\\d+)?)/$", r.reg_uri)
        self.assertEqual(oDict({'value': float}), r.name_and_type)
        self.assertEqual(False, r.is_static())

        r = Route("/floatAction/<f:value>/suffix", 'get', FooController, 'foo_action')
        self.assertEqual("^/floatAction/(\\d+(?:\\.\\d+)?)/suffix/$", r.reg_uri)
        self.assertEqual(oDict({'value': float}), r.name_and_type)
        self.assertEqual(False, r.is_static())
Example #4
    def test_dynamic_default2(self):
        r = Route("/save/<:name>", 'get', FooController, 'foo_action')
        self.assertEqual("^/save/([^/]+)/$", r.reg_uri)
        self.assertEqual(oDict({'name': str}), r.name_and_type)
        self.assertEqual(False, r.is_static())

        r = Route("/save/<:name>/suffix", 'get', FooController, 'foo_action')
        self.assertEqual("^/save/([^/]+)/suffix/$", r.reg_uri)
        self.assertEqual(oDict({'name': str}), r.name_and_type)
        self.assertEqual(False, r.is_static())
Example #5
    def test_dynamic_list(self):
        r = Route("/save/<a:ids>", 'get', FooController, 'foo_action')
        self.assertEqual("^/save/([^/|^,]*(?:,[^/]*))/$", r.reg_uri)
        self.assertEqual(oDict({'ids': list}), r.name_and_type)
        self.assertEqual(False, r.is_static())

        r = Route("/save/<a:ids>/suffix", 'get', FooController, 'foo_action')
        self.assertEqual("^/save/([^/|^,]*(?:,[^/]*))/suffix/$", r.reg_uri)
        self.assertEqual(oDict({'ids': list}), r.name_and_type)
        self.assertEqual(False, r.is_static())
Example #6
    def test_dynamic_int(self):
        r = Route("/save/<i:age>", 'get', FooController, 'foo_action')
        self.assertEqual("^/save/(\\d+)/$", r.reg_uri)
        self.assertEqual(FooController, r.controller)
        self.assertEqual('foo_action', r.action)
        self.assertEqual(oDict({'age': int}), r.name_and_type)
        self.assertEqual(False, r.is_static())

        r = Route("/save/<i:age>/suffix", 'get', FooController, 'foo_action')
        self.assertEqual("^/save/(\\d+)/suffix/$", r.reg_uri)
        self.assertEqual(oDict({'age': int}), r.name_and_type)
        self.assertEqual(False, r.is_static())
Example #7
 def to_dict(self):
     return oDict([
             ("format_version", self.format_version()),
             ("time_stamp", self.time_stamp()),
             ("author", self.author()),
             ("app", self.app())
             ])
Example #8
def grab_partitions(dev):
    drive_name = os.path.basename(dev)
    parts = oDict()
    #o = b''.join(sys_command('/usr/bin/lsblk -o name -J -b {dev}'.format(dev=dev)).exec())
    o = b''.join(sys_command('/usr/bin/lsblk -J {dev}'.format(dev=dev)).exec())
    if b'not a block device' in o:
        ## TODO: Replace o = sys_command() with code, o = sys_command()
        ##       and make sys_command() return the exit-code, way safer than checking output strings :P
        return {}

    if not o[:1] == b'{':
        print('[E] Error in getting blk devices:', o)
        exit(1)

    r = json.loads(o.decode('UTF-8'))
    if len(r['blockdevices']) and 'children' in r['blockdevices'][0]:
        for part in r['blockdevices'][0]['children']:
            #size = os.statvfs(dev + part['name'][len(drive_name):])
            parts[part['name'][len(drive_name):]] = {
                #'size' : size.f_frsize * size.f_bavail,
                #'blocksize' : size.f_frsize * size.f_blocks
                'size': part['size']
            }

    return parts
Example #9
def residual_stat(y, y_fit, p):
    """
    Calculate the goodness of fit and other residual statistics.

    These are calculated between observed and fitted values from a model.

    Parameters
    ----------
    y : ndarray
        Observed data
    y_fit : ndarray
        Fitted data to a linear model
    p : int
        Model order

    Returns
    -------
    _results : dict
        Dictionary of residual statistics

    """
    # p= total explanatory variables excluding constants
    _results = oDict()
    res = y - y_fit
    ss_res = np.sum(res**2)
    ss_tot = np.sum((y - np.mean(y))**2)
    r_sq = 1 - ss_res / ss_tot
    adj_r_sq = 1 - (ss_res / ss_tot) * ((len(y) - 1) / (len(y) - p - 1))
    _results['Pearson R'], _results['Pearson P'] = stats.pearsonr(y, y_fit)

    _results['Rsq'] = r_sq
    _results['adj Rsq'] = adj_r_sq

    return _results
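
A minimal usage sketch with hypothetical data (assumes numpy and scipy.stats are imported as in the function above):

y = np.array([1.0, 2.1, 2.9, 4.2, 5.1])        # observed values
y_fit = np.array([1.0, 2.0, 3.0, 4.0, 5.0])    # values fitted by a one-variable linear model
res_stats = residual_stat(y, y_fit, 1)         # p = 1 explanatory variable
print(res_stats['Rsq'], res_stats['adj Rsq'], res_stats['Pearson R'])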
Example #10
def get_yahoo_hft_data(ticker_symbol, **kwargs):
    kwkeys = list(kwargs.keys())
    if 'days' not in kwkeys:
        hist_value = 1
    else:
        hist_value = kwargs['days']
    try:
        urlToVisit = 'http://chartapi.finance.yahoo.com/instrument/1.0/' + ticker_symbol + \
                     '/chartdata;type=quote;range=%sd/csv' % (str(hist_value))
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        splitSource = splitSource.split('\n')
        splitSource = [i for i in splitSource if (len(i.split(',')) == 6) and ('values' not in i)]
        ydt_time, ydt_close, ydt_high, ydt_low, ydt_open, ydt_volume = [], [], [], [], [], []
        for i in splitSource:
            x = i.split(',')
            ydt_time.append(int(x[0]))
            ydt_close.append(float(x[1]))
            ydt_high.append(float(x[2]))
            ydt_low.append(float(x[3]))
            ydt_open.append(float(x[4]))
            ydt_volume.append(float(x[5]))
        ydt_time_readable = [dt.datetime.fromtimestamp(i) for i in ydt_time]
        ydt_date = [dt.datetime(i.year, i.month, i.day) for i in ydt_time_readable]
        ydt_time = [i.hour*10000+i.minute*100+i.second for i in ydt_time_readable]
        local_yd1 = cruf.DataFrame(data=oDict([('Date', ydt_date), ('Time', ydt_time), ('Open', ydt_open),
                                          ('High', ydt_high), ('Low', ydt_low), ('Close', ydt_close)]))
        return local_yd1
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info()[0])
Example #11
    def checkbox(self,
                 name,
                 value="1",
                 id_="",
                 checked=False,
                 disabled=False,
                 class_='',
                 html=None):
        html = html or {}
        d = oDict()
        if self.model is None:
            name = name or "checkbox"
            if checked is True and 'checked' not in html:
                d['checked'] = "checked"
            d.update(html)
        else:
            if str(self._get_model_value(name)) == str(value):
                d['checked'] = "checked"
            if not id_:
                id_ = '%s_%s_%s' % (self.model.name_for_view(), name, value)
            d.update(html)

        return self.text(name=name,
                         value=value,
                         id_=id_,
                         tp="checkbox",
                         disabled=disabled,
                         class_=class_,
                         html=d)
Example #12
def get_yahoo_obk_data(ticker_symbol):
    try:
        urlToVisit = 'http://finance.yahoo.com/q/ecn?s='+ticker_symbol+'+Order+Book'
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        soup = BeautifulSoup(splitSource, 'html.parser')  # explicit parser avoids bs4's warning
        table = soup.findAll('table')
        table = table[1:3]
        bid_price, bid_size, ask_price, ask_size = [], [], [], []
        depth = [1, 2, 3, 4, 5]
        for k, i in enumerate(table):
            notag = re.sub('<.*?>','/t',str(i))
            notag = notag.split(sep='/t')
            notag = [j for j in notag if (j != '') and (j != ' ')]
            notag = notag[3:len(notag)]
            if k == 0:
                bid_price = [float(j) for j in notag[0:9:2]]
                bid_size = [float(j) for j in notag[1:10:2]]
            else:
                ask_price = [float(j) for j in notag[0:9:2]]
                ask_size = [float(j) for j in notag[1:10:2]]
        orderBook = cruf.DataFrame(data=oDict([('Depth', depth), ('BidPrice', bid_price), ('BidSize', bid_size),
            ('AskPrice', ask_price), ('AskSize', ask_size)]))
        return orderBook
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info()[0])
Example #13
 def __init__(self, dict_factory=None, registry=None):
     self._dict_factory = oDict
     self._registry = oDict()
     if dict_factory is not None:
         self.dict_factory = dict_factory
     if registry is not None:
         self.registry = registry
Example #14
    def __init__(self, **kwargs):
        """
        Instantiate the `NAbstract` class.

        Parameters
        ----------
        **kwargs
            Keyword arguments, directly supported are:
            filename, system, and name

        Returns
        -------
        None

        """
        self._filename = kwargs.get('filename', '')
        self._system = kwargs.get('system', 'Axona')
        self._name = kwargs.get('name', 'c0')
        self._description = ''
        self._results = oDict()
        self._record_info = {'File version': '',
                             'Date': '',
                             'Time': '',
                             'Experimenter': '',
                             'Comments': '',
                             'Duration': 0,
                             'Format': 'Axona',
                             'Source': self._filename}

        self.__type = 'abstract'
Example #15
    def __init__(self):
        """
        Attributes
        ----------
        spatial : NSpatial
            Spatial data object
        spike : NSpike
            Spike data object
        lfp : NLfp
            LFP data object
        hdf : Nhdf
            Object for manipulating HDF5 file
        data_format : str
            Recording system or format of the data file

        """

        super().__init__()
        self.spike = NSpike(name='C0')
        self.spatial = NSpatial(name='S0')
        self.lfp = NLfp(name='L0')
        self.data_format = 'Axona'
        self._results = oDict()
        self.hdf = Nhdf()

        self.__type = 'data'
Example #16
    def event_trig_average(self, event_stamp=None, **kwargs):
        """
        Average event-triggered LFP signals.
        
        Parameters
        ----------
        event_stamp : ndarray
            Timestamps of the events or the spiking activities for measuring the
            event triggered average of the LFP signal
        **kwargs
            Keyword arguments
 
        Returns
        -------
        dict
            Graphical data of the analysis

        """

        graph_data = oDict()
        window = np.array(kwargs.get('window', [-0.5, 0.5]))
        #        mode = kwargs.get('mode', None)

        if event_stamp is None:
            spike = kwargs.get('spike', None)

            data_type = None
            try:
                data_type = spike.get_type()
            except Exception:
                logging.error(
                    'The data type of the added object cannot be determined!')

            if data_type == 'spike':
                event_stamp = spike.get_unit_stamp()
            elif spike in self.get_spike_names():
                event_stamp = self.get_spike(spike).get_unit_stamp()

        if event_stamp is None:
            logging.error('No valid event timestamp or spike is provided')
        else:
            lfp = self.get_samples() * 1000
            Fs = self.get_sampling_rate()
            time = self.get_timestamp()
            center = time.searchsorted(event_stamp, side='left')
            win = np.ceil(window * Fs).astype(int)
            win = np.arange(win[0], win[1])

            # Keep windows within data
            center = np.array([c for c in center
                               if c + win[0] >= 0 and c + win[-1] < time.size])

            # Start the running sum from zeros so that reduce() does not treat the
            # first centre index itself as an LFP segment
            eta = reduce(lambda y, x: y + lfp[x + win], center, np.zeros(win.size))
            eta = eta / center.size

            graph_data['t'] = win / Fs
            graph_data['ETA'] = eta
            graph_data['center'] = center

        return graph_data
Example #17
 def set_registry(self, new_registry):
     it = new_registry.items() if isinstance(
         new_registry, Mapping) else iter(new_registry)
     new_registry = oDict((k, Coder.cast(v)) for k, v in it)
     old_registry = self.registry
     if new_registry != old_registry:
         self._registry.clear()
         self._registry.update(new_registry)
Example #18
def listToDictKeys(ilist):
    """
    Convert a list to an ordered dict mapping each (validated) item name to its
    index in the list.
    """
    res = oDict()
    for ii, key in enumerate(ilist):
        res[validateDimNames(str(key))] = ii
    return res
Example #19
def create_key_table():
    result = oDict()
    keys = list()
    for name in dir(Qt.Key):
        attr = getattr(Qt.Key, name)
        if isinstance(attr, Qt.Key):
            keys.append(attr)
    keys.sort()
    for k in keys:
        result[k] = False
    return result
Example #20
    def __init__(self):
        """See NData class description."""
        super().__init__()
        self.spike = NSpike(name='C0')
        self.spatial = NSpatial(name='S0')
        self.lfp = NLfp(name='L0')
        self.data_format = 'Axona'
        self._results = oDict()
        self.hdf = Nhdf()

        self.__type = 'data'
Example #21
def create_native_key_table():
    result = oDict()
    keys = list()
    for name in dir(VirtualKeyCodes):
        attr = getattr(VirtualKeyCodes, name)
        if isinstance(attr, VirtualKeyCodes):
            keys.append(attr)
    keys.sort()
    for k in keys:
        result[k] = False
    return result
Example #22
    def begin_render(self):
        d = oDict()
        if self.id: d['id'] = self.id
        d['action'] = self.action
        d['method'] = self.method
        d['accept-charset'] = self.charset
        if self.method.upper() == 'POST' and self.multipart:
            d['enctype'] = "multipart/form-data"

        d.update(self.html)
        return '<form %s>' % ' '.join(
            ['%s="%s"' % (k, v) for k, v in d.items()])
Example #23
def get_yahoo_fqt_data(ticker_symbols):
    try:
        urlToVisit = 'http://finance.yahoo.com/d/quotes.csv?s='
        for i in ticker_symbols:
            urlToVisit += i + '+'
        urlToVisit = urlToVisit[0:(len(urlToVisit)-1)] + '&f=aa5bb6ohgl1d1t1v'
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        table = splitSource.split(sep='\n')
        table = table[0:(len(table)-1)]  # remove the '' at the end
        # replace all N/A by nan
        for i, j in enumerate(table):
            table[i] = re.sub('N/A', 'nan', j)
        ask_price, ask_size, bid_price, bid_size, open_price, high_price, low_price, \
            last_price, last_date, last_time, volume = [], [], [], [], [], [], [], [], [], [], []
        for i in range(len(table)):
            table_item = table[i].split(',')
            ask_price.append(float(table_item[0]))
            ask_size.append(float(table_item[1]))
            bid_price.append(float(table_item[2]))
            bid_size.append(float(table_item[3]))
            open_price.append(float(table_item[4]))
            high_price.append(float(table_item[5]))
            low_price.append(float(table_item[6]))
            last_price.append(float(table_item[7]))
            last_date_item = re.sub('"', '', table_item[8])
            last_date_item = last_date_item.split('/')
            last_date_item = [int(x) for x in last_date_item]
            last_date_item = dt.datetime(last_date_item[2], last_date_item[0], last_date_item[1])
            last_date.append(last_date_item)
            last_time_item = re.sub('"', '', table_item[9])
            last_time_ampm = last_time_item[-2:]
            last_time_rest = last_time_item[0:(len(last_time_item)-2)].split(':')
            last_time_rest = [int(x) for x in last_time_rest]
            if (last_time_ampm == 'am') and (last_time_rest[0] != 12):
                last_time.append(last_time_rest[0]*10000+last_time_rest[1]*100)
            elif (last_time_ampm == 'pm') and (last_time_rest[0] != 12):
                last_time.append(last_time_rest[0]*10000+last_time_rest[1]*100+120000)
            elif (last_time_ampm == 'am') and (last_time_rest[0] == 12):
                last_time.append(last_time_rest[1]*100)
            else:
                last_time.append(last_time_rest[0]*10000+last_time_rest[1]*100)
            volume.append(float(table_item[10]))
        futQuote = cruf.DataFrame(data=oDict([('Ticker', ticker_symbols), ('Date', last_date), ('Time', last_time),
                                            ('AskPrice', ask_price), ('AskSize', ask_size), ('BidPrice', bid_price),
                                            ('BidSize', bid_size), ('OpenPrice', open_price), ('HighPrice', high_price),
                                            ('LowPrice', low_price), ('LastPrice', last_price), ('Volume', volume)]))
        return futQuote
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info()[0])
Example #24
def getCcdVisitSchemaSql():
    """Define the schema for the CcdVisit table.

    Returns
    -------
    ccdVisitNames : `collections.OrderedDict`
        Names and SQL type declarations of the columns in the CcdVisit table.
    """
    return oDict([("ccdVisitId", "INTEGER PRIMARY KEY"), ("ccdNum", "INTEGER"),
                  ("filterName", "TEXT"), ("filterId", "INTEGER"),
                  ("ra", "REAL"), ("decl", "REAL"), ("expTime", "REAL"),
                  ("expMidptMJD", "REAL"), ("calibrationMean", "REAL"),
                  ("calibrationErr", "REAL")])
Example #25
    def __init__(self, filename=''):
        """
        See also Configuration.

        Parameters
        ----------
        filename : str
            Full file name of the configuration storage (.ncfg)

        """
        self.filename = filename
        self.format = 'Axona'
        self.analysis_mode = 'Single Unit'
        self.mode_id = 0
        self.graphic_format = 'pdf'
        self.valid_graphics = {'PDF': 'pdf', 'Postscript': 'ps'}
        self.unit_no = 0
        self.cell_type = ''
        self.spike_file = ''
        self.spatial_file = ''
        self.lfp_file = ''
        self.nwb_file = ''
        self.excel_file = ''
        self.data_directory = ''
        self.config_directory = ''
        self.analyses = {}
        self.parameters = {}
        self.special_analysis = {}

        for k, v in PARAMETERS.items():  # setting the default parameters
            self.parameters.update(v)

        for k, v in ANALYSES.items():  # setting the default analyses
            self.analyses.update({k: v})

        self.options = oDict()
        self.mode_dict = oDict([('Single Unit', 0),
                                ('Single Session', 1),
                                ('Listed Units', 2)])
Example #26
def parse_tick_chg_us():
    urlToVisit = 'http://www.nasdaq.com/markets/stocks/symbol-change-history.aspx?sortby=EFFECTIVE&descending=Y'
    sourceCode = web.urlopen(urlToVisit).read()
    splitSource = "".join(map(chr, sourceCode))
    soup = BeautifulSoup(splitSource, 'html.parser')
    table = soup.findAll('table')
    table_red = []
    for i in table:
        istr = str(i)
        if re.search('<td class="body2">', istr):
            table_red.append(istr)
    table_red = table_red[0]
    table_red = table_red.split('\n')
    hdata = []
    for i in table_red:
        notag = re.sub('<.*?>', '/t', str(i))
        if notag != '/t':
            hdata.append(notag.replace('/t','').strip())
    hdata = hdata[3:]
    old_ticks = hdata[0::3]
    new_ticks = hdata[1::3]
    eff_date = hdata[2::3]
    val2 = cruf.DataFrame(oDict([('old', old_ticks), ('new', new_ticks), ('eff', eff_date)]))
    new_file = crsf.cr_un.path('US_Universe_Cur.npz')
    # new_file = crsf.cr_un+'US_Universe_Cur.npz'
    if os.path.isfile(new_file):
        val = cruf.DataFrame.from_bin(new_file)
    else:
        val = cruf.DataFrame.from_bin(crsf.cr_un+'US_Universe_Orig.npz')
    repl_ticks = list(set(val['Ticker'].values).intersection(set(val2['old'].values)))
    if len(repl_ticks) > 0:
        val2 = val2[[i for i, j in enumerate(val2['old'].values) if j in repl_ticks], :]
        cur_date = dt.date.today()
        cur_date = dt.datetime(cur_date.year, cur_date.month, cur_date.day)
        for j in range(0, val2.shape[0]):
            dtj = val2[j, 'eff']
            dtj = dt.datetime(int(dtj[6:10]), int(dtj[0:2]), int(dtj[3:5]))
            if dtj <= cur_date:
                dt_loc = [i for i, k in enumerate(val['Ticker'].values) if k == val2[j, 'old']][0]
                val[dt_loc, 'Ticker'] = val2[j, 'new']
        val.to_bin(crsf.cr_un+'US_Universe_Cur')
        #### add extra code to rename any table here (not tested)
        for j in range(0, val2.shape[0]):
            crsf.cr_pr_rw.remove(val2[j, 'new'])
            crsf.cr_pr_dv.remove(val2[j, 'new'])
            crsf.cr_pr_cl.remove(val2[j, 'new'])
            crsf.cr_pr_rw.rename(val2[j, 'old'], val2[j, 'new'])
            crsf.cr_pr_dv.rename(val2[j, 'old'], val2[j, 'new'])
            crsf.cr_pr_cl.rename(val2[j, 'old'], val2[j, 'new'])
    return None
Example #27
    def reset_results(self):
        """
        Reset the NData results to an empty OrderedDict.

        Parameters
        ----------
        None

        Returns
        -------
        None

        """
        self._results = oDict()
Example #28
 def __init__(self,
              id,
              txt_file,
              xml_file,
              closure=(),
              lowercase=False,
              conflate_digits=False,
              ctakes_out_dir=False,
              pos=True,
              less_strict=False):
     self.txt_file = txt_file
     self.xml_file = xml_file
     self.id = id
     if less_strict:
         return
     self.text = self.read_txt(lowercase=lowercase,
                               conflate_digits=conflate_digits)
     self.events, self.timex3, self.tlinks = oDict(), oDict(), oDict()
     self.read_annotations()
     self.tokenizer = SimpleTokenizer()
     self.tokenization = self.tokenizer.tokenize(self.text)
     self.tokenization.assign_tokens_to_entities(self.events.values())
     self.tokenization.assign_tokens_to_entities(self.timex3.values())
     self.tokenization.assign_paragraph_to_entities(self.events.values())
     self.tokenization.assign_paragraph_to_entities(self.timex3.values())
     if ctakes_out_dir:
         self.tokenization.read_ctakes(self.id, ctakes_out_dir)
     elif pos:
         self.tokenization.POS_tag()
     for label in closure:
         self.closure(label)
     print('TLINKS:', len(self.tlinks))
     self.link_events()
     self.link_entities()
     self.doc_struct = None
     self.extra_events = {}
Example #29
	def __init__(self, srcPort):
	#UDP socket
		self.skt  = socket(AF_INET, SOCK_DGRAM) #UDP
		self.skt.bind(('', srcPort)) 			#used for recv
	#receivers, senders and ACK waiting list
		self.expId 		= ListDict()			#destAddr => a list of acceptable pktId
		self.nextId 	= ListDict()			#destAddr => lastPktId
		self.notACKed 	= oDict()				#(pktId, destAddr) => [timestamp, resendNum, sendPkt]
	#packet Buffer
		self.datPkts   = Queue()
	#coroutine
		spawn(self.recvLoop)
		spawn(self.ackLoop)
		sleep(0)
	#failed Connections
		self.failed = []
Example #30
def get_yahoo_eod_data(ticker_symbol, **kwargs):
    kwkeys = list(kwargs.keys())
    if 'end_date' in kwkeys:
        end_dt_value = kwargs['end_date'] if isinstance(kwargs['end_date'], dt.datetime) else \
            dt2.parse(kwargs['end_date'])
        # end_dt_value = dt2.parse(kwargs['end_date'])
        # end_dt_value = pext.process_date(kwargs['end_date'])
    else:
        end_dt_value = dt.datetime.now()
        end_dt_value = dt.datetime(end_dt_value.year, end_dt_value.month, end_dt_value.day)
    if 'start_date' in kwkeys:
        start_dt_value = kwargs['start_date'] if isinstance(kwargs['start_date'], dt.datetime) else \
            dt2.parse(kwargs['start_date'])
        # start_dt_value = pext.process_date(kwargs['start_date'])
    else:
        hist_value = 15000 if 'history' not in kwkeys else kwargs['history']
        start_dt_value = dt.datetime.fromordinal(end_dt_value.toordinal() - hist_value)
    try:
        urlToVisit = 'http://ichart.finance.yahoo.com/table.csv?s=%s' % ticker_symbol + \
            '&a=%s' % (start_dt_value.month-1) + '&b=%s' % (start_dt_value.day) + '&c=%s' % (start_dt_value.year) + \
            '&d=%s' % (end_dt_value.month-1) + '&e=%s' % (end_dt_value.day) + '&f=%s' % (end_dt_value.year) + \
            '&g=d&ignore=.csv'  # for dividends use g=v
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        splitSource = splitSource.split('\n')
        splitSource = [i for i in splitSource if (len(i.split(',')[0]) == 10) and ('Date' not in i)]
        ydt_date, ydt_open, ydt_high, ydt_low, ydt_close, ydt_volume, ydt_adjclose = [], [], [], [], [], [], []
        for i in splitSource:
            x = i.split(',')
            ydt_date.append(dt2.parse(x[0]))
            # ydt_date.append(pext.process_date(x[0]))
            ydt_open.append(float(x[1]))
            ydt_high.append(float(x[2]))
            ydt_low.append(float(x[3]))
            ydt_close.append(float(x[4]))
            ydt_volume.append(float(x[5]))
            ydt_adjclose.append(float(x[6]))
        local_yd1 = cruf.DataFrame(data=oDict([('Date', ydt_date), ('Open', ydt_open), ('High', ydt_high),
                                             ('Low', ydt_low), ('Close', ydt_close), ('Volume', ydt_volume),
                                             ('AdjClose', ydt_adjclose)]))
        local_yd1.sort('Date')
        # pext.sort_clean(local_yd1, 'Date')
        return local_yd1
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info()[0])
Example #31
 def label(self, name, value='', class_='', html=None):
     html = html or {}
     d = oDict()
     if self.model:
         d['for'] = '%s_%s' % (self.model.name_for_view(), name)
     else:
         d['for'] = name
     if not value:
         value = ' '.join([
             string.capwords(x.strip()) for x in label_value_p.split(name)
             if x and x.strip()
         ])
     if class_:
         d['class'] = class_
     d.update(html)
     return '<label %s>%s</label>' % (' '.join(
         ['%s="%s"' % (k, v) for k, v in d.items()]), value)
Example #32
    def button(self,
               value=None,
               name="button",
               tp="submit",
               disabled=False,
               html=None):
        html = html or {}
        d = oDict()
        d['name'] = name or "button"
        d['type'] = tp or 'submit'
        if disabled: d['disabled'] = 'disabled'

        d.update(html)
        return '<button %s>%s</button>' % \
                    (' '.join([ '%s="%s"' % (k, v) for k, v in d.items() ]),
                        value or "Button"
                    )
Example #33
def linfit(X, Y, getPartial=False):
    """
    Calculate the linear regression coefficients in the least-squares sense.
    
    Parameters
    ----------
    X : ndarray
        Matrix with input variables or factors (num_dim X num_obs)
    Y : ndarray
        Array of observation data
    getPartial : bool
        Get the partial correlation coefficients if 'True'
    
    Returns
    -------
    _results : dict
        Dictionary with results of least-square optimization of linear regression
        
    """    
    
    _results = oDict()
    if len(X.shape) == 2:
        Nd, Nobs = X.shape
    else:
        Nobs = X.shape[0]
        Nd = 1
    if Nobs == len(Y):
        # Design matrix: one row per observation, plus a constant column
        A = np.vstack([X, np.ones(Nobs)]).T
        B = np.linalg.lstsq(A, Y, rcond=-1)[0]
        Y_fit = np.matmul(A, B)
        _results['coeff'] = B[:-1]
        _results['intercept'] = B[-1]
        _results['yfit'] = Y_fit
        _results.update(residual_stat(Y, Y_fit, Nd))
    else:
        logging.error('linfit: Number of rows in X and Y does not match!')

    if Nd > 1 and getPartial:
        semiCorr = np.zeros(Nd) # Semi partial correlation
        for d in np.arange(Nd):
            part_results = linfit(np.delete(X, d, axis=0), Y, getPartial=False)
            semiCorr[d] = _results['Rsq'] - part_results['Rsq']
        _results['semiCorr'] = semiCorr

    return _results
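
A small usage sketch for linfit (assumes numpy is imported as np and that residual_stat from the earlier example is available), fitting a noisy straight line:

rng = np.random.default_rng(0)
x = np.linspace(0.0, 10.0, 50)
y_obs = 2.0 * x + 1.0 + rng.normal(scale=0.1, size=x.size)   # synthetic observations
fit = linfit(x, y_obs)
print(fit['coeff'], fit['intercept'], fit['Rsq'])            # slope ~2, intercept ~1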
Example #34
    def __init__(self, **kwargs):
        """
        Create a circular statistics object.

        Parameters
        ----------
        **kwargs: keyword arguments
            rho : ndarray
                Polar co-ordinate rho, the radii of points.
            theta : ndarray
                Polar co-ordinate theta, the angle of points.

        Returns
        -------
        None

        """
        self._rho = kwargs.get('rho', None)
        self._theta = kwargs.get('theta', None)
        self._result = oDict()
Example #35
def get_yahoo_fch_data(ticker_symbol):
    try:
        urlToVisit = 'http://finance.yahoo.com/q/fc?s=%s' % (ticker_symbol)
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        soup = BeautifulSoup(splitSource, 'html.parser')
        table = soup.findAll('table')
        tableRed = []
        for i in table:
            istr = str(i)
            if re.search(ticker_symbol, istr) and re.search('id="yfnc_futures"', istr):
                tableRed.append(istr)
        hdata = []
        for i in tableRed:
            notag = re.sub('<.*?>', '/t', str(i))
            notag = notag.split(sep='/t')
            notag = [re.sub('\xa0', '', j) for j in notag]
            notag = [j for j in notag if (j != '') and (j != ' ')]
            notag = [re.sub(',', '', j) for j in notag]
            eol_loc = 0
            for j, jd in enumerate(notag):
                if jd == '\n':
                    eol_loc = j
            notag = notag[0:eol_loc]
            if 'Get Futures Chain for:' not in notag:
                hdata.append(notag)
        if len(hdata) > 1:
            print('New Yahoo Futures Chain Format')
        else:
            hdata = hdata[0]
        hdata = hdata[4:len(hdata)] # removes Symbol, Name, Last Trade, Change
        numCont = round(len(hdata)/6)
        tickCont = [hdata[j] for j in [6*i for i in range(numCont)]]
        tickQuote = [float(hdata[j]) for j in [(6*i+2) for i in range(numCont)]]
        futQuote = cruf.DataFrame(data=oDict([('Ticker', tickCont), ('Last', tickQuote)]))
        return futQuote
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info()[0])
Example #36
def get_yahoo_div_data(ticker_symbol, **kwargs):
    kwkeys = list(kwargs.keys())
    if 'end_date' in kwkeys:
        end_dt_value = kwargs['end_date'] if isinstance(kwargs['end_date'], dt.datetime) else \
            dt2.parse(kwargs['end_date'])
    else:
        end_dt_value = dt.datetime.now()
        end_dt_value = dt.datetime(end_dt_value.year, end_dt_value.month, end_dt_value.day)
    if 'start_date' in kwkeys:
        start_dt_value = kwargs['start_date'] if isinstance(kwargs['start_date'], dt.datetime) else \
            dt2.parse(kwargs['start_date'])
    else:
        hist_value = 15000 if 'history' not in kwkeys else kwargs['history']
        start_dt_value = dt.datetime.fromordinal(end_dt_value.toordinal() - hist_value)
    try:
        urlToVisit = 'http://ichart.finance.yahoo.com/table.csv?s=%s' % ticker_symbol + \
            '&a=%s' % (start_dt_value.month-1) + '&b=%s' % (start_dt_value.day) + '&c=%s' % (start_dt_value.year) + \
            '&d=%s' % (end_dt_value.month-1) + '&e=%s' % (end_dt_value.day) + '&f=%s' % (end_dt_value.year) + \
            '&g=v&ignore=.csv'  # for dividends use g=v
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        splitSource = splitSource.split('\n')
        splitSource = splitSource[:-1]
        splitSource = [i for i in splitSource if (len(i.split(',')[0]) == 10) and ('Date' not in i)]
        if len(splitSource) > 0:
            ydt_date, ydt_div = [], []
            for i in splitSource:
                x = i.split(',')
                ydt_date.append(dt2.parse(x[0]))
                ydt_div.append(float(x[1]))
            local_yd1 = cruf.DataFrame(data=oDict([('Date', ydt_date), ('Dividend', ydt_div)]))
            local_yd1.sort('Date')
            return local_yd1
        else:
            return None
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info())
Example #37
    def get_results(self, spaces_to_underscores=False):
        """
        Return the parametric results of the analyses.

        Parameters
        ----------
        spaces_to_underscores : bool
            If True, any keys in the results dict containing spaces
            are converted to keys with underscores. This is useful
            for compatibility with R, for example.

        Returns
        -------
        OrderedDict

        """
        if spaces_to_underscores:
            results = oDict()
            for x, v in self._results.items():
                results[x.replace(' ', '_')] = v
            return results
        return self._results
Example #38
    def text(self,
             name,
             value=None,
             id_='',
             tp='text',
             placeholder='',
             size='',
             maxlength='',
             disabled=False,
             class_='',
             html=None,
             autoid=True):
        html = html or {}
        d = oDict()
        id_ = self._buildid_(id_, autoid, name)
        if id_:
            d['id'] = id_
        d['name'] = self._build_name(name)

        d['type'] = tp or 'text'
        if value or value == '':
            d['value'] = value
        elif self.model:
            d['value'] = self._get_model_value(name)
        if placeholder:
            d['placeholder'] = placeholder
        if size or size == 0:
            d['size'] = size
        if maxlength or maxlength == 0:
            d['maxlength'] = maxlength

        if disabled:
            d['disabled'] = "disabled"
        if class_:
            d['class'] = class_
        d.update(html)

        return '<input %s />' % ' '.join(
            ['%s="%s"' % (k, v) for k, v in d.items()])
Example #39
    def psth(self, event=None, spike=None, **kwargs):
        """
        Calculate the peri-stimulus time histogram (PSTH).

        Parameters
        ----------
        event 
            Event name or tag
        spike : NSpike
            NSpike object to characterize

        **kwargs
            Keyword arguments

        Returns
        -------
        dict
            Graphical data of the analysis

        """

        graph_data = oDict()
        if not event:
            event = self._curr_tag
        elif event in self._event_names:
            event = self.get_tag(event)
        if not spike:
            spike = kwargs.get('spike', 'xxxx')
        spike = self.get_spike(spike)
        if event:
            if spike:
                graph_data = spike.psth(self.get_event_stamp(event), **kwargs)
            else:
                logging.error('No valid spike specified')
        else:
            logging.error(str(event) + ' is not a valid event')

        return graph_data
Example #40
def orig_tick_list_us():
    wb = xl.load_workbook(crsf.cr_un.name+'\\US_Universe.xlsx')
    sheet1 = wb.get_sheet_by_name('Sheet1')
    ticks_cells = sheet1['A1':'A5000']
    ticks = []
    for r in ticks_cells:
        for c in r:
            tmp = c.value
            if tmp is not None:
                ticks.append(tmp.replace('US Equity', '').rstrip().replace('/','-'))  # convert to yahoo format
    n = len(ticks)
    type_cells = sheet1['E1':'E'+str(n)]
    types = []
    for r in type_cells:
        for c in r:
            types.append(c.value)
    name_cells = sheet1['B1':'B'+str(n)]
    names = []
    for r in name_cells:
        for c in r:
            names.append(c.value)
    gics_cells = sheet1['K1':'K'+str(n)]
    gics = []
    for r in gics_cells:
        for c in r:
            tmp = c.value
            if tmp is not None:
                gics.append(c.value)
            else:
                gics.append('')
    val = cruf.DataFrame(oDict([('Ticker', ticks), ('Type', types), ('Name', names), ('GICS', gics)]))
    val = val[1:,:]
    val['GICS'] = gics[1:]
    crsf.cr_un.store('US_Universe_Orig', val)
    # val.to_bin(crsf.cr_un+'US_Universe_Orig')
    return None
Example #41
def orig_tick_list_us():
    # Get the SPTMI Universe
    wb = xl.load_workbook(crsf.cr_un.name+'\\SPTMI Universe.xlsx')
    sheet1 = wb.get_sheet_by_name('Sheet1')
    ticks_cells = sheet1['A1':'A5000']
    ticks = []
    for r in ticks_cells:
        for c in r:
            tmp = c.value
            if tmp is not None:
                ticks.append(tmp.replace('US Equity', '').rstrip().replace('/','-'))  # convert to yahoo format
    n = len(ticks)
    type_cells = sheet1['E1':'E'+str(n)]
    types = []
    for r in type_cells:
        for c in r:
            types.append(c.value)
    name_cells = sheet1['B1':'B'+str(n)]
    names = []
    for r in name_cells:
        for c in r:
            names.append(c.value)
    gics_cells = sheet1['J1':'J'+str(n)]
    gics = []
    for r in gics_cells:
        for c in r:
            tmp = c.value
            if tmp is not None:
                gics.append(c.value)
            else:
                gics.append('')
    val = cruf.DataFrame(oDict([('Ticker', ticks), ('Type', types), ('Name', names), ('GICS', gics)]))
    val = val[1:,:]
    val['GICS'] = gics[1:]

    # Get the ETF and ADR Universe
    wb = xl.load_workbook(crsf.cr_un.name+'\\ETFADR Universe.xlsx')
    sheet1 = wb.get_sheet_by_name('Sheet1')
    ticks_cells = sheet1['A1':'A5000']
    ticks = []
    for r in ticks_cells:
        for c in r:
            tmp = c.value
            if tmp is not None:
                ticks.append(tmp.replace('US Equity', '').rstrip().replace('/','-'))  # convert to yahoo format
    n = len(ticks)
    type_cells = sheet1['E1':'E'+str(n)]
    types = []
    for r in type_cells:
        for c in r:
            types.append(c.value)
    name_cells = sheet1['B1':'B'+str(n)]
    names = []
    for r in name_cells:
        for c in r:
            names.append(c.value)
    gics_cells = sheet1['J1':'J'+str(n)]
    gics = []
    for r in gics_cells:
        for c in r:
            tmp = c.value
            if tmp is not None:
                gics.append(c.value)
            else:
                gics.append('')
    val2 = cruf.DataFrame(oDict([('Ticker', ticks), ('Type', types), ('Name', names), ('GICS', gics)]))
    val2 = val2[1:, :]
    val2['GICS'] = gics[1:]
    # combine the two inplace
    val.row_bind(val2)
    crsf.cr_un.store('US_Universe_Orig', val)
    crsf.cr_un.store('US_Universe_Cur', val)
    return None
Example #42
def get_yahoo_sqt_data(ticker_symbols):
    try:
        urlToVisit = 'http://download.finance.yahoo.com/d/quotes.csv?s='
        for i in ticker_symbols:
            urlToVisit += i + '+'
        urlToVisit = urlToVisit[0:(len(urlToVisit)-1)] + '&f=a2aa5bb6ohgl1d1t1j1f6j2v'
        sourceCode = web.urlopen(urlToVisit).read()
        splitSource = "".join(map(chr, sourceCode))
        table = splitSource.split(sep='\n')
        table = table[0:(len(table)-1)]  # remove the '' at the end
        # replace all N/A by nan
        for i, j in enumerate(table):
            table[i] = re.sub('N/A', 'nan', j)
        avg_volume, ask_price, ask_size, bid_price, bid_size, open_price, high_price, low_price, last_price, \
            last_date, last_time, mkt_cap, shares_float, shares_out, volume = [], [], [], [], [], [], [], [], [], \
                [], [], [], [], [], []
        for i in range(len(table)):
            table_item = table[i].split(',')
            avg_volume.append(float(table_item[0]))
            ask_price.append(float(table_item[1]))
            ask_size.append(float(table_item[2]))
            bid_price.append(float(table_item[3]))
            bid_size.append(float(table_item[4]))
            open_price.append(float(table_item[5]))
            high_price.append(float(table_item[6]))
            low_price.append(float(table_item[7]))
            last_price.append(float(table_item[8]))
            last_date_item = re.sub('"', '', table_item[9])
            last_date_item = last_date_item.split('/')
            last_date_item = [int(x) for x in last_date_item]
            last_date_item = dt.datetime(last_date_item[2], last_date_item[0], last_date_item[1])
            last_date.append(last_date_item)
            last_time_item = re.sub('"', '', table_item[10])
            last_time_ampm = last_time_item[-2:]
            last_time_rest = last_time_item[0:(len(last_time_item)-2)].split(':')
            last_time_rest = [int(x) for x in last_time_rest]
            if (last_time_ampm == 'am') and (last_time_rest[0] != 12):
                last_time.append(last_time_rest[0]*10000+last_time_rest[1]*100)
            elif (last_time_ampm == 'pm') and (last_time_rest[0] != 12):
                last_time.append(last_time_rest[0]*10000+last_time_rest[1]*100+120000)
            elif (last_time_ampm == 'am') and (last_time_rest[0] == 12):
                last_time.append(last_time_rest[1]*100)
            else:
                last_time.append(last_time_rest[0]*10000+last_time_rest[1]*100)
            mkt_cap_item = table_item[11]
            if mkt_cap_item != 'nan':
                mkt_cap_mult = mkt_cap_item[-1]
                mkt_cap_item = float(mkt_cap_item[0:(len(mkt_cap_item)-1)]) # not sure if it can handle mkt cap < 1M
                # keep mkt_cap in Millions
                if mkt_cap_mult == 'T':
                    mkt_cap_item *= 1000000
                elif mkt_cap_mult == 'B':
                    mkt_cap_item *= 1000
                elif mkt_cap_mult == 'K':
                    mkt_cap_item /= 1000
                mkt_cap.append(mkt_cap_item)
            else:
                mkt_cap.append(float(mkt_cap_item))
            shares_float.append(float(table_item[12]))
            shares_out.append(float(table_item[13]))
            volume.append(float(table_item[14]))
        stkQuote = cruf.DataFrame(data=oDict([('Ticker', ticker_symbols), ('Date', last_date), ('Time', last_time),
                                            ('AskPrice', ask_price), ('AskSize', ask_size), ('BidPrice', bid_price),
                                            ('BidSize', bid_size), ('OpenPrice', open_price), ('HighPrice', high_price),
                                            ('LowPrice', low_price), ('LastPrice', last_price), ('Volume', volume),
                                            ('MktCap', mkt_cap), ('AvgVolume', avg_volume),
                                            ('SharesFloat', shares_float), ('SharesOut', shares_out)]))
        return stkQuote
    except (KeyboardInterrupt, SystemExit):
        raise
    except Exception:
        print('Unexpected Error: ', sys.exc_info()[0])
Example #43
def swap(d):
    return oDict([(v, k) for k, v in d.items()])
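
For example (the values must be hashable, and the swap is only lossless when they are unique):

codes = oDict([('GET', 1), ('POST', 2)])
print(swap(codes))   # OrderedDict([(1, 'GET'), (2, 'POST')])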
Example #44
# -*- coding: utf-8 -*- #

from collections import OrderedDict as oDict
menu = oDict([
	('홈', '/home'),        # '홈' means 'Home'
	('업로드', '/upload')   # '업로드' means 'Upload'
])
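
A minimal rendering sketch (the HTML shape is an assumption, not part of the original snippet); the ordered dict keeps the menu entries in the order they were declared:

nav = ''.join('<a href="{}">{}</a>'.format(url, label) for label, url in menu.items())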