Ejemplo n.º 1
0
def t2t_table(table_dict, units=None, **kwargs):
    """ Create a t2t format table. 

        this is required by t2t for tables
        see for example: http://txt2tags.org/markup.html

        Example:
            >>> d=OrderedDict()
            >>> d['name']=['bright','dim']
            >>> d['flux']=['large','small']
            >>> print t2t_table(d)
            ||  name |  flux |
            | bright | large |
            |    dim | small |

            >>> print t2t_table(d, units=dict(flux='[ergs]'))
            ||  name | flux [ergs] |
            | bright |       large |
            |    dim |       small |
     """
    if units is not None:
        # Work on a shallow copy so the caller's dict is not mutated when
        # unit suffixes are folded into the column names (the pop/re-insert
        # below would otherwise rename keys in place).
        table_dict = table_dict.copy()
        for k, v in units.items():
            table_dict[k + ' %s' % v] = table_dict.pop(k)

    outtable = StringIO()

    asciitable.write(table_dict, outtable,
                     Writer=asciitable.FixedWidth,
                     names=table_dict.keys(),
                     **kwargs
                    )
    t = outtable.getvalue()

    # t2t marks the header row with a double pipe.
    t = '||' + t[2:]
    return t.strip()
Ejemplo n.º 2
0
def latex_table(table_dict, units=None, latexdict=None, **kwargs):
    r""" Format a dictionary into a latex table.
         Use the DefaultDict if you care about the order of the table.

            >>> d=OrderedDict()
            >>> d['name']=['bright','dim']
            >>> d['flux']=['large','small']
            >>> print latex_table(d, units=dict(flux='[ergs]'))
            \begin{deluxetable}{cc}
            \tablehead{\colhead{name} & \colhead{flux}\\ \colhead{ } & \colhead{[ergs]}}
            \startdata
            bright & large \\
            dim & small \\
            \enddata
            \end{deluxetable}


    """

    # Copy any caller-supplied latexdict so adding 'units' below never
    # mutates the caller's dictionary.
    latexdict = dict() if latexdict is None else dict(latexdict)

    if units is not None:
        latexdict['units'] = units

    outtable = StringIO()

    asciitable.write(table_dict, outtable, 
                     Writer=asciitable.AASTex,
                     names=table_dict.keys(),
                     latexdict=latexdict,
                     **kwargs
                    )
    t = outtable.getvalue()
    return t.strip()
Ejemplo n.º 3
0
def write_ascii(self, filename, **kwargs):
    '''
    Write a table to an ASCII file using asciitable

    Optional Keyword Arguments:

        Writer - Writer class (default= Basic)
        delimiter - column delimiter string
        write_comment - string defining a comment line in table
        quotechar - one-character string to quote fields containing special characters
        formats - dict of format specifiers or formatting functions
        names - list of names corresponding to each data column
        include_names - list of names to include in output (default=None selects all names)
        exclude_names - list of names to exlude from output (applied after include_names)
        overwrite - if True, silently replace an existing file (default=False)

    See the asciitable documentation at http://cxc.harvard.edu/contrib/asciitable/ for more details.
    '''

    _check_asciitable_installed()

    # 'overwrite' is consumed here; it is not a kwarg asciitable.write knows.
    overwrite = kwargs.pop('overwrite', False)

    if isinstance(filename, str) and os.path.exists(filename):
        if overwrite:
            os.remove(filename)
        else:
            raise Exception("File exists: %s" % filename)

    asciitable.write(self.data, filename, **kwargs)
Ejemplo n.º 4
0
def spec_save(data, outfile):
    """Write a 2-D spectrum array to an ASCII table at *outfile*,
    transposed so each input row ("frame") becomes an output column.
    Any pre-existing *outfile* is deleted first.
    """

    # Import modules
    import numpy as np
    import asciitable
    import os

    # Delete outfile if it exists.
    # FIX: the original called os.listdir(outdir).remove(outname), which
    # only removed the name from a throwaway list and never touched disk,
    # and raised NameError when outfile contained no '/' separator.  It
    # also leaked a file handle from a stray open(outfile, 'a').
    if os.path.exists(outfile):
        os.remove(outfile)

    # Rearrange so that each frame is a column, instead of a row.
    names = ['Frame ' + repr(i + 1) for i in range(np.shape(data)[0])]
    datalist = [list(data[:, j]) for j in range(np.shape(data)[1])]

    # Write data with asciitable
    asciitable.write(datalist, outfile, names=names)
Ejemplo n.º 5
0
def latexMatrix(cfMx, rowlabels=None):
    '''
    if you have the asciitable package installed, avail through mac ports
    and other distributions, then we can output latex format tables for
    our data. This is handy for generating latex output of a confusion matrix.

    cfMx      : 2-D array (e.g. a confusion matrix); columns become table columns
    rowlabels : optional sequence of row labels, emitted as a left-aligned
                first column
    Returns None; the table is written to sys.stdout.
    '''
    try:
        import asciitable
        import asciitable.latex
    # FIX: catch only ImportError instead of a bare except, and use the
    # print() function so the code runs under Python 3 as well.
    except ImportError:
        print("Error importing asciitable...you may not have this package installed.")
        return None

    # build data dictionary, per-column structure
    if rowlabels is not None:
        data = {'col000': rowlabels}
        col_align = ['l']
    else:
        data = {}
        col_align = []
    numCols = cfMx.shape[1]
    for i in range(numCols):
        data['col%s' % (str(i + 1).zfill(3))] = cfMx[:, i]
        col_align.append('c')

    col_align_str = "|%s|" % ("|".join(col_align))
    asciitable.write(data, sys.stdout, Writer=asciitable.Latex, col_align=col_align_str)
Ejemplo n.º 6
0
def test_write_noheader_no_delimiter(numpy):
    """Write a table as a fixed width table with no delimiter."""
    buf = io.StringIO()
    asciitable.write(dat, buf,
                     Writer=asciitable.FixedWidthNoHeader,
                     bookend=False, delimiter=None)
    expected = """\
1.2    "hello"  1  a
2.4  's worlds  2  2
"""
    assert_equal_splitlines(buf.getvalue(), expected)
Ejemplo n.º 7
0
def test_write_noheader_no_bookend(numpy):
    """Write a table as a fixed width table with no bookend."""
    buf = io.StringIO()
    asciitable.write(dat, buf,
                     Writer=asciitable.FixedWidthNoHeader,
                     bookend=False)
    expected = """\
1.2 |   "hello" | 1 | a
2.4 | 's worlds | 2 | 2
"""
    assert_equal_splitlines(buf.getvalue(), expected)
Ejemplo n.º 8
0
def test_write_noheader_no_pad(numpy):
    """Write a table as a fixed width table with no padding."""
    buf = io.StringIO()
    asciitable.write(dat, buf,
                     Writer=asciitable.FixedWidthNoHeader,
                     delimiter_pad=None)
    expected = """\
|1.2|  "hello"|1|a|
|2.4|'s worlds|2|2|
"""
    assert_equal_splitlines(buf.getvalue(), expected)
Ejemplo n.º 9
0
def test_write_noheader_normal(numpy):
    """Write a table as a normal fixed width table."""
    stream = io.StringIO()
    asciitable.write(dat, stream, Writer=asciitable.FixedWidthNoHeader)
    expected = """\
| 1.2 |   "hello" | 1 | a |
| 2.4 | 's worlds | 2 | 2 |
"""
    assert_equal_splitlines(stream.getvalue(), expected)
Ejemplo n.º 10
0
def dd_writer(packet, fpart):
    """Write *packet* as an ASCII table for data-dump purposes.

    In test mode (fcs.dd_test) the would-be filename is announced and the
    table goes to stdout; otherwise it is written to the dump file named
    from the configured prefix and *fpart*.  No-op unless fcs.dd is set.
    """
    # FIX: converted Python-2-only print statements to print() calls
    # (valid on both Python 2 and 3) and normalized tab indentation.
    if fcs.dd:
        dd_fname = "%s.%s.dat" % (fcs.dd_prefix, fpart)
        if fcs.dd_test:
            print("@FILENAME: " + dd_fname)
            asciitable.write(packet, sys.stdout)
        else:
            print("DD: " + dd_fname)
            asciitable.write(packet, dd_fname)
Ejemplo n.º 11
0
 def write_success_table(self,filename):
     # Write a 3-row table flagging which objects produced an image:
     # row 0 = success flag (1 = image present, 0 = missing),
     # row 1 = x positions, row 2 = y positions.
     # NOTE(review): reads the module-level global hdudf_h, not self —
     # confirm hdudf_h is populated before calling.
     boolthing = np.ones_like(hdudf_h.mstar_list)
     # An empty string in image_files marks a failed/missing image.
     i_fail = np.where(np.asarray(hdudf_h.image_files)=='')[0]
     print boolthing.shape, i_fail.shape
     print boolthing, i_fail
     boolthing[i_fail] = 0
     data = np.asarray([boolthing,hdudf_h.x_array,hdudf_h.y_array])
     asciitable.write(data,filename)
     return
Ejemplo n.º 12
0
    def __parseKnownIssues(self, jobs):
        """Classify every non-passing task of each unparsed job via
        self.__parseTask, then mark the jobs parsed and rewrite the
        known/unknown issue tables to disk."""
        for job in jobs:
            if job.parsed:
                continue
            # Only finished jobs (Completed or Aborted) can be classified.
            if job.result["status"] != "Completed" and job.result["status"] != "Aborted":
                genLogger.warn(
                    "%s J:%s status %s, not Completed or Aborted, SKIP"
                    % (job.type, job.result["id"], job.result["status"])
                )
                continue
            if job.result["result"] == "Pass":
                genLogger.debug("%s J:%s result %s, SKIP" % (job.type, job.result["id"], job.result["result"]))
                continue
            for rs in job.result["recipeSet"]:
                # Recipe sets already nak'd get no further analysis.
                if rs.result["response"] == "nak":
                    continue
                # skip_left is set (to True) when __parseTask answers
                # "SKIP_LEFT"; it suppresses exactly the next recipe, then
                # resets.  Initialized to "" (falsy) rather than False.
                skip_left = ""
                for r in rs.result["recipe"]:
                    if r.result["result"] == "Pass" and not r.result["guestrecipe"]:
                        continue
                    if skip_left:
                        skip_left = ""
                        continue
                    for t in r.result["task"]:
                        if t.result["result"] == "Pass":
                            continue
                        ctx = {"job": job, "rs": rs, "r": r, "gr": "", "t": t}
                        ret = self.__parseTask(ctx)
                        if ret == "SKIP_LEFT":
                            skip_left = True
                            break
                    # Same skip mechanism, tracked independently, for the
                    # guest recipes nested under this recipe.
                    skip_left_gr = ""
                    for gr in r.result["guestrecipe"]:
                        if gr.result["result"] == "Pass":
                            continue
                        if skip_left_gr:
                            skip_left_gr = ""
                            continue
                        for gt in gr.result["task"]:
                            if gt.result["result"] == "Pass":
                                continue
                            ctx = {"job": job, "rs": rs, "r": r, "gr": gr, "t": gt}
                            ret = self.__parseTask(ctx)
                            if ret == "SKIP_LEFT":
                                skip_left_gr = True
                                break

        self.__updateJobState(jobs, "Parsed")
        self.jobState.write()

        # Persist both issue tables (filled in by __parseTask) as
        # fixed-width ASCII files.
        asciitable.write(
            self.knownIssuesTable, self.knownIssuesResult, names=self.columns, Writer=asciitable.FixedWidth
        )
        asciitable.write(
            self.unknownIssuesTable, self.unknownIssuesResult, names=self.columns, Writer=asciitable.FixedWidth
        )
Ejemplo n.º 13
0
def test_write_formats(numpy):
    """Write a fixed width table applying per-column format specifiers."""
    buf = io.StringIO()
    col_formats = {'Col1': '%-8.3f', 'Col2': '%-15s'}
    asciitable.write(dat, buf, Writer=asciitable.FixedWidth,
                     formats=col_formats)
    expected = """\
|     Col1 |            Col2 | Col3 | Col4 |
| 1.200    | "hello"         |    1 |    a |
| 2.400    | 's worlds       |    2 |    2 |
"""
    assert_equal_splitlines(buf.getvalue(), expected)
Ejemplo n.º 14
0
def test_write_twoline_normal(numpy):
    """Write a table as a normal fixed width table."""
    buf = io.StringIO()
    asciitable.write(dat, buf, Writer=asciitable.FixedWidthTwoLine)
    expected = """\
Col1      Col2 Col3 Col4
---- --------- ---- ----
 1.2   "hello"    1    a
 2.4 's worlds    2    2
"""
    assert_equal_splitlines(buf.getvalue(), expected)
Ejemplo n.º 15
0
def test_write_twoline_no_bookend(numpy):
    """Write a two-line fixed width table with bookend=True and '|' as
    delimiter.  (NOTE: the function name says "no bookend" but the call
    and expected output below clearly use bookends.)"""
    out = io.StringIO()
    asciitable.write(dat, out, Writer=asciitable.FixedWidthTwoLine, bookend=True, delimiter='|')
    assert_equal_splitlines(out.getvalue(), """\
|Col1|     Col2|Col3|Col4|
|----|---------|----|----|
| 1.2|  "hello"|   1|   a|
| 2.4|'s worlds|   2|   2|
""")
Ejemplo n.º 16
0
    def __call__(self):
        """Render the imported transactions as up to two fixed-width
        tables: those ready to commit and those awaiting processing."""
        with open(_IMPORT_YAML_PATH, 'r') as f:
            imported = self._load_parsed_txs(f)

        processed_output = defaultdict(list)
        unprocessed_output = defaultdict(list)
        for seq, itx in enumerate(imported, start=1):
            bucket = processed_output if itx.processed else unprocessed_output
            bucket['id'].append(seq)
            bucket['date'].append(str(itx.parsed_tx.date))
            bucket['account'].append(str(itx.parsed_tx.account))
            bucket['description'].append(
                itx.parsed_tx.description[:130].replace('\n', ' '))
            bucket['amount'].append(float(itx.parsed_tx.amount))
            bucket['category'].append(itx.category)

        sink = StringIO()

        def format_amount(x):
            # Debits in red, credits in green.
            color = Fore.RED if x < 0 else Fore.GREEN
            return "{}{:.2f}{}".format(color, x, Fore.RESET)

        column_names = ['id', 'date', 'account', 'description',
                        'category', 'amount']

        if processed_output:
            sink.write("Transactions ready to commit:\n\n")
            asciitable.write(processed_output, sink,
                             Writer=asciitable.FixedWidthNoHeader,
                             names=column_names,
                             formats={'amount': format_amount})
            if unprocessed_output:
                sink.write("\n")

        if unprocessed_output:
            sink.write("Transactions ready for processing:\n\n")
            asciitable.write(unprocessed_output, sink,
                             Writer=asciitable.FixedWidthNoHeader,
                             names=column_names,
                             formats={'amount': format_amount})

        return sink.getvalue()
Ejemplo n.º 17
0
    def __call__(self):
        """Build a fixed-width budget report for self.year/self.month:
        one row per budgeted category with budget, spent, remaining
        (each with percentages) and the transaction count."""
        # Get the category if it already exists, or create a new one.
        with session_scope() as session:
            categories = session.query(Category) \
                                .order_by(Category.name) \
                                .all()

        output = defaultdict(list)
        for category in categories:
            # If the category doesn't have a budget set, skip it.
            if category.budget_items is None or category.budget_items == []:
                continue

            # Report window: the whole calendar month.
            (_, num_days) = calendar.monthrange(self.year, self.month)
            beg_date = datetime.date(self.year, self.month, 1)
            end_date = beg_date + datetime.timedelta(days=num_days)

            # Keep only debit transactions inside the window.
            # FIX: the original used filter(), which on Python 3 returns a
            # one-shot iterator — sum() below consumed it and the later
            # len(txs) raised TypeError.  Materialize a list instead.
            txs = [tx for tx in category.transactions
                   if tx.is_debit() and tx.is_in_period(beg_date, end_date)]

            # The most recent budget item defines this month's budget.
            budget_item = category.budget_items[-1]

            # Debit transactions carry negative amounts; total magnitudes.
            spent = sum([abs(tx.amount) for tx in txs], 0)
            rem = budget_item.amount - spent

            # Calculate percentage of budget
            spent_pct = (spent / budget_item.amount) * 100.0
            rem_pct = (rem / budget_item.amount) * 100.0

            # Add data to output dict
            output['category'].append(category.name)
            output['budget'].append("{:.2f}".format(budget_item.amount))
            output['spent'].append("{:.2f} ({:.0f}%)".format(spent,
                                                             spent_pct))
            output['remaining'].append("{:.2f} ({:.0f}%)".format(rem,
                                                                 rem_pct))
            output['transactions'].append(len(txs))

        if not output:
            return "No budget set"

        output_io = StringIO()
        asciitable.write(output, output_io,
                         Writer=asciitable.FixedWidth,
                         names=['category', 'budget', 'spent', 'remaining',
                                'transactions'],)

        return output_io.getvalue()
Ejemplo n.º 18
0
 def report(self, format='ascii'):
     """Print a fixed-width table of per-cell internal resistance values,
     or log a warning/error when there is no data or the format is
     unsupported."""
     if format != 'ascii':
         log.error('unknown report format', format=format)
         return
     if len(self.data.keys()) > 0:
         rows = [(id, float(ir['v'])) for (id, ir) in self.data.items()]
         asciitable.write(rows,
             names=['Cell ID', 'IR [mΩ]'],
             formats={'Cell ID': '%s', 'IR [mΩ]': '%s'},
             Writer=asciitable.FixedWidth)
     else:
         self.log.warning('no data')
Ejemplo n.º 19
0
def test_write_twoline_no_pad(numpy):
    """Write a two-line fixed width table with a single-space delimiter
    pad and '=' as the header underline character."""
    buf = io.StringIO()
    asciitable.write(dat, buf,
                     Writer=asciitable.FixedWidthTwoLine,
                     delimiter_pad=' ', position_char='=')
    expected = """\
Col1        Col2   Col3   Col4
====   =========   ====   ====
 1.2     "hello"      1      a
 2.4   's worlds      2      2
"""
    assert_equal_splitlines(buf.getvalue(), expected)
Ejemplo n.º 20
0
    def report(self, format='ascii'):
        """Print a fixed-width count table with one row per sorted cell
        key (one column per configured key query, plus the count)."""
        if format != 'ascii':
            log.error('unknown report format', format=format)
            return

        if len(self.cells.items()) == 0:
            self.log.warning('no data')
            return

        rows = []
        for key in sorted(self.cells.keys()):
            rows.append(list(key) + [self.cells[key]])
        header = [query.program_string for query in self.config.key_queries]
        asciitable.write(rows,
            names=header + ['Count'],
            Writer=asciitable.FixedWidth)
Ejemplo n.º 21
0
    def __init__(self):
        """Parse CLI options, load errata info and per-run job state, and
        (when known-issue parsing is enabled) create or load the known and
        unknown issue result tables."""
        self.autoRerun = ""
        self.parseKnownIssues = ""
        self.errataName = ""
        self.errataLname = ""
        self.rerunedRSId = []
        self.force = False
        self.__parseArgs()
        self.errataInfo = ErrataInfo(self.errataName, self.errataLname, False)

        self.resultPath = "./result"
        self.jobStatePath = "%s/%s.%s" % (self.resultPath, self.errataInfo.errataId, "jobstate")
        genLogger.info("jobStatePath      : %s", self.jobStatePath)
        self.jobState = ConfigObj(self.jobStatePath, encoding="utf8")

        if self.parseKnownIssues == "y":
            self.knownIssuesPath = []
            self.knownIssues = []
            self.knownIssuesRPath = "./known_issues"
            # Version-specific issue file first, then the RHEL-major fallback.
            self.knownIssuesPath.append(
                "%s/%s.%s" % (self.knownIssuesRPath, self.errataInfo.rhel_version, "known_issues")
            )
            self.knownIssuesPath.append(
                "%s/RHEL-%s.%s" % (self.knownIssuesRPath, self.errataInfo.major, "known_issues")
            )
            # FIX: the loop temp was named 'str', shadowing the builtin;
            # also use enumerate instead of range(len(...)).
            for i, issues_path in enumerate(self.knownIssuesPath):
                genLogger.info("knownIssuesPath%s" % ("%d  : %s" % (i, issues_path)))
                self.knownIssues.append(ConfigObj(issues_path, encoding="utf8"))

            self.knownIssuesResult = "%s/%s.%s" % (self.resultPath, self.errataInfo.errataId, "knownIssues")
            self.unknownIssuesResult = "%s/%s.%s" % (self.resultPath, self.errataInfo.errataId, "unknownIssues")
            # Placeholder row so freshly-created table files are well-formed.
            self.tableTemple = {
                "Path": ["---"],
                "TaskName": ["---"],
                "TaskResult": ["---"],
                "TaskStatus": ["---"],
                "ResultPath": ["---"],
                "PathResult": ["---"],
                "Checked": ["---"],
            }
            self.columns = ["Path", "TaskName", "TaskResult", "TaskStatus", "ResultPath", "PathResult", "Checked"]
            if not os.path.exists(self.knownIssuesResult):
                asciitable.write(
                    self.tableTemple, self.knownIssuesResult, names=self.columns, Writer=asciitable.FixedWidth
                )
            if not os.path.exists(self.unknownIssuesResult):
                asciitable.write(
                    self.tableTemple, self.unknownIssuesResult, names=self.columns, Writer=asciitable.FixedWidth
                )
            reader = asciitable.get_reader(Reader=asciitable.FixedWidth)
            self.knownIssuesTable = reader.read(self.knownIssuesResult)
            self.unknownIssuesTable = reader.read(self.unknownIssuesResult)
Ejemplo n.º 22
0
    def __call__(self):
        """Format imported transactions into fixed-width tables, grouped
        by whether each transaction has already been processed."""
        with open(_IMPORT_YAML_PATH, 'r') as f:
            txs = self._load_parsed_txs(f)

        processed = defaultdict(list)
        unprocessed = defaultdict(list)
        for seq, tx in enumerate(txs, start=1):
            bucket = processed if tx.processed else unprocessed
            bucket['id'].append(seq)
            bucket['date'].append(str(tx.parsed_tx.date))
            bucket['account'].append(str(tx.parsed_tx.account))
            bucket['description'].append(
                tx.parsed_tx.description[:130].replace('\n', ' '))
            bucket['amount'].append(float(tx.parsed_tx.amount))
            bucket['category'].append(tx.category)

        sink = StringIO()

        def format_amount(x):
            # Negative amounts render red, the rest green.
            return "{}{:.2f}{}".format(
                Fore.RED if x < 0 else Fore.GREEN, x, Fore.RESET)

        def emit(table):
            asciitable.write(
                table, sink,
                Writer=asciitable.FixedWidthNoHeader,
                names=['id', 'date', 'account', 'description', 'category',
                       'amount'],
                formats={'amount': lambda x: format_amount(x)})

        if processed:
            sink.write("Transactions ready to commit:\n\n")
            emit(processed)
            if unprocessed:
                sink.write("\n")

        if unprocessed:
            sink.write("Transactions ready for processing:\n\n")
            emit(unprocessed)

        return sink.getvalue()
Ejemplo n.º 23
0
    def get_t2t_table(table, **kwargs):
        """Render *table* (a dict of columns) as a txt2tags table string."""
        buf = StringIO()
        asciitable.write(table, buf, 
                         Writer=asciitable.FixedWidth,
                         names=table.keys(),
                         **kwargs)
        text = buf.getvalue()

        # txt2tags marks the header row with a leading double pipe;
        # see http://txt2tags.org/markup.html
        return '||' + text[2:]
Ejemplo n.º 24
0
    def get_t2t_table(table, **kwargs):
        """Format a column dict as a t2t (txt2tags) table string."""
        stream = StringIO()
        asciitable.write(table,
                         stream,
                         Writer=asciitable.FixedWidth,
                         names=table.keys(),
                         **kwargs)
        rendered = stream.getvalue()

        # txt2tags requires the header row to start with '||'
        # (see http://txt2tags.org/markup.html).
        rendered = '||' + rendered[2:]
        return rendered
Ejemplo n.º 25
0
    def write_vals(self, filename):
        """Write dvals and mvals for each model component (as applicable)
        to an ascii table file.  Couplings have neither, TelemData has
        only dvals, Node/AcisDpaPower have both.  All values are
        guaranteed time-synced, so one shared 'time' column suffices.
        """
        columns = OrderedDict(time=self.times)
        for component in self.comps:
            if hasattr(component, 'dvals'):
                columns[component.name + '_data'] = component.dvals
            if hasattr(component, 'mvals') and component.predict:
                columns[component.name + '_model'] = component.mvals

        asciitable.write(columns, filename, names=columns.keys())
Ejemplo n.º 26
0
def fixed_width_table(table_dict, table_kwargs=None, indent=None):
    """Render *table_dict* as a FixedWidth ascii table string.

    table_dict   : dict mapping column names to value sequences
    table_kwargs : extra kwargs for asciitable.write; defaults to
                   dict(bookend=False, delimiter=None)
    indent       : optional string prefixed to every output line
    """
    # FIX: the default was a mutable dict literal in the signature; use a
    # None sentinel instead (the shared-default pitfall).
    if table_kwargs is None:
        table_kwargs = dict(bookend=False, delimiter=None)

    outtable = StringIO()
    asciitable.write(table_dict, outtable,
                     Writer=asciitable.FixedWidth,
                     names=table_dict.keys(),
                     **table_kwargs)
    t = outtable.getvalue()

    # asciitable always terminates its output with a newline; drop it.
    assert t[-1] == '\n'
    t = t[:-1]
    if indent is None:
        return t

    return '\n'.join('%s%s' % (indent, line) for line in t.split('\n'))
Ejemplo n.º 27
0
def rms_dict(values, filename="rms.dat"):
    """Write a CSV matrix of pairwise RMS differences between the "y"
    arrays of every entry in *values* (rows and columns share the same
    key order; the first column holds the row key)."""
    def rms_pair(a, b):
        return numpy.mean(numpy.sqrt((a["y"] - b["y"])**2))

    columns = ["name"]
    formats = dict(name=lambda x: x)
    rows = []
    for key2 in values.keys():
        columns.append(key2)
        formats[key2] = "%4.1F"

    for key1 in values.keys():
        r = [key1]
        for key2 in values.keys():
            r.append(rms_pair(values[key1], values[key2]))
        rows.append(r)
    # FIX: the filename argument was previously ignored (no output target
    # was passed), so the table went to stdout instead of the file.
    asciitable.write(rows, filename, names=columns, formats=formats, delimiter=",")
Ejemplo n.º 28
0
    def report(self, format='ascii'):
        """Print the collected infoset cells: as a fixed-width table (one
        column per configured infoset query) when queries exist, otherwise
        as a per-id dump of each cell's first item."""
        if format != 'ascii':
            log.error('unknown report format', format=format)
            return

        if len(self.cells.items()) == 0:
            self.log.warning('no data')
            return

        if len(self.config.infoset_queries) > 0:
            # One row per cell id, ordered by the precomputed
            # self.sort_results ranking; each query result is stringified.
            # Sort-query columns are forced to '%s' formatting.
            asciitable.write([ [id] + [ str(result) for result in self.cells[id] ] for id in sorted(self.cells.keys(), key=lambda key: self.sort_results[key]) ],  # noqa
                names=['.id'] + [ query.program_string for query in self.config.infoset_queries ],
                formats={ f'{sort_query.program_string}': '%s' for sort_query in self.sort_queries },
                Writer=asciitable.FixedWidth)
        else:
            # No queries configured: plain dump, one header line per id.
            for (id, item) in self.cells.items():
                print(f"=== Infoset for {id}")
                print(item[0])
Ejemplo n.º 29
0
def fits2ascii(fitsfile, outpath=None, columns=None, verbose=True):
    """
    Convert fits file into ascii (see ascii2fits for the reverse command)

    --- INPUT
    fitsfile : FITS table to convert (extension 1 is read)
    outpath  : optional output directory; defaults to the input file's location
    columns  : list of column names to write; ['all'] (the default) selects
               every column
    verbose  : print progress messages

    --- EXAMPLE OF USE ---


    NB! not tested as of 160118... download asciitable
    """
    # Avoid a mutable-list default argument; None acts as the sentinel.
    if columns is None:
        columns = ['all']
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Loading fits file ' + fitsfile)
    datfits = afits.open(fitsfile)
    fitstab = datfits[1].data
    fitscol = fitstab.columns

    # FIX: removed a leftover pdb.set_trace() debugger breakpoint here.
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Will write the following columns to the ascii file:')
    if 'all' in columns:
        keys = fitscol.names
        if verbose: print('   all ("all" was found in list of columns)')
    else:
        keys = columns
        if verbose: print('   ' + ','.join(keys))
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Initializing and fillling dictionary with data')
    asciidata = {}
    for kk in keys:
        asciidata[kk] = []
        asciidata[kk][:] = fitstab[kk][:]
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Write dictionary to ascii file:')
    head = fitsfile.split('.fit')[0]
    asciiname = head + '.ascii'
    if outpath is not None:
        # FIX: this path was previously computed into an unused variable
        # (asciibase), so outpath was silently ignored.
        asciiname = outpath + asciiname.split('/')[-1]
    asciitable.write(asciidata,
                     asciiname,
                     Writer=asciitable.CommentedHeader,
                     names=keys)
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print(' - Wrote data to: ' + asciiname)
Ejemplo n.º 30
0
def save_multibench_results(multibench_results, samples_per_sample_size,
                            save_path):
    """Append one FixedWidthTwoLine table per benchmark key to save_path,
    each preceded by a '->key' marker line.  Any existing file at
    save_path is removed first."""
    precision = 3
    headers = [
        "Number of Samples", "Run time", "Memory", "Disk read", "Disk write",
        "List of Samples"
    ]

    def fmt(value):
        return round(value, precision)

    # Every result dict carries the same keys; take them from the first.
    benchmark_keys = multibench_results[0].keys()

    if os.path.exists(save_path):
        os.remove(save_path)

    for benchmark_key in benchmark_keys:
        rows = []
        for idx, bench_result in enumerate(multibench_results):
            samples = samples_per_sample_size[idx]
            target = bench_result[benchmark_key]
            rows.append([
                len(samples),
                fmt(target["runtime"]),
                fmt(target["memory"]),
                fmt(target["disk_read"]),
                fmt(target["disk_write"]),
                ", ".join(samples),
            ])

        with open(save_path, "a") as fstream:
            fstream.write('->%s\n' % benchmark_key)

            asciitable.write(rows,
                             fstream,
                             names=headers,
                             Writer=asciitable.FixedWidthTwoLine,
                             bookend=True,
                             delimiter="|",
                             quotechar="'")
Ejemplo n.º 31
0
def process_stat(args):
    """Run `svn status` on args.path via pysvn and print the result as a
    two-line fixed-width table, followed by a timing summary."""
    client = pysvn.Client()
    t1 = time.time()
    status = client.status(args.path)
    t2 = time.time()
    asciitable.write(
            get_status(status, args.path), 
            sys.stdout, 
            names=("status", "file", "modified"),
            Writer=asciitable.FixedWidthTwoLine,
            bookend=False,
            formats={
                "modified": format_timestamp, 
                "status": str
            })
    # FIX: converted Python-2-only print statements to print() calls so
    # the function also runs under Python 3.
    print()
    now = time.time()
    print("[svn st in %.2f sec, total %.2f]" % (t2 - t1, now - t1))
    print()
Ejemplo n.º 32
0
def fits2ascii(fitsfile,outpath=None,columns=['all'],verbose=True):
    """
    Convert fits file into ascii (see ascii2fits for the reverse command)

    --- INPUT

    --- EXAMPLE OF USE ---


    NB! not tested as of 160118... download asciitable
    """
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Loading fits file '+fitsfile
    datfits  = pyfits.open(fitsfile)
    fitstab  = datfits[1].data
    fitscol  = fitstab.columns

    pdb.set_trace()
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Will write the following columns to the ascii file:'
    if 'all' in columns:
        keys = fitscol.names
        if verbose: print '   all ("all" was found in list of columns)'
    else:
        keys = columns
        if verbose: print '   '+','.join(keys)
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Initializing and fillling dictionary with data'
    asciidata = {}
    for kk in keys:
        asciidata[kk] = []
        asciidata[kk][:] = fitstab[kk][:]
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Write dictionary to ascii file:'
    head = fitsfile.split('.fit')[0]
    asciiname = head+'.ascii'
    if outpath != None:
        asciibase = outpath+asciiname.split('/')[-1]
    asciitable.write(asciidata, asciiname, Writer=asciitable.CommentedHeader, names=keys)
    #-------------------------------------------------------------------------------------------------------------
    if verbose: print ' - Wrote data to: ',asciiname
Ejemplo n.º 33
0
def fit_viking_data(input_file, vl1years="2,3",  vl2years="2", nmodes=5):
    """Translate viking data from
         name year ls  d h m s p(mb)
         VL1  1  97.073    0 18  0 49  7.534
         to the same format as the model data
         ls, vl1, vl2

    input_file : ascii table with the columns listed above
    vl1years   : comma separated mission years of VL1 data to keep
    vl2years   : comma separated mission years of VL2 data to keep
    nmodes     : number of harmonic modes passed to fit_data

    Returns the merged fit dictionaries of both landers; also writes the
    selected samples to vl1.data and vl2.data as a side effect.
    """

    def _select(lander, years_csv):
        # Boolean mask: rows of this lander observed in any requested year.
        sel_year = [False] * len(year)
        for y in [int(s) for s in years_csv.split(",")]:
            sel_year = sel_year | (year == y)
        return (lander_name == lander) & sel_year

    data = asciitable.read(input_file)
    data.dtype.names = ["name", "year", "L_S", "day", "hour", "minute", "second", "pressure"]
    #scale to Pascal
    pressure = data["pressure"] * 100.
    lander_name = data["name"]
    year = data["year"]
    L_S = data["L_S"]

    #vl1
    selection = _select("VL1", vl1years)
    d_data = dict(L_S=L_S[selection], vl1=pressure[selection])
    fit = fit_data(d_data, nmodes)

    #vl2
    selection = _select("VL2", vl2years)
    d_data2 = dict(L_S=L_S[selection], vl2=pressure[selection])
    fit2 = fit_data(d_data2, nmodes)

    fit.update(fit2)
    asciitable.write(d_data2, "vl2.data",delimiter=",")
    asciitable.write(d_data, "vl1.data",delimiter=",")
    return fit
Ejemplo n.º 34
0
def save(data, names, outfile):

    # Import modules
    import numpy as np
    import asciitable
    import os

    # Make a file to write the data to
    open(outfile, 'a')

    # Create dictionary of 'name':data
    data_dict = {}
    if len(names) == 1:
        data_dict[names[0]] = data
    if len(names) >= 2:
        for i in range(len(names)):
            if len(np.shape(data)) > 1:
                data_dict[names[i]] = data[i, :]
            if len(np.shape(data)) == 1:
                data_dict[names[i]] = [data[i]]
    # Write data with asciitable
    asciitable.write(data_dict, outfile, names=names)
Ejemplo n.º 35
0
def slot(config):
    """Act on the configured MCC slots: start an action and/or print info."""

    sess = megacell_api_session(config.mcc_baseurl)

    if config.all_slots:
        config.slots = Slots

    if config.new_cells:
        # Restrict to slots reporting a freshly inserted cell.
        info = sess.get_cells_info()
        config.slots = [
            s for s in info.keys()
            if info[s].fetch('status_text') == StatusStrings.NEW_CELL_INSERTED
        ]

    log.info('selected slots', slots=config.slots)

    if config.action:
        # Kick off the requested action on every selected slot.
        sess.multiple_slots_action(config.slots, ActionCodes[config.action])

    if config.info:
        info = sess.get_cells_info()

        if len(config.infoset_queries) > 0:
            # Run every jq query against each slot's JSON document.
            answers = dict()
            for s in config.slots:
                doc = info[s].to_json()
                answers[s] = list()
                for query in config.infoset_queries:
                    result = query.input(text=doc).text()
                    answers[s].append(result)
                    log.debug('jq query result', slot=s, result=result, query=query)

            rows = [[s] + answers[s] for s in config.slots]
            asciitable.write(rows,
                names=['Slot'] + [q.program_string for q in config.infoset_queries],
                formats={'Slot': '%s'},
                Writer=asciitable.FixedWidth)
        else:
            # No queries: dump the raw per-slot info as JSON.
            print(json.dumps({s.name: info[s].fetch('.') for s in config.slots}))
Ejemplo n.º 36
0
    def __call__(self):
        """Render the account's transactions as a fixed-width text table."""
        # Pull every transaction of this account, oldest first.
        with session_scope() as session:
            txs = session.query(Transaction) \
                         .filter(Transaction.account.has(
                             name=self.account_name)) \
                         .order_by(Transaction.date) \
                         .all()

        columns = defaultdict(list)
        for tx in txs:
            columns['id'].append(tx.id)
            columns['date'].append(str(tx.date))
            # Keep descriptions single-line and reasonably short.
            columns['description'].append(
                tx.description[:130].replace('\n', ' '))
            columns['amount'].append(tx.amount)
            columns['reconciled'].append(tx.reconciled)
            columns['category'].append(tx.category.name if tx.category else '')

        def format_amount(x):
            # Negative amounts in red, non-negative in green.
            color = Fore.RED if x < 0 else Fore.GREEN
            return "{}{:.2f}{}".format(color, x, Fore.RESET)

        buf = StringIO()
        asciitable.write(columns, buf,
                         Writer=asciitable.FixedWidthNoHeader,
                         names=['id', 'date', 'description', 'category',
                                'reconciled', 'amount'],
                         formats={'amount': lambda x: format_amount(x),
                                  'reconciled': lambda x: 'Y' if x else 'N'})

        return buf.getvalue()
Ejemplo n.º 37
0
    def report(self):
        """Print a fixed-width event-log table per cell, or warn if empty."""
        if len(self.cells.items()) > 0:
            for (id, rows) in self.cells.items():
                print(f"=== Log for {id}")
                if len(rows) > 0:
                    asciitable.write(rows,
                                     names=[
                                         'Timestamp', 'Type', 'Event',
                                         'Equipment', 'Data'
                                     ],
                                     # Bug fix: the last format key used to be
                                     # 'Results', which is not a column name,
                                     # so the 'Data' column had no format.
                                     formats={
                                         'Timestamp': '%s',
                                         'Type': '%s',
                                         'Event': '%s',
                                         'Equipment': '%s',
                                         'Data': '%s'
                                     },
                                     Writer=asciitable.FixedWidth)
                else:
                    print("LOG EMPTY")
        else:
            self.log.warning('no data')
Ejemplo n.º 38
0
def confluence_table(table_dict, units=None, **kwargs):
    """ Format a dictionary into a confluence table. 
        Use the DefaultDict if you care about the order of the table.
    
        Example:
            >>> d=OrderedDict()
            >>> d['name']=['bright','dim']
            >>> d['flux']=['large','small']
            >>> print confluence_table(d)
            ||   name ||  flux ||
            || bright  | large  |
            ||    dim  | small  |

            >>> print confluence_table(d, units=dict(flux='[ergs]'))
            ||   name || flux [ergs] ||
            || bright  |       large  |
            ||    dim  |       small  |


    """

    if units is not None:
        # Bug fix: operate on a copy so the caller's dictionary is not
        # mutated (columns used to be popped from the input in place).
        table_dict = table_dict.copy()
        for k,v in units.items():
            table_dict[k + ' %s' % v] = table_dict.pop(k)

    outtable=StringIO()

    asciitable.write(table_dict, outtable,
                 Writer=asciitable.FixedWidth,
                 names=table_dict.keys(),
                 **kwargs)
    # Post-process the fixed-width output into confluence markup:
    # double the pipes of the header row and prefix every row with '|'.
    t=outtable.getvalue()
    t=t.replace(' |','  |')
    t=t.strip().split('\n')
    t[0]=t[0].replace(' |','||')
    t=['|'+i for i in t]
    return '\n'.join(t)
Ejemplo n.º 39
0
Archivo: MCMC.py Proyecto: iancze/AY193
def run_chain():
    """Run 1000 Metropolis steps on (a, b) and write the chain to runa.dat."""
    global a_old, b_old, sigma
    chain = []
    for step in range(1000):
        # Propose a Gaussian jump in each parameter.
        a_prop = a_old + np.random.normal(scale=0.03)
        b_prop = b_old + np.random.normal(scale=0.05)
        ratio = r(a_prop, b_prop, a_old, b_old, sigma)
        # Accept outright when ratio >= 1, otherwise with probability ratio.
        # Short-circuiting keeps the RNG call sequence identical: the uniform
        # draw only happens when ratio < 1.
        accept = ratio >= 1.0 or ratio >= np.random.uniform()
        if accept:
            a_old = a_prop
            b_old = b_prop
        chain.append([step, ratio, accept, a_old, b_old])

    asciitable.write(chain,"runa.dat",names=["j","ratio","accept","a","b"])
Ejemplo n.º 40
0
def make_filetypes_dat(contents):
    """Write a filetypes.dat under opt.data_root restricted to *contents*."""
    all_types = asciitable.read(msid_files['filetypes'].abs)
    # Keep only the rows whose content id appears in the requested set.
    selected = [row for row in all_types if row['content'].lower() in contents]
    out_path = os.path.join(opt.data_root, 'filetypes.dat')
    asciitable.write(selected, out_path, names=all_types.dtype.names)
Ejemplo n.º 41
0
    Mission = str(f[1].header['TELESCOP'])
    ObsID = str(f[0].header['OBS_ID'])
    DateObs = str(f[0].header['DATE-OBS'])
    MJD = str(f[0].header['MJD_OBS'])
    Observer = str(f[1].header['OBSERVER'])
    return Mission, Inst, ObsID, Observer, DateObs, MJD, ExpTime


# Gather the seven list_file() header fields for every input file,
# one list per field.
prst = []
for field in range(7):
    prst.append([list_file(name)[field] for name in FileNames])

# Columns for the latex table (Mission/Observer fields are skipped).
data = {
    'Inst': prst[1],
    'ObsID': prst[2],
    'DateObs': prst[4],
    'MJD': prst[5],
    'ExpTime': prst[6]
}
asciitable.write(data,
                 sys.stdout,
                 Writer=asciitable.Latex,
                 latexdict={
                     'preamble': r'\begin{center}',
                     'tablefoot': r'\end{center}',
                     'tabletype': 'table*'
                 })
Ejemplo n.º 42
0
def check_write_table(test_def, table):
    """Write *table* with test_def['kwargs'] and diff it against the
    expected text in test_def['out'], line by line."""
    buf = io.StringIO()
    asciitable.write(table, buf, **test_def["kwargs"])
    expected = test_def["out"]
    actual = buf.getvalue()
    print("Expected:\n%s" % expected)
    print("Actual:\n%s" % actual)
    assert actual.splitlines() == expected.splitlines()
Ejemplo n.º 43
0
import asciitable,random
# Draw a random subsample of 5000 GZ2 users and write it back out as csv.
gz2users = asciitable.read('/Users/willettk/Desktop/treemap/gz2_users.csv')
ss5000 = random.sample(gz2users,5000)
asciitable.write(ss5000,'/Users/willettk/Desktop/ss5000.csv')
Ejemplo n.º 44
0
            print a, b, ww[w]
            sq = quad(qhansen, 0.01, 10, args=(a, b, ww[w]))
            sp = quad(phansen, 0.01, 10, args=(a, b))
            qnorm.append(sq[0] / sp[0])
#        plt.plot(ww, qnorm, 'g')
        c = col[u]
        s = sty[v]
        plt.plot(ww,
                 qnorm,
                 color=c,
                 ls=s,
                 label='a=%s [$\mu m$], b=%s' % (a, b))
# The second legend call replaces the first; only the fancybox legend with
# the small font ends up on the figure.
plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.)
leg = plt.legend(fancybox=True, loc=1, prop={'size': 7})
leg.get_frame().set_alpha(0.5)
#plt.xlim(1, 12)
#plt.ylim(1, 4)
#plt.title('extinction at long wavelengths')
#plt.title('Model Grid')
plt.xlabel('wavelength [$\mu m$]')
plt.ylabel('forsterite extinction coefficient')
plt.savefig('Plots/Qex_long_%s.pdf' % (timestr))
#plt.savefig('Plots/Qex_small.pdf')
plt.clf()

quit()
# NOTE(review): everything below is unreachable because of the quit() above,
# so the averaged-extinction table is currently never written.
obj2 = {'a': aaa, 'b': bbb, 'wave': www, 'Qex_averaged': qnorm}
#cPickle.dump(obj2, open('../../Documents/Kaystuff/pickles/hansenmieff.pkl', 'wb'))
#cPickle.dump(obj2, open('pickles/hansenmieff.pkl', 'wb'))
asciitable.write(obj2, 'Files/hansensmall.txt')
Ejemplo n.º 45
0
            aaa.append(a)
            bbb.append(b)
#            wwww.append(ww[w])
#            www.append(ww[w])
            print a, b, ww[w]
            sq = quad(qhansen, 0.01, 10, args = (a,b,ww[w]))
            sp = quad(phansen, 0.01, 10, args = (a,b))
            qnorm.append(sq[0]/sp[0])
#        plt.plot(ww, qnorm, 'g')
        c = col[u]
        s = sty[v]
        plt.plot(ww, qnorm, color = c, ls = s, label = 'a=%s [$\mu m$], b=%s'%(a,b))
# The second legend call replaces the first; only the fancybox legend with
# the small font ends up on the figure.
plt.legend(bbox_to_anchor=(1.05, 1), loc=1, borderaxespad=0.)
leg = plt.legend(fancybox=True, loc=1, prop={'size':7})
leg.get_frame().set_alpha(0.5)
#plt.xlim(1, 12)
#plt.ylim(1, 4)
#plt.title('extinction at long wavelengths')
#plt.title('Model Grid')
plt.xlabel('wavelength [$\mu m$]')
plt.ylabel('forsterite extinction coefficient')
plt.savefig('Plots/Qex_long_%s.pdf'%(timestr))
#plt.savefig('Plots/Qex_small.pdf')
plt.clf()

quit()
# NOTE(review): everything below is unreachable because of the quit() above,
# so the averaged-extinction table is currently never written.
obj2 = {'a':aaa, 'b':bbb, 'wave':www, 'Qex_averaged': qnorm}
#cPickle.dump(obj2, open('../../Documents/Kaystuff/pickles/hansenmieff.pkl', 'wb'))
#cPickle.dump(obj2, open('pickles/hansenmieff.pkl', 'wb'))
asciitable.write(obj2, 'Files/hansensmall.txt')
Ejemplo n.º 46
0
    asciitable.write(d_data2, "vl2.data",delimiter=",")
    asciitable.write(d_data, "vl1.data",delimiter=",")
    return fit

if __name__=="__main__":
    # Command-line driver: fit the Viking lander pressure records with a
    # harmonic series and write the mode coefficients to the output file.
    parser = argparse.ArgumentParser()
    parser.add_argument("input_filename", type=str)
    parser.add_argument("output_filename", type=str)
    parser.add_argument("--vl1years", type=str, default="2,3")
    parser.add_argument("--vl2years", type=str, default="3")
#    parser.add_argument("--startrow", type=int, default=0)
#    parser.add_argument("--stoprow", type=int, default=None)
    parser.add_argument("--delimiter", type=str, default=',')
    parser.add_argument("--nmodes", type=int, default=5)

    args = parser.parse_args()

    fit = fit_viking_data(args.input_filename,
                    nmodes=args.nmodes,
                    vl1years = args.vl1years,
                    vl2years = args.vl2years
                    )
    # One row per harmonic mode with its two fitted coefficients.
    output = dict(mode=range(len(fit["p1"])),
                  p1=fit["p1"],
                  p2=fit["p2"])
    target = open(args.output_filename, 'w')
    target.write("#Fit to VL data from {0} with {1} harmonic modes\n".format(args.input_filename, args.nmodes))
    asciitable.write(output,target, delimiter=args.delimiter)
    target.close()
Ejemplo n.º 47
0
        symb = 'go'
        dJK.append(J[l] - K[l] - JKavg)
        a_emc.append(a_mcmc[0])
        a_plus.append(a_mcmc[1])
        a_minus.append(a_mcmc[2])
        n_emc.append(n_mcmc[0])
        n_plus.append(n_mcmc[1])
        n_minus.append(n_mcmc[2])
        lgname_conf.append(red)
        print red
    lgname0.append(red)
    shape.append(symb)

# Write delta(J-K),a,N in a file for future use
# NOTE(review): the '0'-suffixed lists (dJK0, a_emc0, n_emc0) are presumably
# accumulated earlier in the script alongside lgname0 -- not visible here.
data = {'filename': lgname0, 'd(J-K)': dJK0, 'a': a_emc0, 'N': n_emc0}
asciitable.write(data, 'Files/emceeresults_lg.txt')

#        plota.errorbar(dJK[l], a_emc[l], yerr = (a_plus[l], a_minus[l]), fmt = 'go', alpha = 0.8)
# Write confident delta(J-K),a,N in a file for future use
dataa = {'filename': lgname_conf, 'd(J-K)': dJK, 'a': a_emc, 'N': n_emc}
asciitable.write(dataa, 'Files/emceeresults_lg_conf.txt')

#quit()

#quit()
print len(lgname0)
#plt.show()
#quit()
# Fresh accumulators for the next section of the script.
stypeo = []
colo = []
dcol = []
Ejemplo n.º 48
0
import pyfits
import os, sys
import numpy as np
import asciitable
import asciitable.latex
from cudakde import *

def FoldInput(infile):
    """Read a list of result files named in *infile* (one path per line,
    first whitespace-separated token) and fold them into 8 rows of latex
    formatted "median^{+err}_{-err}" strings, one entry per input file.

    Assumes each read_data() result indexes as pars[percentile, column]
    with rows 0/1/2 = lower/median/upper -- TODO confirm against cudakde.
    """
    # Bug fix: close the input file (it used to stay open).
    with open(infile) as inf:
        lines = inf.readlines()
    pars = []
    for line in lines:
        pp = read_data(line.split()[0])
        pars.append(pp)
    prst = []
    for j in range(8):
        lst = []
        for i in range(len(lines)):
            lst.append(r'$%1.2f^{+%1.2f}_{-%1.2f}$'%(pars[i][1,j],pars[i][2,j]-pars[i][1,j],pars[i][1,j]-pars[i][0,j]))
        prst.append(lst)
    return prst

# Fold the result files listed in the file given on the command line.
prst = FoldInput(sys.argv[1])

models = ['bb','nsa12','nsa13','ns1260','ns123100','ns123190','ns130100','ns130190']

# One latex table column per quantity; keys carry a leading letter,
# apparently to control the column order.
data = {r'A $Mod.$': models, r'B Bol. lum.': prst[0], r'B Bol. flux': prst[1], r'C $Psr. flux$': prst[2], r'D PWN flux': prst[3], r'E Psr. Lum.': prst[4], r'F PWN Lum': prst[5], r'G Psr. eff.': prst[6], r'H PWN eff': prst[7]}
asciitable.write(data, sys.stdout, Writer = asciitable.Latex, latexdict = {'preamble': r'\begin{center}', 'tablefoot': r'\end{center}', 'tabletype': 'table*', 'units':{'$N_{\rm H}$':'$\rm 10^{21} ./ cm^{-2}$'}})
Ejemplo n.º 49
0
def compare(lineid, specid, column='flux', posid="positions_mosaic"):
    """Compare *column* between overlapping fields of the (lineid, specid)
    mosaic.

    For each pair of overlapping fields, the values at nearest-neighbour
    apertures are scatter-plotted against each other (colour-coded by
    aperture separation) and saved as a png; the separation statistics and
    the quartiles of the value ratio are written to a tab-separated table.

    NOTE(review): aperture indices returned by find_overlaps appear to be
    1-based (they are shifted by -1 before indexing) -- confirm.
    """
    fieldpairs = mosaic.find_overlaps(posid=posid)
    fitsfilename = "%s_mosaic_%s.fits" % (lineid, specid)
    table = mosaic.read_fits_table(fitsfilename)

    # initialise a table to save the results
    outtable = dict(field1=list(), field2=list(),
                    s_mean=list(), s_std=list(), N=list(),
                    rat_med=list(), rat_q25=list(), rat_q75=list())
    for fieldpair, indices in fieldpairs.items():
        field, otherfield = fieldpair
        values = table[column][[i-1 for i in indices.keys()]]
        otherindices = list()
        seps = list()
        for neighbours in indices.values():
            # first attempt - just use the nearest neighbour
            sep, otherindex = neighbours[0]
            otherindices.append(otherindex)
            seps.append(sep)
        othervalues = table[column][[i-1 for i in otherindices]]
        seps = np.array(seps)

        # plot the data
        try:
            datamax = 1.1*max(values.max(), othervalues.max())
        except ValueError:
            # something wrong with this pair: skip it
            continue
        # The color of the points represents the separations
        plt.scatter(x=values, y=othervalues, c=seps,
                    marker="o", vmin=0.0, vmax=2.68, alpha=0.6)
        plt.axis([0.0, datamax, 0.0, datamax])
        cb = plt.colorbar()
        cb.set_label("aperture\nseparation")
        # plt.axis('scaled')
        plt.xlabel(field)
        plt.ylabel(otherfield)

        # plot line y =x
        x = np.linspace(0.0, datamax)
        plt.plot(x, x, 'r--')

        # Median gradient of othervalues vs values, with interquartile range
        medgrad = np.median(othervalues/values)
        quart25 = scipy.stats.scoreatpercentile(othervalues/values, 25)
        quart75 = scipy.stats.scoreatpercentile(othervalues/values, 75)
        dyplus = quart75 - medgrad
        dyminus = medgrad - quart25
        y = medgrad*x
        plt.plot(x, y, 'g')

        # Add info to title
        plt.title("%s %s %s\n N = %i, median(%s/%s) = %.2f + %.2f - %.2f\n sep = %.2f +/- %.2f"
                  % (lineid, specid, column, len(values),
                     otherfield, field, medgrad, dyplus, dyminus,
                     seps.mean(), seps.std()),
                  fontsize="small")

        plt.savefig("%s-%s-compare-%s-%s-%s.png"
                    % (lineid, specid, column, field, otherfield))
        plt.clf()

        # save the data to the output table
        outtable["field1"].append(field)
        outtable["field2"].append(otherfield)
        outtable["s_mean"].append(seps.mean())
        outtable["s_std"].append(seps.std())
        outtable["N"].append(len(values))
        outtable["rat_med"].append(medgrad)
        outtable["rat_q25"].append(quart25)
        outtable["rat_q75"].append(quart75)

    # Write output table to file
    fmt = "%.3f"
    fmts = dict(s_mean=fmt, s_std=fmt,
                rat_med=fmt, rat_q25=fmt, rat_q75=fmt)
    asciitable.write(outtable,
                     "%s-%s-compare-%s.dat" %  (lineid, specid, column),
                     delimiter="\t", formats=fmts)
Ejemplo n.º 50
0
    else:
        keys = args.columns
    #-------------------------------------------------------------------------------------------------------------
    # Initialize and fill dictionary with data
    asciidata = {}
    for kk in keys:
        asciidata[kk] = []
        asciidata[kk][:] = fitstab[kk][:]
    #-------------------------------------------------------------------------------------------------------------
    # write dictionary to ascii file
    head = re.compile('\.fi', re.IGNORECASE).split(
        args.fitsfile)[0]  # making sure case is ignored
    asciiname = head + '.ascii'
    #commentstr = '# File created on ',str(datetime.datetime.now()),' with fits2ascii.py from ',args.fitsfile
    asciitable.write(asciidata,
                     asciiname,
                     Writer=asciitable.CommentedHeader,
                     names=keys)
    #-------------------------------------------------------------------------------------------------------------
    if args.verbose:
        print 'Wrote the columns: ', keys
        print 'From fits file   : ', args.fitsfile
        print 'to the ascii file: ', asciiname
else:
    #-------------------------------------------------------------------------------------------------------------
    #                                             ASCII 2 FITS
    #-------------------------------------------------------------------------------------------------------------
    # reading ascii file
    hdr = open(args.fitsfile).readlines()[0]
    hdrkeys = hdr.split()[1:]
    data = np.genfromtxt(args.fitsfile, comments='#')
    #-------------------------------------------------------------------------------------------------------------
Ejemplo n.º 51
0
def pow_ave(filedir, dx, units, **kwargs):
    """Average the power readings of every .txt file in *filedir* and
    append a Position / Average Power table to <filedir>/ave.txt.

    filedir : directory holding the measurement files
    dx      : spacing between consecutive measurement positions
    units   : 'standard' (inches), 'mm' or 'micron'; fixes the mm conversion
    zvals   : keyword; anything other than 'default' stops taking z from
              the first three characters of each filename

    Returns (pos, pows, z), plus the after-shot average as a fourth value
    when a file ending in 'a.txt' was present.
    """
    # Import asciitable/numpy locally, matching the original module's style.
    import asciitable
    import numpy as np
    import os

    # Keywords
    zvals = kwargs.get('zvals', 'default')

    # Get filenames from filedir
    filenames = os.listdir(filedir)
    filenames.sort()
    avpow = []
    after_shot = False
    after = None
    z = 1
    for filename in filenames:
        # A stale summary file would otherwise be averaged as data.
        if filename == 'ave.txt':
            os.remove(filedir + '/' + filename)
            continue
        if filename[-4:] != '.txt':
            continue
        if zvals == 'default':
            z = filename[:3]
        else:
            z = 1
        gnarpow = []
        # Bug fix: every data file is now closed (previously only the last
        # one was closed, after the loop).
        with open(filedir + '/' + filename, 'r') as datafile:
            for line in datafile:
                columns = line.strip().split()
                gnarpow.append(float(columns[2]))
        if filename[-5:] == 'a.txt':
            # 'a.txt' marks the after-shot measurement, reported separately.
            after_shot = True
            after = np.mean(gnarpow)
        else:
            avpow.append(np.mean(gnarpow))
    pows = np.array(avpow)

    # Conversion of dx to millimetres.
    if units == 'standard':
        conv = 25.4
    elif units == 'mm':
        conv = 1
    elif units == 'micron':
        conv = 1E-3
    else:
        # Previously an unknown unit surfaced later as a NameError.
        raise ValueError("units must be 'standard', 'mm' or 'micron'")

    # Make position array
    pos = np.zeros(np.shape(pows), dtype=float)
    for i in range(1, len(pos)):
        pos[i] = pos[i - 1] + (dx * conv)

    # Append the table to <filedir>/ave.txt (removed above if it existed);
    # the handle is now closed even if asciitable.write raises.
    with open(filedir + '/ave.txt', 'a') as newfile:
        asciitable.write({
            'Position': pos,
            'Average Power': pows
        },
                         newfile,
                         names=['Position', 'Average Power'])

    if after_shot:
        return pos, pows, z, after
    return pos, pows, z
Ejemplo n.º 52
0
# along with this program.  If not, see <http://www.gnu.org/licenses/>.

import asciitable, sqlite3, datetime

#
# Parse command line options
#
from optparse import OptionParser

parser = OptionParser()
parser.add_option('-d', '--db', dest='db_filename', help='Use DB as the source sqlite database', metavar='DB')

(options, args) = parser.parse_args()

db = sqlite3.connect(options.db_filename)

# Fetch the most recent wigle_net statistics row together with its age in
# seconds (now - ts).
cur = db.cursor()
cur.execute("select strftime('%s', 'now') - strftime('%s', ts) as delta,disc_80211_networks,disc_cells,global_rank from wigle_net order by ts desc limit 1")
delta, disc_80211_networks, disc_cells, global_rank = cur.fetchone()

delta = datetime.timedelta(seconds=delta)

# Render the stats as a one-row fixed-width table on stdout.
asciitable.write({
    'UPD': [ str(delta) ],
    'Disc. Nets w/ GPS': [ str(disc_80211_networks) ],
    'Disc. cells w/ GPS': [ str(disc_cells) ],
    'Rank': [ str(global_rank) ],
}, names = ['UPD', 'Disc. Nets w/ GPS', 'Disc. cells w/ GPS', 'Rank'], Writer=asciitable.FixedWidth)


Ejemplo n.º 53
0
        tmp = dict(link_small=pattern_s,
                   link_big=pattern_b,
                   link=link,
                   idISS=idISS
                   )

        photos.append(tmp)
    return photos,pattern_s_L,pattern_b_L,link_L,idiss

photos,link_small,link_big,link,idiss=get_iss_photos(lista,mission)

# Dump the photo records to a csv for the crowdsourcing app.
photos=np.array(photos)
photos=list(photos.astype(str))
f = open('tasks_darkskies.csv', 'w')
f.write("\n".join(photos))
f.close()
# Upload the tasks in batches of 100 (len(idiss)/100 is integer division
# under Python 2, giving the number of full batches).
for h in list(np.array(range(len(idiss)/100+1))+1):
    idiss_j=np.array(idiss[(h-1)*100:h*100])
    link_j=np.array(link[(h-1)*100:h*100])
    link_small_j=np.array(link_small[(h-1)*100:h*100])
    link_big_j=np.array(link_big[(h-1)*100:h*100])

    print "Creating"
    asciitable.write({'idiss': idiss_j, 'link': link_j,'link_small':link_small_j,'link_big':link_big_j}, 'test2.csv', names=['idiss','link','link_small','link_big'],delimiter=',')
    print "Uploading"
    os.system('pbs add_tasks --tasks-file test2.csv --tasks-type csv --redundancy 5')
    print (h-1)*100
    print h*100
    # Throttle: wait 15 minutes between batch uploads.
    print "Waiting"
    time.sleep(900)
Ejemplo n.º 54
0
def lv_ave(date, **kwargs):
    """Average the labview power logs recorded on *date*, plot each run and
    return the run labels with their average powers.

    Keyword arguments:
        darkframe : drop the first frame of each run before averaging
        save      : write the averages to <plots>/lv/ave_pows.txt
        repeats   : run numbers whose files hold two runs back to back
        ratio     : scale factor applied to the measured powers
        f1_ratio  : additionally return mean(power[1:]) / power[0] per run

    Returns (lv_nums, av_pow) or (lv_nums, av_pow, f1ratio).
    """
    # Define keyword arguments
    df = kwargs.get('darkframe', False)
    s = kwargs.get('save', True)
    rep = kwargs.get('repeats', [])
    ratio = kwargs.get('ratio', 1)
    f1 = kwargs.get('f1_ratio', False)

    # Import Modules
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    from decimal import Decimal

    # NOTE(review): the original created directories under
    # /home/chris/anaconda2/plots but saved figures and the averages file
    # under /Users/christopherchambers/Py/plots, i.e. into a directory that
    # was never created.  Both now use the directory create_dir makes --
    # confirm this is the intended location.
    plots_dir = '/home/chris/anaconda2/plots/' + str(date) + '/lv'
    data_dir = '/home/chris/anaconda2/data/' + str(date) + '/lv'

    # Make a directory to store plots and averages in
    create_dir(plots_dir)

    # Collect the data files.  Bug fix: the summary file used to be removed
    # from the list while iterating over that same list, which silently
    # skipped the entry after it.
    filenames = sorted(os.listdir(data_dir))
    filepaths = []
    for name in filenames:
        if name == 'ave_pows.txt':
            continue
        if name[-3:] == 'txt':
            filepaths.append(data_dir + '/' + name)

    # The run label is whatever sits between the last '/' and the first '_'.
    lv_nums = np.array([os.path.basename(p).split('_')[0] for p in filepaths])

    # Insert a synthetic label after each repeated run (e.g. 12 -> 121).
    for i in rep:
        lv_nums = np.insert(lv_nums,
                            np.where(lv_nums == str(i))[0] + 1,
                            int(i * 10 + 1))

    # Import data from file(s)
    av_pow = np.zeros(len(filepaths) + len(rep))
    f1ratio = []
    for idx, path in enumerate(filepaths):
        frame = []
        power = []
        # Bug fix: files are now closed (they used to stay open).
        with open(path, 'r') as datafile:
            for line in datafile:
                columns = line.strip().split(';')
                frame.append(int(columns[0]))
                power.append(1000 * ratio *
                             float(columns[1]))  # 1000 converts to mW
        # Repeat files hold two runs back to back; split them in half.
        n = 2 if lv_nums[idx] in rep else 1
        for j in range(n):
            half = len(power) // n  # // keeps the slice bounds int on Py3
            power_part = power[j * half:(j + 1) * half]
            if df:
                # Skip the first (dark) frame when averaging.
                av_pow[idx + j] = np.mean(power_part[1:])
            else:
                av_pow[idx + j] = np.mean(power_part)
            # Plot the power for each frame
            plt.figure('lv')
            plt.clf()
            plt.plot(frame, power_part, 'bo', label='Measured Power')
            plt.xlabel('Frame')
            plt.ylabel('Measured Power (mW)')
            plt.title(
                str(date) + ': lv ' + str(lv_nums[idx + j]))
            avestr = '%.2E' % Decimal(av_pow[idx + j])
            h_line(av_pow[idx + j],
                   label='Average: ' + avestr + 'mW')
            plt.axis([0, frame[-1] + 1, 0, 1.1 * np.amax(power_part)])
            plt.legend(loc=4)
            plt.savefig(plots_dir + '/lv' + str(lv_nums[idx + j]) + '.png')
            plt.draw()
            if f1:
                f1ratio.append(np.mean(power[1:]) / power[0])

    if s:
        # Save the averages as a structured array (label, average power).
        # Bug fix: the dangling open(..., 'w') handle is gone; asciitable
        # creates/overwrites the file itself.
        import asciitable
        if ratio == 1:
            p_type = 'Ave Power (ref)'
        else:
            p_type = 'Ave Power (trans)'
        data = np.zeros(len(av_pow), dtype=[('lv', 'S4'), (p_type, 'float64')])
        data[:] = list(zip(lv_nums, av_pow))
        asciitable.write(data, plots_dir + '/ave_pows.txt')

    if f1:
        return lv_nums, av_pow, f1ratio
    else:
        return lv_nums, av_pow
Ejemplo n.º 55
0
def check_write_table(test_def, table):
    """Write *table* with test_def['kwargs'] and compare the generated text
    line by line against the expected output in test_def['out']."""
    out = io.StringIO()
    asciitable.write(table, out, **test_def['kwargs'])
    # Print both versions so a failing assert is easy to diagnose.
    print('Expected:\n%s' % test_def['out'])
    print('Actual:\n%s' % out.getvalue())
    assert(out.getvalue().splitlines() == test_def['out'].splitlines())
Ejemplo n.º 56
0
for x in range(len(bdnyc.targets)):
    spt = bdnyc.targets[x].sptype
    if len(spt) >= 3:
        if (spt[-3] in ('p',':')):
            continue
    if not spt[-2]==':':
        if not spt[-1] in ('g','b',':','d'):
            inst =  bdnyc.targets[x].nir['low'].keys()
            if len(inst) > 0:
                for y in range(len(inst)):
                    date = bdnyc.targets[x].nir['low'][inst[y]].keys()
                    if len(date) > 0:
                        for z in range(len(date)):
                            filter = bdnyc.targets[x].nir['low'][inst[y]][date[z]].keys()
                            if len(filter) > 0:
                                for t in range(len(filter)):
                                    wl = bdnyc.targets[x].nir['low'][inst[y]][date[z]][filter[t]]['wl']
                                    flux = bdnyc.targets[x].nir['low'][inst[y]][date[z]][filter[t]]['flux']
                                    data = bdnyc.targets[x].nir['low'][inst[y]][date[z]][filter[t]].keys()
                                    if wl[0] < 0.965 and wl[-1] > 2.120:
                                        specData.append([[wl],[flux]])
                                        targetinfo.append(bdnyc.targets[x])
                                        UNUM.append(bdnyc.targets[x].unum)
                                        INST.append(inst[y])
                                        TYPE.append('nir')
                                        RES.append('low')
                                        DATE.append(date[z])
                                        FILTER.append(filter[t])
asciitable.write({'unum':UNUM,'type':TYPE,'res':RES,'inst':INST,'date':DATE,'filter':FILTER},'fieldtable.dat')
return [specData,targetinfo]
Ejemplo n.º 57
0
parser = OptionParser()
parser.add_option('-d',
                  '--db',
                  dest='db_filename',
                  help='Use DB as the source sqlite database',
                  metavar='DB')

(options, args) = parser.parse_args()

db = sqlite3.connect(options.db_filename)

# Fetch the newest Aero2 usage row joined with its data-plan name, plus the
# row's age in seconds (now - ts).
cur = db.cursor()
cur.execute(
    "select strftime('%s', 'now') - strftime('%s', ts) as delta,imsi,aero2_dataplans.name,expiration_ts,data_used,data_available from aero2_data join aero2_dataplans where aero2_dataplans.id = aero2_data.dataplan_id order by ts desc limit 1"
)
delta, imsi, dataplan_name, expiration_ts, data_used, data_available = cur.fetchone(
)

delta = datetime.timedelta(seconds=delta)

# Render the plan status as a one-row fixed-width table on stdout.
asciitable.write(
    {
        'UPD': [str(delta)],
        'IMSI': [imsi],
        'Dataplan': [dataplan_name],
        'Expiration date': [str(expiration_ts)],
        'Data usage': ["%s of %s MB" % (data_used, data_available)],
    },
    names=['UPD', 'IMSI', 'Dataplan', 'Expiration date', 'Data usage'],
    Writer=asciitable.FixedWidth)