Exemplo n.º 1
0
    def test_reads_correct_entries_despite_blanks(self):
        """Blank lines in the dictionary file must be skipped by the reader."""
        entries = list(IO().read_dictionary(self.valid_path_with_blanks))

        self.assertListEqual(entries, ["foo", "bar", "baz"])
 def makefile(self, *args, **kwargs):
     """Fake socket.makefile: canned GET request for 'rb', empty buffer for 'wb'."""
     mode = args[0]
     if mode == 'rb':
         return IO(b"GET %s HTTP/1.0" % self._path)
     if mode == 'wb':
         return IO(b'')
     raise ValueError("Unknown file type to make", args, kwargs)
Exemplo n.º 3
0
    def test_non_int_raises_value_error(self):
        """Consuming a reader over non-integer content must raise ValueError."""
        reader = IO().read(self.invalid_file)

        with self.assertRaises(ValueError):
            for _ in reader:
                pass
 def test_load(self):
     """Load cubes with ch.load from both a file-path list and a directory.

     Verifies the time coordinate is rebased to a common 1970 origin and
     that the inconsistency report is printed on stdout.
     """
     glob_path = self.tmp_dir_time + '*.nc'
     filepaths = glob(glob_path)
     directory = self.tmp_dir_time
     out = IO()
     with common._redirect_stdout(out):
         test_case_a = ch.load(filepaths)
     output = out.getvalue().strip()
     self.assertIsInstance(test_case_a, iris.cube.Cube)
     self.assertEqual(test_case_a.dim_coords[0].units.origin,
                      "hours since 1970-01-01 00:00:00")
     self.assertEqual(test_case_a.dim_coords[0].units.calendar, "gregorian")
     expected_output = "cube aux coordinates differ: " \
                       "\n\ncube time coordinates differ: " \
                       "\n\n\ttime start date inconsistent" \
                       "\n\nNew time origin set to hours since " \
                       "1970-01-01 00:00:00"
     self.assertEqual(output, expected_output)
     out = IO()
     with common._redirect_stdout(out):
         test_case_b = ch.load(directory)
     # Fixed copy-paste bug: the directory-load result is test_case_b, not
     # test_case_a (which was already asserted above).
     self.assertIsInstance(test_case_b, iris.cube.Cube)
     self.assertEqual(test_case_b.dim_coords[0].units.origin,
                      "hours since 1970-01-01 00:00:00")
     self.assertEqual(test_case_b.dim_coords[0].units.calendar, "gregorian")
     # NOTE(review): `output` is not re-read from the second capture, so this
     # re-asserts the first run's report — confirm whether the directory load
     # is expected to print the same text.
     self.assertEqual(output, expected_output)
Exemplo n.º 5
0
    def test_raises_on_non_readable_path(self):
        """A nonexistent dictionary path must surface as IOError when consumed."""
        io = IO()
        with self.assertRaises(IOError):
            for _ in io.read_dictionary("path/that/doesnt/exist"):
                pass
Exemplo n.º 6
0
    def test_reads_entries_as_strings(self):
        """Every entry yielded by read_dictionary must be a str."""
        entries = list(IO().read_dictionary(self.valid_path_with_blanks))

        for entry in entries:
            self.assertIsInstance(entry, str)
Exemplo n.º 7
0
    def __init__(self, **kwargs):
        """Build the model component: I/O, parameters, state, diagnostics.

        Keyword arguments are consumed progressively (``pop``) by each
        subsystem; anything left unused is reported by ``_checkUnused``.
        """
        # Subset of self.Prognostic which will NOT be time-marched.
        if 'Fixed' in kwargs:
            self.Fixed = kwargs.pop('Fixed')
        else:
            self.Fixed = []

        # Initialize I/O
        self.Io = IO(self, **kwargs)

        # Get values from restart file, if available
        if 'RestartFile' in kwargs:
            ParamNames = Parameters().value.keys()
            FieldNames = self.Required
            kwargs = self.Io.readRestart(FieldNames, ParamNames, kwargs)

        # Initialize scalar parameters
        self.Params = Parameters(**kwargs)

        # Frequency with which compute() will be executed; defaults to one
        # model time step.
        if 'UpdateFreq' in kwargs:
            self.UpdateFreq = kwargs.pop('UpdateFreq')
        else:
            self.UpdateFreq = self.Params['dt']

        # Initialize State
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid

        # Dictionary to hold increments on prognos fields
        self.Inc = {}

        # Initialize diagnostics
        self.compute(ForcedCompute=True)

        # Create output file
        self.Io.createOutputFile(self.State, self.Params.value)

        # Write out initial state (skipped when appending to an existing file)
        if not self.Io.Appending: self.write()

        # Initialize plotting facilities
        self.Plot = Plot()

        # Initialize runtime monitor
        self.Monitor = Monitor(self, **kwargs)

        # Notify user of unused input quantities
        self._checkUnused(kwargs)

        # Set some redundant attributes (mainly for backward compatibility)
        self.nlon = self.Grid['nlon']
        self.nlat = self.Grid['nlat']
        self.nlev = self.Grid['nlev']
        # o3 is optional in the state. Was a bare ``except:`` which also
        # swallowed KeyboardInterrupt/SystemExit; narrowed to Exception.
        try:
            self.o3 = self.State['o3']
        except Exception:
            pass
Exemplo n.º 8
0
    def test_reads_correct_ints(self):
        """Tests to ensure the correct numbers are returned

        Implicitly bound to the values in valid_file.txt. Make sure to keep the
        values in sync or use a different file if diverging often.
        """
        io = IO()
        reader = io.read(self.valid_file)

        # NOTE(review): assertItemsEqual exists only on Python 2's unittest;
        # Python 3 renamed it assertCountEqual — confirm which interpreter
        # runs this suite before porting.
        self.assertItemsEqual([5, 6, 2, 4, 7, 232, 3], reader)
Exemplo n.º 9
0
    def print_diff():
        """Dump the actual result and the expected value as YAML for copy/paste."""
        for label, obj in (("Yaml formatted result for copy/paste:", result),
                           ("\nYaml formatted expected:",
                            query_test['expected'])):
            buf = IO()
            yaml.dump(obj, buf)
            print(label)
            print(buf.getvalue())
 def test_log_module_redirect(self):
     """Messages logged via log_module() must be captured by _redirect_stdout."""
     logger = log_module()
     for message in ('Message on stdout', 'Message on stderr'):
         capture = IO()
         with _redirect_stdout(capture):
             logger.info(message)
         self.assertEqual(capture.getvalue().strip(), message)
Exemplo n.º 11
0
    def __init__(self, filename):
        '''
        Read a PSCF symmetry-adapted field file, and create a new object.

        Argument:
        filename -- name of file

        The file named filename is opened and closed within this function.
        '''
        self.file = open(filename, 'r')
        self._io = IO()
        # Local alias for the open handle (note: shadows the `file` builtin
        # on Python 2).
        file = self.file

        # Read version line
        self.version = Version(self.file)

        # Header: unit cell, group name, then the field dimensions.
        self._input_unit_cell()
        self.group_name = self._input_var('char')
        self.N_monomer = self._input_var('int')
        self.N_star = self._input_var('int')

        # Define empty lists
        self.fields = []
        self.waves = []
        self.counts = []

        # One line per star: N_monomer coefficients, dim wave indices, and a
        # star count.  self.dim is presumably set by _input_unit_cell() —
        # TODO confirm.
        for i in range(self.N_star):

            data = file.readline().split()
            if len(data) != self.N_monomer + self.dim + 1:
                raise IoException('Incorrect number of elements in field line')
            j = 0  # running column index into `data`

            # Read field coefficients
            self.fields.append([])
            for k in range(self.N_monomer):
                value = float(data[j])
                self.fields[i].append(value)
                j += 1

            # Read wave indices (original comment wrongly said coefficients)
            self.waves.append([])
            for k in range(self.dim):
                value = int(data[j])
                self.waves[i].append(value)
                j += 1

            # Read star_count
            self.counts.append(int(data[j]))

        self.file.close()
        self.file = None
Exemplo n.º 12
0
def export_groups(request):
    """Export all of the requesting user's client groups into one Excel sheet."""
    combined = pd.DataFrame()
    for group in Group.objects.filter(created_by=request.user):
        df = get_df_clients(user=request.user, group_id=group.pk)
        df['客户分组事件_编号'] = group.id
        df['客户分组事件_名称'] = group.name
        combined = pd.concat([combined, df])

    buffer = IO()
    writer = pd.ExcelWriter(buffer, engine='xlsxwriter')
    combined.to_excel(writer, 'data', index=False)
    writer.save()
    writer.close()

    # Rewind before read(), otherwise the response body would be empty.
    buffer.seek(0)

    # Set the spreadsheet MIME type so the browser offers a download.
    response = HttpResponse(
        buffer.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')

    # Timestamped default file name.
    now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    response['Content-Disposition'] = 'attachment; filename=' + now + '.xlsx'
    return response
Exemplo n.º 13
0
    def gzip_response(response):
        """Gzip-compress JSON responses when the client advertises support."""
        if response.headers["Content-Type"] != "application/json":
            return response

        if "gzip" not in request.headers.get("Accept-Encoding", "").lower():
            return response

        response.direct_passthrough = False

        success = 200 <= response.status_code < 300
        if not success or "Content-Encoding" in response.headers:
            return response

        buf = IO()
        with gzip.GzipFile(mode="wb", fileobj=buf) as gz:
            gz.write(response.data)

        response.data = buf.getvalue()
        response.headers["Content-Encoding"] = "gzip"
        response.headers["Vary"] = "Accept-Encoding"
        response.headers["Content-Length"] = len(response.data)

        return response
Exemplo n.º 14
0
def _fetch_structure(pdbid, biounit=False):
    """Yield decoded PDB lines for *pdbid*, fetched gzipped from RCSB.

    Prints an error to stderr and yields nothing on failure.

    :param pdbid: 4-character PDB identifier (case-insensitive)
    :param biounit: fetch the first biological assembly (.pdb1) instead of
        the asymmetric unit
    """
    base_url = 'https://files.rcsb.org/download/'
    pdb_type = '.pdb1' if biounit else '.pdb'
    pdb_url = base_url + pdbid.lower() + pdb_type + '.gz'

    try:
        request = Request(pdb_url)
        opener = build_opener()
        url_data = opener.open(request).read()
    except HTTPError as e:
        print('[!] Error fetching structure: ({0}) {1}'.format(e.code, e.msg),
              file=sys.stderr)
        return
    else:
        gz_handle = None
        try:
            buf = IO(url_data)
            gz_handle = gzip.GzipFile(fileobj=buf, mode='rb')
            for line in gz_handle:
                yield line.decode('utf-8')
        except IOError as e:
            # Fixed: IOError has no ``.msg`` attribute, so the original
            # handler raised AttributeError; format the exception itself.
            print('[!] Error fetching structure: {0}'.format(e),
                  file=sys.stderr)
            return
        finally:
            # gz_handle is unset if GzipFile construction itself failed.
            if gz_handle is not None:
                gz_handle.close()
Exemplo n.º 15
0
    def get(self, request, **kwargs):
        """Filter the stored analytics spreadsheet and return it as .xlsx.

        Reads the 'Raw Data' sheet of the stored file, keeps only rows whose
        accepted_compound_id ends in PC, LPC or 'plasmalogen', and streams
        the result back as an Excel attachment.
        """
        try:
            # Assumes at least one Data row named 'analytics_data' exists —
            # IndexError here is NOT caught below (only DatabaseError is).
            data = Data.objects.filter(name='analytics_data')[0]

            # Let's do the analytics
            df = pd.read_excel(data.file, sheet_name='Raw Data')
            # Normalize column names: spaces -> underscores, lowercase.
            df.columns = [col.replace(' ', '_').lower() for col in df.columns]

            # Keep only the lipid classes of interest.
            df = df.loc[df['accepted_compound_id'].str.endswith('PC')
                        | df['accepted_compound_id'].str.endswith('LPC')
                        | df['accepted_compound_id'].str.endswith('plasmalogen'
                                                                  )]

            # Re-serialize the filtered frame to an in-memory workbook.
            excel_file = IO()
            xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')
            df.to_excel(xlwriter, 'Raw Data')
            xlwriter.save()
            xlwriter.close()
            # Rewind so read() returns the workbook bytes.
            excel_file.seek(0)

            response = HttpResponse(
                excel_file.read(),
                content_type=
                'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
            )
            response[
                'Content-Disposition'] = 'attachment; filename=output1.xlsx'
            return response
        except DatabaseError as e:
            # NOTE(review): 304 is an unusual status for a database failure —
            # confirm this is intentional.
            return Response({"Error": str(e)},
                            status=status.HTTP_304_NOT_MODIFIED)
Exemplo n.º 16
0
def export_data_xls(request, code=None):
    """Export a gauge's 1-minute discharge time series as an .xlsx download.

    :param request: Django request object
    :param code: gauge station code used to select the data and name the file
    """
    # Removed dead code: the original built an 'application/ms-excel'
    # HttpResponse ("users.xls") here that was unconditionally replaced below.
    dfDischarge = GaugeDischarge01Min.objects.filter(
        code=code).to_timeseries(index='mydate')
    station_info = Gauge01MinMinimal.objects.filter(code=code)

    # my "Excel" file, which is an in-memory output file (buffer)
    # for the new workbook
    excel_file = IO()

    xlwriter = pd.ExcelWriter(excel_file, engine='xlsxwriter')

    # Sheet is named after the station.
    dfDischarge.to_excel(xlwriter, station_info.values()[0]['station_name'])

    xlwriter.save()
    xlwriter.close()

    # important step, rewind the buffer or when it is read() you'll get nothing
    # but an error message when you try to open your zero length file in Excel
    excel_file.seek(0)

    # set the mime type so that the browser knows what to do with the file
    response = HttpResponse(excel_file.read(),
                            content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')

    # set the file name in the Content-Disposition header
    response['Content-Disposition'] = 'attachment; filename=DischargeStation{}.xlsx'.format(code)

    print(response)

    return response
Exemplo n.º 17
0
        def zipper(response):  # pylint: disable=unused-variable
            """After-request hook: gzip the body for clients that accept it."""
            accept_encoding = request.headers.get('Accept-Encoding', '')
            if 'gzip' not in accept_encoding.lower():
                return response

            response.direct_passthrough = False

            status_ok = 200 <= response.status_code < 300
            if not status_ok or 'Content-Encoding' in response.headers:
                return response

            compressed = IO()
            gz = gzip.GzipFile(mode='wb', fileobj=compressed)
            gz.write(response.data)
            gz.close()

            response.data = compressed.getvalue()
            response.headers['Content-Encoding'] = 'gzip'
            response.headers['Vary'] = 'Accept-Encoding'
            response.headers['Content-Length'] = len(response.data)

            return response
Exemplo n.º 18
0
    def __init__(self, socket, tls_wrapper=None):
        """Wrap *socket* in the client's IO layer and reset connection state.

        :param socket: connected socket for the SMTP session
        :param tls_wrapper: optional callable used when upgrading to TLS —
            TODO confirm against the IO class's contract
        """
        self.io = IO(socket, tls_wrapper)
        # FIFO of replies still awaiting their server responses.
        self.reply_queue = []

        #: :class:`Extensions` object of the client, populated once the EHLO
        #: command returns its response.
        self.extensions = Extensions()
Exemplo n.º 19
0
def get_thumbnail(userfolder, file_name):
    """Serve a 70x70 PNG thumbnail of an image in the user's IMG_DIR.

    Honors If-Modified-Since (returns 304 when the file is unchanged);
    otherwise renders and returns the thumbnail bytes.
    """
    # check_path presumably validates file_name stays inside the folder —
    # TODO confirm (path-traversal guard).
    filename = check_path(os.path.join(get_userfolder_path(userfolder), IMG_DIR), file_name)

    headers = {}
    stats = os.stat(filename)

    # Advertise the file's mtime so clients can revalidate.
    lm = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime(stats.st_mtime))
    response.set_header('Last-Modified', lm)

    # Conditional GET: compare client's timestamp against the file mtime.
    ims = request.environ.get('HTTP_IF_MODIFIED_SINCE')
    if ims:
        ims = parse_date(ims.split(";")[0].strip())
    if ims is not None and ims >= int(stats.st_mtime):
        headers['Date'] = time.strftime("%a, %d %b %Y %H:%M:%S GMT", time.gmtime())
        # NOTE(review): headers passed as keyword args to HTTPResponse —
        # confirm bottle treats them as response headers here.
        return HTTPResponse(status=304, **headers)

    response.content_type = 'image/png'

    # Render the thumbnail in memory. NOTE(review): Image.ANTIALIAS is
    # removed in recent Pillow (use Image.LANCZOS) — confirm pinned version.
    img = Image.open(filename, 'r')
    img.thumbnail((70, 70), Image.ANTIALIAS)
    buffered = IO()
    img.save(buffered, format='PNG')

    ret = buffered.getvalue()
    response.set_header('Content-Length', len(ret))

    return ret
Exemplo n.º 20
0
def playlist(stream):
    """Build a gzipped HLS (m3u8) manifest from the last 120 stored segments.

    :param stream: source identifier used to select video parts
    :return: Flask Response with a gzip-encoded application/x-mpegURL body
    """
    DB = create_db_engine()
    # NOTE(review): `stream` is interpolated straight into SQL — a SQL
    # injection risk if it comes from the request; switch to bound
    # parameters if so.
    results = DB.execute(
                "SELECT * FROM {}.edgevideopart WHERE source='{}' AND video_path is NOT NULL order by timestamp desc limit 120".format(DB_NAME, stream))
    results = [result for result in results]
    start = time.time()
    # Total number of segments ever stored, used as the media sequence base.
    segment_count = DB.execute("SELECT COUNT(*) FROM {}.edgevideopart WHERE source='{}' AND video_path is NOT NULL".format(DB_NAME, stream))
    segment_count = [segment for segment in segment_count][0][0]
    # Manifest header; program date comes from the oldest selected segment
    # (results are DESC, so results[-1] is the oldest).
    manifest =  \
"""#EXTM3U
#EXT-X-VERSION:7
#EXT-X-INDEPENDENT-SEGMENTS
#EXT-X-TARGETDURATION:6
#EXT-X-PROGRAM-DATE-TIME:%s
#EXT-X-MEDIA-SEQUENCE:%s
""" % (
    datetime.utcfromtimestamp(results[-1].timestamp/1000).strftime('%Y-%m-%dT%H:%M:%S'), 
    segment_count)

    # Append segments oldest-first.
    for result in results[::-1]:
        url = ("/video/{}".format(result.id))
        manifest += "\n#EXTINF:{}, \n{}\n#EXT-X-DISCONTINUITY ".format(float(result.length), url)

    # Gzip the manifest body and advertise the encoding explicitly.
    gzip_buffer = IO()
    gzip_file = gzip.GzipFile(mode='wb',
                              fileobj=gzip_buffer)
    gzip_file.write(manifest.encode())
    gzip_file.close()
    return Response(gzip_buffer.getvalue(), mimetype="application/x-mpegURL", headers={
        "Content-Encoding":"gzip", 'Content-Length': len(gzip_buffer.getvalue())})
Exemplo n.º 21
0
def export(request, type):
    """Export the pivoted or raw query result as an .xlsx attachment."""
    form_dict = dict(six.iterlists(request.GET))

    if type == "pivoted":
        df = get_df(form_dict)  # pivoted data
    elif type == "raw":
        df = get_df(form_dict, is_pivoted=False)  # raw data

    buffer = IO()
    writer = pd.ExcelWriter(buffer, engine="xlsxwriter")
    df.to_excel(writer, "data", index=True)
    writer.save()
    writer.close()

    # Rewind before read(), otherwise the response body would be empty.
    buffer.seek(0)

    # Spreadsheet MIME type so the browser offers a download.
    response = HttpResponse(
        buffer.read(),
        content_type=
        "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet",
    )

    # Timestamped default file name (unique enough for a download).
    now = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    response["Content-Disposition"] = "attachment; filename=" + now + ".xlsx"
    return response
Exemplo n.º 22
0
def downloadreport(request):
    """Export all samples (transaction fields only) to an Excel attachment."""
    samples_list = samples.objects.all()
    samples_filter = SampleFilter(request.GET, queryset=samples_list)

    frame = convert_to_dataframe(
        samples_list,
        fields=[
            'transaction_GlCode', 'transaction_date',
            'transaction_number', 'transaction_value',
            'remarks', 'action', 'Area',
            'Financial_Year', 'Client'
        ])
    result = pd.concat([frame])

    buffer = IO()
    writer = pd.ExcelWriter(buffer, engine='openpyxl')
    result.to_excel(writer, 'sheetname')
    writer.save()
    writer.close()

    # Rewind so read() returns the workbook bytes.
    buffer.seek(0)
    response = HttpResponse(
        buffer.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['content-Disposition'] = 'attachment; filename=myfile.xlsx'
    return response
Exemplo n.º 23
0
    def setUp(self):
        """Dump the reference repository into an in-memory buffer for tests."""
        Framework.TestCase.setUp(self)
        self.repo = self.g.get_repo("akfish/PyGithub")

        # Serialized copy of the repo, rewound so tests can read it back.
        self.dumpedRepo = IO()
        self.g.dump(self.repo, self.dumpedRepo)
        self.dumpedRepo.seek(0)
Exemplo n.º 24
0
    def setUp(self):
        """Dump the reference repository into an in-memory buffer for tests."""
        super().setUp()
        self.repo = self.g.get_repo("akfish/PyGithub")

        # Serialized copy of the repo, rewound so tests can read it back.
        self.dumpedRepo = IO()
        self.g.dump(self.repo, self.dumpedRepo)
        self.dumpedRepo.seek(0)
Exemplo n.º 25
0
def b64_mol(mol, size=300, hlsss=None):
    """Render a molecule (or list of molecules) as a base64-encoded PNG.

    :param mol: an RDKit Mol, or a list of Mols (rendered as a grid)
    :param size: image edge length in pixels
    :param hlsss: comma-separated SMILES whose substructure matches in *mol*
        are highlighted (single-Mol case only)
    :return: base64-encoded PNG as a str
    """
    img_file = IO()
    if isinstance(mol, list):
        img = autocrop(Draw.MolsToGridImage(mol, size=(size, size)))
    else:
        if hlsss is not None:
            if isinstance(hlsss, str):
                hlsss = hlsss.split(",")
                atoms = set()
                for smi in hlsss:
                    m = Chem.MolFromSmiles(smi)
                    if m:
                        # Flatten all match tuples into one atom-index pool.
                        matches = list(chain(*mol.GetSubstructMatches(m)))
                    else:
                        matches = []
                    if len(matches) > 0:
                        atoms = atoms.union(set(matches))
            # NOTE(review): if hlsss is not None and not a str, `atoms` is
            # undefined here and this raises NameError — confirm callers only
            # pass strings.
            atoms = list(atoms)
        else:
            atoms = []
        try:
            img = autocrop(
                Draw.MolToImage(mol, size=(size, size), highlightAtoms=atoms))
        except UnicodeEncodeError:
            # Fall back to a wildcard atom when the mol can't be rendered.
            print(Chem.MolToSmiles(mol))
            mol = Chem.MolFromSmiles("*")
            img = autocrop(Draw.MolToImage(mol, size=(size, size)))
    img = make_transparent(img)
    img.save(img_file, format='PNG')
    b64 = base64.b64encode(img_file.getvalue())
    b64 = b64.decode()
    img_file.close()
    return b64
Exemplo n.º 26
0
def stats_to_csv(stats):
    """Serialize a list of dict rows to CSV text.

    The header is the sorted union of all keys seen across *stats*; rows
    missing a key get an empty cell.

    :param stats: iterable of dict-like rows
    :return: the CSV document as a string
    """
    if sys.version_info[0] >= 3:
        from io import StringIO as IO
    else:
        from cStringIO import StringIO as IO
    import csv

    csv_fh = IO()

    # Union of every key in any row, so no row can fail on extra keys.
    keys = set()
    for stat in stats:
        keys.update(stat.keys())

    # str key makes mixed-type keys sortable deterministically.
    fieldnames = sorted(keys, key=str)

    csvwriter = csv.DictWriter(csv_fh,
                               delimiter=str(","),
                               fieldnames=fieldnames)
    # Idiom: writeheader() replaces the manual writerow(dict((fn, fn) ...)).
    csvwriter.writeheader()
    for row in stats:
        csvwriter.writerow(row)
    contents = csv_fh.getvalue()
    csv_fh.close()
    return contents
Exemplo n.º 27
0
Arquivo: io.py Projeto: vadsem/yt
    def _generic_fluid_handler(self, chunks, selector, fields, size, ftype):
        """Read the requested fluid fields for *ftype* across all chunks.

        :param chunks: iterable of data chunks, each with domain subsets
        :param selector: geometric selector passed through to subset.fill
        :param fields: list of (field_type, field_name) tuples to read
        :param size: unused here — presumably part of the handler interface;
            TODO confirm
        :param ftype: fluid type whose file handler supplies the data file
        :return: dict mapping each (ftype, fname) to a concatenated array
        """
        tr = defaultdict(list)

        for chunk in chunks:
            for subset in chunk.objs:
                # Locate the file handler that serves this fluid type.
                fname = None
                for fh in subset.domain.field_handlers:
                    if fh.ftype == ftype:
                        file_handler = fh
                        fname = fh.fname
                        break

                if fname is None:
                    raise YTFieldTypeNotFound(ftype)

                # Now we read the entire thing
                with open(fname, "rb") as f:
                    content = IO(f.read())
                # This contains the boundary information, so we skim through
                # and pick off the right vectors
                rv = subset.fill(content, fields, selector, file_handler)
                for ft, f in fields:
                    d = rv.pop(f)
                    mylog.debug("Filling %s with %s (%0.3e %0.3e) (%s zones)",
                                f, d.size, d.min(), d.max(), d.size)
                    tr[(ft, f)].append(d)
        # Concatenate per-subset pieces into one array per field.
        d = {}
        for field in fields:
            d[field] = np.concatenate(tr.pop(field))

        return d
Exemplo n.º 28
0
def encode_plot(P, pad=None, pad_inches=0.1, bbox_inches=None, remove_axes = False, transparent=False, axes_pad=None):
    """
    Convert a plot object to base64-encoded png format.

    pad is passed down to matplotlib's tight_layout; pad_inches and bbox_inches to savefig.

    The resulting object is a base64-encoded version of the png
    formatted plot, which can be displayed in web pages with no
    further intervention.
    """
    # Local imports keep matplotlib optional at module import time.
    if PY3:
        from io import BytesIO as IO
    else:
        from StringIO import StringIO as IO
    from matplotlib.backends.backend_agg import FigureCanvasAgg
    from base64 import b64encode
    from six.moves.urllib_parse import quote

    virtual_file = IO()
    # P is presumably a Sage plot object exposing .matplotlib() — TODO confirm.
    fig = P.matplotlib(axes_pad=axes_pad)
    fig.set_canvas(FigureCanvasAgg(fig))
    if remove_axes:
        for a in fig.axes:
            a.axis('off')
    if pad is not None:
        fig.tight_layout(pad=pad)
    fig.savefig(virtual_file, format='png', pad_inches=pad_inches, bbox_inches=bbox_inches, transparent=transparent)
    virtual_file.seek(0)
    if PY3:
        buf = virtual_file.getbuffer()
    else:
        # StringIO exposes its internal buffer as .buf on Python 2 —
        # NOTE(review): confirm this holds for the StringIO variant in use.
        buf = virtual_file.buf
    # data: URI suitable for direct embedding in an <img> tag.
    return "data:image/png;base64," + quote(b64encode(buf))
Exemplo n.º 29
0
def b64_fig(fig, dpi=72):
    """Render a matplotlib figure to an autocropped, base64-encoded PNG."""
    raw = IO()
    fig.savefig(raw, dpi=dpi, format="PNG", bbox_inches="tight")
    cropped = mi.autocrop(mi.Image.open(raw))
    raw.close()

    out = IO()
    cropped.save(out, format="PNG")
    encoded = base64.b64encode(out.getvalue()).decode()
    out.close()
    return encoded
Exemplo n.º 30
0
def export_debts_excel(request):
    """Export all unpaid, non-exempt debts to an Excel attachment.

    Rows are indexed by student name; value = base + penalty - discount.
    """
    sio = IO()
    ds = {'descricao': [], 'valor': []}
    students = []
    for debt in Debt.objects.filter(paid=False,
                                    exemption=False).order_by('student'):
        students.append(debt.student.name)
        ds['descricao'].append(str(debt.debt_info))
        ds['valor'].append(debt.debt_info.value + debt.debt_info.penalty -
                           debt.discount)
    PandasDataFrame = pd.DataFrame(ds, index=students)
    PandasWriter = pd.ExcelWriter(sio, engine='xlsxwriter')
    PandasDataFrame.to_excel(PandasWriter, sheet_name='Debtors')
    PandasWriter.save()
    PandasWriter.close()
    # Rewind so read() returns the workbook bytes.
    # (Removed dead assignment: `workbook = sio.getvalue()` was never used.)
    sio.seek(0)
    response = HttpResponse(
        sio.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response[
        'Content-Disposition'] = 'attachment; filename=%s' % 'devedores.xlsx'
    #messages.add_message(request, messages.SUCCESS, "Planilha de dívidas gerada com sucesso.")
    return response
Exemplo n.º 31
0
def b64_img(im, format="JPEG"):
    """Encode a PIL image as base64 text in the given *format*."""
    buf = IO()
    im.save(buf, format=format)
    encoded = base64.b64encode(buf.getvalue()).decode()
    buf.close()
    return encoded
Exemplo n.º 32
0
def export(request, type):
    """Export the pivoted or raw query result as an .xlsx attachment.

    :param type: 'pivoted' or 'raw' — anything else leaves `df` unbound and
        raises NameError below.
    """
    form_dict = dict(six.iterlists(request.GET))
    if type == 'pivoted':
        df = get_df(form_dict)  # 透视后的数据
        # NOTE(review): `aggfunc` and `value` are not defined in this
        # function — presumably module-level globals; verify, otherwise this
        # branch raises NameError.
        sheet_name = aggfunc + '(' + value + ')'
    elif type == 'raw':
        df = get_df(form_dict, is_pivoted=False)  # 原始数
        sheet_name = '原始数据'
    excel_file = IO()
    xlwriter = pd.ExcelWriter(excel_file)
    df.to_excel(xlwriter, sheet_name=sheet_name)

    xlwriter.save()
    xlwriter.close()

    # Rewind the buffer so read() below returns the workbook bytes.
    excel_file.seek(0)

    # Spreadsheet MIME type tells the browser to treat the body as a
    # downloadable .xlsx document.
    response = HttpResponse(
        excel_file.read(),
        content_type=
        'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
    )  #即为xlsx类型

    # Timestamped default file name (unique enough for a download).
    now = datetime.datetime.now().strftime(
        "%Y%m%d%H%M%S")  # 当前精确时间不会重复,适合用来命名默认导出文件
    response['Content-Disposition'] = 'attachment; filename=' + now + '.xlsx'
    return response
Exemplo n.º 33
0
        def after_request(response):
            """Log the request, set server headers, and gzip when accepted."""
            # Custom access-log line: addr, method, status, mimetype, path.
            self.serverLogger.info("{} {} {} \"{}\" {}".format(
                request.remote_addr, request.method, response.status,
                response.mimetype, request.path))
            # Custom headers
            response.headers["Server"] = "Wills Media Server v0.0.1"
            response.headers["Access-Control-Allow-Origin"] = "*"
            # GZip the body only when the client advertises support.
            accept_encoding = request.headers.get('Accept-Encoding', '')
            if 'gzip' in accept_encoding.lower():
                response.direct_passthrough = False

                if (response.status_code < 200 or response.status_code >= 300
                        or 'Content-Encoding' in response.headers):
                    return response
                compressed = IO()
                gz = gzip.GzipFile(mode='wb', fileobj=compressed)
                gz.write(response.data)
                gz.close()

                response.data = compressed.getvalue()
                response.headers['Content-Encoding'] = 'gzip'
                response.headers['Vary'] = 'Accept-Encoding'
                response.headers['Content-Length'] = len(response.data)

            return response
Exemplo n.º 34
0
    def writeFile(contentSection, variables, scriptPath):
        """Parse an output section and write its content block to disk.

        The section body is "key: 'value'" property lines, optionally
        followed by ``content:`` and the literal file content.  Requires a
        'path' property; honors 'writeMethod' (default replaceExistingFile)
        and 'extendBelow'.
        """
        contentSeparator = 'content:'
        actualData = Executor._replacePlaceholdersIn(contentSection.data,
                                                     variables)
        paramsAndContent = actualData.split(contentSeparator, 1)
        otherKeyValues, content = [paramsAndContent[0], None]

        if len(paramsAndContent) > 1:
            content = paramsAndContent[1]

        properties = {}

        # Parse "key: 'value'" property lines (quotes are stripped below).
        for propertyLine in otherKeyValues.split('\n'):
            strippedLine = propertyLine.strip()

            # Fixed: was `strippedLine is not ''` — identity comparison with
            # a literal only works by CPython interning accident.
            if strippedLine != '':
                key, value = strippedLine.split(':', 1)
                key = key.strip().replace(' ', '')
                value = value.strip()[1:-1]
                properties[key] = value

        if 'path' not in properties:
            raise Exception(
                'Missing path? in output section. Please provide a path!')

        path = properties['path']

        writeParameters = {}

        if 'writeMethod' in properties:
            writeParameters['writeMethod'] = properties['writeMethod'].strip()
        else:
            writeParameters['writeMethod'] = 'replaceExistingFile'

        if 'extendBelow' in properties:
            writeParameters['extendBelow'] = properties['extendBelow'].strip()

        # Relative paths are resolved against the script's directory.
        completePath = path

        if len(path) >= 1 and path[0] != '/':
            completePath = scriptPath + '/' + path

        if content is not None:
            # strip('"""') strips '"' characters (a char set, not a token).
            content = content.strip().strip('"""').strip('\n')

        IO.write(completePath, content, parameters=writeParameters)
Exemplo n.º 35
0
 def test_compare_cubes_ocean(self):
     """compare_cubes on a consistent ocean cube must print nothing."""
     test_load = _generate_ocean_cube()
     out = IO()
     with _redirect_stdout(out):
         ch.compare_cubes(test_load)
     output = out.getvalue().strip()
     # No differences expected, so the captured report is empty.
     expected_output = ""
     self.assertEqual(output, expected_output)
Exemplo n.º 36
0
def b64_fig(fig, dpi=72):
    """Save *fig* as a tight-bbox PNG and return it base64-encoded."""
    buf = IO()
    fig.savefig(buf, dpi=dpi, format='PNG', bbox_inches="tight")
    encoded = base64.b64encode(buf.getvalue())
    if PY3:
        # bytes -> str on Python 3 only, matching the Python 2 return type.
        encoded = encoded.decode()
    buf.close()
    return encoded
Exemplo n.º 37
0
def run(args):
    """Read integers from ``args.path`` and print the top ``args.n`` of them.

    Kept separate from the CLI entry point so it can be profiled in
    isolation.

    :param args: parsed command-line arguments (``path``, ``n``)
    """
    numbers = IO().read(args.path)

    top = TopN(args.n)
    for value in numbers:
        top.push(value)

    print("\n".join(map(str, top.get_top_n())))
Exemplo n.º 38
0
    def __init__(self, **kwargs):
        """Build the model component: I/O, parameters, state, diagnostics.

        Keyword arguments are consumed progressively (``pop``) by each
        subsystem; anything left unused is reported by ``_checkUnused``.
        """
        # Initialize self.Fixed (subset of self.Prognostic which will NOT be time-marched)
        if 'Fixed' in kwargs: self.Fixed = kwargs.pop('Fixed')
        else: self.Fixed = []

        # Initialize I/O
        self.Io = IO(self, **kwargs)

        # Get values from restart file, if available
        if 'RestartFile' in kwargs:
            ParamNames = Parameters().value.keys()
            FieldNames = self.Required
            kwargs = self.Io.readRestart(FieldNames, ParamNames, kwargs)

        # Initialize scalar parameters
        self.Params  = Parameters(**kwargs)

        # Frequency with which compute() will be executed
        # (defaults to one model time step)
        if 'UpdateFreq' in kwargs:
            self.UpdateFreq = kwargs.pop('UpdateFreq')
        else:
            self.UpdateFreq = self.Params['dt']

        # Initialize State
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid

        # Dictionary to hold increments on prognos fields
        self.Inc = {}

        # Initialize diagnostics
        self.compute(ForcedCompute=True)

        # Create output file
        self.Io.createOutputFile(self.State, self.Params.value)

        # Write out initial state
        if not self.Io.Appending: self.write()

        # Initialize plotting facilities
        self.Plot = Plot()

        # Initialize runtime monitor
        self.Monitor = Monitor(self,**kwargs)

        # Notify user of unused input quantities
        self._checkUnused(kwargs)

        # Set some redundant attributes (mainly for backward compatibility)
        self.nlon = self.Grid['nlon']
        self.nlat = self.Grid['nlat']
        self.nlev = self.Grid['nlev']
        # o3 is optional in the state; NOTE(review): the bare except also
        # swallows KeyboardInterrupt/SystemExit.
        try: self.o3 = self.State['o3']
        except: pass
Exemplo n.º 39
0
def run(args):
    """Runs the core of the script; builds the graph and performs the required
    search type

    Split out as a separate function to facilitate optional profiling

    :param args: the arguments object generated by the script call
    """

    io = IO()

    reader = io.read_dictionary(args.dictionary_path)

    graph = WordGraph()

    for word in reader:
        graph.add(word)

    # print() is used instead of the Python 2 print statement, which is a
    # SyntaxError on Python 3; the call form works on both versions.
    if args.all_paths:
        paths = graph.find_all_paths(args.word_from, args.word_to)
        for path in paths:
            print(io.format_word_path(path))
    else:
        path = graph.find_path(args.word_from, args.word_to)
        print(io.format_word_path(path))
Exemplo n.º 40
0
    def __init__(self, filename):
        '''
        Read a PSCF symmetry-adapted field file, and create a new object.

        Argument:
        filename -- name of file

        The file named filename is opened and closed within this function.
        '''
        # NOTE: the original body mixed tabs and spaces, which raises
        # TabError under Python 3; indentation is normalized to spaces.
        self.file = open(filename, 'r')
        self._io = IO()
        file = self.file

        # Read version line
        self.version = Version(self.file)

        self._input_unit_cell()
        self.group_name = self._input_var('char')
        self.N_monomer = self._input_var('int')
        self.N_star = self._input_var('int')

        # Define empty lists
        self.fields = []
        self.waves = []
        self.counts = []

        for i in range(self.N_star):

            data = file.readline().split()
            if len(data) != self.N_monomer + self.dim + 1:
                raise IoException('Incorrect number of elements in field line')
            j = 0

            # Read field coefficients for star i (one per monomer type)
            self.fields.append([])
            for k in range(self.N_monomer):
                value = float(data[j])
                self.fields[i].append(value)
                j += 1

            # Read wave indices for star i (one per spatial dimension)
            self.waves.append([])
            for k in range(self.dim):
                value = int(data[j])
                self.waves[i].append(value)
                j += 1

            # Read star_count
            self.counts.append(int(data[j]))

        self.file.close()
        self.file = None
Exemplo n.º 41
0
    def __init__(self, filename):
        """
        Read and parse a PSCF parameter file, create an object to hold data.

        The file with specified filename is opened and closed within body 
        of the constructor method

        Argument:
        filename - name of PSCF parameter file (string)
        """
        self.file = open(filename, 'r')
        self._io = IO()
        file = self.file
        self.N_chain = 0
        self.N_solvent = 0

        # Dictionary flags indicates which sections are present
        # Keys are flag string values ('MONOMERS' etc.), values are all 1
        self.flags = {}

        # List sections lists section names (flags) in order read
        self.sections = []

        # Read version line
        self.version = Version(self.file)

        # Read input script section; read_param_section returns a falsy
        # value once the last section has been consumed.  'more' replaces
        # the original loop variable 'next', which shadows the builtin.
        more = 1
        while more:
            more = self.read_param_section(file)

        self.file.close()
        self.file = None

        # Make sorted list of attribute names.  sorted() accepts both
        # Python 2 key lists and Python 3 dict views; the original called
        # .sort() on dict.keys(), which fails under Python 3.
        self.att_names = sorted(self.__dict__.keys())
Exemplo n.º 42
0
class FieldFile(object):
    '''
    Hold data in a PSCF field file. 

    A FieldFile object contains the data in a field file in the PSCF
    symmetry-adapted Fourier expansion format (see web user manual).
    It can be used to represent either omega (chemical potential) or
    rho (volume fraction) fields.

    The constructor reads a field file, creates a FieldFile object to store
    the data, and stores all of the parameters and field coefficients in 
    attributes of the object.

    Attributes:
    dim            -- [int] number of periodic spatial directions
    crystal_system -- [string] label of Bravais crystal system
    N_cell_param   -- [int] number of unit cell parameters
    cell_param     -- [list] list of real unit cell parameters
    group          -- [string] name of space group
    N_monomer      -- [int] number of monomer types
    N_star         -- [int] number of basis functions (or stars)
    fields         -- [list] list of list of coefficients

    The attribute field[i] is is a list (or array) of coefficients
    for a field associated with monomer type number i. The element
    field[i][j] is the coefficient of basis function j in the field
    associated with monomer type i. 
    '''

    # "Public" methods

    def __init__(self, filename):
        '''
        Read a PSCF symmetry-adapted field file, and create a new object.

        Argument:
        filename -- name of file

        The file named filename is opened and closed within this function.
        '''
        # NOTE: the original mixed tabs and spaces (TabError on Python 3);
        # indentation is normalized to spaces throughout this class.
        self.file = open(filename, 'r')
        self._io = IO()
        file = self.file

        # Read version line
        self.version = Version(self.file)

        self._input_unit_cell()
        self.group_name = self._input_var('char')
        self.N_monomer = self._input_var('int')
        self.N_star = self._input_var('int')

        # Define empty lists
        self.fields = []
        self.waves = []
        self.counts = []

        for i in range(self.N_star):

            data = file.readline().split()
            if len(data) != self.N_monomer + self.dim + 1:
                raise IoException('Incorrect number of elements in field line')
            j = 0

            # Read field coefficients for star i (one per monomer type)
            self.fields.append([])
            for k in range(self.N_monomer):
                value = float(data[j])
                self.fields[i].append(value)
                j += 1

            # Read wave indices for star i (one per spatial dimension);
            # the original comment wrongly said "field coefficients" here.
            self.waves.append([])
            for k in range(self.dim):
                value = int(data[j])
                self.waves[i].append(value)
                j += 1

            # Read star_count
            self.counts.append(int(data[j]))

        self.file.close()
        self.file = None

    def write(self, file, major=1, minor=0):
        '''
        PURPOSE
           Write field to file in PSCF symmetry-adapted format.
        ARGUMENTS
           file  - file object or file name string
           major - major file format version number
           minor - minor file format version number
        COMMENT
           if file is a file object, it must be open for writing
        '''

        # If file argument is a string, open a file of that name.
        # isinstance replaces the original type(file) == type('thing').
        if isinstance(file, str):
            file = open(file, 'w')
        self.file = file

        self.version.major = major
        self.version.minor = minor
        self.version.write(file)

        self._output_unit_cell()
        self._output_var('char', 'group_name')
        self._output_var('int', 'N_monomer')
        self._output_var('int', 'N_star')

        for i in range(self.N_star):
            for k in range(self.N_monomer):
                file.write('%20.12E' % self.fields[i][k])
            file.write('    ')
            for k in range(self.dim):
                file.write('%4d' % self.waves[i][k])
            file.write('%6d' % self.counts[i])
            file.write("\n")

        file.close()
        self.file = None

    def addMonomer(self, value=0.0):
        ''' 
        PURPOSE
           Add a field with all coefficients set to a common value
        ARGUMENTS
           value - value of all coefficients for new monomer
        COMMENT
           N_monomer is incremented by 1, and new field is last
        '''
        self.N_monomer += 1
        for k in range(self.N_star):
            self.fields[k].append(value)

    def duplicateMonomer(self, i):
        ''' 
        PURPOSE
           Add a field by duplicating field i
        ARGUMENTS
           i - index in range [0, N_monomer-1]
        COMMENT
           N_monomer is incremented, and duplicate field is last
        '''
        self.N_monomer += 1
        for k in range(self.N_star):
            self.fields[k].append(self.fields[k][i])

    def switchMonomers(self, i, j):
        '''
        PURPOSE
           Switch coefficients of fields i and j
        ARGUMENTS
           i - index in range [0, N_monomer-1]
           j - index in range [0, N_monomer-1]
        '''
        for k in range(self.N_star):
            temp = self.fields[k][i]
            self.fields[k][i] = self.fields[k][j]
            self.fields[k][j] = temp

    # "Private" methods

    # Wrappers for input_... output_.. methods of IO)

    def _input_var(self, type, comment=None, f='A'):
        return self._io.input_var(self.file, type, comment, f)

    def _input_vec(self, type, n=None, comment=None, s='R', f='A'):
        return self._io.input_vec(self.file, type, n, comment, s, f)

    # Output methods (output by name).  'name in self.__dict__' replaces
    # the Python-2-only dict.has_key().
    def _output_var(self, type, name, f='A'):
        if name in self.__dict__:
            data = self.__dict__[name]
            self._io.output_var(self.file, type, data, name, f)

    def _output_vec(self, type, name, n=None, s='R', f='A'):
        if name in self.__dict__:
            data = self.__dict__[name]
            self._io.output_vec(self.file, type, data, n, name, s, f)

    def _input_unit_cell(self):
        ''' Analog of subroutine _input_unit_cell in unit_cell_mod.f '''
        self.dim = self._input_var('int')
        self.crystal_system = self._input_var('char')
        self.N_cell_param = self._input_var('int')
        self.cell_param = self._input_vec('real', self.N_cell_param)

    def _output_unit_cell(self):
        ''' Analog of subroutine _output_unit_cell in unit_cell_mod.f '''
        self._output_var('int', 'dim')
        self._output_var('char', 'crystal_system')
        self._output_var('int', 'N_cell_param')
        self._output_vec('real', 'cell_param', self.N_cell_param)
Exemplo n.º 43
0
class Client(object):
    """Class whose methods perform various SMTP commands on a given socket. The
    return value from each command is a |Reply| object. Commands that are
    pipelined may not have their replies filled until subsequent commands are
    executed.

    The ``extensions`` attribute contains the |Extensions| object that are made
    available by the server.

    :param socket: Connected socket to use for the client.
    :param tls_wrapper: Optional function that takes a socket and the ``tls``
                        dictionary, creates a new encrypted socket, performs
                        the TLS handshake, and returns it. The default uses
                        :class:`~gevent.ssl.SSLSocket`.

    """

    def __init__(self, socket, tls_wrapper=None):
        # Buffered I/O wrapper around the socket; FIFO of replies that are
        # still waiting to be read off the wire (pipelining support).
        self.io = IO(socket, tls_wrapper)
        self.reply_queue = []

        #: :class:`Extensions` object of the client, populated once the EHLO
        #: command returns its response.
        self.extensions = Extensions()

    def _flush_pipeline(self):
        # Flush any buffered outgoing commands, then drain the queued
        # replies in FIFO order until the queue is empty.
        self.io.flush_send()
        while True:
            try:
                reply = self.reply_queue.pop(0)
            except IndexError:
                return None
            reply.recv(self.io)

    def has_reply_waiting(self):
        """Checks if the underlying socket has data waiting to be received,
        which means a reply is waiting to be read.

        :rtype: True or False

        """
        sock_fd = self.io.socket.fileno()
        try:
            wait_read(sock_fd, 0.1, Timeout())
        except Timeout:
            return False
        else:
            return True

    def get_reply(self, command='[TIMEOUT]'):
        """Gets a reply from the server that was not triggered by the client
        sending a command. This is most useful for receiving timeout
        notifications.

        :param command: Optional command name to associate with the reply.
        :returns: |Reply| object populated with the response.

        """
        reply = Reply(command=command)
        self.reply_queue.append(reply)

        self._flush_pipeline()

        return reply

    def get_banner(self):
        """Waits for the SMTP banner at the beginning of the connection.

        :returns: |Reply| object populated with the response.

        """
        banner = Reply(command='[BANNER]')
        banner.enhanced_status_code = False
        self.reply_queue.append(banner)

        self._flush_pipeline()

        return banner

    def custom_command(self, command, arg=None):
        """Sends a custom command to the SMTP server and waits for the reply.

        :param command: The command to send.
        :param arg: Optional argument string to send with the command.
        :returns: |Reply| object populated with the response.

        """
        custom = Reply(command=command.upper())
        self.reply_queue.append(custom)

        if arg:
            command = ' '.join((command, arg))
        self.io.send_command(command)

        self._flush_pipeline()

        return custom

    def ehlo(self, ehlo_as):
        """Sends the EHLO command with identifier string and waits for the
        reply. When this method returns, the ``self.extensions`` object will
        also be populated with the SMTP extensions the server supports.

        :param ehlo_as: EHLO identifier string, usually an FQDN.
        :returns: |Reply| object populated with the response.

        """
        ehlo = Reply(command='EHLO')
        ehlo.enhanced_status_code = False
        self.reply_queue.append(ehlo)

        command = 'EHLO '+ehlo_as
        self.io.send_command(command)

        self._flush_pipeline()
        # On success the multi-line 250 reply lists the server's extensions;
        # parse them into self.extensions and keep only the leftover text.
        if ehlo.code == '250':
            self.extensions.reset()
            ehlo.message = self.extensions.parse_string(ehlo.message)

        return ehlo

    def helo(self, helo_as):
        """Sends the HELO command with identifier string and waits for the
        reply.

        :param helo_as: HELO identifier string, usually an FQDN.
        :returns: |Reply| object populated with the response.

        """
        helo = Reply(command='HELO')
        helo.enhanced_status_code = False
        self.reply_queue.append(helo)

        command = 'HELO '+helo_as
        self.io.send_command(command)

        self._flush_pipeline()

        return helo

    def encrypt(self, tls):
        """Encrypts the underlying socket with the information given by ``tls``.
        This call should only be used directly against servers that expect to be
        immediately encrypted. If encryption is negotiated with
        :meth:`starttls()` there is no need to call this method.

        :param tls: Dictionary of keyword arguments for
                    :class:`~gevent.ssl.SSLSocket`.

        """
        self.io.encrypt_socket(tls)

    def starttls(self, tls):
        """Sends the STARTTLS command with identifier string and waits for the
        reply. When the reply is received and the code is 220, the socket is
        encrypted with the parameters in ``tls``. This should be followed by a
        another call to :meth:`ehlo()`.

        :param tls: Dictionary of keyword arguments for
                    :class:`~gevent.ssl.SSLSocket`.
        :returns: |Reply| object populated with the response.

        """
        reply = self.custom_command('STARTTLS')
        if reply.code == '220':
            self.encrypt(tls)
        return reply

    def mailfrom(self, address, data_size=None):
        """Sends the MAIL command with the ``address`` and possibly the message
        size. The message size is sent if the server supports the SIZE
        extension. If the server does *not* support PIPELINING, the returned
        reply object is populated immediately.

        :param address: The sender address to send.
        :param data_size: Optional size of the message body.
        :returns: |Reply| object that will be populated with the response
                  once a non-pipelined command is called, or if the server does
                  not support PIPELINING.

        """
        mailfrom = Reply(command='MAIL')
        self.reply_queue.append(mailfrom)

        command = 'MAIL FROM:<{0}>'.format(address)
        if data_size is not None and 'SIZE' in self.extensions:
            command += ' SIZE='+str(data_size)
        self.io.send_command(command)

        if 'PIPELINING' not in self.extensions:
            self._flush_pipeline()

        return mailfrom

    def rcptto(self, address):
        """Sends the RCPT command with the ``address``. If the server
        does *not* support PIPELINING, the returned reply object is
        populated immediately.

        :param address: The recipient address to send.
        :returns: |Reply| object that will be populated with the response
                  once a non-pipelined command is called, or if the server does
                  not support PIPELINING.

        """
        rcptto = Reply(command='RCPT')
        self.reply_queue.append(rcptto)

        command = 'RCPT TO:<{0}>'.format(address)
        self.io.send_command(command)

        if 'PIPELINING' not in self.extensions:
            self._flush_pipeline()

        return rcptto

    def data(self):
        """Sends the DATA command and waits for the response. If the response
        from the server is a 354, the server is expecting message data and
        should be sent :meth:`send_data` or :meth:`send_empty_data`.

        :returns: |Reply| object populated with the response.

        """
        return self.custom_command('DATA')

    def send_data(self, *data):
        """Processes and sends message data. At the end of the message data,
        the client will send a line with a single ``.`` to indicate the end of
        the message.  If the server does *not* support PIPELINING, the returned
        reply object is populated immediately.

        :param data: The message data parts.
        :type data: string or unicode
        :returns: |Reply| object that will be populated with the response
                  once a non-pipelined command is called, or if the server does
                  not support PIPELINING.

        """
        send_data = Reply(command='[SEND_DATA]')
        self.reply_queue.append(send_data)

        data_sender = DataSender(*data)
        data_sender.send(self.io)

        if 'PIPELINING' not in self.extensions:
            self._flush_pipeline()

        return send_data

    def send_empty_data(self):
        """Sends a line with a single ``.`` to indicate an empty message. If
        the server does *not* support PIPELINING, the returned reply object is
        populated immediately.

        :returns: |Reply| object that will be populated with the response
                  once a non-pipelined command is called, or if the server does
                  not support PIPELINING.

        """
        send_data = Reply(command='[SEND_DATA]')
        self.reply_queue.append(send_data)

        self.io.send_command('.')

        if 'PIPELINING' not in self.extensions:
            self._flush_pipeline()

        return send_data

    def rset(self):
        """Sends a RSET command and waits for the response. The intent of the
        RSET command is to reset any :meth:`mail` or :meth:`rcpt` commands that
        are pending.

        :returns: |Reply| object populated with the response.

        """
        return self.custom_command('RSET')

    def quit(self):
        """Sends the QUIT command and waits for the response. After the response
        is received (should be 221) the socket should be closed.

        :returns: |Reply| object populated with the response.

        """
        return self.custom_command('QUIT')
Exemplo n.º 44
0
class IOTest(unittest.TestCase):
    """Unit tests for the IO device: queue handling, pcb reception and run.

    NOTE(review): the original body mixed tabs and spaces, which raises
    TabError under Python 3; indentation is normalized to 4 spaces here.
    """

    def setUp(self):
        # Fresh mocked kernel and IO instance before each test.
        self.kernel = Mock()
        self.io = IO(self.kernel)

    def test_queueisEmpty_true(self):
        assert self.io.queueisEmpty()

    def test_queueisEmpty_false(self):
        instruction = Mock()
        self.io.getQueue().put(instruction)
        assert not self.io.queueisEmpty()

    def test_receivePcb(self):
        pcb = Mock()
        self.io.receivePcb(pcb)
        assert not self.io.queueisEmpty()

    def test_fetch(self):
        pcb = Mock()
        self.io.receivePcb(pcb)
        assert not self.io.queueisEmpty()
        self.io.fetch()
        assert self.io.queueisEmpty()

    def test_run(self):
        pcb = Mock()
        memManager = Mock()
        handler = Mock()
        instruction = Mock()
        self.io.receivePcb(pcb)

        # mockito-style stubbing of the kernel collaborators.
        when(self.kernel).getMemoryManager().thenReturn(memManager)
        when(memManager).getInstruction(1, 5).thenReturn(instruction)
        when(self.kernel).getHandler().thenReturn(handler)
        when(pcb).getPid().thenReturn(1)
        when(pcb).getDisplacement().thenReturn(5)

        self.io.run()

        verify(handler, times(1)).handler(any(Irq))
Exemplo n.º 45
0
    def test_path_output_format(self):
        # Words in a path are joined with " -> " separators.
        expected = "hat -> mat -> bat"
        result = IO().format_word_path(["hat", "mat", "bat"])
        self.assertEqual(result, expected)
Exemplo n.º 46
0
 def test_throws_if_path_non_readable(self):
     # Consuming the reader for a missing path must surface IOError.
     io = IO()
     with self.assertRaises(IOError):
         list(io.read("path/that/doesn't/exist"))
Exemplo n.º 47
0
class federation(Component):
    """
    Combine components to create time-dependent model.

    * Instantiation:
    
      x = climt.federation(C1, C2, ..., Cn, <args> )

      where C1, ..., Cn are instances of CliMT components 
      and <args> are any keywork arguments relevant to the
      components included.

      The parameters and state of constituent components is re-initialized.

    * Running the federation:

      x.step()     will evolve the federation 1 timestep
      x.step(100)  will evolve the federation 100 timesteps
      x.step(100.) will evolve the federation 100 seconds
    """

    def __init__(self, *components, **kwargs):
        """
        Build a federation from two or more CliMT component instances.
        """
        # Check input components.  The original raised a bare string, which
        # is illegal in modern Python; raise a real exception instead.
        if len(components) < 2:
            raise ValueError(
                "\n\n +++ CliMT.federation: you must give me more than 1 component to federate!\n\n")
        else:
            for component in components:
                # Fixed NameError: the message referenced an undefined 'c'.
                assert (
                    type(component) is InstanceType
                ), "\n\n +++CliMT.federation: Input item %s is not an instance.\n\n" % str(component)

        # Re-order components: diagnostic, semi-implicit, explicit, implicit
        components = list(components)
        """
        for i in range(len(components)):
            if len(components[i].Prognostic) > 0:
                components.append(components.pop(i))
        for scheme in ['semi-implicit', 'explicit', 'implicit']:
            for i in range(len(components)):
                if components[i].SteppingScheme == scheme:
                    components.append(components.pop(i))
        """
        self.components = components

        # Federation's Required is union of all components' Required;
        # same for Prognostic and Diagnostic
        self.Required = []
        self.Prognostic = []
        self.Diagnostic = []
        for component in components:
            self.Required = list(set(self.Required).union(component.Required))
            self.Prognostic = list(set(self.Prognostic).union(component.Prognostic))
            self.Diagnostic = list(set(self.Diagnostic).union(component.Diagnostic))

        # Other attributes
        self.Name = "federation"
        self.Extension = None

        # Set LevType to None if all components are None, else p
        self.LevType = None
        for component in components:
            if component.LevType == "p":
                self.LevType = "p"

        # Initialize self.Fixed (subset of self.Prognostic which will NOT be time-marched)
        if "Fixed" in kwargs:
            self.Fixed = kwargs.pop("Fixed")
        else:
            self.Fixed = []

        # Instantiate I/O
        self.Io = IO(self, **kwargs)

        # Get values from restart file, if available
        if "RestartFile" in kwargs:
            ParamNames = Parameters().value.keys()
            FieldNames = self.Required
            kwargs = self.Io.readRestart(FieldNames, ParamNames, kwargs)

        # Initialize scalar parameters
        self.Params = Parameters(**kwargs)

        # Initialize State
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid

        # Set some redundant attributes (mainly for backward compatibility)
        self.nlon = self.Grid["nlon"]
        self.nlat = self.Grid["nlat"]
        self.nlev = self.Grid["nlev"]
        # 'o3' is optional in the state; best-effort, but avoid bare except.
        try:
            self.o3 = self.State["o3"]
        except Exception:
            pass

        # Check if components enforce axis dimensions, ensure consistency.
        # getattr replaces the original exec() calls, which cannot rebind
        # function locals under Python 3.
        for component in self.components:
            for AxisName in ["lev", "lat", "lon"]:
                n_fed = getattr(self, "n%s" % AxisName)
                try:
                    n_com = getattr(component.Extension, "get_n%s" % AxisName)()
                except Exception:
                    n_com = n_fed
                assert (
                    n_com == n_fed
                ), "\n\n ++++ CliMT.federation.init: recompile with %i %ss to run this federation\n" % (n_fed, AxisName)

        # Dictionary to hold increments on prognos fields
        self.Inc = {}

        # Adjust components' attributes
        for component in self.components:
            component.Monitoring = False
            component.Io.OutputFreq = self.Io.OutputFreq
            component.Fixed.extend(self.Fixed)
            # Components left at their default update frequency inherit the
            # federation's timestep.
            if component.UpdateFreq == component["dt"]:
                component.UpdateFreq = self["dt"]
            component.Params = self.Params
            component.Grid = self.State.Grid
            component.State = self.State
            component.Inc = {}
            # insolation component gets special treatment because
            # of need to set orb params in common block (yes, this is ugly)
            try:
                component.setOrbParams(**kwargs)
            except Exception:
                pass
        self.compute(ForcedCompute=True)

        # Create output file
        self.Io.createOutputFile(self.State, self.Params.value)

        # Write out initial state
        if not self.Io.Appending:
            self.write()

        # Initialize plotting facilities
        self.Plot = Plot()

        # Initialize runtime monitor
        self.Monitor = Monitor(self, **kwargs)

        # Notify user of unused input quantities
        self._checkUnused(kwargs)

        # Print out report
        # self.report()

    def compute(self, ForcedCompute=False):
        """
        Update federation's diagnostics and increments.

        Increments from every component are accumulated into self.Inc,
        keyed by prognostic field name.
        """
        ## New = self.State.Old.copy()
        ## for component in self.components:
        ##     # enforce time-splitting of implicit and semi-implicit components
        ##     self.State.Old.update(New)
        ##     # bring component's diagnostics and increments up to date
        ##     component.compute(ForcedCompute=ForcedCompute)
        ##     # accumulate increments
        ##     for key in component.Inc:
        ##         New[key] += component.Inc[key]
        ## for key in self.State.Old:
        ##     self.Inc[key] = New[key]  - self.State.Old[key]

        # Start from zeroed copies of the prognostic fields.
        self.Inc = self.State.Old.copy()
        for key in self.Inc:
            self.Inc[key] = self.Inc[key] * 0.0
        for component in self.components:
            # bring component's diagnostics and increments up to date
            component.compute(ForcedCompute=ForcedCompute)
            # accumulate increments
            for key in component.Inc:
                self.Inc[key] += component.Inc[key]
Exemplo n.º 48
0
    def test_values_integers(self):
        # Every value yielded by the reader should already be an int.
        reader = IO().read(self.valid_file)
        for value in reader:
            self.assertIsInstance(value, int)
Exemplo n.º 49
0
    def __init__(self, *components, **kwargs):
        """
        Build a federation from two or more CliMT component instances.
        """
        # Check input components.  The original raised a bare string, which
        # is illegal in modern Python; raise a real exception instead.
        if len(components) < 2:
            raise ValueError(
                "\n\n +++ CliMT.federation: you must give me more than 1 component to federate!\n\n")
        else:
            for component in components:
                # Fixed NameError: the message referenced an undefined 'c'.
                assert (
                    type(component) is InstanceType
                ), "\n\n +++CliMT.federation: Input item %s is not an instance.\n\n" % str(component)

        # Re-order components: diagnostic, semi-implicit, explicit, implicit
        components = list(components)
        """
        for i in range(len(components)):
            if len(components[i].Prognostic) > 0:
                components.append(components.pop(i))
        for scheme in ['semi-implicit', 'explicit', 'implicit']:
            for i in range(len(components)):
                if components[i].SteppingScheme == scheme:
                    components.append(components.pop(i))
        """
        self.components = components

        # Federation's Required is union of all components' Required;
        # same for Prognostic and Diagnostic
        self.Required = []
        self.Prognostic = []
        self.Diagnostic = []
        for component in components:
            self.Required = list(set(self.Required).union(component.Required))
            self.Prognostic = list(set(self.Prognostic).union(component.Prognostic))
            self.Diagnostic = list(set(self.Diagnostic).union(component.Diagnostic))

        # Other attributes
        self.Name = "federation"
        self.Extension = None

        # Set LevType to None if all components are None, else p
        self.LevType = None
        for component in components:
            if component.LevType == "p":
                self.LevType = "p"

        # Initialize self.Fixed (subset of self.Prognostic which will NOT be time-marched)
        if "Fixed" in kwargs:
            self.Fixed = kwargs.pop("Fixed")
        else:
            self.Fixed = []

        # Instantiate I/O
        self.Io = IO(self, **kwargs)

        # Get values from restart file, if available
        if "RestartFile" in kwargs:
            ParamNames = Parameters().value.keys()
            FieldNames = self.Required
            kwargs = self.Io.readRestart(FieldNames, ParamNames, kwargs)

        # Initialize scalar parameters
        self.Params = Parameters(**kwargs)

        # Initialize State
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid

        # Set some redundant attributes (mainly for backward compatibility)
        self.nlon = self.Grid["nlon"]
        self.nlat = self.Grid["nlat"]
        self.nlev = self.Grid["nlev"]
        # 'o3' is optional in the state; best-effort, but avoid bare except.
        try:
            self.o3 = self.State["o3"]
        except Exception:
            pass

        # Check if components enforce axis dimensions, ensure consistency.
        # getattr replaces the original exec() calls, which cannot rebind
        # function locals under Python 3.
        for component in self.components:
            for AxisName in ["lev", "lat", "lon"]:
                n_fed = getattr(self, "n%s" % AxisName)
                try:
                    n_com = getattr(component.Extension, "get_n%s" % AxisName)()
                except Exception:
                    n_com = n_fed
                assert (
                    n_com == n_fed
                ), "\n\n ++++ CliMT.federation.init: recompile with %i %ss to run this federation\n" % (n_fed, AxisName)

        # Dictionary to hold increments on prognos fields
        self.Inc = {}

        # Adjust components' attributes
        for component in self.components:
            component.Monitoring = False
            component.Io.OutputFreq = self.Io.OutputFreq
            component.Fixed.extend(self.Fixed)
            # Components left at their default update frequency inherit the
            # federation's timestep.
            if component.UpdateFreq == component["dt"]:
                component.UpdateFreq = self["dt"]
            component.Params = self.Params
            component.Grid = self.State.Grid
            component.State = self.State
            component.Inc = {}
            # insolation component gets special treatment because
            # of need to set orb params in common block (yes, this is ugly)
            try:
                component.setOrbParams(**kwargs)
            except Exception:
                pass
        self.compute(ForcedCompute=True)

        # Create output file
        self.Io.createOutputFile(self.State, self.Params.value)

        # Write out initial state
        if not self.Io.Appending:
            self.write()

        # Initialize plotting facilities
        self.Plot = Plot()

        # Initialize runtime monitor
        self.Monitor = Monitor(self, **kwargs)

        # Notify user of unused input quantities
        self._checkUnused(kwargs)
Exemplo n.º 50
0
 def setUp(self):
     """Create a fresh mocked kernel and IO instance before each test."""
     # NOTE: the original body mixed a tab-indented and a space-indented
     # line (TabError / inconsistent indentation); normalized to spaces.
     self.kernel = Mock()
     self.io = IO(self.kernel)
Exemplo n.º 51
0
class ParamFile(object):
    """
    Hold the data in a PSCF parameter file.

    The constructor reads a PSCF parameter file and stores the values of
    parameters as attributes with names that are the same as the variable
    names in the parameter file. Parameters that are stored in PSCF as
    1D arrays are stored in list parameters of the same name. Parameters
    that are stored in PSCF as matrices are stored as lists of lists. 

    Note that list indices are numbered from 0 (C/python convention), 
    rather than from 1 as in PSCF (Fortran convention), so all indices
    are off by one relative to the values used in the PSCF source code.

    Construction: To construct an object from a param file named 'param':

        > param = ParamFile('param')

    Attributes: After construction, param.kuhn[2] is the statistical 
    segment length of the third monomer type, and param.block_length[0][1] 
    is the length of the 2nd block of the first chain species. 

    An instance of the ParamFile class can be used to edit values of 
    parameters by reading one parameter file, modifying one or more 
    parameters, and then write the object to another file. 
    """

    # NOTE(review): this class uses Python-2-only constructs throughout
    # (dict.has_key, string.join, sorting the list returned by dict.keys());
    # it will not run unmodified under Python 3.

    def __init__(self,filename):
        """
        Read and parse a PSCF parameter file, create an object to hold data.

        The file with specified filename is opened and closed within body 
        of the constructor method

        Argument:
        filename - name of PSCF parameter file (string)
        """
        self.file = open(filename, 'r')
        self._io   = IO()
        file = self.file
        # Species counts default to zero; overwritten by CHAINS/SOLVENTS sections
        self.N_chain   = 0
        self.N_solvent = 0

        # Dictionary flags indicates which sections are present
        # Keys are flag string values ('MONOMERS' etc.), values are all 1
        self.flags = {}

        # List sections lists section names (flags) in order read
        self.sections = []

        # Read version line
        self.version = Version(self.file)

        # Read input script section
        # (read_param_section returns 0 on FINISH or end-of-file)
        next = 1
        while next:
            next = self.read_param_section(file)

        self.file.close()
        self.file = None

        # Make sorted list of attribute names
        # NOTE(review): in Python 3, keys() returns a view without .sort();
        # Python 2 only.
        self.att_names = self.__dict__.keys()
        self.att_names.sort()

    def write(self, file, major=1, minor=0):
        '''
        Write a parameter file, in the format used by PSCF.

        If the "file" argument is a file object, it must be opened
        for writing. If it is a file name string, a file of that
        name will be opened and written to. In either case, the file
        is closed upon return. 

        Argument:
        file - output file object or file name.
        '''
        # If file argument is a string, open a file of that name
        if type(file) == type('thing'):
            temp = open(file,'w')
            file = temp
        self.file = file
        self.version.major = major
        self.version.minor = minor
        self.version.write(file)
        # Each section is written only if it was present in the input file
        # (dict.has_key is Python 2 only)
        if self.flags.has_key('MONOMERS'):
            file.write("\n%-20s\n" % 'MONOMERS')
            self.output_monomers()
        if self.flags.has_key('CHAINS'):
            file.write("\n%-20s\n" % 'CHAINS')
            self.output_chains()
        if self.flags.has_key('SOLVENTS'):
            file.write("\n%-20s\n" % 'SOLVENTS')
            self.output_solvents()
        if self.flags.has_key('COMPOSITION'):
            file.write("\n%-20s\n" % 'COMPOSITION')
            self.output_composition()
        if self.flags.has_key('INTERACTION'):
            file.write("\n%-20s\n" % 'INTERACTION')
            self.output_interaction()
        if self.flags.has_key('UNIT_CELL'):
            file.write("\n%-20s\n" % 'UNIT_CELL')
            self.output_unit_cell()
        if self.flags.has_key('DISCRETIZATION'):
            file.write("\n%-20s\n" % 'DISCRETIZATION')
            self._output_vec( 'int', 'ngrid', self.dim)
            self._output_var( 'real', 'chain_step')
        if self.flags.has_key('BASIS'):
            file.write("\n%-20s\n" % 'BASIS')
            self._output_var('char', 'group_name')
        if self.flags.has_key('ITERATE'):
            file.write("\n%-20s\n" % 'ITERATE')
            self.output_iterate()
        if self.flags.has_key('SWEEP'):
            file.write("\n%-20s\n" % 'SWEEP')
            self._output_var( 'real', 's_max')
            self.output_increments()
        file.write("\n%-20s\n" % 'FINISH')

        file.close()
        self.file = None

    def eval(self, expr):
        """
        Returns the value of a mathematical expression involving parameters.

        This function returns the numerical value of a mathematical 
        expression, expressed as a string literal, that is constructed 
        using the names of parameters that appear in the parameter file 
        as variable names. 

        For example, if expr == '3.0*block_length[0][0]', then the 
        method returns a value equal to three times the length of 
        the first chain species. 

        Argument:
        expr -- string literal representation of a mathematical expression.
        """
        # NOTE(review): exec/eval on a caller-supplied string executes
        # arbitrary code; only use with trusted input.
        for key in self.__dict__.keys():
            exec( key + '= self.' + key )
        return eval(expr)

    def read_param_section(self, file):
        """ 
        Read one parameter file section, return 0 if FINISH, 1 otherwise.

        This function reads the capitalized label for a section 
        (e.g., MONONOMERS, CHAINS, etc.), then calls a function to read
        the appropriate section. It returns 0 if it is a FINISH section,
        and 1 if it is any other valid section. It throws an IoException
        if the section label is not recognized. 
        """
        # Read the next non-empty line
        hasFlag = False
        while not hasFlag:
            line = self.file.readline()
            if not line:
                # End of file reached: treat like FINISH
                next = 0
                return next
            flag = line.strip()
            if flag != '':
                hasFlag = True

        # Set key in self.flags dictionary
        self.flags[flag] = 1
        self.sections.append(flag)

        # Dispatch on the section label
        next = 1
        if flag == 'MONOMERS':
            self.input_monomers()
        elif flag == 'CHAINS':
            self.input_chains()
        elif flag == 'SOLVENTS':
            self.input_solvents()
        elif flag == 'COMPOSITION':
            self.input_composition()
        elif flag == 'INTERACTION':
            self.input_interaction()
        elif flag == 'UNIT_CELL':
            self.input_unit_cell()
        elif flag == 'DISCRETIZATION':
            self.ngrid = self._input_vec('int')
            self.chain_step = self._input_var('real')
        elif flag == 'BASIS':
            self.group_name = self._input_var('char')
        elif flag == 'ITERATE':
            self.input_iterate()
        elif flag == 'FINISH':
            next = 0
        else:
            msg = "Unrecognized parameter file section name: " + flag
            raise IoException(msg)

        return next

    def input_monomers(self):
        """ Analog of subroutine input_monomers in chemistry_mod.f """
        # Monomers
        self.N_monomer = self._input_var('int')
        N_monomer      = self.N_monomer
        self.kuhn      = self._input_vec('real')

    def input_chains(self):
        """Read the CHAINS section: N_chain, N_block, block_monomer, block_length."""
        self.N_chain = self._input_var('int', f='A')
        if self.N_chain:
            self.N_block = self._input_vec('int', n=self.N_chain, s='C')
            # Skip the comment line preceding the block_monomer rows
            self.file.readline()
            self.block_monomer = []
            for j in range(self.N_chain):
                self.block_monomer.append( self._input_vec('int', f='N') )
            # Skip the comment line preceding the block_length rows
            self.file.readline()
            self.block_length = []
            for j in range(self.N_chain):
                self.block_length.append( self._input_vec('real', f='N') )
        else:
            self.N_chain = 0

    def input_solvents(self):
        """Read the SOLVENTS section: N_solvent, solvent_monomer, solvent_size."""
        self.N_solvent = self._input_var('int',f='A')
        if self.N_solvent:
            self.solvent_monomer = self._input_vec('int', self.N_solvent, 
                                                   s='C')
            self.solvent_size    = self._input_vec('real', self.N_solvent, 
                                                   s='C')
        else:
            self.N_solvent = 0

    def input_composition(self):
        """Read the COMPOSITION section: ensemble plus phi_* (canonical) or mu_* (grand)."""
        self.ensemble = self._input_var('int',f='A')
        N_chain   = self.N_chain
        N_solvent = self.N_solvent
        # ensemble 0 -> volume fractions (phi); ensemble 1 -> chemical potentials (mu)
        if self.ensemble == 0:
            if self.N_chain > 0:
                self.phi_chain = self._input_vec('real',n=N_chain,s='C',f='A')
            if self.N_solvent > 0:
                self.phi_solvent = self._input_vec('real', n=N_solvent,
                                                   s='C', f='A')
        elif self.ensemble == 1:
            if self.N_chain > 0:
                self.mu_chain = self._input_vec('real', n=N_chain,s='C',f='A')
            if self.N_solvent > 0:
                self.mu_solvent = self._input_vec('real', n=N_solvent,
                                                  s='C',f='A')

    def input_interaction(self):
        """ Analog of subroutine input_interaction in chemistry_mod.f """
        self.interaction_type = self._input_var('char')
        N_monomer = self.N_monomer
        # 'chi' -> constant chi matrix; 'chi_T' -> chi(T) = chi_A/T + chi_B
        if self.interaction_type == 'chi':
            self.chi = self._input_mat('real',N_monomer,N_monomer,s='L')
        elif self.interaction_type == 'chi_T':
            self.chi_A = self._input_mat('real',N_monomer,N_monomer,s='L')
            self.chi_B = self._input_mat('real',N_monomer,N_monomer,s='L')
            self.Temperature = self._input_var('real')

    def output_monomers(self):
        """ Analog of subroutine output_monomers in chemistry_mod.f """
        self._output_var( 'int', 'N_monomer' )
        self._output_vec('real', 'kuhn', self.N_monomer )

    def output_chains(self):
        """ Analog of subroutine output_chains in chemistry_mod.f """
        self._output_var( 'int', 'N_chain')
        N_chain = self.N_chain
        if N_chain > 0:
            self._output_vec( 'int', 'N_block', N_chain, s='C')
            self.file.write('%-20s' % 'block_monomer' + "\n")
            for j in range(self.N_chain):
               self._io.output_vec(self.file, 'int', self.block_monomer[j], 
                                   self.N_block[j], f='N')
            #self.file.write('block_length'+"\n")
            self.file.write('%-20s' % 'block_length' + "\n")
            for j in range(self.N_chain):
                self._io.output_vec(self.file, 'real', self.block_length[j],
                                    self.N_block[j], f='N')

    def output_solvents(self):
        """Write the SOLVENTS section (mirror of input_solvents)."""
        self._output_var('int', 'N_solvent')
        N_solvent = self.N_solvent
        if self.N_solvent > 0:
            self._output_vec('int', 'solvent_monomer',N_solvent,s='C')
            self._output_vec('real', 'solvent_size',N_solvent,s='C')

    def output_composition(self):
        """Write the COMPOSITION section (mirror of input_composition)."""
        self._output_var('int', 'ensemble')
        N_chain   = self.N_chain
        N_solvent = self.N_solvent
        if self.ensemble == 0:
            if N_chain > 0:
                self._output_vec('real', 'phi_chain',N_chain,s='C',f='A')
            if N_solvent > 0:
                self._output_vec('real', 'phi_solvent',N_solvent,s='C',f='A')
        elif self.ensemble == 1:
            if N_chain > 0:
                self._output_vec('real', 'mu_chain',N_chain,s='C',f='A')
            if N_solvent > 0:
                self._output_vec('real', 'mu_solvent',N_solvent,s='C',f='A')

    def output_interaction(self):
        """ Analog of subroutine output_interaction in chemistry_mod.f """
        N_monomer = self.N_monomer
        self._output_var('char', 'interaction_type' )
        if  self.interaction_type == 'chi':
            self._output_mat('real', 'chi',N_monomer,N_monomer,s='L')
        if  self.interaction_type == 'chi_T':
            # NOTE(review): attributes are stored as chi_A/chi_B by
            # input_interaction but written here under the names
            # 'chiA'/'chiB' (which _output_mat will silently skip if
            # absent) -- TODO confirm intended names.
            self._output_mat('real', 'chiA',N_monomer,N_monomer,s='L')
            self._output_mat('real', 'chiB',N_monomer,N_monomer,s='L')
            self._output_var('real', 'Temperature')

    def input_unit_cell(self):
        """ Analog of subroutine input_unit_cell in unit_cell_mod.f """
        self.dim = self._input_var('int')
        self.crystal_system = self._input_var('char')
        self.N_cell_param = self._input_var('int')
        self.cell_param = self._input_vec('real',self.N_cell_param)

    def output_unit_cell(self):
        """ Analog of subroutine output_unit_cell in unit_cell_mod.f """
        self._output_var('int', 'dim')
        self._output_var('char', 'crystal_system')
        self._output_var('int', 'N_cell_param')
        self._output_vec('real', 'cell_param', self.N_cell_param)

    def input_iterate(self):
        """Read the ITERATE section; extra field depends on itr_algo (NR or AM)."""
        self.input_filename = self._input_var('char')
        self.output_prefix = self._input_var('char')
        self.max_itr = self._input_var('int')
        self.error_max = self._input_var('real')
        self.domain = self._input_var('logic')
        self.itr_algo = self._input_var('char')
        if self.itr_algo == 'NR':
            self.N_cut = self._input_var('int')
        if self.itr_algo == 'AM':
            self.N_history = self._input_var('int')

    def output_iterate(self):
        """Write the ITERATE section (mirror of input_iterate)."""
        self._output_var('char', 'input_filename')
        self._output_var('char', 'output_prefix')
        self._output_var('int', 'max_itr')
        self._output_var('real', 'error_max')
        self._output_var('logic', 'domain')
        self._output_var('char', 'itr_algo')
        if self.itr_algo == 'NR':
            self._output_var('int', 'N_cut')
        if self.itr_algo == 'AM':
            self._output_var('int', 'N_history')

    def input_increments(self):
        """ Analog of subroutine input_increments in sweep_mod.f """
        self.increments = {}
        next = 1
        # Read labelled increment entries until 'end_increments'
        while next:
            comment = self.file.readline().strip()
            self.increments[comment] = 1
            if comment == 'd_kuhn':
                 self.d_kuhn = self._input_vec('real',f='N')
            elif comment == 'd_chi':
                 self.d_chi = \
                 self._input_mat('real', self.N_monomer,
                                 self.N_monomer,f='N',s='L')
            elif comment == 'd_temperature':
                 self.d_temperature = self._input_var('real',f='N')
            elif comment == 'd_block_length':
                 self.d_block_length = []
                 for i in range(self.N_chain):
                     self.d_block_length.append(self._input_vec('real',f='N'))
            elif comment == 'd_phi' or comment == 'd_phi_chain':
                 self.increments['d_phi_chain'] = 1
                 self.d_phi_chain = []
                 for i in range(self.N_chain):
                     self.d_phi_chain.append(self._input_var('real',f='N'))
            elif comment == 'd_mu' or comment == 'd_mu_chain':
                 self.increments['d_mu_chain'] = 1
                 self.d_mu_chain = []
                 for i in range(self.N_chain):
                     self.d_mu_chain.append(self._input_var('real',f='N'))
            elif comment == 'd_cell_param':
                 # NOTE(review): this CALLS self.d_cell_param as a function;
                 # an assignment (self.d_cell_param = ...) was almost
                 # certainly intended -- TODO confirm.
                 self.d_cell_param(self._input_vec('real',f='N'))
            elif comment == 'end_increments':
                 next = 0

    def output_increments(self):
        """ Analog of subroutine output_increments in sweep_mod.f """
        N_mon = self.N_monomer
        if self.increments.has_key('d_kuhn'):
            self._output_vec('real', 'd_kuhn',f='A')
        if self.increments.has_key('d_chi'):
            self._output_mat('real', 'd_chi',N_mon,N_mon,f='A',s='L')
        if self.increments.has_key('d_temperature'):
            self._output_var('real', 'temperature',f='A')
        if self.increments.has_key('d_block_length'):
            self.file.write('d_block_length' + "\n")
            for i in range(self.N_chain):
                d_block_length = self.d_block_length[i]
                N_block = self.N_block[i]
                self._output_vec('real', 'd_block_length',N_block,f='N')
        if self.increments.has_key('d_phi_chain'):
            self.file.write('d_phi_chain' + "\n")
            for i in range(self.N_chain):
                # NOTE(review): _output_var takes (type, name, f); these
                # calls pass an extra positional argument and would raise
                # TypeError if this branch is reached -- TODO confirm.
                self._output_var('real', self.d_phi_chain[i], 
                                 'd_phi_chain',f='N')
        if self.increments.has_key('d_mu_chain'):
            for i in range(self.N_chain):
                self._output_var('real',self.d_mu_chain[i], 
                                 'd_mu_chain',f='A')
        if self.increments.has_key('d_cell_param'):
            # NOTE(review): same arity problem as above (5 positional args).
            self._output_var('real', self.d_cell_param, 
                             self.N_cell_param, 'd_cell_param',f='A')
        self.file.write('end_increments' + "\n")

    # "Protected" methods

    # Input methods (wrapper for self._io.input_... methods of IO)
    def _input_var(self, type, comment = None, f='A'):
        """Input scalar variable from file."""
        return self._io.input_var(self.file, type, comment, f)

    def _input_vec(self, type, n=None, comment=None, s='R',f='A'):
        """Input vector-valued variable from file.."""
        return self._io.input_vec(self.file, type, n, comment, s, f)

    def _input_mat(self, type, m, n=None, comment=None, s='L', f='A'):
        """Input matrix-valued variable from file."""
        return self._io.input_mat(self.file, type, m, n, comment, s, f)

    # Output methods silently skip attributes that were never set
    def _output_var(self, type, name, f='A'):
        """Output single variable by variable name."""
        if self.__dict__.has_key(name):
            data = self.__dict__[name]
            self._io.output_var(self.file, type, data, name, f)

    def _output_vec(self, type, name, n=None, s='R', f='A'):
        """Output vector-valued variable by variable name."""
        if self.__dict__.has_key(name):
            data = self.__dict__[name]
            self._io.output_vec(self.file, type, data, n, name, s, f)

    def _output_mat(self, type, name, m, n=None, s='L', f='A'):
        """Output matrix-valued variable by name."""
        if self.__dict__.has_key(name):
            data = self.__dict__[name]
            self._io.output_mat(self.file, type, data, m, n, name, s, f)

    def __getitem__(self,key):
        """Dictionary-style read access to attributes: param['kuhn']."""
        return self.__dict__[key]

    def __str__(self):
        """Return a 'name : value' listing of all attributes, one per line."""
        s = []
        for key in self.att_names:
            s.append( key +  ' : ' + str( self[key] ) )
        # string.join is Python 2 only ('\n'.join(s) in Python 3)
        return string.join(s, '\n')
Exemplo n.º 52
0
class Component:
    """
    Abstract class defining methods inherited by all CliMT components.
    """
    def __init__(self, **kwargs):
        """
        Build a component: set up I/O, parameters, state, diagnostics,
        output file, plotting and monitoring from keyword arguments.
        Unrecognized keyword arguments are reported by _checkUnused.
        """

        # Initialize self.Fixed (subset of self.Prognostic which will NOT be time-marched)
        if 'Fixed' in kwargs: self.Fixed = kwargs.pop('Fixed')
        else: self.Fixed = []

        # Initialize I/O
        self.Io = IO(self, **kwargs)

        # Get values from restart file, if available
        if 'RestartFile' in kwargs:
            ParamNames = Parameters().value.keys()
            FieldNames = self.Required
            # readRestart merges restart values into kwargs
            kwargs = self.Io.readRestart(FieldNames, ParamNames, kwargs)

        # Initialize scalar parameters
        self.Params  = Parameters(**kwargs)

        # Frequency with which compute() will be executed
        if 'UpdateFreq' in kwargs:
            self.UpdateFreq = kwargs.pop('UpdateFreq')
        else:
            self.UpdateFreq = self.Params['dt']

        # Initialize State
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid

        # Dictionary to hold increments on prognos fields
        self.Inc = {}

        # Initialize diagnostics
        self.compute(ForcedCompute=True)

        # Create output file
        self.Io.createOutputFile(self.State, self.Params.value)

        # Write out initial state
        if not self.Io.Appending: self.write()

        # Initialize plotting facilities
        self.Plot = Plot()

        # Initialize runtime monitor
        self.Monitor = Monitor(self,**kwargs)

        # Notify user of unused input quantities
        self._checkUnused(kwargs)

        # Set some redundant attributes (mainly for backward compatibility)
        self.nlon = self.Grid['nlon']
        self.nlat = self.Grid['nlat']
        self.nlev = self.Grid['nlev']
        # Best-effort alias: o3 only exists for components carrying ozone;
        # the bare except deliberately swallows the lookup failure
        try: self.o3 = self.State['o3']
        except: pass

    def compute(self, ForcedCompute=False):
        """
        Updates component's diagnostics and increments.

        Unless ForcedCompute is True, the update is skipped when no
        UpdateFreq interval boundary has been crossed since the
        previous time step.
        """
        # See if it's time for an update; if not, skip rest
        if not ForcedCompute:
            freq = self.UpdateFreq
            time = self.State.ElapsedTime
            if int(time/freq) == int((time-self['dt'])/freq): return

        # Set up union of State, Grid and Params
        Input = {}
        for dic in [self.State.Now, self.Grid.value, self.Params.value]: Input.update(dic)
        Input['UpdateFreq'] = self.UpdateFreq

        # For implicit time stepping, replace current time level with previous (old) time level
        if self.SteppingScheme == 'implicit': Input.update(self.State.Old)

        # For semimplicit time stepping, append previous (old) time level to Input dict
        if self.SteppingScheme == 'semi-implicit':
            for key in self.Prognostic: Input[key+'old'] = self.State.Old[key]

        # List of arguments to be passed to extension
        args = [ Input[key] for key in self.ToExtension ]

        # Call extension and build dictionary of outputs
        OutputValues = self.driver(*args)
        if len(self.FromExtension) == 1: Output = {self.FromExtension[0]: OutputValues}
        else:                            Output = dict( zip(self.FromExtension, OutputValues ) )

        # Extract increments from Output
        for key in self.Prognostic:
            self.Inc[key] = Output.pop(key+'inc')

        # Remove increments of Fixed variables
        for key in self.Fixed:
            if key in self.Inc: self.Inc.pop(key)
            if key in Output: Output.pop(key)

        # Update State, and mirror outputs as instance attributes.
        # setattr replaces the original exec-built assignment string,
        # which was fragile and unnecessary.
        self.State.update(Output)
        for key in Output: setattr(self, key, Output[key])

        # No further need for input dictionary
        del Input

    def step(self, RunLength=1, Inc=None):
        """
        Advances component one or more timesteps, writing to the output
        file when due.

        RunLength -- int: number of time steps; float: run length in seconds.
        Inc -- optional dict of externally-specified increments added to
               the internally-computed increments at each time step.

        Raises TypeError if RunLength is neither int nor float (the
        original code raised an obscure NameError in that case).
        """
        # Avoid the shared-mutable-default-argument pitfall
        if Inc is None: Inc = {}

        # If RunLength is integer, interpret as number of time steps;
        # if float, interpret as length of run in seconds
        if isinstance(RunLength, float):
            NSteps = int(RunLength/self['dt'])
        elif isinstance(RunLength, int):
            NSteps = RunLength
        else:
            raise TypeError('RunLength must be int (steps) or float (seconds)')

        for i in range(NSteps):
            # Add external increments
            for key in Inc:
                if key in self.Inc:
                    self.Inc[key] += Inc[key]
                else:
                    self.Inc[key] = Inc[key]

            # Advance prognostics 1 time step
            self.State.advance(self)

            # Bring diagnostics and increments up to date
            self.compute()

            # Bring calendar up to date
            self['calday'] += self['dt']/self['lod']
            if self['calday'] > self['daysperyear']:
                self['calday'] -= self['daysperyear']

            # Write to file, if it's time to
            dt   = self.Params['dt']
            time = self.State.ElapsedTime
            freq = self.Io.OutputFreq
            if int(time/freq) != int((time-dt)/freq): self.write()

            # Refresh monitor, if it's time to
            if self.Monitor.Monitoring:
                freq = self.Monitor.MonitorFreq
                if int(time/freq) != int((time-dt)/freq): self.Monitor.refresh(self)

    def __call__(self,**kwargs):
        """
        Provides a simple interface to extension, useful e.g. for diagnostics.
        Rebuilds Params, State and Grid from kwargs, then refreshes
        diagnostics via compute().
        """
        # Re-initialize parameters, grid and state
        # (order matters: State(self, ...) is built after self.Params is replaced)
        self.Params  = Parameters(**kwargs)
        self.State = State(self, **kwargs)
        self.Grid = self.State.Grid
        # Bring diagnostics up to date
        self.compute()

    def write(self):
        """
        Invokes write method of IO instance to write out current State.
        """
        self.Io.writeOutput(self.Params, self.State)

    def open(self, OutputFileName='CliMT.nc'):
        """
        Direct subsequent output to OutputFileName, creating the file.
        If that file is already the current output target, do nothing.
        """
        if self.Io.OutputFileName == OutputFileName:
            print '\n +++ ClimT.Io: File %s is currently open for output'% OutputFileName
            return
        else:
            print 'Opening %s for output'% OutputFileName
            # Reset I/O state so output starts fresh at time index 0
            self.Io.OutputFileName = OutputFileName
            self.Io.DoingOutput = True
            self.Io.Appending = False
            self.Io.OutputTimeIndex = 0
            # NOTE(review): createOutputFile is called elsewhere with
            # self.Params.value rather than self.Params -- confirm which
            # is intended here.
            self.Io.createOutputFile(self.State, self.Params)

    def plot(self, *FieldKeys):
        # Delegate plotting of the named fields to the Plot instance
        self.Plot(self, *FieldKeys)

    def setFigure(self, FigureNumber=None):
        # Select (or create) the matplotlib-style figure to draw into
        self.Plot.setFigure(FigureNumber)

    def closeFigure(self, FigureNumber=None):
        # Close the given figure (or the current one if None)
        self.Plot.closeFigure(FigureNumber)

    def usage(self):
        # Print the component's class docstring as usage help
        print self.__doc__

    def report(self):
        print 'CliMT component:\n    %s' % self.Name
        keys = self.State.keys()
        keys1 = []
        for i in range(len(keys)):
            if   keys[i] in self.Prognostic: keys1.append('%12s   %s' % (keys[i],'(prognostic)'))
        for i in range(len(keys)):
            if keys[i] in self.Diagnostic: keys1.append('%12s   %s' % (keys[i],'(diagnostic)'))
        for i in range(len(keys)):
            if keys[i] not in self.Prognostic and keys[i] not in self.Diagnostic:
                                           keys1.append('%12s   %s' % (keys[i],'(Fixed)'))
        print 'State variables:\n %s' % '\n '.join( keys1 )

    def _checkUnused(self,kwargs):
        '''
        Notify of unused input quantities.

        Any keyword that is not a parameter, grid quantity, known field,
        I/O option or monitor option is reported with a warning print.
        '''
        unused = []
        # Keywords consumed by the IO and Monitor subsystems, respectively
        io_keys = ['RestartFile','OutputFile','OutputFreq','OutputFields','ElapsedTime']
        monitor_keys = ['MonitorFields','MonitorFreq']
        for key in kwargs:
            if key not in self.Params  \
            and key not in self.Grid   \
            and key not in KnownFields \
            and key not in io_keys \
            and key not in monitor_keys:
                unused.append(key)

        if len(unused) > 0:
           # Singular/plural suffix for "quantity"/"quantities"
           if len(unused) == 1: suffix = 'y'
           else              : suffix = 'ies'
           print '\n ++++ CliMT.'+self.Name+'.initialize: WARNING: Input quantit%s %s not used.\n' \
                  % (suffix,str(list(unused)))

    def _getShape3D(self, **kwargs):
        '''
        Returns shape of 3D arrays to be passed to extension,
        in (lev, lat, lon) axis order.
        '''
        return tuple(self._getAxisLength(axis, **kwargs)
                     for axis in ('lev', 'lat', 'lon'))


    def _getAxisLength(self, AxisName, **kwargs):
        '''
        Returns length of axis.

        The length is taken, in order of preference, from: an explicit
        axis array in kwargs; the shape of a known input field; or the
        compiled-in defaults (get_nlev/get_nlat/get_nlon). Consistency
        with any dimension enforced by the extension is asserted.
        '''
        # Check input
        assert AxisName in ['lev','lat','lon'], \
               '\n\n ++++ CliMT.%s: Axis name must be one of "lon", "lat", "lev"' % self.Name

        # See if axis was supplied in input
        n = None
        if AxisName in kwargs:
            if ndim(array(kwargs[AxisName])) == 0:
                # Scalar axis value => length 1
                n = 1
            else:
                assert ndim(array(kwargs[AxisName])) == 1, \
                    '\n\n ++++ CliMT.%s.init: input %s must be rank 1' % (self.Name,AxisName)
                n = len(array(kwargs[AxisName]))

        # If not, see if some field was supplied
        else:
            for key in kwargs:
                if key in KnownFields:
                    if KnownFields[key][2] == '2D' and AxisName != 'lev':
                        i = ['lat','lon'].index(AxisName)
                        # Field of lower rank than expected => length 1
                        try:    n = array(kwargs[key]).shape[i]
                        except IndexError: n = 1
                    elif KnownFields[key][2] == '3D':
                        i = ['lev','lat','lon'].index(AxisName)
                        try:    n = array(kwargs[key]).shape[i]
                        except IndexError: n = 1

        # Last resort: get dimensions set in Makefile.
        # Dispatch table replaces the original exec-based local assignment,
        # which does not work under Python 3.
        if n is None:
            n = {'lev': get_nlev, 'lat': get_nlat, 'lon': get_nlon}[AxisName]()

        # Check if extension enforces axis dimension, ensure consistency.
        # getattr replaces the original exec-built attribute lookup;
        # Exception (not bare except) preserves the best-effort fallback.
        try:
            n_ext = getattr(self.Extension, 'get_n%s' % AxisName)()
        except Exception:
            n_ext = n
        assert n_ext == n, \
            '\n\n ++++ CliMT.%s.init: input %s has dimension %i but extension requires %i'% \
            (self.Name, AxisName, n, n_ext)

        return n

    # Returns requested quantity from Params, Grid or State
    def __getitem__(self, key):
        """
        Look up key in Params, Grid then State (first match wins).
        String values pass through unchanged; array values are squeezed
        to drop singleton dimensions. Raises IndexError if key is absent.
        """
        for obj in [self.Params, self.Grid, self.State]:
            if key in obj:
                value = obj[key]
                if isinstance(value, str): return value
                return squeeze(value)
        # Parenthesized raise replaces the Python-2-only 'raise Exc, msg' syntax
        raise IndexError('\n\n CliMT.State: %s not in Params, Grid or State' % str(key))

    # Sets requested quantity in Params, Grid or State
    def __setitem__(self, key, value):
        """
        Store value under key in Params, Grid or State (checked in that
        order). State fields are reshaped to the grid's 2D (lat, lon) or
        3D (lev, lat, lon) shape according to KnownFields. Raises
        IndexError if key is absent everywhere.
        """
        if key in self.Params:
            self.Params[key] = value
        elif key in self.Grid:
            self.Grid[key] = value
        elif key in self.State and KnownFields[key][2] == '2D':
            # 2D fields live on the horizontal (lat, lon) slice of the grid
            self.State[key] = reshape(value, self.Grid.Shape3D[1:3])
        elif key in self.State and KnownFields[key][2] == '3D':
            self.State[key] = reshape(value, self.Grid.Shape3D)
        else:
            # Parenthesized raise replaces the Python-2-only 'raise Exc, msg' syntax
            raise IndexError('\n\n CliMT.State: %s not in Params, Grid or State' % str(key))