Example #1
def one_exp_plot(log_file, ix):
    '''ix is the index of log_file (the ix-th experiment log)
    '''
    jsv, jsv_epoch = [], []
    test_acc, test_acc_epoch = [], []
    lines = open(log_file).readlines()
    for i in range(len(lines)):
        line = lines[i].strip()
        if 'JSV_mean' in line:
            jsv.append(parse_value(line, 'JSV_mean'))
            if 'Epoch' in line:
                jsv_epoch.append(parse_value(line, 'Epoch', type_func=int))
            else:
                jsv_epoch.append(parse_value(lines[i+1], 'Epoch', type_func=int))
        if 'Best_Acc1_Epoch' in line:
            test_acc.append(parse_value(line, 'Acc1'))
            test_acc_epoch.append(parse_value(line, 'Epoch', type_func=int))
    
    interval = args.x_step

    # plot ax1: JSV
    ax1.plot(jsv_epoch[::interval], jsv[::interval], label=legends[ix], color=colors[0], linestyle=linestyles[ix])
    ax1.set_ylabel('Mean JSV')

    # plot ax2: Test accuracy
    ax2.plot(test_acc_epoch[::interval], test_acc[::interval], label=legends[ix], color=colors[1], linestyle=linestyles[ix])
    ax2.set_xlabel('Epoch'); ax2.set_ylabel('Test accuracy (%)')
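The parse_value helper used above is not included in these snippets. Below is a minimal, hypothetical sketch of a log-line parser with the same signature, assuming the log prints whitespace-separated "Key value" pairs (the exact log format is an assumption, not taken from the original project):

def parse_value(line, key, type_func=float):
    # Hypothetical helper: return the token immediately following `key`
    # in a whitespace-separated log line, cast with `type_func`.
    tokens = line.replace(':', ' ').split()
    return type_func(tokens[tokens.index(key) + 1])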
Example #2
def one_exp_plot(log_file, ix):
    '''ix is the index of log_file (the ix-th experiment log)
    '''
    jsv, jsv_epoch = [], []
    test_acc, test_acc_epoch = [], []
    for line in open(log_file):
        if 'JSV_mean' in line and 'Epoch' in line:
            jsv.append(parse_value(line, 'JSV_mean'))
            jsv_epoch.append(parse_value(line, 'Epoch', type_func=int))
        if 'Best_Acc1_Epoch' in line:
            test_acc.append(parse_value(line, 'Acc1'))
            test_acc_epoch.append(parse_value(line, 'Epoch', type_func=int))

    # plot ax1: JSV
    ax1.plot(jsv_epoch,
             jsv,
             label=legends[ix],
             color=colors[0],
             linestyle=linestyles[ix])
    ax1.yaxis.label.set_color(colors[0])
    ax1.tick_params(axis='y', colors=colors[0])

    # plot ax2: Test accuracy
    ax2.plot(test_acc_epoch,
             test_acc,
             label=legends[ix],
             color=colors[1],
             linestyle=linestyles[ix])
    ax2.yaxis.label.set_color(colors[1])
    ax2.tick_params(axis='y', colors=colors[1])
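Both plotting snippets rely on module-level ax1, ax2, legends, colors, and linestyles (Example #1 additionally reads args.x_step for its plotting interval). A minimal setup sketch, assuming a matplotlib figure with a twin y-axis; all file names and labels below are illustrative, not from the original project:

import matplotlib.pyplot as plt

fig, ax1 = plt.subplots()
ax2 = ax1.twinx()                    # second y-axis sharing the epoch axis
legends = ['baseline', 'pruned']     # one label per experiment log
colors = ['tab:blue', 'tab:orange']  # colors[0] for JSV, colors[1] for accuracy
linestyles = ['-', '--']

for ix, log_file in enumerate(['exp0.log', 'exp1.log']):
    one_exp_plot(log_file, ix)
ax1.legend(loc='upper left')
fig.savefig('jsv_vs_acc.png')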
Example #3
def filter_tuple(cfg, f):
    fs = f.split('=')
    assert len(fs) == 2
    (key, value) = fs
    key = key.strip()
    if re.match(r"\[.*\]", value):
        value = utils.parse_str_list(value)
    else:
        value = [utils.parse_value(value)]

    # if dimension not in optspace, exit
    if key not in cfg.optspace:
        print(key, "does not belong to optspace")
        print("dimensions:", ','.join(cfg.optspace.keys()))
        sys.exit(1)

    # if value not in optspace, exit
    for v in value:
        keys = [str(i) for i in cfg.optspace[key]]
        if str(v) not in keys:
            print(v, "does not belong to dimension")
            print("values:", ','.join(keys))
            sys.exit(1)

    return (key, value)
Example #4
def generic_filter(experiment, outf, filters):
    for key, value in filters:
        assert isinstance(key, str)
        assert isinstance(value, list)

        if utils.parse_value(experiment[key]) not in value:
            return False
    return True
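A hypothetical usage of generic_filter, assuming `experiments` is a list of dict-like experiment records; the filter keys and values are illustrative only:

filters = [('solver', ['cg']), ('tile_size', [8, 16])]
matching = [exp for exp in experiments
            if generic_filter(exp, None, filters)]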
Example #5
def parse(args):
    """ parse arguments in this format: \"option1=value;option2=[value3,value4]\"
    and returns a dictionary { option1 : [ value ] , option2 : [value3, value4]
    """

    flts = {}
    for f in args.split(';'):
        fs = f.split('=')
        assert len(fs) == 2
        (key, value) = fs
        key = key.strip()
        if re.match(r"\[.*\]", value):
            value = utils.parse_str_list(value)
        else:
            value = [utils.parse_value(value)]
        flts[key] = value
    return flts
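A usage sketch for parse(), following its docstring; the exact conversions performed by utils.parse_value and utils.parse_str_list are assumed here:

flts = parse("option1=value;option2=[value3,value4]")
# -> {'option1': ['value'], 'option2': ['value3', 'value4']}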
Example #6
    def readings(self, session, **params) -> iter:
        """
        Stream CSV data as rows (one dictionary per row)
        """
        endpoint = "{endpoint}/readings.csv".format(endpoint=self.endpoint)
        stream = session.call_iter(endpoint, params=params)
        # First row holds the column headers
        headers = next(csv.reader(stream))
        for row in csv.DictReader(stream, fieldnames=headers):
            # Parse each CSV row into a typed record
            try:
                row = OrderedDict(station=self.object_id,
                                  measure=row['measure'],
                                  timestamp=utils.parse_timestamp(row['dateTime']),
                                  value=utils.parse_value(row['value']))
                yield row
            except ValueError:
                # Log the offending row before re-raising
                LOGGER.error(row)
                raise
Example #7
    def get_archive(cls, session, date):
        """
        Historic Readings. The measurement readings are archived daily as dump files in CSV format.

        https://environment.data.gov.uk/flood-monitoring/doc/reference#historic-readings
        """
        for row in cls._get_archive(session=session, date=date):
            # Rename columns
            yield OrderedDict(
                timestamp=utils.parse_timestamp(row['dateTime']),
                station=row['station'],
                station_reference=row['stationReference'],
                measure=row['measure'],
                unit_name=row['unitName'],
                value=utils.parse_value(row['value']),
                datumType=row['datumType'],
                label=row['label'],
                parameter=row['parameter'],
                qualifier=row['qualifier'],
                period=row['period'],
                value_type=row['valueType'],
                observed_property=settings.PARAMETER_MAP[row['parameter']],
            )
Example #8
    def __init__(self, value, unit):
        self.value = utils.parse_value(value)
        self.unit = unit
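In Examples #6 to #8 utils.parse_value takes a single raw string. A minimal sketch consistent with the ValueError handling in Example #6, assuming it simply coerces a CSV field to a float and treats an empty field as missing (the real helper may do more):

def parse_value(text):
    # Hypothetical sketch: convert a raw CSV field to a float;
    # an empty field is treated as a missing reading.
    text = text.strip()
    return float(text) if text else None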