Example #1
    def __init__(self):
        cfg = util.config()
        self.conn = sqlite3.connect(util.get_path(cfg["db"]["db_sqlite_file"]))
        logging.debug("Got connection to database")

        schema = open(util.get_path("db/db_schema.sql")).read()
        for statement in schema.split(";"):
            try:
                self.conn.execute(statement)
            except sqlite3.OperationalError as e:
                if statement.strip().startswith("--IGNORE_ERROR"):
                    logging.info(
                        "Ignoring error thrown by statement {}: {}".format(
                            statement, e))
                else:
                    raise e
        self.conn.commit()
        logging.debug("Done!")

        if self.get_db_version() == "1":
            logging.info("Got db version of 1, migrating to version 2")
            if self.migrate_genre():
                logging.info("It worked")
                self.set_db_version("2")
            else:
                logging.error("Failed to migrate genres")
Example #2
 def verify_complete(self, client: Client) -> State:
     """Are we done with this group yet?"""
     state = State.OK
     jobs = get_maint_job(client, self.group)
     if len(jobs) < self.device_count:
         log.debug(
             "Some devices are still offline.",
             online=len(jobs),
             expected=self.device_count,
         )
         state = State.PENDING
     for job in jobs:
         status = job.get("upgradeStatus")
         job_ver = job.get("desiredVersion")
         node_id = get_path(
             str,
             get_path(str,
                      job.get("dn", "").split("/"), 2).split("-"), 1)
         log.debug(
             "Upgrade status",
             percent=job.get("instlProgPct"),
             node_id=node_id,
             status=status,
             target_version=job_ver,
         )
         if status != "completeok":
             state = State.PENDING
         if job_ver != self.version_str:
             state = State.PENDING
     return state
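The get_path helper is not part of this snippet; here (and in examples #11 and #26 below) it is called as get_path(cast, container, key) and appears to return the looked-up value coerced to the given type. A minimal sketch of such a helper, purely an assumption about its behaviour:

def get_path(cast, container, key, default=0):
    """Hypothetical helper: index into a dict or list and coerce the value,
    falling back to ``default`` on a missing key/index or a failed cast."""
    try:
        return cast(container[key])
    except (KeyError, IndexError, TypeError, ValueError):
        return cast(default)

# e.g. pulling "101" out of a dn such as "topology/pod-1/node-101/sys":
node = get_path(str, "topology/pod-1/node-101/sys".split("/"), 2)  # "node-101"
node_id = get_path(str, node.split("-"), 1)                        # "101"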
Example #3
def split_and_save(config, data, labels):
    print("Pre-processing data!")
    (x_train, x_test, y_train, y_test) = train_test_split(data,
                                                          labels,
                                                          test_size=0.25,
                                                          stratify=labels,
                                                          random_state=42,
                                                          shuffle=True)

    # TODO: remove this from here and do it at training time, since we might need it for the embedding
    y_train = keras.utils.to_categorical(y_train, config.num_classes)
    y_test = keras.utils.to_categorical(y_test, config.num_classes)

    # save the reshaped photos
    x_train_path = get_path(config.data_path_root, config.x_train_path)
    save(x_train_path, x_train)

    y_train_path = get_path(config.data_path_root, config.y_train_path)
    save(y_train_path, y_train)

    x_test_path = get_path(config.data_path_root, config.x_test_path)
    save(x_test_path, x_test)

    y_test_path = get_path(config.data_path_root, config.y_test_path)
    save(y_test_path, y_test)
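Assuming save and get_path above wrap numpy.save and a plain path join (neither helper is shown in this snippet), the training step would read the splits back roughly like this; the function name is illustrative:

import os

import numpy as np

def load_split(data_path_root, name):
    # hypothetical counterpart of save(get_path(root, name), array);
    # assumes the configured file names already include the .npy extension
    return np.load(os.path.join(data_path_root, name))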
Example #4
def crop_hero_up_template_image():
    with open('data.json', 'r') as f:
        data = json.load(f)
    for h in data:
        path = get_path('res/hero_up_img') + '/{}.png'.format(h)
        img = cv2.imread(path)
        res = img[0:20, 0:20]
        cv2.imwrite(get_path('res/hero_up') + '/{}.png'.format(h), res)
Example #5
 def setUp(self):
     """Runs before each unit test
     Sets up the AmpObject object using "stl_file.stl"
     """
     from ampscan.core import AmpObject
     stl_path = get_path("stl_file.stl")
     self.amp = AmpObject(stl_path)
     stl_path = get_path("stl_file_4.stl")  # R=1.2
     self.amp2 = AmpObject(stl_path)
Example #6
 def setUp(self):
     """Runs before each unit test.
     Sets up AmpObject objects using "stl_file_4.stl" and "stl_file_5.stl".
     """
     from AmpScan.core import AmpObject
     # Load 2 spheres with radius 1, and 1.2
     stl_path = get_path("stl_file_5.stl")  # R=1
     self.amp1 = AmpObject(stl_path)
     stl_path = get_path("stl_file_4.stl")  # R=1.2
     self.amp2 = AmpObject(stl_path)
Example #7
def read_unformatted():
    home_id_dict = util.get_homeid_dict()
    files = glob.glob(
        util.get_path('daily_reading', 'raw_data', 'all') +
        'xlsx_unformatted/*.xlsx')
    for f in files[:1]:
        filename = f[f.rfind('/') + 1:]
        tokens = util.split_string([' ', '_', '.', '-'], filename)
        home_id = 'UNKNOWN'
        for x in tokens:
            if x in home_id_dict:
                home_id = home_id_dict[x]
        df = pd.read_excel(f, sheetname=0)
        if filename == 'TC_D2_Romer_Manual readings -logs_ - Sail boat_11-1_11-18.xlsx':
            df.rename(columns={
                'Date': 'date',
                'Time': 'time',
                'Action: observations and behaviour': 'activity'
            },
                      inplace=True)
            df.dropna(axis=0, how='all', inplace=True)
            pattern = re.compile('[0-9]{1,2}')
            df['date'] = df['date'].ffill()
            df['time'] = df['time'].ffill()
            df.dropna(subset=['date', 'time'], axis=0, how='any', inplace=True)
            df['time'] = df['time'].map(remove_ampm)
            df['date'] = df['date'].map(
                lambda x: 'Nov' if x == 'Nov' else '2016-11-{0}'.format(
                    re.match(pattern, x).group()))
            df = df[['date', 'time', 'activity']]
        if filename == 'DHP Log - DHP-HP-Daily Readings week of Jan 11, 2016.xlsx':
            df = df.ix[19:23, [0, 1, 8]]
            df.rename(columns={
                'Date': 'date',
                'Unnamed: 1': 'time',
                datetime.datetime(2016, 1, 14, 0, 0): 'activity'
            },
                      inplace=True)
            df['time'] = df['time'].map(remove_ampm)
            df.info()
            print df.head()
        lastdate = df['date'].tolist()[-1]
        print type(lastdate)
        if type(lastdate) == datetime.datetime:
            timestr = lastdate.strftime('%m-%d-%Y')
        elif type(lastdate) == pd.tslib.Timestamp:
            timestr = '{0}-{1}-{2}'.format(lastdate.month, lastdate.day,
                                           lastdate.year)
        else:
            timestr = lastdate
        outfile = 'activity_{0}_{1}.csv'.format(home_id, timestr)
        print 'write to {0}'.format(outfile)
        df.to_csv(util.get_path('daily_reading', 'activity_stamp', 'all') +
                  '{0}'.format(outfile),
                  index=False)
Example #8
def crop_template_from_snapshot():
    im = ImageGrab.grabclipboard()
    assert im is not None, 'Take a snapshot first!'
    im.save(get_path('.') + '/snapshot.png', 'PNG')
    im = cv2.imread(get_path('.') + '/snapshot.png')
    for i in range(Cfg.COL):
        for j in range(Cfg.ROW):
            x = Cfg.X0 + i * Cfg.X_INTERVAL
            y = Cfg.Y0 + j * Cfg.Y_INTERVAL
            raw_template = im[y:y + Cfg.H, x:x + Cfg.W, :]
            cv2.imwrite(
                get_path('res/raw_template') + f'/{i}_{j}.png', raw_template)
Example #9
def test_cleaning():
    files = glob.glob(util.get_path('Dylos', 'raw_data', 'all') + '*.[a-z][a-z][a-z]')
    # files = files[:101]
    files = [x for x in files if 'CTG_R_D077_11-19' in x]
    mlines = ['filename,multiplier\n']
    for i, f in enumerate(files):
        if i % step_size == 0:
            print i
        m = cleaning(f, f.replace('raw_data', 'reform_'))
        mlines.append('{0},{1}\n'.format(f[f.rfind('/') + 1:], m))
    with open (util.get_path('Dylos', 'reform_', 'all') + 'summary/m.csv', 'w+') as wt:
        wt.write(''.join(mlines))
    return
Example #10
    def _load_manifest(self):
        output = util.get_path("./temp")
        print output

        manifest = ''
        if os.path.isfile(self.filename):
            zip = zipfile.ZipFile(self.filename)
            zip.extract('AndroidManifest.xml', output)
            manifest = os.path.join(output, 'AndroidManifest.xml')
        if os.path.isfile(manifest):
            cmd = "java -jar %s %s > %s" % \
                  (util.get_path('./res/AXMLPrinter2.jar'),
                   manifest, 
                   util.get_path('./temp/AndroidManifest_decoded.xml'))
            os.popen(cmd)
            return util.get_path('./temp/AndroidManifest_decoded.xml')
Example #11
def check_tcam_scale(client: Client) -> State:
    """per-leaf TCAM scale"""
    # Verify polUsageCum <= polUsageCapCum for eqptcapacityPolUsage5min
    over_limit = False
    for record in client.get_class("eqptcapacityPolUsage5min"):
        node_dn = get_node_dn(record["dn"])
        count = get_path(int, record, "polUsageCum")
        limit = get_path(int, record, "polUsageCapCum")
        if count > 0 and count >= limit:
            over_limit = True
            log.warning(f"Over TCAM scale on {node_dn}",
                        count=count,
                        limit=limit)
        if client.args["debug"]:
            log.debug(f"TCAM scale on {node_dn}", count=count, limit=limit)
    return State.FAIL if over_limit else State.OK
Example #12
File: docker.py Project: djpetti/stoplight
  def __init__(self, container, job_dir, nvidia=True):
    """
    Args:
      container: The container to run.
      job_dir: The directory that will be mounted under /job_files in the
      container.
      nvidia: If true, it will use nvidia-docker instead of normal docker. """
    self.__container = container
    self.__job_dir = os.path.abspath(job_dir)

    self.__process = None

    if nvidia:
      self.__docker = util.get_path("nvidia-docker")
    else:
      self.__docker = util.get_path("docker")
Example #13
def send_email(sender_email, password, student_email, student_code):

  path = util.get_path(student_code)
  filename_certificate = util.get_certificate(student_code)
  filename_declaration = util.get_declaration(student_code)

  if(util.is_valid_file(path, filename_certificate) and util.is_valid_file(path, filename_declaration)):
    message = MIMEMultipart()
    message["From"] = sender_email
    message["To"] = student_email
    message["Subject"] = subject
    message.attach(MIMEText(body, "plain"))

    part1 = add_file(path, filename_certificate)
    message.attach(part1)
    part2 = add_file(path, filename_declaration)
    message.attach(part2)

    text = message.as_string()
    print('Sending from %s to %s' % (sender_email, student_email))
    sleep(5)
    context = ssl.create_default_context()
    with smtplib.SMTP_SSL("smtp.gmail.com", 465, context=context) as server:
        server.login(sender_email, password)
        server.sendmail(sender_email, [student_email, sender_email], text)  # Bcc to sender_email

  return 
Example #14
def test_cleaning():
    files = glob.glob(
        util.get_path('Dylos', 'raw_data', 'all') + '*.[a-z][a-z][a-z]')
    # files = files[:101]
    files = [x for x in files if 'CTG_R_D077_11-19' in x]
    mlines = ['filename,multiplier\n']
    for i, f in enumerate(files):
        if i % step_size == 0:
            print i
        m = cleaning(f, f.replace('raw_data', 'reform_'))
        mlines.append('{0},{1}\n'.format(f[f.rfind('/') + 1:], m))
    with open(
            util.get_path('Dylos', 'reform_', 'all') + 'summary/m.csv',
            'w+') as wt:
        wt.write(''.join(mlines))
    return
Example #15
 def format_json_name(text):
     """formats a collection name as a JSON filename"""
     result = text
     match = re.search(r'\w+.json', text)
     if not match:
         result += '.json'
     return util.get_path(result)
Example #16
def get_tracked_relays():
    """
  Provides the relays we're tracking.

  :returns: **list** of **TrackedRelay** we're tracking

  :raises: **ValueError** if our config file is malformed
  """

    config = stem.util.conf.get_config('tracked_relays')
    config.load(util.get_path('data', 'tracked_relays.cfg'))

    results, expired = [], []

    for identifier in set([key.split('.')[0] for key in config.keys()]):
        relay = TrackedRelay(identifier, config)

        if relay.expires > datetime.datetime.now():
            results.append(relay)
        else:
            expired.append(relay)

    if expired:
        body = 'The following entries in tracked_relays.cfg have expired...\n\n'

        for relay in expired:
            body += '* %s (%s)\n' % (relay.identifier,
                                     relay.expires.strftime('%Y-%m-%d'))

        util.send('tracked_relays.cfg entries expired',
                  body=body,
                  to=['*****@*****.**'])

    return results
Example #17
def main():
    """ Execute the test suite. """

    # Create a logger and load its config
    logging.config.fileConfig(get_path('bin/logging.conf'))
    logger = logging.getLogger(__name__)

    # parse options
    opts = parse_arguments()
    work_dir = os.path.join(os.getcwd(), opts.work_dir)
    db_connection = MySQLdb.connect(host=opts.db_host, user=opts.db_user,
                passwd=opts.db_passwd, db=opts.db_name)
    db_connection.autocommit(True)

    # Create work directory
    if not os.path.exists(work_dir):
        logger.info("Create a directory to work in.")
        os.makedirs(work_dir)

    db = DbConnector(db_connection)
    tc_factory = TcFactory(work_dir)
    s_factory = SettingFactory(db)
    tcg = TcGenerator(db, tc_factory, s_factory, work_dir, batch_size=4)
    detector = Detector(db, work_dir, timeout=opts.timeout)

    function_records = namedtuple('Function', ['id', 'name', 'tcl', 'header',
        'number_of_parameters', 'c_types', 'signature', 'return_type'])
    functions = db.get_functions(function_records)
    for function in functions:
        test_function(tcg, detector, logger, function, do_cleanup=True)
Example #18
    def channeling_read(out_channels: List[str], **kwargs) -> TimeSeriesDict:
        out = TimeSeriesDict()

        for channel in out_channels:
            for prefix in search_dirs:
                for in_channel in in_channels:
                    try:
                        # lock the target file
                        h5file, _ = path2h5file(get_path(
                            f'{in_channel} {generation_start}',
                            'hdf5',
                            prefix=prefix),
                                                mode='r')
                        # read off the dataset
                        out[channel] = TimeSeries.read(h5file, channel,
                                                       **kwargs)
                    except (FileNotFoundError, KeyError, OSError):
                        # file not found / hdf5 can't open file (OSError), channel not in file (KeyError)
                        continue
                    break
                else:
                    continue
                break
            else:
                # tried all search dirs but didn't find it. Attempt to download.
                raise FileNotFoundError(f'CANNOT FIND {channel}!!')
                # out[channel] = TimeSeries.get(channel, **kwargs) # slow.
        return out
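The nested for/else/break ladder above is what stops the search at the first readable file: break leaves the inner loop on a hit, else/continue runs only when a loop finished without breaking, and the final else raises once every prefix has been exhausted. A stripped-down sketch of the same control flow:

def find_first(search_dirs, names, exists):
    """Return the first (prefix, name) pair for which exists() is true."""
    for prefix in search_dirs:
        for name in names:
            if exists(prefix, name):
                break        # hit: leave the inner loop
        else:
            continue         # inner loop exhausted: try the next prefix
        break                # the inner break happened: stop searching
    else:
        raise FileNotFoundError(f"none of {names} found in {search_dirs}")
    return prefix, name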
Example #19
def main():
    """ Execute the test suite. """

    # Create a logger and load its config
    logging.config.fileConfig(get_path('bin/logging.conf'))
    logger = logging.getLogger(__name__)

    # parse options
    opts = parse_arguments()
    work_dir = os.path.join(os.getcwd(), opts.work_dir)
    db_connection = MySQLdb.connect(host=opts.db_host,
                                    user=opts.db_user,
                                    passwd=opts.db_passwd,
                                    db=opts.db_name)
    db_connection.autocommit(True)

    # Create work directory
    if not os.path.exists(work_dir):
        logger.info("Create a directory to work in.")
        os.makedirs(work_dir)

    db = DbConnector(db_connection)
    tc_factory = TcFactory(work_dir)
    s_factory = SettingFactory(db)
    tcg = TcGenerator(db, tc_factory, s_factory, work_dir, batch_size=4)
    detector = Detector(db, work_dir, timeout=opts.timeout)

    function_records = namedtuple('Function', [
        'id', 'name', 'tcl', 'header', 'number_of_parameters', 'c_types',
        'signature', 'return_type'
    ])
    functions = db.get_functions(function_records)
    for function in functions:
        test_function(tcg, detector, logger, function, do_cleanup=True)
Example #20
 def setUp(self):
     """Runs before each unit test.
     Sets up the AmpObject object using "stl_file.stl".
     """
     from AmpScan.core import AmpObject
     stl_path = get_path("stl_file.stl")
     self.amp = AmpObject(stl_path)
Example #21
def run_export():
    if not config()["export"]["enable"]:
        logging.info("Export disabled, not running...")
        return

    logging.info("Enabled, running yoyoyo okokokok")

    os.chdir(get_path("upload"))

    prev_music = ""
    if os.path.exists("music.csv"):
        prev_music = open("music.csv", "r").read()

    db = DbStore()
    write_csv(db)
    if open("music.csv", "r").read() != prev_music:
        logging.info("music.csv changed so reuploading to github")
        # I know should use subprocess
        os.system("rm main.sqlite")
        os.system("cp ../main.sqlite main.sqlite")
        os.system('sqlite3 main.sqlite ".dump" > main.sql')
        os.system("git add main.sql music.csv")

        os.system("git commit -m \"Data upload at {}\"".format(
            datetime.datetime.now().isoformat()))
        os.system("git push -u origin master")
    else:
        logging.info("tracks.txt the same, no new music to upload")
Example #22
    def get_updates(variables):
        # this is fugly because we must get the batch stats from the
        # graph so we get the ones that are *actually being used in
        # the computation* after graph transforms have been applied
        updates = []
        variables = graph.deep_ancestors(variables)
        for stat, role in BatchNormalization.roles.items():
            from blocks.roles import has_roles
            batch_stats = [var for var in variables if has_roles(var, [role])]
            batch_stats = util.dedup(batch_stats, equal=util.equal_computations)

            batch_stats_by_brick = OrderedDict()
            for batch_stat in batch_stats:
                brick = batch_stat.tag.batch_normalization_brick
                population_stat = brick.population_stats[stat]
                batch_stats_by_brick.setdefault(brick, []).append(batch_stat)

            for brick, batch_stats in batch_stats_by_brick.items():
                population_stat = brick.population_stats[stat]
                if len(batch_stats) > 1:
                    # makes sense for recurrent structures
                    logger.warning("averaging multiple population statistic estimates to update %s: %s"
                                   % (util.get_path(population_stat), batch_stats))
                batch_stat = T.stack(batch_stats).mean(axis=0)
                updates.append((population_stat,
                                (1 - brick.alpha) * population_stat
                                + brick.alpha * batch_stat))
        return updates
Example #23
def read_formatted():
    # df_lookup = pd.read_csv(os.getcwd() + '/input/log_rename.csv')
    # df_lookup.set_index('oldname', inplace=True)
    home_id_dict = util.get_homeid_dict()
    files = glob.glob(
        util.get_path('daily_reading', 'raw_data', 'all') +
        'xlsx_formatted/*.xlsx')
    lastline_dict = \
        {'LCMP Log_Observation-Incident Report_V6.JJN.xlsx': 32,
         'Copy of D4-CMU-Daily Readings_RTto24jan2016.xlsx': 16,
         'D4-Hartkopf_Loftness-Daily Readings.xlsx': 14}
    sheets_dict = {
        'D4-Hartkopf_Loftness-Daily Readings.xlsx': [0, 1],
        'Copy of D4-CMU-Daily Readings_RTto24jan2016.xlsx': [0]
    }
    for f in files:
        sheetlist = [1]
        filename = f[f.rfind('/') + 1:]
        tokens = util.split_string([' ', '_', '.', '-'], filename)
        if filename in sheets_dict:
            sheetlist = sheets_dict[filename]
        home_id = 'UNKNOWN'
        for x in tokens:
            if x in home_id_dict:
                home_id = home_id_dict[x]
        for s in sheetlist:
            idx_lastline = 33
            df = pd.read_excel(f, sheetname=s)
            if filename in lastline_dict:
                idx_lastline = lastline_dict[filename]
            df2 = df.transpose().iloc[:, [1, 2, idx_lastline]]
            df2.dropna(subset=[idx_lastline], inplace=True)
            df2.rename(columns={
                1: 'date',
                2: 'time',
                33: 'activity'
            },
                       inplace=True)
            df2.drop(df2.index[0], axis=0, inplace=True)
            timestr = df2.ix[-1, 'date'].strftime('%m-%d-%Y')
            outfile = 'activity_{0}_{1}.csv'.format(home_id, timestr)
            print 'write to {0}'.format(outfile)
            df2.to_csv(
                util.get_path('daily_reading', 'activity_stamp', 'all') +
                '{0}'.format(outfile),
                index=False)
    return
Example #24
    def setUp(self):
        """Runs before each unit test.
        Sets up AmpObject objects using "stl_file_4.stl", "stl_file_5.stl" and "stl_file_7.stl".
        """
        from ampscan.core import AmpObject

        # Radius = 1
        stl_path = get_path("stl_file_4.stl")
        self.amp1 = AmpObject(stl_path)

        # Radius = 1.2
        stl_path = get_path("stl_file_5.stl")
        self.amp2 = AmpObject(stl_path)

        # Spheroid with major radius 1 and minor 0.5
        stl_path = get_path("stl_file_7.stl")
        self.amp3 = AmpObject(stl_path)
Example #25
File: process_log.py Project: yujiex/ROCIS
def read_unformatted():
    home_id_dict = util.get_homeid_dict()
    files = glob.glob(util.get_path('daily_reading', 'raw_data', 'all') + 'xlsx_unformatted/*.xlsx')
    for f in files[:1]:
        filename = f[f.rfind('/') + 1:]
        tokens = util.split_string([' ', '_', '.', '-'], filename)
        home_id = 'UNKNOWN'
        for x in tokens:
            if x in home_id_dict:
                home_id = home_id_dict[x]
        df = pd.read_excel(f, sheetname=0)
        if filename == 'TC_D2_Romer_Manual readings -logs_ - Sail boat_11-1_11-18.xlsx':
            df.rename(columns={'Date': 'date', 'Time': 'time',
                               'Action: observations and behaviour':
                               'activity'}, inplace=True)
            df.dropna(axis=0, how='all', inplace=True)
            pattern = re.compile('[0-9]{1,2}')
            df['date'] = df['date'].ffill()
            df['time'] = df['time'].ffill()
            df.dropna(subset=['date', 'time'], axis=0, how='any',
                      inplace=True)
            df['time'] = df['time'].map(remove_ampm)
            df['date'] = df['date'].map(lambda x: 'Nov' if x == 'Nov' else '2016-11-{0}'.format(re.match(pattern, x).group()))
            df = df[['date', 'time', 'activity']]
        if filename == 'DHP Log - DHP-HP-Daily Readings week of Jan 11, 2016.xlsx':
            df = df.ix[19:23, [0, 1, 8]]
            df.rename(columns={'Date': 'date', 'Unnamed: 1': 'time',
                               datetime.datetime(2016, 1, 14, 0, 0):
                               'activity'}, inplace=True)
            df['time'] = df['time'].map(remove_ampm)
            df.info()
            print df.head()
        lastdate = df['date'].tolist()[-1]
        print type(lastdate)
        if type(lastdate) == datetime.datetime:
            timestr = lastdate.strftime('%m-%d-%Y')
        elif type(lastdate) == pd.tslib.Timestamp:
            timestr = '{0}-{1}-{2}'.format(lastdate.month, lastdate.day, lastdate.year)
        else:
            timestr = lastdate
        outfile = 'activity_{0}_{1}.csv'.format(home_id, timestr)
        print 'write to {0}'.format(outfile)
        df.to_csv(util.get_path('daily_reading', 'activity_stamp',
                                'all') + '{0}'.format(outfile),
                  index=False)
Example #26
def check_switch_scale(client: Client) -> State:
    """per-switch scale"""
    # Verify counts from ctxClassCnt are < limits from fvcapRule
    from collections import defaultdict

    metrics = defaultdict(lambda: defaultdict(dict))
    # map ctxClassCnt counts to fvcapRule limits
    count_to_limit = {"l2BD": "fvBD", "fvEpP": "fvCEp", "l3Dom": "fvCtx"}
    # Build dict with device/mo/metric
    counts = client.get_class("ctxClassCnt",
                              params={"rsp-subtree-class": "l2BD,fvEpP,l3Dom"})
    for record in counts:
        node_dn = get_node_dn(record["dn"])
        key = count_to_limit.get(record["name"])
        if key:
            metrics[node_dn][key]["count"] = get_path(int, record, "count")

    # Add limits to the metrics dict
    limits = client.get_class("fvcapRule", cache=True)
    for record in limits:
        if record["dn"].startswith("topology"):
            node_dn = get_node_dn(record["dn"])
            subj = record["subj"]
            if node_dn in metrics and subj in count_to_limit.values():
                limit = get_path(int, record, "constraint")
                metrics[node_dn][subj]["limit"] = limit

    # Validate metrics
    over_limit = False
    for node_dn, by_mo in metrics.items():
        for mo, metric in by_mo.items():
            count = metric.get("count", 0)
            limit = metric.get("limit", 0)
            if count > 0 and count >= limit:
                over_limit = True
                log.warning(f"Over scale limit on {node_dn}",
                            mo=mo,
                            count=count,
                            limit=limit)
            if client.args["debug"]:
                log.debug(f"Scale metric on {node_dn}:",
                          mo=mo,
                          count=count,
                          limit=limit)
    return State.FAIL if over_limit else State.OK
Example #27
 def test_get_horizontal_path_should_not_contain_no_duplicate_positions(self):
     path = get_path((0, 0), (5, 0))
     self.assertEqual(len(path), 6)
     self.assertEqual((0, 0), path[0])
     self.assertEqual((1, 0), path[1])
     self.assertEqual((2, 0), path[2])
     self.assertEqual((3, 0), path[3])
     self.assertEqual((4, 0), path[4])
     self.assertEqual((5, 0), path[5])
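The get_path under test is not shown; a sketch that satisfies this horizontal case (it only handles axis-aligned and perfectly diagonal lines; a full implementation would presumably use something like Bresenham's line algorithm):

def get_path(start, end):
    """Hypothetical sketch: step one cell at a time from start to end,
    inclusive, producing each position exactly once."""
    (x0, y0), (x1, y1) = start, end
    step_x = (x1 > x0) - (x1 < x0)   # sign: -1, 0 or 1
    step_y = (y1 > y0) - (y1 < y0)
    path = [(x0, y0)]
    x, y = x0, y0
    while (x, y) != (x1, y1):
        x, y = x + step_x, y + step_y
        path.append((x, y))
    return path

assert get_path((0, 0), (5, 0)) == [(0, 0), (1, 0), (2, 0), (3, 0), (4, 0), (5, 0)]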
Example #28
    def _compile_executer(self):
        # Copy header to work dir
        shutil.copy2(get_path("bin/testcase_executer.h"), os.path.join(self.work_dir, "testcase_executer.h"))
        # Copy source to work dir
        executer_source = os.path.join(self.work_dir, "testcase_executer.cpp")
        shutil.copy2(get_path("bin/testcase_executer.cpp"), executer_source)

        # Compile the executer
        log = os.path.join(self.work_dir, "c.log")
        compiler_call = ["g++", "-g", executer_source] + ["-ldl"]
        compiler_call += ["-lrt", "-o", "testcase_executer.out"]
        subprocess.Popen(compiler_call, stdout=open(log, "a"), stderr=open(log, "a"), cwd=self.work_dir)

        pid, ret = os.wait()

        if ret != 0:
            self.logger.error("The testcase executer could not be compiled!" " Aborting all tests!\n")
            sys.exit()
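The Popen followed by a bare os.wait() above also reaps any other child the process happens to have; a more self-contained equivalent of the compile step (a sketch, not the project's code) would use subprocess.run:

import subprocess

def compile_executer(work_dir, source, log_path):
    """Compile the testcase executer, appending compiler output to log_path."""
    with open(log_path, "a") as log:
        result = subprocess.run(
            ["g++", "-g", source, "-ldl", "-lrt", "-o", "testcase_executer.out"],
            stdout=log, stderr=log, cwd=work_dir)
    return result.returncode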
Example #29
def get_heroes_image():
    image_url = 'http://media.steampowered.com/apps/' \
                'dota2/images/heroes/{}_lg.png'
    with open('data.json') as f:
        data = json.load(f)
    for h in data:
        url = image_url.format(h)
        urlretrieve(url, get_path('res/heroes') +
                    '/{}.png'.format(h))
Example #30
File: nvidia.py Project: djpetti/stoplight
  def __init__(self, gpu_id):
    """
    Args:
      gpu_id: The numerical ID of the GPU, as listed by nvidia-smi -L. """
    self.__gpu_id = gpu_id

    self.__nvidia_smi = util.get_path("nvidia-smi")

    self.__gather_global_data()
Example #31
def main():
    spiders = [NameDictSpider, CNNameDictSpider,
               WinRateSpider, MatchUpsSpider, TeammatesSpider]
    original_cwd = os.getcwd()
    project_path = get_path('spider/dotaplus')
    os.chdir(project_path)
    raw_data = crawl(spiders)
    os.chdir(original_cwd)
    process_data(raw_data)
Example #32
def test_files(student_name, student_code):
    path = util.get_path(student_code)
    filename_certificate = util.get_certificate(student_code)
    filename_declaration = util.get_declaration(student_code)
    print('\n-----\nProcessing %d (%s)...' % (student_code, student_name))
    if (util.is_valid_file(path, filename_certificate)
            and util.is_valid_file(path, filename_declaration)):
        print(f"{bcolors.OKBLUE}OK files.{bcolors.ENDC}")
    return
Example #33
    def setup(self):
        cfg = WaldorfCfg(master_ip='192.168.5.190')
        cfg.debug = 0
        cfg.env_cfg.already_exist = 'remove'
        cfg.env_cfg.version_mismatch = 'remove'
        cfg.env_cfg.git_credential = open(
            get_path('.', _file=__file__) + '/credential', 'rb').read()
        cfg.env_cfg.default_timeout = 310
        cfg.result_timeout = 10
        cfg.retry_times = 5
        self.waldorf_client = WaldorfClient(cfg, limit=self.limit)

        pairs = [
            CmdPair(MajorCmd.CREATE_ENV,
                    args=['$HOME/Python/3.6.5/bin/python3']),
            CmdPair(MajorCmd.CHECK_PY_VER, pattern='3.6.5')
        ]

        suites = [
            SetupSuite(
                Suite([
                    CmdPair(MinorCmd.CREATE_SESS),
                    CmdPair(MinorCmd.SOURCE_ENV),
                    CmdPair(MinorCmd.RUN_CMD, args=['python', '>>>'])
                ], [
                    CmdPair(MinorCmd.RUN_CMD,
                            pattern='No module',
                            exist=False,
                            args=['import tank', '>>>']),
                    CmdPair(MinorCmd.RUN_CMD,
                            args=['tank.__version__', '>>>'],
                            pattern=tank.__version__)
                ], [
                    CmdPair(MinorCmd.RUN_CMD, args=['exit()']),
                    CmdPair(MinorCmd.CLOSE_SESS)
                ]),
                Suite([
                    CmdPair(MinorCmd.CREATE_SESS),
                    CmdPair(MinorCmd.SOURCE_ENV),
                    CmdPair(MinorCmd.RUN_CMD, args=['cd'])
                ], [
                    CmdPair(MinorCmd.GIT_CLONE,
                            args=[
                                'git clone '
                                'http://server.levelup.io/liyue/tank.git',
                                'http://server.levelup.io/liyue/tank.git'
                            ]),
                    CmdPair(MinorCmd.RUN_CMD, args=['cd tank']),
                    CmdPair(MinorCmd.RUN_CMD, args=['pip install -U .']),
                    CmdPair(MinorCmd.RUN_CMD, args=['cd ..']),
                    CmdPair(MinorCmd.RUN_CMD, args=['rm -rf tank'])
                ], [CmdPair(MinorCmd.CLOSE_SESS)]))
        ]
        resp = self.waldorf_client.get_env(self.name, pairs, suites)
        for hostname, r in resp:
            if r[0] < 0:
                raise Exception(hostname, r[1])
Example #34
 def __init__(self):
     # Inject environment variables from local file
     load_env()
     self.app_cache_dir = get_path("cache/app_cache").absolute().__str__()
     self.app_cache_default_timeout = 60
     self.tg_host = os.getenv("TG_HOST")
     self.tg_graph = os.getenv("TG_GRAPH")
     self.tg_username = os.getenv("TG_USERNAME")
     self.tg_password = os.getenv("TG_PASSWORD")
     self.tg_api_key = os.getenv("TG_API_KEY")
Example #35
    def __init__(self, filename, version=''):

        self.filename = util.get_path('./packages/' + filename)
        self.package_name = '' 
        self.activities = []
        if version:
            self.version = version
        else:
            self.version = os.path.basename(self.filename)
        self.parse_manifest()
Example #36
 def is_something_blocking(self, destination):
     """
     What about selfish creatures that don't care about other creatures? Must know what kind of obstacle is blocking.
     """
     path = get_path(self.parent.position.value, destination)
     hit_detector = shoot.MissileHitDetection(False, False)
     dungeon_level = self.parent.dungeon_level.value
     path_taken = hit_detector.get_path_taken(path, dungeon_level)
     return geometry.chess_distance(self.parent.position.value,
                                    destination) != len(path_taken)
Example #37
def crop_hero_template_img():
    w, h = Interface.W, Interface.H
    for c, rows in enumerate(Interface.HERO_NUM):
        for row, col_n in enumerate(rows):
            for col in range(col_n):
                hero_name = HERO_INDEX[str((c, row, col))]
                img = cv2.imread('res/origin/{}.png'.format(hero_name))
                crop_img = img[h - 20:h, int(w / 2 - 10):int(w / 2 + 10)]
                cv2.imwrite(
                    get_path('res/crop') + '/{}.png'.format(hero_name),
                    crop_img)
Example #38
def load(key, size, fallback=DEFAULT):
    x, y = size
    instance_path = _instance_logo_path(key)
    if not os.path.exists(instance_path):
        instance_path = util.get_path(*fallback)
    logo_image = Image.open(instance_path)
    if x is None:
        orig_x, orig_y = logo_image.size
        x = int(y * (float(orig_x) / float(orig_y)))
    logo_image.thumbnail((x, y), Image.ANTIALIAS)
    sio = StringIO.StringIO()
    logo_image.save(sio, 'PNG')
    return (instance_path, sio.getvalue())
Example #39
File: process_log.py Project: yujiex/ROCIS
def read_formatted():
    # df_lookup = pd.read_csv(os.getcwd() + '/input/log_rename.csv')
    # df_lookup.set_index('oldname', inplace=True)
    home_id_dict = util.get_homeid_dict()
    files = glob.glob(util.get_path('daily_reading', 'raw_data', 'all') + 'xlsx_formatted/*.xlsx')
    lastline_dict = \
        {'LCMP Log_Observation-Incident Report_V6.JJN.xlsx': 32,
         'Copy of D4-CMU-Daily Readings_RTto24jan2016.xlsx': 16,
         'D4-Hartkopf_Loftness-Daily Readings.xlsx': 14}
    sheets_dict = {'D4-Hartkopf_Loftness-Daily Readings.xlsx': [0, 1],
        'Copy of D4-CMU-Daily Readings_RTto24jan2016.xlsx': [0]}
    for f in files:
        sheetlist = [1]
        filename = f[f.rfind('/') + 1:]
        tokens = util.split_string([' ', '_', '.', '-'], filename)
        if filename in sheets_dict:
            sheetlist = sheets_dict[filename]
        home_id = 'UNKNOWN'
        for x in tokens:
            if x in home_id_dict:
                home_id = home_id_dict[x]
        for s in sheetlist:
            idx_lastline = 33
            df = pd.read_excel(f, sheetname=s)
            if filename in lastline_dict:
                idx_lastline = lastline_dict[filename]
            df2 = df.transpose().iloc[:, [1, 2, idx_lastline]]
            df2.dropna(subset=[idx_lastline], inplace=True)
            df2.rename(columns={1: 'date', 2: 'time', 33: 'activity'}, inplace=True)
            df2.drop(df2.index[0], axis=0, inplace=True)
            timestr = df2.ix[-1, 'date'].strftime('%m-%d-%Y')
            outfile = 'activity_{0}_{1}.csv'.format(home_id, timestr)
            print 'write to {0}'.format(outfile)
            df2.to_csv(util.get_path('daily_reading', 'activity_stamp',
                                    'all') + '{0}'.format(outfile),
                    index=False)
    return
Example #40
def tag_convnet_dropout(outputs, rng=None, **kwargs):
    from blocks.roles import has_roles, OUTPUT
    cnn_outputs = OrderedDict()
    for var in theano.gof.graph.ancestors(outputs):
        if (has_roles(var, [OUTPUT]) and util.annotated_by_a(
                util.get_convolution_classes(), var)):
            cnn_outputs.setdefault(util.get_path(var), []).append(var)
    unique_outputs = []
    for path, vars in cnn_outputs.items():
        vars = util.dedup(vars, equal=util.equal_computations)
        unique_outputs.append(util.the(vars))
    graph.add_transform(
        unique_outputs,
        graph.DropoutTransform("convnet_dropout", rng=rng),
        reason="regularization")
Example #41
def main():
  """ Initialize, generate test cases, finally make them. """

  # Create a logger and load its config
  logging.config.fileConfig(get_path('bin/logging.conf'))
  logger = logging.getLogger(__name__)

  # parse options
  opts = parse_arguments()
  work_dir = os.path.join(os.getcwd(), opts.work_dir)
  rootfs_dir = os.path.join(work_dir, 'rootfs')

  db_connection = MySQLdb.connect(host=opts.db_host, user=opts.db_user,
              passwd=opts.db_passwd, db=opts.db_name)
  db_connection.autocommit(True)
  # Create a fresh work directory
  if os.path.exists(work_dir):
    shutil.rmtree(work_dir)
  logger.info("Created a fresh directory to work in.")
  # 'makedirs' will create a multi-depth lib (both work_dir and rootfs_dir)
  os.makedirs(rootfs_dir)
 
  db = DbConnector(db_connection)
  tc_factory = TcFactory(work_dir)
  s_factory = SettingFactory(db)
  tcg = TcGenerator(db, tc_factory, s_factory, work_dir,
    batch_size=opts.jobs)

  function_records = namedtuple('Function', ['id', 'name', 'tcl', 'header',
      'number_of_parameters', 'c_types', 'signature', 'return_type'])
  functions = db.get_functions(function_records)
  all_testcases = []
  for function in functions: # 'function' is a namedtuple representing a function.
    tcg.load_function(function)
    while tcg.testcases_left():
      all_testcases += tcg.generate()
    print "Generated test cases for function: {0}".format(function.name)
  tcg.finalize_testcase_generation()
  print "Compiling the test cases (tail -f makefile.log in the working directory (probably 'tmp') for the progress)...\n"
  omk = OMK(work_dir)
  if omk.make_all(all_testcases, opts.jobs) != 0:
    print "ERROR: 'make' failed. Stopping slingshot..."
    sys.exit(1)
  print "\nRunning the test suite...\n"
  i386 = I386Emul(work_dir)
  i386.execute_tests(all_testcases)
Example #42
def reparse_link_texts(tree, target_id, target_govt_id, source_id=None, db=None):
    """ find links whose href is misrepresented by its text """
    """ ie, "section 2(e) and 3(b)(i)"" will be default only point to s 2 """

    """ TODO, slow as balls, instead, build a minimum tree from structure, search that.  should take
        impossibly small fraction of time """
    inserts = []
    db = db or get_db()
    with db.cursor(cursor_factory=extras.RealDictCursor, name="link_cursor") as cur:
        if source_id:
            cur.execute("""select * from id_lookup i join section_references s on i.govt_id = s.target_govt_id and source_document_id=%(source_id)s where parent_id = %(id)s;
                """, {'id': target_id, 'govt_id': target_govt_id, 'source_id': source_id})
        else:
            cur.execute("""select * from id_lookup i join section_references s on i.govt_id = s.target_govt_id where parent_id = %(id)s;
                """, {'id': target_id, 'govt_id': target_govt_id})
        refs = cur.fetchall()
        memo = {}
        if len(refs):
            current_app.logger.info('%d refs for id %d' % (len(refs), target_id))
            nodes_by_id = {x.attrib['id']: x for x in tree.findall('.//*[@id]')}

            for ref in refs:
                try:
                    paths = []
                    if (ref['target_govt_id'], ref['link_text']) in memo:
                        paths = memo[(ref['target_govt_id'], ref['link_text'])]

                    else:
                        nodes = decide_govt_or_path(tree, ref['target_govt_id'], ref['link_text'], nodes_by_id=nodes_by_id)
                        if len(nodes) > 1 or nodes[0] != nodes_by_id[ref['target_govt_id']]:
                            paths = [get_path(n) for n in nodes]
                    memo[(ref['target_govt_id'], ref['link_text'])] = paths
                    if len(paths):
                        for p in paths:
                            inserts.append(cur.mogrify("""INSERT INTO document_section_references (link_id, target_path, target_govt_id, target_document_id)
                                    VALUES (%(link_id)s, %(target_path)s, %(target_govt_id)s, %(target_document_id)s)""",
                                    {'link_id': ref['link_id'], 'target_path': p, 'target_govt_id': ref['target_govt_id'], 'target_document_id': target_id} ))
                    else:
                        inserts.append(cur.mogrify("""INSERT INTO document_section_references (link_id, target_path, target_govt_id, target_document_id)
                                    VALUES (%(link_id)s, %(target_path)s, %(target_govt_id)s, %(target_document_id)s)""",
                                     {'link_id': ref['link_id'], 'target_path': None, 'target_govt_id': ref['target_govt_id'], 'target_document_id': target_id} ))



                except Exception, e:
                    current_app.logger.debug(e)
Example #43
 def trigger(self, **kwargs):
     source_entity = kwargs[action.SOURCE_ENTITY]
     entities = [entity for entity in source_entity.vision.get_seen_entities()
                 if entity is not source_entity]
     if len(entities) <= 0:
         return
     random.shuffle(entities)
     missile_hit_detector = MissileHitDetection(passes_entity=True, passes_solid=False)
     dungeon_level = source_entity.dungeon_level.value
     zap_graphic = GraphicChar(None, colors.LIGHT_ORANGE, "*")
     for entity in entities:
         path = util.get_path(source_entity.position.value, entity.position.value)
         path = path[1:]
         new_path = missile_hit_detector.get_path_taken(path, dungeon_level)
         if len(path) == len(new_path):
             self.zap_path(new_path, source_entity)
             animation.animate_path(source_entity.game_state.value, path, zap_graphic)
             break
Example #44
def path_and_mtime(key, fallback=DEFAULT):
    """
    Return a tuple with the path to the instance's logo and
    the mtime (converted to an int).

    *key* (str)
        The key of the instance.
    *fallback* (tuple of str)
        A fallback path tuple to a logo if the instance
        doesn't have one.

    Returns:
       a (path (`str`), mtime (`int`)) tuple.
    """
    logo_path = _instance_logo_path(key)
    if not os.path.exists(logo_path):
        logo_path = util.get_path(*fallback)
    mtime = os.path.getmtime(logo_path)
    # strip the fraction to get full seconds
    mtime = int(mtime)
    return logo_path, mtime
Example #45
File: logo.py Project: alkadis/vcv
def path_and_mtime(entity, fallback=None):
    """
    Return a tuple with the path to the entity's logo and
    the mtime (converted to an int).

    *entity*
        Get path and mtime for this entity
    *fallback* (tuple of str)
        A fallback path tuple to a logo if the entity
        doesn't have one.

    Returns:
       a (path (`str`), mtime (`int`)) tuple.
    """
    if fallback is None:
        fallback = FALLBACK
    key = _entity_key(entity)
    logo_path = _logo_path(key)
    if not os.path.exists(logo_path):
        logo_path = util.get_path(*fallback)
    mtime = os.path.getmtime(logo_path)
    # strip the fraction to get full seconds
    mtime = int(mtime)
    return logo_path, mtime
Example #46
File: main.py Project: ballasn/tsa-rnn
def construct_monitors(algorithm, task, n_patches, x, x_shape,
                       graph, name, ram, model, cost,
                       n_spatial_dims, plot_url, patchmonitor_interval=100, **kwargs):
    location, scale, savings = util.get_recurrent_auxiliaries(
        "location scale savings".split(), graph, n_patches)

    channels = util.Channels()
    channels.extend(task.monitor_channels(graph))

    channels.append(util.named(savings.mean(), "savings.mean"))

    for variable_name in "location scale".split():
        variable = locals()[variable_name]
        channels.append(variable.mean(axis=0),
                        "%s.mean" % variable_name)
        channels.append(variable.var(axis=0),
                        "%s.variance" % variable_name)

    channels.append(algorithm.total_gradient_norm,
                    "total_gradient_norm")

    step_norms = util.Channels()
    step_norms.extend(util.named(l2_norm([algorithm.steps[param]]),
                                 "%s.step_norm" % name)
                      for name, param in model.get_parameter_dict().items())
    step_channels = step_norms.get_channels()

    #for activation in VariableFilter(roles=[OUTPUT])(graph.variables):
    #    quantity = activation.mean()
    #    quantity.name = "%s.mean" % util.get_path(activation)
    #    channels.append(quantity)

    data_independent_channels = util.Channels()
    for parameter in graph.parameters:
        if parameter.name in "gamma beta".split():
            quantity = parameter.mean()
            quantity.name = "%s.mean" % util.get_path(parameter)
            data_independent_channels.append(quantity)

    extensions = []

    extensions.append(TrainingDataMonitoring(
        step_channels, prefix="train", after_epoch=True))

    extensions.append(DataStreamMonitoring(data_independent_channels.get_channels(),
                                           data_stream=None, after_epoch=True))
    extensions.extend(DataStreamMonitoring((channels.get_channels() + [cost]),
                                           data_stream=task.get_stream(which, monitor=True),
                                           prefix=which, after_epoch=True)
                      for which in "train valid test".split())

    patchmonitor = None
    if n_spatial_dims == 2:
        patchmonitor_klass = PatchMonitoring
    elif n_spatial_dims == 3:
        patchmonitor_klass = VideoPatchMonitoring

    if patchmonitor_klass:
        patch = T.stack(*[
            ram.crop(x, x_shape, location[:, i, :], scale[:, i, :])
            for i in xrange(n_patches)])
        patch = patch.dimshuffle(1, 0, *range(2, patch.ndim))
        patch_extractor = theano.function([x, x_shape],
                                          [location, scale, patch])

        for which in "train valid".split():
            patchmonitor = patchmonitor_klass(
                save_to="%s_patches_%s" % (name, which),
                data_stream=task.get_stream(which, shuffle=False, num_examples=5),
                every_n_batches=patchmonitor_interval,
                extractor=patch_extractor,
                map_to_input_space=attention.static_map_to_input_space)
            patchmonitor.save_patches("patchmonitor_test.png")
            extensions.append(patchmonitor)

    if plot_url:
        plot_channels = []
        plot_channels.extend(task.plot_channels())
        plot_channels.append(["train_cost"])
        #plot_channels.append(["train_%s" % step_channel.name for step_channel in step_channels])

        from blocks.extras.extensions.plot import Plot
        extensions.append(Plot(name, channels=plot_channels,
                            after_epoch=True, server_url=plot_url))

    return extensions
Example #47
    return Response(), 200


@app.route("/model/train", methods=["POST"])
def retrain_model():
    pass


@app.route("/", methods=["GET"])
def welcome():
    return Response("Welcome to LunchBot! All your lunch are belong to us!"), 200


if __name__ == '__main__':

    loc = get_path(__file__) + '{0}'
    logger = get_default_root_logger(filename=loc.format('log/log.log'))
    get_canned_header(logger, 'LunchBot: Making Lunch Great Again!!!')

    rm = Recommender()

    logger.info("Attempting to load pre-trained model")
    model_location = loc.format('assets/model.pkl')
    if os.path.isfile(model_location):
        logger.info("Found pre-trained model at {}".format(model_location))
        rm.load_pre_trained_model(loc.format('assets/model.pkl'))
    else:
        logger.info("No trained model found, attempting to build model now...")

        data_location = loc.format("assets/data.csv")
        logger.info("Attempting to load data from {}".format(data_location))
Example #48
File: main.py Project: yingzha/tsa-rnn
def construct_monitors(
    algorithm,
    task,
    n_patches,
    x,
    x_uncentered,
    hs,
    graph,
    plot_url,
    name,
    ram,
    model,
    cost,
    n_spatial_dims,
    patchmonitor_interval=100,
    **kwargs
):
    location, scale, savings = util.get_recurrent_auxiliaries("location scale savings".split(), graph, n_patches)

    channels = util.Channels()
    channels.extend(task.monitor_channels(graph))
    for i in xrange(n_patches):
        channels.append(hs[:, i].mean(), "h%i.mean" % i)

    channels.append(util.named(savings.mean(), "savings.mean"))

    for variable_name in "location scale".split():
        variable = locals()[variable_name]
        channels.append(variable.var(axis=0).mean(), "%s.batch_variance" % variable_name)
        channels.append(variable.var(axis=1).mean(), "%s.time_variance" % variable_name)

    # step_norms = util.Channels()
    # step_norms.extend(util.named(l2_norm([algorithm.steps[param]]),
    #                             "%s.step_norm" % name)
    #                  for name, param in model.get_parameter_dict().items())
    # step_channels = step_norms.get_channels()

    for activation in VariableFilter(roles=[OUTPUT])(graph.variables):
        quantity = activation.mean()
        quantity.name = "%s.mean" % util.get_path(activation)
        channels.append(quantity)

    extensions = []

    # extensions.append(TrainingDataMonitoring(
    #    step_channels,
    #    prefix="train", after_epoch=True))

    extensions.extend(
        DataStreamMonitoring(
            (channels.get_channels() + [cost]), data_stream=task.get_stream(which), prefix=which, after_epoch=True
        )
        for which in "train valid test".split()
    )

    patchmonitor = None
    if n_spatial_dims == 2:
        patchmonitor_klass = PatchMonitoring
    elif n_spatial_dims == 3:
        patchmonitor_klass = VideoPatchMonitoring

    if patchmonitor_klass:
        # get patches from original (uncentered) images
        patch = T.stack(
            *[ram.attention.crop(x_uncentered, location[:, i, :], scale[:, i, :]) for i in xrange(n_patches)]
        )
        patch = patch.dimshuffle(1, 0, *range(2, patch.ndim))

        patchmonitor = patchmonitor_klass(
            task.get_stream("valid", SequentialScheme(5, 5)),
            every_n_batches=patchmonitor_interval,
            extractor=theano.function([x_uncentered], [location, scale, patch]),
            map_to_input_space=masonry.static_map_to_input_space,
        )
        patchmonitor.save_patches("test.png")
        extensions.append(patchmonitor)

    plot_channels = []
    plot_channels.extend(task.plot_channels())
    plot_channels.append(["train_cost"])
    # plot_channels.append(["train_%s" % step_channel.name for step_channel in step_channels])

    extensions.append(Plot(name, channels=plot_channels, after_epoch=True, server_url=plot_url))

    return extensions
Example #49
def construct_monitors(algorithm, task, model, graphs, outputs,
                       updates, monitor_options, n_spatial_dims,
                       hyperparameters, **kwargs):
    from blocks.extensions.monitoring import TrainingDataMonitoring, DataStreamMonitoring

    extensions = []

    if "steps" in monitor_options:
        step_channels = []
        step_channels.extend([
            algorithm.steps[param].norm(2).copy(name="step_norm:%s" % name)
            for name, param in model.get_parameter_dict().items()])
        step_channels.append(algorithm.total_step_norm.copy(name="total_step_norm"))
        step_channels.append(algorithm.total_gradient_norm.copy(name="total_gradient_norm"))
        logger.warning("constructing training data monitor")
        extensions.append(TrainingDataMonitoring(
            step_channels, prefix="train", after_epoch=True))

    if "parameters" in monitor_options:
        data_independent_channels = []
        for parameter in graphs["train"].parameters:
            if parameter.name in "gamma beta W b".split():
                quantity = parameter.norm(2)
                quantity.name = "parameter.norm:%s" % util.get_path(parameter)
                data_independent_channels.append(quantity)
        for key in "location_std scale_std".split():
            data_independent_channels.append(hyperparameters[key].copy(name="parameter:%s" % key))
        extensions.append(DataStreamMonitoring(
            data_independent_channels, data_stream=None, after_epoch=True))

    for which_set in "train test".split():
        channels = []
        channels.extend(outputs[which_set][key] for key in
                        "cost emitter_cost excursion_cost".split())
        channels.extend(outputs[which_set][key] for key in
                        task.monitor_outputs())
        channels.append(outputs[which_set]["savings"]
                        .mean().copy(name="mean_savings"))

        if "theta" in monitor_options:
            for key in "raw_location raw_scale".split():
                for stat in "mean var".split():
                    channels.append(getattr(outputs[which_set][key], stat)(axis=1)
                                    .copy(name="%s.%s" % (key, stat)))
        if which_set == "train":
            if "activations" in monitor_options:
                from blocks.roles import has_roles, OUTPUT
                cnn_outputs = OrderedDict()
                for var in theano.gof.graph.ancestors(graphs[which_set].outputs):
                    if (has_roles(var, [OUTPUT]) and util.annotated_by_a(
                            util.get_convolution_classes(), var)):
                        cnn_outputs.setdefault(util.get_path(var), []).append(var)
                for path, vars in cnn_outputs.items():
                    vars = util.dedup(vars, equal=util.equal_computations)
                    for i, var in enumerate(vars):
                        channels.append(var.mean().copy(
                            name="activation[%i].mean:%s" % (i, path)))

        if "batch_normalization" in monitor_options:
            errors = []
            for population_stat, update in updates[which_set]:
                if population_stat.name.startswith("population"):
                    # this is a super robust way to get the
                    # corresponding batch statistic from the
                    # exponential moving average expression
                    batch_stat = update.owner.inputs[1].owner.inputs[1]
                    errors.append(((population_stat - batch_stat)**2).mean())
            if errors:
                channels.append(T.stack(errors).mean().copy(name="population_statistic_mse"))

        logger.warning("constructing %s monitor" % which_set)
        extensions.append(DataStreamMonitoring(
            channels, prefix=which_set, after_epoch=True,
            data_stream=task.get_stream(which_set, monitor=True)))

    return extensions
Example #50
    def __gen_cpp(self):
        """ Create cpp file for this testcase """

        # Template for testcase cpp files
        template = get_path("bin/tc_cpp_template")
        # file name
        fn = os.path.join(self.__work_dir, "{0}.cpp".format(self.__name))

        # Construct setting values
        s_call = ""
        commit_call = ""
        cleanup_call = ""
        temp = ""

        for i, s in enumerate(self.__settings):
            s_name = s.get_name()
            dt = s.get_datatype()
            setting_type = dt['type']
            # Cast to setting type
            s_call += "{0}* temp{1} = ({0}*) {2}_access();\n".format(
                    setting_type, i, s_name)
            if s.is_pointer or s.is_jmp_buf:
                # Cast to function parameter type
                s_call += "{0}* tmp{1} = ({0}*) temp{1};\n".format(
                        self.__c_types[i], i)
                # Still got a ptr to a ptr, we have to dereference in function
                # call
                temp += ("*tmp{0}, ".format(i))
            else:
                # The setting cast of the ptr to a non ptr type results in a
                # ptr of the setting type. If this setting type is 'smaller'
                # than the function parameter type (i.e. setting type is short,
                # function parameter type is int), A cast of the ptr could
                # introduce non-determinism, because the second half of the
                # memory that is going to be dereferenced is not controlled by
                # the setting. Above all, the intended value would not be
                # tested.
                # So we have to dereference the ptr first and only cast the
                # value of the setting to the function parameter typ.
                s_call += "{0} tmp{1} = ({0}) *temp{1};\n".format(
                        self.__c_types[i], i)
                # ptr already dereferenced before the cast, use the value
                temp += ("tmp{0}, ".format(i))

            commit_call += "{0}_commit();\n".format(s_name)
            cleanup_call += "{0}_cleanup();\n".format(s_name)

        temp = re.sub(r', $', '', temp)
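
        # Illustrative example (hypothetical setting name and types) of the
        # snippets generated for one non-pointer setting "timeout" stored as
        # a short but passed to the function under test as an int:
        #   s_call       += "short* temp0 = (short*) timeout_access();\n"
        #                   "int tmp0 = (int) *temp0;\n"
        #   temp         += "tmp0, "   (trailing ", " stripped above)
        #   commit_call  += "timeout_commit();\n"
        #   cleanup_call += "timeout_cleanup();\n"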

        # These types cannot be used as return values.
        can_not_be_used_as_return = ['void', 'Null', 'div_t', 'ldiv_t']

        # Add testcase specific values to the template
        with open(fn, "w") as f:
            for line in open(template):
                # Add include of function header
                line = line.replace('FUNC_HEADER', '{}'.format(
                    self.__fun_header))
                # Add include of testcase header
                line = line.replace('HEADER_NAME', '{}'.format(self.__name))
                line = line.replace('MAXP', '{}'.format(len(self.__settings)))
                # Add type of return value of the function under test
                if self.__ret_val not in can_not_be_used_as_return:
                    line = line.replace('RETURN_VAL', '{} rval;'.format(
                        self.__ret_val))
                    line = line.replace('VALUE_RETURN', 'rval = ')
                    line = line.replace('OUTPUT', 'std::cout << rval <<'
                        ' std::endl;\n')
                else:
                    line = line.replace('RETURN_VAL', '')
                    line = line.replace('VALUE_RETURN', '')
                    line = line.replace('OUTPUT', '\n')

                # Add setting casting
                line = line.replace('S_CALLS', '{}'.format(s_call))
                # Add commit call
                line = line.replace('COMMIT_CALLS', '{}'.format(commit_call))
                # Add cleanup call
                line = line.replace('CLEANUP_CALLS', '{}'.format(cleanup_call))

                # Add function call: name of function
                line = line.replace('FUN_NAME', '{}'.format(
                    self.__function_name))
                # Add function call: parameters
                line = line.replace('TEMP', '{}'.format(temp))
                # Write out the line with all markers replaced
                f.write(line)
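
The loop above fills the template by plain per-line string replacement. A minimal, runnable sketch of the same idea (the template text, placeholder values, and output file name below are invented for illustration and are not the contents of bin/tc_cpp_template):

# Minimal sketch of the placeholder-replacement idea used in __gen_cpp.
template_lines = [
    '#include "FUNC_HEADER"',
    'int main() { RETURN_VAL VALUE_RETURN FUN_NAME(TEMP); OUTPUT return 0; }',
]
replacements = {
    "FUNC_HEADER": "my_function.h",
    "RETURN_VAL": "int rval;",
    "VALUE_RETURN": "rval = ",
    "FUN_NAME": "my_function",
    "TEMP": "tmp0",
    "OUTPUT": "std::cout << rval << std::endl;",
}
with open("example_tc.cpp", "w") as f:
    for line in template_lines:
        for marker, value in replacements.items():
            line = line.replace(marker, value)
        f.write(line + "\n")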
예제 #51
0
def construct_monitors(algorithm, task, model, graphs, outputs,
                       updates, monitor_options, n_spatial_dims,
                       plot_url, hyperparameters,
                       patchmonitor_interval, **kwargs):
    from blocks.extensions.monitoring import TrainingDataMonitoring, DataStreamMonitoring

    extensions = []

    if "steps" in monitor_options:
        step_channels = []
        step_channels.extend([
            algorithm.steps[param].norm(2).copy(name="step_norm:%s" % name)
            for name, param in model.get_parameter_dict().items()])
        step_channels.append(algorithm.total_step_norm.copy(name="total_step_norm"))
        step_channels.append(algorithm.total_gradient_norm.copy(name="total_gradient_norm"))

        from extensions import Compressor
        for step_rule in algorithm.step_rule.components:
            if isinstance(step_rule, Compressor):
                step_channels.append(step_rule.norm.copy(name="compressor.norm"))
                step_channels.append(step_rule.newnorm.copy(name="compressor.newnorm"))
                step_channels.append(step_rule.median.copy(name="compressor.median"))
                step_channels.append(step_rule.ratio.copy(name="compressor.ratio"))

        step_channels.extend(outputs["train"][key] for key in
                             "cost emitter_cost excursion_cost cross_entropy error_rate".split())

        step_channels.extend(util.uniqueify_names_last_resort(util.dedup(
            (var.mean().copy(name="bn_stat:%s" % util.get_path(var))
             for var in graph.deep_ancestors([outputs["train"]["cost"]])
             if hasattr(var.tag, "batch_normalization_brick")),
            equal=util.equal_computations)))

        logger.warning("constructing training data monitor")
        extensions.append(TrainingDataMonitoring(
            step_channels, prefix="iteration", after_batch=True))

    if "parameters" in monitor_options:
        data_independent_channels = []
        for parameter in graphs["train"].parameters:
            if parameter.name in "gamma beta W b".split():
                quantity = parameter.norm(2)
                quantity.name = "parameter.norm:%s" % util.get_path(parameter)
                data_independent_channels.append(quantity)
        for key in "location_std scale_std".split():
            data_independent_channels.append(hyperparameters[key].copy(name="parameter:%s" % key))
        extensions.append(DataStreamMonitoring(
            data_independent_channels, data_stream=None, after_epoch=True))

    for which_set in "train valid test".split():
        channels = []
        channels.extend(outputs[which_set][key] for key in
                        "cost emitter_cost excursion_cost".split())
        channels.extend(outputs[which_set][key] for key in
                        task.monitor_outputs())
        channels.append(outputs[which_set]["savings"]
                        .mean().copy(name="mean_savings"))

        if "theta" in monitor_options:
            for key in "true_scale raw_location raw_scale".split():
                for stat in "mean var".split():
                    channels.append(getattr(outputs[which_set][key], stat)(axis=1)
                                    .copy(name="%s.%s" % (key, stat)))
        if which_set == "train":
            if "activations" in monitor_options:
                from blocks.roles import has_roles, OUTPUT
                cnn_outputs = OrderedDict()
                for var in theano.gof.graph.ancestors(graphs[which_set].outputs):
                    if (has_roles(var, [OUTPUT]) and util.annotated_by_a(
                            util.get_convolution_classes(), var)):
                        cnn_outputs.setdefault(util.get_path(var), []).append(var)
                for path, vars in cnn_outputs.items():
                    vars = util.dedup(vars, equal=util.equal_computations)
                    for i, var in enumerate(vars):
                        channels.append(var.mean().copy(
                            name="activation[%i].mean:%s" % (i, path)))

        if "batch_normalization" in monitor_options:
            errors = []
            for population_stat, update in updates[which_set]:
                if population_stat.name.startswith("population"):
                    # NOTE: fragile graph surgery. This reaches into the
                    # expression for the exponential-moving-average update
                    # (assumed to look like decay * population_stat +
                    # (1 - decay) * batch_stat) to recover the corresponding
                    # batch statistic, and will break if that form changes.
                    batch_stat = update.owner.inputs[1].owner.inputs[1]
                    errors.append(((population_stat - batch_stat)**2).mean())
            if errors:
                channels.append(T.stack(errors).mean().copy(name="population_statistic_mse"))

        logger.warning("constructing %s monitor" % which_set)
        extensions.append(DataStreamMonitoring(
            channels, prefix=which_set, after_epoch=True,
            data_stream=task.get_stream(which_set, monitor=True)))

    if "patches" in monitor_options:
        from patchmonitor import PatchMonitoring, VideoPatchMonitoring

        patchmonitor = None
        patchmonitor_klass = None
        if n_spatial_dims == 2:
            patchmonitor_klass = PatchMonitoring
        elif n_spatial_dims == 3:
            patchmonitor_klass = VideoPatchMonitoring

        if patchmonitor_klass:
            for which in "train valid".split():
                patch = outputs[which]["patch"]
                patch = patch.dimshuffle(1, 0, *range(2, patch.ndim))
                patch_extractor = theano.function(
                    [outputs[which][key] for key in "x x_shape".split()],
                    [outputs[which][key] for key in "raw_location raw_scale".split()] + [patch])

                patchmonitor = patchmonitor_klass(
                    save_to="%s_patches_%s" % (hyperparameters["name"], which),
                    data_stream=task.get_stream(which, shuffle=False, num_examples=10),
                    every_n_batches=patchmonitor_interval,
                    extractor=patch_extractor,
                    map_to_input_space=attention.static_map_to_input_space)
                patchmonitor.save_patches("patchmonitor_test.png")
                extensions.append(patchmonitor)

    if plot_url:
        plot_channels = []
        plot_channels.extend(task.plot_channels())
        plot_channels.append(["train_cost"])
        #plot_channels.append(["train_%s" % step_channel.name for step_channel in step_channels])

        from blocks.extras.extensions.plot import Plot
        extensions.append(Plot(hyperparameters["name"], channels=plot_channels,
                               after_epoch=True, server_url=plot_url))

    return extensions
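
The extension list returned above is meant to be attached to a Blocks main loop together with the training algorithm and data stream. A rough usage sketch (algorithm, task, model, graphs, outputs, updates, and hyperparameters are assumed to come from the surrounding training script; the option values are arbitrary):

from blocks.main_loop import MainLoop
from blocks.extensions import FinishAfter, Printing

extensions = construct_monitors(
    algorithm=algorithm, task=task, model=model, graphs=graphs,
    outputs=outputs, updates=updates,
    monitor_options=["steps", "parameters"],
    n_spatial_dims=2, plot_url=None, hyperparameters=hyperparameters,
    patchmonitor_interval=100)

main_loop = MainLoop(
    algorithm=algorithm,
    data_stream=task.get_stream("train"),
    model=model,
    extensions=extensions + [FinishAfter(after_n_epochs=10), Printing()])
main_loop.run()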
예제 #52
0
    def __init__(self):

        self.loc = get_path(__file__) + '/../{0}'
        self.logger = get_logger(__name__)
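
Here get_path(__file__) appears to return the directory containing the current file, so self.loc is a format string for building paths relative to its parent directory. A small sketch of how such a format string would be used, under that assumption (the relative path 'config/app.yaml' is made up):

import os

# Assumed behaviour of get_path: directory containing the given file.
loc = os.path.dirname(os.path.abspath(__file__)) + '/../{0}'
config_file = loc.format('config/app.yaml')
print(config_file)  # e.g. /path/to/project/config/app.yaml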
예제 #53
0
    def test_get_horizontal_with_same_start_and_destination_is_length_one(self):
        path = get_path((2, 4), (2, 4))
        self.assertEqual(len(path), 1)
        self.assertIn((2, 4), path)
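
The test above only pins down the degenerate case: the path from a cell to itself has length one and contains that cell. A minimal sketch of a horizontal/vertical grid get_path that satisfies this test (the real implementation under test may differ):

def get_path(start, destination):
    """Inclusive straight-line path between two grid cells sharing a row or column."""
    (r1, c1), (r2, c2) = start, destination
    if r1 == r2:
        step = 1 if c2 >= c1 else -1
        return [(r1, c) for c in range(c1, c2 + step, step)]
    if c1 == c2:
        step = 1 if r2 >= r1 else -1
        return [(r, c1) for r in range(r1, r2 + step, step)]
    raise ValueError("only horizontal or vertical paths are supported")

assert get_path((2, 4), (2, 4)) == [(2, 4)]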