Example #1
def main(args):
    config = load_config(args)
    global_eval_config = config["eval_params"]
    models, model_names = config_modelloader(config, load_pretrain=True)

    converted_models = [BoundSequential.convert(model) for model in models]

    robust_errs = []
    errs = []
    for model, model_id, model_config in zip(converted_models, model_names,
                                             config["models"]):
        model = model.cuda()

        # make a copy of global training config, and update per-model config
        eval_config = copy.deepcopy(global_eval_config)
        if "eval_params" in model_config:
            eval_config.update(model_config["eval_params"])

        # read training parameters from config file
        method = eval_config["method"]
        verbose = eval_config["verbose"]
        eps = eval_config["epsilon"]
        # parameters specific to a training method
        method_param = eval_config["method_params"]
        norm = float(eval_config["norm"])
        train_data, test_data = config_dataloader(
            config, **eval_config["loader_params"])

        model_name = get_path(config, model_id, "model", load=False)
        print(model_name)
        model_log = get_path(config, model_id, "eval_log")
        logger = Logger(open(model_log, "w"))
        logger.log("evaluation configurations:", eval_config)

        logger.log("Evaluating...")
        # evaluate
        robust_err, err = Train(model, model_id, 0, test_data, eps, eps, eps,
                                norm, logger, verbose, False, None, method,
                                **method_param)
        robust_errs.append(robust_err)
        errs.append(err)

    print(
        'model robust errors (for robustly trained models, not valid for naturally trained models):'
    )
    print(robust_errs)
    robust_errs = np.array(robust_errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(
        np.min(robust_errs), np.max(robust_errs), np.median(robust_errs),
        np.mean(robust_errs)))
    print('clean errors for models with min, max and median robust errors')
    i_min = np.argmin(robust_errs)
    i_max = np.argmax(robust_errs)
    i_median = np.argsort(robust_errs)[len(robust_errs) // 2]
    print('for min: {:.4f}, for max: {:.4f}, for median: {:.4f}'.format(
        errs[i_min], errs[i_max], errs[i_median]))
    print('model clean errors:')
    print(errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(
        np.min(errs), np.max(errs), np.median(errs), np.mean(errs)))
Example #2
def read_dict_info(conf, generate_api=True):
    """Parse dictionary meta data from the dictionary source and extract
    information about all released dictionaries. Return a list of dictionaries
    (Dictionary objects). generate_api is used to emit a message which is only
    useful if the API is generated."""
    dictionaries = []
    for dict_source in ['crafted', 'generated']:
        dict_source = get_path(conf[dict_source])
        print("Parsing meta data for all dictionaries from", dict_source)
        dictionaries.extend(metadata.get_meta_from_xml(dict_source))

    release_path = get_path(conf['release'])
    print("Parsing release information from", release_path)
    release_files = releases.get_all_downloads(release_path)
    for dict in dictionaries:
        name = dict.get_name()
        if not name in release_files:
            if generate_api:
                print("Skipping %s, no releases found." % name)
            continue
        try:
            version = releases.get_latest_version(release_files[name])
        except releases.ReleaseError as e:  # add file name to error
            raise releases.ReleaseError(list(e.args) + [name])
        for full_file, format in release_files[name][version]:
            dict.add_download(dictionary.mklink(full_file, format, version))
    return dictionaries
Example #3
    def set_flask_app(self):
        import config as conf

        self.app = Flask(self.config.NAME,
                         template_folder=conf.get_path("app_web", "templates"),
                         static_folder=conf.get_path("app_web", "static"))
        self.app.config.from_object(self.config)
        self.app.app_context().push()
Example #4
def create_blog(cfg):
    blogname = cfg["blog.title"]
    templatedir = config.get_path(cfg, "templates.path")
    templates = read_templates(templatedir)
    articledir = config.get_path(cfg, "articles.path")
    articles = read_articles(articledir)
    mmd = create_mmd(cfg)
    blog = Blog(blogname, templates, mmd)
    for article in articles.values():
        blog.load_post(article)
    return blog
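
Every example on this page funnels filesystem lookups through a project-specific get_path helper, and Example #4 shows the dotted-key flavor. As a point of reference, here is a minimal sketch of what such a helper might look like; since Example #4 indexes cfg with dotted keys like "blog.title", the sketch assumes a flat mapping keyed the same way (the names cfg and key are illustrative, not taken from any project above):

import os

def get_path(cfg, key):
    # Hypothetical helper: look up a dotted key such as "templates.path"
    # in a flat config mapping and expand "~" in the stored path.
    return os.path.expanduser(cfg[key])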
Example #5
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                img_uri = urllib.unquote(params[5])
                avatar_file = os.path.join(
                    config.get_path("avatar"),
                    hashlib.new("sha1", img_uri).hexdigest())
                avatar_path = avatar_file
                th = threading.Thread(target=save_file_proc,
                                      args=(img_uri, avatar_path))
                th.start()
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert(unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #6
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                avatar_file = os.path.join(config.get_path("avatar"), urllib.unquote(params[5]))
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert(unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #7
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                avatar_file = os.path.join(config.get_path("avatar"),
                                           urllib.unquote(params[5]))
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert(unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
Example #8
def crack_system(params):
    if params[1] == 'notify':
        type = urllib.unquote(params[2])
        summary = urllib.unquote(params[3])
        body = urllib.unquote(params[4])
        if type == 'content':
            try:
                img_uri = urllib.unquote(params[5])
                avatar_file = os.path.join(config.get_path("avatar"), hashlib.new("sha1", img_uri).hexdigest())
                avatar_path = avatar_file
                th = threading.Thread(target=save_file_proc,
                                      args=(img_uri, avatar_path))
                th.start()
            except:
                avatar_file = None
            do_notify(summary, body, avatar_file)
        elif type == 'count':
            notify.update(summary, body)
            notify.show()
    elif params[1] == 'unread_alert':
        unread_count = int(urllib.unquote(params[2]))
        app.unread_alert(unread_count)
    elif params[1] == 'load_settings':
        settings = json.loads(urllib.unquote(params[2]))
        config.load_settings(settings)
        app.apply_settings()
    elif params[1] == 'sign_in':
        app.on_sign_in()
    elif params[1] == 'sign_out':
        app.on_sign_out()
    elif params[1] == 'quit':
        app.quit()
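
In Examples #5 and #8 the avatar cache filename is the SHA-1 hex digest of the image URI, so each URI maps to one stable file name. A small sketch of the same naming scheme for Python 3, where hashlib only accepts bytes (the function name is illustrative):

import hashlib

def avatar_cache_name(img_uri):
    # SHA-1 here is a stable cache key, not a security measure; the URI
    # must be encoded to bytes before hashing on Python 3.
    return hashlib.sha1(img_uri.encode('utf-8')).hexdigest()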
Example #9
def main(args):
    parser = argparse.ArgumentParser(description='FreeDict API generator')
    parser.add_argument(
        "-n",
        "--need-update",
        dest="check_for_unreleased_dicts",
        action="store_true",
        default=False,
        help=
        "check for unreleased dictionaries instead of generating the API file")
    parser.add_argument(
        '-p',
        "--pre-exec-script",
        dest="prexec",
        metavar="PATH",
        help=
        ('script/command to execute before this script, e.g. to set up a sshfs '
         'connection to a remote server, or to invoke rsync.'))
    parser.add_argument(
        '-o',
        "--post-exec-script",
        dest="postexc",
        metavar="PATH",
        help=("script/command to execute after this script is done, e.g. to "
              "umount mounted volumes."))

    args = parser.parse_args(args[1:])
    conf = config.discover_and_load()

    exec_or_fail(args.prexec)  # mount / synchronize release files

    dictionaries = read_dict_info(conf, not args.check_for_unreleased_dicts)

    if args.check_for_unreleased_dicts:
        outdated = find_outdated_releases(dictionaries)
        if not outdated:
            print("Everything up-to-date.")
        else:
            print("\nName      Source Version    Release Version")
            print("-------   ---------------   --------------------------")
            for data in sorted(outdated, key=lambda x: x[0]):
                name, v1, v2 = [str(e if e else 'unknown') for e in data]
                print('{}   {:<15}   {:<15}'.format(name, v1, v2))
    else:
        # remove dictionaries without download links
        dictionaries = sorted(
            (d for d in dictionaries if d.get_downloads() != []),
            key=lambda entry: entry.get_name())
        api_path = config.get_path(conf['DEFAULT'], key='api_output_path')
        if not api_path == 'freedict-database.xml' and not os.path.exists(
                os.path.dirname(api_path)):
            os.makedirs(os.path.dirname(api_path))
        print("Writing API file to", api_path)
        xmlhandlers.write_freedict_database(api_path, dictionaries)

    # if the files had been mounted with sshfs, it's a good idea to give it some
    # time to synchronize its state, otherwise umounting fails
    time.sleep(2)
    exec_or_fail(args.postexc)  # umount or rsync files, if required
Example #10
    def __init__(self, parentWidget):
        WebKit.WebView.__init__(self)
        self.load_finish_flag = False
        self.set_property('can-focus', True)
        self.set_property('can-default', True)
        self.set_full_content_zoom(1)
        self.parentWidget = parentWidget

        settings = self.get_settings()
        try:
            settings.set_property('enable-universal-access-from-file-uris',
                                  True)
            settings.set_property('javascript-can-access-clipboard', True)
            settings.set_property('enable-default-context-menu', True)
            settings.set_property('enable-page-cache', True)
            settings.set_property('tab-key-cycles-through-elements', True)
            settings.set_property('enable-file-access-from-file-uris', True)
            settings.set_property('enable-spell-checking', False)
            settings.set_property('enable-caret-browsing', False)
            settings.set_property('enable-developer-extras',
                                  config.ENABLE_INSPECTOR)
            try:
                # Since 1.7.5
                settings.set_property('enable-accelerated-compositing', True)
            except TypeError:
                pass
        except:
            print 'Error: settings property was not set.'

        WebKit.set_web_database_directory_path(config.get_path("db"))
        WebKit.set_default_web_database_quota(1024**3L)

        ## bind events
        self.connect('navigation-requested', self.on_navigation_requested)
        self.connect('new-window-policy-decision-requested',
                     self.on_new_window_requested)
        self.connect('script-alert', self.on_script_alert)
        self.connect('load-finished', self.on_load_finish)
        self.connect("hovering-over-link", self.on_over_link)
        #       self.connect('drag_data_received', self.on_drag_data_received)
        #       self.connect('drag_motion', self.on_drag_motion)
        #       self.connect('drag_drop', self.on_drag_drop)
        #       # @TODO DND for gir
        #       TARGET_TYPE_URI_LIST = 80
        #       dnd_list = [ ( 'text/uri-list', 0, TARGET_TYPE_URI_LIST ) ]
        #       te = Gtk.TargetEntry.new(
        #           'text/uri-list', 0, TARGET_TYPE_URI_LIST)
        #       self.drag_dest_set( Gtk.DestDefaults.ALL,
        #                 [te], Gdk.DragAction.COPY)

        if config.ENABLE_INSPECTOR:
            from inspector import HermelinInspector
            HermelinInspector(self.get_inspector())

        templatefile = utils.get_ui_object(config.TEMPLATE)
        template = open(templatefile, 'rb').read()
        self.load_string(template, 'text/html', 'UTF-8',
                         'file://' + templatefile)
Example #11
def main():
    args = setup()
    try:  # load configuration
        conf = config.discover_and_load()
    except config.ConfigurationError as e:
        print(e)
        sys.exit(42)

    if args.print_api_path:
        print(config.get_path(conf['DEFAULT'], key='api_output_path'))
        sys.exit(0)
    access_method = RsyncFileAccess()
    if conf['DEFAULT']['file_access_via'] == 'sshfs':
        access_method = SshfsAccess()

    release_directory = config.get_path(conf['release'])
    if not os.path.exists(release_directory):
        try:
            os.makedirs(release_directory)
        except OSError:
            # if the file does exist, but the fuse endpoint is _not_ connected,
            # we could try running fusermount -u:
            os.system('fusermount -u "%s"' % release_directory)

    if args.make_available:
        for section in ('release', 'generated'):
            if conf[section].getboolean('skip'):
                print("Skipping", section)
                continue
            print('Making files for "%s" available...' % section)
            options = conf[section]
            target_path = config.get_path(options)
            access_method.make_avalailable(options['user'], options['server'],
                                           options['remote_path'], target_path)
    elif args.umount:
        for section in ('generated', 'release'):
            if conf[section].getboolean('skip'):
                print("Skipping", section)
                continue
            target_path = config.get_path(conf[section])
            try:
                access_method.make_unavailable(target_path)
            except OSError as e:
                print(e.args[0])
                continue
Example #12
 def init_db(self):
     self.conn = sqlite3.connect(config.get_path(self.filename))
     self.cur = self.conn.cursor()
     self.cur.execute('PRAGMA journal_mode=wal')
     self.cur.execute('''CREATE TABLE IF NOT EXISTS tweets(
         tweet_id INT,
         tweet TEXT,
         username TEXT,
         PRIMARY KEY (tweet_id) ON CONFLICT IGNORE
     ) WITHOUT ROWID;''')
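
Because tweet_id is declared PRIMARY KEY ... ON CONFLICT IGNORE, replaying an already-stored tweet is a silent no-op. A hypothetical companion method (save_tweet is not part of the original class) might look like:

 def save_tweet(self, tweet_id, tweet, username):
     # Duplicate tweet_ids are dropped by SQLite thanks to the
     # ON CONFLICT IGNORE clause on the primary key.
     self.cur.execute('INSERT INTO tweets VALUES (?, ?, ?)',
                      (tweet_id, tweet, username))
     self.conn.commit()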
Example #13
def get_extra_exts():
    import glob
    exts = []
    files = glob.glob(config.get_path("ext") + '/*')
    ext_dirs = filter(lambda x: os.path.isdir(x), files)
    for dir in ext_dirs:
        ext_js = os.path.join(dir, 'entry.js')
        if os.path.exists(ext_js):
            exts.append('file://%s' % ext_js)
    return exts
Example #14
def get_extra_exts():
    import glob
    exts = []
    files = glob.glob(config.get_path("ext") + '/*')
    ext_dirs = filter(lambda x: os.path.isdir(x), files)
    for dir in ext_dirs:
        ext_js = os.path.join(dir, 'entry.js')
        if os.path.exists(ext_js):
            exts.append('file://%s' % ext_js)
    return exts
Example #15
def get_extra_themes():
    import glob
    themes = []
    files = glob.glob(config.get_path("theme") + '/*')
    theme_dirs = filter(lambda x: os.path.isdir(x), files)
    for dir in theme_dirs:
        info_file = os.path.join(dir, 'info.json')
        style_file = os.path.join(dir, 'style.css')
        if os.path.exists(info_file) and os.path.exists(style_file):
            themes.append('file://%s' % dir)
    return themes
Example #16
def get_extra_themes():
    import glob
    themes = []
    files = glob.glob(config.get_path("theme") + '/*')
    theme_dirs = filter(lambda x: os.path.isdir(x), files)
    for dir in theme_dirs:
        info_file = os.path.join(dir, 'info.json')
        style_file = os.path.join(dir, 'style.css')
        if os.path.exists(info_file) and os.path.exists(style_file):
            themes.append('file://%s' % dir)
    return themes
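
Examples #13 through #16 share one pattern: glob a configured directory, keep only subdirectories, then test for a marker file. A compact sketch of the shared step; a list comprehension makes the filter step explicit and behaves identically on Python 2 and 3, where filter() is lazy:

import glob
import os

def _subdirs(root):
    # Immediate subdirectories of a configured path, as used by
    # get_extra_exts ("ext") and get_extra_themes ("theme") above.
    return [p for p in glob.glob(root + '/*') if os.path.isdir(p)]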
Example #17
    def __init__(self, parentWidget):
        WebKit.WebView.__init__(self)
        self.load_finish_flag = False
        self.set_property('can-focus', True)
        self.set_property('can-default', True)
        self.set_full_content_zoom(1)
        self.parentWidget = parentWidget

        settings = self.get_settings()
        try:
            settings.set_property('enable-universal-access-from-file-uris', True)
            settings.set_property('javascript-can-access-clipboard', True)
            settings.set_property('enable-default-context-menu', True)
            settings.set_property('enable-page-cache', True)
            settings.set_property('tab-key-cycles-through-elements', True)
            settings.set_property('enable-file-access-from-file-uris', True)
            settings.set_property('enable-spell-checking', False)
            settings.set_property('enable-caret-browsing', False)
            settings.set_property('enable-developer-extras', config.ENABLE_INSPECTOR)
            try:
                # Since 1.7.5
                settings.set_property('enable-accelerated-compositing', True)
            except TypeError:
                pass
        except:
            print 'Error: settings property was not set.'

        WebKit.set_web_database_directory_path(config.get_path("db"))
        WebKit.set_default_web_database_quota(1024**3L)
        
        ## bind events
        self.connect('navigation-requested', self.on_navigation_requested)
        self.connect('new-window-policy-decision-requested',
                     self.on_new_window_requested)
        self.connect('script-alert', self.on_script_alert)
        self.connect('load-finished', self.on_load_finish)
        self.connect("hovering-over-link", self.on_over_link)
#       self.connect('drag_data_received', self.on_drag_data_received)
#       self.connect('drag_motion', self.on_drag_motion)
#       self.connect('drag_drop', self.on_drag_drop)
#       # @TODO DND for gir
#       TARGET_TYPE_URI_LIST = 80
#       dnd_list = [ ( 'text/uri-list', 0, TARGET_TYPE_URI_LIST ) ]
#       te = Gtk.TargetEntry.new(
#           'text/uri-list', 0, TARGET_TYPE_URI_LIST)
#       self.drag_dest_set( Gtk.DestDefaults.ALL,
#                 [te], Gdk.DragAction.COPY)

        if config.ENABLE_INSPECTOR:
            from inspector import HermelinInspector
            HermelinInspector(self.get_inspector())

        templatefile = utils.get_ui_object(config.TEMPLATE)
        template = open(templatefile, 'rb').read()
        self.load_string(template, 'text/html', 'UTF-8', 'file://' + templatefile)
Example #18
    def __init__(self):
        print("Reloading.")
        try:
            path = get_path("files", "mac_vendors")
        except:
            logger.warn("No path for mac_vendors file in config")
            return

        for line in open(path).readlines():
            matches = re.match(r"\s+([A-Z0-9]{2}-[A-Z0-9]{2}-[A-Z0-9]{2})\s+\(hex\)\s+(.*)", line)
            if matches:
                a = matches.group(1)
                self[(long(a[0:2], 16) * 256 * 256) + (long(a[3:5], 16) * 256) + long(a[6:], 16)] = matches.group(2)
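
The index arithmetic above packs an OUI such as "00-1A-2B" into a single 24-bit integer key: high byte * 65536 + middle byte * 256 + low byte. An equivalent Python 3 form (long() no longer exists there):

def oui_to_int(oui):
    # "00-1A-2B" -> 0x001A2B; identical to the shifted-byte sum above.
    return int(oui.replace('-', ''), 16)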
Example #19
 def on_load_finish(self, view, webframe):
     if self.load_finish_flag:
         return
     self.load_finish_flag = True
     agent.webv = self
     # overlay extra variables of web part
     variables = {
           'platform': 'Linux'
         , 'wrapper': 'python-gtk3'
         , 'conf_dir': config.get_path("conf")
         , 'cache_dir': config.get_path("cache")
         , 'avatar_cache_dir': config.get_path("avatar")
         , 'extra_fonts': utils.get_extra_fonts()
         , 'extra_exts': utils.get_extra_exts()
         , 'extra_themes': utils.get_extra_themes()
         , 'locale': utils.get_locale()
     }
     # and then, notify web part i am ready to work :)
     GObject.idle_add(view.execute_script, '''
         overlay_variables(%s);
         globals.load_flags = 1;
         ''' % json.dumps(variables))
Example #20
 def on_load_finish(self, view, webframe):
     if self.load_finish_flag:
         return
     self.load_finish_flag = True
     agent.webv = self
     # overlay extra variables of web part
     variables = {
           'platform': platform.system()
         , 'wrapper': 'python-gtk3'
         , 'conf_dir': config.get_path("conf")
         , 'cache_dir': config.get_path("cache")
         , 'avatar_cache_dir': config.get_path("avatar")
         , 'extra_fonts': utils.get_extra_fonts()
         , 'extra_exts': utils.get_extra_exts()
         , 'extra_themes': utils.get_extra_themes()
         , 'locale': utils.get_locale()
     }
     # and then, notify web part i am ready to work :)
     GObject.idle_add(view.execute_script, '''
         overlay_variables(%s);
         globals.load_flags = 1;
         ''' % json.dumps(variables))
Example #21
    def __init__(self,
                 api=None,
                 storefile=DEFAULT_TWEETSTORE_FILE,
                 autopopulate=True,
                 familiar=None,
                 rejectfile=None,
                 chain_factory=markov.tree.MarkovChain,
                 **kwargs):

        if type(chain_factory) in [str, unicode]:
            if chain_factory == 'sql':
                chain_factory = markov.prefix_sql.MarkovPrefixSql

                if ('separator' not in kwargs or kwargs['separator'] is None):
                    kwargs['separator'] = ' '

                if kwargs['separator'] == ' ':
                    chain_type = 'word'
                elif kwargs['separator'] == '':
                    chain_type = 'char'
                else:
                    chain_type = 'other'

                if ('dbfile' not in kwargs
                        or kwargs['dbfile'] is None) and storefile:
                    base_store = '.'.join(
                        os.path.basename(storefile).split('.')[:-1])
                    kwargs['dbfile'] = config.get_path(
                        'chains/{}_{}{}.sqlite'.format(base_store, chain_type,
                                                       kwargs['max']))
            elif chain_factory == 'memory_tree':
                chain_factory = markov.tree.MarkovChain
            else:
                raise ValueError(
                    'unknown chain_factory type "{}"'.format(chain_factory))
        self._chain = chain_factory(**kwargs)
        self._api = api
        self._familiar = familiar
        self.tweetcount = 0
        self._tweetstore = TweetStore(storefile)
        self._rejectstore = TweetStore(rejectfile)

        #populate the chain from the tweetstore
        if self._tweetstore and autopopulate:
            for tw in self._tweetstore.values():
                #override the default familiarity threshold, since tweets in the
                #store are considered to be pre-approved, and besides, if the
                #chain is empty, then nothing will reach a non-zero threshold
                # 0 is used because None is the guard value.
                self._ProcTweet(tw, familiar=0)
Example #22
def crack_action(params):
    if params[1] == 'search':
        load_search(params[2])
    elif params[1] == 'choose_file':
        callback = params[2]
        file_path = utils.open_file_chooser_dialog()
        webv.execute_script('%s("%s")' % (callback, file_path))
    elif params[1] == 'save_avatar':
        img_uri = urllib.unquote(params[2])
        avatar_file = urllib.unquote(params[3])
        avatar_path = os.path.join(config.get_path("avatar"), avatar_file)
        th = threading.Thread(target=save_file_proc,
                              args=(img_uri, avatar_path))
        th.start()
    elif params[1] == 'log':
        print '\033[1;31;40m[%s]\033[0m %s' % (urllib.unquote(
            params[2]), urllib.unquote(params[3]))
    elif params[1] == 'paste_clipboard_text':
        webv.paste_clipboard()
    elif params[1] == 'set_clipboard_text':
        webv.copy_clipboard()
Example #23
def crack_action(params):
    if params[1] == 'search':
        load_search(params[2])
    elif params[1] == 'choose_file':
        callback = params[2]
        file_path = utils.open_file_chooser_dialog()
        webv.execute_script('%s("%s")' % (callback, file_path))
    elif params[1] == 'save_avatar':
        img_uri = urllib.unquote(params[2])
        avatar_file = urllib.unquote(params[3])
        avatar_path = os.path.join(config.get_path("avatar"), avatar_file)
        th = threading.Thread(target=save_file_proc,
                              args=(img_uri, avatar_path))
        th.start()
    elif params[1] == 'log':
        print '\033[1;31;40m[%s]\033[0m %s' % (urllib.unquote(params[2]),
                                               urllib.unquote(params[3]))
    elif params[1] == 'paste_clipboard_text':
        webv.paste_clipboard()
    elif params[1] == 'set_clipboard_text':
        webv.copy_clipboard()
Example #24
 def save_response_logs(response, filename):
     with open(config.get_path(f'logs/{filename}'), 'w') as file:
         file.write(response.text)
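
A usage sketch for Example #24, assuming the response object comes from the requests library and that a logs/ directory already exists under the configured base path (the URL and filename are illustrative):

import requests

response = requests.get('https://example.com/api/health')
save_response_logs(response, 'health_check.txt')  # writes logs/health_check.txt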
Example #25
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
    'common.middleware.global_request_middleware',
]

ROOT_URLCONF = 'szezlong.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            get_path("templates"),
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'szezlong.wsgi.application'
Example #26
def uncompress():
    output_path = config.get_path('output_path', 'Output')

    with tarfile.open(config.COMPRESSED_FILE_PATH, 'r:gz') as tar:
        tar.extractall(output_path)
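
Note that extractall() trusts member names inside the archive, so a crafted tarball with "../" entries can write outside output_path. A hedged hardening sketch (safe_extract is not part of the original code):

import os

def safe_extract(tar, output_path):
    # Reject any member whose resolved destination escapes output_path.
    root = os.path.realpath(output_path)
    for member in tar.getmembers():
        dest = os.path.realpath(os.path.join(root, member.name))
        if dest != root and not dest.startswith(root + os.sep):
            raise ValueError('blocked path traversal: %s' % member.name)
    tar.extractall(output_path)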
Example #27
def main(args):
    config = load_config(args)
    global_eval_config = config["eval_params"]
    models, model_names = config_modelloader(config, load_pretrain=False)

    model_ori = model_mlp_after_flatten(in_dim=784, neurons=[64, 64])
    dummy_input = torch.randn(10, 784)
    converted_models = [BoundGeneral(model_ori, dummy_input)]

    robust_errs = []
    errs = []

    checkpoint = torch.load(args.path_prefix, map_location='cpu')
    converted_models[0].load_state_dict(checkpoint['state_dict'], strict=True)

    for model, model_id, model_config in zip(converted_models, model_names, config["models"]):
        model = model.cuda()

        # make a copy of global training config, and update per-model config
        eval_config = copy.deepcopy(global_eval_config)
        if "eval_params" in model_config:
            eval_config.update(model_config["eval_params"])

        # read training parameters from config file
        method = eval_config["method"]
        verbose = eval_config["verbose"]
        eps = eval_config["epsilon"]
        # parameters specific to a training method
        method_param = eval_config["method_params"]
        norm = float(eval_config["norm"])
        train_data, test_data = config_dataloader(config, **eval_config["loader_params"])

        # model_name = get_path(config, model_id, "model", load =False)
        # print(model_name)
        config["path_prefix"] = os.path.split(os.path.split(config["path_prefix"])[0])[0]
        model_log = get_path(config, model_id, "eval_log")
        print(model_log)
        logger = Logger(open(model_log, "w"))
        logger.log("evaluation configurations:", eval_config)
            
        logger.log("Evaluating...")
        with torch.no_grad():
            # evaluate
            print('using bound', eval_config["epsilon_weights"], "norm", norm)
            l2_ball_list = []
            _c = 0
            for i in range(len(model.choices)):
                if hasattr(model.choices[i], 'weight'):
                    l2_norm = torch.norm(model.choices[i].weight.data, p=2)
                    l2_ball_list.append(eval_config["epsilon_weights"][_c] * l2_norm)
                    _c += 1

            print('Lp norms of weights scaled by epsilon:', l2_ball_list)
            data = train_data

            print('number of examples (approx.):', len(data) * data.batch_size)
            # robust_err, err = Train(model, 0, test_data, eps, eps, eps, norm, logger, verbose, False, None, method, **method_param)
            robust_err, err = Train(model, 0, data, eps, eps, eps, eval_config['epsilon_weights'], eval_config['epsilon_weights'],
                                    norm, logger, verbose, False, None, method, **method_param)

        robust_errs.append(robust_err)
        errs.append(err)

    print('model robust errors (for robustly trained models, not valid for naturally trained models):')
    print(robust_errs)
    robust_errs = np.array(robust_errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(np.min(robust_errs), np.max(robust_errs), np.median(robust_errs), np.mean(robust_errs)))
    print('clean errors for models with min, max and median robust errors')
    i_min = np.argmin(robust_errs)
    i_max = np.argmax(robust_errs)
    i_median = np.argsort(robust_errs)[len(robust_errs) // 2]
    print('for min: {:.4f}, for max: {:.4f}, for median: {:.4f}'.format(errs[i_min], errs[i_max], errs[i_median]))
    print('model clean errors:')
    print(errs)
    print('min: {:.4f}, max: {:.4f}, median: {:.4f}, mean: {:.4f}'.format(np.min(errs), np.max(errs), np.median(errs), np.mean(errs)))
Example #28
def get_ui_object(name):
    for base in config.get_path("data"):
        fullpath = os.path.join(base, name)
        if os.path.exists(fullpath):
            return fullpath
Example #29
def install_nifi():
  '''Install NiFi via Ambari (and Ali's NiFi service).

  Automatically installs NiFi with no user interaction: simply run the method
  on the same machine as the Ambari server and NiFi will be installed. You'll
  need to start it manually afterwards.

  Returns:
    bool: True if installation is successful. Otherwise the user chooses
    whether to continue setting up the demo without NiFi: ``True`` if the
    user specified Yes (continue), ``False`` if No (do not continue).

  Raises:
    EnvironmentError: Raised when Ambari is not installed on the current
    host, or if ``hdp-select`` cannot be installed.'''
  logger.info('Attempting to install NiFi to the cluster')
  if not is_ambari_installed():
    logger.error('Ambari must be installed to install NiFi as well.')
    raise EnvironmentError('You must install the demo on the same node as the Ambari server. Install Ambari here or move to another node with Ambari installed before continuing')
  
  
  if not is_hdp_select_installed():
    installed = install_hdp_select()
    if not installed:
      logger.error('hdp-select must be installed to install NiFi')
      raise EnvironmentError('hdp-select could not be installed. Please install it manually and then re-run the setup.')
  
  conf = config.read_config('global.conf')
  cmds = json.loads(conf['NIFI']['install-commands'])
  sh = Shell()
  
  logger.info('Getting HDP Version')
  version = sh.run(cmds[0])
  logger.info('HDP Version: ' + version[0])
  fixed_copy = cmds[2].replace('$VERSION', str(version[0])).replace('\n', '')
  fixed_remove = cmds[1].replace('$VERSION', str(version[0])).replace('\n', '')
  logger.info('NiFi Clean Command: ' + fixed_remove)
  logger.info('NiFi Copy Command: ' + fixed_copy)
  remove = sh.run(fixed_remove)
  copy = sh.run(fixed_copy)
  
  
  amc = conf['AMBARI']
  cc = CurlClient(amc['username'], amc['password'], amc['proto'], amc['server'], amc['port'])
  opts = '-H \'X-Requested-By: ambari\''
  path = '/api/v1/clusters/' + amc['cluster_name'] + '/services/NIFI'
  print cc.make_request('POST', path, options=opts)
  path += '/components/NIFI_MASTER'
  print cc.make_request('POST', path, options=opts)
  
  cfg = {
    'cmd': 'bash /var/lib/ambari-server/resources/scripts/configs.sh set', 
    'server': amc['server'],
    'cluster': amc['cluster_name'],
    'name': 'nifi-ambari-config',
    'config_file': config.get_path('nifi/config/nifi-ambari-config.json')
  }
  create_cmd = lambda x: ' '.join([cfg['cmd'], cfg['server'], cfg['cluster'], x, config.get_path('nifi/config/' + x + '.json')])
  
  logger.debug(sh.run(create_cmd('nifi-ambari-config')))
  logger.debug(sh.run(create_cmd('nifi-bootstrap-env')))
  logger.debug(sh.run(create_cmd('nifi-flow-env')))
  logger.debug(sh.run(create_cmd('nifi-logback-env')))
  logger.debug(sh.run(create_cmd('nifi-properties-env')))
  
  path = '/api/v1/clusters/' + amc['cluster_name'] + '/hosts/' + amc['server'] + '/host_components/NIFI_MASTER'
  logger.debug(path)
  cc.make_request('POST', path, options=opts)
  
  path = '/api/v1/clusters/' + amc['cluster_name'] + '/services/NIFI'
  opts = '-H \'X-Requested-By: ambari\' -d \'{"RequestInfo": {"context" :"Install Nifi"}, "Body": {"ServiceInfo": {"maintenance_state" : "OFF", "state": "INSTALLED"}}}\''
  cc.make_request('PUT', path, options=opts)

  print("Please open the Ambari Interface and manually deploy the NiFi Service.")
  raw_input("Press enter twice to continue...")
  raw_input("Press enter once to continue...")
  
#   We've copied the necessary files. Once that completes we need to add it to Ambari
  logger.info('Waiting for user to install service in Ambari to continue')
  print('Checking to make sure service is installed')
  ambari = config.read_config('global.conf')['AMBARI']
  installed = check_ambari_service_installed('NIFI', ambari)
  logger.info('NiFi installed successfully')
  cont = ''
  if not installed:
    print('Unable to contact Ambari Server. Unsure whether or not NiFi was installed')
    while not (cont == 'y' or cont == 'n'):
      cont = raw_input('Continue attempt to set up NiFi for demo?(y/n)')
      if not (cont == 'y' or cont == 'n'):
        print('Please enter "y" or "n"')
  else:
    cont = 'y'
  
  if cont == 'n':
    return False
  elif cont == 'y':
    return True
Example #30
import pandas as pd
from config import get_path

df = pd.DataFrame(pd.read_csv(get_path() + 'amount.csv'))

print df
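
read_csv already returns a DataFrame, so the pd.DataFrame() wrapper above is redundant, and the print statement ties the snippet to Python 2. An equivalent form that also runs on Python 3:

import pandas as pd
from config import get_path

df = pd.read_csv(get_path() + 'amount.csv')
print(df)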
Example #31
def install_nifi():
    '''Install NiFi via Ambari (and Ali's NiFi service).

    Automatically installs NiFi with no user interaction: simply run the
    method on the same machine as the Ambari server and NiFi will be
    installed. You'll need to start it manually afterwards.

    Returns:
      bool: True if installation is successful. Otherwise the user chooses
      whether to continue setting up the demo without NiFi: ``True`` if the
      user specified Yes (continue), ``False`` if No (do not continue).

    Raises:
      EnvironmentError: Raised when Ambari is not installed on the current
      host, or if ``hdp-select`` cannot be installed.'''
    logger.info('Attempting to install NiFi to the cluster')
    if not is_ambari_installed():
        logger.error('Ambari must be installed to install NiFi as well.')
        raise EnvironmentError(
            'You must install the demo on the same node as the Ambari server. Install Ambari here or move to another node with Ambari installed before continuing'
        )

    if not is_hdp_select_installed():
        installed = install_hdp_select()
        if not installed:
            logger.error('hdp-select must be installed to install NiFi')
            raise EnvironmentError(
                'hdp-select could not be installed. Please install it manually and then re-run the setup.'
            )

    conf = config.read_config('global.conf')
    cmds = json.loads(conf['NIFI']['install-commands'])
    sh = Shell()

    logger.info('Getting HDP Version')
    version = sh.run(cmds[0])
    logger.info('HDP Version: ' + version[0])
    fixed_copy = cmds[2].replace('$VERSION', str(version[0])).replace('\n', '')
    fixed_remove = cmds[1].replace('$VERSION',
                                   str(version[0])).replace('\n', '')
    logger.info('NiFi Clean Command: ' + fixed_remove)
    logger.info('NiFi Copy Command: ' + fixed_copy)
    remove = sh.run(fixed_remove)
    copy = sh.run(fixed_copy)

    amc = conf['AMBARI']
    cc = CurlClient(amc['username'], amc['password'], amc['proto'],
                    amc['server'], amc['port'])
    opts = '-H \'X-Requested-By: ambari\''
    path = '/api/v1/clusters/' + amc['cluster_name'] + '/services/NIFI'
    print cc.make_request('POST', path, options=opts)
    path += '/components/NIFI_MASTER'
    print cc.make_request('POST', path, options=opts)

    cfg = {
        'cmd': 'bash /var/lib/ambari-server/resources/scripts/configs.sh set',
        'server': amc['server'],
        'cluster': amc['cluster_name'],
        'name': 'nifi-ambari-config',
        'config_file': config.get_path('nifi/config/nifi-ambari-config.json')
    }
    create_cmd = lambda x: ' '.join([
        cfg['cmd'], cfg['server'], cfg['cluster'], x,
        config.get_path('nifi/config/' + x + '.json')
    ])

    logger.debug(sh.run(create_cmd('nifi-ambari-config')))
    logger.debug(sh.run(create_cmd('nifi-bootstrap-env')))
    logger.debug(sh.run(create_cmd('nifi-flow-env')))
    logger.debug(sh.run(create_cmd('nifi-logback-env')))
    logger.debug(sh.run(create_cmd('nifi-properties-env')))

    path = '/api/v1/clusters/' + amc['cluster_name'] + '/hosts/' + amc[
        'server'] + '/host_components/NIFI_MASTER'
    logger.debug(path)
    cc.make_request('POST', path, options=opts)

    path = '/api/v1/clusters/' + amc['cluster_name'] + '/services/NIFI'
    opts = '-H \'X-Requested-By: ambari\' -d \'{"RequestInfo": {"context" :"Install Nifi"}, "Body": {"ServiceInfo": {"maintenance_state" : "OFF", "state": "INSTALLED"}}}\''
    cc.make_request('PUT', path, options=opts)

    print(
        "Please open the Ambari Interface and manually deploy the NiFi Service."
    )
    raw_input("Press enter twice to continue...")
    raw_input("Press enter once to continue...")

    #   We've copied the necessary files. Once that completes we need to add it to Ambari
    logger.info('Waiting for user to install service in Ambari to continue')
    print('Checking to make sure service is installed')
    ambari = config.read_config('global.conf')['AMBARI']
    installed = check_ambari_service_installed('NIFI', ambari)
    logger.info('NiFi installed successfully')
    cont = ''
    if not installed:
        print(
            'Unable to contact Ambari Server. Unsure whether or not NiFi was installed'
        )
        while not (cont == 'y' or cont == 'n'):
            cont = raw_input('Continue attempt to set up NiFi for demo?(y/n)')
            if not (cont == 'y' or cont == 'n'):
                print('Please enter "y" or "n"')
    else:
        cont = 'y'

    if cont == 'n':
        return False
    elif cont == 'y':
        return True
Example #32
  def check_schema(self, schema):
    '''Checks the entire schema for any incorrect or missing parameters.
    
    Args:
      schema (str): The file path or JSON string of the schema
    
    Returns:
      N/A
      
    Raises:
      TypeError: If the root of JSON schema is not a list.
      KeyError: If 'fieldName' or 'type' are not found in a datum
      ValueError: If there are duplicate fieldNames
      RuntimeError: If a certain 'type' isn't found
      
    '''
    conf = None
    try:
      path = config.get_path(schema)
      with open(path) as df:
        conf = json.load(df)
#        logger.info('Successfully read config from file')
      self.using_file = True
    except IOError:
      self.using_file = False
      logger.info('Could not read schema as file. Attempting to read as JSON string')
      pass

    if conf is None:
      conf = json.loads(schema)
      logger.info('Read as JSON string')

    if not type(conf) == list:
      logger.error('Root of JSON Schema is not a list')
      raise TypeError('Root of JSON Schema is not a list')

    for field in conf:
      if not 'fieldName' in field:
        logger.error('fieldName not found in schema')
        raise KeyError('Could not find \'fieldName\' in field of schema')
      if not 'type' in field:
        logger.error('type not found in schema')
        raise KeyError('Could not find \'type\' in field of schema')
      field_type = field['type']
      datum = AbstractDatum(field)
      if not datum.field_name in self.field_names:
        self.field_names.append(datum.field_name)
        logger.debug('Added datum to field set with type: '
                     + str(field_type))
      else:
        raise ValueError('Cannot have duplicate field names')

      if 'string' == field_type:
        datum = StringDatum(field)
      elif 'int' == field_type:
        datum = IntDatum(field)
      elif 'decimal' == field_type:
        datum = DecimalDatum(field)
      elif 'map' == field_type:
        datum = MapDatum(field)
      elif 'boolean' == field_type:
        datum = BooleanDatum(field)
      else:
        raise RuntimeError('Field type:' + field_type + ' was not found. Please change the field type or implement a new datum')
      # Check to make sure the field has necessary attributes
      datum.check()
      self.data_fields.append(datum)
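
For reference, a minimal schema string that check_schema above accepts: the root is a JSON list and each field carries 'fieldName' plus one of the supported 'type' values. Any extra per-type attributes enforced by datum.check() are not shown here:

example_schema = '''[
  {"fieldName": "user_id", "type": "int"},
  {"fieldName": "username", "type": "string"},
  {"fieldName": "active", "type": "boolean"}
]'''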
Example #33
def get_ui_object(name):
    for base in config.get_path("data"):
        fullpath = os.path.join(base, name)
        if os.path.exists(fullpath):
            return fullpath
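
Usage sketch for Example #33: config.get_path("data") is assumed to yield an iterable of base directories; the first existing match wins, and the function implicitly returns None when nothing matches (the filename below is illustrative):

template = get_ui_object('main_window.ui')
if template is None:
    raise RuntimeError('main_window.ui not found in any data directory')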
Example #34
def main():
    p = OptionParser(usage="usage: %prog [options] [name] [version]",
                     description=__doc__)

    p.add_option("--config",
                 action="store_true",
                 help="display the configuration and exit")

    p.add_option('-f',
                 "--force",
                 action="store_true",
                 help="force install the main package "
                 "(not it's dependencies, see --forceall)")

    p.add_option("--forceall",
                 action="store_true",
                 help="force install of all packages "
                 "(i.e. including dependencies)")

    p.add_option('-i',
                 "--info",
                 action="store_true",
                 help="show information about a package")

    p.add_option('-l',
                 "--list",
                 action="store_true",
                 help="list the packages currently installed on the system")

    p.add_option('-n',
                 "--dry-run",
                 action="store_true",
                 help="show what would have been downloaded/removed/installed")

    p.add_option('-N',
                 "--no-deps",
                 action="store_true",
                 help="neither download nor install dependencies")

    p.add_option("--remove", action="store_true", help="remove a package")

    p.add_option('-s',
                 "--search",
                 action="store_true",
                 help="search the index in the repo (chain) of packages "
                 "and display versions available.")

    p.add_option('-v', "--verbose", action="store_true")

    p.add_option('--version', action="store_true")

    p.add_option("--whats-new",
                 action="store_true",
                 help="display to which installed packages updates are "
                 "available")

    opts, args = p.parse_args()

    if len(args) > 0 and opts.config:
        p.error("Option takes no arguments")

    if opts.force and opts.forceall:
        p.error("Options --force and --forceall exclude each ohter")

    pat = None
    if (opts.list or opts.search) and args:
        pat = re.compile(args[0], re.I)

    if opts.version:  #  --version
        from enstaller import __version__
        print "IronPkg version:", __version__
        return

    if opts.config:  #  --config
        config.print_config()
        return

    if config.get_path() is None:
        # create config file if it doesn't exist
        config.write()

    conf = config.read()  #  conf

    global dry_run, version  #  set globals
    dry_run = opts.dry_run
    version = opts.version

    if opts.list:  #  --list
        print_installed(pat)
        return

    c = Chain(conf['IndexedRepos'], verbose)  #  init chain

    if opts.search:  #  --search
        search(c, pat)
        return

    if opts.info:  #  --info
        if len(args) != 1:
            p.error("Option requires one argument (name of package)")
        info_option(c, canonical(args[0]))
        return

    if opts.whats_new:  # --whats-new
        if args:
            p.error("Option requires no arguments")
        whats_new(c)
        return

    if len(args) == 0:
        p.error("Requirement (name and optional version) missing")
    if len(args) > 2:
        p.error("A requirement is a name and an optional version")
    req = Req(' '.join(args))

    if opts.remove:  #  --remove
        remove_req(req)
        return

    dists = get_dists(
        c,
        req,  #  dists
        recur=not opts.no_deps)

    # Warn the user about packages which depend on what will be updated
    depend_warn([dist_naming.filename_dist(d) for d in dists])

    # Packages which are installed currently
    inst = set(egginst.get_installed())

    # These are the packages which are being excluded from installation
    if opts.forceall:
        exclude = set()
    else:
        exclude = set(inst)
        if opts.force:
            exclude.discard(dist_naming.filename_dist(dists[-1]))

    # Fetch distributions
    if not isdir(conf['local']):
        os.makedirs(conf['local'])
    for dist in iter_dists_excl(dists, exclude):
        c.fetch_dist(dist,
                     conf['local'],
                     check_md5=opts.force or opts.forceall,
                     dry_run=dry_run)

    # Remove packages (in reverse install order)
    for dist in dists[::-1]:
        fn = dist_naming.filename_dist(dist)
        if fn in inst:
            # if the distribution (which needs to be installed) is already
            # installed don't remove it
            continue
        cname = cname_fn(fn)
        for fn_inst in inst:
            if cname == cname_fn(fn_inst):
                egginst_remove(fn_inst)

    # Install packages
    installed_something = False
    for dist in iter_dists_excl(dists, exclude):
        installed_something = True
        egginst_install(conf, dist)

    if not installed_something:
        print "No update necessary, %s is up-to-date." % req
        print_installed_info(req.name)
Example #35
def main(args):
    config = load_config(args)
    global_train_config = config["training_params"]
    models, model_names = config_modelloader(config)
    for model, model_id, model_config in zip(models, model_names,
                                             config["models"]):
        # make a copy of global training config, and update per-model config
        train_config = copy.deepcopy(global_train_config)
        if "training_params" in model_config:
            train_config = update_dict(train_config,
                                       model_config["training_params"])
        model = BoundSequential.convert(
            model, train_config["method_params"]["bound_opts"])

        # read training parameters from config file
        epochs = train_config["epochs"]
        lr = train_config["lr"]
        weight_decay = train_config["weight_decay"]
        starting_epsilon = train_config["starting_epsilon"]
        end_epsilon = train_config["epsilon"]
        schedule_length = train_config["schedule_length"]
        schedule_start = train_config["schedule_start"]
        optimizer = train_config["optimizer"]
        method = train_config["method"]
        verbose = train_config["verbose"]
        lr_decay_step = train_config["lr_decay_step"]
        lr_decay_milestones = train_config["lr_decay_milestones"]
        lr_decay_factor = train_config["lr_decay_factor"]
        multi_gpu = train_config["multi_gpu"]
        # parameters specific to a training method
        method_param = train_config["method_params"]
        norm = float(train_config["norm"])
        train_data, test_data = config_dataloader(
            config, **train_config["loader_params"])

        if optimizer == "adam":
            opt = optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
        elif optimizer == "sgd":
            opt = optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=0.9,
                            nesterov=True,
                            weight_decay=weight_decay)
        else:
            raise ValueError("Unknown optimizer")

        batch_multiplier = train_config["method_params"].get(
            "batch_multiplier", 1)
        batch_size = train_data.batch_size * batch_multiplier
        num_steps_per_epoch = int(
            np.ceil(1.0 * len(train_data.dataset) / batch_size))
        epsilon_scheduler = EpsilonScheduler(
            train_config.get("schedule_type",
                             "linear"), schedule_start * num_steps_per_epoch,
            ((schedule_start + schedule_length) - 1) * num_steps_per_epoch,
            starting_epsilon, end_epsilon, num_steps_per_epoch)
        max_eps = end_epsilon

        if lr_decay_step:
            # Use StepLR. Decay by lr_decay_factor every lr_decay_step.
            lr_scheduler = optim.lr_scheduler.StepLR(opt,
                                                     step_size=lr_decay_step,
                                                     gamma=lr_decay_factor)
            lr_decay_milestones = None
        elif lr_decay_milestones:
            # Decay learning rate by lr_decay_factor at a few milestones.
            lr_scheduler = optim.lr_scheduler.MultiStepLR(
                opt, milestones=lr_decay_milestones, gamma=lr_decay_factor)
        else:
            raise ValueError(
                "One of lr_decay_step and lr_decay_milestones must be non-empty."
            )
        model_name = get_path(config, model_id, "model", load=False)
        best_model_name = get_path(config, model_id, "best_model", load=False)
        model_log = get_path(config, model_id, "train_log")
        logger = Logger(open(model_log, "w"))
        logger.log(model_name)
        logger.log("Command line:", " ".join(sys.argv[:]))
        logger.log("training configurations:", train_config)
        logger.log("Model structure:")
        logger.log(str(model))
        logger.log("data std:", train_data.std)
        best_err = np.inf
        recorded_clean_err = np.inf
        timer = 0.0

        if multi_gpu:
            logger.log(
                "\nUsing multiple GPUs for computing CROWN-IBP bounds\n")
            model = BoundDataParallel(model)
            model = model.cuda()

        for t in range(epochs):
            epoch_start_eps = epsilon_scheduler.get_eps(t, 0)
            epoch_end_eps = epsilon_scheduler.get_eps(t + 1, 0)
            logger.log(
                "Epoch {}, learning rate {}, epsilon {:.6g} - {:.6g}".format(
                    t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))
            # with torch.autograd.detect_anomaly():
            start_time = time.time()
            Train(model, t, train_data, epsilon_scheduler, max_eps, norm,
                  logger, verbose, True, opt, method, **method_param)
            if lr_decay_step:
                # Use StepLR. Note that we manually set the epoch number here, hence the +1 offset.
                lr_scheduler.step(
                    epoch=max(t - (schedule_start + schedule_length - 1) +
                              1, 0))
            elif lr_decay_milestones:
                # Use MultiStepLR with milestones.
                lr_scheduler.step()
            epoch_time = time.time() - start_time
            timer += epoch_time
            logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(
                epoch_time, timer))
            logger.log("Evaluating...")
            with torch.no_grad():
                # evaluate
                err, clean_err = Train(
                    model, t, test_data,
                    EpsilonScheduler("linear", 0, 0, epoch_end_eps,
                                     epoch_end_eps, 1), max_eps, norm, logger,
                    verbose, False, None, method, **method_param)

            logger.log('saving to', model_name)
            torch.save(
                {
                    'state_dict':
                    model.module.state_dict()
                    if multi_gpu else model.state_dict(),
                    'epoch':
                    t,
                }, model_name)

            # save the best model after we reached the schedule
            if t >= (schedule_start + schedule_length):
                if err <= best_err:
                    best_err = err
                    recorded_clean_err = clean_err
                    logger.log('Saving best model {} with error {}'.format(
                        best_model_name, best_err))
                    torch.save(
                        {
                            'state_dict':
                            model.module.state_dict()
                            if multi_gpu else model.state_dict(),
                            'robust_err':
                            err,
                            'clean_err':
                            clean_err,
                            'epoch':
                            t,
                        }, best_model_name)

        logger.log('Total Time: {:.4f}'.format(timer))
        logger.log('Model {} best err {}, clean err {}'.format(
            model_id, best_err, recorded_clean_err))
Example #36
def main(args):
    config = load_config(args)
    global_train_config = config["training_params"]
    models, model_names = config_modelloader(config)

    converted_models = [BoundSequential.convert(model) for model in models]

    for model, model_id, model_config in zip(converted_models, model_names, config["models"]):
        model = model.cuda()

        # make a copy of global training config, and update per-model config
        train_config = copy.deepcopy(global_train_config)
        if "training_params" in model_config:
            train_config = update_dict(train_config, model_config["training_params"])

        # read training parameters from config file
        epochs = train_config["epochs"]
        lr = train_config["lr"]
        weight_decay = train_config["weight_decay"]
        starting_epsilon = train_config["starting_epsilon"]
        end_epsilon = train_config["epsilon"]
        schedule_length = train_config["schedule_length"]
        schedule_start = train_config["schedule_start"]
        optimizer = train_config["optimizer"]
        method = train_config["method"]
        verbose = train_config["verbose"]
        lr_decay_step = train_config["lr_decay_step"]
        lr_decay_factor = train_config["lr_decay_factor"]
        # parameters specific to a training method
        method_param = train_config["method_params"]
        norm = train_config["norm"]
        train_data, test_data = config_dataloader(config, **train_config["loader_params"])

        if optimizer == "adam":
            opt = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
        elif optimizer == "sgd":
            opt = optim.SGD(model.parameters(), lr=lr, momentum=0.9, nesterov=True, weight_decay=weight_decay)
        else:
            raise ValueError("Unknown optimizer")

        eps_schedule = [0] * schedule_start + list(np.linspace(starting_epsilon, end_epsilon, schedule_length))
        max_eps = end_epsilon
        lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=lr_decay_step, gamma=lr_decay_factor)
        model_name = get_path(config, model_id, "model", load = False)
        best_model_name = get_path(config, model_id, "best_model", load = False)
        print(model_name)
        model_log = get_path(config, model_id, "train_log")
        logger = Logger(open(model_log, "w"))
        logger.log("Command line:", " ".join(sys.argv[:]))
        logger.log("training configurations:", train_config)
        logger.log("Model structure:")
        logger.log(str(model))
        logger.log("data std:", train_data.std)
        best_err = np.inf
        recorded_clean_err = np.inf
        timer = 0.0
        for t in range(epochs):
            lr_scheduler.step(epoch=max(t-len(eps_schedule), 0))
            if t >= len(eps_schedule):
                # past the end of the schedule, epsilon stays at its final value
                epoch_start_eps = epoch_end_eps = end_epsilon
            else:
                epoch_start_eps = eps_schedule[t]
                if t + 1 >= len(eps_schedule):
                    epoch_end_eps = epoch_start_eps
                else:
                    epoch_end_eps = eps_schedule[t + 1]
            
            logger.log("Epoch {}, learning rate {}, epsilon {:.6f} - {:.6f}".format(t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))
            # with torch.autograd.detect_anomaly():
            start_time = time.time()
            Train(model, t, train_data, epoch_start_eps, epoch_end_eps, max_eps, logger, verbose, True, opt, method, **method_param)
            epoch_time = time.time() - start_time
            timer += epoch_time
            logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(epoch_time, timer))
            logger.log("Evaluating...")
            with torch.no_grad():
                # evaluate
                err, clean_err = Train(model, t, test_data, epoch_end_eps, epoch_end_eps, max_eps, logger, verbose, False, None, method, **method_param)

            logger.log('saving to', model_name)
            torch.save({
                    'state_dict': model.state_dict(),
                    'epoch': t,
                    }, model_name)

            # save the best model after we reached the schedule
            if t >= len(eps_schedule):
                if err <= best_err:
                    best_err = err
                    recorded_clean_err = clean_err
                    logger.log('Saving best model {} with error {}'.format(best_model_name, best_err))
                    torch.save({
                            'state_dict': model.state_dict(),
                            'robust_err': err,
                            'clean_err': clean_err,
                            'epoch': t,
                            }, best_model_name)

        logger.log('Total Time: {:.4f}'.format(timer))
        logger.log('Model {} best err {}, clean err {}'.format(model_id, best_err, recorded_clean_err))
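
The eps_schedule construction above is worth isolating: schedule_start warm-up epochs at epsilon 0, followed by a linear ramp to the final epsilon. A tiny standalone check, with values chosen purely for illustration:

import numpy as np

# Two warm-up epochs at eps=0, then four epochs ramping 0.01 -> 0.1.
eps_schedule = [0] * 2 + list(np.linspace(0.01, 0.1, 4))
print(eps_schedule)  # [0, 0, 0.01, 0.04, 0.07, 0.1] (up to float rounding)
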
Exemplo n.º 37
0
def main(args):
    config = load_config(args)
    global_train_config = config["training_params"]
    models, model_names = config_modelloader(config)

    converted_models = [BoundSequential.convert(model) for model in models]

    for model, model_id, model_config in zip(converted_models, model_names,
                                             config["models"]):
        print("Number of GPUs:", torch.cuda.device_count())
        model = model.cuda()
        # make a copy of global training config, and update per-model config
        train_config = copy.deepcopy(global_train_config)
        if "training_params" in model_config:
            train_config = update_dict(train_config,
                                       model_config["training_params"])

        # read training parameters from config file
        epochs = train_config["epochs"]
        lr = train_config["lr"]
        weight_decay = train_config["weight_decay"]
        starting_epsilon = train_config["starting_epsilon"]
        end_epsilon = train_config["epsilon"]
        schedule_length = train_config["schedule_length"]
        schedule_start = train_config["schedule_start"]
        optimizer = train_config["optimizer"]
        method = train_config["method"]
        verbose = train_config["verbose"]
        lr_decay_step = train_config["lr_decay_step"]
        lr_decay_factor = train_config["lr_decay_factor"]
        # parameters specific to a training method
        method_param = train_config["method_params"]
        norm = float(train_config["norm"])
        train_config["loader_params"]["batch_size"] = train_config[
            "loader_params"]["batch_size"] // args.grad_acc_steps
        train_config["loader_params"]["test_batch_size"] = train_config[
            "loader_params"]["test_batch_size"] // args.grad_acc_steps
        train_data, test_data = config_dataloader(
            config, **train_config["loader_params"])

        # initialize adversary network
        if method_param["attack_type"] == "patch-nn":
            if config["dataset"] == "mnist":
                adv_net = ResNetUNet(n_class=10,
                                     channels=1,
                                     base_width=method_param["base_width"],
                                     dataset="mnist").cuda()
            if config["dataset"] == "cifar":
                adv_net = ResNetUNet(n_class=10,
                                     channels=3,
                                     base_width=method_param["base_width"],
                                     dataset="cifar").cuda()
        else:
            adv_net = None
        if optimizer == "adam":
            opt = optim.Adam(model.parameters(),
                             lr=lr,
                             weight_decay=weight_decay)
            if method_param["attack_type"] == "patch-nn":
                unetopt = optim.Adam(adv_net.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)
            else:
                unetopt = None
        elif optimizer == "sgd":
            if method_param["attack_type"] == "patch-nn":
                unetopt = optim.SGD(adv_net.parameters(),
                                    lr=lr,
                                    momentum=0.9,
                                    nesterov=True,
                                    weight_decay=weight_decay)
            else:
                unetopt = None
            opt = optim.SGD(model.parameters(),
                            lr=lr,
                            momentum=0.9,
                            nesterov=True,
                            weight_decay=weight_decay)
        else:
            raise ValueError("Unknown optimizer")
        lr_scheduler = optim.lr_scheduler.StepLR(opt,
                                                 step_size=lr_decay_step,
                                                 gamma=lr_decay_factor)
        if method_param["attack_type"] == "patch-nn":
            lr_scheduler_unet = optim.lr_scheduler.StepLR(
                unetopt, step_size=lr_decay_step, gamma=lr_decay_factor)

        start_epoch = 0
        if args.resume:
            model_log = os.path.join(out_path, "test_log")
            logger = Logger(open(model_log, "w"))
            state_dict = torch.load(args.resume)
            print("***** Loading state dict from {} @ epoch {}".format(
                args.resume, state_dict['epoch']))
            model.load_state_dict(state_dict['state_dict'])
            opt.load_state_dict(state_dict['opt_state_dict'])
            lr_scheduler.load_state_dict(state_dict['lr_scheduler_dict'])
            start_epoch = state_dict['epoch'] + 1

        eps_schedule = [0] * schedule_start + list(
            np.linspace(starting_epsilon, end_epsilon, schedule_length))
        max_eps = end_epsilon

        model_name = get_path(config, model_id, "model", load=False)
        best_model_name = get_path(config, model_id, "best_model", load=False)
        print(model_name)
        model_log = get_path(config, model_id, "train_log")
        logger = Logger(open(model_log, "w"))
        logger.log("Command line:", " ".join(sys.argv[:]))
        logger.log("training configurations:", train_config)
        logger.log("Model structure:")
        logger.log(str(model))
        logger.log("data std:", train_data.std)
        best_err = np.inf
        recorded_clean_err = np.inf
        timer = 0.0

        for t in range(start_epoch, epochs):
            train_data, test_data = config_dataloader(
                config, **train_config["loader_params"])

            if method_param["attack_type"] == "patch-nn":
                lr_scheduler_unet.step(epoch=max(t - len(eps_schedule), 0))
            lr_scheduler.step(epoch=max(t - len(eps_schedule), 0))

            if t >= len(eps_schedule):
                # past the end of the schedule, epsilon stays at its final value
                epoch_start_eps = epoch_end_eps = end_epsilon
            else:
                epoch_start_eps = eps_schedule[t]
                if t + 1 >= len(eps_schedule):
                    epoch_end_eps = epoch_start_eps
                else:
                    epoch_end_eps = eps_schedule[t + 1]

            logger.log(
                "Epoch {}, learning rate {}, epsilon {:.6f} - {:.6f}".format(
                    t, lr_scheduler.get_lr(), epoch_start_eps, epoch_end_eps))
            # with torch.autograd.detect_anomaly():
            start_time = time.time()

            Train(model, model_id, t, train_data, epoch_start_eps,
                  epoch_end_eps, max_eps, norm, logger, verbose, True, opt,
                  method, adv_net, unetopt, **method_param)
            epoch_time = time.time() - start_time
            timer += epoch_time
            logger.log('Epoch time: {:.4f}, Total time: {:.4f}'.format(
                epoch_time, timer))

            logger.log("Evaluating...")
            # evaluate
            err, clean_err = Train(model, model_id, t, test_data,
                                   epoch_end_eps, epoch_end_eps, max_eps, norm,
                                   logger, verbose, False, None, method,
                                   adv_net, None, **method_param)
            # err, clean_err = 0, 0

            logger.log('saving to', model_name)
            # torch.save({
            #         'state_dict' : model.state_dict(),
            #         'opt_state_dict': opt.state_dict(),
            #         'robust_err': err,
            #         'clean_err': clean_err,
            #         'epoch' : t,
            #         'lr_scheduler_dict': lr_scheduler.state_dict()
            #         }, model_name)
            torch.save(model.state_dict(), model_name)

            # save the best model after we reached the schedule
            if t >= len(eps_schedule):
                if err <= best_err:
                    best_err = err
                    recorded_clean_err = clean_err
                    logger.log('Saving best model {} with error {}'.format(
                        best_model_name, best_err))
                    torch.save(
                        {
                            'state_dict': model.state_dict(),
                            'opt_state_dict': opt.state_dict(),
                            'robust_err': err,
                            'clean_err': clean_err,
                            'epoch': t,
                            'lr_scheduler_dict': lr_scheduler.state_dict()
                        }, best_model_name)

        logger.log('Total Time: {:.4f}'.format(timer))
        logger.log('Model {} best err {}, clean err {}'.format(
            model_id, best_err, recorded_clean_err))
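
The --resume branch above restores model, optimizer, and LR-scheduler state from a single checkpoint dict. A self-contained sketch of the same save/restore round trip, where the toy model and file name are assumptions for demonstration:

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(4, 2)  # stand-in for the converted model
opt = optim.SGD(model.parameters(), lr=0.1, momentum=0.9)
lr_scheduler = optim.lr_scheduler.StepLR(opt, step_size=10, gamma=0.5)

# Save at the end of epoch t, using the same keys as the training loop above.
t = 5
torch.save({
    'state_dict': model.state_dict(),
    'opt_state_dict': opt.state_dict(),
    'lr_scheduler_dict': lr_scheduler.state_dict(),
    'epoch': t,
}, "checkpoint.pth")

# Resume: reload everything and continue from the following epoch.
state = torch.load("checkpoint.pth")
model.load_state_dict(state['state_dict'])
opt.load_state_dict(state['opt_state_dict'])
lr_scheduler.load_state_dict(state['lr_scheduler_dict'])
start_epoch = state['epoch'] + 1
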
Exemplo n.º 38
0
def main():
    p = OptionParser(usage="usage: %prog [options] [name] [version]",
                     description=__doc__)

    p.add_option("--config",
                 action="store_true",
                 help="display the configuration and exit")

    p.add_option('-f', "--force",
                 action="store_true",
                 help="force install the main package "
                      "(not it's dependencies, see --forceall)")

    p.add_option("--forceall",
                 action="store_true",
                 help="force install of all packages "
                      "(i.e. including dependencies)")

    p.add_option('-i', "--info",
                 action="store_true",
                 help="show information about a package")

    p.add_option('-l', "--list",
                 action="store_true",
                 help="list the packages currently installed on the system")

    p.add_option('-n', "--dry-run",
                 action="store_true",
                 help="show what would have been downloaded/removed/installed")

    p.add_option('-N', "--no-deps",
                 action="store_true",
                 help="neither download nor install dependencies")

    p.add_option("--remove",
                 action="store_true",
                 help="remove a package")

    p.add_option('-s', "--search",
                 action="store_true",
                 help="search the index in the repo (chain) of packages "
                      "and display versions available.")

    p.add_option('-v', "--verbose", action="store_true")

    p.add_option('--version', action="store_true")

    p.add_option("--whats-new",
                 action="store_true",
                 help="display to which installed packages updates are "
                      "available")

    opts, args = p.parse_args()

    if len(args) > 0 and opts.config:
        p.error("Option takes no arguments")

    if opts.force and opts.forceall:
        p.error("Options --force and --forceall exclude each ohter")

    pat = None
    if (opts.list or opts.search) and args:
        pat = re.compile(args[0], re.I)

    if opts.version:                              #  --version
        from enstaller import __version__
        print "IronPkg version:", __version__
        return

    if opts.config:                               #  --config
        config.print_config()
        return

    if config.get_path() is None:
        # create config file if it doesn't exist
        config.write()

    conf = config.read()                          #  conf

    global dry_run, version                       #  set globals
    dry_run = opts.dry_run
    version = opts.version

    if opts.list:                                 #  --list
        print_installed(pat)
        return

    c = Chain(conf['IndexedRepos'], opts.verbose)  #  init chain

    if opts.search:                               #  --search
        search(c, pat)
        return

    if opts.info:                                 #  --info
        if len(args) != 1:
            p.error("Option requires one argument (name of package)")
        info_option(c, canonical(args[0]))
        return

    if opts.whats_new:                            # --whats-new
        if args:
            p.error("Option requires no arguments")
        whats_new(c)
        return

    if len(args) == 0:
        p.error("Requirement (name and optional version) missing")
    if len(args) > 2:
        p.error("A requirement is a name and an optional version")
    req = Req(' '.join(args))

    if opts.remove:                               #  --remove
        remove_req(req)
        return

    dists = get_dists(c, req,                     #  dists
                      recur=not opts.no_deps)

    # Warn the user about packages which depend on what will be updated
    depend_warn([dist_naming.filename_dist(d) for d in dists])

    # Packages which are installed currently
    inst = set(egginst.get_installed())

    # These are the packages that are excluded from installation
    if opts.forceall:
        exclude = set()
    else:
        exclude = set(inst)
        if opts.force:
            exclude.discard(dist_naming.filename_dist(dists[-1]))

    # Fetch distributions
    if not isdir(conf['local']):
        os.makedirs(conf['local'])
    for dist in iter_dists_excl(dists, exclude):
        c.fetch_dist(dist, conf['local'],
                     check_md5=opts.force or opts.forceall,
                     dry_run=dry_run)

    # Remove packages (in reverse install order)
    for dist in dists[::-1]:
        fn = dist_naming.filename_dist(dist)
        if fn in inst:
            # if a distribution that needs to be installed is already
            # installed, don't remove it
            continue
        cname = cname_fn(fn)
        for fn_inst in inst:
            if cname == cname_fn(fn_inst):
                egginst_remove(fn_inst)

    # Install packages
    installed_something = False
    for dist in iter_dists_excl(dists, exclude):
        installed_something = True
        egginst_install(conf, dist)

    if not installed_something:
        print "No update necessary, %s is up-to-date." % req
        print_installed_info(req.name)
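
The --force/--forceall exclusion check above is a common optparse pattern; the minimal reproduction below keeps optparse to match the example, even though argparse has since superseded it:

from optparse import OptionParser

p = OptionParser(usage="usage: %prog [options] [name] [version]")
p.add_option('-f', "--force", action="store_true")
p.add_option("--forceall", action="store_true")
opts, args = p.parse_args()

# optparse has no built-in mutual exclusion, so it is checked by hand;
# p.error() prints the usage message and exits with status 2.
if opts.force and opts.forceall:
    p.error("Options --force and --forceall exclude each other")
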