def main(device=torch.device('cuda:0')):
    # CLI arguments
    parser = arg.ArgumentParser(
        description='We all know what we are doing. Fighting!')
    parser.add_argument("--datasize",
                        "-d",
                        default="small",
                        type=str,
                        help="data size you want to use, small, medium, total")
    # Parsing
    args = parser.parse_args()
    # Data loaders
    datasize = args.datasize
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(
        datasize, pathname, batch_size=config("unet.batch_size"))

    # Model
    model = Net()

    # define loss function
    # criterion = torch.nn.L1Loss()

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = util.restore_checkpoint(
        model, util.config("unet.checkpoint"))
    acc, loss = util.evaluate_model(model, te_loader, device)
    # axes = util.make_training_plot()
    print(f'Test Accuracy: {acc}')
    print(f'Test Loss: {loss}')
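The checkpoint helper used here (util.restore_checkpoint) is not shown on this page. A minimal sketch of what such a helper might look like, assuming checkpoints are .pth files holding a dict with "model_state", "epoch", and "stats" keys (all names are assumptions, not taken from the original project):

import os
import torch

def restore_checkpoint(model, checkpoint_dir):
    """Load the newest checkpoint in checkpoint_dir, if any, into model."""
    if not os.path.isdir(checkpoint_dir):
        return model, 0, []  # nothing saved yet: start from epoch 0
    files = sorted(f for f in os.listdir(checkpoint_dir) if f.endswith(".pth"))
    if not files:
        return model, 0, []
    state = torch.load(os.path.join(checkpoint_dir, files[-1]))
    model.load_state_dict(state["model_state"])
    return model, state.get("epoch", 0), state.get("stats", [])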
Example #3
def set_teamcity_installed(software, version=1):
    "Marks some software as being installed on the agent"
    if util.what_system() != util.DARWIN:
        raise Exception("Not implemented")
    run("sudo launchctl unload /Library/LaunchAgents/com.dca.teamcity-agent.plist")
    util.config("{SOFTWARE}={VERSION}".format(SOFTWARE=software, VERSION=version),
                "buildAgent/conf/buildAgent.properties")
    run("sudo launchctl load /Library/LaunchAgents/com.dca.teamcity-agent.plist")
Example #4
    def training_data(self):
        if self.dataset in Model.Xs:
            print("Using cached dataset:", self.dataset)
            return (Model.Xs[self.dataset], Model.Ys[self.dataset])

        Model.Xs[self.dataset] = []
        Model.Ys[self.dataset] = []

        db_tweets_connection = db_tweets_connect(
            'pericog', config('connections', config('connections', 'active')))
        db_tweets_cursor = db_tweets_connection.cursor()

        print("Retrieving training dataset:", self.dataset)
        db_tweets_cursor.execute("""
				SELECT
					text, {}
				FROM tweets
				LEFT JOIN tweet_properties ON
					id=tweet_id
				WHERE {}_train = True
				ORDER BY id DESC
			""".format(','.join(self.properties), self.dataset))

        # TODO: make this work with multiple properties at once
        for text, label in db_tweets_cursor.fetchall():
            if label is None:
                continue

            Model.Xs[self.dataset].append(text)
            Model.Ys[self.dataset].append(label)

        db_tweets_connection.close()

        return (Model.Xs[self.dataset],
                numpy.array(Model.Ys[self.dataset]).astype(bool))
Example #5
    def train(self, X, Y):
        X = [
            TaggedDocument(tokens, [label])
            for tokens, label in zip(X, Y)
        ]
        model = Doc2Vec(
            workers=config('pericog', 'thread_count'),
            dm=1,
            dbow_words=1,
            dm_mean=0,
            dm_concat=0,
            dm_tag_count=1,
            hs=1,
            negative=0,
            size=config('tokens2vec', 'vector_size'),
            alpha=0.025,
            window=8,
            min_count=0,
            sample=1e-4,
            iter=10,
            max_vocab_size=None,
            batch_words=1000000,
            min_alpha=0.0001,
            seed=1,

            ### no documentation ###
            # docvecs=None,
            # docvecs_mapfile='',
            # trim_rule=None,
            # comment=None,
            documents=X,
        )
        model.save(self.path)
Example #6
def harden():
    upgrade()
    autoupgrade()
    util.debian_install("fail2ban")

    print "attempting to configure key-based auth for root..."
    import os.path
    localKey = open(os.path.expanduser("~/.ssh/id_rsa.pub")).read()
    util.append(localKey, "~/.ssh/authorized_keys")

    print "WARNING: YOU *MUST* have root configured to continue.  Type YES to confirm."
    confirm = None
    while confirm != "YES":
        confirm = raw_input()

    print "disabling password auth for root"
    util.config("PasswordAuthentication no", "/etc/ssh/sshd_config")

    run("service ssh restart")

    setupDeployUser()
    logwatch()

    firewall()  #this should be run last, since it can abort the SSH connection
Example #7
def install_sonar_runner_objc():
    "Install the sonar-runner command with support for analyzing objc projects"
    brew_install("sonar-runner")
    brew_install("xctool")
    install_oclint()
    util.pip_install("gcovr")
    version = run('sonar-runner -v | head -n 1 | tr -d "SonarQube Runner "')
    util.config("sonar_runner=%s" %
                version, "buildAgent/conf/buildAgent.properties")
Example #9
 def __init__(self, filepath, show_hidden=False, git=False):
     self._filepath = filepath
     self._show_hidden = show_hidden
     self._git = git and util.config("use_git")
     self._excluded = util.config("ignore_ext").split(",")
     self._ignore_case = util.config("ignore_case")
     self._ignore_space = util.config("ignore_space")
     if self._git:
         self._load_git()
     self._load_file()
Example #10
 def __init__( self, filepath, show_hidden=False, git=False ):
   self._filepath = filepath
   self._show_hidden = show_hidden
   self._git = git and util.config('use_git')
   #self._excluded = util.config('ignore_ext').split(',')
   ignore_ext = util.config('ignore_ext')
   if len(ignore_ext.strip())==0:
       self._excluded = []
   else:
       self._excluded = ignore_ext.split(',')
   self._ignore_case = util.config('ignore_case')
   self._ignore_space = util.config('ignore_space')
   if self._git:
     self._load_git()
   self._load_file()
Example #12
    def __init__(self):
        cfg = util.config()
        self.conn = sqlite3.connect(util.get_path(cfg["db"]["db_sqlite_file"]))
        logging.debug("Got connection to database")

        schema = open(util.get_path("db/db_schema.sql")).read()
        for statement in schema.split(";"):
            try:
                self.conn.execute(statement)
            except sqlite3.OperationalError as e:
                if statement.strip().startswith("--IGNORE_ERROR"):
                    logging.info(
                        "Ignoring error thrown by statement {}: {}".format(
                            statement, e))
                else:
                    raise e
        self.conn.commit()
        logging.debug("Done!")

        if self.get_db_version() == "1":
            logging.info("Got db version of 1, migrating to version 2")
            if self.migrate_genre():
                logging.info("It worked")
                self.set_db_version("2")
            else:
                logging.error("Failed to migrate genres")
Example #13
    def __init__(self):
        cpy.tools.buffet = BuffetTool(config("template_engine"))
        #set the output encoding
        self._cp_config["cpy.tools.encode.encoding"] = "utf-8"

        #make our stylesheet {divname: (top, left, size)}
        self.discs = {
            "top": (50, 650, 146),
            "topright": (230, 560, 146),
            "right": (345, 370, 146),
            "botright": (381, 140, 146),
            "botleft": (230, 0, 146),
            "left": (260, 25, 146),
            "topleft": (80, 75, 146)
        }
        styletemp = mako.template.Template(filename="templates/styles.mak")
        with open("static/styles.css", "w") as fout:
            fout.write(styletemp.render(discs=self.discs))

        #load the admin page. We have to import Admin after the config has
        #loaded or else it borks
        from admin import Admin
        self.admin = Admin()

        from team import TeamAdmin
        self.team_admin = TeamAdmin()

        from cherryblossom import BlogRoot
        self.blog = BlogRoot('/blog')
Example #14
def refresh_events(db: DbStore):
    config = util.config()["gen_events"]
    if not config["enable"]:
        logging.info("Skipping events as gen_events disbled")
        return

    logging.info("Refreshing events")

    last_event = db.latest_event()

    if last_event is not None:
        after = last_event["timestamp"]
        states = player_store.store().player_states_after_time_asc(after)
        logging.info("Processing events after {}({})".format(after, unix_to_iso(after)))
        initial_state = last_event
    else:
        logging.info("Processing all events (no existing events)")
        states = player_store.store().player_get_states_asc_timestamp()
        initial_state = {"state": {}}

    logging.info("Initial state for event gen: {}".format(initial_state.__str__()))
    logging.info("Num states to process = {}".format(len(states)))
    new_events = gen_events(initial_state, states)
    logging.info("Generated {} new events".format(len(new_events)))

    if len(new_events) > 0:
        for event in new_events:
            db.add_event(event)

    add_prev_track_id(db)

    logging.info("Deleting old states...")
    player_store.store().delete_states()
    logging.info("Done with gen_events")
Example #15
def fix_cgroups():
    """This works around the cgroup issue reported at http://blog.docker.io/2013/09/docker-can-now-run-within-docker/:
    It looks like the LXC tools cannot start nested containers if the devices control group is not in its own hierarchy. Check the content of /proc/1/cgroup: if devices is standing on a line on its own, you’re good. If you see that another control group is on the same line, Docker-in-Docker won’t work. The wrapper script will detect this situation and issue a warning. To work around the issue, you should stop all running containers, unmount all the control groups, and remount them one by one, each in its own hierarchy.
    """

    run("umount /cgroup")
    cgroups = ["perf_event","blkio","net_cls","freezer","cpuacct","cpu","cpuset","devices"]
    with settings(warn_only=True):
        for group in cgroups:
            run("mkdir /cgroup/%s" % group)
    
    run("sed -i '\,none        /cgroup        cgroup        defaults    0    0,d' /etc/fstab")

    for group in cgroups:
        util.config("{GROUP} /cgroup/{GROUP} cgroup rw,relatime,{GROUP},clone_children 0 0".format(GROUP=group),"/etc/fstab")
    print "you should probably powercycle after running this... mount -a doesn't seem to do the trick exactly"
Example #16
def run_export():
    if not config()["export"]["enable"]:
        logging.info("Export disabled, not running...")
        return

    logging.info("Enabled, running yoyoyo okokokok")

    os.chdir(get_path("upload"))

    prev_music = ""
    if os.path.exists("music.csv"):
        prev_music = open("music.csv", "r").read()

    db = DbStore()
    write_csv(db)
    if open("music.csv", "r").read() != prev_music:
        logging.info("music.csv changed so reuploading to github")
        # I know should use subprocess
        os.system("rm main.sqlite")
        os.system("cp ../main.sqlite main.sqlite")
        os.system('sqlite3 main.sqlite ".dump" > main.sql')
        os.system("git add main.sql music.csv")

        os.system("git commit -m \"Data upload at {}\"".format(
            datetime.datetime.now().isoformat()))
        os.system("git push -u origin master")
    else:
        logging.info("tracks.txt the same, no new music to upload")
Example #17
    def plot_confusion_matrix(self,
                              dataset="training",
                              normalize=True,
                              title='Confusion matrix',
                              cmap=plt.cm.Blues):
        """
        This function prints and plots the confusion matrix.
        Normalization can be applied by setting `normalize=True`.
        """
        cm = self.confusion_matrix(dataset)
        classes = config()["pitches"]

        if normalize:
            cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]

        plt.imshow(cm, interpolation='nearest', cmap=cmap)
        plt.title(title)
        plt.colorbar()
        tick_marks = np.arange(len(classes))
        plt.xticks(tick_marks, classes, rotation=45)
        plt.yticks(tick_marks, classes)

        fmt = '.2f' if normalize else 'd'
        thresh = cm.max() / 2.
        for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
            plt.text(j,
                     i,
                     format(cm[i, j], fmt),
                     horizontalalignment="center",
                     color="white" if cm[i, j] > thresh else "black")

        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')
        plt.show()
Example #18
def main():
    ARGS = args_parser.parse_args()
    print "job_dir: {}".format(ARGS.job_dir)
    config = util.config(ARGS.job_dir)
    
    estimator = model.estimator(config)
    
    train_spec = tf.estimator.TrainSpec(
        input.train_input_fn,
        max_steps=100
    )

    exporter = tf.estimator.FinalExporter(
        'estimator',
        input.json_serving_function,
        as_text=False  # change to true if you want to export the model as readable text
    )

    eval_spec = tf.estimator.EvalSpec(
        input.eval_input_fn,
        exporters=[exporter],
        name='estimator-eval',
        steps=100
    )
    
    tf.estimator.train_and_evaluate(
        estimator,
        train_spec,
        eval_spec
    )
Example #19
 def get_word2vec(self, token):
     if token in self.vectors:
         return self.vectors[token]
     elif config('glove', 'generate_missing'):
         return numpy.random.rand(self.size)
     else:
         return numpy.zeros(self.size)
Example #20
def get_cove(token, vectors, size=300):
    if token in vectors:
        return vectors[token]
    elif config('word2vec', 'generate_missing'):
        return numpy.random.rand(size)
    else:
        return numpy.zeros(size)
Example #21
def view(string, entities):
    response = requests.get(util.config("quoteData"))
    data = response.json()

    quoteNumber = 111

    for item in entities:
        if item["entity"] == "quote" or item["entity"] == "id" or item[
                "entity"] == "number":
            quoteNumber = item["utteranceText"].lower().strip()

    try:
        quote = data["quotes"][int(quoteNumber) - 1]
    except (IndexError, ValueError):
        quote = None

    if quote is None:
        return util.output(
            "end", "quote_doesnt_exist",
            util.translate("quote_doesnt_exist", {"ID": quoteNumber}))

    # TODO: quote side_text
    return util.output(
        "end", "quote",
        util.translate("quote", {
            "ID": quoteNumber,
            "author": quote["user"],
            "text": quote["text"]
        }))
Example #22
 def __init__(self, method_name) -> None:
     cfg = config(self._CFG_PATH)
     auth = tweepy.OAuthHandler(cfg["auth"]["consumer_key"],
                                cfg["auth"]["consumer_secret"])
     auth.set_access_token(cfg["auth"]["access_token"],
                           cfg["auth"]["access_token_secret"])
     self.client = tweepy.API(auth, wait_on_rate_limit=True)
     self.method: Callable = getattr(self.client, method_name)
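A hedged usage sketch for this wrapper: the class name (TweepyMethod here) and the config path are not shown on this page, so both are assumptions. wait_on_rate_limit=True means tweepy sleeps through rate-limit windows automatically.

# Hypothetical usage; "TweepyMethod" stands in for whatever the class is called.
fetcher = TweepyMethod("user_timeline")
for tweet in fetcher.method(screen_name="example", count=5):
    print(tweet.text)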
Example #23
def install_teamcitify():
    util.install_python3()
    util.install_python3_pip()
    sudo("%s uninstall teamcitify" % util.PIP3)
    run("%s install https://github.com/drewcrawford/teamcitify/zipball/master" %
        util.PIP3)
    # figure out the teamcitify version
    version = None
    import re
    try:
        version = run("%s freeze" % util.PIP3)
    except SystemExit:
        pass
    if version:
        version = re.search("teamcitify==(.*)", version).group(1)
        util.config("teamcitify=%s" %
                    version, "buildAgent/conf/buildAgent.properties")
Example #24
 def factory(self,
             model,
             dataset=None,
             properties='True',
             input_model=None):
     model = str(config(self.name, model))
     Class = getattr(__import__(model, fromlist=[model]), model)
     return Class(dataset, properties, input_model, self.verbose)
Example #25
def get_token2index_map(setting):
    path = "derived_data/" + setting_string(**setting) + "__token2index_map.json"
    if not force_gen and os.path.isfile(path):
        with open(path) as f:
            return json.load(f)
    else:
        token_counts = get_token_counts(setting)

        min_count = 1
        if config('min_token_count') is not None:
            min_count = config('min_token_count')

        # Build the vocabulary as a list (not a lazy map/filter) so len()
        # works under Python 3 as well.
        frequent_tokens = [token for token, count in token_counts.items()
                           if count >= min_count]
        token2index_map = {token: index
                           for index, token in enumerate(sorted(frequent_tokens))}

        with open(path, "w") as f:
            json.dump(token2index_map, f)

        return token2index_map
Example #27
 def __init__(self):
     self._builder = gtk.Builder()
     self._builder.add_from_file(
         os.path.join(os.path.dirname(__file__), "config.glade"))
     self._window = self._builder.get_object('configwindow')
     self._use_git = self._builder.get_object('use-git')
     self._ignore_ext = self._builder.get_object('ignore-ext')
     self._ignore_case = self._builder.get_object('ignore-case')
     self._ignore_space = self._builder.get_object('ignore-space')
     self._ignore_ext.set_text(util.config('ignore_ext'))
     self._use_git.set_active(util.config('use_git'))
     self._ignore_case.set_active(util.config('ignore_case'))
     self._ignore_space.set_active(util.config('ignore_space'))
     self._ignore_ext.connect('key-release-event', self.on_ignore_ext)
     self._use_git.connect('toggled', self.on_use_git)
     self._ignore_case.connect('toggled', self.on_ignore_case)
     self._ignore_space.connect('toggled', self.on_ignore_space)
     self._builder.get_object('done').connect('clicked', self.on_click)
     self._window.show_all()
Example #28
def main(device=torch.device('cuda:0')):
    """Print performance metrics for model at specified epoch."""
    # Data loaders
    pathname = "data/nyu.zip"
    tr_loader, va_loader, te_loader = getTrainingValidationTestingData(pathname,
                                                                       batch_size=util.config("unet.batch_size"))

    # Model
    model = Net()

    # define loss function
    # criterion = torch.nn.L1Loss()

    # Attempts to restore the latest checkpoint if exists
    print("Loading unet...")
    model, start_epoch, stats = util.restore_checkpoint(model, util.config("unet.checkpoint"))
    acc, loss = util.evaluate_model(model, te_loader, device)
    # axes = util.make_training_plot()
    print(f'Test Accuracy: {acc}')
    print(f'Test Loss: {loss}')
Example #29
def connect():
    """Connect to the PostgreSQL database server."""

    # connection variable
    conn = None
    fetched_df = pd.DataFrame()

    try:
        print("Connecting to the PostgreSQL database...")
        params = config()  # read connection parameters
        conn = psycopg2.connect(**params)  # connect to the PostgreSQL server
        cur = conn.cursor()  # create a cursor

        # execute a statement
        print('PostgreSQL database version: ')
        cur.execute('SELECT version();')

        # display the PostgreSQL database version
        db_version = cur.fetchone()
        print(db_version)
        print("type(fetchone) : ", type(db_version))

        # fetch some more stuff
        print("SELECT * FROM application;\n")
        cur.execute("SELECT * FROM application LIMIT 20;")

        # display the results
        competition = cur.fetchall()
        for row in competition:
            print(row)

        # Extract columns and convert to df:
        columns = [desc[0] for desc in cur.description]
        fetched_df = pd.DataFrame(competition, columns=columns)
        if len(fetched_df) == 0:
            print("EMPTY!")

        print("type(fetchall) : ", type(competition))
        print("\nOutput query: \n", fetched_df)

        # close the communication with the PostgreSQL
        cur.close()

    except (Exception, psycopg2.DatabaseError) as error:
        print(error)

    finally:
        # verify connection is not empty
        if conn is not None:
            conn.close()
            print("\n Database connection closed. ")

    return fetched_df
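The config() call above follows the widespread psycopg2 tutorial pattern of reading connection parameters from an INI file. A minimal sketch of that pattern, assuming a database.ini with a [postgresql] section (file name and section are conventions, not shown in the original):

from configparser import ConfigParser

def config(filename="database.ini", section="postgresql"):
    """Return the connection parameters in `section` as a dict."""
    parser = ConfigParser()
    parser.read(filename)
    if not parser.has_section(section):
        raise Exception("Section {0} not found in {1}".format(section, filename))
    return dict(parser.items(section))  # e.g. host, database, user, password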
Example #30
def enrich_ids_with_authors(setting):
    f = open("derived_data/" + setting_string(**setting) + "__ids")
    f2 = open("derived_data/" + setting_string(**setting) + "__ids_with_authors", "w")

    findAuthorStmt = "SELECT display_name, zbmath_id FROM authorship WHERE document=%(document)s AND rank<=%(maxrank)s"
    max_author_rank = 2
    if config("max_author_rank") is not None:
        max_author_rank = config("max_author_rank")

    for line in f:
        x = line.split(";")
        ids = {}
        if setting['granularity'] == 'paragraphs':
            ids['doc'] = x[0].strip()
            ids['par'] = x[1].strip()
        elif setting['granularity'] == 'documents':
            ids['doc'] = x[0].strip()
        else:
            raise ValueError("granularity must be either 'paragraphs' or 'documents'")

        authors = []
        cursor().execute(findAuthorStmt, {"document": ids['doc'], "maxrank": max_author_rank})
        for row in cursor():
            display_name = row[0]
            zbmath_id = row[1]
            authors.append((display_name, zbmath_id))

        author_str = ";".join(
            "{}({})".format(name, zbmath_id) for name, zbmath_id in authors
        ) + (";" * (max_author_rank - len(authors)))
        if setting['granularity'] == 'paragraphs':
            newline = ids['doc'] + ";" + ids['par'] + ";" + author_str
        elif setting['granularity'] == 'documents':
            newline = ids['doc'] + ";" + author_str
        else:
            raise ValueError("granularity must be either 'paragraphs' or 'documents'")

        f2.write(newline + "\n")

    f.close()
    f2.close()
Example #31
    def validate(self):
        validation = {}
        for instrument in config()["instruments"]:
            X = self.validation_input_data(instrument).values
            Y = self.validation_output_data().values

            scores = self.model().evaluate(X, Y)
            for i in range(1, len(self._model.metrics_names)):
                print("\nResults validating with training data: %s: %.2f%%" %
                      (self._model.metrics_names[i], scores[i] * 100))

            print(self._model.metrics_names)
            validation[instrument] = (self._model.metrics_names, scores)
        return validation
Example #32
    def delay(self, response):
        """Based on the throtteling information of the response, execute a delay before
        fetching additional data.

        Args:
            response (HTTP Response): Result of a GraphQL request to Shopify
        """
        max_cost_points = int(config()["general"]["max_cost_points"])
        leak_rate = int(config()["general"]["leak_rate"])

        try:
            cost = loads(response.text)['extensions']['cost']
            remaining_cost = cost['throttleStatus']['currentlyAvailable']
            actual_query_cost = cost['actualQueryCost']
        except KeyError as e:
            self.log.error("Can't extract query cost from JSON response:\n{}, error: {}"
                           .format(response.text, e))
            return  # without cost data there is nothing to throttle on

        if remaining_cost < actual_query_cost:
            time_to_sleep = ceil(
                (max_cost_points - remaining_cost) / leak_rate)
            self.log.info('Delaying next API request for {} seconds to avoid blocking'\
                .format(time_to_sleep))
            sleep(time_to_sleep)
Example #33
    def split_song(self):
        trainer = Trainer()
        trainer.load()

        longFileProfiler = LongFileProfiler(self.song_file)
        profiles = longFileProfiler.get_profile()

        chords = []

        for profile in profiles:
            X = np.array([profile])
            prediction = trainer.model().predict(X)
            chord_index = np.argmax(prediction)

            chords.append(config()["pitches"][chord_index])
        return chords
Example #34
def view_random(string, entities):

    response = requests.get(util.config("quoteData"))
    data = response.json()

    quoteExists = False

    while not quoteExists:
        quoteNumber = randint(0, len(data["quotes"]) - 1)
        quoteExists = data["quotes"][quoteNumber]

    quote = data["quotes"][quoteNumber]

    return util.output(
        "end", "quote",
        util.translate("quote", {
            "ID": quoteNumber,
            "author": quote["user"],
            "text": quote["text"]
        }))
Example #35
 def train(self, X, Y):
     random_forest = RandomForestClassifier(
         n_jobs=config('pericog', 'thread_count'),
         n_estimators=100,  # number of trees
         criterion='gini',  # 'gini' or 'entropy'
         verbose=1,
         max_features='sqrt',  # 'sqrt', 'log2', or a fraction of the features to consider at each split
         class_weight=None,
         max_depth=None,
         min_samples_split=2,
         min_samples_leaf=1,
         min_weight_fraction_leaf=0.0,
         max_leaf_nodes=None,
         min_impurity_decrease=0.0,
         bootstrap=True,
         oob_score=False,
         random_state=None,
         warm_start=False,
     )
     random_forest.fit(X, Y)
     joblib.dump(random_forest, self.path)
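A hedged usage sketch for the persisted model: the path and the shape of the new feature matrix are assumptions (the original stores it at self.path), and n_features_in_ requires scikit-learn 0.24 or newer:

import joblib
import numpy as np

# Reload the forest saved by train() above; "model.pkl" stands in for self.path.
random_forest = joblib.load("model.pkl")
X_new = np.zeros((1, random_forest.n_features_in_))  # placeholder sample
print(random_forest.predict(X_new))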
Example #36
def create(string, entities):
    content = None

    if ("reply" in util.getQuery()["extra"]):
        content = util.getQuery()["extra"]["reply"]["content"]

    if not content:
        return util.output("end", "empty_paste", util.translate("empty_paste"))

    query = {"apikey": util.config("apikey")}
    payload = {
        "text": content,
        "title": "Automatic Upload",
        "name": "Guinevere"
    }
    request = requests.post("https://paste.gemwire.uk/api/create",
                            params=query,
                            data=payload)

    url = request.text.strip()

    return util.output("end", "paste_made",
                       util.translate("paste_made", {"paste": url}))
Example #37
def view_all(string, entities):

    query = {"apikey": util.config("apikey")}
    response = requests.get("https://paste.gemwire.uk/api/recent",
                            params=query)
    pastes = response.json()

    result = ""

    for element in pastes:
        result += util.translate(
            "paste_list_element", {
                "id": element["pid"],
                "title": element["title"],
                "user": element["name"]
            })

    return util.output(
        "end", "paste_list",
        util.translate("paste_list", {
            "count": len(list),
            "result": result
        }))
Example #38
    def __init__(self):
        cfg = util.config()
        self.conn = sqlite3.connect(
            util.get_path(cfg["db"]["player_sqlite_file"]))
        self.conn.execute("""
        create table if not exists player (
          timestamp          int,
          api_timestamp      real,
          track_id           text,
          progress_ms        int,
          duration_ms        int,
          is_playing         int,
          repeat             int,
          shuffle_state      int,
          device_id          text,
          device_active      int,
          volume_percent     int,
          is_private_session int,
          device_type        text,
          device_name        text
          ); 
        """)

        self.conn.commit()
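For illustration, a hedged sketch of recording one state row in this table. The class name (PlayerStore) is assumed, the values are placeholders, and the column order follows the create-table statement above:

# Sketch: write one player state; 14 placeholders match the 14 columns.
store = PlayerStore()  # class name assumed
store.conn.execute(
    "insert into player values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)",
    (1700000000, 1700000000.5, "track123", 53000, 201000, 1, 0, 0,
     "dev1", 1, 65, 0, "Computer", "my-laptop"))
store.conn.commit()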
Example #39
    print('train sensitivity:', sensitivity(y_train, train_predict),
          'train specificity:', specificity(y_train, train_predict))
    print('test sensitivity:', sensitivity(y_test, test_predict),
          'test specificity:', specificity(y_test, test_predict))
    print('f1_score:', F1(y_test, test_predict))
    print('acc:', accuracy_score(y_test, test_predict))

    roc_auc = function_lpp_new.draw_roc(y_test, test_predict_proba)
    print('roc:', roc_auc)

    print('over')


if __name__ == '__main__':

    params = util.config()
    combine = list(zip(params['ecg_path_all'], params['pcg_path_all']))
    combine_index = 0
    for lll, j in enumerate(combine):

        params['ecg_path'] = j[0]
        params['pcg_path'] = j[1]

        scaler = MinMaxScaler()

        save_true = []
        save_predict = []
        y_predict_label = []
        result_eva = []
        tmp = [
            'sensitivity:', 'specificity:', 'precision', 'f1-score:',
        ]
Example #40
def clean_mysql(con):
    """Remove SystemEvents matching regexps in trigger table"""
    total = 0
    for id, from_host, sys_log_tag, message in triggers(con):
        num_of_rows = clean_system_events(from_host, sys_log_tag, message)
        update_trigger_with_delete_stats(id, num_of_rows)
        total += num_of_rows
    logger('Deleted %s SystemEvents matched by triggers.' % total)


#
# Main
#


if __name__ == "__main__":
    only_allow_one_instance('signer_trigger_clean.pid')

    cnf = config('signer.cfg', os.path.dirname(os.path.abspath(__file__)))
    engine = create_engine(
        cnf.DATABASE,
        convert_unicode=True, pool_size=50, pool_recycle=3600
    )

    con = engine.connect()
    clean_mysql(con)
    con.close()
Example #41
 def __init__(self):
     self.conf = config(self)
     self.conn = None
Example #42
 def on_ignore_ext(self, widget, event):
   util.config('ignore_ext', self._ignore_ext.get_text())
Example #43
 def on_use_git(self, widget):
   util.config('use_git', self._use_git.get_active())
Example #44
 def on_ignore_case(self, widget):
   util.config('ignore_case', self._ignore_case.get_active())
Example #45
 def on_ignore_space(self, widget):
   util.config('ignore_space', self._ignore_space.get_active())