def run_findlr_func(exp):

    config = get_config(experiment_name=exp.name,
                        lr_schedule_type=exp.lr_schedule_type,
                        lr=exp.lr,
                        loss_types=exp.loss_types,
                        loss_coefs=exp.loss_coefs,
                        cat_weights=exp.cat_weights,
                        attr_weights=exp.attr_weights,
                        batch_size=exp.batch_size,
                        num_epochs=exp.num_epochs,
                        mixup_aug=exp.mixup_aug,
                        mixup_aug_beta=exp.mixup_aug_beta)

    train_dataset = FashionDataset(config.dataset_path,
                                   'training',
                                   h=config.h,
                                   w=config.w,
                                   mean=config.mean,
                                   train_pct=1.,
                                   valid_pct=0.15)
    net = exp.net(train_dataset.num_classes, **exp.net_kwargs)
    optimizer = Adam(net.parameters(), weight_decay=0)
    device = torch.device("cuda:0")
    find_lr(net, optimizer, train_dataset, config)

    del net
    del optimizer
    del device

    torch.cuda.empty_cache()
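For reference, a minimal sketch of the experiment object this runner expects. Every field name is inferred from the calls above; SimpleNamespace and MyNet are placeholders for whatever experiment and network classes the project actually uses.

from types import SimpleNamespace

# Hypothetical experiment spec; all fields inferred from run_findlr_func.
exp = SimpleNamespace(
    name='findlr_baseline',
    net=MyNet,                 # placeholder: any factory taking (num_classes, **kwargs)
    net_kwargs={},
    lr_schedule_type='one_cycle',
    lr=1e-3,
    loss_types=['ce'],
    loss_coefs=[1.0],
    cat_weights=None,
    attr_weights=None,
    batch_size=64,
    num_epochs=1,
    mixup_aug=False,
    mixup_aug_beta=0.2)

run_findlr_func(exp)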
Example #2
def start(config, port=6000):
    app = App(port, **get_config(config))
    app.start()
    print("Aplicação iniciada. Pressione h para ajuda.")

    while app.listening:
        ipt = input(">").split(" ", 1)
        cmd = ipt[0]
        data = ipt[1] if len(ipt) > 1 else None

        cmd = cmd.lower()
        if cmd in ["send", "s"]:
            if not data or " " not in data:
                print("Usage: send <recipient> <message>")
                continue
            to, msg = data.split(" ", 1)
            app.sendto(to, msg)
        elif cmd in ["messages", "m"]:
            print(app.socket.msg_queue.queue)
        elif cmd in ["generate", "g"]:
            app.generate_token()
        elif cmd in ["destroy", "d"]:
            app.destroy_token()
        elif cmd in ["help", "h"]:
            app.help()
        elif cmd in ["quit", "q"]:
            app.listening = False
        else:
            print("Comando desconhecido")

    print("Finalizando...")
    app.join()
    print("Aplicação finalizada")
def run_eval_experiment_func(exp):
    config = get_config(experiment_name=exp.name,
                        lr_schedule_type=exp.lr_schedule_type,
                        loss_types=exp.loss_types,
                        loss_coefs=exp.loss_coefs,
                        cat_weights=exp.cat_weights,
                        attr_weights=exp.attr_weights,
                        batch_size=1,
                        num_epochs=exp.num_epochs)

    valid_dataset = FashionDataset(config.dataset_path,
                                   'validation',
                                   h=config.h,
                                   w=config.w,
                                   mean=config.mean,
                                   train_pct=0.8,
                                   valid_pct=0.2)

    net = exp.net(valid_dataset.num_classes, **exp.net_kwargs)
    load_model(config, net, "last")

    device = torch.device("cuda")
    net.to(device)

    results = eval_model(net,
                         valid_dataset,
                         config,
                         save_examples_flag=True,
                         num_examples=50,
                         save_path=os.path.join(config.experiment_dir,
                                                'examples'))
def test_makes_all_files(temp_output_dir):
    input_directory = get_config()[INPUT_DIR_KEY]

    infiles = [x for x in os.listdir(input_directory) if x[0] == '@']
    outfiles = os.listdir(temp_output_dir)

    assert len(infiles) == len(outfiles)
Example #5
def get_id_from_name(name: str, setting: str = 'DEFAULT') -> int:
    logger = py_logging.create_logger(
        'get_id_from_name', '{}get_id_from_name.log'.format(
            os.path.dirname(os.path.realpath(__file__)) + os.sep))
    config = get_config()

    html_text = requests.get(config[setting]['NameToIdBaseUrl'] +
                             urllib.parse.quote_plus(name)).text
    logger.info("Sent request off to find the id for: {}".format(name))

    soup = BeautifulSoup(html_text, 'lxml')

    possible_results = soup.find('table', {"class": "table"}) \
                           .find_all('tr')

    for possible_result in possible_results:
        cells = possible_result.find_all('td')
        if len(cells) > 1 and cells[1].getText().lower() == name.lower():
            # The first cell holds the item id, the second the item name.
            return int(cells[0].getText())

    return None
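A usage sketch, assuming the page behind NameToIdBaseUrl returns a results table whose second column holds the item name (which is what the parsing above relies on); the item name is a placeholder:

item_id = get_id_from_name('Abyssal whip')
if item_id is None:
    print('No exact match found')
else:
    print('Item id:', item_id)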
def test_TFIDF_model():
    input_directory = get_config()[INPUT_DIR_KEY]

    infiles = [x for x in os.listdir(input_directory) if x[0] == '@']

    test_file_path = os.path.join(input_directory, infiles[0])

    with open(test_file_path, 'r') as file:
        in_data = json.loads(file.read())

    df = pd.DataFrame(in_data, columns=['tweet', 'date', 'embedding'])

    train, test = train_test_split(df['tweet'],
                                   test_size=0.4, random_state=1)

    # Now split the training data into the simulated term part (the user's
    # tweets) and the document part (other users' tweets). This just gets the
    # array shapes right. In non-unit-test scenarios, 'context' must come
    # from other users.
    train_doc, train_context = train_test_split(train,
                                                test_size=0.5, random_state=1)

    model = TFIDFModel(use_context=True)

    model.characterize(train_doc, train_context)

    sim_scores = model.similarity_score(test)

    assert len(test) == len(sim_scores)
Example #7
def run_pgs_rollout(config, **kwargs):
    if config is None:
        config = get_config()
    for name, val in kwargs.items():
        setattr(config, name, val)
    config.log_path = config.exp_name
    pgs_simulate(config)
Example #8
    def __init__(self, event_q, shutdown_command='Stop'):

        self.logger = log_handler.get_log_handler('server_log.txt', 'info',
                                                  'door.EventHandler')

        self.logger.info('Starting: EventHandler')

        #Open config file
        config = get_config.get_config(CONFIG_FILE_NAME)

        self.event_logfile = config.get('Logging', 'event_logfile')

        self.event_queue = event_q
        self.shutdown_cmd = shutdown_command

        self.host = config.get('Logging', 'sql_server')
        self.user = config.get('Logging', 'sql_user')
        self.pwd = config.get('Logging', 'sql_pwd')
        self.db = config.get('Logging', 'sql_db')

        #self.mysql_conn = pymysql.connect(host=host, user=user, passwd=pwd, db=db)
        #self.mysql_cursor = self.mysql_conn.cursor()

        self.stop_event = threading.Event()

        #Start thread to process input events
        self.process_events_thread = threading.Thread(
            name='process_events', target=self.__process_events)

        self.process_events_thread.start()
Example #9
File: admin.py Project: dudarev/osmtweet
    def TweetHandler(self):
        """tweeting
        does not tweet from localhost"""
        self.response.out.write('<br/><br/>Tweeting<br/>')
        self.response.out.write('this info will be tweeted:<br/>')
        # oldest non-tweeted and prepared
        oldest_changeset = Changeset.all().order('created_at') \
            .filter('is_tweeted =', False).filter('is_prepared =', True).fetch(1)
        if not oldest_changeset:
            self.response.out.write('nothing to tweet')
            return
        else:
            c = oldest_changeset[0]
        
        config = get_config()

        # do not tweet from localhost
        if 'localhost' not in self.request.url:
            auth = tweepy.OAuthHandler(config["consumer_key"], config["consumer_secret"])
            auth_data = OAuthAccessToken.all().filter('specifier =', config["twitter_username"]).fetch(1)[0]
            auth.set_access_token(auth_data.oauth_token, auth_data.oauth_token_secret)
            self.response.out.write('<br/>tweeting with oauth:<br/>')
            api = tweepy.API(auth)
            self.response.out.write("id: %d" % c.id)
            self.response.out.write("user: %s" % c.user)
            self.response.out.write("comment: %s" % c.comment)
            self.response.out.write("tweet: %s" % c.tweet)
            try:
                api.update_status(c.tweet)
            except tweepy.error.TweepError as e:
                self.response.out.write('failed: %s' % e.reason)
                if "Status is a duplicate" in e.reason:
                    c.is_tweeted = True
                    c.put()
                return
Example #10
File: main.py Project: dudarev/osmtweet
 def get(self):
     changesets = Changeset.all().order('-created_at').fetch(20)
     config = get_config()
     options = {'config': config, 'changesets': changesets}
     path = os.path.join(os.path.dirname(__file__), 'templates/rss.html')
     page = template.render(path, options)
     self.response.out.write(page)
Example #11
 async def neko(self, ctx, *args):
     """
     .neko [tag] - shows image from nekos.life.
     ONLY 1 TAG!
     Available tags: 'tickle', 'classic', 'ngif', 'erofeet', 'meow', 'erok', 'poke', 'les', 'hololewd', 'lewdk',
     'keta', 'feetg','nsfw_neko_gif', 'eroyuri', 'kiss', '8ball', 'kuni', 't**s', 'pussy_jpg', 'cum_jpg', 'pussy',
     'lewdkemo','lizard', 'slap', 'lewd', 'cum', 'cuddle', 'spank', 'smallboobs', 'goose', 'Random_hentai_gif',
     'avatar','fox_girl', 'nsfw_avatar', 'hug', 'gecg', 'boobs', 'pat', 'feet', 'smug', 'kemonomimi', 'solog',
     'holo', 'wallpaper', 'bj', 'woof', 'yuri', 'trap', 'anal', 'baka', 'b*****b', 'holoero', 'feed', 'neko', 'gasm',
     'hentai', 'futanari', 'ero', 'solo', 'waifu', 'pwankg', 'eron', 'erokemo'
     """
     if ctx.message.server is not None:
         server = get_server(ctx.message.server)
         nsfw = get_channel(get_config()['token'],
                            ctx.message.channel.id)['nsfw']
     else:
         server = {'sfw': False, 'nsfw_channels': []}
         nsfw = True
     if ((not server['sfw'])
             and nsfw) or ctx.message.channel.id in server['nsfw_channels']:
         await self.client.send_typing(
             discord.Object(id=ctx.message.channel.id))
         await self.neko_wrap(ctx, args)
     else:
         await self.client.add_reaction(ctx.message, '❌')
Example #12
def train_lstm(x_array, y_array, vocab_size, embedding_dim, units, batch_size,
               epochs):
    lstm_model = lstm.BaseLine(vocab_size, embedding_dim, units,
                               128).build_model()
    # NOTE: relies on an `optimizer` defined at module level (not shown in
    # this snippet), e.g. a tf.keras optimizer.
    checkpoint = tf.train.Checkpoint(optimizer=optimizer,
                                     lstm_model=lstm_model)
    train(x_array, y_array, batch_size, epochs, checkpoint,
          get_config.get_config()['model_data'], lstm_model)
Example #13
def create_logfile_path(name, logdir=None):
    now = datetime.now().strftime('%Y%b%d_%H%M%S')
    logname = '{}_{}.log'.format(name, now)
    if not logdir:
        logdir = get_config("logdir")
    logfile = os.path.join(logdir, logname)

    return logfile
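For example, assuming get_config("logdir") resolves to a directory like /tmp/logs (the name and timestamp below are illustrative):

path = create_logfile_path('crawler', logdir='/tmp/logs')
# e.g. '/tmp/logs/crawler_2019Jun03_142530.log'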
Example #14
def run_pgs_eval(config, **kwargs):
    if config is None:
        config = get_config()
    for name, val in kwargs.items():
        setattr(config, name, val)
    run_pgs_rollout(config)
    print('rollout done')
    run_compare(config)
    print('compare done')
Example #15
    def write_to_stat(self):
        stat_dir = get_config.get_config()["stat_dir"]
        if not os.path.exists(stat_dir):
            os.mkdir(stat_dir)

        stat_file = os.path.join(stat_dir, uni_util.get_file_name("stat_flow"))
        with open(stat_file, "w") as f_stat:
            f_stat.write(uni_util.get_crawl_time() + "\t" + self.module_name +
                         "\t" + str(self.lines_num))
Example #16
def load_test(path):
    x_array = []
    with open(path) as f:
        f.readline()
        for line in f:
            x_array.append([int(i) for i in line.split(' ')])
    x_array = tf.keras.preprocessing.sequence.pad_sequences(
        x_array, maxlen=get_config.get_config()['max_inp'], padding='post')
    return x_array
Example #17
def get_defaults(transport_name):
    """Get defaults from config file"""
    json_cfg = get_config()

    return {
        'host': json_cfg['host'],
        'port': json_cfg['transports'][transport_name]['port'],
        'login': json_cfg['transports'][transport_name]['login'],
        'password': json_cfg['transports'][transport_name]['password']
    }
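A sketch of the config shape get_defaults() assumes; the keys follow the lookups above, while the transport name and values are placeholders:

json_cfg = {
    "host": "example.org",
    "transports": {
        "smtp": {"port": 25, "login": "user", "password": "secret"}
    }
}
# get_defaults("smtp") would then return:
# {'host': 'example.org', 'port': 25, 'login': 'user', 'password': 'secret'}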
 def __init__(self):
     # GET CONNECT STRING -> TARGET DATABASE
     self.connect_string = get_config().target_conn()
     try:
         self.db_connection = psycopg2.connect(self.connect_string)
         self.db_cursor = self.db_connection.cursor()
         lw.log.debug("Connected to target Database: " +
                      self.connect_string)
     except Exception as e:
         lw.log.error("Error connecting to Database: " + str(e))
    def __init__(self):
        """
        Load the model and read the config
        """

        self.conf = get_config.get_config()

        self.spark, self.sc = utilities.get_spark_session("classification_app")
        self.pipelineModel = PipelineModel.load(self.conf["model_path"])
        self.label_list = self.pipelineModel.stages[3].labels
@pytest.fixture
def temp_output_dir(tmpdir_factory):
    config = get_config()

    tmpdir = tmpdir_factory.mktemp('output')

    filter_tweets_from_directories(config[INPUT_DIR_KEY], tmpdir,
                                   config['regexp_tweet_filters'],
                                   int(config['min_tweet_characters']))

    return tmpdir
Example #21
def load_data(path):
    y_array = []
    x_array = []
    with open(path) as f:
        f.readline()
        for line in f:
            line = line.split('\t')
            y_array.append(int(line[0]))
            x_array.append([int(i) for i in line[1].split(' ')])
    x_array = tf.keras.preprocessing.sequence.pad_sequences(
        x_array, maxlen=get_config.get_config()['max_inp'], padding='post')
    return x_array, np.array(y_array)
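load_data() expects a header line followed by tab-separated records: an integer label, a tab, then space-separated token ids. A hypothetical input file:

label	tokens
1	12 7 431 9
0	88 3 15 201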
Example #22
def color(data, template, logs_dir="/data2/ben/HE/data/result/color_logs/"):
    import tensorflow.compat.v1 as tf
    tf.reset_default_graph()
    sess = tf.Session()
    image_dist = []
    is_train = False
    #print("FLAGS",tf.flags.FLAGS)
    #print("FLAGS.logs_dir==============",tf.flags.FLAGS.logs_dir)
    config = get_config(FLAGS, is_train)
    dist = DCGMM(sess, config, "DCGMM", is_train)

    db_tmpl = SampleProvider("Template_dataset", template, config.fileformat,
                             config.image_options, is_train)

    mu_tmpl = 0
    std_tmpl = 0
    N = 0
    while True:
        X = db_tmpl.DrawSample(config.batch_size)

        if len(X) == 0:
            break
        X_hsd = utils.RGB2HSD(X / 255.0)

        mu, std, gamma = dist.deploy(X_hsd)
        mu = np.asarray(mu)
        mu = np.swapaxes(mu, 1, 2)  # -> dim: [ClusterNo x 1 x 3]
        std = np.asarray(std)
        std = np.swapaxes(std, 1, 2)  # -> dim: [ClusterNo x 1 x 3]

        N = N + 1
        mu_tmpl = (N - 1) / N * mu_tmpl + 1 / N * mu
        std_tmpl = (N - 1) / N * std_tmpl + 1 / N * std

    db = SampleProvider("Test_dataset", data, config.fileformat,
                        config.image_options, is_train)

    while True:
        X = db.DrawSample(config.batch_size)
        if len(X) == 0:
            break

        X_hsd = utils.RGB2HSD(X / 255.0)
        mu, std, pi = dist.deploy(X_hsd)
        mu = np.asarray(mu)
        mu = np.swapaxes(mu, 1, 2)  # -> dim: [ClusterNo x 1 x 3]
        std = np.asarray(std)
        std = np.swapaxes(std, 1, 2)  # -> dim: [ClusterNo x 1 x 3]
        X_conv = image_dist_transform(X_hsd, mu, std, pi, mu_tmpl, std_tmpl,
                                      config.im_size, config.ClusterNo)
        image_dist.append(X_conv)
    # misc.imsave('/data2/ben/HE/data/result/color_tmp/color_norma.png', np.squeeze(X_conv))

    return image_dist
Example #23
def connect():
    conn = None
    try:
        params = get_config()

        print('Connecting to the PostgreSQL database...')
        conn = psycopg2.connect(**params)
    except (Exception, psycopg2.DatabaseError) as error:
        print(error)
        sys.exit(1)
    print("Connection successfull!")
    return conn
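A minimal sketch of what get_config() must return here, since the mapping is splatted straight into psycopg2.connect (all values are placeholders):

params = {
    'host': 'localhost',
    'port': 5432,
    'dbname': 'mydb',
    'user': 'postgres',
    'password': 'secret',
}
conn = psycopg2.connect(**params)  # equivalent to the call inside connect()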
Example #24
    def start(self):
        '''
        This method starts MATS.
        '''

        print('Using ' + MatsController.__name__ + ' as controller.')
        print('Loading config from "' + self.config_file + '"...', end='')
        # get_config makes sure that the config makes sense. More details in get_config.py
        self.config = get_config(self.config_file)
        self.marionette_port = self.config['Marionette']['port']
        print('OK')

        print('Starting Firefox/Nightly from "' + self.config['Firefox']['binary'] +
              '" with Marionette on port ' + str(self.marionette_port) + '.')
        self.FirefoxThread = FirefoxThread(self.config['Firefox']['binary'], self.marionette_port)
        self.FirefoxThread.start()

        print('Creating controller')
        pid = self.FirefoxThread.getPID()  # this call blocks until the PID is available from FirefoxThread
        self.controller = MatsController(pid)

        print('Starting controller')
        self.controller.start()
        self.controller.wait_for_ready()

        print('Waiting for Marionette port to open (' + str(self.marionette_port) + ')')
        portReady = self.FirefoxThread.waitForMarionettePortOpenReady(self.config['Marionette']['port_timeout'])
        if portReady:
            print('Marionette port open')
        else:
            print('Error: timeout, shutting down MATS')
            self.controller.stop()
            self.FirefoxThread.stop()
            return

        # TODO: remove line below once https://bugzilla.mozilla.org/show_bug.cgi?id=753273 is fixed
        # sleep(10)

        try:
            print('Starting Marionette')
            self.marionette = Marionette('localhost', self.marionette_port)
            # TODO: move starting session and navigation to separate methods
            print('Starting session')
            sleep(5)  # TODO: temporary workaround for https://bugzilla.mozilla.org/show_bug.cgi?id=757078
            self.marionette_session = self.marionette.start_session()
            print('Navigating to ' + self.url)
            print(self.marionette.navigate(self.url))
        except Exception as e:
            print('Error starting Marionette')
            print(e)
            self.controller.stop()
            self.FirefoxThread.stop()

        print('MATS up and running. Waiting until Firefox/Nightly stops.')
Example #25
def get_twitter_auth_list():
    twitter_auth_list = []
    config = get_config()
    keys_list = config["keys"]

    for keys in keys_list:
        keys_arr = keys.strip().split(",")
        consumer_key = keys_arr[CONSUMER_KEY_INDEX]
        consumer_secret = keys_arr[CONSUMER_SECRET_INDEX]
        access_key = keys_arr[ACCESS_TOKEN_INDEX]
        access_secret = keys_arr[ACCESS_SECRET_INDEX]

        twitter_auth_list.append(
            Twitter(auth=OAuth(access_key, access_secret, consumer_key,
                               consumer_secret)))

    return twitter_auth_list
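The config's "keys" entry is assumed to be a list of comma-separated credential strings, with the *_INDEX constants giving each field's position. A hypothetical example:

config = {
    "keys": [
        "my_consumer_key,my_consumer_secret,my_access_token,my_access_secret",
    ]
}
# With CONSUMER_KEY_INDEX = 0, CONSUMER_SECRET_INDEX = 1,
# ACCESS_TOKEN_INDEX = 2 and ACCESS_SECRET_INDEX = 3, each string
# splits into one set of Twitter credentials.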
Example #26
def is_item_name_in_database(name: str, session_lock: multiprocessing.Lock) -> bool:
    config = get_config()
    engine = create_engine(
        '{}:///{}{}'.format(
            config['DEFAULT']['DatabaseType'],
            "C:\\Anthony\\Programs\\runescape-grand-exchange-data-analytics\\database_services\\database\\",
            config['DEFAULT']['DatabaseName']))
    Session = sessionmaker(bind=engine)

    logger = py_logging.create_logger(
        "item_name_in_database", '{}in_database.log'.format(os.path.dirname(os.path.realpath(__file__)) + os.sep))
    with session_lock:
        session = Session()
        count = session.query(Item).filter(Item.name == name).count()
        session.close()
    logger.debug("Number of {} in database: {}".format(name, count))
    return bool(count)
    def write(self, input_metric, input_tuple):
        lw.log.debug('Start metric processing')
        lw.log.debug("Write call started : metric=" + input_metric)
        self.metric = input_metric
        self.insert_tuple = input_tuple
        self.metric_conf = get_config().table_conf()
        self.metric_prefix = self.metric_conf[0]
        self.metric_suffix = self.metric_conf[1]

        SQL = "INSERT INTO " + self.metric_prefix + self.metric + self.metric_suffix + " ("
        keycount = 0
        for key in self.insert_tuple[0].items():
            if keycount > 0:
                SQL = SQL + ", "
            SQL = SQL + key[0]
            keycount = keycount + 1
        SQL = SQL + ") VALUES ("
        keycount = 0
        for key in self.insert_tuple[0].items():
            if keycount > 0:
                SQL = SQL + ", "
            SQL = SQL + "%s"
            keycount = keycount + 1
        SQL = SQL + ")"
        lw.log.debug('sql: ' + SQL)
        COUNTROWS = 0
        for insert_dict in self.insert_tuple:
            COUNTROWS = COUNTROWS + 1
            VALUES = tuple(insert_dict.values())
            lw.log.debug('INSERT_TUPLE= %s' % (VALUES, ))
            try:
                self.db_cursor.execute(SQL, VALUES)
            except Exception as e:
                lw.log.error('(Error inserting on: ' + input_metric + ')' +
                             str(e))
        try:
            self.db_connection.commit()
            lw.log.info("metric=" + input_metric + " rows=" + str(COUNTROWS))
        except Exception as e:
            lw.log.error("metric=" + input_metric + " : " + str(e))
        lw.log.debug('Write call ended : metric=' + input_metric)
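For instance, with metric 'cpu', prefix 'tb_', suffix '_log', and rows keyed on host and value, the builder above would produce (illustrative):

INSERT INTO tb_cpu_log (host, value) VALUES (%s, %s)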
Example #28
 async def e621(self, ctx, *args):
     """
     .e621 [tags] - sends an image from e621.net.
     Number of tags shouldn't be more than 5.
     Tags must be separated by spaces.
     """
     if ctx.message.server is not None:
         server = get_server(ctx.message.server)
         nsfw = get_channel(get_config()['token'],
                            ctx.message.channel.id)['nsfw']
     else:
         server = {'sfw': False, 'nsfw_channels': []}
         nsfw = True
     if ((not server['sfw'])
             and nsfw) or ctx.message.channel.id in server['nsfw_channels']:
         await self.client.send_typing(
             discord.Object(id=ctx.message.channel.id))
         await self.e6_wrap(ctx, args)
     else:
         await self.client.add_reaction(ctx.message, '❌')
Example #29
File: admin.py Project: dudarev/osmtweet
 def LoadHandler(self):
     self.response.out.write('<br/><br/>Loading changesets<br/>')
     from xml.etree import cElementTree as etree
     config = get_config()
     # o,a for lat,lon; 1,2 for min,max
     o1, a1, o2, a2 = map(float, config['bbox'].split(','))
     delta_o = o2 - o1
     delta_a = a2 - a1
     # load local data from localhost
     if DEBUG_LOAD_FROM_FILE:
         file_name = "data.osm"
         data = open(file_name,'r')
     else:
         url = "http://api.openstreetmap.org/api/0.6/changesets?bbox=%s" % config['bbox']
         try:
             data = urllib2.urlopen(url)
         except urllib2.URLError as e:
             self.response.out.write("error fetching url")
             return
def test_TF_model():
    input_directory = get_config()[INPUT_DIR_KEY]

    infiles = [x for x in os.listdir(input_directory) if x[0] == '@']

    test_file_path = os.path.join(input_directory, infiles[0])

    with open(test_file_path, 'r') as file:
        in_data = json.loads(file.read())

    df = pd.DataFrame(in_data, columns=['tweet', 'date', 'embedding'])

    train, test = train_test_split(df['tweet'],
                                   test_size=0.4, random_state=1)

    model = TFIDFModel(use_context=False)

    model.characterize(train, None)

    sim_scores = model.similarity_score(test)

    assert len(test) == len(sim_scores)
Example #31
File: main.py Project: dudarev/osmtweet
    def get(self):
        changesets = Changeset.all().order('-created_at').fetch(20)
        counts = Count.all().order('-date').fetch(30)
        counts_list = [c.count for c in counts]
        counts_list.reverse()
        counts_list_str = ','.join(map(str, counts_list))
        config = get_config()
        description = Description.get_or_insert('description').text
        options = {
            'url': self.request.url,
            'config': config, 
            'changesets': changesets, 
            'description': description,
            }
        if counts_list:
            options.update({
                'counts_list': counts_list_str,
                'max_count': max(counts_list),
                'average': sum(counts_list)/len(counts_list),
                })

        path = os.path.join(os.path.dirname(__file__), 'templates/index.html')
        page = template.render(path, options)
        self.response.out.write(page)
Example #32
File: admin.py Project: dudarev/osmtweet
 def PrepareHandler(self):
     """prepare means: 
     - find the oldest non-prepared
     - create bit.ly link if does not exist
     - create trimmed tweet text if the link exists"""
     config = get_config()
     self.response.out.write('<br/><br/>Preparing to tweet')
     # get oldest non-prepared changeset in the datastore
     oldest_changeset = Changeset.all().order('created_at').filter('is_prepared =', False).fetch(1)
     if oldest_changeset:
         c = oldest_changeset[0]
         self.response.out.write('<br/>There is non-prepared<br/>')
         self.response.out.write("id: %d<br/>" % c.id)
         self.response.out.write("created_at: %s<br/>" % c.created_at)
         self.response.out.write("comment: %s<br/>" % c.comment)
         self.response.out.write("user: %s<br/>" % c.user)
         # if no shortened link - create it
         if not c.link_url:
             long_url = "http://www.openstreetmap.org/browse/changeset/%d" % c.id
             bitly = BitLy(config["bitly_username"], config["bitly_key"])
             short_url = bitly.short_url(long_url)
             if not short_url:
                 self.response.out.write('could not shorten')
                 return
             self.response.out.write(short_url)
             c.link_url = short_url
         if not c.tweet:
             if c.comment:
                 tweet = "%s: %s %s" % (c.user, c.comment, c.link_url)
             else:
                 tweet = "%s: %s" % (c.user, c.link_url)
             c.tweet = trim_to_tweet(tweet)
         if c.link_url and c.tweet:
             c.is_prepared = True
         self.response.out.write('tweet: %s' % c.tweet)
         c.put()
Example #33
 def __init__(self, sort_by):
     self.api_url = 'https://api.imgur.com/3/gallery/search/{}/'.format(sort_by)
     self.imgur_key = get_config('imlib.cfg', 'api_keys', 'imgur')
     self.headers = {'Authorization': 'Client-ID ' + self.imgur_key}
Example #34
from class_changerate1 import DB
from get_config import get_config
data, tables = get_config()
data.update({'table': "tabla6"})
#table = "tabla6"
#db_params = {'host':"192.168.122.48", 'user':"******", 'passwd':"Sungard01!", 'database':"app1", 'table':table}
obj = DB(host=data["host"],
         user=data["user"],
         passwd=data["password"],
         database=data["database"],
         table=data["table"])
obj.set_data_generation()
#end = datetime.now().replace(microsecond=0)
#print "Execution Time (Hr:Min:Sec)", end-start
Example #35
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Takes user tweet files from 'raw_data_path', applies filters, and writes output
to preprocessed_data_path.

June, 2019
@author: Joshua Rubin
"""

from get_config import (get_config, create_dir_if_not_there)
from tweetvalidator.data_processing import filter_tweets_from_directories

# Pull in filter settings from the global configuration.
config = get_config()

create_dir_if_not_there(config['preprocessed_data_path'])

filter_tweets_from_directories(config['raw_data_path'],
                               config['preprocessed_data_path'],
                               config['regexp_tweet_filters'],
                               int(config['min_tweet_characters']))
def eval_model(test_preds, model):
    """
    Evaluate the ml model given the predictions and test data

    Args:
        test_preds - a list of transformed prediction data
        model - the ml pipelined model
    Returns:
        A confusion matrix, along with the precision, recall, and F1 score of
        the currently trained model.
    """
    metrics = MulticlassMetrics(test_preds.select("prediction", "label").rdd)

    # Overall statistics
    precision = metrics.precision()
    recall = metrics.recall()
    f1Score = metrics.fMeasure()
    print("Confusion matrix")
    print(metrics.confusionMatrix())
    print("Summary Stats")
    print("Precision = %s" % precision)
    print("Recall = %s" % recall)
    print("F1 Score = %s" % f1Score)


if __name__ == "__main__":
    conf = get_config.get_config()
    spark, sc = utilities.get_spark_session("preprocessing")
    train_and_eval_model(conf, spark, sc)
    print("Done.")
Example #37
 def setUp(self):
     # case: check syntax yaml in config file
     self.print_info('\nGet Config file')
     self.conf = get_config.get_config()
     self.print_pass('Done')
Example #38
#!/usr/bin/python
from core.sg2_category import sg2_category as sg2c
from core.database.sg2_database_utils import image_database
from core.sg2_users import user as u
import json
import sys
import get_config as gc
cf = gc.get_config('config.dat')

db = image_database(**cf['sg2'])

def serach_img_from_database_by_index(index):
    img_table = 'sg2_image_info'
    rate_table = 'sg2_image_rate'
    image = {}
    img_val = db.get_table_row(img_table, "image_index='%s'" % index)
    if img_val == []:
        return image
    img_key = db.get_table_keys(img_table)

    for key, val in zip(img_key, img_val[0]):
        image[key[0]] = val
    return image

if __name__ == "__main__":
    index = sys.argv[1]
    print(serach_img_from_database_by_index(index))
Example #39
import os
import sys
import logging
import time
from time import sleep, strftime

THIS_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.insert(0, THIS_DIR)

import get_config
import button_press_function


#GLOBALS

CONFIG_FILE_NAME = 'zone_controller.conf'

CONFIG = get_config.get_config(CONFIG_FILE_NAME)

LOGFILE = CONFIG.get('General', 'logfile')
SERVER_IP = CONFIG.get('General', 'serverIP')
SERVER_PORT = int(CONFIG.get('General', 'serverPort'))
ARDUINO_ADDRESS = CONFIG.get('General', 'arduinoAddress')


#Enable debugging
DBG_ON = False
logging.basicConfig(level=logging.DEBUG, filename=LOGFILE)


#Commands for ZC
ZC_CMD_GET = 'G'
ZC_CMD_SET = 'S'
Example #40
import pandas as pd
from sklearn import linear_model
import numpy as np
import tweepy
import requests
import re
import time

from tweepy import OAuthHandler
from get_config import get_config


env = get_config()

consumer_key = env.get("CONSUMER_KEY")
consumer_secret = env.get("CONSUMER_SECRET")
access_token = env.get("ACCESS_TOKEN")
access_secret = env.get("ACCESS_TOKEN_SECRET")

auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_secret)

api = tweepy.API(auth)


t = pd.read_csv("fetcher/top100.csv")

# print t.t_id[:3]
# 0    614807708575907840
# 1    618798114825220097
# 2    617840986006401024
Example #41
import sys
from os.path import dirname, join as join_path

sys.path.insert(0, join_path(dirname(__file__), "lib"))  # extend sys.path

# from demjson import decode as decode_json
from django.utils.simplejson import loads as decode_json

from google.appengine.api.urlfetch import fetch as urlfetch, GET, POST
from google.appengine.ext import db
from google.appengine.ext.webapp import RequestHandler, WSGIApplication

# ------------------------------------------------------------------------------
# configuration -- SET THESE TO SUIT YOUR APP!!
# ------------------------------------------------------------------------------

from get_config import get_config

config = get_config()

OAUTH_APP_SETTINGS = {
    "twitter": {
        "consumer_key": config["consumer_key"],
        "consumer_secret": config["consumer_secret"],
        "request_token_url": "https://twitter.com/oauth/request_token",
        "access_token_url": "https://twitter.com/oauth/access_token",
        "user_auth_url": "http://twitter.com/oauth/authorize",
        "default_api_prefix": "https://twitter.com",
        "default_api_suffix": ".json",
    },
    "google": {
        "consumer_key": "",
        "consumer_secret": "",
        "request_token_url": "https://www.google.com/accounts/OAuthGetRequestToken",
def main():
    """
        Main function: 
            calls the subfunction according to user input
    """
    # read in the default settings from the configuration file
    global settings
    settings = get_config(default_config_file)
    
    # set the logging output, i.e. to a file or the screen
    set_logging(settings)


    # get all parameters provided via cmd-line
    global input_params
    input_params = get_cmdline()
    

    # now that we know which dataset we need and where to find it, select the
    # correct reader for the requested dataset;
    # first test that the requested dataset actually exists (is configured)
    if 'dataset.' + input_params['dataset'] in settings:
        reader = 'CF_' + input_params['dataset'] + '_Reader'
    else:
        err_msg = '[Error] -- ', now(), ' the requested dataset does not exist (is not configured)', input_params['dataset']
        err_code = 3
        handle_error(err_msg, err_code, settings)


    # call the reader module for the respective dataset and process the data
    import dataset_reader
    attribute = getattr(dataset_reader, reader)
    f_read = attribute()

    #print 'READER: ', f_read       #@@
    
    # gets a listing of available DatasetSeries and their corresponding time-range
    base_flist, base_mask_flist, gfp_flist, gfpmask_flist = f_read.get_filelist(input_params, settings)

    # processing limits: maximum number of files to be used
    if len(gfp_flist) > int(settings['general.def_maxfiles']):
        err_msg = '[Error] -- ', now(), ' the number of GFP products available (=', str(len(gfp_flist)), ') for the selected time period is larger than the configured "def_maxfiles" of: ', settings['general.def_maxfiles'], '\n', 'Please select a shorter time-period.'
        err_code = 4
        handle_error(err_msg, err_code, settings)
        

    # print the available input datasets, e.g. during testing
    do_print_flist('BASE', base_flist)
    do_print_flist('BASE_Mask', base_mask_flist)
    do_print_flist('GFP', gfp_flist)
    do_print_flist('GFP_Mask', gfpmask_flist)


    lmsg = 'Dataset_listing - RUNTIME in sec: ',  time.time() - startTime1
    print_log(settings, lmsg)
    # create a temporary location under settings['general.def_temp_dir'] to be
    # used for temporary storage during processing
    temp_storage = tempfile.mkdtemp(prefix='cloudfree_', dir=settings['general.def_temp_dir'])
    if temp_storage[-1] != dsep:
        temp_storage = temp_storage + dsep


    if len(base_flist) >= 1:
        f_read.base_getcover(base_flist, input_params, settings, temp_storage, mask=False)

    if len(base_mask_flist) >= 1:
        f_read.base_getcover(base_mask_flist, input_params, settings, temp_storage, mask=True)

    lmsg = 'BASE dataset_download - RUNTIME in sec: ',  time.time() - startTime1 #, '\n'
    print_log(settings, lmsg)


    # call the Processor module for the respective dataset and process the data
    import dataset_processor
    cfprocessor = 'CF_' + input_params['dataset'] + '_Processor'
    attribute = getattr(dataset_processor, cfprocessor)
    f_proc = attribute()

    #print 'PROCESSOR: ', f_proc        #@@
    cf_result = f_proc.process_clouds_1(base_flist, base_mask_flist, gfp_flist, gfpmask_flist, input_params, settings, temp_storage, f_read)


    # copy results to the output location and clean up the temporary storage area
    do_cleanup_tmp(temp_storage, cf_result, input_params, settings)

    # if output_format and/or output_datatype has been set by the user,
    # translate the resulting image(s) using gdal_translate
    if change_output is True:
        cnv_output(cf_result, input_params, settings)



# ----------
# for performance testing
    msg = 'Full Processing Runtime in sec: ', time.time() - startTime1, '\n'
    print_log(settings, msg)

    settings['logging.log_fsock'].close()