Example #1
def merge_ratings():

    def balance_classes(df, ndocs_per_class):
        df_pos = df[df['label']==1][:int(ndocs_per_class)]
        df_neg = df[df['label']==0][:int(ndocs_per_class)]
        return pd.concat([df_pos, df_neg])


    sub_space = lambda s: re.sub(r'\s+', ' ', s)
    write_row = lambda l, f: f.write('\t'.join(l) + '\n')

    filenames = glob('%s/*' % DATADIR)
    with open(TMPFILE, 'w') as f:
        write_row('id document label'.split(), f)
        for filename in filenames:
            for review in utils.read_json(filename):
                rating = int(review['rating'])
                if rating > 8:      # positive 9 10
                    write_row([review['review_id'], sub_space(review['review']), '1'], f)
                elif rating < 5:    # negative 1 2 3 4
                    write_row([review['review_id'], sub_space(review['review']), '0'], f)
                else:               # neutral
                    pass
    print('Ratings merged to %s' % TMPFILE)

    df = pd.read_csv(TMPFILE, sep='\t', quoting=3)
    df = df.fillna('')
    np.random.seed(SEED)
    df = df.iloc[np.random.permutation(len(df))]
    df = balance_classes(df, NDOCS/2)
    df.to_csv(RATINGSFILE, sep='\t', index=False)
    print('Ratings written to %s' % RATINGSFILE)
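Note: read_json in these examples is always a project-local helper (typically from a utils module), not a standard-library function, and its signature varies from project to project. A minimal sketch of the most common variant, which parses a whole JSON file into Python objects; the name, parameters, and behavior here are assumptions, not taken from any project above:

import json

def read_json(path):
    # Hypothetical helper: parse an entire JSON file into Python objects.
    # Each project above ships its own variant with its own signature.
    with open(path, 'r', encoding='utf-8') as fp:
        return json.load(fp)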
Example #2
def main():

	# Check and get arguments
	args = utils.check_args()
	configFile = args.config
	directory = args.directory
	gz = args.gz

	# If no config file was given in the arguments
	if not configFile:
		utils.printWarning("Didn't find '--config' param. Trying to read 'config.json'...")
		configFile = utils.is_valid_json_file('config.json')

	# Try to read the JSON config file
	config = None
	try:
		config = utils.read_json(configFile)
		config = config['config']
	except Exception as e:
		utils.printWarning("Failed to read config file: {}".format(e))
	finally:
		utils.printSuccess("DONE")

	# If config was loaded
	if config:
		try:
			backupped = utils.make_backup(config, directory, gz)
		finally:
			utils.printSuccess("DONE")

		utils.printSuccess("Close connection...")
		utils.printSuccess("DONE")
Example #3
def parse_raw_emdr_data(data_folder):

    filenames = utils.get_filenames(data_folder)

    total_orders = []
    total_history = []
    for f in filenames:
        try:
            j = json.loads(utils.read_json(f, data_folder))
            rowsets = j['rowsets']
            resultType = j['resultType']

            if resultType=='orders':
                for rowset in rowsets:
                    for row in rowset['rows']:
                        total_orders.append(row)

            elif resultType=='history':
                for rowset in rowsets:
                    typeID = rowset['typeID']
                    regionID = rowset['regionID']
                    for row in rowset['rows']:
                        row.append(typeID)
                        row.append(regionID)
                        total_history.append(row)

            else:
                print('[x] Result type is not orders or history.')

        except Exception as e:
            print('Filename: ' + f)
            print(e)

    return total_orders, total_history
Example #4
def sentiment_count(filename):
    lyrics = read_json(filename)
    classifications = {'pos': 0, 'neutral': 0, 'neg': 0}

    for lyric in lyrics:
        classifications[lyric['sentiment']] += 1

    print(classifications)
Example #5
 def load(self, filename):
     data = u.read_json(filename)
     if data is not None:
         self._name = data[0]['name']
         self._avatar = u.string_to_avatar(data[0]['avatar_name'])
         self._avatar_name = data[0]['avatar_name']
         self._colour = data[0]['colour']
     else:
         print "Error loading %s" % filename
Example #6
def download(assembly_id, json, indir, outdir):
    try:
        url = utils.read_json('%s/%s' % (indir, json))\
            ['status_dict']['접수']['의안접수정보'][0]['문서'][0][1][1]
    except KeyError as e:
        try:
            url = utils.read_json('%s/%s' % (indir, json))\
                ['status_dict']['접수']['의안접수정보'][0]['의안원문'][0][1][1]
        except KeyError as e:
            print(json)
            print(utils.read_json('%s/%s' % (indir, json))
                  ['status_dict']['접수']['의안접수정보'][0])
            url = None

    path = '%s/%s' % (outdir, json.replace('.json', '.pdf'))
    if not os.path.isfile(path) and url:
        urllib.urlretrieve(url, path)
        print('Downloaded %s' % path)
Example #7
	def monitor(self):
		threading.Timer(self._interval, self.monitor).start()
		after_files = utils.directory_to_set(self._directory_path, self._extension)
		new_files = after_files.difference(self._before_files)
		
		if len(new_files) > 0:
			for f in new_files:
				data_rows = utils.read_json(f)
				self._uploader.upload(data_rows)
				
		self._before_files = after_files
Example #8
 def get_order_book(self):
   """ Gets the up-to-date and valid order book from the exchange, or None if
       there was a problem (eg, the API service was unavailable at the
       moment or the returned order book was invalid).
   """
   # The URL for the API service is exchange-specific and should have been
   # defined by subclasses.
   json_data = read_json(self.url)
   if json_data is None:
     return None
   order_book = self._parse_order_book_from_json(json_data)
   if order_book is None or not validate_order_book(order_book):
     return None
   return order_book
Example #9
def process(sources, output, force):
    """Download sources and process the file to the output directory.

    \b
    SOURCES: Source JSON file or directory of files. Required.
    OUTPUT: Destination directory for generated data. Required.
    """
    for path in utils.get_files(sources):
        pathparts = utils.get_path_parts(path)
        pathparts[0] = output.strip(os.sep)
        pathparts[-1] = pathparts[-1].replace('.json', '.geojson')

        outdir = os.sep.join(pathparts[:-1])
        outfile = os.sep.join(pathparts)

        source = utils.read_json(path)
        urlfile = urlparse(source['url']).path.split('/')[-1]

        if not hasattr(adapters, source['filetype']):
            utils.error('Unknown filetype', source['filetype'], '\n')
            continue

        if os.path.isfile(outfile) and not force:
            utils.error('Skipping', path, 'since generated file exists.',
                        'Use --force to regenerate.', '\n')
            continue

        utils.info('Downloading', source['url'])

        try:
            fp = utils.download(source['url'])
        except IOError:
            utils.error('Failed to download', source['url'], '\n')
            continue

        utils.info('Reading', urlfile)

        try:
            geojson = getattr(adapters, source['filetype']).read(fp, source['properties'])
        except IOError:
            utils.error('Failed to read', urlfile)
            continue
        finally:
            os.remove(fp.name)

        utils.make_sure_path_exists(outdir)
        utils.write_json(outfile, geojson)

        utils.success('Done. Processed to', outfile, '\n')
Example #10
def setup_logging(save_dir,
                  log_config='logger/logger_config.json',
                  default_level=logging.INFO):
    """Setup logging configuration."""
    log_config = Path(log_config)

    if log_config.is_file():
        config = read_json(log_config)

        # Modify logging paths
        for _, handler in config['handlers'].items():
            if 'filename' in handler:
                handler['filename'] = str(save_dir / handler['filename'])

        logging.config.dictConfig(config)
    else:
        print('Warning: logging configuration file is not found in {}.'.format(
            log_config))

        logging.basicConfig(level=default_level)
Example #11
def read_data_model():

    loaded_encoder = tf.keras.models.load_model(
        RESULT_PATH + "pretrain_gen_encoder")  #"pretrain_gen_encoder"
    loaded_decoder = tf.keras.models.load_model(
        RESULT_PATH + "pretrain_gen_decoder")  #pretrain_gen_decoder
    file_path = RESULT_PATH + "train/20A_20B.csv"
    te_clade_files = glob.glob(file_path)
    r_dict = utils.read_json(RESULT_PATH + "r_word_dictionaries.json")
    vocab_size = len(r_dict) + 1
    total_te_loss = list()

    for te_name in te_clade_files:
        te_clade_df = pd.read_csv(te_name, sep="\t")
        print(te_clade_df)
        te_X = te_clade_df["X"].drop_duplicates()
        te_y = te_clade_df["Y"].drop_duplicates()
        print(te_X)

    return loaded_encoder, loaded_decoder, te_X
Example #12
def main():
    if not os.path.exists(label_path):
        label_file = "data/dev_label.txt"
        label = read_label_from_file(label_file, frame_size=frame_size, frame_shift=frame_shift)
        save_json(label, label_path)
    else:
        label = read_json(label_path)

    features, target = sklearn_dataset(
                        label, task=args.task, mode='val',
                        frame_size=frame_size, frame_shift=frame_shift,
                        features_path=features_path, target_path=target_path
                        )
    
    m = joblib.load(model_path)
    print(m)

    auc, eer = validate(m, features, target)
    print('AUC :', auc)
    print('EER :', eer)
Example #13
    def __init__(self, profile_name):
        """ Loads the JSON profile models into the tree attribute

		Arguments:
		----------
			profile_name:
				type: string
				info: name of the JSON predicting profile file
		"""

        profile = read_json(file_name=profile_name, file_type='profile_p')

        try:
            self.tree = profile['tree']
            self.colors = profile['colors']

            self.__load_clf(self.tree)

        except KeyError:
            exit('Invalid JSON keys')
Example #14
async def join(ctx, *args):
    server_id = str(ctx.message.guild.id)
    data = read_json("src/teams.json")
    for id in args:
        found = False
        for t in data[server_id]:
            if str(t.get("id")) == id:
                found = True
                if ctx.author.name.lower() in lowercase_players(t.get("players")):
                    await ctx.send("You are already a part of Team {}!".format(id))
                    break
                if len(t.get("players")) >= max_players.get(t.get("game")):
                    await ctx.send("Team {} is full!".format(id))
                    break
                t.get("players").append(ctx.author.name)
                write_json(data)
                await ctx.send(embed=embed_team(t))
                break
        if not found:
            await ctx.send(em.get("team_not_found"))
Example #15
def setup_logging(
    save_dir: Path,
    log_config: Path = Path(f"{PROJECT_DIR}/logger/logger_config.json"),
    default_level: int = logging.INFO,
) -> None:
    """
    Setup logging configuration
    """
    if log_config.is_file():
        config = read_json(log_config)
        # modify logging paths based on run config
        for _, handler in config["handlers"].items():
            if "filename" in handler:
                handler["filename"] = str(save_dir / handler["filename"])

        logging.config.dictConfig(config)
    else:
        print("Warning: logging configuration file is not found in {}.".format(
            log_config))
        logging.basicConfig(level=default_level)
Example #16
def make_docid2doc(int_mapped_path):
    docid2doc = {}
    splits = [
        'dev_ent_added.json', 'test_ent_added.json', 'train_ent_added.json'
    ]
    for split in splits:
        split_file = os.path.join(int_mapped_path, split)
        with codecs.open(split_file, 'r', 'utf-8') as fp:
            print('Processing: {:s}'.format(split_file))
            for data_json in utils.read_json(fp):
                doc_id = data_json['doc_id']
                docid2doc[doc_id] = {
                    'ents': data_json['ents'],
                    'text': data_json['text']
                }
    docid2doc_file = os.path.join(int_mapped_path, 'docid2doc.json')
    print('docids2doc: {:d}'.format(len(docid2doc)))
    with codecs.open(docid2doc_file, 'w', 'utf-8') as fp:
        json.dump(docid2doc, fp)
        print('Wrote: {:s}'.format(fp.name))
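Unlike most examples here, Example #16 passes an already-open file handle to utils.read_json and iterates over the result one record at a time, which suggests a JSON Lines-style generator rather than a whole-file parser. A hedged sketch of such a variant, assuming one JSON object per line:

import json

def read_json(fp):
    # Hypothetical JSON Lines variant: yield one parsed object per line.
    for line in fp:
        line = line.strip()
        if line:
            yield json.loads(line)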
Example #17
async def analytics(client):
    if client.is_ready() and cfg.SAPPHIRE_ID is None:
        fp = os.path.join(cfg.SCRIPT_DIR, "analytics.json")
        guilds = func.get_guilds(client)
        if not os.path.exists(fp):
            analytics = {}
        else:
            analytics = utils.read_json(fp)
        analytics[datetime.now(pytz.timezone(
            cfg.CONFIG['log_timezone'])).strftime("%Y-%m-%d %H:%M")] = {
                'nc': utils.num_active_channels(guilds),
                'tt': round(cfg.TICK_TIME, 2),
                'tr': main_loop.seconds,
                'ng': len(guilds),
                'm': round(psutil.virtual_memory().used / 1024 / 1024 / 1024,
                           2),
            }
        with concurrent.futures.ThreadPoolExecutor() as pool:
            await client.loop.run_in_executor(pool, utils.write_json, fp,
                                              analytics)
Example #18
    def __init__(self, args, options='', timestamp=True):
        # parse default and custom cli options
        for opt in options:
            args.add_argument(*opt.flags, default=None, type=opt.type)
        args = args.parse_args()

        if args.device:
            os.environ["CUDA_VISIBLE_DEVICES"] = args.device
        if args.resume:
            self.resume = Path(args.resume)
            self.cfg_fname = self.resume.parent / 'config.json'
        else:
            msg_no_cfg = "Configuration file needs to be specified. Add '-c config.json', for example."
            assert args.config is not None, msg_no_cfg
            self.resume = None
            self.cfg_fname = Path(args.config)
        self.is_training = args.training
        # load config file and apply custom cli options
        config = read_json(self.cfg_fname)
        self._config = _update_config(config, options, args)

        # set save_dir where trained model and log will be saved.
        save_dir = Path(self.config['trainer']['save_dir'])
        if not save_dir.exists():
            save_dir.mkdir(exist_ok=True, parents=True)
        save_dir = save_dir.resolve()
        timestamp = datetime.now().strftime(
            r'%Y%m%d-%H%M%S') if timestamp else ''

        exper_name = self.config['name']
        self._save_dir = save_dir / 'models' / exper_name / timestamp
        self._log_dir = save_dir / 'log' / exper_name / timestamp

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)

        # save updated config file to the checkpoint dir
        write_json(self.config, self.save_dir / 'config.json')

        # configure logging module
        setup_logging(self.log_dir, exper_name, timestamp, self.is_training)
Example #19
async def execute(ctx, params):
    author = ctx['message'].author
    r = "Checking..."
    s = False
    m = await ctx['channel'].send(r)

    if patreon_info is None:
        return False, "No need to do that."

    patrons = patreon_info.fetch_patrons(force_update=False)
    patrons[cfg.CONFIG['admin_id']] = "sapphire"
    auth_path = os.path.join(cfg.SCRIPT_DIR, "patron_auths.json")
    if author.id in patrons:
        auths = utils.read_json(auth_path)
        auths[str(author.id)] = {"servers": [ctx['guild'].id]}
        utils.write_json(auth_path, auths, indent=4)
        patreon_info.update_patron_servers(patrons)
        s = True
        reward = patrons[author.id].title()
        await func.admin_log(
            "🔑 Authenticated **{}**'s {} server {} `{}`".format(
                author.name, reward, ctx['guild'].name, ctx['guild'].id),
            ctx['client'],
            important=True)
        r = "✅ Nice! This server is now a **{}** server.".format(reward)
        if reward in ["Diamond", "Sapphire"]:
            r += ("\nPlease give me ~{} hours to set up your private bot - "
                  "I'll DM you when it's ready to make the swap!".format(
                      12 if reward == "Sapphire" else 24))
    else:
        await func.admin_log(
            "🔒 Failed to authenticate for **{}**".format(author.name),
            ctx['client'])
        r = (
            "❌ Sorry it doesn't look like you're a Patron.\n"
            "If you just recently became one, please make sure you've connected your discord account "
            "(<https://bit.ly/2UdfYbQ>) and try again in a few minutes. "
            "If it still doesn't work, let me know in the support server: <https://discord.io/DotsBotsSupport>."
        )
    await m.edit(content=r)
    return s, "NO RESPONSE"
Example #20
def setup_logging(save_dir,
                  log_config="logger/logger_config.json",
                  default_level=logging.INFO):
    """
    Setup logging configuration
    """
    log_config = Path(log_config)
    if log_config.is_file():
        config = read_json(log_config)
        # modify logging paths based on run config
        for _, handler in config["handlers"].items():
            if "filename" in handler:
                handler["filename"] = str(save_dir / handler["filename"])

        logging.config.dictConfig(config)
    else:
        logging.basicConfig(level=default_level,
                            format="%(levelname)s-%(message)s")
        logging.warning(
            "Logging configuration file is not found in {}.".format(
                log_config))
Example #21
def generate_record_example_with_kp(in_path, info_path, writer):
    train = glob.glob(in_path + '/*.png')
    label = in_path + '/label.txt'
    with open(label) as file:
        file_content = file.read()
        label = int(file_content)
    for image in train:
        with open(image, 'rb') as img_file:
            img = img_file.read()
        with open(info_path + image[-17:-3] + 'json', 'r') as json_file:
            info = json.load(json_file)
        kpts = read_json(info)
        feature = {
            "label":
            tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
            'img_raw':
            tf.train.Feature(bytes_list=tf.train.BytesList(value=[img])),
            'kpts': tf.train.Feature(float_list=tf.train.FloatList(value=kpts))
        }
        example_proto = tf.train.Example(features=tf.train.Features(
            feature=feature))
        writer.write(example_proto.SerializeToString())
Example #22
def setup_logging(save_dir,
                  log_config='logger/logger_config.json',
                  default_level=logging.INFO):
    """
    Setup logging configuration
    """
    log_config = Path(log_config)
    if log_config.is_file():
        config = read_json(log_config)
        # modify logging paths based on run config
        for _, handler in config['handlers'].items():
            if 'filename' in handler:
                handler['filename'] = str(save_dir / handler['filename'])

        logging.config.dictConfig(config)
    else:
        print(
            f"Warning: logging configuration file is not found in {log_config}."
        )
        logging.basicConfig(level=default_level)
        return None
    return config["handlers"]["info_file_handler"]["filename"]
Example #23
def time(ctx):
    user = ctx.obj['USER']
    city = ctx.obj['CITY']

    name = utils.read_json(ctx.obj['SETTINGS'])['name'].get(user, 'error')

    if city:
        raise NotImplementedError('You cannot use this option yet.')
    elif (user and name != 'error'):
        print_date_now(name['timezone'])
    elif name == 'error':
        click.secho(f"Error Code {errno.EINVAL}: {os.strerror(errno.EINVAL)}",
                    err=True,
                    fg='red')
        click.secho(f"This user hasn't been defined in your settings yet.",
                    fg='yellow')
        click.secho(
            f"You can add new users in '{utils.path_settings('timetravel')}'.",
            fg='yellow')
    else:
        click.secho(f"An unknown error has occurred", err=True, fg='red')
Example #24
 def update_table(self, notify):
     new_table_json = self.generate_table_json_from_url(self.iat_table_url)
     if not os.path.isfile(self.table_file_name):
         write_json(self.table_file_name, new_table_json)
         return
     old_table_json = read_json(self.table_file_name)
     #new_table_json = '{"table": [{"cardinal_points": "18 : 0","club": "AV Speyer 03","max_score": "898.6","place": "1","score": "5056.2 : 4170.6"}],"relay": "1Gruppe+A"}'
     new_table_dict = json.loads(new_table_json)
     if sorted(new_table_json.decode("utf-8")) == sorted(
             old_table_json.decode("utf-8")):
         print "Local check: Table of " + new_table_dict[
             "relay"] + " did not change"
         return
     else:
         print "Local check: Table of " + new_table_dict[
             "relay"] + " changed"
         self.send_post(new_table_json, '/set_table')
         if notify:
             self.notify_users_about_new_placements(new_table_json,
                                                    old_table_json)
         write_json(self.table_file_name, new_table_json)
Example #25
def main(args):
    # build model architecture
    model_path = Path(args.model)
    config_path = model_path.parent.joinpath('config.json')
    config = read_json(config_path)
    print('Loading checkpoint: {}'.format(model_path))
    checkpoint = torch.load(model_path, map_location='cpu')
    if len(checkpoint.keys()) == 1 and str2bool(input("Is this a fused model y/n? ")):
        config['arch']['args']['batchnorm'] = False
    model = getattr(module_arch,config['arch']['type'])(**dict(config['arch']['args']))
    state_dict = checkpoint['state_dict']
    consume_prefix_in_state_dict_if_present(state_dict,"module.")
    model.load_state_dict(state_dict)
    data = torch.randn([1] + config['input_size'])
    device = torch.device('cpu')  # could be 'cuda' if torch.cuda.is_available()
    model.eval()
    model = model.to(device)
    data = data.to(device)
    model(data)
    res_path = parameters_extractor(model, config['extractor'], result_path=model_path.parent, fuse=args.fuse)
    print("Result:\n", res_path)
Example #26
def split_triple_number():
    const = Const()
    const.novel_tagging()
    data = utils.read_json(Const.raw_test_filename)
    # bucket sentences containing 1, 2, 3, 4, and 5 or more triples
    triples_size_1_data, triples_size_2_data, triples_size_3_data, triples_size_4_data, triples_size_5_data = [], [], [], [], []
    for i, a_data in enumerate(data):
        triples = set()
        for triple in a_data['relationMentions']:
            m1 = nltk.word_tokenize(triple['em1Text'])[-1]
            m2 = nltk.word_tokenize(triple['em2Text'])[-1]
            label = triple['label']
            if label != 'None':
                triples.add((m1, m2, label))

        if len(triples) == 1:
            triples_size_1_data.append(a_data)
        elif len(triples) == 2:
            triples_size_2_data.append(a_data)
        elif len(triples) == 3:
            triples_size_3_data.append(a_data)
        elif len(triples) == 4:
            triples_size_4_data.append(a_data)
        else:
            triples_size_5_data.append(a_data)
    utils.write_data(open(const.raw_test_1_triple_filename, 'w'),
                     triples_size_1_data)
    utils.write_data(open(const.raw_test_2_triple_filename, 'w'),
                     triples_size_2_data)
    utils.write_data(open(const.raw_test_3_triple_filename, 'w'),
                     triples_size_3_data)
    utils.write_data(open(const.raw_test_4_triple_filename, 'w'),
                     triples_size_4_data)
    utils.write_data(open(const.raw_test_5_triple_filename, 'w'),
                     triples_size_5_data)
    print('Sentence-1-Triple: %d' % len(triples_size_1_data))
    print('Sentence-2-Triple: %d' % len(triples_size_2_data))
    print('Sentence-3-Triple: %d' % len(triples_size_3_data))
    print('Sentence-4-Triple: %d' % len(triples_size_4_data))
    print('Sentence-5-Triple: %d' % len(triples_size_5_data))
Example #27
def train_model(algorithm, training_config, output):

	""" Trains a OpenCV classifier and stores it in the models folder

	Arguments:
	----------
		algorithm:
			type: string
			info: name of the classifier {Eigen, Fisher, LBPH}

		training_config:
			type: string
			info: name of the JSON with the training configuration

		output:
			type: string
			info: name of the output model
	"""

	datasets = read_json(
		file_name=training_config,
		file_type='training_c'
	)

	classifier = FaceClassifier(algorithm)
	classifier.train(datasets)

	# Saving FaceClassifier int <-> label dict as JSON
	write_json(
		dictionary=classifier.properties,
		file_name=output + '.json',
		file_type='model'
	)

	# Saving FaceClassifier trained model as XML (pickle not working)
	write_clf(
		clf=classifier.model,
		file_name=output + '.xml',
		file_type='model'
	)
Example #28
    def extract_summary(self):
        data = read_json(self.full_path)

        articles = []
        abstracts = []

        for item in data:
            articles.append(item['article'])
            abstracts.append(item['abstract'])
        data_iterator = zip(articles, abstracts)

        summaries = []
        references = []

        for item in tqdm(data_iterator, desc="Lead:"):
            article, abstract = item
            summary = article[:self.extract_num]
            summaries.append(summary)
            references.append([abstract])

        result = test_rouge(summaries, references, self.processors)
        return result
Example #29
 def update_competitions(self, notify):
     new_competitions_json = self.generate_competitions_json_from_url(
         self.iat_competitions_url)
     if not os.path.isfile(self.competition_file_name):
         write_json(self.competition_file_name, new_competitions_json)
         return
     old_competitions_json = read_json(self.competition_file_name)
     #new_competitions_json = '{"competitions": [{"date": "07.05.2016", "home": "TB 03 Roding", "guest": "AV Speyer 03", "location": "Roding", "score": "562.1 : 545.0 :562.0", "url": "https://www.iat.uni-leipzig.de/datenbanken/blgew1516/start.php?pid=' + "'123'" +  '&protokoll=1&wkid=E3714956BFC24D6798DCD9C94B0620CC"}],"relay": "1Gruppe+A"}'
     new_competitions_dict = json.loads(new_competitions_json)
     if sorted(new_competitions_json.decode("utf-8")) == sorted(
             old_competitions_json.decode("utf-8")):
         print "Local check: Competitions of " + new_competitions_dict[
             "relay"] + " did not change"
         return
     else:
         print "Local check: Competitions of " + new_competitions_dict[
             "relay"] + " changed"
         self.send_post(new_competitions_json, '/set_competitions')
         if notify:
             self.notify_users_about_new_competitions(
                 new_competitions_json, old_competitions_json)
         write_json(self.competition_file_name, new_competitions_json)
Example #30
def analyse_video(video_path, model_name, clf_th, output):

	""" Identifies actors in a video and generates one with their names

	Arguments:
	----------
		video_path:
			type: string
			info: path where the video is located

		model_name:
			type: string
			info: name of the trained model

		clf_th:
			type: float
			info: confidence threshold to identify an actor

		output:
			type: string
			info: name of the generated video
	"""

	clf_props = read_json(
		file_name=model_name + '.json',
		file_type='model'
	)

	# Creating and loading the trained classifier
	clf = FaceClassifier(clf_props['algorithm'])
	clf.properties = clf_props
	clf.model = read_clf(
		clf=clf,
		file_name=model_name + '.xml',
		file_type='model'
	)

	# Generating a similar video with the names on it
	identify_actors(video_path, clf, clf_th, output)
Example #31
async def leave(ctx, *args):
    server_id = str(ctx.message.guild.id)
    data = read_json("src/teams.json")
    for id in args:
        found = False
        for t in data[server_id]:
            if str(t.get("id")) == id:
                found = True
                if ctx.author.name.lower() not in lowercase_players(t.get("players")):
                    await ctx.send("You are not a part of Team {}!".format(id))
                else:
                    remove_player(t.get("players"), ctx.author.name)
                    if len(t.get("players")) == 0:
                        data[server_id].remove(t)
                        await ctx.send("All players removed from Team {}.".format(id))
                    else:
                        await ctx.send("You have been removed from Team {}.".format(id))
                        await ctx.send(embed=embed_team(t))
                    write_json(data)
                break
        if not found:
            await ctx.send(em.get("team_not_found"))
Example #32
async def remove(ctx, id, *args):
    server_id = str(ctx.message.guild.id)
    data = read_json("src/teams.json")
    for t in data[server_id]:
        if str(t.get("id")) == id:
            players = t.get("players")
            if ctx.author.name.lower() not in lowercase_players(players):
                await ctx.send(em.get("non_member"))
            elif len(args) != len(set(args)):
                await ctx.send(em.get("duplicate_player"))
            else:
                removed_players = []
                args = [int(n) - 1 for n in args]
                for n in args:
                    if 0 <= n < len(players):
                        removed_players.append(players[n])
                    else:
                        await ctx.send(
                            "Player {} is not part of the team!".format(n + 1)
                        )
                players = [p for n, p in enumerate(players) if n not in args]
                t["players"] = players
                if len(players) == 0:
                    data[server_id].remove(t)
                    await ctx.send("All players removed from Team {}.".format(id))
                elif len(removed_players) > 0:
                    if len(removed_players) > 1:
                        await ctx.send(
                            "{} have been removed from Team {}.".format(
                                ", ".join(removed_players), id
                            )
                        )
                    else:
                        await ctx.send(
                            "{} has been removed from Team {}.".format(
                                removed_players[0], id
                            )
                        )
                    await ctx.send(embed=embed_team(t))
                write_json(data)
            return
    await ctx.send(em.get("team_not_found"))
Example #33
def train_model(algorithm, feats_pct, lang, output, profile_name):
    """ Prepares arguments to train and saves a NodeClassif object

	Arguments:
	----------
		algorithm:
			type: string
			info: name of the algorithm to train

		feats_pct:
			type: int
			info: percentage of features to keep

		lang:
			type: string
			info: language to perform the tokenizer process

		output:
			type: string
			info: output file name including extension

		profile_name:
			type: string
			info: name of the JSON training profile file
	"""

    if (feats_pct < 0) or (feats_pct > 100):
        exit('The specified features percentage is invalid')

    profile_data = read_json(file_name=profile_name, file_type='profile_t')

    node_classif = NodeClassif(
        algorithm=algorithm.lower(),
        feats_pct=feats_pct,
        lang=lang,
    )

    node_classif.train(profile_data)
    save_object(node_classif, output, 'model')
Example #34
def batch_eval_metrics(model_folder, w_config, save_path):
    assert len(w_config.split('-')) == 6
    print("Weight config: %s" % w_config)
    metrics = ['f1', 'acc', 'nmi']
    # metrics = ['f1', 'acc']
    mets = {k: [] for k in metrics}
    avg_mets = {k: 0 for k in metrics}
    model_count = 0
    eval_models = [
        f for f in Path(model_folder).glob('*') if w_config in str(f)
    ]

    txt_file_name = 'batch_eval_met_%s.txt' % w_config
    txt_dir = os.path.join(save_path, txt_file_name)
    txt_file = open(txt_dir, 'w')

    for f in eval_models:
        # for d in f.glob('*'):
        #     m = d / 'model_best.pth'
        #     if m.is_file():
        #         f = d; txt_file.write('%s\n' % f)
        #         break
        txt_file.write('%s\n' % f)
        print("Processing model: %s" % f)
        config_file = read_json(str(f / 'config.json'))
        model_best = str(f / 'model_best.pth')
        config = ConfigParser(config_file, resume=model_best, testing=True)
        f1, acc, nmi = eval_metrics(config)
        for m, s in zip(metrics, [f1, acc, nmi]):
            mets[m].append(s)
            avg_mets[m] += s
        model_count += 1
    for k in avg_mets.keys():
        avg_mets[k] /= model_count

    txt_file.write(json.dumps(mets, indent=2))
    txt_file.write('\n')
    txt_file.write(json.dumps(avg_mets, indent=2))
    txt_file.close()
Example #35
def apply_threshold(file_predict, output, threshold):
    """
    Apply threshold on the scores of a predicted file, reducing
    the number of predicted bounding boxes.
    """
    dpred = utils.read_json(file_predict)

    dic = {}
    discarded = 0
    pb = progressbar.ProgressBar(len(dpred))
    for image in sorted(dpred):
        for content in dpred[image]:
            if content[1] >= threshold:
                if image in dic:
                    dic[image].append(content)
                else:
                    dic[image] = [content]
            else:
                discarded += 1
        pb.update()
    utils.save_json(output, dic)
    logger.info('Total of discarded bounding boxes: %d' % discarded)
Example #36
    def __init__(self, config_file):
        self.config_file = Path(config_file)

        self.config = read_json(self.config_file)

        experiment_name = self.config['experiment_name']
        out_dir = Path(self.config['trainer']['save_dir'])
        timestamp = datetime.now().strftime(r'%Y%m%d_%H%M%S')

        self.save_dir = out_dir / 'models' / experiment_name / timestamp
        self.log_dir = out_dir / 'log' / experiment_name / timestamp
        self.samples_dir = out_dir / 'samples' / experiment_name / timestamp

        self.save_dir.mkdir(parents=True, exist_ok=True)
        self.log_dir.mkdir(parents=True, exist_ok=True)
        self.samples_dir.mkdir(parents=True, exist_ok=True)

        configure_logging(self.log_dir)
        self.log_levels = {
            0: logging.WARNING,
            1: logging.INFO,
            2: logging.DEBUG,
        }
Example #37
    def add_item(self, urls=None):
        if urls is None:
            urls = [input("Url to parse : ")]
        data = utils.read_json(self.database_file)

        for url in urls:
            try:
                for builder in self.builders:
                    if builder.match(url):
                        break
                else:
                    logger.info('The url {} is not valid'.format(url))
                    continue

                logger.info("Scraping and adding {} to the database".format(url))
                item_data = builder.scrapper(url, self.language).scrap()
                data[builder.field_name].append(item_data)
            except DatafusException as e:
                logger.warning("Skipping {} for the following reason : {}".format(url, str(e)))
            except Exception as e:
                logger.warning("Skipping {} for the following reason : {}".format(url, str(e)))
                logger.error(e)

        utils.save_json(self.database_file, data)
Example #38
def combine_parsed_medline_articles(saved_path='parsed_articles', 
                                    year_start=2010, 
                                    year_end=2019):
    """
    Given a path to a folder of JSON files,
    return all parsed papers

    Input
    =====
    saved_path: str, path to saved JSON folder

    Output
    ======
    parsed_papers: list, list of all parsed papers
    """
    paths = glob(os.path.join(saved_path, '*.json'))

    parsed_papers = []
    for path in paths:
        papers = read_json(path)
        papers = [paper for paper in papers if year_start <= int(paper['pubdate']) <= year_end]
        parsed_papers.extend(papers)
    return parsed_papers
Example #39
def setup_logging(save_dir,
                  run_id='1',
                  log_config='logger/logger.json',
                  default_level=logging.INFO):
    """
    Setup logging configuration

    Parameters
    ----------
    save_dir: str
        logging info save root directory. By default, the name of logging file is info.log.
    run_id: str
        session id.
    log_config: str, default='logger/logger.json'
        logging config json file.
    default_level: str, default=logging.INFO
        logging level.

    Returns
    -------
    None
    """

    _log_config = Path(log_config)
    if _log_config.is_file():
        # read json file
        config_dict = read_json(_log_config)
        for _, handler in config_dict['handlers'].items():
            if 'filename' in handler:
                handler['filename'] = str(
                    save_dir / '{}-{}'.format(run_id, handler['filename']))
        logging.config.dictConfig(config_dict)

    else:
        print("Warning: logging configuration file is not found in {}.".format(
            _log_config))
        logging.basicConfig(level=default_level)
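Based on the parameter documentation above, a typical call might look like the following sketch; the directory name and run id are illustrative, not from the source:

from pathlib import Path
import logging

# Hypothetical usage of the setup_logging variant above; it falls back to
# logging.basicConfig when logger/logger.json does not exist.
save_dir = Path('saved/run1')
save_dir.mkdir(parents=True, exist_ok=True)
setup_logging(save_dir, run_id='42')
logging.getLogger(__name__).info('logging configured')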
Example #40
    c.close()
    print("Done Classifying.")
    print()
    return classified_array


if __name__ == "__main__":

    # Name of the JSON file we want to operate on
    json_filename = '../database_new.json'

    # Name of the file with the new URLs we want to add to the JSON
    input_filename = 'music_urls.txt'

    # Read the JSON file; if it does not exist, an empty list is returned
    database_array = read_json(json_filename)

    # Read the URLs file
    urls_array = read_urls_file(input_filename) # desired input goes here

    # Alternatively, read the URLs from user input
    # urls_array = read_urls_input()

    # Call the lyrics-crawling function, which returns a list of dicts
    lyrics_array = crawl_lyrics(urls_array)

    # Append the new list to the end of the song list that was
    # already in the database
    database_array.extend(classify_lyrics(lyrics_array))

    # Write to the JSON file
Example #41
def process(sources, output, force):
    """Download sources and process the file to the output directory.

    \b
    SOURCES: Source JSON file or directory of files. Required.
    OUTPUT: Destination directory for generated data. Required.
    """
    catalog_features = []
    failures = []
    path_parts_to_skip = len(utils.get_path_parts(output))
    success = True
    for path in utils.get_files(sources):
        try:
            utils.info("Processing " + path)
            pathparts = utils.get_path_parts(path)
            pathparts[0] = output.strip(os.sep)
            pathparts[-1] = pathparts[-1].replace('.json', '.geojson')
    
            outdir = os.sep.join(pathparts[:-1])
            outfile = os.sep.join(pathparts)
    
            source = utils.read_json(path)
            urlfile = urlparse(source['url']).path.split('/')[-1]
    
            if not hasattr(adapters, source['filetype']):
                utils.error('Unknown filetype', source['filetype'], '\n')
                failures.append(path)
                continue
    
            if os.path.isfile(outfile) and \
                os.path.getmtime(outfile) > os.path.getmtime(path) and not force:
                utils.error('Skipping', path, 'since generated file exists.',
                            'Use --force to regenerate.', '\n')
                with open(outfile, "rb") as f:
                    geojson = json.load(f)
                properties = geojson['properties']
            else:
                utils.info('Downloading', source['url'])
    
                try:
                    fp = utils.download(source['url'])
                except IOError:
                    utils.error('Failed to download', source['url'], '\n')
                    failures.append(path)
                    continue
    
                utils.info('Reading', urlfile)
    
                if 'filter' in source:
                    filterer = BasicFilterer(source['filter'], source.get('filterOperator', 'and'))
                else:
                    filterer = None
    
                try:
                    geojson = getattr(adapters, source['filetype'])\
                        .read(fp, source['properties'],
                            filterer=filterer,
                            layer_name=source.get("layerName", None),
                            source_filename=source.get("filenameInZip", None))
                except IOError as e:
                    utils.error('Failed to read', urlfile, str(e))
                    failures.append(path)
                    continue
                except zipfile.BadZipfile as e:
                    utils.error('Unable to open zip file', source['url'])
                    failures.append(path)
                    continue
                finally:
Example #42
    import traceback
    import json
    try:
        return eval_dag(dag, filename, dag_id), dag_id
    except Exception as e:
        with open('error.'+str(dag_id), 'w') as err:
            err.write(str(e)+'\n')
            for line in traceback.format_tb(e.__traceback__):
                err.write(line)
            err.write(json.dumps(dag))
    return (), dag_id

if __name__ == '__main__':

    datafile = "ml-prove.csv"
    dags = utils.read_json('test_err.json')

    results = dict()

    remaining_dags = [d for d in enumerate(dags) if str(d[0]) not in results]
    print("Starting...", len(remaining_dags))
    pprint.pprint(remaining_dags)

    for e in map(lambda x: safe_dag_eval(x[1], datafile, x[0]), remaining_dags):
        results[str(e[1])] = e
        print(e)
        print("Model %4d: Cross-validation error: %.5f (+-%.5f)" % (e[1], e[0][0], e[0][1]))
        sys.stdout.flush()

    print("-"*80)
    best_error = sorted(results.values(), key=lambda x: x[0][0]-2*x[0][1], reverse=True)[0]
Example #43
def basic_view():
    view_data = json.loads(utils.read_json(utils.HEALTH_JSON_FILE))
    view_data.update(json.loads(utils.read_json(utils.STATS_JSON_FILE)))
    return view_data
Example #44
File: main.py Project: Ealdor/pypbp
	def start(self):
		self.status.set("Estado: -")
		self.leng.set("Iteración: -/- y Número: -/-")
		self.totaltime.set("Tiempo total: -")
		self.ones.set("Total de unos: -")
		self.types.set("Progreso: -")
		self.startButton.config(state = 'disabled')
		self.browseButton.config(state = 'disabled')
		self.cancelButton.config(state = 'normal')
		self.maxnumberSpinbox.config(state = 'disabled')
		self.complexSpinbox.config(state = 'disabled')
		if 1 <= int(self.complexSpinbox.get()) <= 5 and 1 <= int(self.maxnumberSpinbox.get()) <= 21 and self.completeName != "":
			start_time = time.time()
			if self.name.get().split('.')[1] == 'csv':
				self.g = generator.Generator(self.maxnumberSpinbox.get(), self.complexSpinbox.get(), utils.read_csv(self.completeName), self.cancelButton, self.types)
			else:
				self.g = generator.Generator(self.maxnumberSpinbox.get(), self.complexSpinbox.get(), utils.read_json(self.completeName), self.cancelButton, self.types)
			self.g.count_one()
			self.ones.set("Total de unos: {0}".format(len(self.g.table_uno)))
			i = 0
			while self.g.maxim > 1:
				i += 1
				self.leng.set("Iteración: {0}/{1} y Número: {2}".format(i, self.complexSpinbox.get(), self.g.maxim))
				self.status.set("Estado: Generando puzzle...")
				self.g.step_one()
				tim = utils.sec_to(int(time.time() - start_time))
				self.totaltime.set("Tiempo total: {0}h:{1}m:{2}s".format(tim[0], tim[1], tim[2]))
				self.status.set("Estado: Aplicando condición uno...")
				self.g.cond_dos(1)
				tim = utils.sec_to(int(time.time() - start_time))
				self.totaltime.set("Tiempo total: {0}h:{1}m:{2}s".format(tim[0], tim[1], tim[2]))
				self.status.set("Estado: Aplicando condición dos...")
				self.g.cond_dos(2)
				tim = utils.sec_to(int(time.time() - start_time))
				self.totaltime.set("Tiempo total: {0}h:{1}m:{2}s".format(tim[0], tim[1], tim[2]))
				
				if self.g.maxim >= 4:
					self.status.set("Estado: Aplicando condición tres...")
					self.g.cond_dos(3)
					tim = utils.sec_to(int(time.time() - start_time))
					self.totaltime.set("Tiempo total: {0}h:{1}m:{2}s".format(tim[0], tim[1], tim[2]))
				
				self.g.count_one()
				self.ones.set("Total de unos: {0}".format(len(self.g.table_uno)))
				if i == self.g.iters:
					self.g.maxim -= 1
					i = 0
			if self.name.get().split('.')[1] == 'csv':
				utils.write_csv(self.g.table_all)
			else:
				utils.write_json(self.g.table_all)

			if self.g.cancel:
				self.status.set("Estado: Cancelado")
			else:
				self.status.set("Estado: Completado")
			self.g = None
		self.startButton.config(state = 'normal')
		self.browseButton.config(state = 'normal')
		self.cancelButton.config(state = 'disabled')
		self.maxnumberSpinbox.config(state = 'normal')
		self.complexSpinbox.config(state = 'normal')
Example #45
    LOG.info('Connecting to the database')
    connection = pymysql.connect(host=hostname, port=port, user='******', passwd='skynet', db='skynet')
    cursor = connection.cursor()
    try:
        cursor.execute("SELECT max(parameterNumber) FROM " + parameters)

        data = cursor.fetchone()
        return data
    finally:
        cursor.close()
        connection.close()


try:
    utils.copy(PARAMETERS_FROM, PARAMETERS_TO)
    json_object = utils.read_json(PARAMETERS_FROM)
    dictionary = json_object[0]

    # Get the hostname
    HOSTNAME = dictionary['db-hostname']

    # Build the constants we need
    TABLE_STEM = dictionary['table-stem']
    PARAMETERS = TABLE_STEM + '_parameters'
    PORT_INT = int(dictionary['db-port'])
    PORT_STR = dictionary['db-port']

    max_parameter_number = get_parameter_numbers(PARAMETERS, HOSTNAME, PORT_INT)[0]
    LOG.info('Max parameter number = %(max_parameter_number)s' % {'max_parameter_number': str(max_parameter_number)})

    cmd = 'java -Xms15g -Xmx30g -jar /home/kevin/yabi/MergeSkyNetResults.jar -database ' + TABLE_STEM + ' -db_hostname ' + HOSTNAME + ' -db_port ' + PORT_STR
Example #46
def respfunc_viewer(path):
    app = QtGui.QApplication([])
    pyqtgraph.setConfigOption("background", "w")
    pyqtgraph.setConfigOption("foreground", "k")

    win = QtGui.QMainWindow()
    win.setWindowTitle("MT response function data viewer")

    darea = dockarea.DockArea()
    w = QtGui.QWidget()
    win.setCentralWidget(darea)

    taglist = QtGui.QListWidget(win)
    taglist.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
    taglist_dock = dockarea.Dock("Tags")
    taglist_dock.addWidget(taglist)
    darea.addDock(taglist_dock)

    sitelist = QtGui.QListWidget()
    sitelist.setSelectionMode(QtGui.QAbstractItemView.ExtendedSelection)
    sitelist_dock = dockarea.Dock("Tree...")
    sitelist_dock.addWidget(sitelist)
    darea.addDock(sitelist_dock, "left", taglist_dock)

    resplot = pyqtgraph.PlotWidget()
    resplot_dock = dockarea.Dock("APPARENT RESISTIVITY")
    resplot_dock.addWidget(resplot)
    darea.addDock(resplot_dock, "left", sitelist_dock)

    phaseplot = pyqtgraph.PlotWidget()
    phaseplot_dock = dockarea.Dock("PHASE")
    phaseplot_dock.addWidget(phaseplot)
    darea.addDock(phaseplot_dock, "bottom", resplot_dock)

    default_pen = [[(255,255,255,90)], dict(width=1)]
    select_pen = [["r"], dict(width=1.5)]
    skipflag_pen = [[(255,255,255,30)], dict(width=0.5)]

    resplotitem = resplot.getPlotItem()
    phaseplotitem = phaseplot.getPlotItem()
    resplotitem.invertX(True)
    phaseplotitem.invertX(True)
    resplotitem.setLogMode(x=True, y=True)
    phaseplotitem.setLogMode(x=True, y=False)
    phaseplotitem.vb.setXLink(resplotitem.vb)
    resplotitem.setYRange(np.log10(0.1), np.log10(1000))
    phaseplotitem.setYRange(0, 90)

    resvb = resplotitem.vb
    phasevb = phaseplotitem.vb

    data = utils.AttrDict()

    tagfns = glob.glob(op.join(path, "*-cal.json"))
    tag2fn = {}
    fn2tag = {}
    sites = set()
    tagfns.sort()
    
    data = utils.AttrDict()
    with open(op.join(path, "maskedfreqs.json"), mode="r") as f:
        maskedfreqs = utils.read_json(f)
    maskedlines = utils.AttrDict()
    datasymbols = utils.AttrDict()

    psymbols = utils.AttrDict({
        "xy": dict(pen=None, symbol="o", symbolBrush="b"),
        "yx": dict(pen=None, symbol="s", symbolBrush="r")
        })
    plines = utils.AttrDict({
        "xy": dict(pen="b"),
        "yx": dict(pen="r")
        })

    plotpens = utils.AttrDict({"xy": "b", "yx": "r",})
    plotsymbols = utils.AttrDict({"xy": "o", "yx": "s"})

    def plot(tag):

        if not hasattr(datasymbols[tag], "res_xy"):
            datasymbols[tag].res_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_xy, **psymbols.xy)
            datasymbols[tag].res_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_yx, **psymbols.yx)
            datasymbols[tag].phase_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_xy, **psymbols.xy)
            datasymbols[tag].phase_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_yx, **psymbols.yx)

            maskedlines[tag].res_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_xy, **plines.xy)
            maskedlines[tag].res_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].res_yx, **plines.yx)
            maskedlines[tag].phase_xy = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_xy, **plines.xy)
            maskedlines[tag].phase_yx = pyqtgraph.PlotDataItem(data[tag].freqs, data[tag].phase_yx, **plines.yx)
            
            resplotitem.addItem(datasymbols[tag].res_xy)
            resplotitem.addItem(datasymbols[tag].res_yx)
            resplotitem.addItem(maskedlines[tag].res_xy)
            resplotitem.addItem(maskedlines[tag].res_yx)

            phaseplotitem.addItem(datasymbols[tag].phase_xy)
            phaseplotitem.addItem(datasymbols[tag].phase_yx)
            phaseplotitem.addItem(maskedlines[tag].phase_xy)
            phaseplotitem.addItem(maskedlines[tag].phase_yx)

        for i, freq in enumerate(data[tag].freqs):
            if maskedfreqs[tag]["masks"][i] == 0:
                data[tag].freqs[i] = float(maskedfreqs[tag]["freqs"][i])
            else:
                data[tag].freqs[i] = np.nan

        maskedlines[tag].res_xy.setData(data[tag].freqs, data[tag].res_xy)
        maskedlines[tag].res_yx.setData(data[tag].freqs, data[tag].res_yx)
        maskedlines[tag].phase_xy.setData(data[tag].freqs, data[tag].phase_xy)
        maskedlines[tag].phase_yx.setData(data[tag].freqs, data[tag].phase_yx)

    progress = QtGui.QProgressDialog("Loading data...", "Abort", 0, len(tagfns), win)
    progress.setWindowModality(QtCore.Qt.WindowModal)

    for i, tagfn in enumerate(tagfns):
        progress.setValue(i)
        tag = op.basename(tagfn).replace("-cal.json", "")
        tag2fn[tag] = tagfn
        fn2tag[tagfn] = tag
        site = tag.split("-")[0]
        sites.add(site)
        data[tag] = utils.read_json(tagfn)
        if tag not in maskedfreqs:
            maskedfreqs[tag] = utils.AttrDict({"freqs": data[tag].freqs.copy(), "masks": np.zeros_like(data[tag].freqs)})

        if tag not in maskedlines:
            maskedlines[tag] = utils.AttrDict()
            datasymbols[tag] = utils.AttrDict()

        plot(tag)

        if progress.wasCanceled():
            break

    progress.setValue(len(tagfns))

    resfreqselect = pyqtgraph.LinearRegionItem([0,-1])
    phasefreqselect = pyqtgraph.LinearRegionItem([0,-1])
    resplotitem.addItem(resfreqselect)
    phaseplotitem.addItem(phasefreqselect)

    def res_region_moved():
        phasefreqselect.setRegion(resfreqselect.getRegion())

    def phase_region_moved():
        resfreqselect.setRegion(phasefreqselect.getRegion())

    resfreqselect.sigRegionChanged.connect(res_region_moved)
    phasefreqselect.sigRegionChanged.connect(phase_region_moved)

    def populate_tag_list(filter_sites=None):
        if filter_sites:
            tags = [t for t in tag2fn.keys() if t.split("-")[0] in filter_sites]
        else:
            tags = sorted(tag2fn.keys())
        tags.sort()
        taglist.clear()
        for tag in tags:
            # print tag
            tagitem = QtGui.QListWidgetItem(taglist)
            tagitem.setText(tag)
        plot_per_tag_list()
        print()

        
    def plot_per_tag_list():
        tags = [t.text() for t in taglist.selectedItems()]
        if not tags:
            tags = [t.text() for t in [taglist.item(i) for i in range(taglist.count())]]
        
        for plotitemtag, tagitems in datasymbols.items():
            if plotitemtag in tags:
                for item_name, item in tagitems.items():
                    item.setSymbol(plotsymbols[item_name[-2:]])
                    # item.setPen(None)#plotpens[item_name[-2:]])
            else:
                for item in tagitems.values():
                    item.setSymbol(None)
                    # item.setPen(None)

        for plotitemtag, tagitems in maskedlines.items():
            if plotitemtag in tags:
                for item_name, item in tagitems.items():
                    item.setPen(plotpens[item_name[-2:]])
            else:
                for item in tagitems.values():
                    item.setPen(None)

    def selected_site_names():
        return [s.text() for s in sitelist.selectedItems()]

    def pick_site():
        newsites = selected_site_names()
        populate_tag_list(newsites)
        # plot_per_tag_list()

    def toggle_selected_mask(value):
        tags = [str(t.text()) for t in taglist.selectedItems()]
        log_mask_range = resfreqselect.getRegion()
        fmin = 10 ** log_mask_range[0]
        fmax = 10 ** log_mask_range[1]
        for tag in tags:
            for i, freq in enumerate(maskedfreqs[tag]["freqs"]):
                if freq >= fmin and freq <= fmax:
                    maskedfreqs[tag]["masks"][i] = value
            plot(tag)
        print(log_mask_range, tags, "\n")

    disable = QtGui.QPushButton("&Delete selected frequencies")
    enable = QtGui.QPushButton("&Enable selected frequencies")
    sitelist_dock.addWidget(disable)
    sitelist_dock.addWidget(enable)
    disable.clicked.connect(lambda: toggle_selected_mask(1))
    enable.clicked.connect(lambda: toggle_selected_mask(0))


    # def generate_key_press_event_handler(self, vb, event):
    #     vb.keyPressEvent(self, event)
    #     if event.key() is Qt.Key_X:
    #         toggle_selected_mask(mode="xy")
    #     elif event.key() is Qt.Key_Y:
    #         toggle_selected_mask(mode="yx")

    # resplotitem.vb.keyPressEvent = lambda 

    populate_tag_list()

    sites = sorted(list(sites))
    for site in sites:
        siteitem = QtGui.QListWidgetItem(sitelist)
        siteitem.setText(site)

    sitelist.itemSelectionChanged.connect(pick_site)
    taglist.itemSelectionChanged.connect(plot_per_tag_list)

    def cleanup():
        with open(op.join(path, "maskedfreqs.json"), mode="w") as f:
            utils.write_json(maskedfreqs, f)

    win.showMaximized()
    app.aboutToQuit.connect(cleanup)
    app.exec_()
Example #47
    for e in futures.map_as_completed(inner_eval, enumerate(dags)):
        results[str(e[1])] = e

    res = [results[str(d[0])][0] for d in enumerate(dags)]

    return res

if __name__ == '__main__':

    import shelve
    import pickle

    results = shelve.open("results_wilt_tuned", protocol=pickle.HIGHEST_PROTOCOL)

    datafile = "wilt.csv"
    dags = utils.read_json('test_pop.json')
    # dags = dags[36076:36077]

    remaining_dags = [d for d in enumerate(dags) if str(d[0]) not in results]
    print("Starting...", len(remaining_dags))

    for e in map(lambda x: safe_dag_eval(x[1], datafile, x[0]), remaining_dags):
        results[str(e[1])] = e
        print("Model %4d: Cross-validation error: %.5f (+-%.5f)" % (e[1], e[0][0], e[0][1]))
        sys.stdout.flush()

    print("-"*80)
    best_error = sorted(results.values(), key=lambda x: x[0][0]-2*x[0][1], reverse=True)[0]
    print("Best model CV error: %.5f (+-%.5f)" % (best_error[0][0], best_error[0][1]))

    import pprint
Example #48
def start_prime_coordinator():
    """This function initializes a PrimeCoordinator object"""
    if len(sys.argv) < 3:
        print("\n Invalid input, please enter correct parameters \n use command like: \n\t python PrimeCommand.py parameters_json.json primecommandResult.csv\n")
        sys.exit(0)

    # store command line arguments in local variables
    json_file = sys.argv[1]
    result_file = sys.argv[2]
    exposure_sequence = utils.read_json(json_file)  # list of exposures {mean, sd, non_rate}; read JSON content into memory

    primeCoordinator = PrimeCoordinatorRaw()
    primeCoordinator.get_counterfactual_compound_exposures(exposure_sequence)

    # get the data to print/store in a file
    b_output_mortality      = primeCoordinator.output_baseline_mortality
    b_output_mortality_num  = primeCoordinator.output_baseline_mortality_num
    b_total_mortality       = primeCoordinator.output_baseline_mortality_total
    c_output_mortality      = primeCoordinator.output_counterfactual_mortality
    c_output_mortality_num  = primeCoordinator.output_counterfactual_mortality_num
    c_total_mortality       = primeCoordinator.output_counterfactual_mortality_total
    total_population        = primeCoordinator.output_total_population
    #total_death_averted    = str(int(round(primeCoordinator.output_total_death_averted))) # int
    total_death_averted     = str(primeCoordinator.output_total_death_averted) # decimal
    total_death_baseline    = str(primeCoordinator.output_total_death_baseline)

    '''
        These are the outputs
    '''
    all_mortality_outcome   = primeCoordinator.output_all_mortality_exposure_outcome # [{'outcome_id':outcome_id,'name':outcome name,'baseline_death':100, 'counterfactual_death':20},{}]
    all_mortality_age       = primeCoordinator.output_all_mortality_age # [{'age_group_id':age_group_id,'age_group':age_group,'baseline_death':100, 'counterfactual_death':20},{}]
    all_mortality_gender    = primeCoordinator.output_all_mortality_gender # [{'gender':'male','baseline_death':100, 'counterfactual_death':20},{'gender':'female','baseline_death':100, 'counterfactual_death':20}]

    '''
        Write results into a csv file
    '''
    f_test_result_csv = csv.writer(open(result_file, 'w', newline='')) # write results into csv
    # write overall attributions
    col1_name = 'total population'
    f_test_result_csv.writerow([col1_name]) # write title
    f_test_result_csv.writerow([total_population])
    f_test_result_csv.writerow([]) # write separator line
    col1_name = 'total death averted'
    f_test_result_csv.writerow([col1_name]) # write title
    f_test_result_csv.writerow([total_death_averted])
    f_test_result_csv.writerow([]) # write separator line

    # write all mortality by outcome
    col1_name = 'outcome_id'
    col2_name = 'name'
    col3_name = 'b_mortality_sum_db'
    col4_name = 'c_mortality_sum'
    f_test_result_csv.writerow([col1_name, col2_name, col3_name, col4_name]) # write title
    for line in all_mortality_outcome:
        f_test_result_csv.writerow([line.get(col1_name), line.get(col2_name), line.get(col3_name), line.get(col4_name)]) # write baseline and counterfactual mortalities
    f_test_result_csv.writerow([]) # write separator line

    # write all mortality by age
    col1_name = 'age_group_id'
    col2_name = 'age_group'
    col3_name = 'b_mortality_sum_db'
    col4_name = 'c_mortality_sum'
    f_test_result_csv.writerow([col1_name, col2_name, col3_name, col4_name]) # write title
    for line in all_mortality_age:
        f_test_result_csv.writerow([line.get(col1_name), line.get(col2_name), line.get(col3_name), line.get(col4_name)]) # write baseline and counterfactual mortalities
    f_test_result_csv.writerow([]) # write separator line

    # write all mortality by gender
    col1_name = 'gender'
    col2_name = 'b_mortality_sum_db'
    col3_name = 'c_mortality_sum'
    f_test_result_csv.writerow([col1_name, col2_name, col3_name]) # write title
    for line in all_mortality_gender:
        f_test_result_csv.writerow([line.get(col1_name), line.get(col2_name), line.get(col3_name)]) # write baseline and counterfactual mortalities
    f_test_result_csv.writerow([]) # write separator line
Example #49
parser.add_argument('--header', action = 'store_true', 
    help = "Specify if the file contains a header")
args = parser.parse_args() 

# read in file
if args.i[-3:] == "xls" or args.i[-4:] == "xlsx":
    # make sure date and time fields are correctly processed
    indexline = utils.read_columnfile(args.c)
    date, time = False, False
    if indexline[3] != "-":
        date = indexline[3]
    if indexline[4] != "-":
        time = indexline[4]  
    lines = utils.read_excel(args.i, args.header, date, time)
elif args.i[-4:] == "json":
    csvrows = utils.read_json(args.i)
else: # txt file
    with open(args.i, encoding="utf-8") as fn:
        lines = [x.strip().split("\t") for x in fn.readlines()]
        if args.header:
            lines = lines[1:]

# with open(args.o, 'w') as csvfile:
#     writer = csv.writer(csvfile)
#     for line in lines:
#         writer.writerow(line)
# quit()

# set columns of lines in right order
if args.c: 
    indexline = utils.read_columnfile(args.c)
Example #50
def read_routes():
    input_routes = read_json(os.path.join(exec_root, '..', 'preprocessor', 'input.json'))
    if len(set(r['name'] for r in input_routes['routes'])) != len(input_routes['routes']):
        print('Duplicate route names in input.json. Can\'t process.')
    return {r['name']: r for r in input_routes['routes']}
Example #51
CUR_DIR = os.path.realpath('.')
BATCH_SIZE = int(os.getenv('CK_BATCH_SIZE', 8))
DEVICE_ID = int(os.getenv('CK_DEVICE_ID', 0))
SNAPSHOTS_DIR = 'snapshots'
SNAPSHOT_INTERVAL = int(os.getenv('CK_SNAPSHOT_INTERVAL', 1000))
TEST_INTERVAL = int(os.getenv('CK_TEST_INTERVAL', 250))
TEST_RESULTS_DIR = 'validations'
SOLVER_PROTOTXT = 'solver.prototxt'
TRAIN_PROTOTXT = 'train.prototxt'
TEST_PROTOTXT = 'test.prototxt'
TRAIN_LMDB = 'train_lmdb'
TEST_LMDB = 'test_lmdb'
LABEL_MAP_FILE = 'labelmap_kitti.prototxt'
NAME_SIZE_FILE = 'test_name_size.txt'

PREPARED_INFO = utils.read_json('info.json')
PREPARED_IMG_W = int(PREPARED_INFO['img_width'])
PREPARED_IMG_H = int(PREPARED_INFO['img_height'])
NUM_CLASSES = int(PREPARED_INFO['num_classes'])


def read_prototxt_net(file_name):
  net = caffe_pb2.NetParameter()
  utils.read_prototxt(file_name, net)
  layers = {}
  for layer in net.layer:
    layers[layer.name] = layer
  return net, layers


def prepare_solver_prototxt():
Example #52
 def test_read_json_fails_nicely(self):
   self.assertIsNone(read_json('https://www.google.com/'))
   self.assertIsNone(read_json('http://hopefullyneverarealurl.com/'))
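Examples #8 and #52 both rely on a read_json(url) that fetches JSON over HTTP and returns None on any failure instead of raising. A minimal sketch of that fail-soft contract; only the contract comes from the examples, the implementation below is an assumption:

import json
import urllib.request

def read_json(url, timeout=10):
    # Hypothetical fail-soft variant: return parsed JSON fetched from a URL,
    # or None if the request fails or the payload is not valid JSON.
    try:
        with urllib.request.urlopen(url, timeout=timeout) as resp:
            return json.loads(resp.read().decode('utf-8'))
    except Exception:
        return None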