Example #1
def main(argv):
    # set up logging
    util.create_dir("logs")
    util.configure_logging("gzip", "logs/gzip.log")

    # check args
    if len(argv) != 4:
        print "Usage: {} <c/d> <source> <target>".format(argv[0])
        return -1

    # check if first argument is valid
    if argv[1] != "c" and argv[1] != "d":
        logging.error("First argument %s should be 'c' or 'd'", repr(argv[1]))
        return -1

    # check if files do (not) exist
    source = argv[2]
    target = argv[3]
    if not os.path.isfile(source):
        logging.error("Source %s does not exist", repr(source))
        return -1
    if os.path.isfile(target) and os.path.getsize(target) > 0:
        logging.error("Target %s already exists and is non-empty",
                      repr(target))
        return -1

    # compress/decompress
    if argv[1] == "c":
        compress(source, target)
    else:
        decompress(source, target)

    return 0
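main() takes the full argv list and returns a process exit code, so a typical entry point for this script (assumed here; the original module's entry point is not shown) would be:

import sys

if __name__ == "__main__":
    sys.exit(main(sys.argv))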
Example #2
    def init_album(self):
        #album json
        js = self.handler.read_link(url_album % self.album_id).json()['album']
        #name
        self.album_name = util.decode_html(js['name'])
        #album logo
        self.logo = js['picUrl']
        # artist_name
        self.artist_name = js['artists'][0]['name']
        #handle songs
        for jsong in js['songs']:
            song = NeteaseSong(self.handler, song_json=jsong)
            song.group_dir = self.artist_name + u'_' + self.album_name
            song.group_dir = song.group_dir.replace('/', '_')
            song.post_set()
            self.songs.append(song)

        d = path.dirname(self.songs[-1].abs_path)
        #creating the dir
        LOG.debug(msg.head_163 + msg.fmt_create_album_dir % d)
        util.create_dir(d)

        #download album logo images
        LOG.debug(msg.head_163 + msg.fmt_dl_album_cover % self.album_name)
        downloader.download_url(
            self.logo, path.join(d, 'cover.' + self.logo.split('.')[-1]))
Example #3
def convert_to_lucene(question_answer_content, doc_type_verbose, source_path):
    """
    :param question_answer_document content:
    :return: yield matchzoo data
    At initial version, we are just focusing on the context and question, nothing more,
    therefore we are ignoring the answer part as of now
    """
    word_counter, char_counter = Counter(), Counter()
    examples, eval_examples, questions, paragraphs, q_to_ps = process_squad_file(
        question_answer_content, word_counter, char_counter)
    tokenized_paragraphs = tokenize_contexts(paragraphs, -1)
    tokenized_questions = tokenize_contexts(questions, -1)
    tokenized_questions, tokenized_paragraphs = fixing_the_token_problem(tokenized_questions, tokenized_paragraphs)

    paragraphs_nontokenized = [" ".join(context) for context in tokenized_paragraphs]
    questions_nontokenized = [" ".join(context) for context in tokenized_questions]

    if doc_type_verbose == 1 or doc_type_verbose == 3:
        # questions
        print('Questions are getting dumped.')
        dst_dir = UTIL.create_dir(os.path.join(source_path, 'lucene_questions'))
        for indx, doc in tqdm(enumerate(questions_nontokenized)):
            as_json = dict()
            as_json['content'] = doc
            #as_json['doc_id'] = indx
            UTIL.dump_json_file(os.path.join(dst_dir, '{}.json'.format(indx)), as_json, None)
    # plain "if" rather than "elif", so doc_type_verbose == 3 dumps both sets
    if doc_type_verbose == 2 or doc_type_verbose == 3:
        print('Paragraphs are getting dumped.')
        dst_dir = UTIL.create_dir(os.path.join(source_path, 'lucene_paragraphs'))
        for indx, doc in tqdm(enumerate(paragraphs_nontokenized)):
            as_json = dict()
            as_json['content'] = doc
            #as_json['doc_id'] = indx
            UTIL.dump_json_file(os.path.join(dst_dir, '{}.json'.format(indx)), as_json, None)
    print('Completed.')
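UTIL.dump_json_file itself is not shown in these snippets. A plausible helper matching the call sites above, assuming the third argument is a json.dump indent level, could be:

import json

def dump_json_file(file_path, obj, indent=None):
    # Serialize obj as JSON to file_path; indent=None writes compact output.
    with open(file_path, 'w') as f:
        json.dump(obj, f, indent=indent)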
Example #4
def generate_pmbuild_config(config, profile):
    print(
        "--------------------------------------------------------------------------------"
    )
    print(
        "pmbuild live reload config -----------------------------------------------------"
    )
    print(
        "--------------------------------------------------------------------------------"
    )
    if "data_dir" not in config:
        print(
            "[error]: did not generate pmbuild_config.json for live reloading")
        return
    print("writing " + config["data_dir"] + "/" + "pmbuild_config.json")
    wd = os.getcwd()
    pmd = util.sanitize_file_path(config["env"]["pmtech_dir"])
    md = {
        "profile": profile,
        "pmtech_dir": pmd,
        "pmbuild": "cd " + wd + " && " + pmd + "pmbuild " + profile + " "
    }
    util.create_dir(config["data_dir"])
    with open(os.path.join(config["data_dir"], "pmbuild_config.json"), "w+") as f:
        f.write(json.dumps(md, indent=4))
Example #5
def gen_dataset(N, data_path, isMC=True):
    data_path = os.path.abspath(data_path)
    ut.create_dir(data_path)

    if isMC:
        movie_dict = ut.unpickle(
            os.path.join("data", "movies_single_index.pkl"))
    else:
        movie_dict = ut.unpickle(os.path.join(
            "data",
            "movies_multi_index.pkl"))  #Need to make this new index thing

    full_ids = list(movie_dict.keys())
    movie_ids = random.sample(full_ids, N)

    ids_l = len(movie_ids)
    shuffle(movie_ids)
    train_ids = movie_ids[:int(ids_l * SPLIT[0])]
    val_ids = movie_ids[int(ids_l * SPLIT[0]):int(ids_l *
                                                  (SPLIT[0] + SPLIT[1]))]
    test_ids = movie_ids[int(ids_l * (SPLIT[0] + SPLIT[1])):]  # the remainder; ':-1' would drop the last id

    print("Generating Training Set...")
    generate_set(train_ids, movie_dict, data_path, "train", isMC)
    print("Generating Validation Set...")
    generate_set(val_ids, movie_dict, data_path, "val", isMC)
    print("Generating Test Set...")
    generate_set(test_ids, movie_dict, data_path, "test", isMC)
Example #6
    def write_prediction(
        self,
        valid_df,
        batch_size,
    ):
        """
        Run prediction using given model on a list of images,
        and write the result to a csv file.
        :param batch_size: number of inputs in each batch.
        :param valid_df: validation dataset table
        :return:
            path to result result csv
        """
        predictions = self.model.predict_generator(
            self.img_generator(valid_df, batch_size),
            steps=math.ceil(valid_df.shape[0] / batch_size))
        util.create_dir(self.result_path)
        for i in range(len(predictions)):
            valid_df.at[i, "prediction"] = predictions[i]

        result_path = os.path.join(
            self.result_path,
            "vgg_{:%Y-%m-%d-%H%M}.csv".format(datetime.datetime.now()))
        valid_df.to_csv(result_path)

        return result_path
Example #8
    def init_album(self):
        #album json
        js = self.m163.read_link(url_album % self.album_id).json()['album']
        #name
        self.album_name = util.decode_html(js['name'])
        #album logo
        self.logo = js['picUrl']
        # artist_name
        self.artist_name = js['artists'][0]['name']
        #handle songs
        for jsong in js['songs']:
            song = NeteaseSong(self.m163, song_json=jsong)
            song.group_dir = self.artist_name + u'_' + self.album_name
            song.post_set()
            self.songs.append(song)

        d = path.dirname(self.songs[-1].abs_path)
        #creating the dir
        LOG.debug(u'Creating album directory [%s]' % d)
        util.create_dir(d)

        #download album logo images
        LOG.debug(u'Downloading cover for album [%s]' % self.album_name)
        downloader.download_by_url(
            self.logo, path.join(d, 'cover.' + self.logo.split('.')[-1]))
Example #9
    def write_prediction(self, valid_df, batch_size):
        """
        Run prediction using given model on a list of images,
        and write the result to a csv file.
        :param batch_size: number of inputs in each batch.
        :param valid_df: validation dataset table
        :return:
            path to result result csv
        """
        predictions = self.model.predict_generator(
            self.img_generator(valid_df, batch_size, False),
            steps=self.calc_steps(valid_df, batch_size))

        df = valid_df.groupby("study").agg({
            "path": lambda x: x.tolist(),
            "label": np.prod
        }).reset_index()

        util.create_dir(self.result_path)
        for i in range(len(df)):
            valid_df.loc[valid_df["study"] == df.iloc[i]["study"],
                         "prediction"] = predictions[i]

        result_path = os.path.join(
            self.result_path,
            "{}_{:%Y-%m-%d-%H%M}.csv".format(self.__class__.__name__,
                                             datetime.datetime.now()))
        valid_df.to_csv(result_path)

        return valid_df
Example #10
def test_run_all():
    '''Generate population, check it's the right size.'''
    nloci = 400
    create_dir('population')
    os.chdir('population')
    p = Population(TRUE, 'point', nloci)
    p.gen()
    assert len(run_all()) == p.size
Example #11
def test_create_dir():
    td = 'whatever'
    create_dir(td)
    assert os.path.isdir(td)

    # second call should also succeed, even though td exists
    create_dir(td)
    assert os.path.isdir(td)
    os.rmdir(td)
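None of these snippets include util.create_dir itself. A minimal sketch consistent with this test (a second call on an existing directory succeeds) and with Examples #47 and #48 further below (the created path is returned and chained) would simply wrap os.makedirs:

import os

def create_dir(path):
    # Create the directory and any missing parents; repeated calls on an
    # existing directory are a harmless no-op.
    os.makedirs(path, exist_ok=True)
    return path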
Example #12
    def init_album(self):
        resp_json = self.handler.read_link(url_album % self.album_id).json()
        j = resp_json['data']['trackList']

        if not j:
            LOG.error(resp_json['message'])
            return
        #description
        html = self.handler.read_link(self.url).text
        soup = BeautifulSoup(html, 'html.parser')
        if soup.find('meta', property="og:title"):
            self.album_desc = soup.find('span', property="v:summary").text
            # name
            self.album_name = soup.find('meta', property="og:title")['content']
            # album logo
            self.logo = soup.find('meta', property="og:image")['content']
            # artist_name
            self.artist_name = soup.find('meta',
                                         property="og:music:artist")['content']
        else:
            aSong = j[0]
            self.album_name = aSong['album_name']
            self.logo = aSong['album_pic']
            self.artist_name = aSong['artistVOs'][0]['artistName']
            self.album_desc = None

        #handle songs
        for jsong in j:
            song = XiamiSong(self.handler, song_json=jsong)
            song.song_name = jsong['name']  # name or songName
            song.group_dir = self.artist_name + u'_' + self.album_name
            song.group_dir = song.group_dir.replace('/', '_')
            song.post_set()
            self.songs.append(song)

        d = path.dirname(self.songs[-1].abs_path)
        #creating the dir
        LOG.debug(msg.head_xm + msg.fmt_create_album_dir % d)
        util.create_dir(d)

        #download album logo images
        LOG.debug(msg.head_xm + msg.fmt_dl_album_cover % self.album_name)
        if self.logo:
            self.logo = self.handler.add_http_prefix(self.logo)
            downloader.download_url(
                self.logo, path.join(d, 'cover.' + self.logo.split('.')[-1]))

        LOG.debug(msg.head_xm + msg.fmt_save_album_desc % self.album_name)
        if self.album_desc:
            self.album_desc = re.sub(r'&lt;\s*[bB][rR]\s*/&gt;', '\n',
                                     self.album_desc)
            self.album_desc = re.sub(r'&lt;.*?&gt;', '', self.album_desc)
            self.album_desc = util.decode_html(self.album_desc)
            import codecs
            with codecs.open(path.join(d, 'album_description.txt'), 'w',
                             'utf-8') as f:
                f.write(self.album_desc)
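The description cleanup above (repeated in Examples #43 and #44) first turns HTML-encoded <br/> tags into newlines, then strips the remaining encoded tags, then decodes entities. A standalone sketch of the same steps, with util.decode_html swapped for the standard library's html.unescape:

import html
import re

def clean_description(desc):
    desc = re.sub(r'&lt;\s*[bB][rR]\s*/&gt;', '\n', desc)  # encoded <br/> -> newline
    desc = re.sub(r'&lt;.*?&gt;', '', desc)  # drop any other encoded tag
    return html.unescape(desc)  # decode the remaining entities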
Example #13
    def default_logger_creator(config):
        """Creates a Unified logger with a default logdir prefix
        containing the agent name and the env id
        """
        if config['multiagent']['policies_to_train'][0] == '0':
            agent_path = os.path.join(EXPERIMENT_PATH, "player0")
        elif config['multiagent']['policies_to_train'][0] == '1':
            agent_path = os.path.join(EXPERIMENT_PATH, "player1")
        create_dir(agent_path, clean=True)
        return UnifiedLogger(config, agent_path, loggers=None)
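This snippet and Example #17 pass a clean flag to create_dir. A sketch of that variant, assuming clean=True means "start from an empty directory":

import os
import shutil

def create_dir(path, clean=False):
    # Optionally wipe any previous contents before (re)creating the directory.
    if clean and os.path.isdir(path):
        shutil.rmtree(path)
    os.makedirs(path, exist_ok=True)
    return path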
Example #14
def _save_image(step, img, experiment_path):
    img_path = os.path.join(experiment_path, "render")
    create_dir(img_path, clean=False)
    if step == 0:
        files = glob.glob(f'{img_path}/*')
        for f in files:
            os.remove(f)
    img_path = os.path.join(img_path, f"{str(step).zfill(5)}.png")
    img = img.resize((1000, 1000))
    img.save(img_path)
Example #15
def test_generate_population():
    '''Generate population, check it's the right size.'''
    nloci = 400
    create_dir('population')
    os.chdir('population')
    p = Population(WILD_TYPE, 'point', nloci)
    p.gen()
    files = [file for file in os.walk('.') if file[2]]
    assert len(files) == nloci
    assert len(p.enum()) == nloci
    assert p.size == nloci
Example #17
    def execute_test_sample(self, context, s, difficulty_test_base_path,
                            in_mem, out_mem, regs, timesteps):
        if context.print_memories or context.print_circuits != 0:
            sample_difficulty_base_path = "%s/%s" % (difficulty_test_base_path,
                                                     s)
            create_dir(sample_difficulty_base_path, True)

        if context.info_is_active:
            print(
                "\nSample[%d], Initial memory: %s, Desired memory: %s, Initial registers: %s"
                %
                (s, in_mem[:].argmax(axis=1), out_mem, regs[:].argmax(axis=1)))

        # Iterate for every timestep
        debug = list()  # collect per-timestep debug info for this sample
        for t in range(timesteps):
            dt = DebugTimestep(context, t, s)
            coeffs, complete = self.__run_network(regs, dt)
            regs, in_mem = self.__run_circuit(regs, in_mem, context.gates,
                                              coeffs, dt)
            debug.append(dt)

            if self.context.stop_at_the_will and complete.sum() >= 1.0:
                break

        # Debug for the sample
        if context.info_is_active:
            for dt in debug:
                print(dt)
            print("\t• Expected mem => %s" % out_mem)

        if context.print_memories:
            with open("%s/memories.txt" % sample_difficulty_base_path,
                      "a+") as f:
                f.write(
                    "\\textbf{Step} & %s & %s & Read & Write \\\\ \\hline \n" %
                    (" & ".join("%s" % r for r in range(out_mem.shape[0])),
                     " & ".join("\\textit{r}%d" % r
                                for r in range(context.num_regs))))
            for dt in debug:
                dt.print_memory_to_file(sample_difficulty_base_path, timesteps)

        if context.print_circuits != 0:
            # Create dir for the single example of a difficulty
            for dt in debug:
                if context.print_circuits == 1:
                    dt.print_circuit(sample_difficulty_base_path)
                else:
                    dt.print_pruned_circuit(sample_difficulty_base_path)

        return s, in_mem
Example #18
    def init_collection(self):
        j = self.xm.read_link(url_collection % self.collection_id).json()['collect']
        self.collection_name = j['name']
        for jsong in j['songs']:
            song = Song(self.xm, song_json=jsong)
            # rewrite filename, make it different
            song.group_dir = self.collection_name
            song.abs_path = path.join(config.DOWNLOAD_DIR, song.group_dir, song.filename)
            self.songs.append(song)
        if len(self.songs):
            # creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
Example #19
    def init_playlist(self):
        j = self.handler.read_link(url_playlist % self.playlist_id).json()['result']
        self.playlist_name = j['name']
        for jsong in j['tracks']:
            song = NeteaseSong(self.handler, song_json=jsong)
            # rewrite filename, make it different
            song.group_dir = self.playlist_name
            song.post_set()
            self.songs.append(song)
        if len(self.songs):
            # creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
Example #20
def get_embedding_set(df, embedding_type, output_name, shaping, selection_tag="", overwrite=False):
    logging.info("Creating %s embedding %s...", embedding_type, output_name)
    
    path = "../data/cache/" + selection_tag + output_name + "_" + embedding_type
    path_and_name = path + "/" + shaping + ".pkl.pkl"
    
    if not util.check_output_necessary(path_and_name, overwrite):
        # df = pickle.load(path_and_name)
        with open(path_and_name, 'rb') as infile:
            df = pickle.load(infile)
        return df
    
    util.create_dir(path)
    
    embedding_df = None
    logging.debug("Preparing to run vectorization")
    if embedding_type == "w2v":
        embedding_df = word2vec_creator.run_w2v(df, path_and_name, shaping=shaping, word_limit=-1, sentics=False)
    elif embedding_type == "w2v_sentic":
        embedding_df = word2vec_creator.run_w2v(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True)
    elif embedding_type == "w2v_sentic_full":
        embedding_df = word2vec_creator.run_w2v(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True, zero_pad=True)
    elif embedding_type == "w2v_limit":
        # NOTE: yes, it looks like sentics should be false, but this is how we limit w2v embeddings to just the patterns the sentics would have been calculated on
        embedding_df = word2vec_creator.run_w2v(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True, model_only=True)
    elif embedding_type == "glove":
        embedding_df = word2vec_creator.run_glove(df, path_and_name, shaping=shaping, word_limit=-1, sentics=False)
    elif embedding_type == "glove_sentic":
        embedding_df = word2vec_creator.run_glove(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True)
    elif embedding_type == "glove_sentic_full":
        embedding_df = word2vec_creator.run_glove(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True, zero_pad=True)
    elif embedding_type == "glove_limit":
        # NOTE: yes, it looks like sentics should be false, but this is how we limit glove embeddings to just the patterns the sentics would have been calculated on
        embedding_df = word2vec_creator.run_glove(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True, model_only=True)
    elif embedding_type == "ft":
        embedding_df = word2vec_creator.run_fasttext(df, path_and_name, shaping=shaping, word_limit=-1, sentics=False)
    elif embedding_type == "ft_sentic":
        embedding_df = word2vec_creator.run_fasttext(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True)
    elif embedding_type == "ft_sentic_full":
        embedding_df = word2vec_creator.run_fasttext(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True, zero_pad=True)
    elif embedding_type == "fasttext_limit":
        # NOTE: yes, it looks like sentics should be false, but this is how we limit fasttext embeddings to just the patterns the sentics would have been calculated on
        embedding_df = word2vec_creator.run_fasttext(df, path_and_name, shaping=shaping, word_limit=-1, sentics=True, model_only=True)
    elif embedding_type == "sentic":
        embedding_df = word2vec_creator.vectorize_collection(df, path_and_name, model=None, shaping=shaping, word_limit=-1, sentics=True)
    elif embedding_type == "tfidf":
        # TODO: add ability to add sentics here too?
        embedding_df = create_tfidf(df, path, overwrite=True)
        # NOTE: would have to save it (and check for preexisting) separately as well then
        

    return embedding_df
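Most branches of the if/elif ladder above differ only in the runner function and one or two flags, so the dispatch could be table-driven. A partial sketch (same assumed word2vec_creator entry points, not the original code):

# (runner, extra kwargs) per embedding type
EMBEDDING_RUNNERS = {
    "w2v": (word2vec_creator.run_w2v, {"sentics": False}),
    "w2v_sentic": (word2vec_creator.run_w2v, {"sentics": True}),
    "glove": (word2vec_creator.run_glove, {"sentics": False}),
    "ft": (word2vec_creator.run_fasttext, {"sentics": False}),
    # ...the remaining variants follow the same pattern
}

def run_embedding(embedding_type, df, path_and_name, shaping):
    runner, kwargs = EMBEDDING_RUNNERS[embedding_type]
    return runner(df, path_and_name, shaping=shaping, word_limit=-1, **kwargs)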
Example #21
    def init_collection(self):
        j = self.xm.read_link(url_collection % self.collection_id).json()['data']['trackList']
        j_first_song = j[0]
        # read collection name
        self.collection_name = self.get_collection_name()
        for jsong in j:
            song = XiamiSong(self.xm, song_json=jsong)
            # rewrite filename, make it different
            song.group_dir = self.collection_name
            song.post_set()
            self.songs.append(song)
        if len(self.songs):
            # creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
Example #22
    def init_fav(self):
        """ parse html and load json and init Song object
        for each found song url"""
        page = 1
        user = ''
        total = 0
        cur = 1  #current processing link
        LOG.debug(msg.head_xm + msg.fmt_init_fav % self.uid)
        while True:
            html = self.handler.read_link(url_fav % (self.uid, page)).text
            soup = BeautifulSoup(html, 'html.parser')
            if not user:
                user = soup.title.string
            if not total:
                total = soup.find('span', class_='counts').string

            links = [
                link.get('href') for link in soup.find_all(
                    href=re.compile(r'xiami.com/song/[^?]+')) if link
            ]
            if links:
                for link in links:
                    LOG.debug(msg.head_xm + msg.fmt_parse_song_url % link)
                    if self.verbose:
                        sys.stdout.write(
                            log.hl(
                                '[%d/%s] parsing song ........ ' %
                                (cur, total), 'green'))
                        sys.stdout.flush()
                    try:
                        cur += 1
                        song = XiamiSong(self.handler, url=link)
                        #time.sleep(2)
                        if self.verbose:
                            sys.stdout.write(log.hl('DONE\n', 'green'))
                    except Exception:
                        sys.stdout.write(log.hl('FAILED\n', 'error'))
                        continue
                    #rewrite filename, make it different
                    song.group_dir = user
                    song.post_set()
                    self.songs.append(song)
                page += 1
            else:
                break

        if len(self.songs):
            #creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
        LOG.debug(msg.head_xm + msg.fmt_init_fav_ok % self.uid)
Example #23
def gist(input_file, output_folder):

    # take all images, category by category
    img_dict, img_dict_path = load_images_from_file(input_file)

    gist_result = gist_img(img_dict)  # avoid shadowing the enclosing gist()
    name = gist_result[0]
    desc_list = gist_result[1]  # the descriptor list, which is unordered
    cat_dict = gist_result[2]  # features separated class by class for the train data
    create_dir(output_folder)
    create_dir(f"{output_folder}/{name}/")
    save(cat_dict, img_dict_path, f"{output_folder}/{name}/")
    print("\nThe execution of gist is done!\n")
Example #24
    def init_topsong(self):
        j = self.handler.read_link(url_artist_top_song % (self.artist_id)).json()
        self.artist_name = j['artist']['name']
        for jsong in j['hotSongs']:
            song = NeteaseSong(self.handler, song_json=jsong)
            song.group_dir = self.artist_name + '_TopSongs'
            song.post_set()
            self.songs.append(song)
            #check config for top X
            if len(self.songs) >= config.DOWNLOAD_TOP_SONG:
                break

        if len(self.songs):
            #creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
Example #25
    def init_topsong(self):
        j = self.xm.read_link(url_artist_top_song % (self.artist_id)).json()
        for jsong in j['songs']:
            song = XiamiSong(self.xm, song_json=jsong)
            song.group_dir = self.artist_name + '_TopSongs'
            song.post_set()
            self.songs.append(song)
            #check config for top X
            if len(self.songs) >= config.DOWNLOAD_TOP_SONG:
                break

        if len(self.songs):
            #set the artist name
            self.artist_name = self.songs[-1].artist_name
            #creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
Example #26
def save_img(img, filename):
    """
    Utility method that convert a ndarray into image and save to a image file.
    Args:
        img: image in ndarray
        filename: target filename including path

    Returns:

    """
    img = keras.preprocessing.image.array_to_img(img)
    try:
        img.save(filename)
    except FileNotFoundError:
        util.create_dir(os.path.dirname(filename))
        img.save(filename)
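save_img creates the directory lazily, retrying only after the first FileNotFoundError. The eager equivalent (a sketch using os.makedirs directly rather than util.create_dir) ensures the parent directory exists before the first save attempt:

import os
import keras

def save_img_eager(img, filename):
    # Create the parent directory up front instead of retrying on failure.
    parent = os.path.dirname(filename)
    if parent:
        os.makedirs(parent, exist_ok=True)
    keras.preprocessing.image.array_to_img(img).save(filename)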
Example #28
    def init_collection(self):
        LOG.debug(msg.head_xm + msg.fmt_init_collect % self.collection_id)
        j = self.handler.read_link(url_collection % self.collection_id).json()['data']['trackList']
        j_first_song = j[0]
        # read collection name
        self.collection_name = self.get_collection_name()
        for jsong in j:
            song = XiamiSong(self.handler, song_json=jsong)
            # rewrite filename, make it different
            song.group_dir = self.collection_name
            song.post_set()
            self.songs.append(song)
        if len(self.songs):
            # creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
        LOG.debug(msg.head_xm + msg.fmt_init_collect_ok % self.collection_id)
Example #29
def run_textures(config):
    print(
        "--------------------------------------------------------------------------------"
    )
    print(
        "textures -----------------------------------------------------------------------"
    )
    print(
        "--------------------------------------------------------------------------------"
    )
    tool_cmd = tool_to_platform(config["tools"]["texturec"])
    for task in config["textures"]:
        files = get_task_files_containers(task)
        for f in files:
            copy_fmt = [".dds", ".pmv"]
            conv_fmt = [".png", ".jpg", ".tga", ".bmp", ".txt"]
            cont_fmt = [".txt"]
            fext = os.path.splitext(f[0])[1]
            if fext in copy_fmt:
                util.copy_file_create_dir_if_newer(f[0], f[1])
            if fext in conv_fmt:
                export = export_config_for_file(f[0])
                dep_inputs = [f[0]]
                if fext in cont_fmt:
                    export = export_config_for_directory(f[0], "osx")
                    dep_inputs = get_container_dep_inputs(f[0], dep_inputs)
                dst = util.change_ext(f[1], ".dds")
                if not dependencies.check_up_to_date_single(dst):
                    if "format" not in export.keys():
                        export["format"] = "RGBA8"
                    dep_outputs = [dst]
                    dep_info = dependencies.create_dependency_info(
                        dep_inputs, dep_outputs)
                    dep = dict()
                    dep["files"] = list()
                    dep["files"].append(dep_info)
                    util.create_dir(dst)
                    cmd = tool_cmd + " "
                    cmd += "-f " + f[0] + " "
                    cmd += "-t " + export["format"] + " "
                    if "cubemap" in export.keys() and export["cubemap"]:
                        cmd += " --cubearray "
                    cmd += "-o " + dst
                    print("texturec " + f[0])
                    subprocess.call(cmd, shell=True)
                    dependencies.write_to_file_single(
                        dep, util.change_ext(dst, ".json"))
Example #30
def main():
    if len(sys.argv) < 7:
        print(
            'Usage: python3 sample.py [MODEL_PATH] [DATA_PATH] [SPLIT_INDEX] [OUT_PATH] [RECIPE_ID] [NUM_SAMPLES]'
        )
        sys.exit(1)
    model_path = os.path.abspath(sys.argv[1])
    data_path = os.path.abspath(sys.argv[2])
    split_index = int(sys.argv[3])
    out_path = os.path.abspath(sys.argv[4])
    recipe_id = sys.argv[5]
    num_samples = int(sys.argv[6])

    util.create_dir(out_path)
    saved_model = torch.load(model_path)

    data = GANstronomyDataset(data_path, split=opts.TVT_SPLIT)
    data.set_split_index(split_index)
    data_loader = DataLoader(data,
                             batch_size=1,
                             shuffle=False,
                             sampler=SequentialSampler(data))

    num_classes = data.num_classes()
    G = Generator(opts.LATENT_SIZE, opts.EMBED_SIZE).to(opts.DEVICE)
    G.load_state_dict(saved_model['G_state_dict'])
    G.eval()

    embs = None

    for ibatch, data_batch in enumerate(data_loader):
        with torch.no_grad():
            recipe_ids, recipe_embs, img_ids, imgs, classes, _, _ = data_batch
            batch_size, recipe_embs, imgs = util.get_variables3(
                recipe_ids, recipe_embs, img_ids, imgs)
            if recipe_ids[0] == recipe_id:
                embs = recipe_embs
                break

    assert embs is not None

    z = torch.randn(num_samples, opts.LATENT_SIZE).to(opts.DEVICE)
    imgs_gen = G(z, embs.expand(num_samples, opts.EMBED_SIZE)).detach()
    for i in range(num_samples):
        img_gen = imgs_gen[i]
        save_results(out_path, recipe_id, img_gen, i)
Example #32
def test():
    cd_to_pigz()
    util.create_dir("pigz_test_tmp")
    shutil.copyfile("pigz.c", os.path.join("pigz_test_tmp", "pigz.c"))

    os.chdir("pigz_test_tmp")
    sha1_orig = util.file_sha1("pigz.c")
    test_one_with_flag(sha1_orig, ".gz", "-f")
    test_one_with_flag2(sha1_orig, ".gz", "-fb", "32")
    test_one_with_flag2(sha1_orig, ".gz", "-fp", "1")
    test_one_with_flag(sha1_orig, ".zz", "-fz")
    test_one_with_flag(sha1_orig, ".zip", "-fK")
    test_one_with_flag2(sha1_orig, ".gz", "-11", "-f")
    # TODO: more tests for stdin/stdout
    # TODO: compare with gzip/compress (if available in path)
    cd_to_pigz()
    rm_dirs(["pigz_test_tmp"])
Example #33
def load_single_config(conf_parser, conf_key):
    config_warn_msg = "Cannot load config [%s], use default value: %s"
    try:
        v = conf_parser.get("settings", conf_key)
        if not v:
            raise Exception("ConfigError", "invalid")
        gkey = var_dict[conf_key][0]
        ty = var_dict[conf_key][1]

        if ty == "n":
            globals()[gkey] = int(v)
        else:
            if ty == "p":
                util.create_dir(v)
            globals()[gkey] = v
    except Exception:
        log.warn(config_warn_msg % (conf_key, str(globals()[var_dict[conf_key][0]])))
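load_single_config relies on a module-level var_dict that maps each config key to a (global name, type tag) pair. Hypothetical entries, with names borrowed from the other snippets here rather than from the original module:

# type tag 'n' = numeric, 'p' = path (the directory is created on load)
var_dict = {
    'download.dir': ('DOWNLOAD_DIR', 'p'),
    'download.artist.topsong': ('DOWNLOAD_TOP_SONG', 'n'),
}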
Example #34
    def init_fav(self):
        page = 1
        while True:
            j = self.xm.read_link(url_fav % (self.uid, str(page))).json()
            if j['songs']:
                for jsong in j['songs']:
                    song = Song(self.xm, song_json=jsong)
                    # rewrite filename, make it different
                    song.group_dir = 'favorite_%s' % self.uid
                    song.abs_path = path.join(config.DOWNLOAD_DIR, song.group_dir, song.filename)
                    self.songs.append(song)
                page += 1
            else:
                break
        if len(self.songs):
            # creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
Example #35
def main():
    # cli args checking
    parser = argparse.ArgumentParser()
    # require a subcommand so that "method" below is always bound
    subparsers = parser.add_subparsers(dest="action", required=True)
    parser_c = subparsers.add_parser("c")
    parser_c.add_argument("source", type=str)
    parser_c.add_argument("target", type=str)
    parser_c.add_argument("reference", type=str)
    parser_c.add_argument("--inner", type=str)
    parser_c.add_argument("--delta", type=str)
    parser_c.add_argument("--nointra", action="store_true")
    parser_d = subparsers.add_parser("d")
    parser_d.add_argument("source", type=str)
    parser_d.add_argument("target", type=str)
    args = parser.parse_args()

    # create method name
    if args.action == "c":
        method = create_method_name(args.nointra, args.delta, args.inner)
    elif args.action == "d":
        with open(args.source, "rb") as f:
            method = util.parse_header(f)[1]

    # set up logging
    util.create_dir("logs")
    util.configure_logging(method, "logs/{}.log".format(method))

    # check if files do (not) exist
    if not os.path.isfile(args.source):
        logging.error("Source %s does not exist", repr(args.source))
        return -1
    if os.path.isfile(args.target) and os.path.getsize(args.target) > 0:
        logging.error("Target %s already exists and is non-empty",
                      repr(args.target))
        return -1
    if args.action == "c" and not os.path.isfile(args.reference):
        logging.error("Reference %s does not exist", repr(args.reference))
        return -1

    # compress/decompress
    if args.action == "c":
        return compress(args.source, args.target, args.reference, args.nointra,
                        args.delta, args.inner)
    elif args.action == "d":
        return decompress(args.source, args.target)
Example #37
def draw_date_stats(date_stats):
    langs_info = read_auxiliary_file('../aux_files/langs_all_info.csv')
    code_to_lang = get_code_lang_mapping(langs_info)
    code_to_lang['overall'] = 'Overall distribution by dates'

    graphs_dir = create_dir('date_graphs')
    # draw_each_lang_separately(date_stats, code_to_lang, graphs_dir)
    # draw_all_langs_same_plot(date_stats, code_to_lang, graphs_dir)
    draw_overall_dates_with_registrars(date_stats, graphs_dir)
Example #38
def load_single_config(conf_parser, conf_key):
    config_warn_msg = "Cannot load config [%s], use default value: %s"
    try:
        v = conf_parser.get('settings', conf_key)
        if not v:
            raise Exception('ConfigError', 'invalid')
        gkey = var_dict[conf_key][0]
        ty = var_dict[conf_key][1]

        if ty == 'n':
            globals()[gkey] = int(v)
        else:
            if ty == 'p':
                util.create_dir(v)
            globals()[gkey] = v
    except Exception:
        # `not s.find(x)` is True only when s starts with x
        if conf_key.startswith('proxy.http'):
            log.warn(config_warn_msg % (conf_key, str(globals()[var_dict[conf_key][0]])))
Example #40
    def init_collection(self):
        LOG.debug(msg.head_xm + msg.fmt_init_collect % self.collection_id)
        j = self.handler.read_link(url_collection % self.collection_id).json()['data']['trackList']
        if not j:
            LOG.warning(msg.head_xm + " cannot load data for url: %s " % self.url)
            return
        j_first_song = j[0]
        # read collection name
        self.collection_name = self.get_collection_name()
        for jsong in j:
            song = XiamiSong(self.handler, song_json=jsong)
            # rewrite filename, make it different
            song.group_dir = self.collection_name
            song.post_set()
            self.songs.append(song)
        if len(self.songs):
            # creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
        LOG.debug(msg.head_xm + msg.fmt_init_collect_ok % self.collection_id)
Example #41
def load_single_config(conf_parser, conf_key):
    config_warn_msg = "Cannot load config [%s], use default value: %s"
    try:
        v = conf_parser.get('settings', conf_key)
        if not v:
            raise Exception('ConfigError', 'invalid')
        gkey = var_dict[conf_key][0]
        ty = var_dict[conf_key][1]

        if ty == 'n':
            # topx must be at most 30; otherwise keep the default
            if conf_key == 'download.artist.topsong' and int(v) > 30:
                return
            globals()[gkey] = int(v)
        else:
            if ty == 'p':
                util.create_dir(v)
            globals()[gkey] = v
    except Exception:
        log.warn(config_warn_msg % (conf_key, str(globals()[var_dict[conf_key][0]])))
Example #42
    def init_topsong(self):
        LOG.debug(msg.head_xm + msg.fmt_init_artist % self.artist_id)
        j = self.handler.read_link(url_artist_top_song % (self.artist_id)).json()['data']['trackList']
        for jsong in j:
            song = XiamiSong(self.handler, song_json=jsong)
            if not self.artist_name:
                self.artist_name = song.artist_name
            song.group_dir = self.artist_name + '_TopSongs'
            song.post_set()
            self.songs.append(song)
            #check config for top X
            if config.DOWNLOAD_TOP_SONG > 0 and len(self.songs) >= config.DOWNLOAD_TOP_SONG:
                break

        if len(self.songs):
            #set the artist name
            self.artist_name = self.songs[-1].artist_name
            #creating the dir
            util.create_dir(path.dirname(self.songs[-1].abs_path))
        LOG.debug(msg.head_xm + msg.fmt_init_artist_ok % self.artist_id)
Example #43
    def init_album(self):
        j = self.handler.read_link(url_album % self.album_id).json()['data']['trackList']
        j_first_song = j[0]
        #name
        self.album_name = util.decode_html(j_first_song['album_name'])
        #album logo
        self.logo = j_first_song['album_pic']
        # artist_name
        self.artist_name = j_first_song['artist']

        #description
        html = self.handler.read_link(self.url).text
        soup = BeautifulSoup(html, 'html.parser')
        self.album_desc = soup.find('span', property="v:summary").text

        #handle songs
        for jsong in j:
            song = XiamiSong(self.handler, song_json=jsong)
            song.group_dir = self.artist_name + u'_' + self.album_name
            song.post_set()
            self.songs.append(song)

        d = path.dirname(self.songs[-1].abs_path)
        #creating the dir
        LOG.debug(msg.head_xm + msg.fmt_create_album_dir % d)
        util.create_dir(d)

        #download album logo images
        LOG.debug(msg.head_xm + msg.fmt_dl_album_cover % self.album_name)
        downloader.download_url(self.logo, path.join(d, 'cover.' + self.logo.split('.')[-1]))

        LOG.debug(msg.head_xm + msg.fmt_save_album_desc % self.album_name)
        if self.album_desc:
            self.album_desc = re.sub(r'&lt;\s*[bB][rR]\s*/&gt;','\n',self.album_desc)
            self.album_desc = re.sub(r'&lt;.*?&gt;','',self.album_desc)
            self.album_desc = util.decode_html(self.album_desc)
            import codecs
            with codecs.open(path.join(d,'album_description.txt'), 'w', 'utf-8') as f:
                f.write(self.album_desc)
Example #44
    def init_album(self):
        j = self.xm.read_link(url_album % self.album_id).json()['album']
        # name
        self.album_name = util.decode_html(j['title'])
        # album logo
        self.logo = j['album_logo']
        # artist_name
        self.artist_name = j['artist_name']

        # description
        self.album_desc = j['description']

        # handle songs
        for jsong in j['songs']:
            song = XiamiSong(self.xm, song_json=jsong)
            song.group_dir = self.artist_name + u'_' + self.album_name
            song.post_set()
            self.songs.append(song)

        d = path.dirname(self.songs[-1].abs_path)
        # creating the dir
        LOG.debug(u'Creating album directory [%s]' % d)
        util.create_dir(d)

        # download album logo images
        LOG.debug(u'Downloading cover for album [%s]' % self.album_name)
        downloader.download_by_url(
            self.logo, path.join(d, 'cover.' + self.logo.split('.')[-1]))

        LOG.debug(u'Saving description for album [%s]' % self.album_name)
        if self.album_desc:
            self.album_desc = re.sub(
                r'&lt;\s*[bB][rR]\s*/&gt;', '\n', self.album_desc)
            self.album_desc = re.sub(r'&lt;.*?&gt;', '', self.album_desc)
            self.album_desc = util.decode_html(self.album_desc)
            import codecs
            with codecs.open(path.join(d, 'album_description.txt'), 'w', 'utf-8') as f:
                f.write(self.album_desc)
Example #46
    def init_album(self):
        # album json
        js = self.handler.read_link(url_album % self.album_id).json()["album"]
        # name
        self.album_name = util.decode_html(js["name"])
        # album logo
        self.logo = js["picUrl"]
        # artist_name
        self.artist_name = js["artists"][0]["name"]
        # handle songs
        for jsong in js["songs"]:
            song = NeteaseSong(self.handler, song_json=jsong)
            song.group_dir = self.artist_name + u"_" + self.album_name
            song.post_set()
            self.songs.append(song)

        d = path.dirname(self.songs[-1].abs_path)
        # creating the dir
        LOG.debug(msg.head_163 + msg.fmt_create_album_dir % d)
        util.create_dir(d)

        # download album logo images
        LOG.debug(msg.head_163 + msg.fmt_dl_album_cover % self.album_name)
        downloader.download_by_url(self.logo, path.join(d, "cover." + self.logo.split(".")[-1]))
Example #47
def get_stats_cache_dir(): return create_dir(os.path.join(get_cache_dir(), "stats"))
def get_logs_cache_dir(): return create_dir(os.path.join(get_cache_dir(), "logs"))
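Example #47 re-runs create_dir on every call, while Example #48 below caches the paths in module-level globals at import time. A middle ground (a sketch using functools.lru_cache, not taken from the original scripts) memoizes the per-call form:

import functools
import os

@functools.lru_cache(maxsize=None)
def get_stats_cache_dir():
    # create_dir runs once on first use; later calls return the cached path.
    return create_dir(os.path.join(get_cache_dir(), "stats"))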
Example #48
		"analyze_out" : "",
	}
	fields_no_serialize = ["rel_build_log", "analyze_out"]

	def __init__(self, read_from_file=None):
		Serializable.__init__(self, Stats.fields, Stats.fields_no_serialize, read_from_file)

def file_size(p):
	return os.path.getsize(p)

def str2bool(s):
	if s.lower() in ("true", "1"): return True
	if s.lower() in ("false", "0"): return False
	assert False

g_cache_dir = create_dir(os.path.realpath(os.path.join("..", "sumatrapdfcache", "buildbot")))
g_stats_cache_dir = create_dir(os.path.join(g_cache_dir, "stats"))
g_logs_cache_dir = create_dir(os.path.join(g_cache_dir, "logs"))
def get_cache_dir(): return g_cache_dir
def get_stats_cache_dir(): return g_stats_cache_dir
def get_logs_cache_dir(): return g_logs_cache_dir

def logs_efi_out_path(ver):
	return os.path.join(get_logs_cache_dir(), str(ver) + "_efi.txt.bz2")

# logs are only kept for potential troubleshooting and they're quite big,
# so we delete old files (we keep logs for the last $to_keep revisions)
def delete_old_logs(to_keep=10):
	files = os.listdir(get_logs_cache_dir())
	versions = []
	for f in files:
Example #49
def get_logs_cache_dir(): return create_dir(os.path.join(get_cache_dir(), "logs"))

# logs are only kept for potential troubleshooting and they're quite big,
# so we delete old files (we keep logs for the last $to_keep revisions)
def delete_old_logs(to_keep=10):
Example #50
def get_cache_dir(): return create_dir(os.path.join("..", "sumatrapdfcache", "buildbot"))
def get_stats_cache_dir(): return create_dir(os.path.join(get_cache_dir(), "stats"))
Example #51
	if m2:
		lossRate = float(m2.group(1))
		lossRate /= 100.
	return throughput, lossRate

if __name__ == '__main__':
	if len(sys.argv) != 1 and len(sys.argv) != 4 and len(sys.argv) != 5:
		print "Usage: %s [mode algorithm drive [iperf_script]]" % sys.argv[0]
		sys.exit(0)

	python_script = "../iperf/server_tcp.py"
	device = ""
	outputFile = None

	dirname = "../../data/drive_2_28_2016"
	create_dir(dirname)

	if len(sys.argv) >= 4:
		mode = sys.argv[1]
		alg = sys.argv[2]
		drive = sys.argv[3]
		outputFile = "%s/gps_throughput_%s_%s_%s.dat" % (dirname, mode, alg, drive)
	if len(sys.argv) == 5:
		python_script = sys.argv[4]
	
	gps_info = gps_parser.GPSInfo()
	gps_parse = gps_parser.GPSParser()
	thread = threading.Thread(target=gps_parser.GPSParser.read_gps, args=(gps_parse, gps_info, device,))
	thread.start()
	time.sleep(2)
	outf = open(outputFile, 'w') if outputFile else sys.stdout
Example #52
def sum_efi_cache_dir(ver):
    # make it outside of sumatrapdf_efi directory?
    d = os.path.join(sum_efi_dir(), "efi_cache", str(ver))
    return util.create_dir(d)
def calculate_capacity(data_rates, arr):
  throughput_arr = arr[:, 3:].copy()
  for no, rate in enumerate(data_rates):
    throughput_arr[:, no] = (1 - throughput_arr[:, no]) * rate
  capacity_arr = numpy.max(throughput_arr, axis=1)
  return capacity_arr

if __name__ == '__main__':
  data_rates = numpy.array([1, 2, 5.5, 6, 9, 11, 12, 18, 24, 36, 48, 54])
  INTERVAL = 50
  max_num_bs = 20
  max_num_clients = 20

  dirname_w = "trace"
  filename_r = "packet_loss.dat"
  create_dir(dirname_w)
  arr = numpy.loadtxt(filename_r, dtype='f8')
  for client_id in range(1, max_num_clients + 1):
    capacity_arr_total = None
    filename_w = "%s/client_%d.dat" % (dirname_w, client_id)
    for bs_id in range(1, max_num_bs + 1):
      #filename_loss = "%s/packet_loss_%d_%d.dat" % (dirname_w, bs_id, client_id)
      filename_loss = None
      synthesized_arr = shuffle_packet_loss(INTERVAL, arr, filename_loss)
      capacity_arr = calculate_capacity(data_rates, synthesized_arr)
      if capacity_arr_total is None:
        capacity_arr_total = capacity_arr
      else:
        capacity_arr_total = numpy.vstack((capacity_arr_total, capacity_arr))
    numpy.savetxt(filename_w, numpy.atleast_2d(capacity_arr_total).T, fmt="%g")