Code example #1
def copy_result_to_output_path(final_label_matrix, max_confidence_frame_ids, output_path):
    unique_group_id, indices = np.unique(final_label_matrix, return_inverse=True)
    unique_group_id = unique_group_id[1:]
    
    utils.make_directory(output_path)
    count = 1
    for group_id in unique_group_id:
        keyframe = int(max_confidence_frame_ids[int(group_id)])
        people_index = np.argwhere(final_label_matrix[:, keyframe] == int(group_id))[0,0]
        print(people_index, keyframe)
        finished = False
        while not finished:
            try:
                image_path = join(data_path, 'head', str(keyframe), str(people_index) + '.jpg')
                image_output_path = join(output_path, str(count) + '.jpg')
                image = cv2.imread(image_path)
                cv2.imwrite(image_output_path, image)
                finished = True
            except Exception as e:
                print(e)
                # Fall back to an earlier keyframe that still contains this group id.
                while keyframe > 0:
                    keyframe -= 1
                    try:
                        people_index = np.argwhere(final_label_matrix[:, keyframe] == int(group_id))[0, 0]
                        break
                    except IndexError:
                        pass

        count += 1
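Every example in this collection relies on a project-specific make_directory helper, called either as utils.make_directory(path) or as a bare make_directory(path); several snippets also use its return value as the created path (Code examples #15, #16, #18 and #25), and Code example #34 even passes two path components. None of the projects' own implementations are reproduced on this page, so the following is only a minimal sketch consistent with the common single-argument usage, not code taken from any project above:

import os

def make_directory(path):
    # Sketch (assumption): create the directory, including missing parents,
    # and do nothing if it already exists.
    os.makedirs(path, exist_ok=True)
    # Return the path so call sites such as out_dir = make_directory(...) keep working.
    return path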
Code example #2
def create_dataset(metadata,
                   outdir,
                   folder_label='summary',
                   include=['fold', 'sign'],
                   assembly='GRCh38'):
    # include all files for generating the summary file
    cols = ['label', 'bed', 'sign', 'fold', 'bam']
    # init summary list to save the experiment ID - file relationship
    summary = []
    # create output folder if not present
    utils.make_directory(outdir)
    # loop over the list of experiments
    exp_accession_list = list(set(metadata['Experiment accession'].values))

    for exp_accession in exp_accession_list:
        # filter files and save selection
        summary.append(process_exp(exp_accession, metadata, assembly))

    sum_df = pd.DataFrame(summary, columns=cols)
    sum_df.to_csv(os.path.join(outdir, folder_label + '.csv'))
    cols = include

    for i in range(len(cols)):

        data_subdir = utils.make_directory(os.path.join(
            outdir, cols[i]))  # create file type dir
        wget_list(sum_df[cols[i]].values, data_subdir)  # download URLs
    filepaths = get_filepaths(sum_df['bed'].values, data_subdir)
    with open(os.path.join(outdir, 'basset_sample_beds.txt'),
              'w') as filehandle:
        for i in range(len(filepaths)):
            filehandle.write('{}\t{}\n'.format(sum_df['label'][i],
                                               filepaths[i]))
Code example #3
def make_sample_image(state_info, epoch, n_row=10):
    """Saves a grid of generated digits ranging from 0 to n_classes"""
    # Sample noise
    img_path1 = utils.make_directory(
        os.path.join(utils.default_model_dir, 'images/src'))
    img_path2 = utils.make_directory(
        os.path.join(utils.default_model_dir, 'images/target'))

    z = Variable(
        FloatTensor(np.random.normal(0, 1, (n_row**2, args.latent_dim))))

    # Get labels ranging from 0 to n_classes for n rows
    labels = np.array([num for _ in range(n_row) for num in range(n_row)])
    labels = Variable(LongTensor(labels))
    labels_one = FloatTensor(n_row**2,
                             10).zero_().scatter_(1, labels.view(-1, 1), 1)

    img_gen_src = state_info.gen_src(z, labels_one)
    img_gen_target = state_info.gen_target(z, labels_one)
    save_image(img_gen_src.data,
               os.path.join(img_path1, '%d.png' % epoch),
               nrow=n_row,
               normalize=True)
    save_image(img_gen_target.data,
               os.path.join(img_path2, '%d.png' % epoch),
               nrow=n_row,
               normalize=True)
Code example #4
def search_twitter(handle, start_date, end_date, save_data):
    """
    Using the Twitter search API, return and save tweets from the input handle
    using the start_date and end_date as search parameters. Retweets are not
    requested as part of the search (excluded).

    If the save data flag is true then the output will be saved to a file.

    Returns an OrderedDict mapping Unix timestamps (keys) to tweet text
    (values), sorted by timestamp.
    """

    header = {
        # todo global? command line arg parse?
        'Authorization': 'Bearer ' + obtain_bearer_token('token.txt')
    }

    # https://developer.twitter.com/en/docs/tweets/search/api-reference/get-search-tweets.html
    # https://developer.twitter.com/en/docs/tweets/timelines/guides/working-with-timelines

    data = {
        'q': 'from:' + handle + ' since:' + start_date + ' until:' + end_date +
        ' exclude:retweets',
        'tweet_mode': 'extended',
        'lang': 'en',
        'count': str(200)  # 100 is the documented max per the API
    }

    request = requests.get('https://api.twitter.com/1.1/search/tweets.json?',
                           headers=header,
                           params=data)

    tweet_dict = {}

    for i in request.json()['statuses']:
        date = i['created_at'].replace("+0000", "")
        tweet_text = i['full_text']

        # storing key as unix time (UTC - note calendar.timegm) in seconds
        # as it works well for sorting
        date_timestamp = calendar.timegm(
            time.strptime(date, "%a %b %d %H:%M:%S %Y"))

        # key timestamp, value tweet text
        tweet_dict[date_timestamp] = tweet_text

    otd = OrderedDict(sorted(tweet_dict.items()))

    if save_data:
        # create directory for handle if it doesn't exist
        path = 'data/' + handle  # should probably be global or allow configurable path
        make_directory(path)
        save_json_data(tweet_dict,
                       get_filename(path + '/' + handle + '_tweets', 'json'))

    return otd
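A hypothetical call of the function above, assuming token.txt holds a valid bearer token and that the dates are given in the YYYY-MM-DD form understood by Twitter's since:/until: search operators:

# Hypothetical usage; handle and dates are placeholders.
tweets = search_twitter('nasa', '2019-04-01', '2019-04-08', save_data=False)
for timestamp, text in tweets.items():
    print(timestamp, text)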
Code example #5
    def check_all_paths(self):
        utils.make_directory(self.configer['trainingImagePath'])
        utils.make_directory(self.configer['traininglabelPath'])
        utils.make_directory(self.configer['tempImagePath'])
        utils.make_directory(self.configer['testImagePath'])
        utils.make_directory(self.configer['testLabelPath'])
        if not Path(self.configer['rowImagePath']).exists():
            raise IOError(self.configer['rowImagePath'] + ' does not exist')
        if not Path(self.configer['rowLabelPath']).exists():
            raise IOError(self.configer['rowLabelPath'] + ' does not exist')
Code example #6
    def post(self):
        utils.make_directory('/tmp/osmedeus-tmp/')
        data = Save.parser.parse_args()
        raw_content = data['content']
        content = urllib.parse.unquote(raw_content)
        ts = str(int(time.time()))
        filepath = '/tmp/osmedeus-tmp/' + \
            hashlib.md5(ts.encode()).hexdigest()[:5]

        utils.just_write(filepath, content)
        return {"filepath": filepath}
Code example #7
File: configuration.py Project: zzhsec/Osmedeus
    def post(self):
        # global options
        data = Configurations.parser.parse_args()
        options = data['options']

        # @TODO add another authentication level when setting things from remote
        # check whether the credentials match the ones in the config file
        if not self.verify(options):
            return {"error": "Can't not verify to setup config"}

        # write each workspace to its own separate folder
        ws_name = utils.get_workspace(options)
        utils.make_directory(current_path + '/storages/{0}/'.format(ws_name))
        if not os.path.isdir(current_path + '/storages/{0}/'.format(ws_name)):
            return {
                "error":
                "Can not create workspace directory with name {0} ".format(
                    ws_name)
            }

        activities_path = current_path + '/storages/{0}/activities.json'.format(
            ws_name)
        options_path = current_path + '/storages/{0}/options.json'.format(
            ws_name)

        # treat this as the settings db
        utils.just_write(options_path, options, is_json=True)

        if options.get('FORCE') == "False":
            old_log = options['WORKSPACE'] + '/log.json'
            if utils.not_empty_file(old_log) and utils.reading_json(old_log):
                utils.print_info(
                    "It's already done. use '-f' options to force rerun the module"
                )

                raw_activities = utils.reading_json(options['WORKSPACE'] +
                                                    '/log.json')

                utils.just_write(activities_path, raw_activities, is_json=True)

                return options

        utils.print_info("Cleaning activities log")

        # Create skeleton activities based on commands.json
        commands = utils.reading_json(current_path + '/storages/commands.json')

        raw_activities = {}
        for k, v in commands.items():
            raw_activities[k] = []
        utils.just_write(activities_path, raw_activities, is_json=True)

        return options
Code example #8
File: data.py Project: codedecde/EmbedMap
    def load(self, file, dir_name, max_freq=-1, max_count=200000):
        """
        Loads a word-embedding file (one word plus a 300-dim vector per line;
        the first line is a header and is ignored).
            :param file (str) : file name
            :param dir_name (str) : the directory where the data is located
            :returns None
        """
        folder = os.path.join(dir_name, file) + '_dir'
        file = os.path.join(dir_name, file)
        if os.path.exists(folder):
            embeddings_file = os.path.join(folder, 'embeddings.npy')
            ix2word_file = os.path.join(folder, 'ix2word.npy')
            assert os.path.exists(embeddings_file), "Embedding file not found at %s" % (embeddings_file)
            assert os.path.exists(ix2word_file), "Vocab index file not found at %s" % (ix2word_file)
            self.embeddings = np.load(embeddings_file)
            self.ix2word = np.load(ix2word_file)
        else:
            embeddings = []
            word_count = 0
            with io.open(file, 'r', encoding='utf-8', newline='\n', errors='ignore') as f:
                start_line = True
                for ix, linex in enumerate(f.readlines()):
                    if start_line:
                        start_line = not start_line
                        continue
                    word, vec = linex.rstrip().split(' ', 1)
                    vect = np.fromstring(vec, sep=' ')
                    if len(word) == 0 or vect.shape[0] < 300:
                        print('Skipping at', ix)
                        continue
                    self.ix2word.append(word)
                    embeddings.append(vect)
                    word_count += 1
                    if word_count == max_count:
                        break
            # import pdb; pdb.set_trace()
            self.ix2word = np.array(self.ix2word)
            self.embeddings = np.array(embeddings)
            make_directory(folder)
            np.save(os.path.join(folder, 'embeddings.npy'), self.embeddings)
            np.save(os.path.join(folder, 'ix2word.npy'), self.ix2word)

        self.embeddings = to_cuda(torch.from_numpy(self.embeddings).float(),
                                  self.gpu_device)
        if self.mean_center:
            self.embeddings.sub_(self.embeddings.mean(0, keepdim=True))
        if self.unit_norm:
            self.embeddings.div_(self.embeddings.norm(2, 1, keepdim=True))
        self.vocab = len(self.ix2word)
        self.max_freq = self.vocab - 1 if max_freq == -1 else min(max_freq, self.vocab - 1)
        self.word2ix = {self.ix2word[i]: i for i in range(self.vocab)}
        if self.mode == 'seq':
            self._perm = np.random.permutation(self.max_freq + 1)
Code example #9
def generate_runfile(particle,energy,macro,walltime,jobname,jobdir,templateString):
        
    utils.make_directory(jobdir)

    # Render the template
    runfile_template_filled = templates.run_command_template.format(output_location = ("../output/" + particle + "/" + str(energy) + "MeV/"),macro=macro,seed=str(random.randint(1,sys.maxsize)))
    seadragon_template_filled = templateString[0].format(walltime_request=walltime,job_name=jobname,jobdir=jobdir,run_command = runfile_template_filled)

    with open("%s/%s%s" % (jobdir,jobname,templateString[1]), "w") as f:
        f.write(seadragon_template_filled)

    return 1
Code example #10
File: run.py Project: xflicsu/GenomonSV
def genomonSV_merge(args):
    """
    Merge clustered junction data to create non-matched normal control data.
    Each line of the control info file gives a label and the path prefix of an
    individual clustered junction file to be merged.
    """

    with open(args.control_info_file, 'r') as hin:
        for line in hin:
            label, output_prefix = line.rstrip('\n').split('\t')
            if not os.path.exists(output_prefix +
                                  ".junction.clustered.bedpe.gz"):
                raise ValueError('No file: ' + output_prefix +
                                 ".junction.clustered.bedpe.gz")

    utils.make_directory(os.path.dirname(args.merge_output_file))

    if os.path.exists(args.merge_output_file + ".temp"):
        print("Remove existing intermediate file " + args.merge_output_file + ".temp", file=sys.stderr)
        os.remove(args.merge_output_file + ".temp")

    with open(args.control_info_file, 'r') as hin:
        for line in hin:
            label, output_prefix = line.rstrip('\n').split('\t')
            utils.processingMessage("extracting information of " + label)
            mergeFunction.simplifyJunc(
                output_prefix + ".junction.clustered.bedpe.gz",
                args.merge_output_file + ".temp", label)

    utils.processingMessage("sorting the aggregated junction file")
    utils.sortBedpe(args.merge_output_file + ".temp",
                    args.merge_output_file + ".temp.sort")

    utils.processingMessage(
        "merging the same junction in the aggregated junction file")
    mergeFunction.organizeControl(args.merge_output_file + ".temp.sort",
                                  args.merge_output_file + ".temp.merged",
                                  args.merge_check_margin_size)

    utils.processingMessage("sorting the merged junction file")
    utils.sortBedpe(args.merge_output_file + ".temp.merged",
                    args.merge_output_file + ".temp.merged.sort")

    utils.processingMessage("compressing the merged junction file")
    utils.compress_index_bed(args.merge_output_file + ".temp.merged.sort",
                             args.merge_output_file)

    if args.debug == False:
        subprocess.call(["rm", args.merge_output_file + ".temp"])
        subprocess.call(["rm", args.merge_output_file + ".temp.sort"])
        subprocess.call(["rm", args.merge_output_file + ".temp.merged"])
        subprocess.call(["rm", args.merge_output_file + ".temp.merged.sort"])
Code example #11
def split_hdmi(hdmi_path: str,
               json_timing: dict,
               output_dir: str,
               post_fix: str = "",
               audio: bool = True,
               crop_coords_per_slide: list = []):
    """
    Takes an hdmi video as input and splits it into multiple videos.
    The post_fix string is used to append to the end of the output files.
    Audio defaults to true: April 2019 decision to always include audio in slides (to be muted by frontend, sometimes, depending on layout).
    """
    # Generate the output directory if it does not exist.
    make_directory(output_dir)

    OFFSETDELAY = 0.4

    # check arg
    if len(crop_coords_per_slide) > 0:
        assert len(crop_coords_per_slide) == len(json_timing['allId']), str(
            len(crop_coords_per_slide)) + ' vs ' + str(
                len(json_timing['allId']))

    # Start splitting the videos based off the timings.
    # NOTE: The videos will be in the order dictated by allId!!!!
    finalidx = int(len(json_timing['allId'])) - 1
    for index, slide_id in enumerate(json_timing['allId']):
        # Get timing attributes
        time_start = float(
            json_timing['byId'][slide_id]['start']) + OFFSETDELAY
        time_end = float(json_timing['byId'][slide_id]['end']) + OFFSETDELAY
        duration = time_end - time_start

        # Generate the output file name
        output_file = append_slash(output_dir) + append_postfix(
            get_file_name(hdmi_path), post_fix + str(index))

        # Start splitting
        print('Splitting HDMI video to file %s' % output_file)
        docrop = None
        if len(crop_coords_per_slide) > 0:
            docrop = crop_coords_per_slide[index]
            assert isinstance(docrop,
                              dict), str(index) + ': ' + str(type(docrop))
        ffmpeg_split(hdmi_path,
                     time_start,
                     duration,
                     output_file,
                     audio,
                     is_final_end_of_video=(index >= finalidx),
                     docrop=docrop)
Code example #12
File: getcam.py Project: kanalmighty/kansgitrepo
def call_get_cam(args):
    configer = Configer().get_configer()
    cam_image_path = configer['camImagePath']
    utils.make_directory(cam_image_path)
    image_save_directory = os.path.join(cam_image_path, args.date, args.time)
    utils.make_directory(image_save_directory)
    check_point_path = configer['checkPointPath']
    test_log = os.path.join(configer['logpath'], args.date, args.time + '_test.log')
    data_dict = utils.get_dict_from_json(test_log)
    error_file_list = data_dict['ERROR LIST']
    # right_file_list = data_dict['RIGHT LIST']

    model_path = os.path.join(check_point_path, args.date, args.time + '.pth')
    net = get_net(args.network, args.class_number, model_path)
    for error_image in tqdm(error_file_list):
        original_test_image = os.path.join(configer['testImagePath'], error_image + '.jpg')
        get_cam_for_error(args, net, cam_image_path, original_test_image, check_point_path)
Code example #13
def main():
    urls_path = sys.argv[1] # filepath
    output_folder = sys.argv[2] # just foldername
    exp_folder = utils.get_parent(urls_path) # split parent dir
    urls_copy_path = os.path.join(exp_folder, "urls_copy.txt")
    output_dir = os.path.join(exp_folder, output_folder)
    utils.make_directory(output_dir)
    # look for existing copy file urls_copy.txt
    # set paths and copy url file if not already present
    if not os.path.isfile(urls_copy_path):
        # use this in the function that does the rest
        try:
            copyfile(urls_path, urls_copy_path)
        except OSError:
            print('No urls.txt file provided!')

    process_file(urls_copy_path, output_dir)
Code example #14
File: logger.py Project: jk0/pyhole
    def archive_old_logs(self):
        matcher = "*.log.*[!b][!z][!2]"  # skip logs already compressed to .bz2
        files = glob.glob(os.path.join(LOG_DIR, matcher))
        for file_path in files:
            filename = os.path.basename(file_path)
            compressed_filename = filename + ".bz2"
            network_name = filename[:filename.rfind(".log")]
            archive_dir = os.path.join(LOG_ARCHIVE_DIR, network_name)
            utils.make_directory(archive_dir)
            compressed_file_path = os.path.join(archive_dir,
                                                compressed_filename)

            with open(file_path, "rb") as fp:
                with bz2.BZ2File(compressed_file_path, "wb",
                                 compresslevel=9) as output:
                    shutil.copyfileobj(fp, output)

            os.remove(file_path)
Code example #15
def save_dataset(res_dict, outdir):
    for prefix, filtered_list in res_dict.items():
        print("Processing set labelled {}".format(prefix))
        df = pd.concat(filtered_list, axis=1)
        prefix_dir = utils.make_directory(os.path.join(outdir, prefix))
        df.to_csv(os.path.join(prefix_dir, '{}.csv'.format(prefix)))

        urls = df['File download URL'].values

        wget_list(urls, prefix_dir)
Code example #16
File: main.py Project: hhjung1202/OwnAdaptation
    def calc_TDA(self, epoch, cls_num):
        path = utils.make_directory(
            os.path.join(utils.default_model_dir, 'tda_total', str(cls_num)))
        path2 = utils.make_directory(
            os.path.join(utils.default_model_dir, 'tda_sub', str(cls_num)))
        dgms = ripser(self.z.data, maxdim=3)['dgms']
        plot_diagrams(dgms)
        plt.savefig('{}/{}_total.png'.format(path, epoch))
        plt.clf()
        if len(dgms[0]) != 0:
            plot_diagrams(dgms, plot_only=[0], ax=subplot(221))
        if len(dgms[1]) != 0:
            plot_diagrams(dgms, plot_only=[1], ax=subplot(222))
        if len(dgms[2]) != 0:
            plot_diagrams(dgms, plot_only=[2], ax=subplot(223))
        if len(dgms[3]) != 0:
            plot_diagrams(dgms, plot_only=[3], ax=subplot(224))
        plt.savefig('{}/{}_sub.png'.format(path2, epoch))
        plt.clf()
Code example #17
File: main.py Project: hhjung1202/OwnAdaptation
def make_sample_image(state_info, sample, epoch):
    """Saves a grid of generated digits ranging from 0 to n_classes"""

    img_path = utils.make_directory(
        os.path.join(utils.default_model_dir, 'image'))
    sample_hat, _ = state_info.forward(sample)
    sample, sample_hat = to_data(sample), to_data(sample_hat)
    image = merge_images(sample, sample_hat)
    save_image(image.data,
               os.path.join(img_path, '%d.png' % epoch),
               normalize=True)
Code example #18
File: PlayerManager.py Project: harokb/YouStream
    def __init__(self, media_player, directory=None):
        # Download
        self.downloader = None

        # Player
        self.length = None
        self.video_time_position = 0
        self.current_video_index = 0

        self.directory = utils.make_directory(directory)
        self.media_player = media_player
Code example #19
def main():
    usage = 'usage: %prog [options] <data_path> <output_folder>'
    parser = OptionParser(usage)
    parser.add_option('-n', dest='N',
        default='919', type='int',
        help='Number of labels to subset [Default: %default]')
    (options,args) = parser.parse_args()
    if len(args) != 2:
        parser.error('Must provide data path and output folder')
    else:
        data_path = args[0] # dir where deepsea_train folder is
        output_folder = args[1] # output_folder to save dataset in

    utils.make_directory(output_folder)
    # base_dir = utils.get_parent(files_path)
    class_range=range(options.N)
    uncompressed_data_dir = os.path.join(data_path, 'deepsea_train')
    train = load_set('train', uncompressed_data_dir, class_range)
    valid = load_set('valid', uncompressed_data_dir, class_range)
    test = load_set('test', uncompressed_data_dir, class_range)
    save_deepsea_dataset(train, valid, test, output_folder)
Code example #20
def make_sample_image(state_info, epoch, realS_sample, realT_sample):
    """Saves a grid of generated digits ranging from 0 to n_classes"""
    # Sample noise
    img_path1 = utils.make_directory(os.path.join(utils.default_model_dir, 'images/cycle'))
    img_path2 = utils.make_directory(os.path.join(utils.default_model_dir, 'images/resS_T'))
    img_path3 = utils.make_directory(os.path.join(utils.default_model_dir, 'images/resT_T'))

    fake_T = state_info.G_Residual(realS_sample, realT_sample)
    fake_S = state_info.G_Restore(fake_T)

    realS, fake_T = to_data(realS_sample), to_data(fake_T)
    realT, fake_S = to_data(realT_sample), to_data(fake_S)

    cycle = merge_images(realS_sample, fake_S)
    residual1 = merge_images(realS_sample, fake_T)
    residual2 = merge_images(realT_sample, fake_T)


    save_image(cycle.data, os.path.join(img_path1, '%d.png' % epoch), normalize=True)
    save_image(residual1.data, os.path.join(img_path2, '%d.png' % epoch), normalize=True)
    save_image(residual2.data, os.path.join(img_path3, '%d.png' % epoch), normalize=True)
Code example #21
def make_sample_image(state_info, epoch, realA_sample, realB_sample):
    """Saves a grid of generated digits ranging from 0 to n_classes"""
    # Sample noise
    img_path1 = utils.make_directory(
        os.path.join(utils.default_model_dir, 'images/src'))
    img_path2 = utils.make_directory(
        os.path.join(utils.default_model_dir, 'images/target'))

    fake_B = state_info.G_AB(realA_sample)
    fake_A = state_info.G_BA(realB_sample)

    realA, fake_B = to_data(realA_sample), to_data(fake_B)
    realB, fake_A = to_data(realB_sample), to_data(fake_A)

    makeAtoB = merge_images(realA_sample, fake_B)
    makeBtoA = merge_images(realB_sample, fake_A)

    save_image(makeAtoB.data,
               os.path.join(img_path1, '%d.png' % epoch),
               normalize=True)
    save_image(makeBtoA.data,
               os.path.join(img_path2, '%d.png' % epoch),
               normalize=True)
Code example #22
File: PlayerUI.py Project: harokb/YouStream
    def __init__(self, directory=None):
        super(PlayerUI, self).__init__(parent=None, title="YouStream",
                                       size=(DEFAULT_WIDTH, DEFAULT_HEIGHT))

        self.directory = utils.make_directory(directory)

        self.build_UI()
        self.Show()
        self.panel.Layout()
        # self.Maximize()

        self.player_manager = PlayerManager(self.media_player, self.directory)
        self.timer = wx.Timer(self)
        self.Bind(wx.EVT_TIMER, self.on_timer)
        self.timer.Start(TIMER_INTERVAL * 1000)
Code example #23
def generate_macrofile(sidelength,particle,energy,nparticles):
    
    name = "%s_%sMeV" % (particle,energy)
    macro_filename = ""

    utils.make_directory("../output/")
    utils.make_directory("../output/%s" % particle)
    utils.make_directory("../output/%s/%sMeV" % (particle,energy))

    # Render the template
    macro_template_filled = templates.macro_template.format(sidelength=sidelength,particle=particle,energy=energy,nbeamon=nparticles)

    with open("../macros/%s.mac" % name, "w") as f:
        f.write(macro_template_filled)
        macro_filename = "../macros/%s.mac" % name

    return macro_filename
Code example #24
def generate_macrofile_series(sidelength,particle,energy_lowlim,energy_highlim,energy_spacing,nparticles):
    
    energy_linspace = np.linspace(energy_lowlim,energy_highlim,float(energy_highlim-energy_lowlim)/float(energy_spacing)+1)
    macro_filenames = []
    
    for i in energy_linspace:
        energy = str(float(i))
        name = "%s_%sMeV" % (particle,energy)
        
        utils.make_directory("../output/")
        utils.make_directory("../output/%s" % particle)
        utils.make_directory("../output/%s/%sMeV" % (particle,energy))
        
        # Render the template
        macro_template_filled = templates.macro_template.format(sidelength=sidelength,particle=particle,energy=energy,nbeamon=nparticles)
    
        with open("../macros/%s.mac" % name, "w") as f:
            f.write(macro_template_filled)
            macro_filenames.append("../macros/%s.mac" % name)

    return macro_filenames
Code example #25
import os
import logging

from losses import *
from continuous_losses import *

from networks import initialize_model, freeze_layers, freeze_conv_layers
from train import train, continuous_train
from metrics import initialize_metrics, evaluation, update_metrics, write_results

import utils
from utils import device

args = utils.config()
print(args)

experiment_name = utils.get_experiment_name()

utils.make_directory("../logs")
logging.basicConfig(filename=os.path.join(
    '../logs/{}.log'.format(experiment_name)),
                    level=logging.INFO,
                    format='%(asctime)s %(name)s %(levelname)s %(message)s')
logger = logging.getLogger(__name__)
utils.print_log('START with Configuration : {}'.format(args))

data_path = utils.make_directory(args.data_path)
partitions, partitions_train, partitions_tune = utils.get_partitions()

if args.dataset == "Cars3D":
    train_data = Cars3D(root=args.data_path,
                        mode="train",
                        image_size=args.image_size)
    query_data = Cars3D(root=args.data_path,
Code example #26
def write_results(metrics, experiment_name):
    parent_dir = make_directory(RESULTS_PATH)
    for metric in metrics.keys():
        with open(os.path.join(parent_dir, experiment_name+"_"+metric)+'.pkl', 'wb') as f:
            pickle.dump(metrics[metric], f)
Code example #27
import numpy as np
from tqdm import tqdm
from scipy.spatial.distance import cdist

import os
import logging
import pickle
from collections import OrderedDict

from utils import get_dataset_embeddings, make_directory, args
from utils import print_log

logger = logging.getLogger(__name__)


RESULTS_PATH = make_directory("../{}/".format(args.output))


def get_pairwise_distances(model, query_loader, gallery_loader, function='euclidean'):
    query_embeddings, query_targets = get_dataset_embeddings(
        model, query_loader)
    gallery_embeddings, gallery_targets = get_dataset_embeddings(
        model, gallery_loader)
    distances = cdist(query_embeddings, gallery_embeddings, metric=function)
    return distances, query_targets, gallery_targets


def is_hit(is_target):
    if np.sum(is_target) > 0:
        return 1
    return 0
Code example #28
File: run.py Project: Genomon-Project/fusionfusion
def fusionfusion_main(args):

    starBamFile = args.star
    ms2BamFile = args.ms2
    th2BamFile = args.th2
    output_dir = args.out

    if starBamFile is None and ms2BamFile is None and th2BamFile is None:
        print("At least one of --star, --ms2 or --th2 should be included", file=sys.stderr)
        sys.exit(1)
 
    # config.param_conf.read(args.param)
    # debug_mode = config.param_conf.getboolean("debug", "debug_mode")

    ##########
    # set parameters
    param_conf.reference_genome = args.reference_genome
    # param_conf.resource_dir = args.resource_dir
    param_conf.debug = args.debug
    param_conf.abnormal_insert_size = args.abnormal_insert_size
    param_conf.min_major_clipping_size = args.min_major_clipping_size
    param_conf.min_read_pair_num = args.min_read_pair_num
    param_conf.min_valid_read_pair_ratio = args.min_valid_read_pair_ratio
    param_conf.min_cover_size = args.min_cover_size
    param_conf.anchor_size_thres = args.anchor_size_thres
    param_conf.min_chimeric_size = args.min_chimeric_size
    param_conf.min_allowed_contig_match_diff = args.min_allowed_contig_match_diff
    param_conf.check_contig_size_other_breakpoint = args.check_contig_size_other_breakpoint
    param_conf.filter_same_gene = args.filter_same_gene
    ##########

    debug_mode = param_conf.debug

    ####################
    # make directory
    utils.make_directory(output_dir)
    ####################


    ####################
    # parsing chimeric reads from bam files
    if starBamFile is not None:

        parseJunctionInfo.parseJuncInfo_STAR(starBamFile, output_dir + "/star.chimeric.tmp.txt")
                                             

        hOUT = open(output_dir + "/star.chimeric.txt", "w")
        subprocess.check_call(["sort", "-k1,1", "-k2,2n", "-k4,4", "-k5,5n", output_dir + "/star.chimeric.tmp.txt"], stdout = hOUT)
        hOUT.close()

        cluster_filter_junction(output_dir + "/star.chimeric.txt", output_dir + "/star", args)

        if debug_mode == False:
            subprocess.check_call(["rm", output_dir + "/star.chimeric.tmp.txt"])
            subprocess.check_call(["rm", output_dir + "/star.chimeric.txt"])

    if ms2BamFile is not None:

        parseJunctionInfo.extractFusionReads_ms2(ms2BamFile, output_dir + "/ms2.chimeric.tmp.sam")

        hOUT = open(output_dir + "/ms2.chimeric.sam", "w")
        subprocess.check_call(["sort", "-k1", output_dir + "/ms2.chimeric.tmp.sam"], stdout = hOUT)
        hOUT.close()
    
        parseJunctionInfo.parseJuncInfo_ms2(output_dir + "/ms2.chimeric.sam", output_dir + "/ms2.chimeric.tmp.txt", args.abnormal_insert_size) 

        hOUT = open(output_dir + "/ms2.chimeric.txt", "w")
        subprocess.check_call(["sort", "-k1,1", "-k2,2n", "-k4,4", "-k5,5n", output_dir + "/ms2.chimeric.tmp.txt"], stdout = hOUT)
        hOUT.close()

        cluster_filter_junction(output_dir + "/ms2.chimeric.txt", output_dir + "/ms2", args)

        if debug_mode == False:
            subprocess.check_call(["rm", output_dir + "/ms2.chimeric.tmp.sam"])
            subprocess.check_call(["rm", output_dir + "/ms2.chimeric.sam"])
            subprocess.check_call(["rm", output_dir + "/ms2.chimeric.tmp.txt"])
            subprocess.check_call(["rm", output_dir + "/ms2.chimeric.txt"])


    if th2BamFile is not None:

        parseJunctionInfo.extractFusionReads_th2(th2BamFile, output_dir + "/th2.chimeric.tmp.sam")

        hOUT = open(output_dir + "/th2.chimeric.sam", "w")
        subprocess.check_call(["sort", "-k1", output_dir + "/th2.chimeric.tmp.sam"], stdout = hOUT)
        hOUT.close()
        parseJunctionInfo.parseJuncInfo_th2(output_dir + "/th2.chimeric.sam", output_dir + "/th2.chimeric.tmp.txt", args.abnormal_insert_size)
        
        hOUT = open(output_dir + "/th2.chimeric.txt", "w")
        subprocess.check_call(["sort", "-k1,1", "-k2,2n", "-k4,4", "-k5,5n", output_dir + "/th2.chimeric.tmp.txt"], stdout = hOUT)
        hOUT.close()

        cluster_filter_junction(output_dir + "/th2.chimeric.txt", output_dir + "/th2", args)

        if debug_mode == False:
            subprocess.check_call(["rm", output_dir + "/th2.chimeric.tmp.sam"])
            subprocess.check_call(["rm", output_dir + "/th2.chimeric.sam"])
            subprocess.check_call(["rm", output_dir + "/th2.chimeric.tmp.txt"])
            subprocess.check_call(["rm", output_dir + "/th2.chimeric.txt"])


    annotationFunction.merge_fusion_result(output_dir, 
                                           output_dir + "/fusion_fusion.result.txt")
Code example #29
        if is_pos < self.pos_threshold:
            return 1
        return 0


class TripletSampler(MetricSampler):
    def __init__(self, train_data):
        super().__init__(train_data)

    @property
    def is_triplet(self):
        return True

    def sample_data(self, anchor_id, anchor_target):
        pos_id = random.sample(self.class_idxs[anchor_target], k=1)[0]
        neg_class = random.choice(
            [x for x in self.classes_list if x != anchor_target])
        neg_id = random.sample(self.class_idxs[neg_class], k=1)[0]

        return pos_id, neg_id


if __name__ == "__main__":
    data_path = make_directory("../datasets/")
    mnist_transforms = transforms.Compose([transforms.ToTensor()])
    train_data = MNIST(root=data_path, train=True, transform=mnist_transforms)

    sampler = TripletSampler(train_data)
    print(sampler.is_triplet)
    print(sampler.sample_data(0, 5))
Code example #30
    def __init__(self, config):

        self._work_type = config.work_type
        self._seed = config.seed

        self._vocab_size = 70
        self._decode_length = config.decode_length
        self._emb_dim = config.emb_dim
        self._attention_dim = config.attention_dim
        self._decoder_dim = config.decoder_dim
        self._dropout = config.dropout
        self._device = config.device
        self._gpu_non_block = config.gpu_non_block
        self._cudnn_benchmark = config.cudnn_benchmark

        self._epochs = config.epochs
        self._batch_size = config.batch_size
        self._workers = config.workers
        self._encoder_lr = config.encoder_lr
        self._decoder_lr = config.decoder_lr
        self._grad_clip = config.grad_clip
        self._fine_tune_encoder = config.fine_tune_encoder

        self._model_save_path = config.model_save_path
        self._model_load_path = config.model_load_path
        self._model_load_num = config.model_load_num
        self._test_file_path = config.test_file_path

        self._model_name = self._model_name_maker()

        self._seed_everything(self._seed)

        # define different decoder by work type
        if self._work_type == 'train':
            make_directory(self._model_save_path + '/' + self._model_name)
            self._decoder = DecoderWithAttention(
                attention_dim=self._attention_dim,
                embed_dim=self._emb_dim,
                decoder_dim=self._decoder_dim,
                vocab_size=self._vocab_size,
                dropout=self._dropout,
                device=self._device)
            self._decoder.to(self._device, non_blocking=self._gpu_non_block)
            self._decoder_optimizer = torch.optim.Adam(params=filter(
                lambda p: p.requires_grad, self._decoder.parameters()),
                                                       lr=self._decoder_lr)
        elif self._work_type == 'single_test':
            self._decoder = PredictiveDecoder(
                attention_dim=self._attention_dim,
                embed_dim=self._emb_dim,
                decoder_dim=self._decoder_dim,
                vocab_size=self._vocab_size,
                device=self._device)
            self._decoder.to(self._device, non_blocking=self._gpu_non_block)

        self._encoder = Encoder(model_type=config.encoder_type)
        self._encoder.to(self._device, non_blocking=self._gpu_non_block)
        self._encoder.fine_tune(self._fine_tune_encoder)
        self._encoder_optimizer = torch.optim.Adam(
            params=filter(lambda p: p.requires_grad,
                          self._encoder.parameters()),
            lr=self._encoder_lr) if self._fine_tune_encoder else None
        if torch.cuda.device_count() > 1 and self._device != 'cpu':
            print("Let's use", torch.cuda.device_count(), "GPUs!")
            self._encoder = nn.DataParallel(self._encoder)
        self._criterion = nn.CrossEntropyLoss().to(
            self._device, non_blocking=self._gpu_non_block)
Code example #31
                        metavar='N',
                        help='kernel size')
    parser.add_argument('--conv_len',
                        default=32,
                        type=int,
                        metavar='N',
                        help='conv len in LSTM')
    parser.add_argument('-b',
                        '--batch-size',
                        default=32,
                        type=int,
                        metavar='N',
                        help='mini-batch size (default: 32)')
    parser.add_argument('--lr',
                        '--learning-rate',
                        default=0.001,
                        type=float,
                        metavar='LR',
                        help='initial learning rate')
    args = parser.parse_args()

    str_args = args_to_string(args)
    out_dir = './search/' + str_args
    make_directory(out_dir)
    print(str_args)

    if args.model == 'ConvAE':
        conv_ae(args, out_dir, str_args)
    elif args.model == 'ConvLSTMAE':
        conv_lstm_ae(args, out_dir, str_args)
Code example #32
File: task2.py Project: chawasit/Karn-Detection
file_list = glob.glob("%s/*.json" % data_path)
file_list.sort()

success = True

count = 0
while success:
    success, image = vidcap.read()

    if not success:
        break

    print("Process frame %d / %s" % (count, file_list[count]))

    if args.frame:
        utils.make_directory("%s/frame" % output_path)
        cv2.imwrite("%s/frame/frame_%d.jpg" % (output_path, count),
                    image)  # save frame as JPEG file

    height, width, channels = image.shape

    filename = file_list[count]
    with open(filename, 'r') as f:
        pose_result = json.loads(f.read())

    number_of_people = len(pose_result['people'])

    for number in range(number_of_people):
        data = pose_result['people'][number]

        key_point = models.KeyPoint(data['pose_keypoints'])
Code example #33
    def __init__(self):
        print("JJ Mumble Bot Initializing...")
        # Core access.
        GM.jjmumblebot = self
        # Initialize configs.
        GM.cfg.read(utils.get_config_dir())
        # Initialize up-time tracker.
        GM.start_seconds = time.time()
        # Initialize application logging.
        logging.getLogger('chardet.charsetprober').setLevel(logging.INFO)

        log_file_name = f"{GM.cfg['Bot_Directories']['LogDirectory']}/runtime.log"
        GM.logger = logging.getLogger("RuntimeLogging")
        GM.logger.setLevel(logging.DEBUG)

        handler = TimedRotatingFileHandler(log_file_name, when='midnight', backupCount=30)
        handler.setLevel(logging.INFO)
        log_formatter = logging.Formatter('%(asctime)s - [%(levelname)s] - %(message)s')
        handler.setFormatter(log_formatter)
        GM.logger.addHandler(handler)

        GM.logger.info("######################################")
        GM.logger.info("Initializing JJMumbleBot...")
        GM.logger.info("Application configs have been read successfully.")
        # Initialize system arguments.
        if sys.argv:
            for item in sys.argv:
                # Enable safe mode.
                if item == "-safe":
                    GM.safe_mode = True
                    print('Safe mode has been enabled.')
                    GM.logger.info("Safe mode has been enabled through system arguments.")
                # Enable debug mode.
                if item == "-debug":
                    GM.debug_mode = True
                    print('Debug mode has been enabled.')
                    GM.logger.info("Debug mode has been enabled through system arguments.")
                # Enable quiet mode.
                if item == "-quiet":
                    GM.quiet_mode = True
                    print('Quiet mode has been enabled.')
                    GM.logger.info("Quiet mode has been enabled through system arguments.")
                # Enable verbose mode.
                if item == "-verbose":
                    GM.verbose_mode = True
                    print('Verbose mode has been enabled.')
                    GM.logger.info("Verbose mode has been enabled through system arguments.")
        # Initialize command queue.
        cmd_queue_lim = int(GM.cfg['Main_Settings']['CommandQueueLimit'])
        self.command_queue = QueueHandler(cmd_queue_lim)
        # Initialize command history tracker.
        cmd_history_lim = int(GM.cfg['Main_Settings']['CommandHistoryLimit'])
        GM.cmd_history = CMDQueue(cmd_history_lim)
        # Run Debug Mode tests.
        if GM.debug_mode:
            self.config_debug()
        # Retrieve mumble client data from configs.
        server_ip = GM.cfg['Connection_Settings']['ServerIP']
        server_pass = GM.cfg['Connection_Settings']['ServerPassword']
        server_port = int(GM.cfg['Connection_Settings']['ServerPort'])
        user_id = GM.cfg['Connection_Settings']['UserID']
        user_cert = GM.cfg['Connection_Settings']['UserCertification']
        GM.logger.info("Retrieved server information from application configs.")
        # Set main logic loop tick rate.
        self.tick_rate = float(GM.cfg['Main_Settings']['CommandTickRate'])
        # Set multi-command limit.
        self.multi_cmd_limit = int(GM.cfg['Main_Settings']['MultiCommandLimit'])
        # Set the command token.
        self.cmd_token = GM.cfg['Main_Settings']['CommandToken']
        if len(self.cmd_token) != 1:
            print("ERROR: The command token must be a single character! Reverting to the default: '!' token.")
            GM.logger.critical(
                "ERROR: The command token must be a single character! Reverting to the default: '!' token.")
            self.cmd_token = '!'
        # Initialize mumble client.
        GM.mumble = pymumble.Mumble(server_ip, user=user_id, port=server_port, certfile=user_cert,
                                    password=server_pass)
        # Initialize mumble callbacks.
        GM.mumble.callbacks.set_callback("text_received", self.message_received)
        # Set mumble codec profile.
        GM.mumble.set_codec_profile("audio")
        # Create temporary directories.
        utils.make_directory(GM.cfg['Media_Directories']['TemporaryImageDirectory'])
        GM.logger.info("Initialized temporary directories.")
        # Create any missing permanent directories.
        utils.make_directory(GM.cfg['Media_Directories']['PermanentMediaDirectory'] + "sound_board/")
        utils.make_directory(GM.cfg['Media_Directories']['PermanentMediaDirectory'] + "images/")
        GM.logger.info("Initialized permanent directories.")
        # Setup privileges.
        pv.setup_privileges()
        GM.logger.info("Initialized user privileges.")
        # Setup aliases.
        aliases.setup_aliases()
        GM.logger.info("Initialized aliases.")
        # Initialize PGUI.
        GM.gui = PseudoGUI()
        GM.logger.info("Initialized pseudo graphical user interface.")
        # Initialize plugins.
        if GM.safe_mode:
            self.initialize_plugins_safe()
            self.tick_rate = 0.2
            GM.logger.info("Initialized plugins with safe mode.")
        else:
            self.initialize_plugins()
            GM.logger.info("Initialized plugins.")
        # Run a plugin callback test.
        self.plugin_callback_test()
        GM.logger.info("Plugin callback test successful.")
        print("JJ Mumble Bot initialized!\n")
        # Initialize the web interface.
        if GM.cfg.getboolean('Connection_Settings', 'EnableWebInterface'):
            from helpers.web_handler import init_web
            self.web_thr = threading.Thread(target=init_web)
            self.web_thr.start()
            reg_print("JJMumbleBot Web Service was initialized.")
            GM.logger.info("JJMumbleBot Web Service was initialized.")
        # Join the server after all initialization is complete.
        self.join_server()
        GM.logger.info("JJ Mumble Bot has fully initialized and joined the server.")
        self.loop()
Code example #34
    # Load setup info and data
    print("Loading network statistics and edge data...")
    experiment_dir = args.outdir + ('' if args.outdir.endswith('/') else '/')
    with open(experiment_dir + 'args/args.txt', 'r') as f:
        sim_args_str = f.read()
    sim_args = ast.literal_eval(sim_args_str)
    p = len(sim_args['form_terms']) + len(sim_args['diss_terms'])
    H = np.loadtxt(experiment_dir + sim_args['data_name'] + '_H.txt')
    y = np.loadtxt(experiment_dir + sim_args['data_name'] + '_y.txt')
    t = H.shape[0]
    H = H.reshape((t, -1, p)) # t x n^2(E) x p
    n = np.sqrt(H.shape[1]).astype(int)
    print(f"Data has dimension (t, n, p): ({t}, {n}, {p})")

    # Get the output filenames
    result_dir = utils.make_directory(experiment_dir, 'results')
    args_dir = utils.make_directory(experiment_dir, 'args')
    utils.save_args(args, args_dir + 'args_model.txt')

    theta_outfile = result_dir + 'theta_' + sim_args['data_name'] + ".txt"
    u_outfile = result_dir + 'u_' + sim_args['data_name'] + ".txt"
    z_outfile = result_dir + 'z_' + sim_args['data_name'] + ".txt"
    theta_plot_dir = result_dir + 'est_theta_diff.png'

    print('Initialize STERGM model...')
    model = mple_learn.STERGMGraph(
        lam=args.lam,
        admm_alpha=args.admm_alpha,
        rel_tol=args.rel_tol,
        max_steps=args.max_steps,
        newton_max_steps=args.max_steps_newton,