Code Example #1
def get_result_bb(arch, seq):
    result_path = fullfile(arch, seq + '.txt')
    if os.path.exists(result_path):
        temp = np.loadtxt(result_path, delimiter=',').astype(float)  # np.float was removed from recent NumPy; plain float is equivalent
    else:
        result_path = fullfile(arch, seq + '.json')
        temp = json.load(open(result_path, 'r'))
        temp = temp['res']
    return np.array(temp)
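Note: every example on this page calls a helper named fullfile, whose definition is not shown here. Judging from how it is used, it appears to behave like MATLAB's fullfile, i.e. a thin wrapper around os.path.join. A minimal sketch, assuming exactly that behavior (the real helper in each project may differ, e.g. by returning a pathlib.Path):

import os

def fullfile(*parts):
    """Hypothetical shim: join path components into one path string (MATLAB-style name)."""
    # os.path.join also accepts pathlib.Path objects, which several examples pass in.
    return os.path.join(*parts)

Under this assumption, fullfile('result', dataset, tracker_reg) in the examples below is simply os.path.join('result', dataset, tracker_reg).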
Code Example #2
def test_make_word_cloud():
    '''
    This function just tries to make an image from a text file

    Fails if the image file is not created
    '''
    homedir = Path(fullfile(Path(__file__).parent, '..')).resolve(strict=True)
    textfile = fullfile(homedir, 'cloud_test', 'const.txt')
    picfile = fullfile(homedir, 'cloud_test', 'const.png')
    try:
        rm(picfile)
    except FileNotFoundError:
        pass
    word_cloud_from_txt(textfile)
    assert isfile(picfile), 'File not created'
Code Example #3
    def load_landmarks(self):
        with h5py.File(fullfile(self.lm_folder, self.lm_file), 'r') as f:
            self.lm_attrs['width'] = f['landmarks_thermal'].attrs['width']
            self.lm_attrs['height'] = f['landmarks_thermal'].attrs['height']
            self.lm_attrs['idx'] = f['landmarks_thermal'].attrs['idx']
            self.lm_attrs['timestamps'] = f['landmarks_thermal'].attrs['timestamps']
            self.landmarks = f['landmarks_thermal'][:]
Code Example #4
def clean_labels(dirty_labels):
    '''
    This is a computationally complex way to remove common words from labels

    There is probably a nicer way to do this, since each label is a list
    -- Adapted from a function which operated on tweets (paragraphs)
    '''
    clean_labels = []
    filepath = Path(__file__)
    filename = fullfile(filepath.parent, 'textfiles', 'commonwords.txt')

    if not isfile(filename):
        print(f'Could not find file {filename}', file=stderr)
        raise Exception
    with open(filename, 'r') as wordlist:
        words_to_remove = wordlist.readlines()
    words_to_remove = [word.lower().strip() for word in words_to_remove]
    for label in dirty_labels:
        temp_string = label.lower().strip()
        appendFlag = True
        for word in words_to_remove:
            # Remove the matching word from the list
            if word == temp_string:
                appendFlag = False
        if appendFlag:
            clean_labels.append(temp_string)

    return clean_labels
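The docstring above notes there is probably a nicer way to do this. One minimal sketch, assuming the same inputs (a list of label strings plus the word list read from commonwords.txt), is a set-membership filter; the helper below is hypothetical and not part of the original project:

def clean_labels_set(dirty_labels, words_to_remove):
    # Hypothetical alternative with the same behavior as the loop above:
    # a label is dropped only when it exactly matches a common word.
    # The set lookup replaces the inner loop over words_to_remove.
    stop_words = {word.lower().strip() for word in words_to_remove}
    return [label.lower().strip() for label in dirty_labels
            if label.lower().strip() not in stop_words]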
Code Example #5
    def __init__(self, file_name=''):
        '''
        This starts a google image client and pushes a file through

        If initiated with a valid file name, it will process that file
        '''
        google_env = "GOOGLE_APPLICATION_CREDENTIALS"
        filepath = Path(__file__)
        if os.environ.get(google_env) is None:
            # filepath is this module's file; the tokens folder lives in its parent directory
            envfile = fullfile(filepath.parent, 'tokens',
                               'my-google-api-credentials.json')
            if not isfile(envfile):
                print('API credentials not placed in proper directory',
                      file=stderr)
                raise FileNotFoundError()
            else:
                os.environ[google_env] = envfile

        self.client = vision.ImageAnnotatorClient()
        if isfile(file_name):
            self.process_file(file_name)
        else:
            if file_name == '':
                pass
                # Just create a class instance and wait
            else:
                # We expected to get a file
                raise FileNotFoundError
Code Example #6
def remove_words(dirty_tweets):
    '''
    This is a computationally complex way to remove a set of words from each string

    Check to make sure dirty_tweets is a list of strings
    '''
    clean_tweets = []
    filepath = Path(__file__)
    filename = fullfile(filepath.parent, 'textfiles', 'commonwords.txt')
    if not isfile(filename):
        print(f'Could not find file {filename}', file=sys.stderr)
        raise Exception
    with open(filename, 'r') as wordlist:
        words_to_remove = wordlist.readlines()
    words_to_remove = [word.lower().strip() for word in words_to_remove]
    for text in dirty_tweets:
        temp_string = re.sub(r"http\S+", "", text.lower().strip())
        temp_string = re.sub('@\\w*', "",    temp_string)  # Remove mentions
        # print("removing common words")
        # print(temp_string)
        for word in words_to_remove:
            # Remove anything matching the list, with surrounding spaces
            temp_string = temp_string.lower().replace(" " + word + " ", " ")
        # print('Removed common words')
        # print(temp_string)
        # sleep(5)
        clean_tweets.append(temp_string)

    return clean_tweets
Code Example #7
    def work_picture_data(self, urlData):
        '''
        The Google Vision API requires images as local files

        Download files and return a list of these filepaths
        '''
        # print('working image data')
        urlFile = self.curFolder + sep + 'imageData_' + self.user + '.txt'
        outfolder = fullfile(self.curFolder, 'images', '')
        with open(urlFile, 'w') as f_url:
            image_list = []
            for url in urlData:
                fname = image_downloader(url, outfolder)
                f_url.write(url + '\n')
                if fname == 0:
                    # Did not download image, currently not doing anything
                    pass
                else:
                    if isfile(fname):
                        image_list.append(fname)
                    else:
                        print(f'Imagedownloader returned ({fname})\n' +
                              'but did not download', file=sys.stderr)
        self.images = image_list
        return image_list
Code Example #8
    def write_dataset_to_h5file(self, setname, dataset, attributes: dict = None):
        subject_id = self.lm_file.partition("ThermalData_")[2].partition(".h5")[0]
        name = 'Landmark_Verification_' + subject_id + '_F{}.h5'.format(self.lm_attrs['face_number'])
        with h5py.File(fullfile(self.save_dir, name), 'w') as f:
            dset = f.create_dataset(setname, data=dataset)
            if attributes is not None:
                for key in attributes.keys():
                    dset.attrs[key] = attributes[key]
Code Example #9
    def load_dataset(self):
        with h5py.File(fullfile(self.folder, self.filename), 'r') as f:
            self.h5attributes['width'] = f['FRAMES'].attrs['FrameWidth']
            self.h5attributes['height'] = f['FRAMES'].attrs['FrameHeight']
            self.h5attributes['n_frames'] = f['FRAMES'].attrs['FrameCount']
            self.h5dataset = f['FRAMES'][:]
            timestamps = f['Timestamps_ms'][:]
            # setting the first timestamp to 0:
            self.h5timestamps = np.array(timestamps - timestamps[0]).astype(float)
Code Example #10
File: thermal_analysis.py    Project: meistalampe/rPPG
    def write_dataset_to_h5file(self,
                                setname,
                                dataset,
                                attributes: dict = None):
        subject_id = self.data_file.partition("ThermalData_")[2].partition(
            ".h5")[0]
        name = 'Results_' + subject_id + '_F{}.h5'.format(self.n_face)
        with h5py.File(fullfile(self.save_dir, name), 'a') as f:
            if setname in list(f.keys()):
                # Delete any existing dataset of the same name before rewriting it
                del f[setname]
            dset = f.create_dataset(setname, data=dataset)
            if attributes is not None:
                for key in attributes.keys():
                    dset.attrs[key] = attributes[key]
Code Example #11
    def makeoutputfolder(self):
        '''
        This method creates a unique folder structure for analysis

        Saves the root file in curFolder for access by other methods

        Folder structure: output/date/user_X/[files]
        -- X is a simple iterator
        '''
        if self.iteration > 0:
            self.curFolder = re.sub(
                'iter\\d+',
                'iter' + str(self.iteration),
                self.curFolder)
            makedirs(self.curFolder)
            if not isdir(fullfile(self.curFolder, 'images')):
                # Make a unique directory to save images as well
                makedirs(fullfile(self.curFolder, 'images'))
            return

        if not isdir('output'):
            makedirs('output')
        datestr = datetime.now().strftime('%Y_%m_%d')
        # timestr = datetime.now().strftime('%H_%M%S')
        curFolder = fullfile('output', datestr, '')
        if not isdir(curFolder):
            # Create the first folder from scratch
            curFolder = curFolder
            makedirs(curFolder)

        curFolder = fullfile(curFolder,
                             self.user + '_iter' + str(self.iteration))

        if not isdir(curFolder):
            makedirs(curFolder)
        else:
            # Make a unique directory
            i = 1
            temp = curFolder
            while isdir(temp):  # Loop until the directory no longer exists
                temp = curFolder.replace(
                    self.user,
                    self.user + '_' + str(i))
                i += 1
            curFolder = temp
            curFolder = curFolder   # + '_iter' + str(self.iteration)
            makedirs(curFolder)

        if not isdir(fullfile(curFolder, 'images')):
            # Make a unique directory to save images as well
            makedirs(fullfile(curFolder, 'images'))

        self.curFolder = curFolder
Code Example #12
def eval_auc(dataset='vot2018', tracker_reg='S*', start=0, end=1e6):
    list_path = os.path.join('dataset', dataset + '.json')    #./dataset/vot2018.json
    annos = json.load(open(list_path, 'r'))
    seqs = annos.keys()

    trackers = glob.glob(fullfile('result', dataset, tracker_reg))
    trackers = trackers[start:min(end, len(trackers))]

    n_seq = len(seqs)
    thresholds_overlap = np.arange(0, 1.05, 0.05)
    # thresholds_error = np.arange(0, 51, 1)

    success_overlap = np.zeros((n_seq, len(trackers), len(thresholds_overlap)))
    # success_error = np.zeros((n_seq, len(trackers), len(thresholds_error)))
    for i in range(n_seq):
        seq = list(seqs)[i]
        ground = readall(seq)
        gt_rect = np.array(ground).astype(float)
        gt_center = convert_bb_to_center(gt_rect)
        for j in range(len(trackers)):
            tracker = trackers[j]
            print('{:d} processing:{} tracker: {}'.format(i, seq, tracker))
            bb = get_result_bb(tracker, seq)
            center = convert_bb_to_center(bb)
            success_overlap[i][j] = compute_success_overlap(gt_rect, bb)
            # success_error[i][j] = compute_success_error(gt_center, center)

    print('Success Overlap')

    max_auc = 0.
    max_name = ''
    for i in range(len(trackers)):
        auc = success_overlap[:, i, :].mean()
        if auc > max_auc:
            max_auc = auc
            max_name = trackers[i]
        print('%s(%.4f)' % (trackers[i], auc))

    print('\n%s Best: %s(%.4f)' % (dataset, max_name, max_auc))
Code Example #13
    def write_summaryfile(self, image_labels=[]):
        '''
        Write a summary file from the list of tweets and image labels
        '''
        outfile = fullfile(self.curFolder,
                           'twitter_' + self.user + '_' +
                           self.daterange + '.txt')
        print(f'\nWriting output file: {outfile}\n')

        if image_labels == []:
            image_labels = self.image_labels

        with open(outfile, 'w') as summary_file:
            print(*self.tweet_text, sep='\n\n', file=summary_file)
            print('\n\n', file=summary_file)
            if image_labels:  # Test if list is not empty
                for i in range(1):
                    # Make the image files more significant, arbitrarily
                    print(*image_labels, sep='\n', file=summary_file)
                    print('')
        print('Output file complete.')
        return outfile
Code Example #14
    def write_landmarks_to_h5file(self, landmarks, attributes: dict = None):
        with h5py.File(fullfile(self.save_dir, 'Landmarks_' + self.filename), 'w') as f:
            dset = f.create_dataset('landmarks_thermal', data=landmarks)
            if attributes is not None:
                for key in attributes.keys():
                    dset.attrs[key] = attributes[key]
Code Example #15
File: eval_otb.py    Project: JBamberger/UDT_pytorch
def eval_auc(dataset='OTB2015', tracker_reg='S*', start=0, end=1e6):
    ds = OtbDataset(variant=dataset)

    trackers = glob.glob(fullfile('result', dataset, tracker_reg))
    trackers = trackers[start:min(end, len(trackers))]

    n_seq = len(ds)
    thresholds_overlap = np.arange(0, 1.05, 0.05)
    # thresholds_error = np.arange(0, 51, 1)

    success_overlap = np.zeros((n_seq, len(trackers), len(thresholds_overlap)))
    # success_error = np.zeros((n_seq, len(trackers), len(thresholds_error)))
    for i, video in enumerate(ds):
        gt_rect = np.array(video.gt_rects).astype(float)
        gt_center = convert_bb_to_center(gt_rect)
        for j in range(len(trackers)):
            tracker = trackers[j]
            print(f'{i:d} processing:{video.video_name} tracker: {tracker}')
            bb = get_result_bb(tracker, video.video_name)
            center = convert_bb_to_center(bb)
            success_overlap[i][j] = compute_success_overlap(gt_rect, bb)
            # success_error[i][j] = compute_success_error(gt_center, center)

    print('Success Overlap')

    if 'OTB2015' == dataset:
        OTB2013_id = []
        for i, video in enumerate(ds):
            if video.contained_in('OTB2013'):
                OTB2013_id.append(i)

        max_auc_OTB2013 = 0.
        max_name_OTB2013 = ''
        for i in range(len(trackers)):
            auc = success_overlap[OTB2013_id, i, :].mean()
            if auc > max_auc_OTB2013:
                max_auc_OTB2013 = auc
                max_name_OTB2013 = trackers[i]
            print('%s(%.4f)' % (trackers[i], auc))

        max_auc = 0.
        max_name = ''
        for i in range(len(trackers)):
            auc = success_overlap[:, i, :].mean()
            if auc > max_auc:
                max_auc = auc
                max_name = trackers[i]
            print('%s(%.4f)' % (trackers[i], auc))

        print('\nOTB2013 Best: %s(%.4f)' % (max_name_OTB2013, max_auc_OTB2013))
        print('\nOTB2015 Best: %s(%.4f)' % (max_name, max_auc))
    else:
        max_auc = 0.
        max_name = ''
        for i in range(len(trackers)):
            auc = success_overlap[:, i, :].mean()
            if auc > max_auc:
                max_auc = auc
                max_name = trackers[i]
            print('%s(%.4f)' % (trackers[i], auc))

        print('\n%s Best: %s(%.4f)' % (dataset, max_name, max_auc))
Code Example #16
if __name__ == '__main__':
    '''
    This provides command-line debugging
    '''
    img_class = python_image()
    # a.analyzeUsername('brabbott42', range(0, 1000, 200))
    if len(argv) == 2:
        in_file = argv[1]
    elif len(argv) == 1:
        in_file = fullfile('images', 'samples', 'mountains.jpg')

    if not isfile(in_file):
        # This error handling may also be handled in the class
        print(f'\nCould not find input file: {in_file}', file=stderr)
        raise FileNotFoundError
    img_class.process_file(in_file)
Code Example #17
    def __init__(self, keyspath=''):
        '''
        This attempts to initialize a twitter API interface

        Attempts to read from the keys (stored locally),
        Throws error if connection unsuccessful
        '''
        try:
            # Try using local files -- My original method
            filepath = Path(__file__)
            tokenspath = fullfile(filepath.parent, 'tokens')
            print(f'\nFile TOKEN path: {tokenspath}\n', file=sys.stderr)
            consumerfile = fullfile(tokenspath, 'twitter_consumer.token')
            consumer_key = getKeyFromTxt(consumerfile)
            consecfile = fullfile(tokenspath, 'twitter_consumer_secret.token')
            consumer_secret = getKeyFromTxt(consecfile)
            access_file = fullfile(tokenspath, 'twitter_access.token')
            access_token = getKeyFromTxt(access_file)
            acsecfile = fullfile(tokenspath, 'twitter_access_secret.token')
            access_secret = getKeyFromTxt(acsecfile)
            auth = tweepy.OAuthHandler(consumer_key,
                                       consumer_secret)
            auth.set_access_token(access_token, access_secret)

        except FileNotFoundError:
            print('\nCould not find .token files, trying to find "key" file\n',
                  file=sys.stderr)
            # Stephan's block
            try:
                filepath = Path(__file__)
                tokenspath = fullfile(filepath.parent, 'tokens')
                print(f'File KEY path: {tokenspath}', file=sys.stderr)
                config = configparser.ConfigParser()
                keyfile = fullfile(tokenspath, 'keys')
                if not isfile(keyfile):
                    print(f'File "key" not found: {keyfile}', file=sys.stderr)
                    raise FileNotFoundError()
                config.read(keyfile)
                conskey = config.get('auth', 'consumer_key')  # .strip()
                consecret = config.get('auth', 'consumer_secret')  # .strip()
                accesstok = config.get('auth', 'access_token')  # .strip()
                accesssec = config.get('auth', 'access_secret')  # .strip()
                # print(f'Consumer key: "{conskey}"')
                # print(f'Consumer sec: "{consecret}"')
                # print(f'access key: "{accesstok}"')
                # print(f'access sec: "{accesssec}"')
                auth = tweepy.OAuthHandler(conskey, consecret)
                auth.set_access_token(accesstok, accesssec)
            except FileNotFoundError:
                try:
                    print('\nCould not find .token or "key" file. ' +
                          'Checking environment variables (GitHub Actions)\n',
                          file=sys.stderr)
                    # Use environmental variables (Github Actions)
                    consumer_key = environ['CONSUMER_KEY']
                    consumer_secret = environ['CONSUMER_SECRET']
                    access_token = environ['ACCESS_TOKEN']
                    access_secret = environ['ACCESS_SECRET']
                    auth = tweepy.OAuthHandler(consumer_key,
                                               consumer_secret)
                    auth.set_access_token(access_token, access_secret)
                except:
                    print('Tried three methods and could not find key files.',
                          file=sys.stderr)
                    raise
        print('\nGenerated auth. Attempting to connect\n', file=sys.stderr)
        self.client = tweepy.API(auth)
        if not self.client.verify_credentials():
            # except tweepy.TweepError as e:
            # Print a more helpful debug message and rethrow
            print('\nERROR : connection failed. Check your Twitter keys.\n')
            raise tweepy.TweepError

        print(f'Connected as {self.client.me().screen_name}, you can tweet !')
        self.client_id = self.client.me().id
        self.max_id = None  # For acquiring past tweets
        self.iteration = 0  # For saving iterative tweets in new pages
        self.tweet_count = 0  # This API is limited to 3200 tweets