Example 1
	def query_by_event(self, eid):
		# events 0-3 live in the first data file, events 4 and above in the second one
		if eid < 4:
			df = pd.read_table(self.data_path1, sep=r"\s+", low_memory=False)
			eid_trans = eid
		else:
			df = pd.read_table(self.data_path2, sep=r"\s+", low_memory=False)
			eid_trans = eid - 4
		df = df[1:]  # drop the first (non-data) row once
		# convert every column that parses as numeric
		for col in df.columns:
			try:
				df[col] = pd.to_numeric(df[col])
			except (ValueError, TypeError):
				continue
		tor_point = df[df['User13'].diff()>=1]
		tor_point = list(tor_point['FRAME_NUM'])
		end_point = df[df['User13'].diff()<=-1]
		end_point = list(end_point['FRAME_NUM'])
		# build the list of [takeover_start, takeover_end] frame pairs
		takeover_list = []
		for start, end in zip(tor_point, end_point):
			takeover_list.append([start, end])
		situation_id = self.metadata[self.metadata['subject']==self.sub_id].values[0][1:][eid]
		meta_org = self.meta_dict[situation_id[0]]
		meta = Metadata(self.sub_id,meta_org,eid)
		takeover = SingleTakeover(df,takeover_list[eid_trans],meta)
		return takeover
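The takeover windows above are found with diff() on the 0/1 'User13' flag: a jump of +1 marks the frame where the takeover request starts, a drop of -1 marks where it ends. A self-contained toy illustration of that edge detection (column names follow the snippet; the data is made up):

import pandas as pd

df = pd.DataFrame({
    "FRAME_NUM": range(1, 11),
    "User13":    [0, 0, 1, 1, 1, 0, 0, 1, 1, 0],   # 0/1 takeover flag
})

starts = list(df.loc[df["User13"].diff() >= 1, "FRAME_NUM"])   # rising edges
ends = list(df.loc[df["User13"].diff() <= -1, "FRAME_NUM"])    # falling edges
print(list(zip(starts, ends)))  # [(3, 6), (8, 10)]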
Example 2
 def parseMetadata(self, file_name, metadata):
     # expected layout of the metadata string: date-time-temperature-pressure
     fields = metadata.split('-')
     date, time, temperature, pressure = fields[0], fields[1], fields[2], fields[3]
     return Metadata(file_name, date, time, temperature, pressure)
Example 3
 def table_row(bill_dic):
     metadata = Metadata()
     session_name = metadata.session_name(bill_dic['session'])
     return {
         'Title': bill_dic['bill_id'] + ', ' + session_name,
         'os_bill_id': bill_dic['id'],
         'bill': bill_dic['bill_id'],
         'bill_title': bill_title(bill_dic),
         'chamber': state_utils.chamber_name(bill_dic['chamber']),
         'website': bill_dic['sources'][-1]['url'],
         'session': session_name,
         'status': bill_status(bill_dic),
         'latest_status': latest_status(bill_dic),
         'bill_date': utils.datetime_to_date(bill_dic['action_dates']['first']),
         'date_passed_senate': utils.datetime_to_date(bill_dic['action_dates']['passed_upper']),
         'date_passed_assembly': utils.datetime_to_date(bill_dic['action_dates']['passed_lower']),
         'date_signed': utils.datetime_to_date(bill_dic['action_dates']['signed']),
         'summary': bill_summary(bill_dic),
         'keywords': suggested_topics(bill_dic),
     }
Example 4
def postMetadata(takid=-1):
    if takid <= 0:
        return json_response(code=400)
    tak = Tak.get_by_id(takid)
    if tak is None:
        logging.info("tak is None")
        return json_response(code=400)
    key = getValue(request, "key", "")
    value = getValue(request, "value", "")
    if key and value:
        for mdata in tak.metadata:
            if mdata.key == key:
                mdata.value = value
                tak.put()
                return json_response(code=200)
        metadata = Metadata(key=key, value=value)
        tak.metadata.append(metadata)
        tak.put()
        return json_response(code=200)
    else:
        if request.method == 'POST':
            try:
                logging.info("json")
                data = json.loads(request.data, object_hook=_decode_dict)
                logging.info(data)
                for datum in data:
                    # datum is a metadata object
                    logging.info(datum['key'])
                    logging.info(datum['value'])
                    found = False
                    for mdata in tak.metadata:
                        if datum['key'] == mdata.key:
                            mdata.value = datum['value']
                            found = True
                            break
                    if not found:
                        metadata = Metadata(key=datum['key'],
                                            value=datum['value'])
                        tak.metadata.append(metadata)
                tak.put()

                return json_success(data)
            except Exception as e:
                logging.info(e)
                return json_response(code=400)
        return json_response(code=400)
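For reference, the bulk branch above expects the request body to be a JSON array of objects with 'key' and 'value' fields. A hedged client-side sketch; the URL, route and use of the requests library are assumptions for illustration only:

import requests  # illustration only; not part of the original service

payload = [{"key": "title", "value": "Demo"}, {"key": "lang", "value": "en"}]
# hypothetical route; the real URL pattern that maps to postMetadata() is not shown in the snippet
resp = requests.post("http://localhost:8080/tak/42/metadata", json=payload)
print(resp.status_code)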
Example 5
def __tree_hdfs(path):
    cmd = "hadoop fs -ls {}".format(path)
    txt, err = shell.execute(cmd)
    dirs = []
    if err:
        print("Failed to read HDFS path {}".format(path))
        shell.log(os.path.basename(sys.argv[0]), os.path.basename(sys.argv[1]),
                  "WARN", "Failed to read HDFS path {}: {}".format(path, err))
    # Parse `ls` output
    lines = txt.decode("utf-8").split("\n")

    for line in lines:
        # Ignore the file count line and other meta information
        if line.find("/") == -1:
            continue

        _path = line[line.find("/"):]

        # Check if the current path belongs to configured pond
        pond_path = _path[len(hdfs):]
        if pond_path.startswith("/"):
            pond_path = pond_path[1:]

        if pond_path.find("/") != -1:
            pond_end = pond_path.find("/")
        else:
            pond_end = len(pond_path)

        pond_name = pond_path[:pond_end]
        config_ponds = retentions.keys()
        if pond_name not in config_ponds:
            continue

        # drwx------
        if line.startswith("d"):
            print("Working on path: {}".format(_path))
            #shell.log(os.path.basename(sys.argv[0]), os.path.basename(sys.argv[1]), "INFO", "Working on path: {}". format(_path))
            yield from __tree_hdfs(_path)

        # -rwx------
        elif line.startswith("-"):
            print("Working on file: {}".format(_path))
            _date = ' '.join(line[:line.find("/")].split(' ')[-3:-1])
            if _path.split('/')[-2] == 'data':
                _name = _path.split('/')[-3]
            else:
                _name = _path.split('/')[-2]
            _pond = _path.split('/')[2]
            _last_used = datetime.strptime(_date, "%Y-%m-%d %H:%M")
            #shell.log(os.path.basename(sys.argv[0]), os.path.basename(sys.argv[1]), "INFO", "Working on file: {}". format(_path))
            yield Metadata(_name, _pond, _last_used, path)

        # Any other text or errors
        else:
            continue
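__tree_hdfs is a recursive generator: directories are walked via yield from, and every file yields one Metadata(name, pond, last_used, path) record. A minimal, hypothetical driver loop (the root path is a placeholder, not taken from the original script):

# Hypothetical root; the real script derives its paths from its configuration (hdfs / retentions).
for record in __tree_hdfs("/data/ponds"):
    print(record)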
Example 6
 def setUp(self):
     self.test_metadata = Metadata()
     self.test_text = ("Here is some test text. Blah blah blah blah \n" +
                       "1234567890987654321 Yea Alabama Drown 'em Tide!\n")
     self.test_filename = "test_superdoc.txt"
     self.test_document = Document(self.test_metadata, self.test_text,
                                   self.test_filename)
     self.test_metadata_list = ([
         self.test_metadata, self.test_metadata, self.test_metadata
     ])
     self.test_superdoc_text = self.test_text * 3
     #print self.test_superdoc_text
     self.test_superdoc = SuperDocument(self.test_metadata_list,
                                        self.test_superdoc_text,
                                        self.test_filename)
     self.assertEqual(len(self.test_superdoc.component_metadata), 3)
Example 7
    def configDb(self, type, creds=None):
        if self.test:
            name = "test.u1db"
            name2 = "test1.u1db"
        else:
            name = "metadata.u1db"
            name2 = None
            calendar_ops = {"deleteEvent", "updateEvent", "selectEvent", "insertEvent",
                            "insertCalendar", "deleteCalendar", "selectCalendar",
                            "updateCalendar", "deleteCalendarUser", "selectCalendarsAndEvents"}
            download_ops = {"insertDownloadVersion", "updateDownloadVersion",
                            "deleteDownloadVersion", "getDownloadVersion"}
            lock_ops = {"getMetadataFile", "lockFile", "updateDateTime", "unLockFile"}

            if type in calendar_ops:
                name = "calendar.u1db"
            elif type in download_ops:
                name = "downloadfile.u1db"
            elif type in {"recursiveDeleteVersion", "deleteMetadataUser"}:
                name2 = "downloadfile.u1db"
            elif type in lock_ops:
                name = "lockfile.u1db"

        self.metadata = Metadata(name, creds, name2)
Example 8
    def query(self, type='active', term_name=None):
        """
        Obtains raw data of legislators, defaults to active legislators from the latest term
        Args:
            term_name: term name as it comes from OpenStates API
            type: Either 'all' or 'active'

        Returns:
            String transformed
        """
        Tables.query(self)

        if type == 'all':
            if term_name is None:
                metadata = Metadata()
                term_name = metadata.latest_term_name
            legislators = pyopenstates.search_legislators(state=config.STATE,
                                                          term=term_name,
                                                          fields='id')
        else:  # 'active'
            legislators = pyopenstates.search_legislators(
                state=config.STATE,
                active='true',  # default
                fields='id')

        self.raw_dictionary = map(
            lambda dic: pyopenstates.get_legislator(
                dic['id'],
                fields=[
                    'id',
                    'full_name',
                    'url',
                    'roles',
                    # 'old_roles',
                    'party',
                    'district',
                    'chamber',
                    'offices',
                    'email'
                ]),
            legislators)
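Note that in Python 3 map() returns a one-shot iterator, so self.raw_dictionary above is only materialized when something iterates it, and only once. A standalone illustration (not part of the original class):

lazy = map(str.upper, ["a", "b"])
print(list(lazy))  # ['A', 'B']
print(list(lazy))  # [] -- exhausted; wrap the map in list() if it must be read more than once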
Example 9
 def get_features(self, label):
     test_dataset_folder_path = os.path.abspath(
         os.path.join(Path(os.getcwd()).parent, self.labelled_dataset_path))
     images_list = list(
         misc.get_images_in_directory(test_dataset_folder_path).keys())
     metadata = Metadata(images_list)
     if self.feature_name != 'SIFT':
         metadata.save_label_decomposed_features(label,
                                                 self.decomposed_feature)
         features = misc.load_from_pickle(
             self.reduced_pickle_file_folder,
             self.decomposed_feature + '_' + label)
     else:
         features = {}
         database_features = misc.load_from_pickle(
             self.main_pickle_file_folder, self.feature_name)
         label_images_list = metadata.get_specific_metadata_images_list(
             feature_dict={'aspectOfHand': label})
         for image in label_images_list:
             features[image] = database_features[image]
     return features
Example 10
class ImageSynchronizer:

    dataRootFolder = ""

    dataFrontLeft  = Metadata()
    dataFrontRight = Metadata()
    dataRearLeft   = Metadata()
    dataRearRight  = Metadata()
    dataNavLeft    = Metadata()
    dataNavRight   = Metadata()

    rawFrontStamps = OrderedDict()
    rawRearStamps  = OrderedDict()
    rawNavStamps   = OrderedDict()

    iSynchedFrontStamps = OrderedDict()
    iSynchedRearStamps  = OrderedDict()
    iSynchedNavStamps   = OrderedDict()

    eSynchedFrontStamps = OrderedDict()
    eSynchedRearStamps  = OrderedDict()
    eSynchedNavStamps   = OrderedDict()

    def __init__(self, dataRootFolder="./"):

        self.dataRootFolder = dataRootFolder

        # loading front cam metadata
        self.dataFrontLeft.parse_metadata( self.dataRootFolder + "front_cam/left/left_dataformat.txt",   self.dataRootFolder + "front_cam/left/left_all_metadata.txt")
        self.dataFrontRight.parse_metadata(self.dataRootFolder + "front_cam/right/right_dataformat.txt", self.dataRootFolder + "front_cam/right/right_all_metadata.txt")
        for i in range(len(self.dataFrontLeft.timestamp)):
            self.rawFrontStamps[self.dataFrontLeft.index[i]] = StereoPairStamp(self.dataFrontLeft.index[i], self.dataFrontLeft.timestamp[i], self.dataFrontRight.timestamp[i])

        # loading rear cam metadata
        self.dataRearLeft.parse_metadata( self.dataRootFolder + "rear_cam/left/left_dataformat.txt",   self.dataRootFolder + "rear_cam/left/left_all_metadata.txt")
        self.dataRearRight.parse_metadata(self.dataRootFolder + "rear_cam/right/right_dataformat.txt", self.dataRootFolder + "rear_cam/right/right_all_metadata.txt")
        for i in range(len(self.dataRearLeft.timestamp)):
            self.rawRearStamps[self.dataRearLeft.index[i]] = StereoPairStamp(self.dataRearLeft.index[i], self.dataRearLeft.timestamp[i], self.dataRearRight.timestamp[i])

        # loading nav cam metadata
        self.dataNavLeft.parse_metadata( self.dataRootFolder + "nav_cam/left/left_dataformat.txt",   self.dataRootFolder + "nav_cam/left/left_all_metadata.txt")
        self.dataNavRight.parse_metadata(self.dataRootFolder + "nav_cam/right/right_dataformat.txt", self.dataRootFolder + "nav_cam/right/right_all_metadata.txt")
        for i in range(len(self.dataNavLeft.timestamp)):
            self.rawNavStamps[self.dataNavLeft.index[i]] = StereoPairStamp(self.dataNavLeft.index[i], self.dataNavLeft.timestamp[i], self.dataNavRight.timestamp[i])

    @staticmethod
    def _pairs_column(pairs, attr):
        # Collect one attribute of every StereoPairStamp into an (N, 1) array,
        # preserving the insertion order of the OrderedDict.
        return np.array([getattr(p, attr) for p in pairs.values()], dtype=float).reshape(-1, 1)

    # left timestamps of intrinsically synchronized pairs
    def get_isynched_front_pair_stamps(self):
        return self._pairs_column(self.iSynchedFrontStamps, 'leftStamp')

    def get_isynched_rear_pair_stamps(self):
        return self._pairs_column(self.iSynchedRearStamps, 'leftStamp')

    def get_isynched_nav_pair_stamps(self):
        return self._pairs_column(self.iSynchedNavStamps, 'leftStamp')

    # left timestamps of extrinsically synchronized pairs
    def get_esynched_front_pair_stamps(self):
        return self._pairs_column(self.eSynchedFrontStamps, 'leftStamp')

    def get_esynched_rear_pair_stamps(self):
        return self._pairs_column(self.eSynchedRearStamps, 'leftStamp')

    def get_esynched_nav_pair_stamps(self):
        return self._pairs_column(self.eSynchedNavStamps, 'leftStamp')

    # pair indexes of extrinsically synchronized pairs
    def get_esynched_front_pair_indexes(self):
        return self._pairs_column(self.eSynchedFrontStamps, 'index')

    def get_esynched_rear_pair_indexes(self):
        return self._pairs_column(self.eSynchedRearStamps, 'index')

    def get_esynched_nav_pair_indexes(self):
        return self._pairs_column(self.eSynchedNavStamps, 'index')
    # Remove stereo pairs whose left/right timestamps disagree; tolerance is expressed
    # in milliseconds (timestamps are in microseconds, hence the factor 1000).
    def intrinsic_synchro(self, tolerance=0):

        def filter_pairs(raw_stamps, label):
            print("{} cams :\n".format(label))
            synched = OrderedDict(raw_stamps)  # work on a copy so the raw stamps stay intact
            to_remove = [pair.index for pair in synched.values()
                         if abs(pair.leftStamp - pair.rightStamp) > 1000 * tolerance]
            for index in to_remove:
                print("    Removing " + str(index))
                del synched[index]
            return synched

        print("Stereo pairs intrinsic synchronization :")
        self.iSynchedFrontStamps = filter_pairs(self.rawFrontStamps, "Front")
        self.iSynchedRearStamps = filter_pairs(self.rawRearStamps, "Rear")
        self.iSynchedNavStamps = filter_pairs(self.rawNavStamps, "Nav")


    # remove stereo pairs to keep sync between stereo benches, tolerance is expressed in milliseconds
    def extrinsic_synchro(self, tolerance = 75):

        def are_synched(stamps, tol):
            m = median(stamps)
            if abs(stamps[0] - m) < tol and abs(stamps[1] - m) < tol and abs(stamps[2] - m) < tol:
                return True
            else:
                return False

        print("Stereo benches extrinsic synchronization :")
        tolerance = 1000*tolerance

        iFront = list(self.iSynchedFrontStamps.values())
        iRear  = list(self.iSynchedRearStamps.values())
        iNav   = list(self.iSynchedNavStamps.values())

        while len(iFront) > 0 and len(iRear) > 0 and len(iNav) > 0:
        # for i in range(10):
            # print(median([iFront[0].leftStamp, iRear[0].leftStamp, iNav[0].leftStamp]), [iFront[0].leftStamp, iRear[0].leftStamp, iNav[0].leftStamp])
            if are_synched([iFront[0].leftStamp, iRear[0].leftStamp, iNav[0].leftStamp], tolerance):
                self.eSynchedFrontStamps[iFront[0].leftStamp] = iFront[0]
                self.eSynchedRearStamps [iRear[0].leftStamp]  = iRear[0]
                self.eSynchedNavStamps  [iNav[0].leftStamp]   = iNav[0]
                del(iFront[0])
                del(iRear[0])
                del(iNav[0])
            else:
                class SyncInfo:

                    def __init__(self, distToMedian, name, tol):
                        self.stamp = distToMedian
                        self.name = name
                        self.toKeep = True
                        if abs(self.stamp) <= tol:
                            self.synched = True
                        else:
                            self.synched = False

                    def __repr__(self):
                        return "SyncInfo()"

                    def __str__(self):
                        return str(self.stamp) + " " + self.name + " synched=" + str(self.synched) + " toKeep=" + str(self.toKeep)

                m = median([iFront[0].leftStamp, iRear[0].leftStamp, iNav[0].leftStamp])
                syncInfo = []
                syncInfo.append(SyncInfo(iFront[0].leftStamp - m, 'iFront', tolerance))
                syncInfo.append(SyncInfo(iRear[0].leftStamp  - m, 'iRear',  tolerance))
                syncInfo.append(SyncInfo(iNav[0].leftStamp   - m, 'iNav',   tolerance))

                # Sort syncInfo by stamp values
                syncInfo.sort(key=lambda x: x.stamp)

                if syncInfo[0].synched:
                    if syncInfo[2].synched:
                        raise Exception("Fatal error extrinsic_synchro")
                    else:
                        syncInfo[0].toKeep = False
                        syncInfo[1].toKeep = False
                else:
                    if syncInfo[2].synched:
                        syncInfo[0].toKeep = False
                    else:
                        syncInfo[0].toKeep = False
                        syncInfo[1].toKeep = False
                        syncInfo[2].toKeep = False

                # map the SyncInfo name back to the corresponding pair list instead of using eval()
                pair_lists = {'iFront': iFront, 'iRear': iRear, 'iNav': iNav}
                for inf in syncInfo:
                    if not inf.toKeep:
                        cam = list(inf.name)
                        cam[0] = " "
                        print("    Removing " + str(pair_lists[inf.name][0].index) + "".join(cam))
                        del pair_lists[inf.name][0]

    def export_extrinsic_synchronized_index_file(self, filename):

        front = list(self.eSynchedFrontStamps.values())
        rear  = list(self.eSynchedRearStamps.values())
        nav   = list(self.eSynchedNavStamps.values())
        
        exportFile = open(filename, "w")
        for i in range(len(self.eSynchedFrontStamps)):
            exportFile.write(str(front[i].index) + " " + str(rear[i].index) + " " + str(nav[i].index) + "\n")
        exportFile.close()
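A hedged sketch of how the class above might be driven end to end; the folder name and tolerances are illustrative, while the export call matches the one referenced in the later plotting example:

# Assumed usage; the folder must contain the front_cam/rear_cam/nav_cam layout parsed in __init__.
sync = ImageSynchronizer("./dataset/")
sync.intrinsic_synchro(tolerance=5)    # drop pairs whose left/right stamps differ by more than 5 ms
sync.extrinsic_synchro(tolerance=75)   # keep only frames seen by all three stereo benches
sync.export_extrinsic_synchronized_index_file("synchronized_cam_indexes.txt")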
Example 11
def main(args):
    __location__ = os.path.realpath(
        os.path.join(os.getcwd(), os.path.dirname(__file__)))

    # additional argument checks
    if not os.path.isdir(args.working_dir):
        raise ValueError('Working directory not found')
    args.working_dir = os.path.realpath(args.working_dir) + '/'
    if os.path.isdir(args.working_dir + 'analysis/'):
        shutil.rmtree(args.working_dir + 'analysis/')

    options_dict = dict()
    options_dict['wd_envs'] = hp.parse_output_path(args.working_dir + 'envs/')
    options_dict['threads'] = args.threads_per_job
    options_dict['ref_fasta'] = os.path.realpath(args.ref_fasta)
    options_dict['reads_fastq'] = args.working_dir + 'all_reads.fastq'
    options_dict['wd_analysis'] = hp.parse_output_path(args.working_dir +
                                                       'analysis/')
    options_dict[
        'wd_analysis_condas'] = __location__ + '/analysis_conda_files/'
    options_dict['__location__'] = __location__

    # --- create output directories
    if os.path.isdir(options_dict['wd_analysis']):
        shutil.rmtree(options_dict['wd_analysis'])
    _ = hp.parse_output_path(options_dict['wd_analysis'] + 'quast')
    _ = hp.parse_output_path(options_dict['wd_analysis'] + 'jellyfish')
    _ = hp.parse_output_path(options_dict['wd_analysis'] + 'readset_analysis')

    options_dict['wd_analysis_summary'] = hp.parse_output_path(
        options_dict['wd_analysis'] + 'summary/')
    options_dict[
        'wd_assembler_results'] = args.working_dir + 'assembler_results/'
    options_dict[
        'wd_assemblies'] = args.working_dir + 'assembler_results/assemblies/'
    assemblies_list = hp.parse_input_path(options_dict['wd_assemblies'],
                                          pattern='*.fasta')
    if len(assemblies_list) == 0:
        raise ValueError('No successful assemblies found to analyze!')
    assemblies_names_list = [
        os.path.splitext(os.path.basename(af))[0] for af in assemblies_list
    ]
    options_dict['assemblies_string'] = ' '.join(assemblies_names_list)
    with open(args.user_info, 'r') as f:
        md_yaml = yaml.safe_load(f)
    md = Metadata(md_yaml)
    md.write_publication_info(options_dict['wd_analysis_summary'] +
                              'publication_info.yaml')
    # --- Quast ---
    options_dict['quast_options'] = ''
    if md.is_eukaryote:
        options_dict['quast_options'] += '-e '
    if args.gff_file:
        options_dict['quast_options'] += '-G ' + os.path.abspath(
            args.gff_file) + ' '
    quast_output = ''
    quast_output_cmd = ''
    for anl in assemblies_names_list:
        quast_output += (
            ',\n\t\t{anl}_fplot=\'{wd_analysis_summary}quast/{anl}.fplot\''
            ',\n\t\t{anl}_rplot=\'{wd_analysis_summary}quast/{anl}.rplot\''
        ).format(anl=anl,
                 wd_analysis_summary=options_dict['wd_analysis_summary'])
        quast_output_cmd += (
            'if [ -e contigs_reports/nucmer_output/{anl}.fplot ]; then '  # for quast <5.0.0
            'cp contigs_reports/nucmer_output/{anl}.fplot {wd_analysis_summary}quast/.\n'
            'cp contigs_reports/nucmer_output/{anl}.rplot {wd_analysis_summary}quast/.\n'
            'fi\n').format(
                anl=anl,
                wd_analysis_summary=options_dict['wd_analysis_summary'])
        quast_output_cmd += (
            'if [ -e contigs_reports/all_alignments_{anl}.tsv ]; then '  # for quast =>5.0.0
            'cp contigs_reports/all_alignments_{anl}.tsv {wd_analysis_summary}quast/.\n'
            'fi\n').format(
                anl=anl,
                wd_analysis_summary=options_dict['wd_analysis_summary'])
    options_dict['quast_output'] = quast_output
    options_dict['quast_output_cmd'] = quast_output_cmd

    # --- Construct snakemake file ---
    sf_fn = args.working_dir + 'Snakefile_analysis_' + datetime.datetime.now(
    ).strftime('%Y%m%d%H%M%S')
    with open(__location__ + '/Snakemake_analysis', 'r') as f:
        sf = f.read()

    sf = sf.format(**options_dict)
    with open(sf_fn, 'w') as sf_handle:
        sf_handle.write(sf)

    sm_dict = {'use_conda': True}

    if args.slurm_config is not None:
        sm_dict['cluster'] = 'sbatch'
        sm_dict['cluster_config'] = args.slurm_config
        sm_dict['nodes'] = 5

    snakemake.snakemake(sf_fn, **sm_dict)
Example 12
makeColorMap(args.colormap, args.colors)
# Check the arguments for times...
startTime = 0
endTime = -1
timestamp = None
if args.time_stamp is not None:
    timestamp = args.time_stamp
elif args.start_time is not None:
    startTime = args.start_time
if args.end_time is not None:
    endTime = args.end_time
# Once it's guaranteed there's a color map, then generate the colors
#################### METADATA #####################
meta = None
if args.time_stamp is not None:
    meta = Metadata(args.infile, timeStamp=timestamp)
else:
    meta = Metadata(args.infile, startTime=startTime, endTime=endTime)
meta.writeMetadata()
data = meta.loadData()
minVal = np.nanmin(data)
maxVal = np.nanmax(data)

# Read the colormap
PUcols = np.loadtxt(os.path.join(scriptDir, '..', 'ColorMaps', args.colormap + '.txt'))

# Create the Color map object
gendMap = ListedColormap(PUcols, N=len(PUcols))
# Can't build a color map with NaN values, so set them to a value outside the color map's range
data[np.isnan(data)] = minVal - 5
# NaN values are then drawn as black
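The two comments above describe the intent; in matplotlib the usual mechanism for drawing out-of-range cells (here, the former NaNs) in a fixed colour is set_under. A generic sketch, not taken from the original script:

import numpy as np
from matplotlib.colors import ListedColormap

# toy grayscale colormap; the real script loads its colors from a ColorMaps/*.txt file
colors = np.repeat(np.linspace(0.0, 1.0, 256), 3).reshape(256, 3)
cmap = ListedColormap(colors)
cmap.set_under("black")  # anything below vmin (e.g. the NaN sentinel minVal - 5) draws black
# plt.imshow(data, cmap=cmap, vmin=minVal) would then show the former-NaN cells in black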
Example 13
    decomposition = Decomposition(decomposition_model, k, model,
                                  test_dataset_path)
    similarity = Similarity(model, image_id, m)
    similarity.get_similar_images(test_dataset_path,
                                  decomposition,
                                  reduced_dimension=True)

elif task == '3':
    model = input("1.CM\n2.LBP\n3.HOG\n4.SIFT\nSelect model: ")
    decomposition_model = input(
        "1.PCA\n2.SVD\n3.NMF\n4.LDA\nSelect decomposition: ")
    test_dataset_folder_path = os.path.abspath(
        os.path.join(Path(os.getcwd()).parent, test_dataset_path))
    images_list = list(
        misc.get_images_in_directory(test_dataset_folder_path).keys())
    metadata = Metadata(images_list)
    label = int(
        input("1.Left-Hand\n2.Right-Hand\n3.Dorsal\n4.Palmar\n"
              "5.With accessories\n6.Without accessories\n7.Male\n8.Female\n"
              "Please choose an option: "))
    label_interpret_dict = {
        1: {
            "aspectOfHand": "left"
        },
        2: {
            "aspectOfHand": "right"
        },
        3: {
            "aspectOfHand": "dorsal"
        },
        4: {
Example 14
    palmar_cluster_visualization.plot()

    similarity_val2 = kmeans.get_similarity_val(
        labelled_dataset_features=palmar_features,
        unlabelled_dataset_features=unlabelled_features)
    result = {}
    for image_id in list(unlabelled_features.keys()):
        if similarity_val1[image_id] <= similarity_val2[image_id]:
            result[image_id] = 'dorsal'
        else:
            result[image_id] = 'palmar'

    print(result)

    #ACCURACY
    metadata = Metadata(metadatapath='Data/HandInfo.csv')
    images_dop_dict = metadata.getimagesdop_dict()
    print('Accuracy:', misc.getAccuracy(result, images_dop_dict))

elif task == '3':
    folder_path = input("Enter folder path: ")
    start_images = list(map(str, input("Enter 3 imageids: ").split()))
    k = int(input("Enter number of outgoing edges: "))
    m = int(input("Enter number of dominant images to show: "))
    pagerank = PageRankUtil(folder_path, k, m, start_images)
    pagerank.page_rank_util()
    pagerank.plot_k_similar()

elif task == '4':
    classifier = input("1.SVM\n2.DT\n3.PPR\nSelect Classifier: ")
    labelled_dataset_path = input('Enter labelled dataset path: ')
Example 15
from Random import Random
from Regression import Regression
from LinUCB_Disjoint import LinUCB_Disjoint
from TS_Lin import TS_Lin
from GP_Clustered import GP_Clustered
from NN import NN

import TestBuilder
import Util
import MetricsCalculator

from pathlib import Path       # Path(output_path).is_file() is used below
from Metadata import Metadata  # Metadata(algoName, campaign_id) is used below

detailed = False
campaign_id = 837817
algoName = "LinUCB_Disjoint"
meta = Metadata(algoName, campaign_id)
algo = LinUCB_Disjoint(meta)

testsMeta = TestBuilder.get_lin_tests_mini(meta)

ctr_multipliers = [0.5, 1, 2, 5, 10]
# ctr_multipliers = [1]
simulation_ids = [2]

output_path = "./Results/{0}/Simulated/{1}_2.csv".format(meta.campaign_id, algoName)
output_column_names = False
if not Path(output_path).is_file():
	output = open(output_path, "w")	
	output_column_names = True;
else:
	output = open(output_path, "a")
Example 16
time.sleep(10)
ap = AdminPanel(val)
print("###### Sleep 10 seconds")
time.sleep(10)
n = Netcraft(val)
print("###### Sleep 10 seconds")
time.sleep(10)
sd = SubDomain(val)
print("###### Sleep 10 seconds")
time.sleep(10)

gl = GeoLocation(val)
print("###### Sleep 10 seconds")
time.sleep(10)

l = Linkedin(val)
print("###### Sleep 10 seconds")
time.sleep(10)

f = Facebook(val)
print("###### Sleep 10 seconds")
time.sleep(10)

wc = WebCr(val)
print("###### Sleep 10 seconds")
time.sleep(10)

m = Metadata(val)
print("###### Sleep 10 seconds")
time.sleep(10)
Example 17
# sync.export_extrinsic_synchronized_index_file("synchronized_cam_indexes.txt")
print(
    "Uncomment line 11 to generate the synchronised image indexes file. See ImageSynchronizer for code details."
)

# Front cam
frontLeftStamps = sync.dataFrontLeft.get_nparray('timestamp')
frontRightStamps = sync.dataFrontRight.get_nparray('timestamp')
# Rear cam
rearLeftStamps = sync.dataRearLeft.get_nparray('timestamp')
rearRightStamps = sync.dataRearRight.get_nparray('timestamp')
# Nav cam
navLeftStamps = sync.dataNavLeft.get_nparray('timestamp')
navRightStamps = sync.dataNavRight.get_nparray('timestamp')

tokamak = Metadata()
# tokamak.parse_metadata('tokamak/dataformat.txt','tokamak/tokamak.txt')

# time beginning of acquisition
t0 = min([frontLeftStamps[0], rearLeftStamps[0], navLeftStamps[0]])

# Plotting #######################################################

# Plotting image timestamps
fig0, axes = plt.subplots(2, 1, sharex=True, sharey=False)
axes[0].plot((frontLeftStamps - t0) / 1000000.0, label="front left stamp")
axes[0].plot((rearLeftStamps - t0) / 1000000.0, label="rear left stamp")
axes[0].plot((navLeftStamps - t0) / 1000000.0, label="nav left stamp")
axes[0].legend(loc="upper left")
axes[0].set_xlabel("image index")
axes[0].set_ylabel("time (s)")
Example 18
#! /usr/bin/python3

from Metadata import Metadata

metadata = Metadata()

metadata.parse_metadata("data/left_dataformat.txt",
                        "data/left_all_metadata.txt")

metadata.plot(0)
Example 19
 def setUp(self):
     credentials = {'oauth': {'token_key': 'NKKN8XVZLP5X23X', 'token_secret': '59ZN54UEUD3ULRU',
                              'consumer_key': 'keySebas', 'consumer_secret': 'secretSebas'}}
     self.sut = Metadata("test.u1db", credentials, "test1.u1db")
Example 20
from Metadata import Metadata
from TestMetadata import TestMetadata

from Random_Multi import Random_Multi
from LinUCB_Disjoint_Multi import LinUCB_Disjoint_Multi
from TS_Lin_Multi import TS_Lin_Multi

import MetricsCalculator
import TestBuilder
import Util

import pandas as pd  # pd.date_range is used below

campaign_ids = set([866128, 856805, 847460, 858140, 865041])
campaign_ids_str = ",".join([str(x) for x in campaign_ids])

meta = Metadata("LinUCB_Disjoint_Multi_Target",
                campaign_id=5,
                initialize_user_embeddings=False)
days = pd.date_range(start='15/8/2018', end='20/08/2018')

algo = LinUCB_Disjoint_Multi(meta, campaign_ids, days[0], days[-1] + 1)

testsMeta = TestBuilder.basic_feature_target_tests2(meta, 6)

output_path = "./Results/{0}/{1}_Feature.csv".format(meta.campaign_id,
                                                     meta.algo_name)
output_log_path = "./Log/{0}/{1}_Feature.csv".format(meta.campaign_id,
                                                     meta.algo_name)
output_campaign_log_path = "./Log/{0}/Campaign_Log_Feature.csv".format(
    meta.campaign_id)

output_column_names = False
Example 21
from TestMetadata import TestMetadata

from Random import Random
from Regression import Regression
from LinUCB_Disjoint import LinUCB_Disjoint
from TS_Lin import TS_Lin
from GP_Clustered import GP_Clustered
from NN import NN

import MetricsCalculator
import TestBuilder
import Util

from pathlib import Path       # Path(output_path).is_file() is used below
from Metadata import Metadata  # Metadata("LinUCB_Disjoint", campaign_id) is used below

campaign_ids = [866128, 856805, 847460, 858140, 865041]
for campaign_id in campaign_ids:
    meta = Metadata("LinUCB_Disjoint", campaign_id)
    algo = LinUCB_Disjoint(meta)

    testsMeta = TestBuilder.get_lin_test(meta, 12)
    output_path = "./Results/{0}/{1}_Metrics.csv".format(
        meta.campaign_id, meta.algo_name)

    output_column_names = False
    if not Path(output_path).is_file():
        output = open(output_path, "w")
        output_column_names = True
    else:
        output = open(output_path, "a")

    # specials = read_csv("{0}//special_users.csv".format(meta.path), header=0)
    # specials = set(specials["UserHash"].values)