Example No. 1
def train_validation_set_2(model_input=None, model_output=None):

    data_v = DataSources.load_validation_dataset2()
    data = DataSources.load_naive_augmented_dataset(DataSources.DataSources.VALIDATION_SET2_NG)

    # data = data[:]
    run_train(data, data_v, model_input=model_input, model_output=model_output, epochs=50)
Example No. 2
    def bt_dur_week_action(self, sender):
        ''' fill "tableview1" with list of duration of tasks in the week, the selected day is in
        '''
        start = self.view["datepicker"].date
        start = start.replace(hour=0, minute=0, second=0, microsecond=0)
        monday = start - timedelta(days=start.weekday())
        satday = monday + timedelta(days=5)

        lc = self.myCalender.findBetween(monday, satday)
        l = lc.UICalcDurations()

        self.LogMessage(
            f"dur_week_action len {len(l)} {monday.strftime('%a %d.%m.%y')}")

        #self.LogMessage(f"dur_day_action liste {len(l)}")
        lst = MDS.MyDurDataSource("weekly", l)
        lst.highlight_color = (1.0, 0.9, 0.3, 1.0)
        tv1 = self.view['tableview1']
        tv1.data_source = tv1.delegate = lst
        tv1.data_source.delete_enabled = tv1.editing = False
        self.selected_row = -1
        tv1.reload_data()
        self.view['bt_save_hours'].enabled = True
        if self.state == 1:
            self.view['bt_save_hours'].title = "Save Hours"
            self.view['switch_share_hours'].hidden = False
            self.view['l_share'].hidden = False
            self.view['bt_add'].enabled = False
            self.view['label_up'].hidden = True
            self.view['label_left'].hidden = True
            self.view['bt_edit_tasks'].hidden = True
        self.state = 4
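
The Monday-to-Saturday window above is derived purely from datetime.weekday() (Monday == 0). A self-contained sketch of the same computation:

from datetime import datetime, timedelta

def week_window(day):
    """Return (monday, saturday) of the week containing `day`."""
    start = day.replace(hour=0, minute=0, second=0, microsecond=0)
    monday = start - timedelta(days=start.weekday())  # weekday(): Mon == 0
    saturday = monday + timedelta(days=5)
    return monday, saturday

# week_window(datetime(2023, 6, 15)) -> (datetime(2023, 6, 12, 0, 0), datetime(2023, 6, 17, 0, 0))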
Example No. 3
    def bt_dur_day_action(self, sender):
        ''' fill "tableview1" with list of duration of tasks in selcted day
        Anzeigen welche Zeiten für welche Tasks gebraucht wurden
        '''
        start = self.view["datepicker"].date

        start = start.replace(hour=0, minute=0, second=0, microsecond=0)
        end = start + timedelta(days=1)

        lc = self.myCalender.findBetween(start, end)
        l = lc.UICalcDurations()

        self.LogMessage(f"dur_day_action len {len(l)}")

        #self.LogMessage(f"dur_day_action liste {len(l)}")
        lst = MDS.MyDurDataSource("daily", l)
        lst.highlight_color = (1.0, 0.9, 0.3, 1.0)
        tv1 = self.view['tableview1']
        tv1.data_source = tv1.delegate = lst
        tv1.data_source.delete_enabled = tv1.editing = False
        self.selected_row = -1
        tv1.reload_data()
        self.view['bt_save_hours'].enabled = True
        if self.state == 1:
            self.view['bt_save_hours'].title = "Save Hours"
            self.view['switch_share_hours'].hidden = False
            self.view['l_share'].hidden = False
            self.view['bt_add'].enabled = False
            self.view['label_up'].hidden = True
            self.view['label_left'].hidden = True
            self.view['bt_edit_tasks'].hidden = True
        self.state = 3
Example No. 4
def run_test_set(model_input=None, limit=-1, is_6pos=False, model_name='c2_net'):
    import GenerateTrainingSet
    log.info('run_test_set::')
    data: [Data] = DataSources.load_test_set()

    # find bbox for each image
    data = DetectFace.detect_face_multithread(data)

    # find landmarks
    data = DetectFace.detect_face_multithread(data, fn=DetectFace.detect_face_landmarks_dlib)

    # update bboxes according to landmarks, if any were found
    for data_ in data:
        if data_.landmarks_2d is not None:
            DetectFace.get_face_bb(data_)

    # estimate pose based on the dlib landmarks and the face model
    face_model = GenerateTrainingSet.get_face_model()
    for data_ in data:
        try:
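            # 6-DoF pose: Rodrigues rotation vector (3) + translation vector (3)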
            rotation_vecs, translation_vecs = GenerateTrainingSet.solve_pnp(data_.landmarks_2d, face_model.model_TD, face_model)
            pose = np.concatenate((rotation_vecs.flatten(), translation_vecs.flatten()))
            data_.pose = pose
        except Exception:
            log.warning('failed to get pose for image %s' % data_.image)

    predicted_poses = predict(data, model_input, limit, is_6pos, model_name=model_name)
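
GenerateTrainingSet.solve_pnp is not shown in this listing. A minimal sketch of the OpenCV call it presumably wraps, assuming a simple pinhole camera model (the focal length and principal point are illustrative placeholders, not values from the source):

import numpy as np
import cv2

def solve_pnp_sketch(landmarks_2d, model_3d, focal=1000.0, center=(112.0, 112.0)):
    # assumed pinhole intrinsics
    camera_matrix = np.array([[focal, 0.0, center[0]],
                              [0.0, focal, center[1]],
                              [0.0, 0.0, 1.0]])
    # fit the pose that projects the Nx3 model points onto the Nx2 landmarks
    success, rvec, tvec = cv2.solvePnP(model_3d.astype(np.float64),
                                       landmarks_2d.astype(np.float64),
                                       camera_matrix,
                                       None)  # assume no lens distortion
    if not success:
        raise RuntimeError('solvePnP did not converge')
    return rvec, tvec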
Example No. 5
def train_300w_3d_helen_naive_augmentations(data_sources: [DataSources.DataSources],
                                            model_input, model_output,
                                            limit=-1):
    log.info('train_300w_3d_helen_naive_augmentations::')
    data_v: [Data] = DataSources.load_validation_dataset2(recalc_pose=True)

    data: [Data] = []

    for data_source in data_sources:
        data += DataSources.load_naive_augmented_dataset(data_source, limit=limit)

    if limit > -1:
        np.random.shuffle(data)
        data = data[:limit]

    run_train(data, data_v, model_input=model_input, model_output=model_output, epochs=30)
Example No. 6
def naive_augment_300w_3d_lfpw():
    output_folder = '../augmented/300w_3d_lfpw_naive'

    limit = -1  # if not DEBUG else 5

    data: [Data] = DataSources._load_data(DataSources.DataSources._300W_3D_LFPW,
                                          DataSources._300w_3d_parser,
                                          limit=limit)

    gen_naive_augmentations(data, output_folder)
Example No. 7
def naive_augment_aflw2000():
    output_folder = '../augmented/AFLW2000'

    limit = -1 if not DEBUG else 5

    data: [Data] = DataSources._load_data(DataSources.DataSources.AFLW2000,
                                          DataSources._300w_3d_parser,
                                          limit=limit,
                                          landmarks_fld_name='pt3d_68')

    gen_naive_augmentations(data, output_folder)
Example No. 8
def validate_pose_vs_landmarks():
    import DataSources
    import GenerateTrainingSet

    # the first load is dead code: its result is immediately overwritten
    # data = DataSources.load_naive_augmented_dataset(
    #     DataSources.DataSources.VALIDATION_SET2_NG)
    data = DataSources.load_validation_dataset2(
        DataSources.DataSources.VALIDATION_2, recalc_pose=False)

    face_model = GenerateTrainingSet.get_face_model()  # constant; load once
    total_theta = 0
    for data_ in data:
        rot_mat_orig, _ = cv2.Rodrigues(data_.pose[:3])
        rotation_vecs, translation_vecs = GenerateTrainingSet.solve_pnp(
            data_.landmarks_2d, face_model.model_TD, face_model)
        rot_mat_land, _ = cv2.Rodrigues(rotation_vecs)

        theta = Utils.get_theta_between_rot_mats(rot_mat_orig, rot_mat_land)
        total_theta += theta

    print(np.rad2deg(total_theta / len(data)))
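
Utils.get_theta_between_rot_mats is not shown in this listing. A plausible sketch, using the standard trace identity tr(R) = 1 + 2*cos(theta) on the relative rotation:

import numpy as np

def get_theta_between_rot_mats(r1, r2):
    r_rel = r1.T @ r2  # relative rotation taking r1 to r2
    # clip guards against values slightly outside [-1, 1] from numerical drift
    cos_theta = np.clip((np.trace(r_rel) - 1.0) / 2.0, -1.0, 1.0)
    return np.arccos(cos_theta)  # radians, in [0, pi]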
Example No. 9
    def bt_cal2_action(self, sender):
        ''' fill "tableview2" with list of calender entries = actions
        '''
        now = self.view['datepicker'].date
        # datetime.datetime.today()

        lst = MDS.MyCalDataSource(self.myCalender,
                                  self.myCalender.UIActionsOfDayList(now))
        tv2 = self.view['tableview2']
        tv2.data_source = tv2.delegate = lst
        tv2.data_source.delete_enabled = tv2.editing = False
        lst.action = self.tv_cal_action
        #        self.state=2
        #        self.selected_row=-1
        tv2.reload_data()
        self.get_available_memory()
Example No. 10
    def bt_dur_year_action(self, sender):
        ''' fill "tableview1" with list of duration of tasks in the month, the selected day is in
        '''
        start = self.view["datepicker"].date
        #        self.view["datepicker"].font=("<system>",12). # WHAT is THIS ??

        mstart = start.replace(month=1,
                               day=1,
                               hour=0,
                               minute=0,
                               second=0,
                               microsecond=0)
        mend = mstart.replace(year=mstart.year + 1)

        # print(f"mstart = {mstart} mend = {mend}")

        lc = self.myCalender.findBetween(mstart, mend)
        l = lc.UICalcDurations()

        self.LogMessage(f"dur_year_action len {len(l)}")
        lst = MDS.MyDurDataSource("yearly", l)

        #lst.font=("<system>",12)
        lst.highlight_color = (1.0, 0.9, 0.3, 1.0)
        tv1 = self.view['tableview1']
        tv1.data_source = tv1.delegate = lst
        tv1.data_source.delete_enabled = tv1.editing = False
        self.selected_row = -1
        tv1.reload_data()
        self.view['bt_save_hours'].enabled = True
        if self.state == 1:
            self.view['bt_save_hours'].title = "Save Hours"
            self.view['switch_share_hours'].hidden = False
            self.view['l_share'].hidden = False
            self.view['bt_add'].enabled = False
            self.view['label_up'].hidden = True
            self.view['label_left'].hidden = True
            self.view['bt_edit_tasks'].hidden = True
        self.state = 6
Example No. 11
def validate_load_image():
    import DataSources
    from detect_face import DetectFace

    from keras.preprocessing import image
    from keras.applications.imagenet_utils import preprocess_input  # accepts mode='tf'

    data = DataSources.load_validation_dataset2()
    data: [Data] = DetectFace.get_face_bboxes(data[:1])

    image_array = load_image(data[0])
    image_array = preprocess_input(image_array, mode='tf')

    img = image.load_img(data[0].image, target_size=(224, 224))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x, mode='tf')

    print('done')
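
For reference, keras preprocess_input(..., mode='tf') rescales raw [0, 255] pixel values into [-1, 1]; a minimal equivalent:

import numpy as np

def preprocess_tf(x):
    # same scaling as preprocess_input(x, mode='tf')
    return x / 127.5 - 1.0

print(preprocess_tf(np.array([0.0, 127.5, 255.0])))  # [-1.  0.  1.]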
Example No. 12
    def bt_task_action(self, sender):
        ''' fill "tableview1" with List of Tasks
        '''
        lst = MDS.MyTaskDataSource(TiTra.Task.UITasksList())
        #print(f"\nShow tasks {TiTra.Task.UITasksList()}")
        tv1 = self.view['tableview1']
        tv1.data_source = tv1.delegate = lst
        tv1.data_source.delete_enabled = tv1.editing = False
        lst.action = self.tv_task_action
        self.state = 1
        self.selected_row = -1
        tv1.reload_data()
        #        tv1.font=('<system>',12)
        self.view['bt_add'].enabled = True
        #        self.view['bt_save_hours'].enabled=False
        self.view['bt_save_hours'].title = "Save Cal"
        self.view['label_up'].hidden = False
        self.view['label_left'].hidden = False
        self.view['switch_share_hours'].hidden = True
        self.view['l_share'].hidden = True
        self.view['bt_edit_tasks'].hidden = False

        self.get_available_memory()
Example No. 13
                yy.append('w')
            #if (y[k][1]>y[k][0]) and (y[k][0]>= y[k-1][1]):#...(?,6),(8,10)
            #yy.append('g')
            #yy.append('w') #wrapped around
            #if (y[k][1]>y[k][0]) and (y[k][1]== y[k-1][1]):
            #yy.append('w') #wrapped around after a previous blue
    return yy


if __name__ == "__main__":

    import DataSources as mydatasource
    import sys
    from importlib import reload  # reload is not a builtin in Python 3
    reload(mydatasource)
    datatype = 'sample'
    polygons, inputsspatialref, feature_names = mydatasource.getPolygons(
        datatype=datatype, dofilter=True)
    print("len of features_names"), len(polygons)
    printer(polygons)

    domapall = True  # divide the complete map of polygons at once
    if domapall:
        howmany = 1
    else:
        howmany = len(polygons)
    datarows = []
    for fid in range(0, howmany):
        if domapall:
            polygon = polygons  # pass the whole list of polygons
            fidx = 'map'
        else:
            polygon = [polygons[fid]]
Example No. 14
def main():
    logger.info('Starting...')
    read_config()

    # Read lang files
    path_to_local = "locales/"
    for file in os.listdir(path_to_local):
        if fnmatch.fnmatch(file, 'pokemon.*.json'):
            read_pokemon_names(file.split('.')[1])
        if fnmatch.fnmatch(file, 'moves.*.json'):
            read_move_names(file.split('.')[1])

    dbType = config.get('DB_TYPE', None)
    scannerName = config.get('SCANNER_NAME', None)
    global dataSource
    dataSource = None
    global ivAvailable
    ivAvailable = False
    if dbType == 'sqlite':
        if scannerName == 'pogom':
            dataSource = DataSources.DSPogom(config.get('DB_CONNECT', None))
        elif scannerName == 'pokemongo-map':
            dataSource = DataSources.DSPokemonGoMap(config.get('DB_CONNECT', None))
        elif scannerName == 'pokemongo-map-iv':
            dataSource = DataSources.DSPokemonGoMapIV(config.get('DB_CONNECT', None))
            ivAvailable = True
    elif dbType == 'mysql':
        if scannerName == 'pogom':
            dataSource = DataSources.DSPogomMysql(config.get('DB_CONNECT', None))
        elif scannerName == 'pokemongo-map':
            dataSource = DataSources.DSPokemonGoMapMysql(config.get('DB_CONNECT', None))
        elif scannerName == 'pokemongo-map-iv':
            dataSource = DataSources.DSPokemonGoMapIVMysql(config.get('DB_CONNECT', None))
            ivAvailable = True
    if not dataSource:
        raise Exception("The combination SCANNER_NAME, DB_TYPE is not available: %s,%s" % (scannerName, dbType))

    # request this token from the BotFather in Telegram
    token = config.get('TELEGRAM_TOKEN', None)
    updater = Updater(token)
    b = Bot(token)
    logger.info("BotName: <%s>" % (b.name))

    # Get the dispatcher to register handlers
    dp = updater.dispatcher

    # on different commands - answer in Telegram
    dp.add_handler(CommandHandler("start", cmd_start))
    dp.add_handler(CommandHandler("help", cmd_help))
    dp.add_handler(CommandHandler("add", cmd_add, pass_args = True, pass_job_queue=True))
    dp.add_handler(CommandHandler("addbyrarity", cmd_addByRarity, pass_args = True, pass_job_queue=True))
    dp.add_handler(CommandHandler("clear", cmd_clear))
    dp.add_handler(CommandHandler("rem", cmd_remove, pass_args = True, pass_job_queue=True))
    dp.add_handler(CommandHandler("save", cmd_save))
    dp.add_handler(CommandHandler("load", cmd_load, pass_job_queue=True))
    dp.add_handler(CommandHandler("list", cmd_list))
    dp.add_handler(CommandHandler("lang", cmd_lang, pass_args = True))
    dp.add_handler(CommandHandler("radius", cmd_radius, pass_args=True))
    dp.add_handler(CommandHandler("location", cmd_location_str, pass_args=True))
    dp.add_handler(CommandHandler("remloc", cmd_clearlocation))
    dp.add_handler(MessageHandler([Filters.location],cmd_location))

    # log all errors
    dp.add_error_handler(error)

    # add the configuration to the preferences
    prefs.add_config(config)

    # Start the Bot
    updater.start_polling()

    logger.info('Started!')
    # Block until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
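
The command callbacks (cmd_start, cmd_add, ...) are defined elsewhere in this file. In the pre-v12 python-telegram-bot API used here, each callback receives (bot, update), plus whatever extras are requested via pass_args/pass_job_queue. A hypothetical minimal pair (reply texts are illustrative only):

def cmd_start(bot, update):
    update.message.reply_text('Hi! Use /add <pokemon> to subscribe to alerts.')

def cmd_add(bot, update, args, job_queue):
    # pass_args=True supplies the words after the command as `args`
    if not args:
        update.message.reply_text('Usage: /add <pokemon name>')
        return
    update.message.reply_text('Added: %s' % ' '.join(args))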
Example No. 15
def test_300w_3d_helen1(model_input=None, limit=-1, is_6pos=False, model_name='c2_net'):
    log.info('test_300w_3d_helen1::')
    data: [Data] = DataSources.load_naive_augmented_dataset(DataSources.DataSources._300W_3D_HELEN_NG1, limit=limit)
    predict(data, model_input, limit, is_6pos, model_name=model_name)
Example No. 16
def run_validation_set2(model_input=None, limit=-1, is_6pos=False, model_name='c2_net'):
    log.info('run_validation_set2::')
    data: [Data] = DataSources.load_validation_dataset2()
    predict(data, model_input, limit, is_6pos, model_name=model_name)
Example No. 17
def main():
    with open(sys.argv[1]) as conf_data:
        config = json.load(conf_data)

    con = ds.connector(config)
    print(con.proc_model())
Example No. 18
def naive_augment_validation_set2():
    output_folder = '../augmented/validation_set2'

    data: [Data] = DataSources.load_validation_dataset2()

    gen_naive_augmentations(data, output_folder)
Example No. 19

if __name__ == "__main__":
    import DataSources as mydatasource
    from importlib import reload  # reload is not a builtin in Python 3
    reload(mydatasource)
    #Constants
    FACTORING_NUMBER = 100  #values divide this number to rescale to integer.
    cchuncked_lines = []
    ROUNDAT = 0  # to convert to int, make it 0
    INTEGER_CONVERSION = True

    #Load Data.
    dlslist = []
    datatype = 'mscounty'
    t0, t1 = 0, 0
    polygons, inputspatialref = mydatasource.getPolygons(datatype=datatype)
    print(len(polygons))
    for pid in [0]:  #,63,64,65,66,67,68,69]:#range(len(polygons)):
        print("pid:"), pid
        polygon = [polygons[pid]]
        try:
            t0 = time.time()
            cchuncked_lines, cntoriglines = divide_lines_into_chunks(polygon)
            t1 = time.time()
        except Exception:
            continue
        dls = constructDls(cchuncked_lines)
        dlslist.append((datatype + ',' + str(pid) + "," + str(cntoriglines), dls))
        print("Constructed a dictionary-type DLS. time (div, dlsconst):",
              round(t1 - t0, 3), ',', round(time.time() - t1, 3))
        print()
Example No. 20
def main():

    ######################
    # PROCESS ONTOLOGIES #
    ######################
    # read in ontologies - using script from PheKnowLator repo (https://github.com/callahantiff/PheKnowLator)
    ont = DataSources.OntData('resources/ontology_source_list.txt')
    ont.file_parser()
    ont.get_data_type()
    ont.get_source_list()
    ont.url_download()
    ont.get_data_files()
    ont.source_metadata()
    ont.get_source_metadata()
    ont.write_source_metadata()

    # obtain classes, DbXRefs, labels, definitions, and synonyms for each ontology
    ont_explorer = OntologyEx()
    onts = {
        'DOID':
        ['resources/ontologies/doid_without_imports.owl', ['SNOMED', 'UMLS']],
        'HP':
        ['resources/ontologies/hp_without_imports.owl', ['SNOMED', 'UMLS']],
        'VO': ['resources/ontologies/vo_without_imports.owl', [None]],
        'NCBITaxon':
        ['resources/ontologies/ncbitaxon_without_imports.owl', [None]],
        'PR': ['resources/ontologies/pr_without_imports.owl', ['UniProtKB']],
        'CHEBI': [
            'resources/ontologies/chebi_without_imports.owl',
            ['DrugBank', 'ChEMBL', 'UniProt']
        ],
        'NCIT': ['resources/ontologies/ncit_without_imports.owl', [None]]
    }

    ont_explorer.ont_info_getter(onts)

    # load ontologies
    ont_names = ['NCIT', 'CHEBI', 'PR', 'NCBITaxon', 'VO', 'HP', 'DOID']
    ontology_data = ont_explorer.ont_loader(ont_names)

    # aggregate classes and synonyms across all loaded ontologies
    # (the NCIT-only lookups are dead assignments, overwritten just below)
    # ont_syn = ontology_data['ncit_without_imports_synonyms']
    # ont_cls = ontology_data['ncit_without_imports_classes']

    ont_cls = {}
    ont_syn = {}

    for key in ontology_data.keys():

        if 'classes' in key:
            ont_cls.update(ontology_data[key])

        if 'synonyms' in key:
            ont_syn.update(ontology_data[key])

    ont_labels = {}
    items = len(ont_cls)
    for k, v in ont_cls.items():
        items -= 1
        print('{0}/{1}'.format(items, len(ont_cls)))
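        # collect every (key, synonym-string) pair whose ontology id matches
        # v[0], then flatten those pairs into a single list of strings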
        syn = [
            x for y in [(key, val[1]) for key, val in ont_syn.items()
                        if v[0] == val[0]] for x in y
        ]
        ont_labels[v[0]] = [k, '|'.join(set([k, v[1]] + syn))]

    ont_id = []
    ont_label = []
    ont_strings = []
    for k, v in ont_labels.items():
        print(k, v)
        ont_id.append(k)
        ont_label.append(v[0])
        ont_strings.append(v[1])

    ont_data = pd.DataFrame({
        'ont_id': ont_id,
        'ont_label': ont_label,
        'ont_strings': ont_strings
    })
    ont_data.to_csv('resources/ontologies/all_onts_dict.csv',
                    index=False,
                    header=True)
    ont_data = pd.read_csv('resources/ontologies/proc_ont_dict.csv',
                           header=0,
                           skiprows=0)

    ####################################
    # DOWNLOAD + PROCESS CLINICAL DATA #
    ####################################
    input_file = [
        'resources/programming/google_api/client_secret.json',
        'https://www.googleapis.com/auth/drive', 'Procs_Ontology_Mapping',
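        # NB: the next two adjacent string literals are implicitly
        # concatenated into one element, 'original_OMOP_download_4.11.2019'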
        'original_OMOP_download_4.1'
        '1.2019'
    ]

    proc_map = Procedures(input_file)
    proc_map.annotator_type()
    proc_map.read_data()
    data = proc_map.get_data()

    # # pre-process data sources  -  (id: index_VOCAB_CODE_gram#_string.string)
    # identifier = ['Procedure_Vocabulary', 'Procedure_ID']
    # definition = ['Procedure_Label', 'Procedure_Synonym']
    # clinical_corpus_123 = list(set(proc_map.data_pre_process(identifier, definition, '|', [1, 2, 3])))
    # clinical_corpus_0 = list(set(proc_map.data_pre_process(identifier, definition, '|', [])))
    # clinical_corpus_1 = list(set(proc_map.data_pre_process(identifier, definition, '|', [1])))
    # clinical_corpus_2 = list(set(proc_map.data_pre_process(identifier, definition, '|', [2])))
    # clinical_corpus_3 = list(set(proc_map.data_pre_process(identifier, definition, '|', [3])))

    input_text = []

    for i, row in data[[
            'ANCESTOR_LABEL', 'ANCESTOR_SYN', 'Procedure_Label',
            'Procedure_Synonym'
    ]].iterrows():
        print(i)
        input_text.append(' '.join(
            preprocess_sentences(row['Procedure_Label'] + ' ' +
                                 row['Procedure_Synonym'])))
        input_text.append(' '.join(
            preprocess_sentences(row['Procedure_Label'] + ' ' +
                                 row['Procedure_Synonym'] + ' ' +
                                 row['ANCESTOR_LABEL'] + ' ' +
                                 row['ANCESTOR_SYN'])))

    for i, row in ont_data.iterrows():
        print(i)
        try:
            input_text.append(' '.join(
                preprocess_sentences(row['ont_label'] + ' ' +
                                     row['ont_strings'])))
        except TypeError:
            pass

    processed_data = pd.DataFrame({'LABEL': input_text})
    processed_data.to_csv(r'resources/fasttext_input.csv',
                          header=False,
                          index=False)

    ncit = pd.read_csv('resources/NCIT.csv',
                       header=0,
                       skiprows=0,
                       error_bad_lines=False,
                       index_col=False,
                       dtype='unicode')

    ncit_reduced = ncit[[
        'Class ID', 'Preferred Label', 'Synonyms', 'Definitions', 'Obsolete',
        'CUI', 'Semantic Types', 'Parents', 'ALT_DEFINITION',
        'Anatomic_Structure_Has_Location',
        'Anatomic_Structure_Is_Physical_Part_Of', 'CHEBI_ID', 'code',
        'def-definition', 'def-source', 'DEFINITION', 'Display_Name',
        'EntrezGene_ID', 'FULL_SYN', 'GenBank_Accession_Number', 'go-term',
        'GO_Annotation', 'HGNC_ID', 'Maps_To', 'NCBI_Taxon_ID', 'OMIM_Number',
        'Preferred_Name', 'Procedure_Has_Completely_Excised_Anatomy',
        'Procedure_Has_Excised_Anatomy', 'Procedure_Has_Imaged_Anatomy',
        'Procedure_Has_Partially_Excised_Anatomy',
        'Procedure_Has_Target_Anatomy', 'Procedure_Has_Target_Disease',
        'Procedure_May_Have_Completely_Excised_Anatomy',
        'Procedure_May_Have_Excised_Anatomy',
        'Procedure_May_Have_Partially_Excised_Anatomy',
        'Procedure_Uses_Manufactured_Object', 'PubMedID_Primary_Reference',
        'Semantic_Type', 'term-group', 'term-name', 'term-source', 'UMLS_CUI',
        'Use_For', 'xRef', 'xRef Source'
    ]]

    ncit_reduced = ncit_reduced.loc[ncit_reduced['Obsolete'] != 'true']
    ncit_reduced.to_csv(r'resources/ncit_edit.csv', header=True, index=False)