def __init__(self, DEBUG, list_of_demonstrations, featfile, trialname):
    """Initialize bookkeeping state for a clustering/evaluation run.

    Parameters:
        DEBUG: debug flag; accepted for interface compatibility but not
            read anywhere in this constructor.
        list_of_demonstrations: demonstrations to process; also used to
            build the frame->surgeme lookup below.
        featfile: path/name of the feature file, stored as-is.
        trialname: suffix appended to a random hashcode to form the
            unique trial identifier.
    """
    self.list_of_demonstrations = list_of_demonstrations

    # Per-demonstration data matrices and their sizes.
    self.data_X = {}
    self.data_X_size = {}
    self.data_N = {}

    self.file = None
    self.featfile = featfile
    self.metrics_picklefile = None

    # Changepoint results (populated later; None/empty until then).
    self.change_pts = None
    self.change_pts_Z = None
    self.change_pts_W = None
    self.list_of_cp = []

    # Changepoint lookup tables (cp -> frame / demonstration / cluster level).
    self.map_cp2frm = {}
    self.map_cp2demonstrations = {}
    self.map_cp_level1 = {}
    self.map_cp_level2 = {}
    self.map_level1_cp = {}
    self.map_level2_cp = {}
    self.map_cp2milestones = {}
    self.map_cp2surgemes = {}
    self.map_cp2surgemetransitions = {}
    self.l2_cluster_matrices = {}

    # Frame -> surgeme label maps for all demonstrations.
    self.map_frm2surgeme = parser.get_all_frame2surgeme_maps(self.list_of_demonstrations)

    # Random hash prefix keeps repeated trials with the same name distinct.
    self.trial = utils.hashcode() + trialname

    self.cp_surgemes = []
    self.pruned_L1_clusters = []
    self.pruned_L2_clusters = []

    # Clustering-quality metrics (silhouette / Dunn indices per level).
    self.silhouette_scores = {}
    self.dunn_scores_1 = {}
    self.dunn_scores_2 = {}
    self.dunn_scores_3 = {}
    self.level2_dunn_1 = []
    self.level2_dunn_2 = []
    self.level2_dunn_3 = []
    self.level2_silhoutte = []  # NOTE: attribute name keeps historical spelling; callers rely on it
    self.label_based_scores_1 = {}
    self.label_based_scores_2 = {}
    self.gmm_objects = {}

    self.sr = 10  # sampling rate; hard-coded here (other variants read constants.SR)
def __init__(self, DEBUG, list_of_demonstrations, featfile, trialname):
    """Initialize bookkeeping state for a clustering/evaluation run.

    Parameters:
        DEBUG: debug flag; accepted for interface compatibility but not
            read anywhere in this constructor.
        list_of_demonstrations: demonstrations to process; also used to
            build the frame->surgeme lookup below.
        featfile: path/name of the feature file, stored as-is.
        trialname: suffix appended to a random hashcode to form the
            unique trial identifier.
    """
    self.list_of_demonstrations = list_of_demonstrations

    # Per-demonstration data matrices and their sizes.
    self.data_X = {}
    self.data_X_size = {}
    self.data_N = {}

    self.file = None
    self.featfile = featfile
    self.metrics_picklefile = None

    # Changepoint results (populated later; None/empty until then).
    self.change_pts = None
    self.change_pts_Z = None
    self.change_pts_W = None
    self.list_of_cp = []

    # Changepoint lookup tables (cp -> frame / demonstration / cluster level).
    self.map_cp2frm = {}
    self.map_cp2demonstrations = {}
    self.map_cp_level1 = {}
    self.map_cp_level2 = {}
    self.map_level1_cp = {}
    self.map_level2_cp = {}
    self.map_cp2milestones = {}
    self.map_cp2surgemes = {}
    self.map_cp2surgemetransitions = {}
    self.l2_cluster_matrices = {}

    # Frame -> surgeme label maps for all demonstrations.
    self.map_frm2surgeme = utils.get_all_frame2surgeme_maps(self.list_of_demonstrations)

    # Random hash prefix keeps repeated trials with the same name distinct.
    self.trial = utils.hashcode() + trialname

    self.cp_surgemes = []
    self.pruned_L1_clusters = []
    self.pruned_L2_clusters = []

    # Clustering-quality metrics (silhouette / Dunn indices per level).
    self.silhouette_scores = {}
    self.dunn_scores_1 = {}
    self.dunn_scores_2 = {}
    self.dunn_scores_3 = {}
    self.level2_dunn_1 = []
    self.level2_dunn_2 = []
    self.level2_dunn_3 = []
    self.level2_silhoutte = []  # NOTE: attribute name keeps historical spelling; callers rely on it
    self.label_based_scores_1 = {}
    self.label_based_scores_2 = {}
    self.gmm_objects = {}

    self.sr = 10  # sampling rate; hard-coded here (other variants read constants.SR)
# NOTE(review): this fragment is truncated — the `serialize_rdf_unidades` dict
# literal is never closed (unbalanced brackets), so the full definition is not
# visible here. Declarative mapping: presumably consumed by an RDF serializer
# that applies "mapper" to each record from "data" — TODO confirm against the
# serializer code. Left byte-identical pending the missing remainder.
from save.instituicoes_pt import * serialize_rdf_unidades = { "classType": Unidade, "collection": [ { ## ufrn "toSave": False, "mapper": { "nome": "nome_unidade", "code": "id_unidade", "id": lambda d: hashcode("ufrn", "centro", d["id_unidade"]), "university": lambda d: UFRN, "sameas": lambda d: "https://sigaa.ufrn.br/sigaa/public/departamento/portal.jsf?id=" + str(d["id_unidade"]), }, "data": lambda: list( filter( lambda d: d["tipo_unidade_organizacional"].find("CENTRO") > -1, dados_ckan( "http://dados.ufrn.br/api/action/datastore_search?resource_id=3f2e4e32-ef1a-4396-8037-cbc22a89d97f" ))),
def __init__(self, DEBUG, list_of_demonstrations, featfile, trialname):
    """Initialize state for a two-level (Z/W) clustering run.

    Parameters:
        DEBUG: debug flag; accepted for interface compatibility but not
            read anywhere in this constructor.
        list_of_demonstrations: demonstrations to process; also used to
            build the frame->surgeme lookup below.
        featfile: path/name of the feature file, stored as-is.
        trialname: suffix appended to a random hashcode to form the
            unique trial identifier.
    """
    self.list_of_demonstrations = list_of_demonstrations

    # Per-demonstration data matrices (X combined, W/Z components) and sizes.
    self.data_X = {}
    self.data_W = {}
    self.data_Z = {}
    self.data_X_size = {}
    self.data_N = {}

    self.file = None
    self.featfile = featfile
    self.metrics_picklefile = None

    # Changepoint results (populated later; None/empty until then).
    self.change_pts = None
    self.change_pts_Z = None
    self.change_pts_W = None
    self.list_of_cp = []

    # Changepoint lookup tables (cp -> frame / demonstration / cluster level).
    self.map_cp2frm = {}
    self.map_cp2demonstrations = {}
    self.map_cp2level1 = {}
    self.map_level12cp = {}
    self.map_cp2milestones = {}
    self.map_cp2surgemes = {}
    self.map_cp2surgemetransitions = {}
    self.l2_cluster_matrices = {}

    # Frame -> surgeme label maps for all demonstrations.
    self.map_frm2surgeme = utils.get_all_frame2surgeme_maps(self.list_of_demonstrations)

    # Random hash prefix keeps repeated trials with the same name distinct.
    self.trial = utils.hashcode() + trialname

    self.cp_surgemes = []
    self.pruned_L1_clusters = []
    self.pruned_L2_clusters = []

    # Clustering-quality metrics (silhouette / Dunn indices per level).
    self.silhouette_scores_global = {}
    self.silhouette_scores_weighted = {}
    self.dunn_scores_1 = {}
    self.dunn_scores_2 = {}
    self.dunn_scores_3 = {}
    self.level2_dunn_1 = []
    self.level2_dunn_2 = []
    self.level2_dunn_3 = []
    # NOTE: attribute names keep historical "silhoutte" spelling; callers rely on them.
    self.level2_silhoutte_global = []
    self.level2_silhoutte_weighted = []
    self.label_based_scores_1 = {}
    self.label_based_scores_2 = {}

    self.sr = constants.SR
    self.representativeness = constants.PRUNING_FACTOR_ZW

    # Components for Mixture model at each level
    self.n_components_cp = constants.N_COMPONENTS_CP
    self.n_components_L1 = constants.N_COMPONENTS_L1
    self.n_components_L2 = constants.N_COMPONENTS_L2

    self.temporal_window = constants.TEMPORAL_WINDOW_ZW

    self.fit_GMM = False
    self.fit_DPGMM = True
    # Fixed precedence bug: the original `self.fit_DPGMM or self.fit_GMM == True`
    # compared only fit_GMM to True; the intent is "at least one model enabled".
    assert self.fit_DPGMM or self.fit_GMM, "Enable at least one of GMM/DPGMM"
# NOTE(review): this fragment is truncated — the `serialize_rdf_subunidades`
# dict literal ends mid-`filter(` call, so the full definition is not visible
# here. Declarative mapping: presumably consumed by an RDF serializer that
# applies "mapper" to each record from "data" — TODO confirm against the
# serializer code. Left byte-identical pending the missing remainder.
from utils import dados_ckan, dados_ufma, hashcode, dados_iffar from save.instituicoes_pt import * serialize_rdf_subunidades = { "classType": Subunidade, "collection": [ { ## ufrn "toSave": False, "mapper": { "nome": "nome_unidade", "id": lambda d: hashcode("ufrn", "departamento", d["id_unidade"]), "code": "id_unidade", "university": lambda d: UFRN, "unidade": lambda d: "https://www.dbacademic.tech/resource/" + hashcode( "ufrn", "centro", d["id_unidade_responsavel"]), "sameas": lambda d: "https://sigaa.ufrn.br/sigaa/public/departamento/portal.jsf?id=" + str(d["id_unidade"]), }, "data": lambda: list( filter(
def __init__(self, DEBUG, list_of_demonstrations, featfile, trialname):
    """Initialize state for a two-level (Z/W) clustering run.

    Parameters:
        DEBUG: debug flag; accepted for interface compatibility but not
            read anywhere in this constructor.
        list_of_demonstrations: demonstrations to process; also used to
            build the frame->surgeme lookup below.
        featfile: path/name of the feature file, stored as-is.
        trialname: suffix appended to a random hashcode to form the
            unique trial identifier.
    """
    self.list_of_demonstrations = list_of_demonstrations

    # Per-demonstration data matrices (X combined, W/Z components) and sizes.
    self.data_X = {}
    self.data_W = {}
    self.data_Z = {}
    self.data_X_size = {}
    self.data_N = {}

    self.file = None
    self.featfile = featfile
    self.metrics_picklefile = None

    # Changepoint results (populated later; None/empty until then).
    self.change_pts = None
    self.change_pts_Z = None
    self.change_pts_W = None
    self.list_of_cp = []

    # Changepoint lookup tables (cp -> frame / demonstration / cluster level).
    self.map_cp2frm = {}
    self.map_cp2demonstrations = {}
    self.map_cp2level1 = {}
    self.map_level12cp = {}
    self.map_cp2milestones = {}
    self.map_cp2surgemes = {}
    self.map_cp2surgemetransitions = {}
    self.l2_cluster_matrices = {}

    # Frame -> surgeme label maps for all demonstrations.
    self.map_frm2surgeme = utils.get_all_frame2surgeme_maps(
        self.list_of_demonstrations)

    # Random hash prefix keeps repeated trials with the same name distinct.
    self.trial = utils.hashcode() + trialname

    self.cp_surgemes = []
    self.pruned_L1_clusters = []
    self.pruned_L2_clusters = []

    # Clustering-quality metrics (silhouette / Dunn indices per level).
    self.silhouette_scores_global = {}
    self.silhouette_scores_weighted = {}
    self.dunn_scores_1 = {}
    self.dunn_scores_2 = {}
    self.dunn_scores_3 = {}
    self.level2_dunn_1 = []
    self.level2_dunn_2 = []
    self.level2_dunn_3 = []
    # NOTE: attribute names keep historical "silhoutte" spelling; callers rely on them.
    self.level2_silhoutte_global = []
    self.level2_silhoutte_weighted = []
    self.label_based_scores_1 = {}
    self.label_based_scores_2 = {}

    self.sr = constants.SR
    self.representativeness = constants.PRUNING_FACTOR_ZW

    # Components for Mixture model at each level
    self.n_components_cp = constants.N_COMPONENTS_CP
    self.n_components_L1 = constants.N_COMPONENTS_L1
    self.n_components_L2 = constants.N_COMPONENTS_L2

    self.temporal_window = constants.TEMPORAL_WINDOW_ZW

    self.fit_GMM = False
    self.fit_DPGMM = True
    # Fixed precedence bug: the original `self.fit_DPGMM or self.fit_GMM == True`
    # compared only fit_GMM to True; the intent is "at least one model enabled".
    assert self.fit_DPGMM or self.fit_GMM, "Enable at least one of GMM/DPGMM"
def __init__(self, DEBUG, list_of_demonstrations, fname, log, vision_mode = False, feat_fname = None): self.list_of_demonstrations = list_of_demonstrations if vision_mode and feat_fname is None: print "[Error] Please provide file with visual features" sys.exit() self.vision_mode = vision_mode self.feat_fname = feat_fname self.data_X = {} self.X_dimension = 0 self.data_X_size = 0 self.data_N = {} self.log = log self.file = None self.metrics_picklefile = constants.PATH_TO_CLUSTERING_RESULTS + fname + ".p" self.changepoints = None self.list_of_cp = [] self.map_cp2frm = {} self.map_cp2demonstrations = {} self.map_cp2cluster = {} self.map_level1_cp = {} self.map_cp2milestones = {} self.map_cp2surgemes = {} self.map_cp2surgemetransitions = {} self.map_frm2surgeme = utils.get_all_frame2surgeme_maps(self.list_of_demonstrations) self.trial = utils.hashcode() + fname # self.trial = fname self.cp_surgemes = [] self.pruned_L1_clusters = [] self.silhouette_score_global = None self.silhouette_score_weighted = None self.dunn_scores_1 = {} self.dunn_scores_2 = {} self.dunn_scores_3 = {} self.label_based_scores_1 = {} self.sr = constants.SR # Components for Mixture model at each level if self.vision_mode: self.n_components_cp = constants.N_COMPONENTS_CP_Z self.n_components_L1 = constants.N_COMPONENTS_L1_Z self.temporal_window = constants.TEMPORAL_WINDOW_Z self.representativeness = constants.PRUNING_FACTOR_Z self.ALPHA_CP = constants.ALPHA_Z_CP if (constants.TASK_NAME in ["000", "010", "011", "100"]): self.ALPHA_CP = constants.ALPHA_W_CP else: self.n_components_cp = constants.N_COMPONENTS_CP_W self.n_components_L1 = constants.N_COMPONENTS_L1_W self.temporal_window = constants.TEMPORAL_WINDOW_W self.representativeness = constants.PRUNING_FACTOR_W self.ALPHA_CP = constants.ALPHA_W_CP self.ALPHA_L1 = 0.4 self.fit_GMM = False self.fit_DPGMM = True assert self.fit_DPGMM or self.fit_GMM == True
def __init__(self, DEBUG, list_of_demonstrations, fname, log, vision_mode = False, feat_fname = None): self.list_of_demonstrations = list_of_demonstrations if vision_mode and feat_fname is None: print "[Error] Please provide file with visual features" sys.exit() self.vision_mode = vision_mode self.feat_fname = feat_fname self.data_X = {} self.X_dimension = 0 self.data_X_size = 0 self.data_N = {} self.log = log self.file = None self.metrics_picklefile = constants.PATH_TO_CLUSTERING_RESULTS + fname + ".p" self.changepoints = None self.list_of_cp = [] self.map_cp2frm = {} self.map_cp2demonstrations = {} self.map_cp2cluster = {} self.map_level1_cp = {} self.map_cp2milestones = {} self.map_cp2surgemes = {} self.map_cp2surgemetransitions = {} self.map_frm2surgeme = parser.get_all_frame2surgeme_maps(self.list_of_demonstrations) self.trial = utils.hashcode() + fname # self.trial = fname self.cp_surgemes = [] self.pruned_L1_clusters = [] self.silhouette_score_global = None self.silhouette_score_weighted = None self.dunn_scores_1 = {} self.dunn_scores_2 = {} self.dunn_scores_3 = {} self.label_based_scores_1 = {} self.sr = constants.SR # Components for Mixture model at each level if self.vision_mode: self.n_components_cp = constants.N_COMPONENTS_CP_Z self.n_components_L1 = constants.N_COMPONENTS_L1_Z self.temporal_window = constants.TEMPORAL_WINDOW_Z self.representativeness = constants.PRUNING_FACTOR_Z self.ALPHA_CP = constants.ALPHA_Z_CP if (constants.TASK_NAME in ["000", "010", "011", "100"]): self.ALPHA_CP = constants.ALPHA_W_CP else: self.n_components_cp = constants.N_COMPONENTS_CP_W self.n_components_L1 = constants.N_COMPONENTS_L1_W self.temporal_window = constants.TEMPORAL_WINDOW_W self.representativeness = constants.PRUNING_FACTOR_W self.ALPHA_CP = constants.ALPHA_W_CP self.ALPHA_L1 = 0.4 self.fit_GMM = False self.fit_DPGMM = True assert self.fit_DPGMM or self.fit_GMM == True
# NOTE(review): this fragment is truncated — the `serialize_rdf_monografia`
# dict literal ends mid-"mapper" for the UFMA entry, so the full definition is
# not visible here. Declarative mapping: presumably consumed by an RDF
# serializer that applies "mapper" to each record from "data" and writes to
# "rdf_path" — TODO confirm against the serializer code. Left byte-identical
# pending the missing remainder.
from utils import dados_ckan, dados_ufma, hashcode, removeNonUTF8, dados_csv from save.instituicoes_pt import * serialize_rdf_monografia = { "classType": Monografia, "collection": [ { ## ufrn "toSave": False, "mapper": { "titulo": lambda d: removeNonUTF8(d["titulo"]), "university": lambda d: UFRN, "id": lambda d: hashcode("ufrn", "monografias", str(d["_id"])), "autor": "nome_autor" }, "data": lambda: dados_ckan( "http://dados.ufrn.br/api/action/datastore_search?resource_id=7c01071b-81a4-4793-9a63-acfcd8a1aa83" ), "rdf_path": "rdf/monografias_ufrn.rdf" }, { ## ufma "toSave": False, "mapper": { "titulo": lambda d: (d["titulo"]),