def __init__(self): AggregationAPI.__init__(self,"tate") self.marking_params_per_shape = dict() self.marking_params_per_shape["text"] = text_mapping2 self.workflows[683] = {},{"init":["text"]}
def __init__(self, project_id, environment):
    """Set up text-transcription aggregation for the given project.

    Registers the text/image clustering algorithms, loads the optional
    per-project tag file from /app/config/aggregation.yml, and configures
    subject-selection flags.
    """
    AggregationAPI.__init__(self, project_id, environment)

    # the code to extract the relevant params from a text json file
    self.marking_params_per_shape["text"] = relevant_text_params

    # the code to cluster lines / image regions together
    self.default_clustering_algs["text"] = TextCluster
    self.default_clustering_algs["image"] = BlobClustering

    # load in the tag file (if one is configured for this project)
    # NOTE(review): yaml.load on a config file — trusted local path here, but
    # yaml.safe_load would be safer if the file could ever be user-supplied.
    # `with` ensures the handle is closed (the original leaked it).
    with open("/app/config/aggregation.yml", "rb") as config_fp:
        api_details = yaml.load(config_fp)
    try:
        tag_file = api_details[self.project_id]["tags"]
        self.additional_clustering_args = {
            "text": {"reduction": text_line_reduction, "tag_file": tag_file}}
    except (KeyError, TypeError):
        # no tag file configured for this project - reduction only
        # (narrowed from a bare `except:` which hid unrelated failures)
        self.additional_clustering_args = {
            "text": {"reduction": text_line_reduction}}

    self.ignore_versions = True
    self.instructions[121] = {}

    self.only_retired_subjects = False
    self.only_recent_subjects = True
    self.rollbar_token = None
def __init__(self, project_id, environment, end_date=None):
    """Transcription-specific set-up, restricted to projects 245 and 376."""
    AggregationAPI.__init__(self, project_id, environment, end_date=end_date)
    # guard: stop transcription aggregation being run on other projects
    if int(project_id) not in (245, 376):
        raise ValueError('project_id must be either 245 or 376')
def __init__(self, project_id, csv_classification_file):
    """Load classifications from a CSV export instead of the live database."""
    AggregationAPI.__init__(self, project_id, "development")
    # read the csv export into a pandas dataframe
    frame = pandas.read_csv(csv_classification_file)
    # derive each row's subject id from its subject_data field
    frame["subject_id"] = frame["subject_data"].map(extract_subject_id)
    self.classifications_dataframe = frame
    self.aggregation_results = {}
def __init__(self, project_id, csv_classification_file):
    """CSV-backed variant: classifications come from a file, not the DB."""
    AggregationAPI.__init__(self, project_id, "development")
    # read in the csv file as a dataframe (pandas)
    self.classifications_dataframe = pandas.read_csv(csv_classification_file)
    # the subject id for each row is embedded in its subject_data column
    subject_ids = self.classifications_dataframe["subject_data"].map(
        lambda x: extract_subject_id(x))
    self.classifications_dataframe["subject_id"] = subject_ids
    self.aggregation_results = {}
def __init__(self):
    """Project-245 set-up with text clustering and line-segment reduction."""
    AggregationAPI.__init__(self, 245)
    # start from the basic text mapping...
    self.marking_params_per_shape = {"text": text_mapping2}
    # ...then switch to the richer relevant-params extractor
    self.marking_params_per_shape["text"] = relevant_text_params
    # cluster text lines; 4d line segments are reduced via text_line_reduction
    self.__set_clustering_algs__(
        {"text": TextCluster}, {"text": text_line_reduction})
    self.ignore_versions = True
    self.instructions[683] = {}
def __init__(self):
    """Project-245 set-up: text marks clustered with TextCluster."""
    AggregationAPI.__init__(self, 245)
    self.marking_params_per_shape = dict()
    self.marking_params_per_shape["text"] = text_mapping2
    # the richer extractor replaces the basic mapping set just above
    self.marking_params_per_shape["text"] = relevant_text_params
    # cluster lines of text, reducing 4d segments to 2d first
    clustering_algs = {"text": TextCluster}
    reductions = {"text": text_line_reduction}
    self.__set_clustering_algs__(clustering_algs, reductions)
    self.ignore_versions = True
    self.instructions[683] = {}
def __init__(self):
    """Project-245 set-up with text clustering plus subject retirement."""
    AggregationAPI.__init__(self, 245)
    self.marking_params_per_shape = {"text": text_mapping2}
    self.marking_params_per_shape["text"] = relevant_text_params
    # cluster text lines; reduce 4d line segments via text_line_reduction
    self.__set_clustering_algs__(
        {"text": TextCluster}, {"text": text_line_reduction})
    # retire subjects on workflow 121 via the Panoptes API
    retirement_params = {
        "host": self.host_api,
        "project_id": self.project_id,
        "token": self.token,
        "workflow_id": 121,
    }
    self.__set_classification_alg__(SubjectRetirement, retirement_params)
    self.ignore_versions = True
    self.instructions[683] = {}
def __init__(self):
    """Project-245 variant: text clustering and retirement on workflow 121."""
    AggregationAPI.__init__(self, 245)
    params = dict()
    params["text"] = text_mapping2
    self.marking_params_per_shape = params
    # the richer extractor overrides the mapping installed just above
    self.marking_params_per_shape["text"] = relevant_text_params
    self.__set_clustering_algs__({"text": TextCluster},
                                 {"text": text_line_reduction})
    self.__set_classification_alg__(SubjectRetirement,
                                    {"host": self.host_api,
                                     "project_id": self.project_id,
                                     "token": self.token,
                                     "workflow_id": 121})
    self.ignore_versions = True
    self.instructions[683] = {}
def __init__(self, project_id, environment, end_date=None):
    """Initialise transcription aggregation; only projects 245/376 allowed."""
    AggregationAPI.__init__(self, project_id, environment, end_date=end_date)
    # just to stop me from using transcription on other projects
    permitted = (245, 376)
    if int(project_id) not in permitted:
        raise ValueError('project_id must be either 245 or 376')
def __init__(self, project_id, environment, end_date=None):
    """Initialise transcription aggregation for a supported project.

    Raises:
        ValueError: if project_id is not 245 or 376 — transcription
            aggregation is only meaningful for those two projects.
    """
    AggregationAPI.__init__(self, project_id, environment, end_date=end_date)
    # just to stop me from using transcription on other projects
    # (was an `assert`, which is silently stripped under `python -O`;
    # raise explicitly instead, matching the other variants of this check)
    if int(project_id) not in (245, 376):
        raise ValueError('project_id must be either 245 or 376')