def make_predictions():
    """Write a ``<name>_result.csv`` prediction dump for every finished experiment.

    Experiments that are not completed yet, or that already have a result
    file on disk, are skipped.
    """
    project = projects.Project(Path(__file__).parent.parent)
    for experiment in project.experiments():
        result_path = os.path.join(experiment.path, experiment.name() + '_result.csv')
        # Skip unfinished experiments and ones whose dump already exists.
        if not experiment.isCompleted() or os.path.exists(result_path):
            continue
        predictions = generic.parse(experiment.path).predictions('test')
        predictions.dump(result_path)
def __init__(self, path, input_columns, output_columns, ctypes=None, input_groups=None, output_groups=None):
    """Parse the experiment config at *path* and record column/group metadata.

    :param path: path to the experiment's config file; the grandparent
        directory is taken as the project directory.
    :param input_columns: names of input columns.
    :param output_columns: names of output columns.
    :param ctypes: optional mapping of column types (defaults to empty dict).
    :param input_groups: optional mapping of input column groups.
    :param output_groups: optional mapping of output column groups.
    """
    # BUG FIX: the defaults were mutable dicts ({}), which are shared across
    # all instances created without explicit arguments; use None sentinels.
    self.cfg = generic.parse(path)
    # Project dir is two levels above the config file.
    path = os.path.dirname(os.path.dirname(path))
    self.cfg._projectDir = path
    self.input_columns = input_columns
    self.output_columns = output_columns
    self.image_path = []
    self.ctypes = {} if ctypes is None else ctypes
    self.path = path
    self.input_groups = {} if input_groups is None else input_groups
    self.output_groups = {} if output_groups is None else output_groups
def translate(sentence: str, experiment: str = 'eng_to_ru'):
    """Translate *sentence* with the given experiment's model and print the result.

    :param sentence: source sentence to translate.
    :param experiment: name of the experiment config to use; defaults to
        'eng_to_ru' for backward compatibility.
    """
    # Load the target-language vocabulary (index -> word mapping).
    file_path = os.path.join(context.get_current_project_data_path(), "rus.vocab")
    vocabulary = utils.load(file_path)
    preds = generic.parse(experiment).predictions(
        builtin_datasets.from_array([sentence], ['']))
    for item in preds:
        root_item = item.rootItem()
        # Each prediction step is a probability vector; argmax picks the word id.
        # Leading " " per word reproduces the original output format exactly.
        translated = ''.join(
            " " + vocabulary.i2w[np.argmax(indices)] for indices in item.prediction)
        print(root_item.x + " " + translated)
def parse_config(self):
    """Parse this experiment's YAML config, enriched with project-level settings.

    Uses the project's common config (if present) as an "extra" layer, copies
    GPU/resume settings onto the parsed config, registers every ``.py`` module
    in the project's modules directory as an import, and points the config at
    the project's data directory when it exists.

    :return: the parsed config object.
    """
    extra = None
    if self.project is not None:
        if os.path.exists(self.project.commonPath()):
            extra = self.project.commonPath()
    # Prefer the standard config path; fall back to the concrete one.
    if os.path.exists(self.getConfigYamlPath()):
        cfg = generic.parse(self.getConfigYamlPath(), extra)
    else:
        cfg = generic.parse(self.getConfigYamlConcretePath(), extra)
    cfg.gpus = self.gpus
    if self.allowResume:
        cfg.setAllowResume(self.allowResume)
    if self.project is not None:
        if os.path.exists(self.project.modulesPath()):
            for m in os.listdir(self.project.modulesPath()):
                # BUG FIX: '".py" in m' also matched '.pyc'/'.pyo' files and
                # names merely containing ".py"; only true *.py modules count.
                if m.endswith(".py"):
                    cfg.imports.append(m[:-3])
        if os.path.exists(self.project.dataPath()):
            cfg.datasets_path = self.project.dataPath()
    return cfg
def make_predictions():
    """Print decoded test predictions for every completed experiment.

    For each finished experiment, decodes each prediction's argmax word
    indices through the "rus.vocab" vocabulary and prints the source item
    together with the decoded sentence.
    """
    experiments = projects.Project(Path(__file__).parent.parent).experiments()
    # PERF FIX: the vocabulary was re-loaded from disk on every loop
    # iteration; it is loop-invariant, so load it lazily exactly once.
    vocabulary = None
    for exp in experiments:
        if exp.isCompleted():
            if vocabulary is None:
                file_path = os.path.join(context.get_current_project_data_path(), "rus.vocab")
                vocabulary = utils.load(file_path)
            preds = generic.parse(exp.path).predictions('test')
            for item in preds:
                rootItem = item.rootItem()
                # Leading " " per word reproduces the original output format.
                sentence = ''.join(
                    " " + vocabulary.i2w[np.argmax(indices)] for indices in item.prediction)
                print(rootItem.x + " " + sentence)
def blend(self, ds, w=0.5) -> DataSet:
    """Blend this dataset with *ds*.

    If *ds* is a string it is treated as an experiment config path: its
    predictions for this dataset's parent are loaded and blended instead.

    NOTE(review): on the string branch *w* is not forwarded to the recursive
    call, so the default 0.5 is always used there — confirm this is intended.
    """
    if not isinstance(ds, str):
        return self._inner_blend(ds, w)
    from musket_core import generic
    resolved = generic.parse(ds).predictions(self.parent.name)
    return self.blend(resolved)
def make_predictions():
    """Dump the "questions1" experiment's test predictions to predictions.csv."""
    generic.parse("questions1").predictions('test').dump('predictions.csv')
def make_predictions():
    """Dump Test-split predictions for the CNN and RNN experiments to CSV files."""
    targets = (
        ("simpleCnn", 'predictions_cnn.csv'),
        ("simpleRnn", 'predictions_rnn.csv'),
    )
    for experiment_name, output_file in targets:
        generic.parse(experiment_name).predictions('Test').dump(output_file)