def read_models(config, families):
    """Load per-model pricing history from the CSV file named in *config*.

    :param config: mapping holding DATA_FILE_NAME_KEY -> path of the CSV file.
    :param families: mapping from family id -> family data object.
    :return: dict mapping SMID -> Model, each populated with its ModelYears.
    """
    models = {}
    with open(config[DATA_FILE_NAME_KEY], 'r') as data_file:
        for row in csv.DictReader(data_file):
            smid = row[SMID_KEY]
            # Rows without an SMID are ignored entirely.
            if not smid:
                continue
            if smid not in models:
                models[smid] = Model(smid, row[FAMILY_ID_KEY])
            year_entry = ModelYear(int(row[YEAR_KEY]),
                                   to_float(row[STATISTIC_PRICE_KEY]),
                                   to_float(row[ITEMS_AMOUNT_KEY]),
                                   to_float(row[STDEV_KEY]),
                                   to_float(row[NEW_CAR_PRICE_KEY]))
            # family_data is refreshed on every row, so the last row's
            # family id wins (matches the original behavior).
            models[smid].family_data = families[row[FAMILY_ID_KEY]]
            models[smid].add_model_year(year_entry)
    return models
def __init__(self, controller: RemoteInterface, state_observer: StateObserver, serializer: CommandClassSerializer):
    """Initialize the node: wire in collaborators and reset network identity.

    :param controller: remote interface handed to the BaseNode base.
    :param state_observer: observer notified of node state changes.
    :param serializer: serializer for command classes.
    """
    # Multiple inheritance: both bases are initialized explicitly, in order.
    Model.__init__(self)
    BaseNode.__init__(self, controller)
    self.serializer = serializer
    self.state_observer = state_observer
    self.security_utils = SecurityUtils()
    # Network identity — all None/empty until the node is part of a network.
    self.home_id: Optional[int] = None
    self.node_id: Optional[int] = None
    self.suc_node_id: Optional[int] = None
    self.channels: List[Channel] = []
    # Whether secure (encrypted) communication is active for this node.
    self.secure = False
def create(modeldir, logger=StdOutLogger):
    """Assemble a PlayRunRatioModel from the artifacts stored under *modeldir*.

    Parses the feature map, factor map, and the serialized binary model, then
    logs how long construction took via the model's own logger.

    :param modeldir: directory containing the serialized model artifacts.
    :param logger: NOTE(review) — accepted but never used; timing is reported
        through ``model.logger`` instead. Confirm whether this is intentional.
    :return: the constructed PlayRunRatioModel.
    """
    start = time.time()
    feature_map = FeatureMap.parse(modeldir)
    factor_map = FactorMap.parse(modeldir)
    binary_model = Model.deserialize(modeldir)
    model = PlayRunRatioModel(binary_model, feature_map, factor_map)
    elapsed = time.time() - start
    model.logger.info("Elapsed time to create model {0}".format(elapsed))
    return model
def __getstate__(self):
    """Build the pickling/serialization state for this node.

    Extends the base Model state with the node's network identity fields.
    The network key is exported only when the node is secure, and a falsy
    key is normalized to None (mirrors the original ``and``/``or`` chain).
    """
    state = dict(Model.__getstate__(self))
    state['home_id'] = self.home_id
    state['node_id'] = self.node_id
    state['suc_node_id'] = self.suc_node_id
    state['channels'] = self.channels
    key = None
    if self.secure:
        key = self.security_utils.network_key or None
    state['network_key'] = key
    return state
def train():
    """Train the model indefinitely, checkpointing on validation improvement.

    Each outer iteration validates first, saves ``model.pt`` whenever the
    validation loss improves, then runs one full pass over the training set
    with SGD + cyclic learning rate. Runs forever (no stopping condition).
    """
    # Renamed locals so the data loader no longer shadows this function's name.
    train_loader = mk_loader('data', 'train.txt')
    val_loader = mk_loader('data', 'validate.txt')
    model = Model(4).to('cuda')
    optimizer = SGD(
        model.parameters(),
        lr=0.1,
        momentum=0.9,
        weight_decay=1e-4,
        nesterov=True,
    )
    scheduler = CyclicLR(
        optimizer,
        0.01,
        0.1,
        mode='exp_range',
        gamma=0.99995,
        step_size_up=4000,
    )
    stats = Writer(model)
    best_loss = torch.tensor(float('inf'))
    while True:
        stats.report_model_parameters()
        print("validating...")
        model.eval()
        val_loss = validation_loss(model, val_loader)
        stats.report_validation_loss(val_loss)
        # Checkpoint only when validation improves.
        if val_loss < best_loss:
            torch.save(model.state_dict(), 'model.pt')
            best_loss = val_loss
        print(f"...done, loss {val_loss:.3E}")
        model.train()
        for batch in tqdm(train_loader):
            optimizer.zero_grad()
            y, loss = batch_loss(model, batch)
            loss.backward()
            optimizer.step()
            scheduler.step()
            stats.report_train_loss(loss.mean())
            # Periodically log raw outputs for inspection.
            if stats.step % 32 == 0:
                stats.report_output(batch.y, torch.sigmoid(y))
            stats.on_step()
# --- tail of a Controller method whose beginning is outside this view ---
# (chooses a movement command toward the current pellet, then issues it)
        elif self.m.mybox[1] > self.current_pellet[1]:
            cmd = 'up'
        if cmd:
            self.m.do_cmd(cmd)

################### VIEW #############################
class View():
    # Throttled console view: prints the box location every max_timer ticks.
    def __init__(self, m, max_timer = 50):
        # m: shared Model instance; max_timer: ticks between prints.
        self.m = m
        self.max_timer = max_timer
        self.timer = max_timer

    def display(self):
        # Count down each call; print and reset when the timer expires.
        # NOTE: Python 2 print statement — this file predates Python 3.
        self.timer-=1
        if self.timer <= 0:
            print 'location: ' + str(self.m.mybox[0]) + ', ' + str(self.m.mybox[1])
            self.timer = self.max_timer

################### LOOP #############################
# Main game loop: poll input, advance the model, render — at ~50 Hz.
model = Model()
c = Controller(model)
v = View(model)
while not model.game_over:
    sleep(0.02)
    c.poll()
    model.update()
    v.display()
print("TRAIN RMSE: {0} XVAL RMSE {1}".format(tr_rmse, xv_rmse)) #print out the feature importance importances = rfc.feature_importances_ std = np.std([tree.feature_importances_ for tree in rfc.estimators_], axis=0) indices = np.argsort(importances)[::-1] # Print the feature ranking print("Feature ranking:") for f in range(len(traincols)): print("%d. feature %s (%f)" % (f + 1, traincols[f], importances[indices[f]])) #save the model to disk Model.serialize(rfc, modeldir) #serialize the features FeatureMap.save(traincols, modeldir) #serialize the Factors / Categorical variables if len(labelencoders) > 0: FactorMap.save(labelencoders, modeldir) # revert the float columns back to their string representations for writing out to a file for colname in classificationcols: le = labelencoders[colname] X_xv[colname] = le.inverse_transform(X_xv[colname]) #save results to file outfilepath = os.path.join(datadir, "xvalpreictions.csv")
def prase_content(soup, code):
    # NOTE(review): name looks like a typo for "parse_content"; kept because
    # external callers may reference it by this exact name.
    """Scrape fund statistics from a fund-detail page into a Model.

    Python 2 / BeautifulSoup. Two page layouts are handled: the normal fund
    layout (dataItem01/02/03 blocks) in the try body, and an alternate layout
    (fundInfoItem block) reached when positional indexing raises IndexError.
    All extraction is positional (.contents[i]) and therefore brittle against
    any markup change. The function presumably returns `model` after this
    fragment — the tail is outside this view.
    """
    model = Model()
    model.code = unicode(code, "utf-8")
    try:
        # Name is the page title up to the first '('.
        title = soup.find("title")
        model.name = title.text.split('(')[0]
        item01 = soup.find("dl", class_="dataItem01")
        model.evaluate_value = item01.contents[1].contents[0].text
        model.increase_value = item01.contents[1].contents[2].contents[0].text
        # [:-1] strips a trailing unit character (presumably '%') — TODO confirm.
        model.increase_percent = item01.contents[1].contents[2].contents[1].text[:-1]
        model.one_month = item01.contents[2].contents[1].text[:-1]
        model.one_year = item01.contents[3].contents[1].text[:-1]
        item02 = soup.find("dl", class_="dataItem02")
        model.per_value = item02.contents[1].contents[0].text
        model.per_value_percent = item02.contents[1].contents[1].text[:-1]
        model.three_month = item02.contents[2].contents[1].text[:-1]
        model.three_year = item02.contents[3].contents[1].text[:-1]
        item03 = soup.find("dl", class_="dataItem03")
        model.total_value = item03.contents[1].contents[0].text
        model.six_month = item03.contents[2].contents[1].text[:-1]
        model.till_now = item03.contents[3].contents[1].text[:-1]
        # General fund metadata lives in the third table on the page.
        tables = soup.find_all("table")
        model.type = tables[2].contents[0].contents[0].text.split("|")[0]
        model.size = tables[2].contents[0].contents[1].contents[1][1:]
        model.manager = tables[2].contents[0].contents[2].contents[1].text
        model.start_date = tables[2].contents[1].contents[0].contents[1][1:]
        model.owner = tables[2].contents[1].contents[1].contents[2].text
        #model.level = tables[2].contents[1].contents[2].contents[2].text
        # Rating is encoded in a CSS class name; the 5th character carries the
        # level when the class name is longer than 4 characters.
        level = tables[2].contents[1].contents[2].contents[2].attrs['class'][0]
        if len(level) > 4:
            model.level = level[4]
        else:
            model.level = 0
    except IndexError, e:
        # Alternate page layout (e.g. money-market funds): different DOM shape.
        infoItem = soup.find("div", class_="fundInfoItem")
        model.wan_get = infoItem.contents[0].contents[0].contents[1].text
        model.seven_get = infoItem.contents[0].contents[2].contents[1].text[:-1]
        # NOTE(review): "fourting"/"two_eghit" look like typos (fourteen /
        # twenty-eight?) — kept; renaming would break attribute consumers.
        model.fourting_get = infoItem.contents[0].contents[4].contents[1].text[:-1]
        model.two_eghit_get = infoItem.contents[0].contents[6].contents[1].text[:-1]
        model.one_month = infoItem.contents[1].contents[0].contents[0].contents[1].text[:-1]
        model.one_year = infoItem.contents[1].contents[0].contents[1].contents[1].text[:-1]
        model.three_month = infoItem.contents[1].contents[1].contents[0].contents[1].text[:-1]
        model.three_year = infoItem.contents[1].contents[1].contents[1].contents[1].text[:-1]
        model.six_month = infoItem.contents[1].contents[2].contents[0].contents[1].text[:-1]
        model.till_now = infoItem.contents[1].contents[2].contents[1].contents[1].text[:-1]
        # Same metadata-table extraction as the primary layout.
        tables = soup.find_all("table")
        model.type = tables[2].contents[0].contents[0].text.split("|")[0]
        model.size = tables[2].contents[0].contents[1].contents[1][1:]
        model.manager = tables[2].contents[0].contents[2].contents[1].text
        model.start_date = tables[2].contents[1].contents[0].contents[1][1:]
        model.owner = tables[2].contents[1].contents[1].contents[2].text
        #model.level = tables[2].contents[1].contents[2].contents[2].text
        level = tables[2].contents[1].contents[2].contents[2].attrs['class'][0]
        if len(level) > 4:
            model.level = level[4]
        else:
            model.level = 0
from DAL.JSONDAL import JSONDal
from common import SMID_KEY, Model, FAMILY_ID_KEY, YEAR_KEY, STATISTIC_PRICE_KEY, ITEMS_AMOUNT_KEY, STDEV_KEY, \
    NEW_CAR_PRICE_KEY, ModelYear, FILE_DATA_BASE_PATH_KEY, SMALL_PIPE_NAME, PipeHistory
from data_reader import to_float

if __name__ == '__main__':
    # Import the car-list CSV into the JSON data store.
    # NOTE(review): `csv` and `datetime` are used below but not imported in
    # this chunk — presumably imported earlier in the file; confirm.
    source_path = ".\\data\\All_Cars_List.csv"
    models = {}
    with open(source_path, 'r') as data_file:
        for row in csv.DictReader(data_file):
            smid = row[SMID_KEY]
            # Skip rows with no model id.
            if not smid:
                continue
            if smid not in models:
                models[smid] = Model(smid, row[FAMILY_ID_KEY])
            model_year = ModelYear(int(row[YEAR_KEY]),
                                   to_float(row[STATISTIC_PRICE_KEY]),
                                   to_float(row[ITEMS_AMOUNT_KEY]),
                                   to_float(row[STDEV_KEY]),
                                   to_float(row[NEW_CAR_PRICE_KEY]))
            model_year.new_price = row["PriceYad2"]
            model_year.past_pipes[SMALL_PIPE_NAME] = PipeHistory(model_year.new_price, 0, 0, datetime.datetime.now())
            # NOTE(review): new_price is a CSV string, so `!= 0` is always
            # True; the gate is effectively just truthiness — confirm intent.
            if model_year.new_price and model_year.new_price != 0:
                models[smid].add_model_year(model_year)
            elif len(models[smid].years) == 0:
                # Drop models that never accumulated a priced year.
                models.pop(smid, None)
    dal = JSONDal({FILE_DATA_BASE_PATH_KEY: ".\\data\\FileData.json"})
    dal.update_models(models)
    dal.flush()