def logreg_train(args):
    """Train a one-vs-all logistic regression model on the dataset.

    Loads the dataset, drops rows with missing values in the three course
    columns used as features, normalizes them, maps house names to integer
    class labels, and runs the training loop.

    @param args parsed CLI arguments (dataset, iter, learning, batch,
                stochastic, precision, visualizer)
    """
    d = load_file(args.dataset)
    try:
        # Sanitize dataset: these three courses are the model's features.
        d = d.dropna(subset=['Herbology', 'Ancient Runes', 'Astronomy'])
        # Columns 8, 12, 7 hold the selected courses; column 1 holds the
        # house (class) name.  TODO confirm indexes against the CSV layout.
        X = np.array(d.values[:, [8, 12, 7]], dtype=float)
        y = d.values[:, 1]

        # Stochastic gradient descent is mini-batch with batch size 1.
        if args.stochastic:
            args.batch = 1
        model = Model(args.iter, args.learning, int(args.batch) > 0,
                      args.batch, args.precision, args.visualizer)

        # Normalize each feature column independently.
        X = np.array([normalize(t) for t in X.T]).T

        # Convert house names to integer class indexes.
        # (Removed dead locals `new_df` and `y_unique` — they were never used.)
        y = np.array([model.feature_i[name] for name in y], dtype=int)

        # Execute logistic regression.
        model.process_logreg(X, y)
    except Exception as e:
        print("error : {0}".format(e))
def logreg_predict(args):
    """Predict a house for every dataset row using previously trained weights."""
    d = load_file(args.dataset)
    v = load_file(args.values)
    try:
        # Missing grades are treated as zero rather than dropped.
        d = d.fillna(0)
        # Build the normalized design matrix with a leading bias column.
        X = np.array(d.values[:, [8, 12, 7]], dtype=float)
        X = np.array([normalize(col) for col in X.T]).T
        X = np.insert(X, 0, 1, axis=1)
        # Trained weights: one column of parameters per house.
        theta = np.array(v.values[:, 1:].T, dtype=float)
        model = Model()
        prediction = model.hypothesis(theta, X)
        # Most probable class per row, mapped back to house names.
        houses = np.argmax(prediction, axis=1)
        matching_houses = [model.i_feature[idx] for idx in houses]
        write_prediction(matching_houses)
        print("houses.csv successfully written !")
        if args.show:
            greek_god_graph(matching_houses, X, model)
    except Exception as e:
        print("error : {0}".format(e))
        sys.exit(-1)
def main():
    """Train the spectral-clustering embedding model for cfg.total_epoch epochs."""
    cfg = Config()
    TVT, TMO = set_devices(cfg.sys_device_ids)
    data_loader = get_data_loader(cfg)
    spec_loss = SpectralCLusterLayer()
    model = Model(cfg.vector_size, cfg.fix_weight)
    model_w = DataParallel(model)
    optimizer = optim.Adam(filter(lambda p: p.requires_grad, model.parameters()),
                           lr=cfg.lr)
    modules_optims = [model, optimizer]
    TMO(modules_optims)
    may_set_mode(modules_optims, 'train')
    for epoch in range(cfg.total_epoch):
        epoch_done = False
        step = 0
        while not epoch_done:
            step += 1
            ims, _, labels, epoch_done = data_loader.next_batch()
            ims_var = Variable(TVT(torch.from_numpy(ims).float()))
            batch_size = ims_var.size()[0]
            num_cluster = len(data_loader.ids)
            # One-hot label matrix; labels appear to be 1-based (labels - 1).
            labels_matrix = np.zeros([batch_size, num_cluster], dtype=int)
            labels_matrix[range(batch_size), labels - 1] = 1
            labels_var = TVT(torch.from_numpy(labels_matrix).float())
            optimizer.zero_grad()
            feat = model_w(ims_var)
            # Gradient of the spectral clustering objective w.r.t. the features.
            G = spec_loss.grad_F(feat, labels_var)
            feat.backward(gradient=G)
            optimizer.step()
            objective_value = labels_var.size()[1] - torch.sum(
                torch.mm(spec_loss.pseudo_inverse(labels_var), feat)
                * torch.mm(spec_loss.pseudo_inverse(feat), labels_var).t())
            print("epoch %d --- loss value= %f" % (epoch, objective_value))
    # BUG FIX: `print "Finished"` is Python 2 statement syntax and raises a
    # SyntaxError under the Python 3 used by the rest of this function.
    print("Finished")
def __init__(self, fname, **params):
    """Create a mixture model for components using given weights."""
    Model.__init__(self, fname, **params)
    # Pull the stored parameters into attributes for convenient access.
    get = self.get_parameter
    self.k = get("k")        # number of mixture components
    self.d = get("d")        # dimensionality of each component
    self.weights = get("w")  # component weights
    self.means = get("M")    # component means
    self.sigmas = get("S")   # component covariances
def __init__(self, fname, **params):
    """Initialize a multi-view mixture model from its stored parameters."""
    Model.__init__(self, fname, **params)
    get = self.get_parameter
    self.k = get("k")        # number of mixture components
    self.d = get("d")        # dimensionality per view
    self.n_views = get("v")  # number of views
    self.weights = get("w")  # component weights
    self.means = get("M")    # per-view component means
    self.sigmas = get("S")   # per-view component covariances
def initWorld(self, world):
    """Init the Rpg's world from a database file.

    @param world path to the world database file
    @raises core.exception.exception when the file does not exist
    """
    # BUG FIX: `is False` compared object identity with the False singleton;
    # plain boolean negation is the correct and idiomatic test.
    if not os.path.isfile(world):
        raise core.exception.exception(_('ERROR_UNKNOWN_SELECTED_WORLD'))
    Model.setDB(world)
def __init__(self):
    """Set up the U-Net training pieces: LR schedule, loss, optimizer, metric."""
    print("unet init")
    Model.__init__(self)
    # Exponential decay: start at 1e-4, multiplied by 0.8 every 10 steps.
    global_step = tf.Variable(0, trainable=False)
    self.learning_rate = tf.train.exponential_decay(
        0.0001, global_step, 10, 0.8, staircase=True)
    self.loss = Pixelwise_weighted_loss().compute_loss
    self.optimizer = tf.train.GradientDescentOptimizer(self.learning_rate)
    self.metric = IOU()
def __init__(self, **params):
    """Create a mixture model for components using given weights."""
    Model.__init__(self, **params)
    # Mirror the stored parameters as plain attributes.
    for attr, key in (("k", "k"), ("d", "d"), ("weights", "w"),
                      ("means", "M"), ("sigmas", "S")):
        setattr(self, attr, self[key])
def getting_model(self, args):
    """Build the Model, compile it, and optionally restore saved weights."""
    print('getting model...')
    if not (args.train or args.test):
        return
    self.model = Model(batch_size=self.batch_size,
                       val_size=self.val_size,
                       max_len=self.max_len,
                       args=args,
                       dictionary=self.dictionary)
    self.model.compile()
    # Restore weights when testing, or when resuming an earlier training run.
    if (args.train and args.model_restore) or args.test:
        self.model.restore(mode=self.mode)
def generate(cls, place):
    """Generate a place's rooms using an external generating tool.

    Runs the configured dungeon generator, inserts one area row per
    generated room, and records the entrance room on the place.

    @param place Place entity representing the place
    @return the updated place entity
    @raises exception when the generator reports an error on stderr
    """
    p = subprocess.Popen(
        core.config.generator['dungeon']['generator'],
        shell=True,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE
    )
    result = p.communicate()
    # BUG FIX: `is not 0` compared identity, not value — it only worked by
    # accident thanks to CPython small-int caching.
    if len(result[1]) != 0:  # pragma: no cover
        raise exception(_('ERROR_PLACE_GENERATION'))

    d = result[0].decode('utf-8').strip().split('\n')

    # Import an external check class from the generator
    sys.path.insert(0, core.config.generator['dungeon']['path'])
    import checks

    containerName = cls.areaType + '_' + str(place['id_place'])
    idRegion = area.model.loadById(place['id_area'], ('id_region'))['id_region']

    db = Model.connect()
    c = db.cursor()
    query = "INSERT INTO area\
        (id_area_type, x, y, directions, container, id_region)\
        VALUES (:id_area_type, :x, :y, :directions, :container, :id_region)"
    entrance = None
    for index, room in enumerate(d):
        if int(room) == 0:
            continue
        params = {
            'id_area_type': place['id_area_type'],
            'x': index % 10,
            # BUG FIX: floor division keeps y an integer under Python 3
            # (`/` would store a float coordinate in the database).
            'y': index // 10,
            'directions': checks.getDirections(room) >> 2,
            'container': containerName,
            'id_region': idRegion
        }
        Model.executeQuery(c, query, params)
        if checks.isEntrance(int(room)):
            entrance = c.lastrowid
    Model.disconnect(db)

    # NOTE(review): `entrance` stays None when no entrance room is emitted —
    # confirm the generator always produces exactly one.
    model.update(
        {'entrance_id': entrance},
        ('id_place = ?', [place['id_place']])
    )
    place['entrance_id'] = entrance
    return place
def updateModel(model_id):
    """Update an existing model from the JSON request body.

    Expects a JSON payload with 'name', 'id' and 'dataset' keys.

    @param model_id id of the model being updated (from the route)
    @return JSON result payload {"result": bool, "msg": str}
    """
    # Idiomatic `not in` membership tests; also require 'dataset' since it
    # is read below — previously a missing 'dataset' key raised a 500
    # instead of returning the failure payload.
    if (not request.json or 'name' not in request.json
            or 'id' not in request.json or 'dataset' not in request.json):
        return jsonify({"result": False, "msg": "Failed to Update Model!"})
    model = Model(request.json['name'], request.json['dataset'])
    result = Model.updateModel(model, mysql)
    if result is True:
        return jsonify({"result": True, "msg": "Successfully Updated Model!"})
    return jsonify({"result": False, "msg": "Failed to Update Model!"})
def __init__(self, genres, label_probs, image_shape, filter_counts,
             unit_counts, resize_shape=None):
    """Record the CNN configuration, then delegate to the base Model."""
    # Architecture hyper-parameters.
    self.filter_counts = filter_counts
    self.unit_counts = unit_counts
    # Input description and label priors.
    self.image_shape = image_shape
    self.label_probs = label_probs
    Model.__init__(self, genres, resize_shape)
def __init__(self, **params):
    """Create a mixture model for components using given weights."""
    Model.__init__(self, **params)
    self.k, self.d = self["k"], self["d"]
    self.weights, self.means = self["w"], self["M"]
    # Drawn as a multinomial distribution: the weights and every column of
    # the mean matrix must each sum to one.
    assert allclose(self.weights.sum(), 1.)
    assert allclose(self.means.sum(0), 1.)
    # Symbolic means; the observed variables coincide with them.
    self.sym_means = sp.symbols('x1:' + str(self.d + 1))
    self.sym_obs = self.sym_means
def getFromAreaType(idAreaType, probability):
    """
    enemy.model.getFromAreaType(idAreaType, probability) -> dict()

    Return a random enemy that the player can encounter in a given area type.

    @param idAreaType dict area where the enemy can be found
    @param probability float the probability of finding an enemy
    @return dict an enemy or None if no enemy can be found in the given
            area type
    """
    # Pick one random creature linked to this area type whose stored
    # encounter probability is at least the given threshold.
    query = "\
        SELECT\
            name,\
            stat_current_hp,\
            stat_max_hp,\
            stat_attack,\
            stat_defence,\
            stat_speed,\
            stat_luck\
        FROM\
            creature\
            JOIN creature_area_type ON creature.id_creature = creature_area_type.id_creature\
        WHERE\
            creature_area_type.id_area_type = ?\
            AND creature_area_type.probability >= ?\
        ORDER BY RANDOM() LIMIT 1\
    "
    return Model.fetchOneRow(query, [idAreaType, probability])
def deleteModel(model_id):
    """Delete the model with the given id and report the outcome as JSON."""
    deleted = Model.deleteModel(model_id, mysql) is True
    if deleted:
        return jsonify({"result": True, "msg": "Successfully Deleted Model!"})
    return jsonify({"result": False, "msg": "Failed to Delete Model!"})
def __init__(self, **params):
    """Create a mixture regression model from its stored parameters."""
    Model.__init__(self, **params)
    self.k = self["k"]            # number of components
    self.d = self["d"]            # dimensionality
    self.weights = self["w"]      # drawn as a multinomial distribution
    self.betas = self["B"]        # regression coefficients per component
    assert allclose(self.weights.sum(), 1.)
    # Input (x) distribution parameters.
    self.mean = self["xM"]
    self.sigma = self["xS"]
    self.sigma_val = self["xSigma"]
    # Symbolic coefficients b1..bd and observations x1..xd plus y.
    upper = str(self.d + 1)
    self.sym_betas = sp.symbols('b1:' + upper)
    self.sym_obs = sp.symbols('x1:' + upper + 'y')
def __init__(self, **params):
    """Create a mixture model for components using given weights."""
    Model.__init__(self, **params)
    # Core mixture parameters.
    self.k = self["k"]
    self.d = self["d"]
    self.weights = self["w"]
    self.betas = self["B"]
    # Weights are drawn as a multinomial distribution, so they sum to one.
    assert allclose(self.weights.sum(), 1.)
    # Input distribution parameters.
    self.mean = self["xM"]
    self.sigma = self["xS"]
    self.sigma_val = self["xSigma"]
    # Symbolic variables: coefficients b1..bd, observations x1..xd and y.
    dim = str(self.d + 1)
    self.sym_betas = sp.symbols('b1:' + dim)
    self.sym_obs = sp.symbols('x1:' + dim + 'y')
def test_fix_up_edges_excluded_edges(self):
    """fix_up_edges with syntax-only exclusions must leave only syntax edges."""
    # Sanity check: the excluded set is exactly the complement of the kept set.
    self.assertEqual(syntax_only_excluded_edge_types,
                     all_edge_types.difference(syntax_only_edge_types))
    for graph, instances in tqdm(self.task.graphs_and_instances):
        graph, _ = Model.fix_up_edges(
            graph, instances,
            excluded_edge_types=syntax_only_excluded_edge_types)
        for edge in graph.edges:
            attrs = edge[3]
            # Strip the prefix that marks the reversed copy of an edge.
            if attrs['type'].startswith('reverse_'):
                attrs['type'] = attrs['type'][len('reverse_'):]
            self.assertIn(attrs['type'], syntax_only_edge_types)
def init_db_models():
    """Seed the Model table from the CSV file at INIT_MODEL_DATA."""
    with open(INIT_MODEL_DATA, 'r') as csvfile:
        rows = csv.reader(csvfile, delimiter=',', quotechar='"')
        next(rows, None)  # Skip header line
        # CSV columns: 0 = model id, 1 = brand id, 2 = model name.
        for row in rows:
            db.session.add(Model(id=row[0],
                                 name=row[2],
                                 brand=Brand.query.get(row[1])))
    db.session.commit()
def __init__(self, model: Model, data: DataImporter, number_epoch: int = 10,
             lr: float = 0.001, momentum: float = -1,
             print_intermediate_perf=True, save_performances=True,
             sheet_name: str = "", location_to_save: str = "",
             parameters_data_input: dict = None, rounding_digit: int = 5,
             adam: bool = True):
    """Set up a training run and the result sheet it will be saved to.

    @param model wrapper exposing `.model` (the torch module) and metadata
    @param data dataset importer used for training/validation batches
    @param lr learning rate; non-positive values fall back to 0.05
    @param momentum SGD momentum; -1 (default) means "no momentum"
    @param parameters_data_input optional extra parameters recorded in the
           saved sheet; may be None
    """
    self._model = model.model
    self._model_info = model
    self._data = data
    self._number_epoch = number_epoch
    # Guard against invalid values: lr must be positive, momentum uses -1
    # as a "not set" sentinel.
    self._lr = lr if lr > 0 else 0.05
    self._momentum = momentum if momentum > -1 else None
    self.criterion = nn.CrossEntropyLoss()
    self.print_val = print_intermediate_perf
    self.save_val = save_performances
    self.sheet_saver = SheetSaver(location_to_save)
    self.device = torch.device(
        "cuda:0" if torch.cuda.is_available() else "cpu")
    self.is_binary_problem = self._model_info.nb_classes_out == 2
    # Skeleton of the spreadsheet that collects run parameters and metrics.
    self.dict_to_save = {
        SheetNames.PARAMETERS.value: {
            ParametersNames.MODEL.value: type(self._model).__name__,
            ParametersNames.NB_EPOCH.value: self._number_epoch,
            ParametersNames.LEARNING_RATE.value: self._lr,
            ParametersNames.MOMENTUM.value: self._momentum
        },
        SheetNames.PARAMETERS_MODELS.value: model.get_parameters(),
        SheetNames.TRAINING.value: {
            TrainingResult.ACCURACY.value: [],
            TrainingResult.LOSS_TRAIN.value: [],
            TrainingResult.LOSS_VAL.value: [],
            TrainingResult.TP.value: [],
            TrainingResult.FP.value: [],
            TrainingResult.FN.value: [],
            TrainingResult.TN.value: [],
            TrainingResult.RECALL.value: [],
            TrainingResult.PRECISION.value: [],
            TrainingResult.CONFUSION_MATRIX.value: []
        },
        SheetNames.RESULT.value: {}
    }
    self.sheet_name = sheet_name
    self.adam = adam
    self.rounding_digit = rounding_digit
    # BUG FIX: parameters_data_input defaults to None, and iterating None
    # raised a TypeError whenever the caller did not supply it.
    if parameters_data_input:
        for element in parameters_data_input:
            self.dict_to_save[SheetNames.PARAMETERS_MODELS.value][element] = \
                parameters_data_input[element]
def __init__(self, name, depth=5, lr=0.001, max_length=822, kernel_size=5,
             filters=100, regularization_factor=0.001, keep_prob=0.5,
             batch_size=200, hidden_size=150):
    """Record the network hyper-parameters, then initialize the base Model."""
    # Optimization / regularization settings.
    self.lr = lr
    self.regularization_factor = regularization_factor
    self.keep_prob = keep_prob
    self.batch_size = batch_size
    # Architecture settings.
    self.depth = depth
    self.filters = filters
    self.kernel_size = kernel_size
    self.hidden_size = hidden_size
    Model.__init__(self, name, max_length)
def generate(fname, k, d, mean="zero", cov="random", betas="random",
             weights="random", dirichlet_scale=10, gaussian_precision=0.01):
    """Generate a mixture of k d-dimensional multi-view gaussians.

    Each of `weights`, `betas`, `mean` and `cov` accepts either a preset
    name or an explicit ndarray; any other value raises NotImplementedError.
    """
    model = Model(fname)
    model.add_parameter("k", k)
    model.add_parameter("d", d)

    # Component weights.
    if weights == "random":
        w = dirichlet(ones(k) * dirichlet_scale)
    elif weights == "uniform":
        w = ones(k)/k
    elif isinstance(weights, sc.ndarray):
        w = weights
    else:
        raise NotImplementedError

    # Regression coefficients, one column per component.
    if betas == "eye":
        B = sc.eye(d)[:, :k]
    elif betas == "random":
        B = sc.randn(d, k)
    elif isinstance(betas, sc.ndarray):
        B = betas
    else:
        raise NotImplementedError

    # Mean of the input distribution.
    if mean == "zero":
        M = zeros(d)
    elif mean == "random":
        M = sc.randn(d)
    elif isinstance(mean, sc.ndarray):
        M = mean
    else:
        raise NotImplementedError

    # Covariance of the input distribution.
    if cov == "eye":
        S = eye(d)
    elif cov == "spherical":
        # Using 1/gamma instead of inv_gamma
        sigma = 1/sc.random.gamma(1/gaussian_precision)
        S = sigma * eye(d)
    elif cov == "random":
        S = gaussian_precision * inv(wishart(d+1, sc.eye(d), 1))
    elif isinstance(cov, sc.ndarray):
        S = cov
    else:
        raise NotImplementedError

    for key, value in (("w", w), ("B", B), ("M", M), ("S", S)):
        model.add_parameter(key, value)

    # Unwrap the store and hand the parameters to the concrete model class.
    return LinearRegressionsMixture(model.fname, **model.params)
def processUserResponse(update, context, user_msg):
    """Route one Telegram message through the questionnaire state machine.

    Depending on the user's state this selects a language, advances an
    in-progress questionnaire, starts a new one, or re-shows the greeting.
    Always ends by sending MESSAGE with REPLY_MARKUP to the chat.
    """
    REPLY_MARKUP = telegram.ReplyKeyboardRemove()
    chat_id = update.effective_chat.id
    # Initiate new user object
    if chat_id not in ACTIVE_USERS:
        ACTIVE_USERS[chat_id] = User(chat_id, update.effective_chat.first_name)
    # Set user language: the message names a config section and the user
    # has not chosen a language yet.
    if chat_id in ACTIVE_USERS.keys() and user_msg in config.sections(
    ) and ACTIVE_USERS[chat_id].getLang() == "DEFAULT":
        ACTIVE_USERS[chat_id].setLang(user_msg)
    # Set chat language
    user_lang = ACTIVE_USERS[chat_id].getLang()
    # No language chosen and several are available: ask for one.
    if user_lang == "DEFAULT" and len(config.sections()) > 1:
        REPLY_MARKUP = lang_reply_markup
        # NOTE(review): this print looks like leftover debugging, and
        # REPLY_MARKUP is assigned the same value twice in this branch.
        print(config.sections())
        MESSAGE = config['DEFAULT']['LANG_MESSAGE']
        REPLY_MARKUP = lang_reply_markup
    # If User questionary is already in process then process user response:
    elif chat_id in ACTIVE_USERS.keys() and ACTIVE_USERS[chat_id].isModel():
        MESSAGE = ACTIVE_USERS[chat_id].getModel().processQuestion(user_msg)
        REPLY_MARKUP = ACTIVE_USERS[chat_id].getModel().getMarkup()
        # If last question in the questionary (status 0): persist answers,
        # detach the questionnaire, and append the goodbye text.
        if ACTIVE_USERS[chat_id].getModel().getStatus() == 0:
            saveAnswers(update, context,
                        ACTIVE_USERS[chat_id].getModel().getAnswers())
            ACTIVE_USERS[chat_id].setModel("NA")
            MESSAGE += config[user_lang]['BYE_MESSAGE']
    # If User is in list, but have not started questionary:
    # Initialize Questionary — the message matched one of the configured
    # categories; the model name is looked up at the same index.
    elif chat_id in ACTIVE_USERS.keys(
    ) and user_msg in config[user_lang]['categories'].split(","):
        ACTIVE_USERS[chat_id].setModel(
            Model(model_name=config[user_lang]['models'].split(",")[
                config[user_lang]['categories'].split(",").index(user_msg)],
                  user_lang=user_lang))
        MESSAGE = ACTIVE_USERS[chat_id].getModel().processQuestion(user_msg)
    # Init new user
    # Show greeting message one more time
    else:
        REPLY_MARKUP = start_reply_markup(user_lang)
        MESSAGE = config[user_lang]['GREETING_WORD'] + " " \
            + ACTIVE_USERS[chat_id].getName() + "! " \
            + config[user_lang]['WELCOME_MESSAGE']
    context.bot.send_message(chat_id=chat_id,
                             text=MESSAGE,
                             parse_mode=telegram.ParseMode.HTML,
                             reply_markup=REPLY_MARKUP)
def get_model(input_channels, input_time_length, dilations=None,
              kernel_sizes=None, padding=False):
    """
    initializes a new Deep4Net and changes the kernel sizes and dilations of
    the network based on the input parameters

    :param input_channels: 1 axis input shape
    :param input_time_length: 0 axis input shape
    :param dilations: dilations of the max-pool layers of the network
    :param kernel_sizes: kernel sizes of the max-pool layers of the network
    :param padding: if padding is to be added
    :return: a Model object, the changed Deep4Net based on the kernel sizes
        and dilation parameters, and the name of the model based on the
        kernel sizes and dilations
    """
    # Default max-pool kernel sizes when none are given.
    if kernel_sizes is None:
        kernel_sizes = [3, 3, 3, 3]
    print('SBP False!!!')
    # Single-output regression network: n_classes=1, no stride before pool.
    model = Model(input_channels=input_channels, n_classes=1,
                  input_time_length=input_time_length, final_conv_length=2,
                  stride_before_pool=False)
    model.make_regressor()
    # `cuda` is a module-level flag — presumably set at import time; moves
    # the network to GPU when available.
    if cuda:
        model.model = model.model.cuda()
    model_name = get_model_name_from_kernel_and_dilation(
        kernel_sizes, dilations)
    changed_model = change_network_kernel_and_dilation(model.model,
                                                       kernel_sizes,
                                                       dilations,
                                                       remove_maxpool=False)
    # print(changed_model)
    return model, changed_model, model_name
def test_fix_up_edges(self):
    """fix_up_edges must add exactly one reversed copy of every edge."""
    for graph, instances in tqdm(self.task.graphs_and_instances):
        for edge in graph.edges:
            self.assertIn(edge[3]['type'], all_edge_types,
                          "Found a weird edge type in the data")
        orig_graph = deepcopy(graph)
        orig_instances = deepcopy(instances)
        graph, instances = Model.fix_up_edges(graph, instances,
                                              excluded_edge_types=frozenset())
        self.assertEqual(orig_instances, instances,
                         "Instances changes when it shouldn't have")
        # Expected edge set: every original edge plus a 'reverse_' twin
        # with source and target swapped.
        expected = [(e[0], e[1], e[3]) for e in orig_graph.edges]
        for e in orig_graph.edges:
            reversed_attrs = deepcopy(e[3])
            reversed_attrs['type'] = 'reverse_' + e[3]['type']
            expected.append((e[1], e[0], reversed_attrs))
        actual = [(e[0], e[1], e[3]) for e in graph.edges]
        self.assertCountEqual(expected, actual)
def getOneFromTypeAndExitId(areaType, idArea):
    """
    Method to get the informations of a place in a given area.

    @param areaType string name of the area type to match
    @param idArea integer id of the area serving as the place's entrance
    @return dict the matching place row joined with its area type
    """
    # Note the argument order: idArea binds to entrance_id, areaType to
    # at.name (reverse of the parameter order).
    query = "\
        SELECT\
            *\
        FROM\
            place AS p\
            JOIN area_type AS at ON p.id_area_type = at.id_area_type\
        WHERE\
            entrance_id = ?\
            AND at.name = ?\
    "
    return Model.fetchOneRow(query, [idArea, areaType])
def getTypes():
    """
    Returns the available types as a dict with ids as keys and labels as
    values.

    @return dict the types
    """
    query = "\
        SELECT\
            id_item_container_type,\
            label\
        FROM\
            item_container_type\
    "
    rows = Model.fetchAllRows(query)
    return {row['id_item_container_type']: row['label'] for row in rows}
def generate( fname, k, d, means = "hypercube", cov = "spherical", weights = "random", dirichlet_scale = 10, gaussian_precision = 0.01 ):
    """Generate a mixture of k d-dimensional gaussians

    Each of `weights`, `means` and `cov` may be a preset name or an
    explicit ndarray; anything else raises NotImplementedError.
    """
    model = Model( fname )
    model.add_parameter( "k", k )
    model.add_parameter( "d", d )

    # Component weights: Dirichlet draw, uniform, or caller supplied.
    if weights == "random":
        w = dirichlet( ones(k) * dirichlet_scale )
    elif weights == "uniform":
        w = ones(k)/k
    elif isinstance( weights, sc.ndarray ):
        w = weights
    else:
        raise NotImplementedError

    if means == "hypercube":
        # Place means at the vertices of the hypercube
        # NOTE(review): writes M[i, i] for i < k, which assumes k <= d —
        # confirm callers never pass k > d.
        M = zeros( (d, k) )
        for i in xrange(k):
            M[i, i] = 1.0
    elif means == "random":
        M = sc.randn( d, k )
    elif isinstance( means, sc.ndarray ):
        M = means
    else:
        raise NotImplementedError

    if cov == "spherical":
        # Using 1/gamma instead of inv_gamma
        sigma = 1/sc.random.gamma(1/gaussian_precision)
        S = array( [ sigma * eye( d ) for i in xrange( k ) ] )
    elif isinstance( cov, sc.ndarray ):
        S = cov
    elif cov == "random":
        S = array( [ gaussian_precision * inv( wishart( d+1, sc.eye( d ), 1 ) ) for i in xrange( k ) ] )
    else:
        raise NotImplementedError

    model.add_parameter( "w", w )
    model.add_parameter( "M", M )
    model.add_parameter( "S", S )

    # Unwrap the store and put it into the appropriate model
    return GaussianMixtureModel( model.fname, **model.params )
def loadByCharacterIdAndTriggerWord(idCharacter, triggerWord):
    """
    Return a character's possible answers for a trigger word, in random
    order.

    @param idCharacter integer id of the character
    @param triggerWord string word that triggers the answers
    @return list matching talk_answer rows
    """
    # Note the bind order: triggerWord first, then idCharacter, matching
    # the placeholder order in the WHERE clause.
    query = "\
        SELECT\
            ta.id_talk_answer,\
            trigger_word,\
            sentence,\
            condition\
        FROM\
            talk_answer ta\
            INNER JOIN character_answer ca\
                ON ca.id_talk_answer = ta.id_talk_answer\
        WHERE\
            trigger_word = ?\
            AND id_character = ?\
        ORDER BY RANDOM()\
    "
    return Model.fetchAllRows(query, (triggerWord, idCharacter))
def getSurroundingPlaces(idArea):
    """
    place.model.getSurroundingPlaces(idArea) -> dict()

    Return the places being in the area given in argument.

    @param idArea integer id of the reference area
    @return list a list of places
    """
    # A place either sits in the area (shown by its name) or has its
    # entrance here (shown as "Exit of <name>").
    query = "\
        SELECT\
            CASE WHEN id_area = ? THEN p.name ELSE 'Exit of ' || p.name END AS name\
        FROM\
            place AS p\
            JOIN area_type AS at ON p.id_area_type = at.id_area_type\
        WHERE\
            id_area = ?\
            OR entrance_id = ?\
    "
    return Model.fetchAllRows(query, [idArea, idArea, idArea])
def getRegionNameFromAreaId(idArea):
    """
    area.model.getRegionNameFromAreaId(idArea) -> string

    Returns the name of the current area's region.

    @param idArea integer id of the reference area
    @return string name of the region
    """
    # NOTE(review): this JOIN has no ON clause, so it is a cross join and
    # the returned row may pair the area with an arbitrary region — confirm
    # whether a join condition (e.g. a.id_region = r.id_region) is missing.
    query = "\
        SELECT\
            r.region_name\
        FROM\
            area AS a\
            JOIN region AS r\
        WHERE\
            a.id_area = ?\
    "
    return Model.fetchOneRow(query, [idArea])['region_name']
def getFromDirection(direction):
    """
    area.model.getFromDirection(direction) -> dict()

    Returns the neighbour of the area given in arguments from a given
    direction.

    @param direction tuple of the area to return, represented by its
           relative values of x and y from idArea ((-1, 0) for example);
           bound to the two placeholders below
    @return dict informations of the found area, empty dict if not found.
    """
    # `model.fields` is a module-level list of column names, so the %s
    # interpolation below does not involve user input.
    query = "\
        SELECT\
            %s\
        FROM\
            area\
        WHERE\
            x = ?\
            AND y = ?\
    " % (', '.join(model.fields))
    return Model.fetchOneRow(query, direction)
def getSurroundingAreas(idArea):
    """
    area.model.getSurroundingAreas(idArea) -> dict()

    Return the available neighbour areas of the area given in argument.

    @param idArea integer id of the reference area
    @return dict a list of directions, with for each direction, True if
            there is an area in this direction, False else.
    """
    # Self-join: `dest` ranges over the cells adjacent to `orig` (x and y
    # deltas of -1, 0 or +1), excluding the area itself.
    query = "\
        SELECT\
            orig.directions\
        FROM\
            area AS orig\
            JOIN area AS dest ON (dest.x = orig.x - 1 OR dest.x = orig.x + 1 OR dest.x = orig.x)\
            AND (dest.y = orig.y - 1 OR dest.y = orig.y + 1 OR dest.y = orig.y)\
            AND orig.id_area <> dest.id_area\
        WHERE\
            orig.id_area = ?\
    "
    return Model.fetchOneRow(query, [idArea])
def add_placeholders(self):
    """Create this network's extra placeholders, then the base Model's."""
    # Defaults mean "no dropout, no regularization" when the values are
    # not fed at run time.
    self.keep_prob = tf.placeholder_with_default(1.0, shape=())
    self.regularization_factor = tf.placeholder_with_default(0.0, shape=())
    Model.add_placeholders(self)
def generate( fname, k, d, n_views = 3, means = "hypercube", cov = "spherical", weights = "random", dirichlet_scale = 10, gaussian_precision = 0.01 ):
    """Generate a mixture of k d-dimensional multi-view gaussians

    Each of `weights`, `means` and `cov` may be a preset name or an
    explicit ndarray; anything else raises NotImplementedError.
    """
    model = Model( fname )
    model.add_parameter( "k", k )
    model.add_parameter( "d", d )
    model.add_parameter( "v", n_views )

    # Component weights: Dirichlet draw, uniform, or caller supplied.
    if weights == "random":
        w = dirichlet( ones(k) * dirichlet_scale )
    elif weights == "uniform":
        w = ones(k)/k
    elif isinstance( weights, sc.ndarray ):
        w = weights
    else:
        raise NotImplementedError

    if means == "hypercube":
        # Place means at the vertices of the hypercube, one matrix per
        # view, shifted by the view index.
        # NOTE(review): the (i+j) % k row index assumes k <= d — confirm.
        M = []
        for i in xrange( n_views ):
            m = zeros( (d, k) )
            for j in xrange(k):
                m[(i+j) % k, (i+j) % k] = 1.0
            M.append( m )
        M = array( M )
    elif means == "random":
        M = []
        for i in xrange( n_views ):
            M.append( sc.randn( d, k ) )
        M = array( M )
    elif isinstance( means, sc.ndarray ):
        M = means
    else:
        raise NotImplementedError

    if cov == "spherical":
        # Using 1/gamma instead of inv_gamma
        S = []
        for i in xrange( n_views ):
            sigma = 1/sc.random.gamma(1/gaussian_precision)
            s = array( [ sigma * eye( d ) for i in xrange( k ) ] )
            S.append( s )
        S = array( S )
    elif isinstance( cov, sc.ndarray ):
        S = cov
    elif cov == "random":
        S = []
        for i in xrange( n_views ):
            # Roughly the largest element if p = 2d ~= 1, so well
            # scaled.
            s = array( [ gaussian_precision * inv( wishart( d+1, sc.eye( d ), 1 ) ) for i in xrange( k ) ] )
            S.append( s )
        S = array( S )
    else:
        raise NotImplementedError

    model.add_parameter( "w", w )
    model.add_parameter( "M", M )
    model.add_parameter( "S", S )

    # Unwrap the store and put it into the appropriate model
    return MultiViewGaussianMixtureModel( model.fname, **model.params )
def factory_model():
    """Create a Model only when none has been created yet.

    @return a new Model when Model.counter is 0, otherwise None
    """
    if Model.counter == 0:
        return Model()
    # Make the "a Model already exists" result explicit instead of
    # silently falling off the end of the function.
    return None
def initialize_model(global_path, image_size, image_format, config, loss_type):
    """Build the model matching loss_type, plus its loss tracker.

    @param global_path base directory for model outputs and checkpoints
    @param image_size size of the input images
    @param image_format format of the input images
    @param config configuration object read by get_model_params
    @param loss_type one of 'hybrid_l1', 'hybrid_l2', 'l1', 'l2',
           'perpetual', 'default'
    @return (model, average_loss) tuple
    @raises NotImplementedError for an unknown loss_type
    """
    torch.cuda.empty_cache()
    gc.collect()
    epochs, lr, leaky_thresh, lamda, beta1, beta2 = get_model_params(config)
    # One model class per supported loss; every branch previously repeated
    # the identical constructor call, so dispatch through a table instead.
    model_classes = {
        'hybrid_l1': Hybrid_L1_Model,
        'hybrid_l2': Hybrid_L2_Model,
        'l1': L1_Model,
        'l2': L2_Model,
        'perpetual': Perpetual_Model,
        'default': Model,
    }
    if loss_type not in model_classes:
        raise NotImplementedError(
            'This Loss function has not been implemented!')
    model = model_classes[loss_type](base_path=global_path,
                                     image_size=image_size,
                                     image_format=image_format,
                                     epochs=epochs,
                                     learning_rate=lr,
                                     leaky_relu=leaky_thresh,
                                     lamda=lamda,
                                     betas=(beta1, beta2))
    average_loss = AverageLoss(os.path.join(global_path, 'Loss_Checkpoints'))
    return model, average_loss
def __init__(self, genres, label_probs, image_shape, hidden_layer_sizes,
             resize_shape=None):
    """Record the MLP configuration before delegating to the base Model."""
    # Input description, label priors, and hidden-layer widths.
    self.image_shape = image_shape
    self.label_probs = label_probs
    self.hidden_layer_sizes = hidden_layer_sizes
    Model.__init__(self, genres, resize_shape)
def from_file(fname):
    """Load model from a HDF file."""
    stored = Model.from_file(fname)
    return LinearRegressionsMixture(fname, **stored.params)
class main(object):
    """Command-line driver for the chatbot.

    Parses flags, prepares training or testing data, builds the model,
    and runs either the training loop or an interactive test session.
    """

    def parse(self):
        """Parse command-line flags (plus optional extras from argument.py)."""
        parser = argparse.ArgumentParser(description="chatbot")
        parser.add_argument('--train', action='store_true',
                            help='whether train')
        parser.add_argument('--test', action='store_true',
                            help='whether test')
        parser.add_argument('--model_restore', action='store_true',
                            help='whether restore the model')
        # Project-specific extra arguments are optional; ignore a missing
        # or broken `argument` module.
        try:
            from argument import add_arguments
            parser = add_arguments(parser)
        except:
            pass
        args = parser.parse_args()
        return args

    def set_parameter(self, args):
        """Choose batch size and mode from the selected action."""
        print('setting parameters...')
        if args.train:
            self.batch_size = 200
            self.mode = 'train'
        elif args.test:
            # Interactive testing answers one query at a time.
            self.batch_size = 1
            self.mode = 'test'

    def set_training_data(self):
        """Load the training set and reset the TF graph."""
        print('getting training data...')
        self.max_len = 25  # maximum sequence length
        dataset = DataManager(max_len = self.max_len)
        self.val_size,self.train_x,self.train_y,self.dictionary = dataset.getTrainData()
        tf.reset_default_graph()

    def set_testing_data(self):
        """Load the test-time vocabulary and reset the TF graph."""
        print('setting testing data...')
        self.max_len = 25
        dataset = DataManager(max_len = self.max_len)
        self.val_size, self.dictionary = dataset.getTestData()
        tf.reset_default_graph()

    def getting_model(self, args):
        """Build and compile the Model; restore weights when needed."""
        print('getting model...')
        if args.train or args.test:
            self.model = Model(batch_size = self.batch_size,
                               val_size = self.val_size,
                               max_len = self.max_len,
                               args = args,
                               dictionary = self.dictionary)
            self.model.compile()
        # Restore when testing, or when resuming an earlier training run.
        if (args.train and args.model_restore) or args.test:
            self.model.restore(mode = self.mode)

    def train(self, args):
        """Train indefinitely, saving the model after every epoch."""
        print('start traing...')
        #start train
        epoch = 0
        # NOTE(review): min_loss is never updated or read below.
        min_loss = math.inf
        # NOTE(review): intentional infinite loop — stopped externally.
        while True:
            epoch += 1
            loss = self.model.fit(self.train_x, self.train_y,
                                  self.batch_size, epoch)
            #store the Model
            self.model.save()

    def test(self, args):
        """Interactive loop: read a sentence, print the model's reply."""
        print('start testing...')
        while True:
            ques = input('請說話...')
            ans = self.model.predict(ques)
            print(ans)
def __init__(self):
    # A model bound to the 'book' table.
    Model.__init__(self, 'book')
class MainController(object):
    """Mediates between the triangle Model and the Qt MainView."""

    def __init__(self):
        # Functions to call when the application closes.
        self._close_funcs = []
        self.model = Model()
        self.fig_widget = None  # created later, in main_view_init
        self.main_view = MainView(self)
        self.main_view.show()

    # subscribe a function for closing on program exit
    def subscribe_close_func(self, func):
        if func not in self._close_funcs:
            self._close_funcs.append(func)

    # unsubscribe a function from closing on program exit
    def unsubscribe_close_func(self, func):
        if func in self._close_funcs:
            self._close_funcs.remove(func)

    # call all close functions on close.
    def announce_close(self):
        # call this function from main application
        for func in self._close_funcs:
            func()

    def main_view_init(self, **kwargs):
        """Receive widget references from the view and populate them."""
        self.main_line_edits = kwargs.pop('lineEdits', None)
        self.main_combo_boxes = kwargs.pop('comboBoxes', None)
        self.main_fig_widget = kwargs.pop('fig_widget', None)
        self.main_deg_rad = kwargs.pop('deg_rad', None)
        self.main_statusbar = kwargs.pop('statusbar', None)
        self.set_line_edits(self.model.values)
        self.set_combo_boxes(self.model.comboBoxItems,
                             self.model.comboBoxIndexes)
        self.update_combo_boxes()
        self.fig_widget = MplFigureCanvas(self)
        self.set_result(self.model.status, self.model.triangle,
                        self.model.deg_rad)
        self.main_fig_widget.addWidget(self.fig_widget)

    def set_line_edits(self, values):
        # Only fill widgets for values that are actually set (truthy).
        for n in range(len(self.main_line_edits)):
            if values[n]:
                self.main_line_edits[n].setText(str(values[n]))

    def set_combo_boxes(self, comboBoxItems, comboBoxIndexes):
        """Replace each combo box's items and current selection."""
        for n in range(len(self.main_combo_boxes)):
            self.main_combo_boxes[n].clear()
            self.main_combo_boxes[n].addItems(comboBoxItems[n])
            self.main_combo_boxes[n].setCurrentIndex(comboBoxIndexes[n])

    def update_combo_boxes(self):
        """Ask the model for new combo-box contents given current selections."""
        keys = []
        for cb in self.main_combo_boxes:
            keys.append(cb.currentText())
        self.set_combo_boxes(*self.model.update_combo_boxes(keys))

    def update_deg_rad(self):
        # Selector index 0 means degrees, anything else radians.
        deg_rad = True if self.main_deg_rad.currentIndex() == 0 else False
        values = self.model.update_deg_rad(deg_rad)
        self.set_line_edits(values)
        self.calculate()

    def calculate(self):
        """Collect the current inputs, run the triangle solver, show result."""
        keys = []
        for cb in self.main_combo_boxes:
            keys.append(cb.currentText())
        values = []
        for le in self.main_line_edits:
            values.append(le.text())
        # The last field is numeric; treat unparsable input as "not given".
        try:
            values[-1] = float(values[-1])
        except ValueError:
            values[-1] = None
        deg_rad = True if self.main_deg_rad.currentIndex() == 0 else False
        status, triangle = self.model.triangle_calc(keys, values, deg_rad)
        self.set_result(status, triangle, deg_rad)

    def set_result(self, status, triangle, deg_rad):
        """Show the solver status and draw/animate the resulting triangle(s)."""
        msg = self.model.status_msg(status)
        self.main_statusbar.showMessage(msg)
        if self.fig_widget.animation_is_running:
            self.fig_widget.stop_animation()
        # Status handling, inferred from usage (confirm against Model):
        # 0 = single solution, 1 = several solutions (animate each),
        # 2 = a path of solutions, anything else = clear the figure.
        if status == 0:
            self.fig_widget.plot(self.model.draw_triangle, triangle, deg_rad)
        elif status == 1:
            list_of_args = [(t, deg_rad) for t in triangle]
            self.fig_widget.start_animation(self.model.draw_triangle,
                                            list_of_args, 2000)
        elif status == 2:
            list_of_args = self.model.path_list_calc(triangle, deg_rad)
            self.fig_widget.start_animation(self.model.draw_path,
                                            list_of_args, 100)
        else:
            self.fig_widget.fig.clf()
            self.fig_widget.draw()
def __init__(self):
    # Functions to call when the application closes.
    self._close_funcs = []
    self.model = Model()
    self.fig_widget = None  # created later, once the view hands over widgets
    # The view takes this controller so it can call back into it.
    self.main_view = MainView(self)
    self.main_view.show()
def __init__(self, genres, label_probs, image_shape, resize_shape=None):
    """Store label priors and input shape, then defer to the base Model."""
    self.image_shape = image_shape
    self.label_probs = label_probs
    Model.__init__(self, genres, resize_shape)
def from_file(fname):
    """Load model from a HDF file."""
    stored = Model.from_file(fname)
    return GaussianMixtureModel(**stored.params)
def from_file(fname):
    """Reconstruct a LinearRegressionsMixture from parameters stored in HDF."""
    return LinearRegressionsMixture(**Model.from_file(fname).params)
import sys

from utils import readjson
from models.SimpleRergression import Linear
from utils.DataLoader import DataLoader
from models.Model import Model

if __name__ == '__main__':
    # The whole run is configured from the JSON file given on the command line.
    config = readjson(sys.argv[1])
    linear = Linear(**config['linear'])
    dataloader = DataLoader(**config['dataloader'])
    # Renamed the local `modal` -> `model` for clarity; the 'modal' config
    # key is kept as-is since it names a section of the JSON file.
    model = Model(linear, dataloader, **config['modal'])
    model.fit()