Example #1
    def __init__(self, model_options, input_options, stock_code):
        """Initializes the model."""

        Model.__init__(self,
                       model_options,
                       input_options,
                       stock_code=stock_code)
Example #2
    def __init__(self, init_avg=1):
        Model.__init__(self)
        self.VERSION = 1
        self.name = "TimeItem-average"
        self.model_time = True

        self._init_avg = init_avg
Example #3
def main():
    left_img = ''
    right_img = ''

    bat_size = 1
    maxdisp = 128

    with tf.Session() as sess:
        PSMNet = Model(sess, height=368, weight=1224, batch_size=bat_size, max_disp=maxdisp, lr=0.0001)
        saver = tf.train.Saver()
        saver.restore(sess, './weights/PSMNet.ckpt-600')

        # cv2.resize takes (width, height); resize to the model's 1224x368 input
        img_L = cv2.cvtColor(cv2.imread(left_img), cv2.COLOR_BGR2RGB)
        img_L = cv2.resize(img_L, (1224, 368))
        img_R = cv2.cvtColor(cv2.imread(right_img), cv2.COLOR_BGR2RGB)
        img_R = cv2.resize(img_R, (1224, 368))

        img_L = DataLoaderKITTI.mean_std(img_L)
        img_L = np.expand_dims(img_L, axis=0)
        img_R = DataLoaderKITTI.mean_std(img_R)
        img_R = np.expand_dims(img_R, axis=0)
        pred = PSMNet.predict(img_L, img_R)

        item = (pred * 255 / pred.max()).astype(np.uint8)
        pred_rainbow = cv2.applyColorMap(item, cv2.COLORMAP_RAINBOW)
        cv2.imwrite('prediction.png', pred_rainbow)
Example #4
    def __init__(self,
                 model_name,
                 fold,
                 model_path,
                 class_num=4,
                 tta_flag=False):
        ''' Handles the segmentation results for one batch of the current fold

        :param model_name: name of the current model
        :param fold: current fold number
        :param model_path: directory holding all the models
        :param class_num: total number of classes
        '''
        self.model_name = model_name
        self.fold = fold
        self.model_path = model_path
        self.class_num = class_num
        self.tta_flag = tta_flag

        # Load the model and its weights
        self.segment_model = Model(self.model_name,
                                   encoder_weights=None).create_model()
        self.segment_model_path = os.path.join(
            self.model_path,
            '%s_fold%d_best.pth' % (self.model_name, self.fold))
        self.solver = Solver(self.segment_model)
        self.segment_model = self.solver.load_checkpoint(
            self.segment_model_path)
        self.segment_model.eval()

        # Load the JSON file holding the pixel thresholds and connected-component min-areas
        self.json_path = os.path.join(self.model_path,
                                      '%s_result.json' % self.model_name)
        self.best_thresholds, self.best_minareas = get_thresholds_minareas(
            self.json_path, self.fold)
Example #5
def infer():
    # the request carries a task id and data; convert the data to the right format
    request_method = request.method

    res = None
    if request_method == "GET":
        file = request.files['file']
        # task_id = request.files['task_id']
        filename = file.filename
        print("File Name : ", filename)
        ext = filename.split(".")[-1]
        if ext == "xlsx":
            # read the spreadsheet directly; a prior file.read() would exhaust the stream
            dframe = pd.read_excel(file, index_col="date")

            model = Model()
            location = config["model"]["save_location"]
            res = model.infer(location, "okP0KEPL", dframe)
        elif ext.lower() == "jpg":
            with torch.no_grad():
                res, text = detect(filename)

    return str(res)
Example #6
class TestModel(TestCase):
    @classmethod
    def setUpClass(cls):
        cls.modelGetPresenterInstancePatch = patch(
            'models.model.Model._getPresenterInstance')
        cls.modelGetPresenterInstanceClass = cls.modelGetPresenterInstancePatch.start()

    # noinspection PyUnresolvedReferences
    @classmethod
    def tearDownClass(cls):
        cls.modelGetPresenterInstancePatch.stop()

    def setUp(self):
        if self.__class__ is not TestModel:
            return

        self.modelGetPresenterInstanceClass.reset_mock()

        self.model = Model()

    def testWhenShowViewCalledNotifySHOW_VIEW(self):
        observer = Mock()
        self.model.subject.addObserver(observer)
        self.model.showView()
        observer.onNotify.assert_called_with(self.model, Model.SHOW_VIEW, ())
Example #7
def test(args, test_iter, TEXT, LABEL, ID, cate_manager, checkpoint):
    # get device
    device = torch.device(args.device)
    model = Model(TEXT, LABEL, dropout=args.dropout,
                  freeze=args.freeze).to(device)
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'])

    # evaluate
    model = model.eval()
    print('====    Testing..   ====')
    start_time = datetime.now()
    all_pred, ids = [[], [], []], []
    for iter_num, batch in enumerate(test_iter):
        ids.extend(batch.item_id.tolist())
        output, result = model(batch, training=False)
        result = cate_manager.merge_weights(result)
        for i in range(len(result)):
            all_pred[i].extend(result[i].max(1)[1].tolist())
    print('time: {}'.format(datetime.now() - start_time))
    with open('../data/out.txt', 'w') as fp:
        fp.write('item_id\tcate1_id\tcate2_id\tcate3_id\n')
        for i in range(len(all_pred[0])):
            fp.write(ID.vocab.itos[ids[i]] + '\t')
            fp.write('\t'.join(
                [LABEL[j].vocab.itos[all_pred[j][i]] for j in range(3)]))
            fp.write('\n')
    print('Result saved in ../data/out.txt')
Example #8
    def calc_feature_importance(self, model, x_train, x_test, y_test, feature_names):
        test_df = pd.DataFrame(y_test)
        cols = test_df.columns.values.tolist()
        if len(cols) == 1:
            target_col = cols[0]
        else:
            target_col = cols
        y_hat = model.predict(x_test)
        pred_df = Model.gen_pred_df(test_df, y_hat, target_col)
        base_score = Evaluator.eval_acc(pred_df)

        num_samples = x_test.shape[0]

        scores = []
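        # permutation importance: shuffle one feature at a time and measure the accuracy drop vs. the baseline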
        for i in range(len(feature_names)):
            x_perm = x_test.copy()
            perm = np.random.permutation(np.array(range(num_samples)))
            x_perm[:, i] = x_test[perm, i]
            
            y_hat_perm = model.predict(x_perm)
            pred_df = Model.gen_pred_df(test_df, y_hat_perm, target_col)
            col_score = Evaluator.eval_acc(pred_df)
            scores.append(base_score-col_score)
        feature_df = pd.DataFrame({'features':feature_names, 'score':scores})
        feature_df = feature_df.sort_values('score',ascending=False)

        return feature_df
Example #9
    def __init__(self,
                 model_options,
                 input_options,
                 stock_code=None,
                 load=False,
                 saved_model_dir=None,
                 saved_model_path=None,
                 build_model=True):
        """Initializes the model. Creates a new model or loads a saved model."""

        Model.__init__(self,
                       model_options,
                       input_options,
                       stock_code=stock_code)

        self.input_shape = get_input_shape(input_options)

        if not load or (saved_model_dir is None and build_model):
            self.build_model()

        else:
            model_path = saved_model_path if saved_model_path is not None else self.get_saved_model_path(
                saved_model_dir)
            if model_path is not None:
                self.load_model(path.join(saved_model_dir, model_path),
                                Model.KERAS_MODEL)
Example #10
    def calc_feature_importance(self, model, x_train, x_test, y_test,
                                feature_names):
        test_df = pd.DataFrame(y_test)
        cols = test_df.columns.values.tolist()
        if len(cols) == 1:
            target_col = cols[0]
        else:
            target_col = cols
        y_hat = model.predict(x_test)
        pred_df = Model.gen_pred_df(test_df, y_hat, target_col)
        base_score = Evaluator.eval_acc(pred_df)

        num_samples = x_test.shape[0]

        scores = []
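        # permutation importance: shuffle one feature at a time and measure the accuracy drop vs. the baseline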
        for i in range(len(feature_names)):
            x_perm = x_test.copy()
            perm = np.random.permutation(np.array(range(num_samples)))
            x_perm[:, i] = x_test[perm, i]

            y_hat_perm = model.predict(x_perm)
            pred_df = Model.gen_pred_df(test_df, y_hat_perm, target_col)
            col_score = Evaluator.eval_acc(pred_df)
            scores.append(base_score - col_score)
        feature_df = pd.DataFrame({'features': feature_names, 'score': scores})
        feature_df = feature_df.sort_values('score', ascending=False)

        return feature_df
Example #11
def evaluate(args, valid_iter, TEXT, LABEL, cate_manager, checkpoint):
    # get device
    device = torch.device(args.device)
    model = Model(TEXT, LABEL, dropout=args.dropout,
                  freeze=args.freeze).to(device)
    if checkpoint is not None:
        model.load_state_dict(checkpoint['model'])

    # evaluate
    model = model.eval()
    print('====  Validating..  ====')
    start_time = datetime.now()
    all_pred, all_label = [[], [], []], [[], [], []]
    for iter_num, batch in enumerate(valid_iter):
        label = (batch.cate1_id, batch.cate2_id, batch.cate3_id)
        output, result = model(batch, training=False)
        result = cate_manager.merge_weights(result)
        for i in range(len(result)):
            all_pred[i].extend(result[i].max(1)[1].tolist())
            all_label[i].extend(label[i].tolist())
    print('time: {}'.format(datetime.now() - start_time))
    print(*[
        'Cate{} F1 score: {}  \t'.format(
            i + 1, f1_score(all_label[i], all_pred[i], average='macro'))
        for i in range(len(LABEL))
    ])
Example #12
    def __init__(self,
                 dataset_name: str,
                 hyperparameters: Dict,
                 infra_s3: Dict,
                 features: list,
                 target: str,
                 h2o_ip: str,
                 data_dir: str,
                 training_job_dir: str = None,
                 clean: bool = False,
                 model_id: str = None):
        Model.__init__(self, dataset_name, hyperparameters, infra_s3, features,
                       target, data_dir, training_job_dir, clean)

        self.ip, self.port = h2o_ip.split(':')

        if model_id:
            self.model_id = model_id
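            # the trailing 23 characters of the model id encode its timestamp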
            timestamp = self.model_id[-23:]
            self.model_filename = os.path.join(
                *(training_job_dir.split('/')[:-1] +
                  [timestamp, self.MODEL_FILENAME]))
        else:
            self.model_id = '-'.join(training_job_dir.split('/')[1:])
            self.model_filename = os.path.join(training_job_dir,
                                               self.MODEL_FILENAME)

        logging.info('======== Model ID ========\n{}'.format(self.model_id))
Example #13
    def __init__(self,
                 session,
                 initial_embeddings: Optional[np.ndarray],
                 static: str = 'non-static',
                 input_length=20,
                 embeddings_dim=50,
                 learning_rate=1,
                 num_filters=100,
                 regularization_rate=0.01,
                 ckpt_file: Optional[str] = None):
        Model.__init__(self)

        self.sess = session
        self.learning_rate = learning_rate
        self.input_length = input_length
        assert static in ['non-static', 'static', 'rand', 'both']
        self.static = static
        self.embeddings_dim = embeddings_dim
        if initial_embeddings is None:
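            # only the 'rand' mode may start without pretrained embeddings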
            assert static == 'rand'
            self.initial_embeddings = np.random.rand(VOCABULARY_SIZE, self.embeddings_dim).astype(np.float32)
        else:
            self.initial_embeddings = initial_embeddings
        self.num_filters = num_filters
        self.regularization_rate = regularization_rate
        if ckpt_file:
            self.ckpt_file = ckpt_file
        else:
            ckpt_dir = os.path.join('tmp', 'models', str(self))
            os.makedirs(ckpt_dir, exist_ok=True)
            self.ckpt_file = os.path.join(ckpt_dir, 'yoon_kim.ckpt')

        self._build_model()
        self._add_training_objectives()
        self._load_or_init()
Example #14
    def get_model(self,
                  x_train,
                  obs_dim,
                  actions_dim,
                  batch_size,
                  layers,
                  opt,
                  mod_out_dir,
                  restore=True,
                  sess=None,
                  env_name=None,
                  y=None,
                  lr=None,
                  lr_sched=None,
                  n_batches=None,
                  lr_show=False,
                  **kwargs):
        if lr:
            mod_out_dir += '_lr={}'.format(lr)
        self.model = Model(x_train, obs_dim, actions_dim, batch_size, layers,
                           opt, mod_out_dir)
        ep, st = None, None
        if restore:
            ep, st = self.model.restore(sess)
        print("Env = {} | {} | Current epoch = {} | timestep = {}".format(
            env_name, self.__class__.__name__, ep, st))
        lr = self.model.init_model(lr, sess, x_train, y, n_batches, lr_show,
                                   **kwargs)
        if ep is None:
            if not lr:
                self.model.checkpoint_dir = mod_out_dir + '_lr={}'.format(lr)
            sess.run(tf.global_variables_initializer())
        return (ep, st, mod_out_dir) if ep is not None else (0, 0, mod_out_dir)
Example #15
    def setup_train(self, model_path=None):
        self.model = Model(model_path, is_tran=config.tran)
        initial_lr = config.lr_coverage if config.is_coverage else config.lr

        params = list(self.model.encoders.parameters()) + list(self.model.decoder.parameters()) + \
                 list(self.model.reduce_state.parameters())
        total_params = sum(param.nelement() for param in params)
        print('The number of model params: %.3f million' %
              (total_params / 1e6))  # million
        self.optimizer = optim.Adagrad(
            params,
            lr=initial_lr,
            initial_accumulator_value=config.adagrad_init_acc)

        start_iter, start_loss = 0, 0

        if model_path is not None:
            state = torch.load(model_path,
                               map_location=lambda storage, location: storage)
            start_iter = state['iter']
            start_loss = state['current_loss']

            if not config.is_coverage:
                self.optimizer.load_state_dict(state['optimizer'])
                if use_cuda:
                    for state in self.optimizer.state.values():
                        for k, v in state.items():
                            if torch.is_tensor(v):
                                state[k] = v.cuda()

        return start_iter, start_loss
Example #16
    def __init__(self,
                 model_options,
                 input_options,
                 stock_code=None,
                 load=False,
                 saved_model_dir=None,
                 saved_model_path=None):
        """Initializes the model. Creates a new model or loads a saved model."""

        Model.__init__(self,
                       model_options,
                       input_options,
                       stock_code=stock_code)

        # See the scikit-learn SVR documentation for details
        if not load or saved_model_dir is None:
            self.model = [
                SVR(kernel=self.model_options["kernel"],
                    degree=self.model_options["degree"],
                    gamma=self.model_options["gamma"],
                    coef0=self.model_options["coef0"],
                    tol=self.model_options["tol"],
                    C=self.model_options["C"],
                    epsilon=self.model_options["epsilon"],
                    shrinking=self.model_options["shrinking"],
                    max_iter=self.model_options["max_iter"])
                for _ in range(model_options["predict_n"])
            ]
        else:
            model_path = saved_model_path if saved_model_path is not None else self.get_saved_model_path(
                saved_model_dir)
            if model_path is not None:
                self.load_model(path.join(saved_model_dir, model_path),
                                self.SKLEARN_MODEL)
Example #17
  def __init__(self, hyperparameters, dataset, log_dir="runs", verbose=True, print_every=100):
    self.hyperparameters = hyperparameters
    self.dataset = dataset
    self.log_dir = log_dir
    self.verbose = verbose
    self.print_every = print_every

    self.model = Model(self.hyperparameters['sketchy_mode'],
                       self.hyperparameters['intensive_mode'],
                       self.hyperparameters['pool_mode'],
                       self.hyperparameters['vocab_size'],
                       self.hyperparameters['hidden_size'],
                       self.hyperparameters['embedding_length'],
                       self.hyperparameters['word_embeddings'],
                       self.hyperparameters['bert_encoder'],
                       self.hyperparameters['num_passes'],
                       self.hyperparameters['skip_memory'])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    self.model = self.model.to(device)
    if self.hyperparameters['loss_mode'] == 'parallel':
      self.loss = self.model.retrospective_parallel_loss
    elif self.hyperparameters['loss_mode'] == 'span':
      self.loss = self.model.retrospective_loss_span
    else:
      self.loss = self.model.retrospective_loss
Example #18
    def save(self, saved_model_dir):
        Model.create_model_dir(self, saved_model_dir + "/" + self.model_options["stock_code"])

        model_name = self.get_model_name()
        model_path = self.model_options["stock_code"] + "/" + model_name

        with open(saved_model_dir + "/" + model_path, "wb") as model_file:
            pickle.dump(self.model, model_file)

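        # Record this model's metadata in models_data.json, keyed by stock code and model type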
        if os.path.isfile(saved_model_dir + "/" + "models_data.json"):
            with open(saved_model_dir + "/" + "models_data.json", "r") as models_data_file:
                models_data = json.load(models_data_file)
        else:
            models_data = {"models": {}}

        if self.model_options["stock_code"] not in models_data["models"]:
            models_data["models"][self.model_options["stock_code"]] = {}

        model_type = self.get_model_type()

        if model_type not in models_data["models"][self.model_options["stock_code"]]:
            models_data["models"][self.model_options["stock_code"]][model_type] = []

        model_data = self.model_options
        model_data["model_name"] = model_name
        model_data["model_path"] = model_path

        models_data["models"][self.model_options["stock_code"]][model_type].append(model_data)

        with open(saved_model_dir + "/" + "models_data.json", "w") as models_data_file:
            json.dump(models_data, models_data_file)
Example #19
    def __init__(self,
                 model_name,
                 fold,
                 model_path,
                 class_num=4,
                 tta_flag=False):
        self.model_name = model_name
        self.fold = fold
        self.model_path = model_path
        self.class_num = class_num
        self.tta_flag = tta_flag

        self.segment_model = Model(self.model_name,
                                   encoder_weights=None).create_model()
        self.segment_model_path = os.path.join(
            self.model_path,
            '%s_fold%d_best.pth' % (self.model_name, self.fold))
        self.solver = Solver(self.segment_model)
        self.segment_model = self.solver.load_checkpoint(
            self.segment_model_path)
        self.segment_model.eval()

        self.json_path = os.path.join(self.model_path,
                                      '%s_result.json' % self.model_name)
        self.best_thresholds, self.best_minareas = get_thresholds_minareas(
            self.json_path, self.fold)
Example #20
    def __init__(self, sess, args):
        self.args = args
        self.model = Model(sess,
                           optimizer_params={'learning_rate': args.learning_rate,
                                             'alpha': 0.99,
                                             'epsilon': 1e-5},
                           args=self.args)
        self.trainer = Trainer(sess, self.model, args=self.args)
        self.env_class = A2C.env_name_parser(self.args.env_class)
Example #21
def new_task():
    request_method = request.method
    response = {}  # initialized here so non-POST requests return an empty response

    if request_method == "POST":
        # Extract the Json string and convert it into an object
        # generate unique id, and add to the task object with key "_id"

        # replace the task element of config with task object
        letters_and_digits = string.ascii_letters + string.digits
        _id = ''.join((random.choice(letters_and_digits) for i in range(8)))
        print("Unique ID is:", _id)
        print(request.get_json())
        task_object = request.get_json()
        task_object["task_object"]["_id"] = _id

        config["task"] = task_object["task_object"]

        task = Task(config)
        data = Data.load_data_id(config["data"]["save_location"],
                                 config["task"]["data_id"])
        model = Model(config, task, data)
        best_res = model.train(save_model=True)
        response["best_res"] = str(best_res)
        response["_id"] = _id

    return response
Example #22
def alerts():
    """
    Monitoring stats route
    Use 'curl -O /static/alerts/video.mp4' to get video files
    """
    alert = Model(table='alerts')
    alerts = alert.read_all()
    resp = dict()
    resp['total'] = len(alerts)

    if request.args.get("detailed") == 'false':
        current_date = datetime.datetime.now()
        alerts_by_day = OrderedDict()
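        # seed zero counts for the last five days, oldest first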
        for i in range(4, -1, -1):
            alerts_by_day[(current_date - datetime.timedelta(days=i)).strftime('%x')] = 0
        for al in alerts:
            if al[2].split()[0] in alerts_by_day.keys():
                alerts_by_day[al[2].split()[0]] += 1
        return json.dumps(alerts_by_day)

    else:
        resp['by_day'] = [
            list(group) for k, group in itertools.groupby(
                alerts, lambda d: datetime.datetime.strptime(d[2].split(' ')[0], '%x')
            )]
        resp['by_webcam'] = [
            list(group) for k, group in itertools.groupby(
                alerts, lambda d: d[1])
        ]

        if is_browser_request():
            return render_template('alerts.html', alerts=resp)
        return json.dumps(resp), 200
Example #23
def loadModelAndData(num):
    # Load dictionaries
    input_lang_index2word, output_lang_index2word, input_lang_word2index, output_lang_word2index = util.loadDictionaries(
        mdir=args.data_dir)
    # pp added: load intents
    intent2index, index2intent = util.loadIntentDictionaries(
        intent_type=args.intent_type,
        intent_file='{}/intents.json'.format(
            args.data_dir)) if args.intent_type else (None, None)

    # Reload existing checkpoint
    model = Model(args, input_lang_index2word, output_lang_index2word,
                  input_lang_word2index, output_lang_word2index, intent2index)
    model = model.to(detected_device)
    if args.load_param:
        model.loadModel(iter=num)

    # Load the validation file list
    with open('{}/val_dials.json'.format(args.data_dir)) as outfile:
        val_dials = json.load(outfile)

    # Load the test file list
    with open('{}/test_dials.json'.format(args.data_dir)) as outfile:
        test_dials = json.load(outfile)

    return model, val_dials, test_dials, input_lang_word2index, output_lang_word2index, intent2index, index2intent
Example #24
    def setUp(self):
        if self.__class__ is not TestModel:
            return

        self.modelGetPresenterInstanceClass.reset_mock()

        self.model = Model()
Example #25
    def __init__(self, gamma_0=None, gamma_f=None, n=None):
        """
        Initialise when we create a the model.
        """

        # We initialise gamma_0, gamma_f and n if the user entered something.
        Model.__init__(self, gamma_0, gamma_f, n)
        '''
        We define the fixed attributes (which won't change across all the equations of this model).
        display : how we show it to the user when asking them to fill in the value
        var_name : how the variable is written in the .conf file
        value : the value of the variable. Set to "" (empty string) by default.
        '''
        # FILL HERE (3/4): list the attributes of your model (the specific variables the user needs to provide)
        self.fixed_attributes = [{
            "display": "a",
            "var_name": "a_spec",
            "value": ""
        }, {
            "display": "b",
            "var_name": "b_spec",
            "value": ""
        }, {
            "display": "d",
            "var_name": "d_spec",
            "value": ""
        }]

        # We run the setup function to ask the user for every attribute.
        self.setup()
Example #26
def get_models():
    # regression grid search parameters
    nnr_grid_params = {'n_neighbors': [1]}
    ridge_grid_params = {'alpha': np.logspace(-5, 2, 5)}
    svr_grid_params = {
        'C': np.logspace(2, 4, 5),
        'gamma': np.logspace(-3, 1, 5)
    }
    # classification grid search parameters
    nnc_grid_params = {'n_neighbors': [1]}
    logreg_grid_params = {'solver': ['lbfgs'], 'C': np.logspace(-1, 4, 5)}
    svc_grid_params = {
        'C': np.logspace(-1, 4, 5),
        'gamma': np.logspace(-2, 2, 5)
    }

    models = {}
    # regression models
    models['ridge'] = Model(Ridge, ridge_grid_params)
    #    models['svr'] = Model(SVR, svr_grid_params)
    # classification models
    models['logreg'] = Model(LogisticRegression,
                             logreg_grid_params,
                             classification=True)
    #    models['svc'] = Model(SVC,
    #                          svc_grid_params,
    #                          classification=True)
    # dumb models
    models['nnr'] = Model(KNeighborsRegressor, nnr_grid_params)
    models['nnc'] = Model(KNeighborsClassifier,
                          nnc_grid_params,
                          classification=True)
    return models
Example #27
def main():
    # capture the run arguments, then process the json configuration file
    # (the config path is hard-coded here rather than taken from the arguments)
    try:
        args = get_args()
        config = process_config("C:\\Pets\\configs\\config.json")

    except Exception as e:
        print("missing or invalid arguments: {}".format(e))
        exit(1)

    # create the experiments dirs
    create_dirs([config.summary_dir, config.checkpoint_dir, config.export_dir])
    # create your data generator
    # dataset = DataGenerator(config)
    # data = dataset.dataset.make_initializable_iterator().get_next()

    # create an instance of the model you want
    model = Model(config)
    if (config.mode == "train"):
        model.training(config)
    if (config.mode == "validation"):
        model.validate(config)
    if (config.mode == "test"):
        model.test(config)
    if (config.mode == "train_and_eval"):
        model.train_and_eval(config)
Example #28
    def test_model_function(self):
        tests = Features().get_tests().keys()
        model1 = Model(tests)
        model1.fit(x, y)
        fm = FinkMos(x, x, model1.tests, model1.tag_corpus)
        a = model1.model_function(1, 3, [2, 3], fm)
        print("model function result")
        print(a)
Example #29
    def __init__(self, controller):
        super().__init__(controller)
        self.__model = Model(800, 400, 10)
        self.__view = View('View', self.__model)
        self.__model.add_observer(self.__view)

        controller.model = self.__model
        controller.view = self.__view
Example #30
    def __init__(self, feature_set, predict_table_name):
        Model.__init__(self, feature_set)
        self.predict_table_name = predict_table_name
        self.x = None
        self.y = None
        self.clf = None
        self.predict_x = None
        self.predict_id = None
        self.vectorizer = None
Example #31
    def train(self, parameters):
        self.one_search_data.clear()
        self.one_search_data['parameters'] = vars(parameters)
        image_path = self.configer['trainingImagePath']
        label_path = self.configer['trainingLabelPath']
        training_csv = utils.get_csv_by_path_name(label_path)
        transforms = utils.get_transforms(parameters)
        isic_dataset = ISICDataset(image_path, training_csv[0], transforms)
        isic_dataset.__assert_equality__()
        trainingdata_loader = DataLoader(isic_dataset, batch_size=parameters.batchsize, shuffle=True, drop_last=True)
        self.model = Model(parameters)  # build the model from the given parameters
        optimizer = self.model.optimizer
        criteria = self.model.loss_function
        epoch_statics_list = []  # store epoch loss and training accuracy
        self.model.train()
        self.is_abandoned = 0
        for EPOCH in range(self.setting.epoch):
            if EPOCH > 1:
                loss_descend_rate = epoch_statics_list[-1]['AVG LOSS']/epoch_statics_list[-2]['AVG LOSS']
                if loss_descend_rate >= self.setting.lossDescendThreshold and EPOCH < 10:
                    print('current loss descent rate is %f, larger than threshold %f; abandoning this SPD' % (loss_descend_rate, self.setting.lossDescendThreshold))
                    self.is_abandoned = 1
                    break
            epoch_statics_dict = {}  # records this epoch's training statistics
            loss_all_samples_per_epoch = 0  # total loss over all batches in this epoch
            train_accuracy = 0  # training accuracy per epoch
            for idx, (x, y) in tqdm(enumerate(trainingdata_loader)):
                batch_statics_dict = {}
                x = x.to(self.device)
                y = torch.argmax(y, dim=1)
                y_hat = self.model.network(x.float())
                train_accuracy += (y.to(self.device) == torch.argmax(y_hat, dim=1)).sum().item()

                loss = criteria(y_hat, y.long().to(self.device))
                loss_all_samples_per_epoch += loss.item()  # loss.item() is the mean loss over the batch
                # the data passed on is a dict: the epoch first, then loss-function name: value
                batch_statics_dict['EPOCH'] = EPOCH
                batch_statics_dict[parameters.lossfunction] = loss.item()
                # loss_dict_print: per epoch, loss-function name -> list of values
                # visualizer.get_data_report(batch_statics_dict)
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            loss_avg_per_epoch = loss_all_samples_per_epoch / (idx + 1)  # mean loss per input this epoch; idx starts at 0, so add 1
            train_accuracy_epoch = train_accuracy / len(isic_dataset)  # training accuracy / number of samples
            epoch_statics_dict['EPOCH'] = EPOCH
            epoch_statics_dict['AVG LOSS'] = loss_avg_per_epoch

            epoch_statics_dict['TRAINING ACCURACY'] = train_accuracy_epoch

            pkl_name = self.model.save_model(self.logger.date_string, self.logger.start_time_string)  # save the nn every epoch
            epoch_statics_dict['saved_model'] = pkl_name
            epoch_statics_list.append(epoch_statics_dict)  # record epoch loss for drawing
            print('epoch %s finished ' % EPOCH)
            self.visualizer.get_data_report(epoch_statics_dict)
        self.one_search_data['training_statics'] = epoch_statics_list
        self.logger.set_training_data(self.one_search_data)
Example #32
    def __init__(self, ph):
        Model.__init__(self, ph)
        self.input_shape = INPUT_SHAPES[ph['dataset']]
        self.output_shape = OUTPUT_SHAPES[self.ph['dataset']]
        self.pretrained_model_fp = self.ph.setdefault('pretrained_model_fp',
                                                      None)

        if self.pretrained_model_fp:
            print('training on a pretrained model')
Example #33
    def __init__(self, model_options, load=False, saved_model_dir=None, saved_model_path=None):
        Model.__init__(self, model_options)

        if not load or saved_model_dir is None:
            self.model = linear_model.LinearRegression()
        else:
            model_path = saved_model_path if saved_model_path is not None else self.get_saved_model_path(saved_model_dir)
            if model_path is not None:
                with open(saved_model_dir + "/" + model_path, "rb") as model_file:
                    self.model = pickle.load(model_file)
Example #34
def paper(paper_id):
    model = Model()
    paper_list = model.get_all_paper()
    max_paper = model.get_max_paper()
    try:
        pic_info = model.get_pic_info(paper_id)
        article_list = model.get_article_list(pic_info.id)
        page_id = pic_info.id
    except IndexError as e:
        return "This newspaper page does not exist!"
Example #35
def page(page_id):
    model = Model()
    print(page_id)
    paper_list = model.get_all_paper()
    max_paper = model.get_max_paper()
    try:
        pic_info = model.get_page_info(page_id)
        print(pic_info)
        article_list = model.get_article_list(pic_info.id)
    except IndexError as e:
        return "<h1>This page does not exist!</h1>"
Example #36
    def __init__(self, alpha=1.0, beta=0.1, KC=3.5, KI=2.5):
        Model.__init__(self)
        self.VERSION = 1
        self.name = "Hierarchical"

        self._alpha = alpha
        self._beta = beta
        self._KC = KC
        self._KI = KI

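        # hyperbolic decay: weight falls off as alpha / (1 + beta * x)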
        self.decay_function = lambda x: alpha / (1 + beta * x)
Example #37
    def __init__(self, alpha=1.0, beta=0.1, KC=1, KI=1):
        Model.__init__(self)
        self.VERSION = 2
        self.name = "Prior-current"

        self._alpha = alpha
        self._beta = beta
        self._KC = KC
        self._KI = KI

        self.decay_function = lambda x: alpha / (1 + beta * x)
Example #38
    def __init__(self, alpha=0.8, beta=0.08, KC=0.075, KI=0.1):
        Model.__init__(self)
        self.VERSION = 4
        self.name = "TimeHierarchical"
        self.model_time = True

        self._alpha = alpha
        self._beta = beta
        self._KC = KC
        self._KI = KI

        self.decay_function = lambda x: alpha / (1 + beta * x)
Example #39
def index():
    model = Model()
    paper_list = model.get_all_paper()  # fetch the list of all issues
    max_paper = model.get_max_paper()
    paper_id = max_paper.id
    try:
        pic_info = model.get_pic_info(paper_id)
        print(pic_info)
        article_list = model.get_article_list(pic_info.id)
        page_id = pic_info.id
    except IndexError as e:
        return "so sad, the page has gone"
Example #40
    def __init__(self, alpha=1.0, beta=0.1, K=1, init_avg=0, floating_start=True):
        Model.__init__(self)
        self.VERSION = 3
        self.name = "Basic-Time"
        self.model_time = True

        self._alpha = alpha
        self._beta = beta
        self._K = K
        self._init_avg = init_avg
        self._floating_start = floating_start

        self.decay_function = lambda x: alpha / (1 + beta * x)
Example #41
    def __init__(self, alpha=1.0, beta=0.1, K=1, concepts=None):
        Model.__init__(self)
        self.VERSION = 2
        self.name = "TimeConcepts"
        self.model_time = True

        self._alpha = alpha
        self._beta = beta
        self._K = K
        self._concepts = sorted(concepts.keys()) if concepts is not None else "All"
        self._init_concept_map(concepts)

        self.decay_function = lambda x: alpha / (1 + beta * x)
Example #42
    def __init__(self, alpha=1.0, beta=0.1, KC=1, KI=1, init_avg=0, first_level=3):
        Model.__init__(self)
        self.VERSION = 4
        self.name = "Prior-Current-Time"
        self.model_time = True

        self._alpha = alpha
        self._beta = beta
        self._KC = KC
        self._KI = KI
        self._init_avg = init_avg
        self._first_level = first_level

        self.decay_function = lambda x: alpha / (1 + beta * x)
Example #43
def submit():
    article = {}
    article['title'] = request.form.get('title', None)
    article['sub_title'] = request.form.get('sub_title', None)
    article['content'] = request.form.get('content', None)
    article['reply_title'] = request.form.get('reply_title', None)
    article['author'] = request.form.get('author', '')
    article['keyword'] = request.form.get('Nkeyword', '')
    article['has_pic'] = request.form.get('has_pic', 'a')
    article['show_author'] = request.form.get("show_author", 0)
    article['time'] = datetime.now()
    model = Model()
    result = model.insert_article(7421, article)
    print(result)
    # result = model.insert_article(article)
    return "kkk"
Example #44
def admin():
    if is_login():
        model = Model()
        param = request.args
        arr = check_param(param)
        print(arr)
        if arr['paper_num'] is None:
            arr['paper_num'] = model.get_max_paper().num
        paper_list = model.get_paper_list(arr['limit'] * (arr['current_id'] - 1), arr['limit'])
        paper_count = model.get_paper_count()
        paper_info = model.get_paper(arr['paper_num'])
        print(paper_info)
        data = {}
        data['paper_list'] = paper_list
        data['count'] = int(paper_count) // int(arr['limit']) + 1
        data.update(arr.copy())
        return render_template("admin.html", data=data,paper_info=paper_info)
    return redirect(url_for("login"))
Example #45
def article(article_id):
    data = {'now': datetime.now().strftime('%Y年%m月%d日')}
    week = datetime.now().isoweekday()
    data['week'] = format_week(week)
    model = Model()
    paper_list = model.get_all_paper()
    max_paper = model.get_max_paper()
    try:
        article_info = model.get_article_info(article_id)
        print(article_info.paper_id)
        pic_info = model.get_page_info(article_info.page_id)
        data['current_num'] = model.get_paper_info(article_info.paper_id)  # current issue number
    except Exception as e:
        return "so sad, article not found!"
Example #46
    def do_task(self):
        while True:
            scrawler = self.get_task()
            if scrawler:
                scrawler.run()


if __name__ == "__main__":
    """
    解决 UnicodeEncodeError: 'ascii' codec can't encode characters
    import sys
    reload(sys)
    sys.setdefaultencoding('utf-8')
    """
    import sys

    reload(sys)
    sys.setdefaultencoding('utf-8')

    db_settings = dict(
        use_unicode=True,
        charset="utf8",
        host=options.mysql_host,
        db=options.mysql_database,
        user=options.mysql_user,
        passwd=options.mysql_password,
    )
    Model.initailize(db_settings)
    rss_manager = RssManagerPool.instance(5, 5)
    rss_manager.run()
Example #47
def init():
    session = DBSession()

    t1 = session.query(Template).filter_by(name=u'新闻模板').first()
    t2 = session.query(Template).filter_by(name=u'职位模板').first()

    job = Model(name='job', title=u'职位')
    job.template = t2

    #title = Field(name='title', title=u'名称', type='string', length=32, required=True)
    point = Field(name='point', title=u'点击数', type='integer')
    content1 = Field(name='content', title=u'内容', type='text')

    #session.add(title)
    session.add(point)
    session.add(content1)

    #job.fields.append(title)
    job.fields.append(point)
    job.fields.append(content1)

    #r1 = Relation(name='user', title=u'创建者', type='many-to-one', target='User', backref='jobs')
    #r2 = Relation(name='categories', title=u'栏目', type='many-to-many', target='Category', backref='jobs')

    #session.add(r1)
    #session.add(r2)

    #job.relations.append(r1)
    #job.relations.append(r2)

    session.add(job)


    news = Model(name='news', title=u'新闻')
    news.template = t1

    #title = Field(name='title', title=u'标题', type='string', length=32, required=True)
    keywords = Field(name='keywords', title=u'关键词', type='string', length=64)
    summary = Field(name='summary', title=u'摘要', type='string', length=255)
    content2 = Field(name='content', title=u'内容', type='text')

    #session.add(title)
    session.add(keywords)
    session.add(summary)
    session.add(content2)

    #news.fields.append(title)
    news.fields.append(keywords)
    news.fields.append(summary)
    news.fields.append(content2)

    #r3 = Relation(name='user', title=u'创建者', type='many-to-one', target='User', backref='news')
    #r4 = Relation(name='categories', title=u'栏目', type='many-to-many', target='Category', backref='news')

    #session.add(r3)
    #session.add(r4)

    #news.relations.append(r3)
    #news.relations.append(r4)

    session.add(news)

    session.commit()
Example #48
def init_db_settings():
    """initialize database the database tables."""
    Model.initailize(settings.db_setting)
Example #49
    def __init__(self, prediction_model, time_model):
        Model.__init__(self)
        self._prediction_model = prediction_model
        self._time_model = time_model