Example #1
    def __init__(self, opt):
        """Initialize the pix2pix class.

        Parameters:
            opt (Option class)-- stores all the experiment flags; needs to be a subclass of BaseOptions
        """
        BaseModel.__init__(self, opt)
        # specify the training losses you want to print out. The training/test scripts will call <BaseModel.get_current_losses>
        self.loss_names = ['G', 'G_GAN', 'G_L1', 'D_real', 'D_fake']
        # specify the images you want to save/display. The training/test scripts will call <BaseModel.get_current_visuals>
        # self.visual_names = ['real_A', 'fake_B', 'real_B']
        self.visual_names = ['cloth_decoded', 'fakes_scaled', 'textures_unnormalized']
        # specify the models you want to save to the disk. The training/test scripts will call <BaseModel.save_networks> and <BaseModel.load_networks>
        if self.is_train:
            self.model_names = ['G', 'D']
        else:  # during test time, only load G
            self.model_names = ['G']
        # define networks (both generator and discriminator)
        self.net_G = define_G(opt.cloth_channels + 36, opt.texture_channels, 64, "unet_128", opt.norm, True, opt.init_type, opt.init_gain).to(self.device)

        if self.is_train:  # define a discriminator; conditional GANs need both input and output images, so the discriminator's channel count is input_nc + output_nc
            self.net_D = define_D(opt.cloth_channels + 36 + opt.texture_channels, 64, opt.discriminator, opt.n_layers_D, opt.norm, opt.init_type, opt.init_gain).to(self.device)

        if self.is_train:
            # define loss functions
            use_smooth = True if opt.gan_label_mode == "smooth" else False
            self.criterionGAN = GANLoss(opt.gan_mode, smooth_labels=use_smooth).to(self.device)
            self.criterionL1 = torch.nn.L1Loss()
            # initialize optimizers; schedulers will be automatically created by function <BaseModel.setup>.
            self.optimizer_G = torch.optim.Adam(self.net_G.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
            self.optimizer_D = torch.optim.Adam(self.net_D.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
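A minimal sketch of the optimization step such a class typically pairs with, following the standard pix2pix recipe; the attribute names real_A/real_B/fake_B and the L1 weight are assumptions here, since the snippet above only defines the networks, criteria, and optimizers:

    def optimize_parameters(self):
        # hypothetical standard pix2pix update; real_A/real_B are assumed names
        self.fake_B = self.net_G(self.real_A)                      # G(A)
        # --- update D ---
        self.optimizer_D.zero_grad()
        fake_AB = torch.cat((self.real_A, self.fake_B), 1)
        pred_fake = self.net_D(fake_AB.detach())                   # block G gradients
        loss_D_fake = self.criterionGAN(pred_fake, False)
        real_AB = torch.cat((self.real_A, self.real_B), 1)
        loss_D_real = self.criterionGAN(self.net_D(real_AB), True)
        self.loss_D = (loss_D_fake + loss_D_real) * 0.5
        self.loss_D.backward()
        self.optimizer_D.step()
        # --- update G ---
        self.optimizer_G.zero_grad()
        pred_fake = self.net_D(torch.cat((self.real_A, self.fake_B), 1))
        self.loss_G_GAN = self.criterionGAN(pred_fake, True)
        self.loss_G_L1 = self.criterionL1(self.fake_B, self.real_B) * 100.0  # assumed weight
        self.loss_G = self.loss_G_GAN + self.loss_G_L1
        self.loss_G.backward()
        self.optimizer_G.step()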
Example #2
    def __init__(self, dim_konf, dim_data, atype, region):
        BaseModel.__init__(self, dim_konf, atype, region)
        n_hidden = 32
        self.features = \
            torch.nn.Sequential(
                torch.nn.Conv2d(1, n_hidden, kernel_size=(1, self.dim_konf + 4 + 4 + 4 + 2)),
                torch.nn.LeakyReLU(),
                torch.nn.Conv2d(n_hidden, n_hidden, kernel_size=(1, 1)),
                torch.nn.LeakyReLU(),
                torch.nn.Conv2d(n_hidden, n_hidden, kernel_size=(1, 1)),
                torch.nn.LeakyReLU(),
                torch.nn.MaxPool2d(kernel_size=(2, 1)),
                torch.nn.Conv2d(n_hidden, n_hidden, kernel_size=(1, 1)),
                torch.nn.LeakyReLU(),
                torch.nn.MaxPool2d(kernel_size=(2, 1))
            )

        self.value = \
            torch.nn.Sequential(
                torch.nn.Linear(self.dim_cnn_features, 32),
                torch.nn.ReLU(),
                torch.nn.Linear(32, 32),
                torch.nn.ReLU(),
                torch.nn.Linear(32, 1)
            )
Example #3
    def __init__(self, dim_data, atype, region, problem_name):
        BaseModel.__init__(self, atype, region, problem_name)

        self.konf_net = \
            nn.Sequential(
                torch.nn.Linear(self.n_konfs*self.dim_konf, self.n_hidden),
                nn.ReLU(),
                torch.nn.Linear(self.n_hidden, self.n_hidden),
                nn.ReLU()
            )
        self.pose_net = \
            nn.Sequential(
                torch.nn.Linear(self.dim_pose_ids, self.n_hidden),
                nn.ReLU(),
                torch.nn.Linear(self.n_hidden, self.n_hidden),
                nn.ReLU()
            )

        dim_actions = dim_data
        self.action_net = \
            nn.Sequential(
                torch.nn.Linear(dim_actions, self.n_hidden),
                nn.ReLU(),
                torch.nn.Linear(self.n_hidden, self.n_hidden),
                nn.ReLU()
            )

        dim_input = self.n_hidden * 3
        self.output = \
            nn.Sequential(
                torch.nn.Linear(dim_input, self.n_hidden),
                nn.ReLU(),
                torch.nn.Linear(self.n_hidden, 1)
            )
Example #4
def predsFromModel(_modelName):
    # Initialise nominal model
    model = Model(len(test[0]), 1)
    model.load_model(_modelName)

    # Make a vector of outputs for the signal sample
    _comp_preds = []
    for (batchX, batchY) in next_batch(test, probs_test, batchSize):
        if batchY.shape[0] < batchSize:
            print('Batch size insufficient (%s), continuing...' %
                  batchY.shape[0])
            continue

        output = model.evaluate_total(batchX, debug=False)

        _comp_preds.extend(output.T)

    # Make a vector of outputs for the background sample
    _comp_preds_bkg = []
    for (batchX, batchY) in next_batch(test_bkg, probs_test_bkg, batchSize):
        if batchY.shape[0] < batchSize:
            print('Batch size insufficient (%s), continuing...' %
                  batchY.shape[0])
            continue

        output = model.evaluate_total(batchX, debug=False)

        _comp_preds_bkg.extend(output.T)
    return _comp_preds, _comp_preds_bkg
Example #5
    def __init__(self, env, save_dirs, learning_rate=0.0001):
        BaseModel.__init__(self,
                           input_shape=env.observation_space.shape,
                           num_actions=env.action_space.n,
                           save_dirs=save_dirs)

        self.env = env

        self.blueprint = {
            'conv_layers': 3,
            'filters': [32, 64, 64],
            'kernel_sizes': [(8, 8), (4, 4), (3, 3)],
            'strides': [(4, 4), (2, 2), (1, 1)],
            'paddings': ['valid', 'valid', 'valid'],
            'activations': ['relu', 'relu', 'relu'],
            'dense_units': 512,
            'dense_activation': 'relu'
        }

        self.local_model_save_path = os.path.join(self.save_path,
                                                  'local-wts.h5')
        self.local_model = NeuralNet(input_shape=self.input_shape,
                                     num_actions=self.num_actions,
                                     learning_rate=learning_rate,
                                     blueprint=self.blueprint).model
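The blueprint dict fully describes a three-conv-plus-dense network. One plausible way a builder like NeuralNet could consume it, sketched in Keras (the real NeuralNet class is not shown, so the wiring and the Q-value output head are assumptions):

from tensorflow.keras import layers, models, optimizers

def build_from_blueprint(input_shape, num_actions, learning_rate, bp):
    x = inp = layers.Input(shape=input_shape)
    for f, k, s, p, a in zip(bp['filters'], bp['kernel_sizes'],
                             bp['strides'], bp['paddings'], bp['activations']):
        x = layers.Conv2D(f, k, strides=s, padding=p, activation=a)(x)
    x = layers.Flatten()(x)
    x = layers.Dense(bp['dense_units'], activation=bp['dense_activation'])(x)
    model = models.Model(inp, layers.Dense(num_actions)(x))  # one Q-value per action
    model.compile(optimizer=optimizers.Adam(learning_rate), loss='mse')
    return model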
Example #6
 def __init__(self):
     BaseModel.__init__(self)
     self.dictionary = None
     self.model = None
     self.playlists = []
     self.playlist_similarity = defaultdict(list)
     self.pids = []  # All playlist ids
Example #7
File: drawer.py  Project: adamjez/POV
 def draw_model(self, model: models.BaseModel):
     """
     Draws model
     :param model: models.BaseModel
     :return:
     """
     model.render(self)
     return self
Example #8
 def __init__(self, item):
     BaseModel.__init__(self, item)
     defs.StoppableThread.__init__(self)
     self._scheduler = schedule.Scheduler()
     job = self._scheduler.every(self.schedule['interval'])
     job.unit = self.schedule['unit']
     if len(self.schedule['at']) > 0:
         job.at(self.schedule['at'][0])
     job.do(self.run_job)
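schedule only records jobs; something has to pump run_pending(). A minimal run() sketch for such a thread, assuming a stopped() flag on defs.StoppableThread (that API is not shown above):

import time

def run(self):
    while not self.stopped():        # assumed StoppableThread API
        self._scheduler.run_pending()
        time.sleep(1)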
     """
Example #9
class TestBaseModel(unittest.TestCase):
    """Test for BaseModel class
    """
    def setUp(self):
        """sets up objects for testing later
        """
        self.test_model1 = BaseModel()
        self.test_model2 = BaseModel()

    def test_basic_setup(self):
        """test for to_json method of BaseModel class
        """
        self.assertTrue(hasattr(self.test_model1, "id"))
        self.assertTrue(hasattr(self.test_model1, "__class__"))
        self.assertTrue(hasattr(self.test_model1, "created_at"))
        self.assertTrue(hasattr(self.test_model1, "updated_at"))
        self.assertTrue(self.test_model1.id != self.test_model2.id)
        m1c = self.test_model1.created_at
        m2c = self.test_model2.created_at
        self.assertTrue(m1c != m2c)

    def test_types(self):
        """testing attributes to ensure proper typing
        """
        self.assertTrue(type(self.test_model1.id) is str)
        self.assertTrue(type(self.test_model1.__class__) is type)
        m1c = self.test_model1.created_at
        m2c = self.test_model2.created_at
        m1u = self.test_model1.updated_at
        m2u = self.test_model2.updated_at
        self.assertTrue(type(m1c) is datetime.datetime)
        self.assertTrue(type(m2c) is datetime.datetime)
        self.assertTrue(type(m1u) is datetime.datetime)
        self.assertTrue(type(m2u) is datetime.datetime)

    def test_save(self):
        """testing whether save updates the updated_at attribute
        """
        m1u = self.test_model1.updated_at
        self.test_model1.save()
        m1u_saved = self.test_model1.updated_at
        self.assertFalse(m1u == m1u_saved)

    def test_to_json(self):
        """tests to_json method with diffs in output & in-memory objects
        """
        testmodelid = self.test_model1.id
        jsondict = self.test_model1.to_json()
        self.assertNotEqual(jsondict, self.test_model1.__dict__)
        self.assertEqual(jsondict["id"], self.test_model1.__dict__["id"])
        self.assertNotEqual(jsondict["created_at"],
                            self.test_model1.__dict__["created_at"])
        self.assertNotEqual(type(jsondict["created_at"]),
                            type(self.test_model1.__dict__["created_at"]))
Example #10
def states_get(state_id=None):
    '''
        Returns a list of all states, or specific state based on id
    '''
    list_of_dicts = []
    for k, v in storage.all('State').items():
        if state_id == BaseModel.to_dict(v)['id']:
            return jsonify(BaseModel.to_dict(v))
        list_of_dicts.append(BaseModel.to_dict(v))
    if state_id:
        abort(404)
    return jsonify(list_of_dicts)
Example #11
def cities_get(city_id=None):
    '''
        Returns a list of all cities, or specific city based on id
    '''
    list_of_dicts = []
    for k, v in storage.all('City').items():
        if city_id == BaseModel.to_dict(v)['id']:
            return jsonify(BaseModel.to_dict(v))
        list_of_dicts.append(BaseModel.to_dict(v))
    if city_id:
        abort(404)
    return jsonify(list_of_dicts)
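Both handlers follow the same list-or-404 pattern; hypothetical route wiring is shown below (the project's real blueprint and URL prefix are not visible in these snippets):

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/api/v1/states', 'states', states_get,
                 defaults={'state_id': None})
app.add_url_rule('/api/v1/states/<state_id>', 'states_by_id', states_get)
app.add_url_rule('/api/v1/cities/<city_id>', 'cities_by_id', cities_get)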
Example #12
 def modify_commandline_options(parser, is_train):
     BaseModel.modify_commandline_options(parser, is_train)
     parser.add_argument("--spatial_code_ch", default=8, type=int)
     parser.add_argument("--global_code_ch", default=2048, type=int)
     parser.add_argument("--lambda_R1", default=10.0, type=float)
     parser.add_argument("--lambda_patch_R1", default=1.0, type=float)
     parser.add_argument("--lambda_L1", default=1.0, type=float)
     parser.add_argument("--lambda_GAN", default=1.0, type=float)
     parser.add_argument("--lambda_PatchGAN", default=1.0, type=float)
     parser.add_argument("--patch_min_scale", default=1 / 8, type=float)
     parser.add_argument("--patch_max_scale", default=1 / 4, type=float)
     parser.add_argument("--patch_num_crops", default=8, type=int)
     parser.add_argument("--patch_use_aggregation",
                         type=util.str2bool, default=True)
     return parser
Example #13
def tp_xception_multi_period_score_fusion(dataset, config):
    result = []
    for i in range(iteration):
        pre_list = []
        y_test_list = []
        for period in ['R1', 'R3', 'R4', 'R5', 'R6']:
            model = BaseModel.Combined_Model(parallels=4, config=config)
            model.load_weights('./{}_pdm_iteration-{}-{}.hdf5'.format(
                dataset, i, period))
            x_list = np.loadtxt('{}-{}_file_list.txt'.format(dataset, period))
            img_x_list, shape_x, texture_x, vein_x, y_list = utils.data_loader_for_combined_model(
                file_list=x_list,
                dataset=dataset,
                config=config,
                isVenation=True)
            img_x = np.array(img_x_list)
            y = np.array(y_list)
            id_map = np.loadtxt(dataset + '_id.txt')
            for index, d in enumerate(y):
                for label in id_map:
                    if d == label[0]:
                        y[index] = label[1]
            y_one_hot = to_categorical(y)
            test_index = np.load(
                '{}_iteration_{}_img_{}_tp_xception_test_index.npy'.format(
                    dataset, i, period))

            img_x_test = img_x[test_index]
            shape_x_test = shape_x[test_index]
            texture_x_test = texture_x[test_index]
            vein_x_test = vein_x[test_index]
            y_test = y_one_hot[test_index]
            y_test_list.append(y_test)

            x_test_list = create_input_list(shape_x_test,
                                            texture_x_test,
                                            vein_x_test,
                                            img_x_test,
                                            config,
                                            isVenation=True)
            pre = model.predict(x_test_list)
            pre_list.append(pre)

        pre_final_arr = np.array(pre_list)
        pre_final = np.sum(pre_final_arr, axis=0)  # sum scores over periods: (n_samples, n_classes)
        pre_final_label = [np.argmax(d) for d in pre_final]
        for j in range(1, len(y_test_list)):
            if not np.array_equal(y_test_list[j - 1], y_test_list[j]):
                print("The test labels of different periods should be the same")
                return -1
        y_test_label = [np.argmax(d) for d in y_test_list[0]]

        performance = get_performance(pre_final_label, y_test_label)
        result.append(performance)

    plot_result(result)
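A minimal numpy sketch of the sum-rule score fusion used above, assuming each period's model emits an (n_samples, n_classes) score matrix:

import numpy as np

pre_list = [np.random.rand(6, 3) for _ in range(5)]  # five periods, 6 samples, 3 classes
pre_final = np.array(pre_list).sum(axis=0)           # (6, 3): scores summed over periods
fused_labels = pre_final.argmax(axis=1)              # one fused prediction per sample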
Example #14
def val(val_dataset: 'Single task_dataset',
        model: BaseModel,
        task_index,
        visualizer=None) -> Tuple[MatrixItem, List]:
    """for validation on one task"""
    logging.info(f"Validating task {task_index}")
    start_time = time.time()  # timer for validate a task

    matrixItems = []
    for i, data in enumerate(val_dataset):  # inner loop within one epoch
        model.set_data(PseudoData(opt, Bunch(**data["data"])))
        model.test(visualizer)
        # Add matrixItem result
        matrixItems.append(model.get_matrix_item(task_index))

    res = my_sum(matrixItems)
    res = res / len(matrixItems)
    logging.info(f"Validation Time Taken: {time.time() - start_time} sec")
    return res, matrixItems
Example #15
def cities_delete(city_id=None):
    '''
        Deletes a city object
        Not accounting for DELETE request with no city_id
    '''
    for k, v in storage.all('City').items():
        if city_id == BaseModel.to_dict(v)['id']:
            storage.delete(v)
            storage.save()
            return jsonify({}), 200
    abort(404)
Example #16
def states_delete(state_id=None):
    '''
        Deletes a state object
        Not accounting for DELETE request with no state_id
    '''
    for k, v in storage.all('State').items():
        if state_id == BaseModel.to_dict(v)['id']:
            storage.delete(v)
            storage.save()
            return jsonify({}), 200
    abort(404)
Example #17
class User(BaseModel):
    # Custom table name (if omitted, it defaults to the lowercased model class name, "user")
    __tablename__ = 'users'
    username = db.Column(db.String(120),
                         unique=True,
                         nullable=False,
                         doc="username",
                         comment="username")
    password = db.Column(db.String(120),
                         nullable=False,
                         doc="password",
                         comment="password")
    role = db.Column(db.Integer,
                     default=0,
                     doc="role",
                     comment="role: 0 = regular user, 1 = admin")

    email = db.Column(db.String(20), doc="email", comment="email")
    address = db.Column(db.String(250), doc="address", comment="address")
    phone = db.Column(db.String(20), doc="phone number", comment="phone number")
    gender = db.Column(db.Integer, doc="gender", comment="gender: 0 = female, 1 = male")
    tech = db.Column(db.String(250), doc="specialties", comment="specialties")
    api = db.relationship("ApiModel", backref='api', lazy=True)

    # Fields to hide from serialized output
    _hidden_fields = ["password"] + BaseModel.default_hidden_fields()

    @staticmethod
    def hash_password(password):
        return pwd_context.encrypt(password)

    def verify_password(self, password):
        return pwd_context.verify(password, self.password)

    def generate_auth_token(self, expiration=600):
        s = Serializer(run_config.SECRET_KEY, expires_in=expiration)
        return s.dumps({'id': self.id})

    @staticmethod
    def verify_auth_token(token):
        s = Serializer(run_config.SECRET_KEY)
        try:
            data = s.loads(token)
        except SignatureExpired:
            return None  # valid token, but expired
        except BadSignature:
            return None  # invalid token
        user = User.query.get(data['id'])
        return user
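A hedged round trip for the token helpers above; it relies on the legacy timed-serializer API of itsdangerous implied by the expires_in keyword, and the user lookup is an assumption:

user = User.query.filter_by(username='alice').first()  # assumed lookup
token = user.generate_auth_token(expiration=3600)
same_user = User.verify_auth_token(token)              # None if expired or tampered
assert same_user is not None and same_user.id == user.id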
Example #18
def val(val_dataset: 'Single task_dataset',
        model: BaseModel,
        task_index,
        visualizer=None) -> Tuple[MatrixItem, List]:
    """for validation on one task"""
    logging.info(f"Validating task {task_index}")
    start_time = time.time()  # timer for validate a task

    matrixItems = []
    for i, data in enumerate(val_dataset):  # inner loop within one epoch
        image, target = data
        # logging.debug(f'{image.shape},{target}')
        image = image.to(opt.device, non_blocking=True)
        target = target.to(opt.device, non_blocking=True)

        model.set_data(PseudoData(opt, image, target))
        model.test(visualizer)
        # Add matrixItem result
        matrixItems.append(model.get_matrix_item(task_index))

    res = my_sum(matrixItems)
    res = res / len(matrixItems)
    logging.info(f"Validation Time Taken: {time.time() - start_time} sec")
    return res, matrixItems
Example #19
def analyze_trajectory(model, hold=0, log='y'):
    if isinstance(model, basestring):
        from models import BaseModel
        m = BaseModel.load(model, initialize=False)
    else:
        m = model

    from datatools import plot, plt
    for var in m.results:
        plt.figure()
        res = m.results[var].matrix
        plot(np.array([res[t].ravel() for t in range(res.shape[0])]),
             hold=1,
             xs=m.results[var].index,
             log=log,
             title=var)
    if not hold:
        plt.show()
Example #20
def test(opt,
         test_datasets,
         model: BaseModel,
         train_index,
         task_index,
         visualizer=None):
    """test the model on multi-task test_datasets, after training task indexed with <train_index>

	Return
	None
	the global testMatrix will be updated
	"""

    print(
        f'=============================After trained task {train_index - 1}========================================='
    )
    matrixItems = []

    for test_index, test_dataset in enumerate(test_datasets):
        matrixItem, _ = val(
            test_dataset,
            model,
            test_index,
            visualizer,
        )
        test_matrix[(train_index, test_index + 1)] = matrixItem

        if test_index <= task_index:
            matrixItems.append(model.get_matrix_item(task_index))

        print(f'Test in task {test_index}, matrixItem=({matrixItem})')

    if len(matrixItems):
        res = my_sum(matrixItems)
        res = res / len(matrixItems)
        print(f'Average Accuracy is ({res})')

    train_index += 1
    return train_index
Example #21
def xception_model_training_and_test(img_x_list, y_list, config):
    x = np.array(img_x_list)
    y = np.array(y_list)
    dataset = config['dataset']
    if dataset == 'soybean':
        period = config['period']
    else:
        period = dataset
    id_map = np.loadtxt(dataset + '_id.txt')
    for index, d in enumerate(y):
        for label in id_map:
            if d == label[0]:
                y[index] = label[1]

    y_one_hot = to_categorical(y)

    lr_adjust = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=5,
                                  min_lr=1e-6)

    result = []
    for i in range(iteration):
        index = np.arange(len(y))
        # print(index)
        X_train_index, X_test_index, y_train_index, y_test_index = train_test_split(
            index, y, test_size=0.3, random_state=i, shuffle=True, stratify=y)
        print(len(X_train_index))
        print(len(X_test_index))
        np.save(
            '{}_iteration_{}_img_{}_xception_train_index.npy'.format(
                dataset, i, period), X_train_index)
        np.save(
            '{}_iteration_{}_img_{}_xception_test_index.npy'.format(
                dataset, i, period), X_test_index)
        X_train = x[X_train_index]
        X_test = x[X_test_index]
        y_train = y_one_hot[X_train_index]
        y_test = y_one_hot[X_test_index]

        save_best_weight = ModelCheckpoint(
            'xception_img_{}_iteration-{}-{}.hdf5'.format(dataset, i, period),
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            mode='auto',
            save_weights_only=True)
        # increase `parallels` to build a multi-GPU model if more than one GPU is available
        model = BaseModel.Xception_Model(parallels=1, config=config)
        # use a smaller batch_size if your GPU memory is limited

        model.fit(X_train,
                  y_train,
                  batch_size=32,
                  epochs=100,
                  validation_split=0.1,
                  callbacks=[lr_adjust, save_best_weight])
        K.clear_session()

        model2 = BaseModel.Xception_Model(parallels=1, config=config)
        model2.load_weights('xception_img_{}_iteration-{}-{}.hdf5'.format(
            dataset, i, period))

        score = model2.evaluate(X_test, y_test)
        print(score)

        pre_final = model2.predict(X_test, batch_size=128)
        y_test_label = np.array([np.argmax(d) for d in y_test])
        y_pre_label = np.array([np.argmax(d) for d in pre_final])
        performance = get_performance(y_pre_label, y_test_label)
        performance['test_loss'] = score[0]
        performance['test_acc'] = score[1]
        K.clear_session()
        result.append(performance)
        json_str = json.dumps(performance, indent=4)
        with open(
                '{}_xception-iteration-{}-{}-result.json'.format(
                    dataset, i, period), 'w') as json_file:
            json_file.write(json_str)
        plot_result(result)
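Because the split indices are persisted as .npy files, the exact partition can be reproduced later (as the multi-period fusion in Example #13 does). A hedged sketch with an assumed filename built from the format string above:

import numpy as np

test_index = np.load('soybean_iteration_0_img_R1_xception_test_index.npy')  # assumed name
X_test = x[test_index]              # same rows as the original evaluation
y_test = y_one_hot[test_index]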
Example #22
File: user_CF.py  Project: micolin/thesis
	def __init__(self):
		BaseModel.__init__(self)
		self.user_similarity = defaultdict(list)	# {uid:{sim_id:similarity}}
Example #23
	def __init__(self):
		BaseModel.__init__(self)
		self.user_similarity = defaultdict(list)
		self.user_tag_distrib = defaultdict(dict)
Example #24
def tp_xception_model_training_and_test(img_x_list, shape_x, texture_x, vein_x,
                                        isVenation, y_list, config):
    img_x = np.array(img_x_list)
    shape_x = np.array(shape_x)
    texture_x = np.array(texture_x)
    if isVenation:
        vein_x = np.array(vein_x)
    y = np.array(y_list)
    dataset = config['dataset']
    id_map = np.loadtxt(dataset + '_id.txt')
    for index, d in enumerate(y):
        for label in id_map:
            if d == label[0]:
                y[index] = int(label[1])
    y_one_hot = to_categorical(y)
    result = []
    lr_adjust = ReduceLROnPlateau(monitor='val_loss',
                                  factor=0.5,
                                  patience=5,
                                  min_lr=1e-6)
    if dataset == 'soybean':
        period = config['period']
    else:
        period = dataset

    for i in range(iteration):
        index = np.arange(len(y))
        # print(index)
        X_train_index, X_test_index, y_train, y_test = train_test_split(
            index,
            y_one_hot,
            test_size=0.3,
            random_state=i,
            shuffle=True,
            stratify=y_one_hot)
        print(len(X_train_index))
        print(len(X_test_index))
        np.save(
            '{}_iteration_{}_img_{}_tp_xception_train_index.npy'.format(
                dataset, i, period), X_train_index)
        np.save(
            '{}_iteration_{}_img_{}_tp_xception_test_index.npy'.format(
                dataset, i, period), X_test_index)

        shape_x_train = shape_x[X_train_index]
        texture_x_train = texture_x[X_train_index]
        img_x_train = img_x[X_train_index]
        shape_x_train_list = [
            shape_x_train[:, i, :, :] for i in range(config["shape_views"])
        ]
        texture_x_train_list = [
            texture_x_train[:, i, :, :] for i in range(config["texture_views"])
        ]

        if isVenation:
            vein_x_train = vein_x[X_train_index]
            vein_x_train[:, 0, :, 1] = (vein_x_train[:, 0, :, 1] - np.mean(
                vein_x_train[:, 0, :, 1])) / np.std(vein_x_train[:, 0, :, 1])
            vein_x_train[:, 1, :, 1] = (vein_x_train[:, 1, :, 1] - np.mean(
                vein_x_train[:, 1, :, 1])) / np.std(vein_x_train[:, 1, :, 1])
            vein_x_train_list = [
                vein_x_train[:, i, :, :] for i in range(config["vein_views"])
            ]

        x_train_list = []

        for index, d in enumerate(texture_x_train_list):
            texture_x_train_list[index] = np.reshape(
                d, [d.shape[0], d.shape[1], d.shape[2], 1])
        if isVenation:
            for index, d in enumerate(vein_x_train_list):
                vein_x_train_list[index] = np.reshape(
                    d, [d.shape[0], d.shape[1], d.shape[2], 1])

        x_train_list.extend(shape_x_train_list)
        x_train_list.extend(texture_x_train_list)
        if isVenation:
            x_train_list.extend(vein_x_train_list)
        x_train_list.append(img_x_train)

        y_train_label = [np.argmax(d) for d in y_train]
        my_class_weights = list(
            class_weight.compute_class_weight('balanced',
                                              np.unique(y_train_label),
                                              y_train_label))
        class_weight_dict = dict(
            zip([x for x in np.unique(y_train_label)], my_class_weights))

        save_best_weight = ModelCheckpoint(
            './{}_pdm_iteration-{}-{}.hdf5'.format(dataset, i, period),
            monitor='val_loss',
            verbose=1,
            save_best_only=True,
            mode='auto',
            save_weights_only=True)

        model = BaseModel.Combined_Model(parallels=1, config=config)

        lr_reduce = LearningRateScheduler(lr_reducer)
        model.fit(x_train_list,
                  y_train,
                  batch_size=16,
                  epochs=100,
                  validation_split=0.1,
                  class_weight=class_weight_dict,
                  callbacks=[save_best_weight, lr_reduce, lr_adjust])

        shape_x_test = shape_x[X_test_index]
        texture_x_test = texture_x[X_test_index]

        img_x_test = img_x[X_test_index]

        shape_x_test_list = [
            shape_x_test[:, i, :, :] for i in range(config["shape_views"])
        ]
        texture_x_test_list = [
            texture_x_test[:, i, :, :] for i in range(config["texture_views"])
        ]

        if isVenation:
            vein_x_test = vein_x[X_test_index]
            vein_x_test[:, 0, :, 1] = (vein_x_test[:, 0, :, 1] - np.mean(
                vein_x_train[:, 0, :, 1])) / np.std(vein_x_train[:, 0, :, 1])
            vein_x_test[:, 1, :, 1] = (vein_x_test[:, 1, :, 1] - np.mean(
                vein_x_train[:, 1, :, 1])) / np.std(vein_x_train[:, 1, :, 1])
            vein_x_test_list = [
                vein_x_test[:, i, :, :] for i in range(config["vein_views"])
            ]

        x_test_list = []

        for index, d in enumerate(texture_x_test_list):
            texture_x_test_list[index] = np.reshape(
                d, [d.shape[0], d.shape[1], d.shape[2], 1])

        x_test_list.extend(shape_x_test_list)
        x_test_list.extend(texture_x_test_list)

        if isVenation:
            for index, d in enumerate(vein_x_test_list):
                vein_x_test_list[index] = np.reshape(
                    d, [d.shape[0], d.shape[1], d.shape[2], 1])
            x_test_list.extend(vein_x_test_list)

        x_test_list.append(img_x_test)

        K.clear_session()

        model2 = BaseModel.Combined_Model(parallels=1, config=config)
        model2.load_weights('./{}_pdm_iteration-{}-{}.hdf5'.format(
            dataset, i, period))
        score = model2.evaluate(x_test_list, y_test, batch_size=128)
        print(score)
        y_test_label = np.array([np.argmax(d) for d in y_test])
        y_pre = model2.predict(x_test_list)
        y_pre_label = np.array([np.argmax(d) for d in y_pre])

        performance = get_performance(y_pre_label, y_test_label)

        performance['test_acc'] = score[1]
        performance['test_loss'] = score[0]

        result.append(performance)
        K.clear_session()
        print("precision_score: {}".format(performance['precision']))
        print("recall_score: {}".format(performance['recall']))
        print("f1_score: {}".format(performance['f1_score']))
        json_str = json.dumps(performance, indent=4)
        with open(
                './{}_pd-rgb-combined-iteration-{}-{}.json'.format(
                    dataset, i, period), 'w') as json_file:
            json_file.write(json_str)

        plot_result(result)
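Note that the vein features of the test split are standardized with the training mean and std, which avoids leaking test statistics into preprocessing. The same pattern factored into a helper (a sketch, not code from the project):

import numpy as np

def standardize_with_train_stats(train_col, test_col):
    # test data must be scaled with statistics of the *training* data
    mu, sigma = np.mean(train_col), np.std(train_col)
    return (train_col - mu) / sigma, (test_col - mu) / sigma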
Example #25
def run_movie(path,
              destination='./Movie',
              death=None,
              dpi=150,
              tsample=1,
              imgformat='jpg',
              maxpic=800,
              noticks=False):
    '''Creates a movie at destination. Images are stored at destination/img/'''
    destination = Path(destination)
    imgfolder = destination + Path('img')
    imgfolder.mkdir()
    from subprocess import call

    import matplotlib.patches as mpatches
    from matplotlib.collections import PatchCollection
    from models import BaseModel

    ## Remove old figs
    try:
        call("rm -f {}.{}".format(imgfolder + '*', imgformat), shell=True)
    except:
        print(
            "Your OS is not supported by my awesome movie plotter. Old figures not removed"
        )

    ## Load in data
    m = BaseModel.load(path, initialize=False)
    var = m.variables.keys()[0]
    traj = m.results[var]

    prm = m.export_params()
    colors = [
        'b',
        'g',
        'r',
        'y',
        'k',
    ]

    if 'death' in m.parameters[var]:
        death = m.parameters[var]['death']

    if death is None:
        death = 0

    #Size conversion (data to figure size)
    def convert_dist(ax, pos):
        if not hasattr(pos, '__iter__'):
            pos = (0, pos)
        tarea = (ax.transData.transform(
            (area[2], area[3])) - ax.transData.transform((area[0], area[1])))
        return (ax.transData.transform(pos) - ax.transData.transform(
            (0, 0))) / tarea

    graph = Graph(nx.from_numpy_matrix(m.data['community'].matrix))

    ## plot
    alive = graph.nodes()[:]
    for t in range(0, min(maxpic * tsample, traj.shape[0])):
        if t % tsample != 0:
            continue

        plt.clf()
        fig = plt.figure(edgecolor=[.4, .4, .4])
        ax = fig.add_subplot(121)

        dead = np.where(traj[t] < death)[0]
        for d in dead:
            if d in alive:
                graph.remove_node(d)
                alive.remove(d)

        graph.plot(hold=1, newfig=0, node_size=traj[t][alive], edge_width=.1)
        ax = fig.add_subplot(122)
        plt.xlim(xmax=traj.index[-1])
        plot(np.array([traj[t2].ravel() for t2 in range(t)]),
             hold=1,
             xs=traj.index[:t])

        if noticks:
            plt.xticks([])
            plt.yticks([])

        # save
        ti = str(t / tsample + 100000)
        plt.savefig(imgfolder + 'Fig_{}.{}'.format(ti[1:], imgformat),
                    dpi=dpi,
                    bbox_inches='tight')
        print(t)

        plt.close('all')

    try:
        call("ffmpeg -y -r 18  -i {}%05d.{} {}".format(
            imgfolder + 'Fig_', imgformat, destination + 'movie.mp4'),
             shell=True)
    except:
        pass
Example #26
File: item_CF.py  Project: micolin/thesis
 def __init__(self):
     BaseModel.__init__(self)
     self.item_similarity = {}  # {itemid: [(sim_item, similarity)]} sorted by similarity
Example #27
	def __init__(self):
		BaseModel.__init__(self)
		self.popular_list = []
Example #28
 def __init__(self):
     BaseModel.__init__(self)
Example #29
from werkzeug.exceptions import HTTPException
from models import BaseModel
from controllers import *
from responses import ISOAwareEncoder


##
# CONFIGURATION
##
# Defines and configures the web server,
# database connection, and data models.
##

app = Flask("letsschedit")
app.json_encoder = ISOAwareEncoder
db = BaseModel.get_database()
db.init(
    env('DATABASE'),
    user=env('DB_USERNAME'),
    password=env('DB_PASSWORD')
)


@app.before_request
def _db_connect():
    """ Ensures that whenever a HTTP request is comming in, a db connection is dispatched
    from the pool. This is required as MySQL oftens kills idle connections, so we want
    a hot new fresh one every time. """
    db.connect()

Example #30
import diagnostics
from models import BaseModel

mod = BaseModel()
print('instantiation successful')
a = 10**4
Ns = [5 * (10**5), a, 5 * a, 10 * a, 50 * a]
diagnostics.runtime_lineplot_N(mod, N_space=Ns, P=4)
print('ahuevooooooooo')
diagnostics.mse_lineplot_N(mod, N_space=Ns, P=4)
Example #31
from models import BaseModel, TextContent, MessageContent, Photo, Role, User, UserRoles, MessageType, ConversationType, Conversation, Message, ConversationParty, Quotation, Company, database

try:

	database.drop_tables(BaseModel.__subclasses__(), safe = True, cascade = True)

	# database.create_tables(BaseModel.__subclasses__())

	Photo.create_table()
	Role.create_table()
	User.create_table()
	UserRoles.create_table()
	MessageType.create_table()
	ConversationType.create_table()
	Quotation.create_table()
	Company.create_table()
	TextContent.create_table()
	MessageContent.create_table()
	Conversation.create_table()
	Message.create_table()
	ConversationParty.create_table()

	database.create_foreign_key(Conversation, Conversation.last_message)

except Exception as e:
	print('Error while creating schema: %s' % e)
Example #32
File: item_CF.py  Project: micolin/thesis
	def __init__(self):
		BaseModel.__init__(self)
		self.item_similarity = {} # {itemid:[(sim_item,similarity)]} sorted by similarity
Example #33
	def __init__(self):
		BaseModel.__init__(self)
		self.user_similarity = defaultdict(dict)
		self.userCF = UserCF()
		self.userTag = UserTagCF()
		self.userLda = UserLDA()
Example #34
	def __init__(self):
		BaseModel.__init__(self)
Example #35
 def setUp(self):
     """sets up objects for testing later
     """
     self.test_model1 = BaseModel()
     self.test_model2 = BaseModel()
Example #36
test_probabilitiesBkg = []
for b in bkg_test:
    b = scaler.transform([b])
    prob = classifier.predict_proba(b)[0][0]
    b = b[0].flatten().tolist()
    test_probabilitiesBkg.append(prob)
    testDataBkg.append(b)

batchSize = 4
test = testDataSig
probs_test = test_probabilitiesSig
test_bkg = testDataBkg
probs_test_bkg = test_probabilitiesBkg

# Initialise model
model = Model(len(test[0]), 1)
model.load_model('approx1.pkl')

test = np.array(test)
probs_test = np.array(probs_test)

test_bkg = np.array(test_bkg)
probs_test_bkg = np.array(probs_test_bkg)

# Make a vector of outputs
comp_preds = []
comp_true = []
for (batchX, batchY) in next_batch(test, probs_test, batchSize):
    if batchY.shape[0] < batchSize:
        print('Batch size insufficient (%s), continuing...' % batchY.shape[0])
        continue
Example #37
#!/usr/bin/env python2.7
# -*- coding: utf-8 -*-

from sqlalchemy import Column, String, Text, Integer, DateTime

from models import BaseModel

__BASE_MODEL = BaseModel.get_base_model()


class Bangumi(__BASE_MODEL):
    __tablename__ = 'bangumi'
    cover = Column(Text, nullable=True)
    favorites = Column(Integer, nullable=True)
    is_finish = Column(Integer, nullable=True)
    newest_ep_index = Column(String(16), nullable=True)
    pub_time = Column(Integer, nullable=True)
    season_id = Column(Integer, primary_key=True)
    season_status = Column(Integer, nullable=True)
    title = Column(String(64), nullable=True)
    introduction = Column(String(512), nullable=True)
    total_count = Column(Integer, nullable=True)
    update_time = Column(Integer, nullable=True)
    url = Column(Text, nullable=True)
    week = Column(String(30), nullable=True)
    tags = Column(Text, nullable=True)
    actors = Column(Text, nullable=True)
    createdAt = Column(DateTime, nullable=True)
    updatedAt = Column(DateTime, nullable=True)

    def __init__(self, data_dict):
Example #38
 def setUp(self):
     """
     objects to be tested
     """
     self.model1_test = BaseModel()
     self.model2_test = BaseModel()
Example #39
File: userLDA.py  Project: micolin/thesis
	def __init__(self):
		BaseModel.__init__(self)
		self.dictionary = None
		self.model = None
		self.user_similarity = defaultdict(list)