Example #1
def generate_mix_features(source_dir_path,
                          target_dir_path,
                          params_list,
                          group_size=300,
                          bin_number=10):
    # print "script: extract_feature.py,  lineNumber:", sys._getframe().f_lineno, ",  func:", sys._getframe().f_code.co_name
    for root, _, files in os.walk(source_dir_path):
        for file in files:
            data = file_util.read_file(os.path.join(root, file))  # join against the walked root so subdirectories also work
            # #---------------test function-------------------------
            # denoised_data = data_process.denoise(data, settings.THRESHOLDS)

            # file_util.write_file('denoised'+'\\'+file, denoised_data)
            fp = extract_feature.get_feature_from_matrix(
                data, group_size, bin_number)
            # fp = extract_feature.get_feature_from_matrix(denoised_data, group_size, bin_number)
            fp_dict = dict()
            for i, row in enumerate(fp):
                row = data_process.normalize(row)  # normalize
                fp_dict[i] = row

            mix_fp_temp = list_util.merge(fp_dict[0], fp_dict[1])
            mix_fp = list_util.merge(mix_fp_temp, fp_dict[2])
            target_file = os.path.join(target_dir_path,
                                       file[:-4] + "_" + ".txt")
            file_util.write_file(target_file, mix_fp)
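Example #2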
    def inference_rgb(self, rgbdata, orgshape, mean=None):
        scale = (orgshape[0] / self.inres[0], orgshape[1] / self.inres[1])
        # scipy.misc.imresize was removed in SciPy 1.3; Example #3 shows a PIL-based equivalent
        imgdata = scipy.misc.imresize(rgbdata, self.inres)

        if mean is None:
            mean = np.array([0.4404, 0.444, 0.4327], dtype=np.float32)

        imgdata = normalize(imgdata, mean)

        input = imgdata[np.newaxis, :, :, :]

        out = self.model.predict(input)
        return out[-1], scale
Example #3
    def inference_rgb(self, rgbdata, orgshape, mean=None):
        scale = (orgshape[0] * 1.0 / self.inputRes[0], orgshape[1] * 1.0 / self.inputRes[1])

        imgdata = np.array(Image.fromarray(rgbdata).resize(self.inputRes))

        if mean is None:
            # np.float was removed in NumPy 1.24; use an explicit dtype instead
            mean = np.array([0.4404, 0.4440, 0.4327], dtype=np.float32)

        imgdata = normalize(imgdata, mean)

        input = imgdata[np.newaxis, :, :, :]

        out = self.model.predict(input)
        return out[-1], scale
Example #4
    def process_image(self, sample_index, kpanno, sigma, rot_flag, scale_flag,
                      flip_flag):
        imagefile = kpanno['img_paths']
        # image = scipy.misc.imread(os.path.join(self.imgpath, imagefile))
        image = imageio.imread(os.path.join(self.imgpath, imagefile))

        # get center
        center = np.array(kpanno['objpos'])
        joints = np.array(kpanno['joint_self'])
        scale = kpanno['scale_provided']

        # Adjust center/scale slightly to avoid cropping limbs
        if center[0] != -1:
            center[1] = center[1] + 15 * scale
            scale = scale * 1.25

        # flip
        if flip_flag and random.choice([0, 1]):
            image, joints, center = self.flip(image, joints, center)

        # scale
        if scale_flag:
            scale = scale * np.random.uniform(0.8, 1.2)

        # rotate image
        if rot_flag and random.choice([0, 1]):
            rot = np.random.randint(-1 * 30, 30)
        else:
            rot = 0

        cropimg = data_process.crop(image, center, scale, self.inres, rot)
        cropimg = data_process.normalize(cropimg, self.get_color_mean())

        # transform keypoints
        transformedKps = data_process.transform_kp(joints, center, scale,
                                                   self.outres, rot)
        gtmap = data_process.generate_gtmap(transformedKps, sigma, self.outres)

        # meta info
        metainfo = {
            'sample_index': sample_index,
            'center': center,
            'scale': scale,
            'pts': joints,
            'tpts': transformedKps,
            'name': imagefile
        }

        return cropimg, gtmap, metainfo
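The data_process.generate_gtmap call above presumably renders one Gaussian heatmap per keypoint at the output resolution, as is standard for hourglass-style pose models. A minimal sketch of that technique, assuming the keypoints are already in output-resolution coordinates (gaussian_heatmaps is an illustrative name, not the repository's function):

import numpy as np

def gaussian_heatmaps(kps, sigma, outres):
    # kps: (K, >=2) array of (x, y[, visibility]) in output-resolution coordinates
    h, w = outres
    ys, xs = np.mgrid[0:h, 0:w]
    maps = np.zeros((h, w, len(kps)), dtype=np.float32)
    for i, kp in enumerate(kps):
        x, y = kp[0], kp[1]
        if x < 0 or y < 0:  # skip unlabeled joints
            continue
        maps[:, :, i] = np.exp(-((xs - x) ** 2 + (ys - y) ** 2) / (2.0 * sigma ** 2))
    return maps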
Example #5
def get_vlad_feature(feature_mat_list):
    """
    :param feature_mat_list: list of feature matrices, one per model, each of shape (w, n)
    :return: normalized VLAD encoding of the stacked, PCA-transformed features
    """
    t = feature_mat_list[0]
    npmodel = len(feature_mat_list)
    w, n = t.shape[0:2]
    cn = 0
    arr = np.zeros((npmodel * w, n), np.float32)
    for t in feature_mat_list:
        for i in range(w):
            arr[cn, :] = t[i, :]
            cn += 1
    # kmeans and pca1 are module-level objects fitted elsewhere in the original script
    return normalize(vlad(kmeans, pca1.transform(arr)))
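The vlad call above appears to be a standard VLAD (Vector of Locally Aggregated Descriptors) encoder, with kmeans and pca1 fitted elsewhere in the original module. A minimal sketch of that technique using a scikit-learn KMeans codebook (vlad_encode is an illustrative helper, not the original function):

import numpy as np
from sklearn.cluster import KMeans

def vlad_encode(descriptors, kmeans):
    # Sum the residuals of each descriptor to its nearest cluster centre.
    k, d = kmeans.cluster_centers_.shape
    assignments = kmeans.predict(descriptors)
    v = np.zeros((k, d), dtype=np.float32)
    for c in range(k):
        mask = assignments == c
        if np.any(mask):
            v[c] = (descriptors[mask] - kmeans.cluster_centers_[c]).sum(axis=0)
    v = np.sign(v) * np.sqrt(np.abs(v))   # power normalization
    v = v.flatten()
    norm = np.linalg.norm(v)
    return v / norm if norm > 0 else v    # L2 normalization

# usage sketch: fit a codebook on training descriptors, then encode
# kmeans = KMeans(n_clusters=64).fit(training_descriptors)
# vec = vlad_encode(descriptors, kmeans)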
Example #6
def t1():
    num_samp_per_class = 2
    dim = 2
    N_class = 4

    X, labels = gen_toy_data(dim, N_class, num_samp_per_class)

    X_norm, mean, std = normalize(X)

    X_norm, mean, U, S = PCA_white(X_norm)

    layer_param = [dim, 100, 100, N_class]

    overfit_tinydata(X_norm, labels, layer_param)

    X_train, labels_train, X_val, labels_val, X_test, labels_test = split_data(
        X_norm, labels)

    check_gradient(X, labels, [2, 100, 4], True)
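PCA_white above appears to follow the common PCA-whitening recipe: center the data, rotate it onto its principal axes, and rescale each component to unit variance. A minimal sketch of that technique under this assumption (pca_whiten is an illustrative name, not the repository's implementation):

import numpy as np

def pca_whiten(X, eps=1e-5):
    # X: (num_samples, dim) data matrix
    mean = X.mean(axis=0)
    Xc = X - mean
    cov = Xc.T @ Xc / Xc.shape[0]
    U, S, _ = np.linalg.svd(cov)       # eigenvectors / eigenvalues of the covariance
    Xrot = Xc @ U                      # decorrelate
    Xwhite = Xrot / np.sqrt(S + eps)   # equalize variance per component
    return Xwhite, mean, U, S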
Example #7
def t2():
    num_samp_per_class = 200
    dim = 2
    N_class = 4

    # generate data
    X, labels = gen_toy_data(dim, N_class, num_samp_per_class)
    X_norm, mean, std = normalize(X)
    X_norm, mean, U, S = PCA_white(X_norm)
    X_train, labels_train, X_val, labels_val, X_test, labels_test = split_data(
        X_norm, labels)

    lr = 10**(-2.1)
    lr_decay = 1
    reg = 10**(-4.3)
    mu = 0.9
    max_epoch = 10000

    # train
    layer_param = [dim, 100, 100, N_class]
    train_net(X_train, labels_train, layer_param, lr, lr_decay, reg, mu,
              max_epoch, X_val, labels_val)
Example #8
    # else:
    #     print(f"""ignoring nationwide data for {cn_history["date"]}""")

for country in toutiao_data["world"]:
    data_list.append({
        "date": data_date,
        "country": country["country"],
        "confirmed": country["confirmedNum"],
        "suspected": country["suspectedNum"],
        "cured": country["curesNum"],
        "dead": country["deathsNum"]
    })


df = pd.DataFrame(data_list)
df = data_process.normalize(df)


csv_date = os.path.join("Data", f"{data_date}.csv")
json_date = os.path.join("Data", f"{data_date}.json")
df_date = df[df["date"] == data_date]
df_date.to_csv(csv_date, index=False, encoding='utf-8')
with open(json_date, "w", encoding="utf-8") as f:
    df_date.to_json(f, orient="records", force_ascii=False)


yesterday = datetime.strftime(datetime.strptime(data_date, "%Y-%m-%d") - timedelta(days=1), "%Y-%m-%d")
csv_yesterday = os.path.join("Data", f"{yesterday}.csv")
json_yesterday = os.path.join("Data", f"{yesterday}.json")
df_yesterday = pd.read_csv(csv_yesterday, dtype=data_process.data_dtype)
df_yesterday = pd.concat([df_yesterday, df[df["date"] == yesterday]], sort=False)  # filter on the date string, not the CSV path
Example #9
def view_crop_image(anno):

    print(anno.keys())

    img_paths = anno['img_paths']
    img_width = anno['img_width']
    img_height = anno['img_height']

    #print anno.keys()

    imgdata = scipy.misc.imread(
        os.path.join("../../data/mpii/images", img_paths))
    draw_joints(imgdata, anno['joint_self'])
    #scipy.misc.imshow(imgdata)

    center = np.array(anno['objpos'])
    outimg = data_process.crop(imgdata,
                               center=center,
                               scale=anno['scale_provided'],
                               res=(256, 256),
                               rot=0)
    outimg_normalized = data_process.normalize(outimg)

    print(outimg.shape)

    newjoints = data_process.transform_kp(np.array(anno['joint_self']),
                                          center,
                                          anno['scale_provided'], (64, 64),
                                          rot=0)
    #draw_joints(outimg_normalized, newjoints.tolist())
    #scipy.misc.imshow(outimg_normalized)
    '''
    mimage = np.zeros(shape=(64, 64), dtype=np.float)
    gtmap = generate_gt_map(newjoints, sigma=1, outres=(64, 64))
    for i in range(16):
        mimage += gtmap[:, :, i]
    scipy.misc.imshow(mimage)
    '''

    # meta info
    metainfo = []
    orgjoints = np.array(anno['joint_self'])
    for i in range(newjoints.shape[0]):
        meta = {
            'center': center,
            'scale': anno['scale_provided'],
            'pts': orgjoints[i],
            'tpts': newjoints[i]
        }
        metainfo.append(meta)

    # transform back
    tpbpts = list()
    for i in range(newjoints.shape[0]):
        tpts = newjoints[i]
        meta = metainfo[i]
        orgpts = tpts
        orgpts[0:2] = data_process.transform(tpts,
                                             meta['center'],
                                             meta['scale'],
                                             res=[64, 64],
                                             invert=1,
                                             rot=0)
        tpbpts.append(orgpts)

    print(tpbpts)

    draw_joints(imgdata, np.array(tpbpts))
    scipy.misc.imshow(imgdata)
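Both scipy.misc.imread and scipy.misc.imshow used above were removed from recent SciPy releases. A minimal sketch of drop-in replacements, assuming the imageio and matplotlib packages are installed (the file name is a placeholder):

import os
import imageio
import matplotlib.pyplot as plt

# read an image as an ndarray (replacement for scipy.misc.imread)
imgdata = imageio.imread(os.path.join("../../data/mpii/images", "example.jpg"))

# display it (replacement for scipy.misc.imshow)
plt.imshow(imgdata)
plt.axis("off")
plt.show()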
Example #10
    # # f1 = plt.figure(1)
    # plt.axis([0, 55000, 0, 1])
    # plt.scatter(range(len(IAT)), IAT, color='darkseagreen', marker='*')
    # # plt.show()
    # plt.savefig("G:\graduate_git\images\IAT.png")

    # -------------FrameSize scatter plot----------------------
    # FS = get_column_from_matrix(data, 1)
    # # f2 = plt.figure(2)
    # plt.axis([0, 55000, 0, 1])
    # plt.scatter(range(len(FS)), FS, color='steelblue', marker='*')
    # # plt.show()
    # plt.savefig("G:\graduate_git\images\FS.png")

    # -------------transRate scatter plot----------------------
    TR = get_column_from_matrix(data, 2)
    # f2 = plt.figure(3)
    plt.axis([0, 55000, 0, 1])
    plt.scatter(range(len(TR)), TR, color='salmon', marker='*')
    # plt.show()
    plt.savefig("G:\graduate_git\images\TR.png")


if __name__ == '__main__':

    data = read_file('ytw_ipad.txt')
    denoised = denoise(data, [0.2, 200, 0.2 * 10**9])
    normalized = normalize(denoised)
    # write_file("normalized.txt", normalized)
    scatter_data(normalized)
Example #11
    def __init__(self, building_ids, buildings_states_actions, building_info,
                 observation_spaces=None, action_spaces=None, hidden_dim=[400, 300],
                 discount=0.99, tau=5e-3, lr=3e-4, batch_size=100,
                 replay_buffer_capacity=1e5, regression_buffer_capacity=3e4,
                 start_training=None, exploration_period=None,
                 start_regression=None, information_sharing=False,
                 pca_compression=1., action_scaling_coef=1.,
                 reward_scaling=1., update_per_step=1,
                 iterations_as=2, safe_exploration=False, seed=0):

        assert start_training > start_regression, 'start_training must be greater than start_regression'

        with open(buildings_states_actions) as json_file:
            self.buildings_states_actions = json.load(json_file)

        self.building_ids = building_ids
        self.start_training = start_training
        self.start_regression = start_regression
        self.discount = discount
        self.batch_size = batch_size
        self.tau = tau
        self.action_scaling_coef = action_scaling_coef
        self.reward_scaling = reward_scaling
        self.regression_freq = 2500
        torch.manual_seed(seed)
        np.random.seed(seed)
        self.deterministic = False
        self.information_sharing = information_sharing
        self.update_per_step = update_per_step
        self.iterations_as = iterations_as
        self.safe_exploration = safe_exploration
        self.exploration_period = exploration_period

        self.action_list_ = []
        self.action_list2_ = []

        self.time_step = 0
        self.pca_flag = {uid: 0 for uid in building_ids}
        self.regression_flag = {uid: 0 for uid in building_ids}
        self.action_spaces = {
            uid: a_space
            for uid, a_space in zip(building_ids, action_spaces)
        }
        self.observation_spaces = {
            uid: o_space
            for uid, o_space in zip(building_ids, observation_spaces)
        }

        # Optimizers/Loss using the Huber loss
        '''
        alert
        '''
        self.soft_q_criterion = nn.SmoothL1Loss()

        # device
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")
        self.critic1_loss_, self.critic2_loss_, self.actor_loss_, self.alpha_loss_, self.alpha_, self.q_tracker = {}, {}, {}, {}, {}, {}

        self.energy_size_coef = {}
        self.total_coef = 0

        # tracks energy information per building, but I am not sure what these coefficients do in detail
        for uid, info in building_info.items():
            _coef = info['Annual_DHW_demand (kWh)'] / .9 + info[
                'Annual_cooling_demand (kWh)'] / 3.5 + info[
                    'Annual_nonshiftable_electrical_demand (kWh)'] - info[
                        'solar_power_capacity (kW)'] * 8760 / 6.0
            self.energy_size_coef[uid] = max(
                .3 * (_coef + info['solar_power_capacity (kW)'] * 8760 / 6.0),
                _coef) / 8760
            self.total_coef += self.energy_size_coef[uid]

        for uid in self.energy_size_coef:
            self.energy_size_coef[
                uid] = self.energy_size_coef[uid] / self.total_coef
        '''
        alert
        '''
        # create one empty dict per building for the replay buffers, networks,
        # optimizers and tracking containers
        (self.replay_buffer, self.reg_buffer, self.soft_q_net1, self.soft_q_net2,
         self.target_soft_q_net1, self.target_soft_q_net2, self.policy_net,
         self.soft_q_optimizer1, self.soft_q_optimizer2, self.policy_optimizer,
         self.target_entropy, self.alpha, self.log_alpha, self.alpha_optimizer,
         self.pca, self.encoder, self.encoder_reg, self.state_estimator,
         self.norm_mean, self.norm_std, self.r_norm_mean, self.r_norm_std,
         self.log_pi_tracker) = ({} for _ in range(23))

        # setting loss function and encoder; however, the encoder / state estimator is not needed
        for uid in building_ids:
            # information sharing
            # self.state_estimator[uid] = GradientBoostingRegressor()
            '''
            alert
            '''
            self.critic1_loss_[uid], self.critic2_loss_[uid], self.actor_loss_[
                uid], self.alpha_loss_[uid], self.alpha_[uid], self.q_tracker[
                    uid], self.log_pi_tracker[
                        uid] = [], [], [], [], [], [], []
            # seems like encoder is for setting up environment stuff
            self.encoder[uid] = []
            state_n = 0
            for s_name, s in self.buildings_states_actions[uid][
                    'states'].items():
                if not s:
                    self.encoder[uid].append(0)
                elif s_name in ["month", "hour"]:
                    self.encoder[uid].append(
                        periodic_normalization(
                            self.observation_spaces[uid].high[state_n]))
                    state_n += 1
                elif s_name == "day":
                    self.encoder[uid].append(
                        onehot_encoding([1, 2, 3, 4, 5, 6, 7, 8]))
                    state_n += 1
                elif s_name == "daylight_savings_status":
                    self.encoder[uid].append(onehot_encoding([0, 1]))
                    state_n += 1
                elif s_name == "net_electricity_consumption":
                    self.encoder[uid].append(remove_feature())
                    state_n += 1
                else:
                    self.encoder[uid].append(
                        normalize(self.observation_spaces[uid].low[state_n],
                                  self.observation_spaces[uid].high[state_n]))
                    state_n += 1

            self.encoder[uid] = np.array(self.encoder[uid])

            # If there is no solar PV installed, remove solar radiation variables
            if building_info[uid]['solar_power_capacity (kW)'] == 0:
                for k in range(12, 20):
                    if self.encoder[uid][k] != 0:
                        self.encoder[uid][k] = -1
                if self.encoder[uid][24] != 0:
                    self.encoder[uid][24] = -1
            if building_info[uid][
                    'Annual_DHW_demand (kWh)'] == 0 and self.encoder[uid][
                        26] != 0:
                self.encoder[uid][26] = -1
            if building_info[uid][
                    'Annual_cooling_demand (kWh)'] == 0 and self.encoder[uid][
                        25] != 0:
                self.encoder[uid][25] = -1
            if building_info[uid][
                    'Annual_nonshiftable_electrical_demand (kWh)'] == 0 and self.encoder[
                        uid][23] != 0:
                self.encoder[uid][23] = -1

            self.encoder[uid] = self.encoder[uid][self.encoder[uid] != 0]
            self.encoder[uid][self.encoder[uid] == -1] = remove_feature()

            # Defining the encoder that will transform the states used by the regression model to predict the net-electricity consumption
            self.encoder_reg[uid] = []
            state_n = 0
            for s_name, s in self.buildings_states_actions[uid][
                    'states'].items():
                if not s:
                    self.encoder_reg[uid].append(0)
                elif s_name in ["month", "hour"]:
                    self.encoder_reg[uid].append(
                        periodic_normalization(
                            self.observation_spaces[uid].high[state_n]))
                    state_n += 1
                elif s_name in [
                        "t_out_pred_6h", "t_out_pred_12h", "t_out_pred_24h",
                        "rh_out_pred_6h", "rh_out_pred_12h", "rh_out_pred_24h",
                        "diffuse_solar_rad_pred_6h",
                        "diffuse_solar_rad_pred_12h",
                        "diffuse_solar_rad_pred_24h",
                        "direct_solar_rad_pred_6h",
                        "direct_solar_rad_pred_12h",
                        "direct_solar_rad_pred_24h"
                ]:
                    self.encoder_reg[uid].append(remove_feature())
                    state_n += 1
                else:
                    self.encoder_reg[uid].append(no_normalization())
                    state_n += 1

            self.encoder_reg[uid] = np.array(self.encoder_reg[uid])

            # If there is no solar PV installed, remove solar radiation variables
            if building_info[uid]['solar_power_capacity (kW)'] == 0:
                for k in range(12, 20):
                    if self.encoder_reg[uid][k] != 0:
                        self.encoder_reg[uid][k] = -1
                if self.encoder_reg[uid][24] != 0:
                    self.encoder_reg[uid][24] = -1
            if building_info[uid][
                    'Annual_DHW_demand (kWh)'] == 0 and self.encoder_reg[uid][
                        26] != 0:
                self.encoder_reg[uid][26] = -1
            if building_info[uid][
                    'Annual_cooling_demand (kWh)'] == 0 and self.encoder_reg[
                        uid][25] != 0:
                self.encoder_reg[uid][25] = -1
            if building_info[uid][
                    'Annual_nonshiftable_electrical_demand (kWh)'] == 0 and self.encoder_reg[
                        uid][23] != 0:
                self.encoder_reg[uid][23] = -1

            self.encoder_reg[uid] = self.encoder_reg[uid][
                self.encoder_reg[uid] != 0]
            self.encoder_reg[uid][self.encoder_reg[uid] ==
                                  -1] = remove_feature()

            # PCA will reduce the number of dimensions of the state space to 2/3 of its original size
            # if self.information_sharing:
            #     state_dim = int((pca_compression)*(2 + len([j for j in np.hstack(self.encoder[uid]*np.ones(len(self.observation_spaces[uid].low))) if j != None])))
            '''
            alert!!!!!
            '''
            #state_dim = int((pca_compression)*(len([j for j in np.hstack(self.encoder[uid]*np.ones(len(self.observation_spaces[uid].low))) if j != None])))
            '''
            alert!!!!!
            '''
            #action_dim = self.action_spaces[uid].shape[0]
            self.alpha[uid] = 0.2
            '''
            alert!!!!!
            '''
            #self.pca[uid] = PCA(n_components = state_dim)

            self.replay_buffer[uid] = ReplayBuffer(int(replay_buffer_capacity))

            # information sharing
            # self.reg_buffer[uid] = RegressionBuffer(int(regression_buffer_capacity))
            '''
            alert policy
            '''

            # init networks for each policy (state_dim and action_dim are expected
            # to be defined by the lines commented out above)
            self.soft_q_net1[uid] = SoftQNetwork(state_dim, action_dim,
                                                 hidden_dim).to(self.device)
            self.soft_q_net2[uid] = SoftQNetwork(state_dim, action_dim,
                                                 hidden_dim).to(self.device)

            self.target_soft_q_net1[uid] = SoftQNetwork(
                state_dim, action_dim, hidden_dim).to(self.device)
            self.target_soft_q_net2[uid] = SoftQNetwork(
                state_dim, action_dim, hidden_dim).to(self.device)

            for target_param, param in zip(
                    self.target_soft_q_net1[uid].parameters(),
                    self.soft_q_net1[uid].parameters()):
                target_param.data.copy_(param.data)

            for target_param, param in zip(
                    self.target_soft_q_net2[uid].parameters(),
                    self.soft_q_net2[uid].parameters()):
                target_param.data.copy_(param.data)

            # Policy
            self.policy_net[uid] = PolicyNetwork(state_dim, action_dim,
                                                 self.action_spaces[uid],
                                                 self.action_scaling_coef,
                                                 hidden_dim).to(self.device)
            self.soft_q_optimizer1[uid] = optim.Adam(
                self.soft_q_net1[uid].parameters(), lr=lr)
            self.soft_q_optimizer2[uid] = optim.Adam(
                self.soft_q_net2[uid].parameters(), lr=lr)
            self.policy_optimizer[uid] = optim.Adam(
                self.policy_net[uid].parameters(), lr=lr)
            self.target_entropy[uid] = -np.prod(
                self.action_spaces[uid].shape).item()
            self.log_alpha[uid] = torch.zeros(1,
                                              requires_grad=True,
                                              device=self.device)
            self.alpha_optimizer[uid] = optim.Adam([self.log_alpha[uid]],
                                                   lr=lr)
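The tau argument above is a soft-update coefficient; the update itself is not part of this snippet, but in SAC it is conventionally applied as a Polyak average of the target Q-network parameters after each gradient step. A minimal sketch of that step, under that assumption:

# Polyak (soft) update: theta_target <- tau * theta + (1 - tau) * theta_target
def soft_update(target_net, source_net, tau):
    for target_param, param in zip(target_net.parameters(),
                                   source_net.parameters()):
        target_param.data.copy_(tau * param.data + (1.0 - tau) * target_param.data)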