Example #1
    def test_infer_operating_list_to_chunks_static(self):
        import staticGO
        appliance_truth, ps, states_list, centers, state_r3_list, appliance_consumtion = \
            staticGO.staticGo(self.datastore, self.datarange,
                              home=self.home, unknown=self.unknown)
        print(datetime.datetime.now())
        self.inference = Inference(total_ps=ps,
                                   states_list=states_list,
                                   centers_list=centers)
        infer_result = self.inference.infer_operating_list_to_chunks_static(
            state_r3_list=state_r3_list)
        infer_result.get_estimated_ps_dict()
        print(appliance_consumtion)
        performance = Performance(infer_result=infer_result,
                                  appliance_truth=appliance_truth)
        df1, df2 = performance.dissagga_metric2()
        error = performance.dissagga_metric()
        print(1 - error)
        print(self.inference.para)
        print()
        print(datetime.datetime.now())
        print()


# t=TestInference()
# t.setUp()
# t.test_infer_operating_list_to_chunks()
def test_find_parameter():
    """Example based testing of self.find_parameter().
    """
    x0 = [0.5, 0.5]
    Lambda = 0.1
    parameters = np.array([0.5, Lambda])
    times = np.arange(0, 10, 0.1)

    analytical_solution = np.transpose(exponential_growth(x0, Lambda, times))

    model = TestModel()
    numerical_solution = model.simulate(parameters, times)

    # Test that scipy's odeint works as expected.
    assert np.allclose(a=numerical_solution,
                       b=analytical_solution,
                       rtol=1.0e-7)

    # generate data
    noise_std = 0.1

    data_times = times
    data_ys = analytical_solution + noise_std * np.random.normal(
        size=analytical_solution.shape)

    inference = Inference(model, data_times, data_ys)

    estimated_parameters = inference.optimise()

    assert np.allclose(a=estimated_parameters, b=parameters, rtol=5.0e-02)
    return True
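The test above depends on an `exponential_growth` helper and a `TestModel` class that are not shown. A minimal sketch consistent with how they are called in the test (the names, shapes, and parameter layout are inferred, not taken from the original project):

import numpy as np
from scipy.integrate import odeint


def exponential_growth(x0, Lambda, times):
    # x(t) = x0 * exp(Lambda * t); returns shape (len(x0), len(times)),
    # so the test's transpose yields (len(times), len(x0)).
    return np.outer(np.asarray(x0), np.exp(Lambda * np.asarray(times)))


class TestModel:
    def simulate(self, parameters, times):
        # The test passes parameters = [x0, Lambda]; integrate dx/dt = Lambda*x
        # with scipy's odeint from the initial state [x0, x0].
        x0, lam = parameters
        return odeint(lambda x, t: lam * x, [x0, x0], times)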
Example #3
def main(_):
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
    os.environ["CUDA_VISIBLE_DEVICES"] = FLAGS.GPU
    if FLAGS.dataset == "mnist":
        infer._main_inference_mnist(FLAGS)
    elif FLAGS.dataset == "prostate":
        infer._main_inference_prostate(FLAGS)
    else:
        raise Exception("The dataset you specified is not found!")
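FLAGS is not defined in this snippet; with TensorFlow 1.x it would typically come from `tf.app.flags`. A sketch under that assumption (flag names taken from the usage above, defaults invented):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string("GPU", "0", "value for CUDA_VISIBLE_DEVICES")
flags.DEFINE_string("dataset", "mnist", "dataset to run inference on: mnist | prostate")
FLAGS = flags.FLAGS

if __name__ == "__main__":
    tf.app.run()  # parses flags, then calls main(_)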
    def __init__(self, grid_size, discount, robot_state, beta, robot_goal,
                 obs_sizes, true_env):
        self.human = Human(true_env, beta, discount, robot_goal, grid_size)
        robot_env = np.zeros((grid_size[0], grid_size[1]))
        self.robot = Robot(robot_env, robot_state, robot_goal, grid_size,
                           discount)

        robot_action = self.robot.optimal_policy()
        self.infer = Inference(grid_size, discount, robot_state, beta,
                               robot_goal, robot_action, obs_sizes)
 def setUp_dynamic(self):
     # select total power data in a period
     # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xiayurong.h5')
     # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5')
     # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30xusuqian.h5')
     # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30fake.h5')
     pss = []
     # datarange = [pd.Timestamp('2017-12-15 10:00:00'), pd.Timestamp('2017-12-15 12:00:00')]
     appliance_truth = {}
     appliance_consumtion = {}
     for app in self.datastore.appliance_names:
         if (app in ['meter', 'TVbox', 'TV']) and self.home == 'xusuqian':
             if app == 'meter':
                 totalpower = self.datastore.get_instance_ps(
                     appliance_name=app,
                     instance='1').loc[self.datarange[0]:self.datarange[-1]]
             continue
         theps = self.datastore.get_instance_ps(
             appliance_name=app,
             instance='1').loc[self.datarange[0]:self.datarange[-1]]
         appliance_truth[app] = theps
         pss.append(theps)
         appliance_consumtion[app] = Tools.ps_consumption(theps=theps)
         # if(app=='lamp'):
         #     print()
     knownps = aggregate_with_resample(pss)
     appliance_truth['unknown'] = totalpower - knownps
     appliance_consumtion['unknown'] = Tools.ps_consumption(
         theps=appliance_truth['unknown'])
     ps = totalpower
     cluster = Clustering()
     print('miaomiaomiao?')
     del pss
     # ps.plot()
     # ps=median_filter(ps=ps)
     # ps.plot()
     # plt.show()
     # Get states_list
     from readData.getdistributions import getDistribitions
     self.appliance_truth = appliance_truth
     # centers_list, states_list = getDistribitions(ps=ps, redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5',
     #                                              center_path='D:\SJTU\湖北项目\数据\ori\\xusuqian')
     # centers_list, states_list = getDistribitions(ps=ps)
     centers_list, states_list = getDistribitions(
         ps=ps,
         redd_hdf5_path='/home/uftp/hubei/30%s.h5' % self.home,
         center_path='/home/uftp/hubei/ori/%s' % self.home,
         load=False)
     self.appliance_consumtion = appliance_consumtion
     self.inference = Inference(total_ps=ps, states_list=states_list)
Example #6
 def build_infer(self):
     self.show_str.set(" \n\n\n!!!!! ")  #clear old txt
     #self.show_str.set("Please wait....building model")#show txt
     self.vb_dict = self.generate_variable_dict()  # cannot skip
     modelName = self.vb_dict["Model"]
     backend = self.vb_dict["Backend"]
     device = self.vb_dict["Device"]
     is_chg = not (self.modelName == modelName and self.backend == backend
                   and self.device == device)
     if (self.infer is None) or (is_chg):
         self.infer = Inference(modelName=modelName,
                                backend=backend,
                                device=device)
         self.modelName = modelName
         self.backend = backend
         self.device = device
Example #7
	def load (self):
		"""
			PUBLIC: load 
			------------
			loads in all parameters 
		"""
		#=====[ Step 1: load in semantic analysis	]=====
		print_status ("Initialization", "Loading ML parameters (Begin)")
		self.semantic_analysis.load ()
		print_status ("Initialization", "Loading ML parameters (End)")		

		#=====[ Step 2: transfer over models to inference	]=====
		print_status ("Initialization", "Constructing Inference instance (Begin)")
		self.inference = Inference (self.semantic_analysis.lda_model, self.semantic_analysis.lda_model_topics)
		print_status ("Initialization", "Constructing Inference instance (End)")
def index():
    # Main page
    if request.method == 'POST' and 'data' in request.files:
        for filename in request.files.getlist('data'):
            datas.save(filename)
        dataset = DataProcess(os.getcwd() + "/base_test_sum.csv",
                              os.getcwd() + "/knowledge_test_sum.csv",
                              os.getcwd() + "/money_report_test_sum.csv",
                              os.getcwd() + "/year_report_test_sum.csv")
        #print(Inference(dataset))
        Inference(dataset).to_csv(os.getcwd() + "/result.csv",
                                  encoding="utf-8",
                                  index=False)

    return render_template('index.html')
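The `datas` object that files are saved into above is not shown; with Flask-Uploads it would plausibly be an `UploadSet`. A sketch under that assumption (the upload-set name and destination are invented):

import os
from flask import Flask
from flask_uploads import UploadSet, configure_uploads, DATA

app = Flask(__name__)
datas = UploadSet('data', DATA)  # DATA covers csv/json/xml/yaml/...
app.config['UPLOADED_DATA_DEST'] = os.getcwd()
configure_uploads(app, datas)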
Example #9
from flask import Flask, request, jsonify, abort
from Inference import Inference


app = Flask(__name__)
api = Inference()


@app.route('/')
def index():
    return "Our Very deep Model is ALIVE"


@app.route('/predict/<name>', methods=['GET', 'POST'])
def prediction(name):
    receiver = request.get_json()
    print(receiver)
    print()
    data_json = api.predict(name, receiver['Base64Image'])
    return jsonify({'Base64Image': data_json})


if __name__ == "__main__":
    app.run()

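A hypothetical client for the `/predict/<name>` route above, assuming the server runs locally on Flask's default port and that `mosaic` is a valid style name:

import base64
import requests

with open("input.jpg", "rb") as f:
    payload = {"Base64Image": base64.b64encode(f.read()).decode("ascii")}

resp = requests.post("http://localhost:5000/predict/mosaic", json=payload)
print(resp.json()["Base64Image"][:60], "...")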
class Navigation:
    def __init__(self, grid_size, discount, robot_state, beta, robot_goal,
                 obs_sizes, true_env):
        self.human = Human(true_env, beta, discount, robot_goal, grid_size)
        robot_env = np.zeros((grid_size[0], grid_size[1]))
        self.robot = Robot(robot_env, robot_state, robot_goal, grid_size,
                           discount)

        robot_action = self.robot.optimal_policy()
        self.infer = Inference(grid_size, discount, robot_state, beta,
                               robot_goal, robot_action, obs_sizes)

        # dstb = self.infer.prior
        # sorted_ind = np.argsort(dstb)
        # highest_theta_ind = sorted_ind[-1]
        # highest_theta = self.infer.thetas[highest_theta_ind]
        # robot_env = highest_theta

    def full_pipeline(self):
        dstb = self.infer.prior
        self.infer.visualizations(dstb)
        while self.robot.state != self.robot.goal:
            print(self.robot.environment)
            # Get the robot's next action as predicted by the human
            robot_action = self.robot.optimal_policy()
            self.infer.robot_action = robot_action

            # Human gives correction
            policy_index = self.human.give_correction(self.robot.state,
                                                      robot_action)
            if policy_index != 8:
                print("Human gave a correction! The correction was",
                      policy_index, ", the robot state was", self.robot.state,
                      ", and the robot action was", robot_action)
            else:
                print("Human gave no correction. The robot state was",
                      self.robot.state, ", and the robot action was",
                      robot_action)
            dstb, time = self.infer.exact_inference(policy_index)
            print("Inference took time", time)
            sorted_ind = np.argsort(dstb)
            highest_theta_ind = sorted_ind[-1]
            highest_theta = self.infer.thetas[highest_theta_ind]

            # The robot's new environment is the MAP of the inference
            self.robot.update_env(highest_theta)

            # theta, time = self.robot.update_theta(policy_index, robot_action)
            # self.robot.update_env(theta)
            # print(time)

            # Robot "replans"
            self.robot.move(policy_index)
            self.infer.robot_state = self.robot.state

    def full_pipeline_qmdp(self):
        dstb = self.infer.prior
        self.infer.visualizations(dstb)
        while self.robot.state != self.robot.goal:
            # Get the robot's next action as predicted by the human
            robot_action = self.robot.optimal_policy_qmdp(
                dstb, self.infer.thetas)
            self.infer.robot_action = robot_action

            # Human gives correction
            policy_index = self.human.give_correction(self.robot.state,
                                                      robot_action)
            if policy_index != 8:
                print("Human gave a correction! The correction was",
                      policy_index, ", the robot state was", self.robot.state,
                      ", and the robot action was", robot_action)
            else:
                print("Human gave no correction. The robot state was",
                      self.robot.state, ", and the robot action was",
                      robot_action)
            dstb, time = self.infer.exact_inference(policy_index)
            print("Inference took time", time)

            sorted_ind = np.argsort(dstb)
            highest_theta_ind = sorted_ind[-1]
            highest_theta = self.infer.thetas[highest_theta_ind]
            self.robot.update_env(highest_theta)

            self.robot.move_qmdp(policy_index, dstb, self.infer.thetas)
            self.infer.robot_state = self.robot.state

    def update_goal(self, goal):
        self.human.update_goal(goal)
        self.robot.update_goal(goal)
        self.infer.update_thetas(goal)

    def update_robot_state(self, state):
        self.robot.state = state
        self.infer.robot_state = state
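A hypothetical driver for the `Navigation` class above, reusing the grid parameters from the standalone `Inference` example further down; the `true_env` value is an assumption, since only its role as the human's environment is visible here:

import numpy as np

nav = Navigation(grid_size=[5, 5], discount=0.9, robot_state=[1, 1], beta=10,
                 robot_goal=[2, 4], obs_sizes=[[1, 1], [2, 2]],
                 true_env=np.zeros((5, 5)))
nav.full_pipeline()         # MAP-based replanning loop
# nav.full_pipeline_qmdp()  # or the QMDP variant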
Example #11
	# so.semantic_analysis.save ()

	#=====[ Step 3: load semantic analysis models	]=====
	print_header ("Demo Script - Loading semantic analysis models")	
	so.semantic_analysis.load ()

	#=====[ Step 4: load users	]=====
	print_header ("Demo Script - Getting users")	
	# so.get_users ()
	so.load_users ()


	#=====[ Step 3: apply to activity dfs	]=====
	print_header ("Demo Script - Performing semantic analysis on activities")
	for adf in so.storage_delegate.iter_activity_dfs ():

		#=====[ Semantic analysis on adf	]=====
		adf = so.semantic_analysis.add_semantic_summary (adf, 'name')

		#=====[ Construct inference	]=====
		inf = Inference (so.semantic_analysis.lda_model, so.semantic_analysis.lda_model_topic_dist)

		#=====[ Recommend	]=====
	print(inf.recommend(so.u_df.iloc[3], adf[:100], 'all_event_names_LDA', 'name', 'all_event_names_W2V', 'name_W2V'))

Example #12
    # Path to label map file.
    PATH_TO_LABELS = os.path.join(CWD_PATH, 'data', 'mscoco_label_map.pbtxt')

    # Number of classes the object detector can identify.
    NUM_CLASSES = 90

    # Load the label map.
    # Label maps map indices to category names, so that when the convolutional
    # network predicts `5`, we know that this corresponds to `airplane`.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=NUM_CLASSES, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)

    inference = Inference()
    sess = inference.get_model_session(PATH_TO_CKPT)

    # Input tensor is the image.
    image_tensor = inference.get_input_tensor()

    # Output tensors are the detection boxes, scores, and classes.
    detection_boxes = inference.get_output_tensor()

    # Each score represents level of confidence for each of the objects.
    detection_scores = inference.get_model_detection_scores()
    detection_classes = inference.get_model_detection_classes()

    # Number of objects detected.
    num_detections = inference.get_model_detected_objects()
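A single-image detection run continuing the snippet above, assuming the fetched tensors follow the TensorFlow Object Detection API conventions (batched uint8 image input; `test.jpg` is a placeholder path):

import cv2
import numpy as np

image = cv2.imread('test.jpg')
image_expanded = np.expand_dims(image, axis=0)  # add batch dimension

(boxes, scores, classes, num) = sess.run(
    [detection_boxes, detection_scores, detection_classes, num_detections],
    feed_dict={image_tensor: image_expanded})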
import matplotlib.pyplot as plt
from Pde_solver import Solver
from Inference import Inference

## Initiate PDE solver class
solv = Solver(n_save_frames=20, n_time_points=4000, model='gray-scott')

## Set F & k parameters and solve
## spots: F=0.035, k=0.065
## maze-like: F=0.035, k=0.06

tmp = solv.solve(parameters=[0.035, 0.060])  # parameters = [F, k]

inference = Inference(
    solv, solv.save_times,
    solv.save_u_mat.reshape(solv.n_save_frames, solv.n_x * solv.n_y))
inference.optimise()
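The `matplotlib` import at the top of this script is otherwise unused; a plausible final step is to visualize the last saved frame (the frame layout is inferred from the reshape passed to `Inference` above):

final_u = solv.save_u_mat.reshape(solv.n_save_frames, -1)[-1]
plt.imshow(final_u.reshape(solv.n_x, solv.n_y), cmap='viridis')
plt.title('Gray-Scott u field, final saved frame')
plt.colorbar()
plt.show()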
Example #14
class TestInference(TestCase):
    def setUp(self):
        self.unknown = True
        self.home = 'xiayurong'
        self.datastore = Data_store(
            redd_hdf5_path='/home/uftp/hubei/4test/30%s.h5' % self.home)
        self.datarange = [
            pd.Timestamp('2017-12-10 00:00:00'),
            pd.Timestamp('2017-12-17 00:00:00')
        ]
        print('')

    def setUp_dynamic(self):
        # select total power data in a period
        # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xiayurong.h5')
        # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5')
        # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30xusuqian.h5')
        # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30fake.h5')
        pss = []
        # datarange = [pd.Timestamp('2017-12-15 10:00:00'), pd.Timestamp('2017-12-15 12:00:00')]
        appliance_truth = {}
        appliance_consumtion = {}
        if self.unknown:
            for app in self.datastore.appliance_names:
                if (app in ['TVbox', 'TV']) and self.home == 'xusuqian':
                    continue
                if (app in ['lamp', 'TV']) and self.home == 'xiayurong':
                    continue
                if (app in ['sterilizer', 'iron', 'kitchen', 'TV'
                            ]) and self.home == 'zhouqi':
                    continue
                if (app == 'meter'):
                    for key in self.datastore.keys_dict[app]:
                        meterdata = self.datastore.get_instance_ps(
                            appliance_name=app, instance=key
                        ).loc[self.datarange[0]:self.datarange[-1]]
                        try:
                            ps += meterdata
                        except NameError:  # ps is undefined for the first meter instance
                            ps = meterdata
                    continue
                for key in self.datastore.keys_dict[app]:
                    theps = self.datastore.get_instance_ps(
                        appliance_name=app,
                        instance=key).loc[self.datarange[0]:self.datarange[-1]]
                    appliance_truth[app + '_' + key] = theps
                    appliance_consumtion[app + '_' +
                                         key] = Tools.ps_consumption(
                                             theps=theps)
        else:
            for app in self.datastore.appliance_names:
                if (app in ['TVbox', 'TV']) and self.home == 'xusuqian':
                    continue
                if (app in ['lamp', 'TV']) and self.home == 'xiayurong':
                    continue
                if (app in ['sterilizer', 'iron', 'kitchen', 'TV'
                            ]) and self.home == 'zhouqi':
                    continue
                if (app in ['meter', 'unknown']): continue
                for key in self.datastore.keys_dict[app]:
                    theps = self.datastore.get_instance_ps(
                        appliance_name=app,
                        instance=key).loc[self.datarange[0]:self.datarange[-1]]
                    appliance_truth[app + '_' + key] = theps
                    appliance_consumtion[app + '_' +
                                         key] = Tools.ps_consumption(
                                             theps=theps)
                    pss.append(theps)
            ps = aggregate_with_resample(pss)
            del pss
        # ps.plot()
        # ps=median_filter(ps=ps)
        # ps.plot()
        # plt.show()
        # Get states_list
        from readData.getdistributions import getDistribitions
        self.appliance_truth = appliance_truth
        # centers_list, states_list = getDistribitions(ps=ps, redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5',
        #                                              center_path='D:\SJTU\湖北项目\数据\ori\\xusuqian')
        # centers_list, states_list = getDistribitions(ps=ps)
        centers_list, states_list = getDistribitions(
            ps=ps,
            redd_hdf5_path='/home/uftp/hubei/30%s.h5' % self.home,
            center_path='/home/uftp/hubei/ori/%s' % self.home,
            load=False)
        self.appliance_consumtion = appliance_consumtion
        self.inference = Inference(total_ps=ps, states_list=states_list)

    def test_infer_operating_list_to_chunks(self):
        self.setUp_dynamic()
        infer_result = self.inference.infer_operating_list_to_chunks_dynamic()
        infer_result.get_estimated_ps_dict()
        print(self.appliance_consumtion)
        performance = Performance(infer_result=infer_result,
                                  appliance_truth=self.appliance_truth)
        error = performance.dissagga_metric()
        print(1 - error)
        print(Parameters())
        print()

    def test_infer_operating_list_to_chunks_static(self):
        import staticGO
        appliance_truth, ps, states_list, centers, state_r3_list, appliance_consumtion = \
            staticGO.staticGo(self.datastore, self.datarange,
                              home=self.home, unknown=self.unknown)
        print(datetime.datetime.now())
        self.inference = Inference(total_ps=ps,
                                   states_list=states_list,
                                   centers_list=centers)
        infer_result = self.inference.infer_operating_list_to_chunks_static(
            state_r3_list=state_r3_list)
        infer_result.get_estimated_ps_dict()
        print(appliance_consumtion)
        performance = Performance(infer_result=infer_result,
                                  appliance_truth=appliance_truth)
        df1, df2 = performance.dissagga_metric2()
        error = performance.dissagga_metric()
        print(1 - error)
        print(self.inference.para)
        print()
        print(datetime.datetime.now())
        print()


# t=TestInference()
# t.setUp()
# t.test_infer_operating_list_to_chunks()
Example #15
class SpotOn:

	def __init__ (self):
		"""
			PUBLIC: Constructor
			-------------------
			constructs member objects
		"""
		#=====[ Step 1: create member objects	]=====
		self.preprocess = Preprocess ()
		self.storage_delegate = StorageDelegate ()
		self.semantic_analysis = SemanticAnalysis ()
		self.user_analysis = UserAnalysis ()
		self.inference = None


	def load (self):
		"""
			PUBLIC: load 
			------------
			loads in all parameters 
		"""
		#=====[ Step 1: load in semantic analysis	]=====
		print_status ("Initialization", "Loading ML parameters (Begin)")
		self.semantic_analysis.load ()
		print_status ("Initialization", "Loading ML parameters (End)")		

		#=====[ Step 2: transfer over models to inference	]=====
		print_status ("Initialization", "Constructing Inference instance (Begin)")
		self.inference = Inference (self.semantic_analysis.lda_model, self.semantic_analysis.lda_model_topics)
		print_status ("Initialization", "Constructing Inference instance (End)")


	####################################################################################################
	######################[ --- Getting Users --- ]#####################################################
	####################################################################################################

	def get_users (self):
		"""
			PUBLIC: get_users
			-----------------
			constructs self.u_df from all available 
			calendar dataframes 
		"""
		self.u_df = self.user_analysis.extract_users (self.storage_delegate.iter_calendar_dfs)
		self.u_df = self.semantic_analysis.analyze (self.u_df, 'all_event_names')


	def load_users (self, filepath='../data/pandas/users/users.df'):
		"""
			PUBLIC: load_users
			------------------
			constructs self.u_df from a saved file
		"""
		self.u_df = pd.read_pickle(filepath)

	
	####################################################################################################
	######################[ --- Training  --- ]#########################################################
	####################################################################################################

	def extract_text (self, activity_row):
		"""
			PRIVATE: extract_text
			---------------------
			given a row representing an activity, this returns 
			a list of words representing it as a 'text'
		"""
		text = []
		if type(activity_row['name']) == list:
			text += activity_row['name']
		if type(activity_row['words']) == list:
			text += activity_row['words']
		return text

	def get_corpus_dictionary (self):
		"""
			PRIVATE: get_corpus_dictionary
			------------------------------
			Assembles a gensim corpus and dictionary from activities_df,
			where each text is name || words.
		"""
		#=====[ Step 1: iterate through all activity dataframes	]=====
		print_status ("get_corpus", "assembling texts")
		texts = []
		for df in self.storage_delegate.iter_activity_dfs ():
			print_inner_status ("assembling texts", "next df")
			texts += list(df.apply(self.extract_text, axis=1))

		#=====[ Step 2: get dictionary	]=====
		print_status ("get_corpus", "assembling dictionary")
		dictionary = gensim.corpora.Dictionary(texts)

		#=====[ Step 3: get corpus	]=====
		print_status ("get_corpus", "assembling corpus")		
		corpus = [dictionary.doc2bow (text) for text in texts]

		return corpus, dictionary



	def train_semantic_analysis (self):
		"""
			PUBLIC: train_semantic_analysis
			-------------------------------
			finds parameters for self.semantic_analysis
		"""
		#=====[ Step 1: get the corpus	]=====
		print_status ("train_semantic_analysis", "getting corpus/dictionary")
		corpus, dictionary = self.get_corpus_dictionary ()

		#=====[ Step 2: train ]=====
		print_status ("train_semantic_analysis", "training semantic analysis")
		self.semantic_analysis.train (corpus, dictionary)


	####################################################################################################
	######################[ --- Inference --- ]#########################################################
	####################################################################################################

	def score_activities_old (self, user_activities, recommend_activities):
		"""
			PUBLIC: score_activities
			------------------------
			Given a user and a list of activities, both represented as json, this will return 
			(activities, scores) in a sorted list
		"""
		#=====[ Step 1: preprocess json inputs	]=====
		user_events_df = self.preprocess.preprocess_a (user_activities)
		activities_df = self.preprocess.preprocess_a (recommend_activities)

		#=====[ Step 2: construct a user from user_events_df	]=====
		def f():
			yield user_events_df
		users = self.user_analysis.extract_users (f)
		assert len(users) == 1
		user = users.iloc[0]

		#=====[ Step 3: get scores for each one	]=====
		scores = [self.inference.score_match (user, activities_df.iloc[i]) for i in range(len(activities_df))]

		#=====[ Step 4: return sorted list of activity, score	]=====
		return sorted(zip(recommend_activities, scores), key=lambda x: x[1], reverse=True)


	def score_activities (self, user_activities, recommend_activities):
		"""
			PUBLIC: score_activities
			------------------------
			Given a user and a list of activities, both represented as json, this will return 
			(activities, scores) in a sorted list
		"""
		#=====[ Step 1: preprocess user_activities and recommend_activities	]=====
		user_activities = self.preprocess.preprocess_a (user_activities)
		# print len(recommend_activities)
		recommend_activities = self.preprocess.preprocess_a (recommend_activities)
		# print len(recommend_activities)

		#=====[ Step 2: get scores for each one	]=====
		scores,act = self.inference.score_activities (user_activities, recommend_activities)
		return scores,act


	####################################################################################################
	######################[ --- Interface --- ]#########################################################
	####################################################################################################

	def print_lda_topics (self):
		"""
			PUBLIC: print_lda_topics
			------------------------
			prints out a representation of the lda topics found in self.semantic_analysis
		"""
		self.semantic_analysis.print_lda_topics ()
Example #16
def main():
    '''main program'''
    Inference.promptQuestion()
    Inference.answerQuestion()
class TestInference(TestCase):
    def setUp(self):
        self.home = 'xusuqian'
        self.datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30%s.h5' %
                                    self.home)
        self.datarange = [
            pd.Timestamp('2017-12-2 00:00:00'),
            pd.Timestamp('2018-1-1 00:00:00')
        ]
        print('')

    def setUp_dynamic(self):
        # select total power data in a period
        # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xiayurong.h5')
        # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5')
        # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30xusuqian.h5')
        # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30fake.h5')
        pss = []
        # datarange = [pd.Timestamp('2017-12-15 10:00:00'), pd.Timestamp('2017-12-15 12:00:00')]
        appliance_truth = {}
        appliance_consumtion = {}
        for app in self.datastore.appliance_names:
            if (app in ['meter', 'TVbox', 'TV']) and self.home == 'xusuqian':
                if app == 'meter':
                    totalpower = self.datastore.get_instance_ps(
                        appliance_name=app,
                        instance='1').loc[self.datarange[0]:self.datarange[-1]]
                continue
            theps = self.datastore.get_instance_ps(
                appliance_name=app,
                instance='1').loc[self.datarange[0]:self.datarange[-1]]
            appliance_truth[app] = theps
            pss.append(theps)
            appliance_consumtion[app] = Tools.ps_consumption(theps=theps)
            # if(app=='lamp'):
            #     print()
        knownps = aggregate_with_resample(pss)
        appliance_truth['unknown'] = totalpower - knownps
        appliance_consumtion['unknown'] = Tools.ps_consumption(
            theps=appliance_truth['unknown'])
        ps = totalpower
        cluster = Clustering()
        print('miaomiaomiao?')
        del pss
        # ps.plot()
        # ps=median_filter(ps=ps)
        # ps.plot()
        # plt.show()
        # Get states_list
        from readData.getdistributions import getDistribitions
        self.appliance_truth = appliance_truth
        # centers_list, states_list = getDistribitions(ps=ps, redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5',
        #                                              center_path='D:\SJTU\湖北项目\数据\ori\\xusuqian')
        # centers_list, states_list = getDistribitions(ps=ps)
        centers_list, states_list = getDistribitions(
            ps=ps,
            redd_hdf5_path='/home/uftp/hubei/30%s.h5' % self.home,
            center_path='/home/uftp/hubei/ori/%s' % self.home,
            load=False)
        self.appliance_consumtion = appliance_consumtion
        self.inference = Inference(total_ps=ps, states_list=states_list)

    def test_infer_operating_list_to_chunks(self):
        self.setUp_dynamic()
        infer_result = self.inference.infer_operating_list_to_chunks_dynamic()
        infer_result.get_estimated_ps_dict()
        print(self.appliance_consumtion)
        performance = Performance(infer_result=infer_result,
                                  appliance_truth=self.appliance_truth)
        error = performance.dissagga_metric()
        print(1 - error)
        print(Parameters())
        print()

    def test_infer_operating_list_to_chunks_static(self):
        import staticGO
        appliance_truth, ps, states_list, centers, state_r3_list, appliance_consumtion = staticGO.staticGo(
            self.datastore, self.datarange)
        self.inference = Inference(total_ps=ps,
                                   states_list=states_list,
                                   centers_list=centers)
        infer_result = self.inference.infer_operating_list_to_chunks_static(
            state_r3_list=state_r3_list)
        infer_result.get_estimated_ps_dict()
        print(appliance_consumtion)
        performance = Performance(infer_result=infer_result,
                                  appliance_truth=appliance_truth)
        df1, df2 = performance.dissagga_metric2()
        error = performance.dissagga_metric()
        print(1 - error)
        print(self.inference.para)
        print()


# t=TestInference()
# t.setUp()
# t.test_infer_operating_list_to_chunks()
Example #18
    var = Variable(varName, shapes, varValue)
    var.fuzzify()
    print("membership for var '" + varName + "' : ")
    print(var.memberships)
    variables[varName] = var

# take output variable
outputVarName = input()
outShapes = {}
for i in range(int(input())):
    addShape(outShapes)
outputVariavble = Variable(outputVarName, outShapes)

print("-----------------------------------")
print("Inference Rules outputs:")
inference = Inference(variables)
rulesOut = []
for i in range(int(input())):
    tupl = inference.doInference(input())
    rulesOut.append(tupl)
    print("Rule " + str(i + 1) + ": " + str(tupl))

print("-----------------------------------")
print("Defuzzification:")

sumValues = 0
sumVales_shapes = 0
for val_shape in rulesOut:
    print(val_shape[1])
    print(outputVariavble.shapes[val_shape[1]])
    centroid = outputVariavble.shapes[val_shape[1]].calculate_centroid()
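    # The original snippet is truncated here. A standard weighted-centroid
    # defuzzification (an assumed completion; val_shape[0] is taken to be the
    # rule's firing strength) accumulates:
    sumValues += val_shape[0] * centroid
    sumVales_shapes += val_shape[0]

print("Defuzzified output: " + str(sumValues / sumVales_shapes))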
X_test_tfidf_l2 = data["X_test_tfidf_l2"]
Y_test = data["Y_test"]
paths_test = data["paths_test"]
classes = data["classes"]
kmeans = data["kmeans"]

#Step 8: create Classifier object and call its various methods
classifier = Classifier()
k = 1
while k <= 5:
    classifier.knn(k, X_training_tfidf_l2, Y_training, X_test_tfidf_l2, Y_test)
    k += 2
classifier.naiveBayes(X_training_tfidf_l2, Y_training, X_test_tfidf_l2, Y_test)
classifier.logisticRegression(X_training_tfidf_l2, Y_training, X_test_tfidf_l2,
                              Y_test)

#Step 9: inference on new image
img = "./sample_0.jpg"
img = sio.imread(img)
plt.figure()
plt.imshow(img)
plt.show()
bovw_representation = dataset.extractAndDescribe(img, kmeans)
inference = Inference(bovw_representation.reshape(1, -1))
k = 1
while k <= 5:
    inference.knnInference(k)
    k += 2
inference.NaiveBayesInference()
inference.LogisticRegressionInference()
Example #20
class SpotOn:

	def __init__ (self):
		"""
			PUBLIC: Constructor
			-------------------
			constructs member objects
		"""
		#=====[ Step 1: create member objects	]=====
		self.preprocess = Preprocess ()
		self.storage_delegate = StorageDelegate ()
		self.semantic_analysis = SemanticAnalysis ()
		self.user_analysis = UserAnalysis ()
		self.inference = None
		self.activities_corpus = None


	def load (self):
		"""
			PUBLIC: load 
			------------
			loads in all parameters 
		"""
		#=====[ Step 1: load in semantic analysis	]=====
		print_status ("Initialization", "Loading ML parameters (Begin)")
		self.semantic_analysis.load ()
		print_status ("Initialization", "Loading ML parameters (End)")		

		#=====[ Step 2: transfer over models to inference	]=====
		print_status ("Initialization", "Constructing Inference instance (Begin)")
		self.inference = Inference (self.semantic_analysis.lda_model, self.semantic_analysis.lda_model_topics)
		print_status ("Initialization", "Constructing Inference instance (End)")


	####################################################################################################
	######################[ --- Getting Users --- ]#####################################################
	####################################################################################################

	def get_users (self):
		"""
			PUBLIC: get_users
			-----------------
			constructs self.u_df from all available 
			calendar dataframes 
		"""
		self.u_df = self.user_analysis.extract_users (self.storage_delegate.iter_calendar_dfs)
		self.u_df = self.semantic_analysis.analyze (self.u_df, 'all_event_names')


	def load_users (self, filepath='../data/pandas/users/users.df'):
		"""
			PUBLIC: load_users
			------------------
			constructs self.u_df from a saved file
		"""
		self.u_df = pd.read_pickle(filepath)


	####################################################################################################
	######################[ --- Training  --- ]#########################################################
	####################################################################################################

	def extract_text (self, activity_row):
		"""
			PRIVATE: extract_text
			---------------------
			given a row representing an activity, this returns 
			a list of words representing it as a 'text'
		"""
		text = []
		if type(activity_row['name']) == list:
			text += activity_row['name']
		if type(activity_row['words']) == list:
			text += activity_row['words']
		return text


	def get_corpus_dictionary (self):
		"""
			PRIVATE: get_corpus_dictionary
			------------------------------
			Assembles a gensim corpus and dictionary from activities_df,
			where each text is name || words.
		"""
		#=====[ Step 1: iterate through all activity dataframes	]=====
		print_status ("get_corpus", "assembling texts")
		documents = []
		for df in self.storage_delegate.iter_activity_dfs ():
			df['lda_doc'] = df['name'] + df['words']
			documents += list(df['lda_doc'])

		#=====[ Step 2: get dictionary	]=====
		print_status ("get_corpus", "assembling dictionary")
		dictionary = gensim.corpora.Dictionary(documents)

		#=====[ Step 3: get corpus	]=====
		print_status ("get_corpus", "assembling corpus")		
		corpus = [dictionary.doc2bow (d) for d in documents]

		return corpus, dictionary


	def print_lda_topics (self):
		"""
			PUBLIC: print_lda_topics
			------------------------
			prints out a representation of the lda topics found in self.semantic_analysis
		"""
		print_header ("LDA TOPICS: ")
		self.semantic_analysis.print_lda_topics ()


	def train_semantic_analysis (self):
		"""
			PUBLIC: train_semantic_analysis
			-------------------------------
			finds parameters for self.semantic_analysis
		"""
		#=====[ Step 1: get the corpus	]=====
		print_status ("train_semantic_analysis", "getting corpus/dictionary")
		corpus, dictionary = self.get_corpus_dictionary ()

		#=====[ Step 2: train ]=====
		print_status ("train_semantic_analysis", "training semantic analysis")
		self.semantic_analysis.train (corpus, dictionary)

		#####[ DEBUG: print out lda topics	]#####
		self.print_lda_topics ()




	####################################################################################################
	######################[ --- Processing --- ]########################################################
	####################################################################################################

	def activities_json_to_df (self, a_json):
		"""
			PRIVATE: activities_json_to_df
			------------------------------
			given: list of json dicts representing activities 
			returns: dataframe with preprocessing, semantic analysis
		"""
		a_df = self.preprocess.preprocess_a (a_json)
		a_df = self.semantic_analysis.add_lda_vec_column (a_df)
		a_df = self.semantic_analysis.add_w2v_sum_column (a_df)
		return a_df


	def calendar_events_json_to_df (self, ce_json):
		"""
			PRIVATE: calendar_events_json_to_df
			------------------------------
			given: list of json dicts representing calendar events 
			returns: dataframe with preprocessing, semantic analysis
		"""
		ce_df = self.preprocess.preprocess_ce (ce_json)
		ce_df = self.semantic_analysis.add_lda_vec_column (ce_df)
		ce_df = self.semantic_analysis.add_w2v_sum_column (ce_df)
		return ce_df


	def calendar_events_to_user_representation(self, ce_json):
		"""
			PUBLIC: calendar_events_to_user_representation
			----------------------------------------------
			given a list containing json dicts representing calendar events belonging
			to a single user, this will return a representation that can be passed to 
			score_activity_for_user and recommend_for_user
		"""
		user_df 	= self.calendar_events_json_to_df (ce_json)
		lda_vec 	= self.semantic_analysis.get_user_lda_vec (user_df)
		return {'events_df':user_df, 'lda_vec':lda_vec}


	def load_activities_corpus(self, activities):
		'''
			function: load_activities_corpus
			params: activities - list of activities to recommend

			returns: none
			notes: use this function to load a big activities corpus into the SpotOn object, and later when calling
			recommend_for_user we will pull activities to recommend from this corpus.

			Can be called multiple times to update to different activities
		'''
		self.activities_corpus = self.activities_json_to_df (activities)


	####################################################################################################
	######################[ --- Recommending --- ]######################################################
	####################################################################################################

	def score_activity_for_user(self, user_representation, activity):
		"""
			PUBLIC: score_activity_for_user
			-------------------------------
			params: user_representation - representation of the user to score for
								(created by calendar_events_to_user_representation)
					activity - json of the activity to score

			notes: goes from the representation of the user that you use + one activity 
					-> return a score for how much they'd like it
		"""
		#=====[ Step 1: get activity dataframe 	]=====
		activity_df = self.activities_json_to_df ([activity])

		#=====[ Step 2: get scored dataframe	]=====
		activity_df = self.inference.infer_scores (user_representation, activity_df)

		#=====[ Step 3: extract and return score	]=====
		return activity_df.iloc[0]['score']


	def recommend_for_user(self, user_representation, activities=None, topn=10):
		"""
			PUBLIC: recommend_for_user
			--------------------------
			params: user_representation - representation of the user to recommend for
					activities - either a list of json activities, or None if 
									.load_activities_corpus has been called
					topn - number of recommendations to return
		"""
		#=====[ Step 1: get a_df, df of activities to recommend	]=====
		if activities is not None:
			activities_df = self.activities_json_to_df (activities)
		else:
			if self.activities_corpus is None:
				self.load_activities_corpus ()
			activities_df = self.activities_corpus

		#=====[ Step 2: get scores, return sorted	]=====
		activity_ranks = self.inference.rank_activities (user_representation, activities_df)
		return list(activity_ranks)


	def recommend_users_for_activity(self, activity, list_of_users, topn=10):
		"""
			PUBLIC: recommend_users_for_activities
			--------------------------------------
			params: activity - activity to recommend users for
					list_of_users - list of users to filter
					topn - number of users to return

			notes: goes from an activity and a list of users -> topn users for that activity
		"""
		scores = [self.score_activity_for_user(user, activity) for user in list_of_users]
		sorted_ix = np.argsort(scores)[::-1]
		return [list_of_users[sorted_ix[i]] for i in range(topn)]
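A hypothetical end-to-end use of the class above; `calendar_events_json` and `activities_json` are placeholder inputs, and SpotOn's collaborators (Preprocess, SemanticAnalysis, Inference, ...) are assumed importable:

so = SpotOn()
so.load()  # loads semantic-analysis parameters and builds self.inference
user = so.calendar_events_to_user_representation(calendar_events_json)  # placeholder JSON list
so.load_activities_corpus(activities_json)                              # placeholder JSON list
print(so.recommend_for_user(user, topn=10))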
Example #21
class GUI():

    vb_dict = {}
    infer = None
    modelName, backend, device = "", "", ""

    def __init__(self, master):
        self.master = master
        self.row = 0  #for grid
        self.all_comp = []
        self.get_comp = []
        self.vb_name = []
        self.buildGUI_1()
        #self.show_all_variable()

    # --------Mode interface ---------
    def buildGUI_1(self):  #CNN
        self.master.title('build CNN -- load model')
        self.label_1to1_text_combobox(
            "Model", models, width=50)  # models = ("inception_v3","vgg16"...)
        self.label_1to1_text_entry(name="ImageUrl",
                                   default_text="data/person.jpg",
                                   width=100)
        self.label_1to1_text_combobox("Backend", backends, width=50)
        self.label_1to1_text_combobox("Device", devices, width=50)
        self.label_1to1_text_combobox("SearchSeq", seq_search_TXT, width=100)
        self.vb_dict = self.generate_variable_dict()
        self.button(self.click_show_orig_img, "Show Orig Image")
        self.button(self.click_inference, "Inference")
        self.button(self.click_show_model, "Model Visualization")
        self.button(self.click_search_sequencial_nodes, "Search Nodes")
        self.show_str = StringVar()
        self.textvariable_label(textvariable=self.show_str)

    def click_show_orig_img(self):
        self.vb_dict = self.generate_variable_dict()  # cannot skip
        img_path = self.vb_dict["ImageUrl"]
        self.img_label(path=img_path, title='Original image')

    def click_show_model(self):
        #build infer
        self.build_infer()
        show_str = "\nmodel name = {} \n\nnumber of param = {} \n\nnumber of flops = {}\n\n ".format(
            self.infer.modelName, self.infer.n_param, self.infer.n_flops)
        import webbrowser
        webbrowser.open(self.infer.svgfilepath)  # open <svgfilepath> in web
        self.show_str.set(show_str)  #show txt

    def build_infer(self):
        self.show_str.set(" \n\n\n!!!!! ")  #clear old txt
        #self.show_str.set("Please wait....building model")#show txt
        self.vb_dict = self.generate_variable_dict()  # cannot skip
        modelName = self.vb_dict["Model"]
        backend = self.vb_dict["Backend"]
        device = self.vb_dict["Device"]
        is_chg = not (self.modelName == modelName and self.backend == backend
                      and self.device == device)
        if (self.infer is None) or (is_chg):
            self.infer = Inference(modelName=modelName,
                                   backend=backend,
                                   device=device)
            self.modelName = modelName
            self.backend = backend
            self.device = device

    def click_search_sequencial_nodes(self):
        #build infer
        self.build_infer()
        #seq: ["Conv", "Add", ...]
        self.vb_dict = self.generate_variable_dict()  # cannot skip
        seqTXT = self.vb_dict["SearchSeq"]
        seq = options_for_seq_search[seq_search_TXT.index(seqTXT)]
        #search_n_visualize_sequence
        #self.show_str.set("Please wait....search_sequencial_nodes")#show txt
        show_str, is_match, marked_svgfilepath = self.infer.search_n_visualize_sequence(
            seq)
        self.show_str.set(str(show_str))  #show txt
        if is_match:
            import webbrowser
            webbrowser.open(marked_svgfilepath)  # open <svgfilepath> in web

    def click_inference(self):
        #build infer
        self.build_infer()
        #img
        self.vb_dict = self.generate_variable_dict()  # cannot skip
        imgfile = self.vb_dict["ImageUrl"]
        #predict
        #self.show_str.set("Please wait....predicting")#show txt
        str_, time_cost = self.infer.predict(imgfile=imgfile)
        str_ = "Time cost : {} \n\n".format(time_cost) + str_
        self.show_str.set(str(str_))  #show txt
        #pred img
        if self.infer.is_obj_det:  # if is Object Detect
            self.img_label(path="predictions_samesize.jpg",
                           title='Prediction image')

    # --------Component Combination ---------
    def label_1to1_text_combobox(self,
                                 name="",
                                 values=("1", "2"),
                                 default_Chosen=0,
                                 width=10):
        self.vb_name.append(name.split()[0])
        self.all_comp.append(Label(self.master, text=name))
        #self.all_comp[-1].pack(anchor='w')
        self.all_comp[-1].grid(row=self.row, sticky=E)  #
        self.get_comp.append(StringVar(self.master, values[default_Chosen]))
        self.all_comp.append(
            Combobox(self.master, width=width, textvariable=self.get_comp[-1]))
        #self.all_comp[-1].pack()
        self.all_comp[-1]['values'] = values
        self.all_comp[-1].current(default_Chosen)
        self.all_comp[-1].grid(row=self.row, column=1, sticky=W)  #
        self.row += 1  #

    def label_1to1_float_entry(self, name="", default_float=0.01, width=5):
        self.vb_name.append(name.split()[0])
        self.all_comp.append(Label(self.master, text=name))
        #self.all_comp[-1].pack(anchor='w')
        self.all_comp[-1].grid(row=self.row, sticky=E)  #
        self.get_comp.append(DoubleVar(self.master, default_float))
        self.all_comp.append(
            Entry(self.master, width=width, textvariable=self.get_comp[-1]))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1, sticky=W)  #
        self.row += 1  #

    def label_1to1_int_entry(self, name="", default_int=-1, width=10):
        self.vb_name.append(name.split()[0])
        self.all_comp.append(Label(self.master, text=name))
        #self.all_comp[-1].pack(anchor='w')
        self.all_comp[-1].grid(row=self.row, sticky=E)  #
        self.get_comp.append(IntVar(self.master, default_int))
        self.all_comp.append(
            Entry(self.master, width=width, textvariable=self.get_comp[-1]))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1, sticky=W)  #
        self.row += 1  #

    def label_1to1_text_entry(self, name="", default_text="", width=35):
        self.vb_name.append(name.split()[0])
        self.all_comp.append(Label(self.master, text=name))
        #self.all_comp[-1].pack(anchor='w')
        self.all_comp[-1].grid(row=self.row, sticky=E)  #
        self.get_comp.append(StringVar(self.master, default_text))
        self.all_comp.append(
            Entry(self.master, width=width, textvariable=self.get_comp[-1]))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1, sticky=W)  #
        self.row += 1  #

    def label_1to3_int_entry(self,
                             name="",
                             default_int=[-1, -1, -1],
                             width=10):
        for i in [1, 2, 3]:
            self.vb_name.append(name.split()[0] + str(i))
        self.all_comp.append(Label(self.master, text=name))
        #self.all_comp[-1].pack(anchor='w')
        self.all_comp[-1].grid(row=self.row, sticky=E)  #
        for i, int_ in enumerate(default_int):
            self.get_comp.append(IntVar(self.master, int_))
            self.all_comp.append(
                Entry(self.master, width=width,
                      textvariable=self.get_comp[-1]))
            #self.all_comp[-1].pack()
            self.all_comp[-1].grid(row=self.row, column=i + 1, sticky=W)  #
        self.row += 1

    def label_1to1_bool_checkbutton(self,
                                    name="",
                                    default_bool=True,
                                    default_text="Yes"):
        self.vb_name.append(name.split()[0])
        self.all_comp.append(Label(self.master, text=name))
        #self.all_comp[-1].pack(anchor='w')
        self.all_comp[-1].grid(row=self.row, sticky=E)  #
        self.get_comp.append(BooleanVar(self.master, default_bool))
        self.all_comp.append(
            Checkbutton(self.master,
                        text=default_text,
                        variable=self.get_comp[-1],
                        offvalue=False,
                        onvalue=True))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1, sticky=W)  #
        self.row += 1  #

    def textvariable_label(self, textvariable):
        self.all_comp.append(Label(self.master, textvariable=textvariable))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1)  #
        self.row += 1  #

    def text_label(self, text="OK"):
        self.all_comp.append(Label(self.master, text=text))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1)  #
        self.row += 1  #

    def text_text(self, text="OK"):
        text_ = Text(self.master, width=80, height=10)
        text_.insert(INSERT, text)
        text_.insert(END, "")
        self.all_comp.append(text_)
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1)  #
        self.row += 1  #

    def img_label(self, path="cat1.jpeg", title='image'):
        window = Toplevel()
        #window.geometry('400x400')
        window.title(title)
        img = ImageTk.PhotoImage(Image.open(path))
        label = Label(window, image=img)
        label.pack()
        window.mainloop()

    def button(self, command, text="OK"):
        self.all_comp.append(Button(self.master, text=text, command=command))
        #self.all_comp[-1].pack()
        self.all_comp[-1].grid(row=self.row, column=1)  #
        self.row += 1  #

    # ---Show------------------------------------------------------
    def show_all_variable(self):
        print('show all variable:')
        i = 0
        for c in self.get_comp:
            print('{} = {}'.format(self.vb_name[i], c.get()))
            i += 1

    def generate_variable_dict(self):
        i = 0
        d = {}
        for c in self.get_comp:
            d.update({self.vb_name[i]: c.get()})
            i += 1
        return d
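A minimal launcher for the GUI above, assuming the widget classes come from `tkinter`/`tkinter.ttk` and that module-level globals such as `models`, `backends`, `devices`, and `seq_search_TXT` are defined elsewhere:

from tkinter import Tk

root = Tk()
gui = GUI(root)  # builds the form via buildGUI_1()
root.mainloop()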
Example #22
 def __init__(self, mrf):
     # check if there's any soft evidence to actually work on
     if len(mrf.softEvidence) == 0:
         raise Exception("Application of IPFP-M inappropriate! IPFP-M is a wrapper method for other inference algorithms that allows to fit probability constraints. An application is not sensical if the model contains no such constraints.")
     Inference.__init__(self, mrf)
Example #23
 def setUp_dynamic(self):
     # select total power data in a period
     # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xiayurong.h5')
     # datastore = Data_store(redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5')
     # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30xusuqian.h5')
     # datastore = Data_store(redd_hdf5_path='/home/uftp/hubei/30fake.h5')
     pss = []
     # datarange = [pd.Timestamp('2017-12-15 10:00:00'), pd.Timestamp('2017-12-15 12:00:00')]
     appliance_truth = {}
     appliance_consumtion = {}
     if self.unknown:
         for app in self.datastore.appliance_names:
             if (app in ['TVbox', 'TV']) and self.home == 'xusuqian':
                 continue
             if (app in ['lamp', 'TV']) and self.home == 'xiayurong':
                 continue
             if (app in ['sterilizer', 'iron', 'kitchen', 'TV'
                         ]) and self.home == 'zhouqi':
                 continue
             if (app == 'meter'):
                 for key in self.datastore.keys_dict[app]:
                     meterdata = self.datastore.get_instance_ps(
                         appliance_name=app, instance=key
                     ).loc[self.datarange[0]:self.datarange[-1]]
                     try:
                         ps += meterdata
                     except NameError:  # ps is undefined for the first meter instance
                         ps = meterdata
                 continue
             for key in self.datastore.keys_dict[app]:
                 theps = self.datastore.get_instance_ps(
                     appliance_name=app,
                     instance=key).loc[self.datarange[0]:self.datarange[-1]]
                 appliance_truth[app + '_' + key] = theps
                 appliance_consumtion[app + '_' +
                                      key] = Tools.ps_consumption(
                                          theps=theps)
     else:
         for app in self.datastore.appliance_names:
             if (app in ['TVbox', 'TV']) and self.home == 'xusuqian':
                 continue
             if (app in ['lamp', 'TV']) and self.home == 'xiayurong':
                 continue
             if (app in ['sterilizer', 'iron', 'kitchen', 'TV'
                         ]) and self.home == 'zhouqi':
                 continue
             if (app in ['meter', 'unknown']): continue
             for key in self.datastore.keys_dict[app]:
                 theps = self.datastore.get_instance_ps(
                     appliance_name=app,
                     instance=key).loc[self.datarange[0]:self.datarange[-1]]
                 appliance_truth[app + '_' + key] = theps
                 appliance_consumtion[app + '_' +
                                      key] = Tools.ps_consumption(
                                          theps=theps)
                 pss.append(theps)
         ps = aggregate_with_resample(pss)
         del pss
     # ps.plot()
     # ps=median_filter(ps=ps)
     # ps.plot()
     # plt.show()
     # Get states_list
     from readData.getdistributions import getDistribitions
     self.appliance_truth = appliance_truth
     # centers_list, states_list = getDistribitions(ps=ps, redd_hdf5_path='D:\SJTU\湖北项目\数据\h5s/30xusuqian.h5',
     #                                              center_path='D:\SJTU\湖北项目\数据\ori\\xusuqian')
     # centers_list, states_list = getDistribitions(ps=ps)
     centers_list, states_list = getDistribitions(
         ps=ps,
         redd_hdf5_path='/home/uftp/hubei/30%s.h5' % self.home,
         center_path='/home/uftp/hubei/ori/%s' % self.home,
         load=False)
     self.appliance_consumtion = appliance_consumtion
     self.inference = Inference(total_ps=ps, states_list=states_list)
import numpy as np
from Inference import Inference
import time

grid_size = [5, 5]
obs_sizes = [[1, 1], [2, 2]]

infer = Inference(grid_size,
                  discount=0.9,
                  robot_state=[1, 1],
                  beta=10,
                  robot_goal=[2, 4],
                  robot_action=2,
                  obs_sizes=obs_sizes)

infer.exact_inference(policy_index=0)

# infer = Inference(grid_size, discount=0.9, robot_state=[4, 0], beta=10, robot_goal=[2, 4], robot_action=0, obs_sizes=obs_sizes)
#
# infer.exact_inference(policy_index=2)
#
# infer.robot_state = [4, 1]
# infer.robot_action = 2
#
# infer.exact_inference(policy_index=8)
#
# infer.robot_state = [4, 2]
# infer.robot_action = 2
#
# infer.exact_inference(policy_index=8)
#