Example #1
def test_quadtreeOpt(data, queryShape, all_queries):
    global method_list, exp_name
    exp_name = 'quadtreeOpt'
    method_list = ['quad-geo']
    # method_list = ['quad-baseline', 'quad-geo', 'quad-baseline-localness', 'quad-geo-localness']
    res_cube_abs = np.zeros((len(eps_list), len(seed_list), len(method_list)))
    res_cube_rel = np.zeros((len(eps_list), len(seed_list), len(method_list)))

    for j in range(len(seed_list)):
        queryList = all_queries[j]
        kexp = GKExp(data, queryList)
        p = Params(seed_list[j])

        for i in range(len(eps_list)):
            p.Eps = eps_list[i]
            for k in range(len(method_list)):

                if method_list[k] == 'quad-baseline':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_baseline(p)
                elif method_list[k] == 'quad-baseline-localness':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_baseline_localness(p)
                elif method_list[k] == 'quad-geo':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_geo(p)
                elif method_list[k] == 'quad-geo-localness':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_geo_localness(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)

    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    #np.savetxt(Params.resdir+exp_name+'_abs_'+str(int(queryShape[0]*10))+'_'+str(int(queryShape[1]*10)), res_abs_summary, fmt='%.4f\t')
    np.savetxt(Params.resdir + exp_name + '_rel_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_rel_summary, fmt='%.4f\t')
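Every driver in this listing reduces its result cube the same way: axis 0 indexes epsilon values, axis 1 seeds, axis 2 methods, and averaging over axis 1 collapses the repeated seeded runs. A minimal sketch of that reduction, with made-up numbers standing in for the experiment outputs:

import numpy as np

# Hypothetical sizes: 3 epsilon values, 5 seeds, 1 method.
res_cube_rel = np.random.rand(3, 5, 1)

# Average over the seed axis; the result has one row per epsilon and
# one column per method, which is the shape np.savetxt writes above.
res_rel_summary = np.average(res_cube_rel, axis=1)
print(res_rel_summary.shape)  # (3, 1)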
Example #2
    def ExportDojoFiles(self):

        ##
        fname = QFileDialog.getExistingDirectory(self, "Select Export Folder",
                                                 self.u_info.files_path)
        if len(fname) == 0:
            print('No folder was selected.')
            return
        ##
        export_dir = os.path.join(fname, 'dojo')  # avoid shadowing the builtin dir()
        print('Export folder: ', export_dir)
        tmp_info = Params()
        tmp_info.SetUserInfo(export_dir)

        print(tmp_info.files_path)
        print(tmp_info.ids_path)
        print(tmp_info.tile_ids_path)
        print(tmp_info.tile_ids_volume_file)
        print(tmp_info.color_map_file)
        print(tmp_info.segment_info_db_file)
        print(tmp_info.images_path)
        print(tmp_info.tile_images_path)
        print(tmp_info.tile_images_volume_file)

        os.mkdir(tmp_info.files_path)
        copy_tree(self.u_info.ids_path, tmp_info.ids_path)
        copy_tree(self.u_info.images_path, tmp_info.images_path)
Example #3
    def initialize(self):
        """
            Function to call the initialize function on the Params object of the StorageVET application
            Part of the MenuBar
        """

        self.statProgress.reset()
        self.stackedWidgets.setCurrentIndex(0)
        if self.inputFileName:
            if self.inputFileName.endswith(".csv"):
                self.inputFileName = Params.csv_to_xml(self.inputFileName)

            # Initialize the Input Object from Model Parameters and Simulation Cases
            Params.initialize(self.inputFileName, schema_rel_path)
            self.init = True
            self.msgBox.setText(
                'Successfully initialized the Params class with the XML file.')
            self.msgBox.exec_()
            gLogger.info(
                'User successfully initialized the Params class with the XML file.'
            )
        else:
            # Look for proper ways so that the Params initialize errors from the StorageVET side can be reported to
            # the SVETapp main window text; currently, the SVETapp window just exits without any message.
            self.msgBox.setText('Params has not been initialized to validate.')
            self.msgBox.exec_()
            gLogger.info('User has not given an input file to initialize.')
Example #4
def test_grids(data, queryShape, all_queries):
    global method_list, exp_name
    exp_name = 'grids'
    method_list = ['grid-uniform', 'grid-adaptive']
    #'grid-pure','grid-uniform','grid-adaptive','grid-adaptive-localness'
    res_cube_abs = np.zeros((len(eps_list), len(seed_list), len(method_list)))
    res_cube_rel = np.zeros((len(eps_list), len(seed_list), len(method_list)))

    for j in range(len(seed_list)):
        queryList = all_queries[j]
        kexp = GKExp(data, queryList)
        p = Params(seed_list[j])

        for i in range(len(eps_list)):
            p.Eps = eps_list[i]
            for k in range(len(method_list)):
                if method_list[k] == 'grid-pure':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Grid_pure(p)
                elif method_list[k] == 'grid-uniform':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Grid_uniform(p)
                elif method_list[k] == 'grid-adaptive':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Grid_adaptive(p)
                elif method_list[k] == 'grid-adaptive-localness':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Grid_adaptive_localness(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)

    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    #np.savetxt(Params.resdir+exp_name+'_abs_'+str(int(queryShape[0]*10))+'_'+str(int(queryShape[1]*10)), res_abs_summary, fmt='%.4f\t')
    np.savetxt(Params.resdir + exp_name + '_rel_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_rel_summary, fmt='%.4f\t')
Example #5
def exp4():

    logging.basicConfig(level=logging.DEBUG, filename='log/debug.log')
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")

    param = Params(1000)
    data = data_readin(param)

    # print "Loc"
    # for lid in param.locs.keys():
    #     print len(param.locs[lid])

    print "var"
    for lid in param.locs.keys():
        users = param.locs[lid]
        print max(users.values())

    # print "User"
    # for uid in param.users.keys():
    #     print len(param.users[uid])


    param.NDIM, param.NDATA = data.shape[0], data.shape[1]
    param.LOW, param.HIGH = np.amin(data, axis=1), np.amax(data, axis=1)

    evalPSD(data, param)
Example #6
def test_kdTrees(queryShape):
    global methodList, exp_name
    exp_name = 'kdTrees'
    methodList = ['pure', 'true', 'standard', 'hybrid', 'cell', 'noisymean']
    #    Params.maxHeight = 8
    epsList = [0.1, 0.5, 1.0]
    data = data_readin()
    res_cube_abs = np.zeros((len(epsList), len(seedList), len(methodList)))
    res_cube_rel = np.zeros((len(epsList), len(seedList), len(methodList)))

    for j in range(len(seedList)):
        queryList = queryGen(queryShape, seedList[j])
        kexp = KExp(data, queryList)
        for i in range(len(epsList)):
            for k in range(len(methodList)):
                p = Params(seedList[j])
                p.Eps = epsList[i]
                if methodList[k] == 'pure':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_pure(p)
                elif methodList[k] == 'true':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_true(p)
                elif methodList[k] == 'standard':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_standard(p)
                elif methodList[k] == 'hybrid':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_hybrid(p)
                elif methodList[k] == 'noisymean':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_noisymean(p)
                elif methodList[k] == 'cell':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_cell(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)

    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    np.savetxt(Params.resdir + exp_name + '_abs_' +
               str(int(queryShape[0] * 10)) + '_' +
               str(int(queryShape[1] * 10)),
               res_abs_summary,
               fmt='%.4f')
    np.savetxt(Params.resdir + exp_name + '_rel_' +
               str(int(queryShape[0] * 10)) + '_' +
               str(int(queryShape[1] * 10)),
               res_rel_summary,
               fmt='%.4f')
Example #7
def expSensitivity():

    logging.basicConfig(level=logging.DEBUG, filename='./debug.log')
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")

    p = Params(1000)

    p.select_dataset()

    p.locs, p.users, p.locDict = readCheckins(p)

    evalActualSensitivity(p)
Example #8
    def post(self, param_id):
        """
        Update geocast parameters
        """
        global datasets, tree, all_data
        global eps, percent, com_range, mar, arf, utl, heuristic, subcell, localness, constraint
        workers = tornado.escape.json_decode(self.request.body)

        # print simplejson.dumps(workers)

        # np.fromiter(json.loads(workers),dtype)

        # save data to a file
        # tmp_workers_file = "../../dataset/tmp_workers_file.dat"

        # data = np.genfromtxt("../../dataset/yelp.dat",unpack = True)

        print "Start updating worker locations"
        i = 0
        all_workers = []
        for worker in workers:
            i += 1
            if i % 1000 == 0:
                print "Updated ", i, " workers"
            pair = [worker['k'], worker['B']]
            all_workers.append(pair)
        # Build and persist the array once, after all workers are collected;
        # doing this inside the loop rebuilt the array and rewrote the file
        # on every iteration.
        data = np.array(all_workers)
        np.savetxt('../../dataset/update.txt', data, delimiter='\t')
        data = data.transpose()
        Params.NDIM, Params.NDATA = data.shape[0], data.shape[1]

        Params.LOW, Params.HIGH = np.amin(data, axis=1), np.amax(data, axis=1)
        print Params.NDIM, Params.NDATA
        print Params.LOW, Params.HIGH

        p = Params(1000)
        print "Creating WorkerPSD..."
        dataset = self.get_argument("dataset", default=Params.DATASET)
        Params.DATASET = dataset
        p.select_dataset()
        print dataset
        tree = Grid_adaptive(data, p)
        tree.buildIndex()
        bounds = np.array([[Params.x_min, Params.y_min],
                           [Params.x_max, Params.y_max]])
        print bounds
        all_data[dataset] = (tree, bounds, p.NDATA)

        self.write(
            json.dumps({"status": "update successfully"}, sort_keys=True))
Example #9
def expKM():
    logging.basicConfig(level=logging.DEBUG, filename='./debug.log')
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")

    p = Params(1000)

    p.select_dataset()

    p.locs, p.users, p.locDict = readCheckins(p)
    p.debug()

    copy_locs = copy.deepcopy(p.locs)
    copy_locDict = copy.deepcopy(p.locDict)

    # pool = Pool(processes=len(eps_list))  # unused while the pool.map path below is commented out
    params = []
    for M in M_list:
        p.M = M
        global_sen = sensitivity_add(p.C, float(p.C)/p.K)[2] * p.M
        p.locs, p.users = cellStats(p, copy_locs, copy_locDict, global_sen)
        E_actual = shannonEntropy(p.locs)

        param = (p, global_sen, E_actual)
        evalLimitKM(param)
        # params.append((p, global_sen, E_actual))
    # pool.map(evalLimitK, params)
    # pool.join()

    createGnuData(p, "evalLimitKM", M_list)
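shannonEntropy is a project helper that is not shown in this listing. A minimal sketch of the standard formula it presumably implements, assuming (as Example #5 suggests) that locs maps a location id to a {user id: visit count} dict and that a per-location entropy dict is returned:

import math

def shannon_entropy(locs):
    # H(l) = -sum_u p_u * log(p_u), with p_u the fraction of visits
    # to location l contributed by user u (hypothetical data layout).
    entropy = {}
    for lid, users in locs.items():
        total = float(sum(users.values()))
        entropy[lid] = -sum((c / total) * math.log(c / total)
                            for c in users.values() if c > 0)
    return entropy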
Example #10
    def post(self, param_id):
        """
        Update geocast parameters
        """
        global datasets, tree, all_data
        global eps, percent, com_range, mar, arf, utl, heuristic, subcell, localness, constraint
        workers = tornado.escape.json_decode(self.request.body)

        # print simplejson.dumps(workers)

        # np.fromiter(json.loads(workers),dtype)

        # save data to a file
        # tmp_workers_file = "../../dataset/tmp_workers_file.dat"

        # data = np.genfromtxt("../../dataset/yelp.dat",unpack = True)

        print "Start updating worker locations"
        i = 0
        all_workers = []
        for worker in workers:
            i += 1
            if i % 1000 == 0:
                print "Updated ", i, " workers"
            pair = [worker['k'], worker['B']]
            all_workers.append(pair)
        # Build and persist the array once, after all workers are collected;
        # doing this inside the loop rebuilt the array and rewrote the file
        # on every iteration.
        data = np.array(all_workers)
        np.savetxt('../../dataset/update.txt', data, delimiter='\t')
        data = data.transpose()
        Params.NDIM, Params.NDATA = data.shape[0], data.shape[1]

        Params.LOW, Params.HIGH = np.amin(data, axis=1), np.amax(data, axis=1)
        print Params.NDIM, Params.NDATA
        print Params.LOW, Params.HIGH

        p = Params(1000)
        print "Creating WorkerPSD..."
        dataset = self.get_argument("dataset", default=Params.DATASET)
        Params.DATASET = dataset
        p.select_dataset()
        print dataset
        tree = Grid_adaptiveM(data, 1, p)
        tree.buildIndex()
        bounds = np.array([[Params.x_min, Params.y_min], [Params.x_max, Params.y_max]])
        print bounds
        all_data[dataset] = (tree, bounds, p.NDATA)

        self.write(
            json.dumps({"status": "update successfully"}, sort_keys=True))
Example #11
def test_htrees(data, queryShape, all_queries):
    global method_list, exp_name
    exp_name = "htrees"
    method_list = ["ht-standard"]
    #    method_list = ['ht-standard','ht-composite']
    #'ht-pure','ht-true','ht-standard','ht-composite','ht-hybrid','ht-hybrid-skew','ht-composite-localness','ht-hybrid-localness'
    res_cube_abs = np.zeros((len(eps_list), len(seed_list), len(method_list)))
    res_cube_rel = np.zeros((len(eps_list), len(seed_list), len(method_list)))

    for j in range(len(seed_list)):
        queryList = all_queries[j]
        kexp = GKExp(data, queryList)
        p = Params(seed_list[j])

        for i in range(len(eps_list)):
            p.Eps = eps_list[i]
            for k in range(len(method_list)):

                if method_list[k] == "ht-pure":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_pure(p)
                elif method_list[k] == "ht-true":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_true(p)
                elif method_list[k] == "ht-standard":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_standard(p)
                elif method_list[k] == "ht-composite":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_composite(p)
                elif method_list[k] == "ht-composite-localness":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_composite_localness(p)
                elif method_list[k] == "ht-hybrid":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_hybrid(p)
                elif method_list[k] == "ht-standard-skew":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_standard_skew(p)
                elif method_list[k] == "ht-hybrid-skew":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_hybrid_skew(p)
                elif method_list[k] == "ht-standard-adaptive":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_standard_adaptive(p)
                elif method_list[k] == "ht-hybrid-localness":
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_hybrid_localness(p)
                else:
                    logging.error("No such index structure!")
                    sys.exit(1)
    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    # np.savetxt(Params.resdir+exp_name+'_abs_'+str(int(queryShape[0]*10))+'_'+str(int(queryShape[1]*10)), res_abs_summary, fmt='%.4f\t')
    np.savetxt(
        Params.resdir + exp_name + "_rel_" + str(int(queryShape[0] * 10)) + "_" + str(int(queryShape[1] * 10)),
        res_rel_summary,
        fmt="%.4f\t",
    )
Example #12
def test_quadtreeOpt(queryShape):
    global methodList, exp_name
    exp_name = 'quadtreeOpt'
    methodList = ['Quad-baseline', 'Quad-geo', 'Quad-post', 'Quad-opt']

    #    Params.maxHeight = 10
    epsList = [0.1, 0.5, 1.0]
    data = data_readin()
    res_cube_abs = np.zeros((len(epsList), len(seedList), len(methodList)))
    res_cube_rel = np.zeros((len(epsList), len(seedList), len(methodList)))

    for j in range(len(seedList)):
        queryList = queryGen(queryShape, seedList[j])
        kexp = KExp(data, queryList)
        for i in range(len(epsList)):
            for k in range(len(methodList)):
                p = Params(seedList[j])
                p.Eps = epsList[i]
                if methodList[k] == 'Quad-baseline':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_baseline(p)
                elif methodList[k] == 'Quad-geo':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_geo(p)
                elif methodList[k] == 'Quad-post':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_post(p)
                elif methodList[k] == 'Quad-opt':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_opt(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)

    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    np.savetxt(Params.resdir + exp_name + '_abs_' +
               str(int(queryShape[0] * 10)) + '_' +
               str(int(queryShape[1] * 10)),
               res_abs_summary,
               fmt='%.4f')
    np.savetxt(Params.resdir + exp_name + '_rel_' +
               str(int(queryShape[0] * 10)) + '_' +
               str(int(queryShape[1] * 10)),
               res_rel_summary,
               fmt='%.4f')
Example #13
    def initialize(self):
        global boundaries, datasets, MTDs, worker_counts
        print "dataset init"
        if len(boundaries) == 0:
            for i in range(len(datasets)):
                Params.DATASET = datasets[i]
                p = Params(1000)
                data = data_readin(p)
                p.select_dataset()
                MTDs.append(p.MTD)
                worker_counts.append(p.NDATA)
                boundaries.append(
                    str(p.x_min) + "," + str(p.y_min) + "," + str(p.x_max) + "," + str(p.y_max))

        """
Example #14
def gradLinearSP(
        Y,  # Observed response
        S,  # Stimulus
        P  # Parameters
):

    # Extract parameters
    a1, v1, a2, v2, d = P

    # Model nonlinearities
    f1, f2 = logistic, softPlus

    # Derivative of model nonlinearities and cost function
    df1, df2, dfe = dlog, dSP, dllike

    ndim = v1.ndim

    x1 = a1 + tdot(v1, S, 2 * (list(range(ndim)), ))
    r1 = f1(x1)
    dr1 = df1(x1)
    x2 = a2 + (r1 * v2).sum()
    r2 = f2(x2)
    dr2 = df2(x2)

    dy = d * dfe(Y, d * r2)
    dd = dy * r2 / d

    da2 = dy * dr2
    dv2 = dy * dr2 * r1

    da1 = dy * dr2 * (dr1 * v2).sum()
    dv1 = dy * dr2 * tdot(dr1 * S, v2,
                          (list(range(-ndim, 0)), list(range(ndim))))

    return Params([da1, dv1, da2, dv2, dd])
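logistic, softPlus, dlog, and dSP are imported helpers. Minimal NumPy versions consistent with how they are used above (an assumption, since the originals are not shown; dllike, the cost-function derivative, is omitted):

import numpy as np

def logistic(x):
    return 1.0 / (1.0 + np.exp(-x))

def dlog(x):
    # Derivative of the logistic: f'(x) = f(x) * (1 - f(x)).
    s = logistic(x)
    return s * (1.0 - s)

def softPlus(x):
    # log(1 + e^x), computed via log1p.
    return np.log1p(np.exp(x))

def dSP(x):
    # The derivative of softplus is the logistic.
    return logistic(x)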
Example #15
 def __init__(self):
     window = tk.Tk()
     par = Params('params.json')
     read_parameters = ReadParameters()
     read_parameters.read_params(par)
     # print(read_parameters.URL_var.get())
     self.url = read_parameters.URL_var.get()
     self.gateNo = read_parameters.GateNum_var.get()
     self.headers = {'Content-Type': 'application/json'}
     self.key = "gateNo"
     # self.gateNo = "ZD101"
     self.passInfoNum = 1
     self.passtime = time.strftime('%Y-%m-%d %H:%M:%S',
                                   time.localtime(time.time()))
     self.direction = "1"
     self.checkInfoType = "0"
     self.sign = "A96D48AE69F46BD0E1B17F10316086F3"
     self.timestamp = time.strftime('%Y%m%d%H%M%S',
                                    time.localtime(time.time()))
     self.postdata = {
         self.key: self.gateNo,
         "passInfoNum": self.passInfoNum,
         "direction": self.direction,
         "checkInfoType": self.checkInfoType,
         "sign": self.sign,
         "timestamp": self.timestamp
     }
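The payload built above is presumably POSTed to self.url elsewhere in the class; a hedged sketch of such a send step using the standard requests library (the endpoint's behavior and this method name are assumptions):

import json
import requests

def send_pass_info(self):
    response = requests.post(self.url,
                             headers=self.headers,  # Content-Type: application/json
                             data=json.dumps(self.postdata),
                             timeout=5)
    return response.status_code, response.text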
Example #16
    def __init__(self):
        self.state = State(0)
        self.params = Params()
        self.status = Status(self.params)
        self.sleep_time = 1
        self.pwm_read = PwmRead(
            self.params.pin_mode_in,
            self.params.pin_servo_in,
            self.params.pin_thruster_in,
            self.params.pin_OR,
        )
        self.pwm_out = PwmOut(self.params.pin_servo_out, self.params.pin_thruster_out)
        self.pid = PositionalPID()
        self.logger = Logger()
        self.logger.open()
        # Whether experienced OR mode or not
        self.or_experienced = False

        # setup for ina226
        print("Configuring INA226..")
        self.iSensor = ina226(INA226_ADDRESS, 1)
        self.iSensor.configure(
            avg=ina226_averages_t["INA226_AVERAGES_4"],
        )
        self.iSensor.calibrate(rShuntValue=0.002, iMaxExcepted=1)

        time.sleep(1)

        print("Configuration Done")

        current = self.iSensor.readShuntCurrent()

        print("Current Value is " + str(current) + "A")

        print("Mode is " + str(hex(self.iSensor.getMode())))
Example #17
    def __init__(self):
        self._time_manager = TimeManager()
        self._params = Params()
        self._status = Status(self._params)
        self.log_time = time.time()
        self._pwm_read = PwmRead(
            self._params.pin_mode_in,
            self._params.pin_servo_in,
            self._params.pin_thruster_in,
            self._params.pin_or,
        )
        self._pwm_out = PwmOut(self._params.pin_servo_out,
                               self._params.pin_thruster_out)
        self._pid = PositionalPID()
        self._logger = Logger()
        self._logger.open()
        # Whether experienced OR mode or not
        self._or_experienced = False

        # setup for ina226
        print("Configuring INA226..")
        try:
            self.i_sensor = ina226(INA226_ADDRESS, 1)
            self.i_sensor.configure(
                avg=ina226_averages_t["INA226_AVERAGES_4"], )
            self.i_sensor.calibrate(rShuntValue=0.002, iMaxExcepted=1)
            self.i_sensor.log()
            print("Mode is " + str(hex(self.i_sensor.getMode())))
        except Exception:
            print("Error when configuring INA226")

        time.sleep(1)

        print("Configuration Done")
Example #18
    def tariff_summary(self):
        """
            Function to call the table_summary function on the Params object
            Part of the MenuBar
        """

        if self.init:
            table = Params.verify_tariff()
            table.align['Billing Period'] = "r"
            table.align['Start Month'] = "r"
            table.align['End Month'] = "r"
            table.align['Start Time'] = "r"
            table.align['End Time'] = "r"
            table.align['Excluding Start Time'] = "r"
            table.align['Excluding End Time'] = "r"
            table.align['Weekday?'] = "r"
            table.align['Value'] = "r"
            table.align['Charge'] = "r"
            table.align['Name_optional'] = "r"
            self.text.setText(table.get_html_string())
            self.stackedWidgets.setCurrentIndex(1)
            self.msgBox.setText(
                'Successfully printed the user tariff data table.')
            self.msgBox.exec_()
            gLogger.info(
                'User successfully printed the user tariff data table.')
        else:
            self.msgBox.setText(
                'Input has not been initialized to summarize the user tariff data table.'
            )
            self.msgBox.exec_()
            gLogger.info(
                'User has not initialized the input to summarize the user tariff data table.'
            )
Example #19
    def __init__(self):

        #
        # Define user info
        #
        self.u_info = Params()

        super(MainWindow, self).__init__()
        FileMenu.__init__(self)
        DojoMenu.__init__(self)  # ???

        #
        # Prepare the main window
        #

        self.title = 'UNI-EM'
        self.left = 200
        self.top = 200
        self.width = 1200
        self.height = 800

        SyncListQComboBoxExcludeDojoMtifManager.build(self.u_info)
        SyncListQComboBoxOnlyDojoManager.build(self.u_info)

        self.initUI()
Example #20
    def battery_summary(self):
        """
            Function to call the battery_cycle_life_summary function on the Params object
            Part of the MenuBar
        """

        if self.init:
            table = Params.battery_cycle_life_summary()
            table.align['Cycle Depth Upper Limit'] = "r"
            table.align['Cycle Life Value'] = "r"
            self.text.setText(table.get_html_string())
            self.stackedWidgets.setCurrentIndex(1)
            self.msgBox.setText(
                'Successfully printed the battery cycle life table.')
            self.msgBox.exec_()
            gLogger.info(
                'User successfully printed the battery cycle life table.')
        else:
            self.msgBox.setText(
                'Input has not been initialized to summarize the cycle life table.'
            )
            self.msgBox.exec_()
            gLogger.info(
                'User has not initialized the input to summarize the cycle life table.'
            )
Example #21
    def CheckFolderDojo(self, folder_path):
        tmp_info = Params()
        tmp_info.SetUserInfo(folder_path)
        # Check file existence
        if  os.path.exists(tmp_info.files_path) and \
            os.path.exists(tmp_info.ids_path) and \
            os.path.exists(tmp_info.tile_ids_path) and \
            os.path.isfile(tmp_info.tile_ids_volume_file) and \
            os.path.isfile(tmp_info.color_map_file) and \
            os.path.isfile(tmp_info.segment_info_db_file) and \
            os.path.exists(tmp_info.images_path) and \
            os.path.exists(tmp_info.tile_images_path) and \
            os.path.isfile(tmp_info.tile_images_volume_file) :

            return 1
        else:
            return 0
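The chained existence test reads more compactly with all(); a behavior-preserving sketch over the same Params attributes (returning a bool rather than 1/0):

import os

def check_folder_dojo(tmp_info):
    dirs = [tmp_info.files_path, tmp_info.ids_path, tmp_info.tile_ids_path,
            tmp_info.images_path, tmp_info.tile_images_path]
    files = [tmp_info.tile_ids_volume_file, tmp_info.color_map_file,
             tmp_info.segment_info_db_file, tmp_info.tile_images_volume_file]
    return (all(os.path.exists(d) for d in dirs) and
            all(os.path.isfile(f) for f in files))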
Example #22
    def __init__(self):
        self.load_datasets()
        self.create_model()

        if Params.isTrue('ContinueTrain'):
            self.prepare_continued_training()
        else:
            self.start_epoch = 0
Example #23
    def series_summary(self):
        """
            Function to call the series_summary function on the Params object
            Part of the MenuBar
        """

        if self.init:
            no_series = len(Params.referenced_data["time_series"])
            if no_series == 1:
                fig = Params.series_summary()  # call once and reuse the figure
                if fig:
                    self.canvas = FigureCanvas(fig)
                    self.canvas.draw()
                    self.stackedWidgets.addWidget(self.canvas)
                    self.stackedWidgets.setCurrentIndex(2)
                    self.msgBox.setText(
                        'Successfully plotted all the provided time series.')
                    self.msgBox.exec_()
                    gLogger.info(
                        'User successfully plotted all the provided time series.'
                    )
                else:
                    self.msgBox.setText(
                        'The plot is not optimized for time series data longer than 1 year and '
                        'with timestep dt < 1 hour.')
                    self.msgBox.exec_()
                    gLogger.info(
                        'The plot is not optimized for time series data longer than 1 year and '
                        'with timestep dt < 1 hour.')
            elif no_series > 1:
                self.stackedWidgets.setCurrentIndex(0)
                self.msgBox.setText(
                    'There is more than 1 time series data to plot. '
                    'Current GUI version has not supported this yet.')
                self.msgBox.exec_()
            else:
                self.stackedWidgets.setCurrentIndex(0)
                self.msgBox.setText(
                    'There is 0 time series data to plot or '
                    'GUI cannot read the input name for time series data.')
                self.msgBox.exec_()
        else:
            self.msgBox.setText(
                'Input has not been initialized to plot time series.')
            self.msgBox.exec_()
            gLogger.info(
                'User has not initialized the input to plot time series.')
Example #24
def expStats():
    logging.basicConfig(level=logging.DEBUG, filename='./debug.log')
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")

    p = Params(1000)

    p.select_dataset()

    p.locs, p.users, p.locDict = readCheckins("dataset/gowalla_NY.txt")

    c_locs, c_users = cellStats(p)

    # for uid in c_users:
    #     print len(c_users.get(uid))

    for lid in c_locs:
        print len(c_locs.get(lid))
Example #25
def test_htrees(data, queryShape, all_queries):
    global method_list, exp_name
    exp_name = 'htrees'
    method_list = ['ht-standard']
    #    method_list = ['ht-standard','ht-composite']
    #'ht-pure','ht-true','ht-standard','ht-composite','ht-hybrid','ht-hybrid-skew','ht-composite-localness','ht-hybrid-localness'
    res_cube_abs = np.zeros((len(eps_list), len(seed_list), len(method_list)))
    res_cube_rel = np.zeros((len(eps_list), len(seed_list), len(method_list)))

    for j in range(len(seed_list)):
        queryList = all_queries[j]
        kexp = GKExp(data, queryList)
        p = Params(seed_list[j])

        for i in range(len(eps_list)):
            p.Eps = eps_list[i]
            for k in range(len(method_list)):

                if method_list[k] == 'ht-pure':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_pure(p)
                elif method_list[k] == 'ht-true':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_true(p)
                elif method_list[k] == 'ht-standard':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_standard(p)
                elif method_list[k] == 'ht-composite':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_composite(p)
                elif method_list[k] == 'ht-composite-localness':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_composite_localness(p)
                elif method_list[k] == 'ht-hybrid':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_hybrid(p)
                elif method_list[k] == 'ht-standard-skew':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_standard_skew(p)
                elif method_list[k] == 'ht-hybrid-skew':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_hybrid_skew(p)
                elif method_list[k] == 'ht-standard-adaptive':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_standard_adaptive(p)
                elif method_list[k] == 'ht-hybrid-localness':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_HT_hybrid_localness(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)
    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    #np.savetxt(Params.resdir+exp_name+'_abs_'+str(int(queryShape[0]*10))+'_'+str(int(queryShape[1]*10)), res_abs_summary, fmt='%.4f\t')
    np.savetxt(Params.resdir + exp_name + '_rel_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_rel_summary, fmt='%.4f\t')
Example #26
class MyTestCase(unittest.TestCase):
    def setUp(self):
        # init parameters
        self.p = Params(1000)
        self.p.select_dataset()

        self.log = logging.getLogger("debug.log")

    def testlocationCount(self):
        locs = []
        with open("dataset/weibo/checkins_filtered.txt") as worker_file:
            reader = csv.reader(worker_file, delimiter='\t')
            for row in reader:
                locs.append((float(row[1]), float(row[2]),
                             int(row[3])))  # lat, lon, id
        count = locationCount(self.p, locs)
        print("number of non-empty cells", len(count))
        print("average value per cell", np.mean(list(count.values())))
Example #27
    def SharedPreprocess(self, params, comm_title):

        print('Annotator folder is being generated for', comm_title)
        targ = Params()
        targ.SetUserInfoAnnotator(params['Empty Folder for Annotator'])

        # exist_ok=True already makes these calls idempotent; the isdir guards
        # also skipped creating *_whole_path whenever its sibling existed.
        os.makedirs(targ.surfaces_path, exist_ok=True)
        os.makedirs(targ.surfaces_whole_path, exist_ok=True)
        os.makedirs(targ.skeletons_path, exist_ok=True)
        os.makedirs(targ.skeletons_whole_path, exist_ok=True)
        os.makedirs(targ.volume_path, exist_ok=True)
        os.makedirs(targ.paint_path, exist_ok=True)

        return targ
Example #28
    def __init__(self,
                 learning_rate=0.01,
                 momentum=0.0,
                 nb_agents=5,
                 params=None,
                 nesterov=False,
                 name="QCDGD",
                 clip=0,
                 ternSt=0,
                 c1=0.5,
                 delta=0.48,
                 **kwargs):

        super(QCDGD, self).__init__(False, name)
        #self._set_hyper("learning_rate", kwargs.get("lr", 1))
        #self._set_hyper("decay", self._initial_decay)

        self._momentum = False
        if isinstance(momentum,
                      ops.Tensor) or callable(momentum) or momentum > 0:
            self._momentum = True

        if isinstance(momentum,
                      (int, float)) and (momentum < 0 or momentum > 1):
            raise ValueError("`momentum` must be between [0, 1].")

        #self._set_hyper("momentum", momentum)

        self.nesterov = nesterov
        self.learning_rate = learning_rate
        self.nb_agents = nb_agents

        if params is None:
            self.params = Params(nb_agents, 1)
        else:
            self.params = params

        self.epochStart = True

        self.clipSTD = clip
        self.stMultiplier = ternSt

        self.c1 = c1
        self.delta = delta
Example #29
    def get(self, param_id):
        """
        Update geocast parameters
        """
        global datasets, tree, all_data
        global eps, percent, com_range, mar, arf, utl, heuristic, subcell, localness, constraint
        dataset = self.get_argument("dataset", default=Params.DATASET)
        eps = self.get_argument("eps", default=eps)
        percent = self.get_argument("percent", default=Params.PercentGrid)
        com_range = self.get_argument("range", default=Params.NETWORK_DIAMETER)

        # geocast parameters
        mar = self.get_argument("mar", default=Params.MAR)
        arf = self.get_argument("arf", default=Params.AR_FUNCTION)
        utl = self.get_argument("utl", default=Params.U)
        heuristic = self.get_argument("heuristic",
                                      default=Params.COST_FUNCTION)
        subcell = self.get_argument("subcell",
                                    default=Params.PARTIAL_CELL_SELECTION)
        localness = self.get_argument("localness",
                                      default=Params.CUSTOMIZED_GRANULARITY)
        constraint = self.get_argument("constraint",
                                       default=Params.CONSTRAINT_INFERENCE)

        Params.DATASET = dataset
        Params.Eps = float(eps)
        Params.PercentGrid = float(percent)
        Params.NETWORK_DIAMETER = float(com_range) / 1000.0
        Params.MAR = float(mar)
        Params.AR_FUNCTION = arf
        Params.U = float(utl)
        Params.COST_FUNCTION = heuristic
        Params.PARTIAL_CELL_SELECTION = (subcell == "true" or subcell == True)
        Params.CUSTOMIZED_GRANULARITY = (localness == "true"
                                         or localness == True)
        Params.CONSTRAINT_INFERENCE = constraint == "true"
        print "Update parameters ... "
        print Params.DATASET, Params.Eps, Params.PercentGrid, Params.NETWORK_DIAMETER, Params.MAR, Params.AR_FUNCTION, Params.U, Params.COST_FUNCTION, Params.PARTIAL_CELL_SELECTION, Params.CUSTOMIZED_GRANULARITY

        # workerPSD parameters
        rebuild = self.get_argument("rebuild", default=0)
        rebuild = int(rebuild)
        if rebuild == 1:
            print "Reading data ... " + dataset
            data = data_readin()
            p = Params(1000)
            print "Creating WorkerPSD..."
            tree = Grid_adaptive(data, p)
            tree.buildIndex()
            bounds = np.array([[Params.x_min, Params.y_min],
                               [Params.x_max, Params.y_max]])
            all_data[dataset] = (tree, bounds, p.NDATA)
            print "Created WorkerPSD..." + dataset

        self.write(
            json.dumps({"status": "update successfully"}, sort_keys=True))
Example #30
    def validate(self):
        """
            Function to call the validate function on the Params object after Initialization
            Part of the MenuBar
        """

        self.stackedWidgets.setCurrentIndex(0)
        if self.init:
            Params.validate()
            self.msgBox.setText('Successfully validated the Params class with the Schema file.')
            self.msgBox.exec_()
            gLogger.info('User successfully validated the Params class with the Schema file.')
            self.valid = True
        else:
            # Look for proper ways so that the Params validation errors from the StorageVET side can be reported to
            # the SVETapp main window text; currently, the SVETapp window just exits without any message.
            self.msgBox.setText('Input has not been initialized to validate.')
            self.msgBox.exec_()
            gLogger.info('User has not initialized the input to validate.')
Example #31
 def __init__(self):
     self.state = State(0)
     self.params = Params()
     self.status = Status(self.params)
     self.sleep_time = 1
     self.pwm_read = PwmRead(self.params.pin_mode_in, self.params.pin_servo_in, self.params.pin_thruster_in)
     self.pwm_out = PwmOut(self.params.pin_servo_out, self.params.pin_thruster_out)
     self.pid = PositionalPID()
     self.logger = Logger()
     self.logger.open()
Example #32
def data_readin():
    """Read in spatial data and initialize global variables."""
    p = Params(0)
    p.select_dataset()
    data = np.genfromtxt(Params.dataset, unpack=True)
    Params.NDIM, Params.NDATA = data.shape[0], data.shape[1]
    Params.LOW, Params.HIGH = np.amin(data, axis=1), np.amax(data, axis=1)
    logging.debug(data.shape)
    logging.debug(Params.LOW)
    logging.debug(Params.HIGH)
    return data


# The code below references names outside data_readin and is reconstructed as
# a separate task-generation function; this signature is an assumption based
# on the names used in the body (seed_list and taskNo come from module scope
# in the original scripts).
def tasks_gen(data, taskNo, x1, y1, x2, y2):
    all_points = []
    if os.path.isfile(Params.TASKPATH):
        with open(Params.TASKPATH) as f:
            content = f.readlines()
        for i in range(len(seed_list)):
            ran_points = []
            for j in range(taskNo):
                ran_points.append(map(float, content[i * taskNo + j].split()))
            all_points.append(ran_points)
    else:
        tasks = ""
        logging.debug('tasks_gen: generating tasks...')

        boundary = np.array([[x1, y1], [x2, y2]])
        for seed in seed_list:
            ran_points = []
            np.random.seed(seed)
            count = 0
            while count < taskNo:
                idx = np.random.randint(0, data.shape[1])
                _ran_point = data[:, idx]
                if is_rect_cover(boundary, _ran_point):
                    ran_points.append(_ran_point)
                    count += 1
            all_points.append(ran_points)
            for item in ran_points:
                tasks += "%s\n" % " ".join(map(str, item))
        with open(Params.TASKPATH, "w") as outfile:
            outfile.write(tasks)
    return all_points
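is_rect_cover is defined elsewhere in the project; a minimal axis-aligned containment test consistent with the call is_rect_cover(boundary, _ran_point) above (the exact semantics are an assumption):

def is_rect_cover(boundary, point):
    # boundary is [[x1, y1], [x2, y2]]; True if point lies inside it.
    (x1, y1), (x2, y2) = boundary
    return x1 <= point[0] <= x2 and y1 <= point[1] <= y2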
Example #33
    def OnInit(self):

        self.u_info = Params()
        self.control_panel = wxg.ControlPanel(None,
                                              wx.ID_ANY,
                                              "",
                                              sim_name=[self, self.u_info])
        self.SetTopWindow(self.control_panel)
        self.control_panel.Show()
        return True
Example #34
 def load(self, out_dir):
     self.params = Params().from_json(
         r.read(out_dir + "/params.json", r.json))
     self.sample = Sample().from_fasta(
         r.read(out_dir + "/sample.fasta", r.fasta_list))
     self.art_output = r.read(out_dir + "/art.aln",
                              r.aln(self.params.take_ref))
     self.instance = Instance().from_json(
         r.read(out_dir + "/instance.json", r.json))
     return self
Example #35
def compute_coverage_map(grid_size=100):
    swlat = 34.018212
    swlng = -118.291716
    nelat = 34.025296
    nelng = -118.279826
    videos = get_videos(swlat, swlng, nelat, nelng)

    # np.zeros, not np.ndarray: counts must start at 0, not uninitialized memory.
    map = np.zeros(shape=(grid_size, grid_size), dtype=int)
    for video in videos:
        if IS_FROM_MEDIAQ:
            if video.properties['vid']:
                vid = str(video.properties['vid'])
                fovs = getFOVs(vid)

                if fovs and len(fovs) > 0:
                    for fov in fovs.features:
                        f = FOV(fov)
                        param = Params(200, swlat, swlng, nelat, nelng)
                        param.GRID_SIZE = grid_size
                        for cid in f.cellids(param):
                            cell_lat, cell_lng = cell_coord(cid, param)
                            if f.cover(cell_lat, cell_lng):
                                y_idx = cid/param.GRID_SIZE
                                x_idx = cid - y_idx*param.GRID_SIZE
                                # print x_idx, y_idx, map[x_idx][y_idx]
                                map[x_idx][y_idx] = map[x_idx][y_idx] + 1
        else:
            for f in video.fovs:
                param = Params(200, swlat, swlng, nelat, nelng)
                param.GRID_SIZE = grid_size
                for cid in f.cellids(param):
                    cell_lat, cell_lng = cell_coord(cid, param)
                    if f.cover(cell_lat, cell_lng):
                        y_idx = cid/param.GRID_SIZE
                        x_idx = cid - y_idx*param.GRID_SIZE
                        # print x_idx, y_idx, map[x_idx][y_idx]
                        map[x_idx][y_idx] = map[x_idx][y_idx] + 1

    fig, ax = plt.subplots()
    heatmap = ax.pcolor(map, cmap=plt.cm.Reds)
    plt.show()
    plt.close()
    np.savetxt("mediaq_coverage_heatmap.txt" , map, fmt='%i\t')
Example #36
def surface_temp_vary_production():
    params = Params()
    depth = params.params['well']['casing1']['depth']

    p = params.params
    # params.set_production_rate(200)
    oil_temp = OilTemp(p)
    oil_temp.load_params()
    oil_temp.run()
    # oil_temp.plot()
    print(oil_temp.params['etc']['t'], oil_temp.temps_in_C[-1])
    annular_temp = AnnularTemp(p, oil_temp.temps_in_K, oil_temp.zindex)
    annular_temp.run()
    # annular_temp.plot()

    w = np.arange(0, 500, 25)
    t_c = []
    t_b = []
    r_p_c = []
    r_p_b = []
    for i in w:
        params.set_production_rate(i)
        ot = OilTemp(params.params)
        # print(params.params['thermal']['W'])
        ot.load_params()
        ot.run()
        at = AnnularTemp(params.params, ot.temps_in_K, ot.zindex)
        at.run()
        p_b = Pressure(params.params, at.temps_B_in_C, at.zindex_B)
        p_c = Pressure(params.params, at.temps_C_in_C, at.zindex_C)

        # r
        t_b.append(at.temps_B_in_C[-1])
        t_c.append(at.temps_C_in_C[-1])
        r_p_b.append(p_b.pressure_delta)
        r_p_c.append(p_c.pressure_delta)
    # fig, ax = plt.subplots(1, 1)
    # # ax.set_yticks(np.arange(50, 100, 10))
    # ax.plot(t, r)
    # fig.show()
    #
    export_to_excel_pressure_temp(w, t_b, t_c, r_p_b, r_p_c)
Example #37
def pressure_vary_time():
    params = Params()
    depth = params.params['well']['casing1']['depth']

    p = params.params
    # params.set_production_rate(200)
    oil_temp = OilTemp(p)
    oil_temp.load_params()
    oil_temp.run()
    # oil_temp.plot()
    print(oil_temp.params['etc']['t'], oil_temp.temps_in_C[-1])
    annular_temp = AnnularTemp(p, oil_temp.temps_in_K, oil_temp.zindex)
    annular_temp.run()
    # annular_temp.plot()

    # Earlier sweep ranges, kept for reference:
    # t = np.concatenate((np.arange(0, 100, 10), np.arange(100, 600, 50)))
    # t = np.arange(0, 100, 1 / 24)
    # t = [0, 1, 5, 20, 100, 200]
    t = np.arange(0, 25, 1)
    r_c = []
    r_b = []
    for i in t:
        params.set_time_day(i / 24)
        ot = OilTemp(params.params)
        # print(params.params['thermal']['W'])
        ot.load_params()
        ot.run()
        at = AnnularTemp(params.params, ot.temps_in_K, ot.zindex)
        at.run()
        p_b = Pressure(params.params, at.temps_B_in_C, at.zindex_B)
        p_c = Pressure(params.params, at.temps_C_in_C, at.zindex_C)

        # r
        r_b.append(p_b.pressure_delta)
        r_c.append(p_c.pressure_delta)
    # fig, ax = plt.subplots(1, 1)
    # # ax.set_yticks(np.arange(50, 100, 10))
    # ax.plot(t, r)
    # fig.show()
    #
    export_to_excel(t, r_b, r_c)
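export_to_excel is project-specific and not shown in this listing; a minimal stand-in using pandas, with the column layout and file name being assumptions:

import pandas as pd

def export_to_excel(t, r_b, r_c, path="pressure_vary_time.xlsx"):
    # One row per time step, with both annulus pressure deltas.
    frame = pd.DataFrame({"time_h": t,
                          "pressure_delta_B": r_b,
                          "pressure_delta_C": r_c})
    frame.to_excel(path, index=False)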
Example #38
def test_kdTrees(queryShape):
    global methodList, exp_name
    exp_name = 'kdTrees'
    methodList = ['pure', 'true', 'standard', 'hybrid', 'cell', 'noisymean']
    #    Params.maxHeight = 8
    epsList = [0.1, 0.5, 1.0]
    data = data_readin()
    res_cube_abs = np.zeros((len(epsList), len(seedList), len(methodList)))
    res_cube_rel = np.zeros((len(epsList), len(seedList), len(methodList)))

    for j in range(len(seedList)):
        queryList = queryGen(queryShape, seedList[j])
        kexp = KExp(data, queryList)
        for i in range(len(epsList)):
            for k in range(len(methodList)):
                p = Params(seedList[j])
                p.Eps = epsList[i]
                if methodList[k] == 'pure':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_pure(p)
                elif methodList[k] == 'true':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_true(p)
                elif methodList[k] == 'standard':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_standard(p)
                elif methodList[k] == 'hybrid':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_hybrid(p)
                elif methodList[k] == 'noisymean':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_noisymean(p)
                elif methodList[k] == 'cell':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Kd_cell(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)

    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    np.savetxt(Params.resdir + exp_name + '_abs_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_abs_summary, fmt='%.4f')
    np.savetxt(Params.resdir + exp_name + '_rel_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_rel_summary, fmt='%.4f')
Example #39
def testDifferential():
    p = Params(1000)
    p.select_dataset()
    differ = Differential(1000)
    # RTH = (34.020412, -118.289936)
    TS = (40.758890, -73.985100)

    for i in range(100):
        # (x, y) = differ.getPolarNoise(1000000, p.eps)
        # pp = noisyPoint(TS, (x,y))

        pp = differ.addPolarNoise(1.0, TS, 100)


        # u = distance(p.x_min, p.y_min, p.x_max, p.y_min) * 1000.0 / Params.GRID_SIZE
        # v = distance(p.x_min, p.y_min, p.x_min, p.y_max) * 1000.0 / Params.GRID_SIZE
        # rad = euclideanToRadian((u, v))
        # cell_size = np.array([rad[0], rad[1]])
        # roundedPoint = round2Grid(pp, cell_size, p.x_min, p.y_min)


        roundedPoint = pp
        print (str(roundedPoint[0]) + ',' + str(roundedPoint[1]))
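addPolarNoise belongs to this project's Differential class. If it follows the standard planar Laplace mechanism from the geo-indistinguishability literature, the sampling step can be sketched as below: draw an angle uniformly and invert the radial CDF with the Lambert W function. Treating latitude/longitude as planar coordinates is a simplification, and the argument order of addPolarNoise itself is not assumed here.

import numpy as np
from scipy.special import lambertw

def planar_laplace_noise(eps, point):
    # theta ~ U(0, 2*pi); radius r from inverting
    # C(r) = 1 - (1 + eps*r) * exp(-eps*r) via the W_{-1} branch.
    theta = np.random.uniform(0, 2 * np.pi)
    p = np.random.uniform(0, 1)
    r = -(lambertw((p - 1) / np.e, k=-1).real + 1) / eps
    return (point[0] + r * np.cos(theta), point[1] + r * np.sin(theta))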
Example #40
def test_quadtreeOpt(queryShape):
    global methodList, exp_name
    exp_name = 'quadtreeOpt'
    methodList = ['Quad-baseline', 'Quad-geo', 'Quad-post', 'Quad-opt']

    #    Params.maxHeight = 10
    epsList = [0.1, 0.5, 1.0]
    data = data_readin()
    res_cube_abs = np.zeros((len(epsList), len(seedList), len(methodList)))
    res_cube_rel = np.zeros((len(epsList), len(seedList), len(methodList)))

    for j in range(len(seedList)):
        queryList = queryGen(queryShape, seedList[j])
        kexp = KExp(data, queryList)
        for i in range(len(epsList)):
            for k in range(len(methodList)):
                p = Params(seedList[j])
                p.Eps = epsList[i]
                if methodList[k] == 'Quad-baseline':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_baseline(p)
                elif methodList[k] == 'Quad-geo':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_geo(p)
                elif methodList[k] == 'Quad-post':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_post(p)
                elif methodList[k] == 'Quad-opt':
                    res_cube_abs[i, j, k], res_cube_rel[i, j, k] = kexp.run_Quad_opt(p)
                else:
                    logging.error('No such index structure!')
                    sys.exit(1)

    res_abs_summary = np.average(res_cube_abs, axis=1)
    res_rel_summary = np.average(res_cube_rel, axis=1)
    np.savetxt(Params.resdir + exp_name + '_abs_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_abs_summary, fmt='%.4f')
    np.savetxt(Params.resdir + exp_name + '_rel_' + str(int(queryShape[0] * 10)) + '_' + str(int(queryShape[1] * 10)),
               res_rel_summary, fmt='%.4f')
Example #41
def expM():
    logging.basicConfig(level=logging.DEBUG, filename='./debug.log')
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")

    p = Params(1000)

    p.select_dataset()

    p.locs, p.users, p.locDict = readCheckins(p)
    E_actual = shannonEntropy(p.locs)
    p.debug()

    # pool = Pool(processes=len(eps_list))
    # params = []
    for M in M_list:
        param = (p, M, E_actual)
        evalLimitM(param)
        # params.append((p, M, E_actual))
    # pool.map(evalLimitM, params)
    # pool.join()

    createGnuData(p, "evalLimitM", M_list)
Example #42
class TestFunctions(unittest.TestCase):

    def setUp(self):
        # init parameters
        self.p = Params(1000)
        self.p.select_dataset()

        self.log = logging.getLogger("debug.log")

        # load precomputed smooth sensitivity
        # c_list = range(1, 21)
        # eps_list = [0.1, 0.4, 0.7, 1.0]
        # self.ss = getSmoothSensitivity(c_list, eps_list)
        #
        # if Params.DATASET in ["sparse", "medium", "dense"]: # synthetic
        #     self.p.locs = readData(self.p.dataset)
        # else: # real
        #     self.p.locs, self.p.locDict = readCheckins(self.p)
        # self.p.users = transformDict(self.p.locs)

        # Discretize
        # self.p.locs = cellStats(self.p)
        # self.p.users = transformDict(self.p.locs)
        # distribution_pdf(self.p.locs)

        # self.E_actual = actualEntropy(self.p.locs)      # entropy
        # self.D_actual = actualDiversity(self.p.locs)    # diversity

        # self.C_actual = actualLocationCount(self.p, self.p.locDict) # count


    @unittest.skip
    def testMain(self):

        # Visualization
        # le = sorted(list(self.E_actual.iteritems()), key=lambda x:x[1], reverse=True)    # decrease entropy
        # locIds = [t[0] for t in le]
        # LEVals = [t[1] for t in le]
        # scatter(LEVals, "Location Id", "Entropy")

        # E_noisy = perturbedLocationEntropy(self.p, self.ss, "SS")
        # perturbedLEVals = [E_noisy.get(id, Params.DEFAULT_ENTROPY) for id in locIds]
        # scatter(perturbedLEVals, "Location Id", "Entropy")

        # div = sorted(list(self.D_actual.iteritems()), key=lambda x:x[1], reverse=True)
        # locIds = [t[0] for t in div]
        # divVals = [t[1] for t in div]
        # scatter(divVals, "Location Id", "Diversity")

        # D_noisy = perturbedDiversity(self.p)
        # perturbedDVals = [D_noisy.get(id, Params.DEFAULT_DIVERSITY) for id in locIds]
        # scatter(perturbedDVals, "Location Id", "Diversity")

        # cells = sorted(list(self.C_actual.iteritems()), key=lambda x:x[1], reverse=True)
        # cellIds = [t[0] for t in cells]
        # counts = [t[1] for t in cells]
        # scatter(counts, "Cell Id", "Locations")
        #
        # C_noisy = perturbeCount(self.p)
        # perturbedCounts = [C_noisy.get(id, Params.DEFAULT_FREQUENCY) for id in cellIds]
        # scatter(perturbedCounts, "Cell Id", "Locations")

        evalEnt(self.p, self.E_actual, self.ss)
        evalDiv(self.p, self.D_actual)
        evalBL(self.p, self.E_actual)

        # evalCountDiff(self.p, self.C_actual)
        # evalCountGeoI(self.p, self.C_actual)
        # evalDivGeoI(self.p, self.D_actual)

    @unittest.skip
    def testLEParser(self):
        self.p.locs, self.p.users, self.p.locDict = readCheckins(self.p)
        # distribution_pdf(self.p.locs)
        distribution_pdf(self.p.users)
        self.p.users = samplingUsers(self.p.users, Params.MAX_M)
        distribution_pdf(self.p.users)
        entropyStats(self.p.locs)


        # self.p.maxC, self.p.maxM = otherStats(self.p.locs, self.p.users)

        # discretize
        # cells = cellStats(self.p)
        # entropyStats(cells)
        # self.p.maxC, self.p.maxM = otherStats(cells, transformDict(cells))
        # distribution_pdf(cells)
        # distribution_pdf(transformDict(cells))

    @unittest.skip
    def testLEStats(self):
        nx = range(1,100+1)
        C, eps, K = 2, 1.0, 50

        # Baseline sensitivity (max C)
        max_C = 100
        max_gs = globalSensitivy(max_C)
        max_gsy = [max_gs] * len(nx)

        # global sensitivity (limit C)
        gs = globalSensitivy(C)
        gsy = [gs] * len(nx)

        # smooth sensitivity
        ssy = [v * 2 for v in self.ss[CEps2Str(C, eps)][:100]]

        # local sensitivity
        K = 20
        ls = localSensitivity(C, K)
        lsy = [ls] * len(nx)

        # vary n (all bounds)
        ny = [max_gsy, gsy, ssy, lsy]
        markers = ["o", "-", "--", "+"]
        legends = ["Global (Max C)", "Global (Limit C)", "Smooth", "Local"]
        line_graph(nx, ny, markers, legends, "Number of users (n)", "Sensitivity")

        # vary C
        eps_list = [0.1, 0.4, 0.7, 1.0]
        c_list = range(1, 21)
        n = 100
        ss_list = [[self.ss[CEps2Str(c, eps)][n - 1] for c in c_list] for eps in eps_list]

        markers = ["o", "-", "--", "+", "x"]
        legends = ["Eps=" + str(eps) for eps in eps_list]
        line_graph(c_list, ss_list, markers, legends, "C", "Sensitivity")

        # vary n
        c = 10
        ss_list = [[self.ss[CEps2Str(c, eps)][n - 1] for n in nx] for eps in eps_list]
        line_graph(nx, ss_list, markers, legends, "Number of users (n)", "Sensitivity")

        # vary n & C
        c_list = [1, 10, 20]
        legends = ["C=" + str(c) for c in c_list]
        ss_list = [[self.ss[CEps2Str(c, eps)][n - 1] for n in nx] for c in c_list]
        line_graph(nx, ss_list, markers, legends, "Number of users (n)", "Sensitivity")
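
    # Generic smooth-sensitivity upper bound (Nissim, Raskhodnikova & Smith):
    # S*(x) = max over k of exp(-k * beta) * A(k), where A(k) is the largest
    # local sensitivity among datasets at distance k from x. A sketch for
    # intuition only; the precomputed self.ss values above come from the
    # project's own precomputeSmoothSensitivity routine.
    @staticmethod
    def smoothBoundSketch(local_sens_at_dist, beta, max_dist):
        import math  # local import keeps the sketch self-contained
        return max(math.exp(-k * beta) * local_sens_at_dist(k)
                   for k in range(max_dist + 1))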

    @unittest.skip
    def testLEBounds(self):
        # precompute smooth sensitivity in parallel, one worker per epsilon
        eps_list = [1.0]
        pool = Pool(processes=len(eps_list))
        pool.map(precomputeSmoothSensitivity, eps_list)
        pool.close()  # close() must come before join() on a multiprocessing.Pool
        pool.join()

    @unittest.skip
    def testMetrics(self):
        P = [1,2,3,4,5,6,7,8,9]
        Q = [1,2,4,8,7,6,5,8,9]
        self.assertTrue(abs(KLDivergence2(P, Q) - KLDiv(P, Q)) < 1e-6)

        true = [1,2,3,4,5,6,7,8,9]
        predicted = [1,2,3,4,5,6,7,8,9]
        self.assertEqual(1, DivCatScore(true, predicted))
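
    # Minimal reference implementation of discrete KL divergence, to make the
    # check above concrete (a sketch: it assumes strictly positive inputs and
    # normalizes them to probability vectors, which may differ from how
    # KLDivergence2/KLDiv treat their arguments).
    @staticmethod
    def klReferenceSketch(P, Q):
        p = np.asarray(P, dtype=float)
        q = np.asarray(Q, dtype=float)
        p, q = p / p.sum(), q / q.sum()
        return float(np.sum(p * np.log(p / q)))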

    @unittest.skip
    def testDifferential(self):
        differ = Differential(1000)
        RTH = (34.020412, -118.289936)
        radius = 500.0  # default unit is meters
        eps = np.log(2)
        for i in range(100):
            (x, y) = differ.getPolarNoise(radius, eps)
            print(str(RTH[0] + x * Params.ONE_KM * 0.001) + ',' + str(RTH[1] + y * Params.ONE_KM * 1.2833 * 0.001))
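
    # Sketch of planar ("polar") Laplace noise in the style of getPolarNoise,
    # following the geo-indistinguishability construction of Andres et al.
    # (2013). This is an assumption about the mechanism, not a copy of the
    # Differential implementation: draw a uniform angle, then a radius via the
    # k=-1 branch of the Lambert W function, scaling epsilon by the
    # protection radius.
    @staticmethod
    def polarLaplaceSketch(radius, eps):
        from scipy.special import lambertw
        eps = eps / radius                       # per-meter privacy level
        theta = np.random.uniform(0, 2 * np.pi)  # uniform direction
        p = np.random.uniform(0, 1)              # quantile of the radius CDF
        r = -(lambertw((p - 1) / np.e, k=-1).real + 1) / eps
        return r * np.cos(theta), r * np.sin(theta)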

    @unittest.skip
    def testUtils(self):
        values1 = [1,2,3,4,5,6,7,8,9]
        values2 = [1, 2, 3, 9, 5, 6, 7, 8, 4]
        topVals1, topVals2 = topKValues(3, values1), topKValues(3, values2)
        indices1 = [t[1] for t in topVals1]
        indices2 = [t[1] for t in topVals2]
        self.assertEqual([8,7,6], indices1)
        self.assertEqual([3, 7, 6], indices2)
        self.assertEqual(2.0 / 3, metrics.precision_score(indices1, indices2, average="micro"))


    # @unittest.skip
    def test_filter_gowalla(self):
        filter_gowalla(self.p)
        # filter_yelp(self.p)

    @unittest.skip
    def testDataGen(self):
        SPARSE_N = int(self.p.MAX_N / 10)
        MEDIUM_N = int(self.p.MAX_N)
        DENSE_N = int(self.p.MAX_N * 10)

        np.random.seed(self.p.seed)
        # writeData(generateData(1e+3, SPARSE_N, Params.MAX_M, Params.MAX_C, 2), "../dataset/sparse.txt")
        # writeData(generateData(1e+3, MEDIUM_N, Params.MAX_M, Params.MAX_C, 2), "../dataset/medium.txt")
        writeData(generateData(1e+3, DENSE_N, Params.MAX_M, Params.MAX_C, 2), "../dataset/dense.txt")

        # readData("../dataset/sparse.txt")
Example #43
0
def getRelError(publish, orig):
    """Average per-point error of a published series against the original."""
    error = 0
    for i in range(len(publish)):
        if orig[i] < 0:
            error += distance(publish[i], orig[i])
        elif orig[i] == 0:
            error += distance(publish[i], 1)  # orig is zero: compare against 1 to avoid a zero denominator
        else:
            error += distance(max(publish[i], 0), orig[i])  # clamp negative estimates to zero
    return error / len(publish)
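
# Quick usage sketch for getRelError; distance() is assumed to be the
# project's per-point relative-error measure (toy numbers, not experiment
# output).
if False:
    print(getRelError([9.0, 1.2, 5.0], [10.0, 0.0, 4.0]))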


for q in q_list:
    for i in range(len(eps_list)):
        with open("../log/foursquare/true_count_KF_" + str(eps_list[i]) + ".log") as f:
            rel_errs = []
            for line in f.readlines():
                orig = map(float, line.strip().split("\t"))
                p = Params(1000)
                p.Eps = eps_list[i]

                kf = KalmanFilterPID(p)
                kf.setQ(q)
                budgetKF = eps_list[i] / 2  # half the privacy budget goes to the Kalman filter
                # filter = kf.kalmanFilter(seq, budgetKF, p.samplingRate)
                publish = kf.kalmanFilter(orig, budgetKF)

                rel_err = getRelError(publish, orig)
                rel_errs.append(rel_err)

            print q, "\t", eps_list[i], "\t", sum(rel_errs) / len(rel_errs)
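
# For orientation, the scalar Kalman predict/update step that filters such as
# KalmanFilterPID build on (a generic sketch with a random-walk state model;
# process noise q and measurement noise r are the knobs tuned above -- this is
# not the KalmanFilterPID implementation itself).
def kalman_step_sketch(x_est, p_est, z, q, r):
    x_pred = x_est                  # predict: state follows a random walk
    p_pred = p_est + q              # prediction uncertainty grows by q
    k_gain = p_pred / (p_pred + r)  # update: weight observation vs. model
    x_new = x_pred + k_gain * (z - x_pred)
    p_new = (1 - k_gain) * p_pred
    return x_new, p_new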
Example #44
0
            grid_data = child.text
            rows = grid_data.split("\n")
            for row in rows:
                values = row.split(" ")
                if len(values) < 5:
                    continue  # skip short or blank rows before indexing
                print values[0], values[1], values[4]
                if len(values) == 11 and float(values[4]) > 7:
                    pass  # placeholder for rows above the threshold


if False:

    if False:    # read shakemap file
        read_shakemap_xml()

    if True:   # read video metadata
        param = Params(1000)
        param.select_dataset()
        videos = read_data(os.path.splitext(param.dataset)[0] + ".txt")

        video_locs = np.zeros((2, len(videos)))
        for idx, v in enumerate(videos):
            vl = v.location()
            video_locs[0, idx] = vl[0]
            video_locs[1, idx] = vl[1]

        # print video_locs

        np.savetxt(param.dataset, video_locs.transpose(), fmt='%.4f\t')
Example #45
0
                    tree = Kd_standard(data, param)
                else:
                    logging.error("No such index structure!")
                    sys.exit(1)
                tree.buildIndex()

                res_cube_value[i, j, k] = optimization(tree, fov_count, seed_list[j], param)

    res_value_summary = np.average(res_cube_value, axis=1)
    np.savetxt(param.resdir + exp_name + dataset_identifier, res_value_summary, fmt="%.4f\t")


if __name__ == "__main__":

    logging.basicConfig(level=logging.DEBUG, filename="../../log/debug.log")
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")

    param = Params(1000)
    data = data_readin(param)
    param.NDIM, param.NDATA = data.shape[0], data.shape[1]
    param.LOW, param.HIGH = np.amin(data, axis=1), np.amax(data, axis=1)

    print data
    # eval_partition(data, param)

    eval_analyst(data, param)
    # eval_bandwidth(data, param)
    # eval_skewness(data, param)

    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  END")
Example #46
0
def setOptions(optdict, train_or_test):
	assert('--dataFile' in optdict)
	dataFile = optdict['--dataFile']
	assert(len(dataFile) > 0)
	assert('--modelFile' in optdict)
	modelFile = optdict['--modelFile']
	assert(len(modelFile) > 0)
	
	params = Params()
	params.trainOrTest = train_or_test 
	params.splParams = Params()
	params.epsilon = .01
	params.C = 1.0
	params.splParams.splMode = 'CCCP'
	params.seed = 0
	params.maxOuterIters = 10
	params.minOuterIters = 5

	params.estimatedNumConstraints = 100
	params.syntheticParams = None
	params.supervised = False
	params.numYLabels = 20
	params.maxPsiGap = 0.00001
	params.splParams.splInitFraction = 0.5
	params.splParams.splIncrement = 0.1
	params.splParams.splInitIters = 1
	params.splParams.splOuterIters = 1
	params.splParams.splInnerIters = 1

	params.babyData = 0
	params.balanceClasses = 0

	params.UCCCP = int(optdict['--UCCCP'])

	if '--splMode' in optdict:
		params.splParams.splMode = optdict['--splMode']

	assert('--latentVariableFile' in optdict)
	params.latentVariableFile = optdict['--latentVariableFile']

	if params.splParams.splMode != 'CCCP':
		assert('--splControl' in optdict)
	if '--splControl' in optdict:
		params.splParams.splControl = int(optdict['--splControl'])
	
	if '--splInitIters' in optdict:
		params.splParams.splInitIters = int(optdict['--splInitIters'])

	if '--balanceClasses' in optdict:
		params.balanceClasses = int(optdict['--balanceClasses'])

	if '--babyData' in optdict:
		params.babyData = int(optdict['--babyData'])

	if params.babyData == 1:
		params.numYLabels = 7

	kernelFile = '/afs/cs.stanford.edu/u/rwitten/projects/multi_kernel_spl/data/allkernels_info.txt'
	if '--maxPsiGap' in optdict:
		params.maxPsiGap = float(optdict['--maxPsiGap'])
	
	params.initialModelFile = None
	if '--initialModelFile' in optdict:
		params.initialModelFile = optdict['--initialModelFile']

	if '--maxTimeIdle' in optdict:
		params.maxTimeIdle = int(optdict['--maxTimeIdle'])

	if '--supervised' in optdict:
		params.supervised = optdict['--supervised']

	if '--kernelFile' in optdict:
		kernelFile = optdict['--kernelFile']

	if '--C' in optdict:
		params.C = float(optdict['--C'])
	
	if '--epsilon' in optdict:
		params.epsilon = float(optdict['--epsilon'])
	
	
	if '--seed' in optdict:
		params.seed = int(optdict['--seed'])
	random.seed(params.seed)
	numpy.random.seed(params.seed)
	
	if '--maxOuterIters' in optdict:
		params.maxOuterIters = int(optdict['--maxOuterIters'])
	
	if '--numYLabels' in optdict:
		params.numYLabels = int(optdict['--numYLabels'])

	if '--synthetic' in optdict and int(optdict['--synthetic']):
		params.syntheticParams = Params()
		params.syntheticParams.numLatents = 10
		params.syntheticParams.strength = 10.0
		params.numYLabels = 5
		params.maxPsiGap = 0.00001
		params.numExamples = 50
		params.totalLength = params.numYLabels + 1
		params.lengthW = params.numYLabels * params.totalLength
		if '--syntheticNumLatents' in optdict:
			params.syntheticParams.numLatents = int(optdict['--syntheticNumLatents'])

		if '--syntheticNumExamples' in optdict:
			params.numExamples = int(optdict['--syntheticNumExamples'])

		if '--syntheticStrength' in optdict:
			params.syntheticParams.strength = float(optdict['--syntheticStrength'])

	assert('--scratchFile' in optdict)
	params.scratchFile = optdict['--scratchFile']

	logFile = '%s.log' %params.scratchFile

	try:
		os.remove(logFile) 
	except OSError:  # the log file does not exist yet
		pass
Example #47
0

if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG, filename='../log/debug.log')
    logging.info(time.strftime("%a, %d %b %Y %H:%M:%S", time.localtime()) + "  START")


    # eps_list = [0.001, 0.004, 0.007, 0.01]
    # dataset_list = ['yelp', 'foursquare', 'gowallasf', 'gowallala']

    eps_list = [0.05, 0.45]
    dataset_list = ['gowallasf']

    for dataset in dataset_list:
        for eps in eps_list:
            param = Params(1000)
            all_workers = data_readin(param)
            param.NDIM, param.NDATA = all_workers.shape[0], all_workers.shape[1]
            param.LOW, param.HIGH = np.amin(all_workers, axis=1), np.amax(all_workers, axis=1)

            param.DATASET = dataset
            param.select_dataset()
            param.Eps = eps
            param.debug()

            path_data = getPathData(all_workers, param)

            # max_count = 0
            # for data in path_data:
            #     if data[1] > max_count:
            #         max_count = data[1]
Example #48
0
    def setUp(self):
        # init parameters
        self.p = Params(1000)
        self.p.select_dataset()

        self.log = logging.getLogger("debug.log")
Example #49
0
                queue.append(curr.sw)
                queue.append(curr.se)
            else:
                leaf_boxes.append((curr.n_box, curr.n_count))

    return leaf_boxes
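
# Self-contained sketch of the traversal whose tail appears above: breadth-
# first collection of (bounding box, count) pairs from quadtree leaves. Node
# fields (nw/ne/sw/se, n_box, n_count) mirror the fragment; the function name
# and the leaf test are assumptions, not the project's code.
def collect_leaf_boxes_sketch(root):
    from collections import deque
    leaf_boxes = []
    queue = deque([root])
    while queue:
        curr = queue.popleft()
        if curr is None:
            continue
        if curr.nw is not None:  # internal node: visit all four quadrants
            queue.extend([curr.nw, curr.ne, curr.sw, curr.se])
        else:                    # leaf: record its box and count
            leaf_boxes.append((curr.n_box, curr.n_count))
    return leaf_boxes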


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG, filename='log/debug.log')

    # dataset_list = ['yelp', 'foursquare', 'gowallasf', 'gowallala']
    dataset_list = ['mediaq']

    for dataset in dataset_list:
        param = Params(1000)
        data = data_readin(param)
        param.NDIM, param.NDATA = data.shape[0], data.shape[1]
        param.LOW, param.HIGH = np.amin(data, axis=1), np.amax(data, axis=1)

        param.DATASET = dataset
        param.select_dataset()
        param.debug()

        path_data = getPathData(data, param)

        fig, ax = plt.subplots()
        # img = imread("background.png")
        for path_entry in path_data:
            path = path_entry[0]
            codes, verts = zip(*path)
Example #50
0
        rads = atan2(-dy, dx)
        # rads %= 2*pi
        rads += pi / 2.0
        return degrees(rads)
    # print angle_bwn_two_points(0,0,1,-1)

def distance_km(lat1, lon1, lat2, lon2):
    """
    Haversine distance (km) between two geographical locations.
    """
    R = 6371  # km
    dLat = math.radians(abs(lat2 - lat1))
    dLon = math.radians(abs(lon2 - lon1))
    lat1 = math.radians(lat1)
    lat2 = math.radians(lat2)

    a = math.sin(dLat / 2) * math.sin(dLat / 2) + math.sin(dLon / 2) * math.sin(dLon / 2) * math.cos(lat1) * math.cos(
        lat2)
    c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))
    d = R * c
    return d
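
# Usage sketch: a familiar pair of points as a sanity check. Los Angeles
# (34.05, -118.24) to San Francisco (37.77, -122.42) should come out at
# roughly 560 km (coordinates approximate).
if False:
    print(distance_km(34.05, -118.24, 37.77, -122.42))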

if False:
    # test
    box = np.array([[1.5, 1.5], [2.5, 3.5]])
    param = Params(1000)
    param.GRID_SIZE = 5
    param.x_min, param.y_min, param.x_max, param.y_max = 0, 0, 5, 5
    print mbr_to_cellids(box, param)
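
# Hypothetical sketch of what mbr_to_cellids might compute for the test above:
# the ids of the uniform-grid cells overlapped by a bounding box (row-major
# ids over a GRID_SIZE x GRID_SIZE grid spanning [x_min, x_max] x
# [y_min, y_max]). An illustration under those assumptions, not the project's
# implementation.
def mbr_to_cellids_sketch(box, param):
    gs = param.GRID_SIZE
    cell_w = (param.x_max - param.x_min) / float(gs)
    cell_h = (param.y_max - param.y_min) / float(gs)
    x_lo = max(0, int((box[0][0] - param.x_min) / cell_w))
    x_hi = min(gs - 1, int((box[1][0] - param.x_min) / cell_w))
    y_lo = max(0, int((box[0][1] - param.y_min) / cell_h))
    y_hi = min(gs - 1, int((box[1][1] - param.y_min) / cell_h))
    return [x * gs + y for x in range(x_lo, x_hi + 1)
            for y in range(y_lo, y_hi + 1)]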