Ejemplo n.º 1
0
def purge_osm_cache_from_addr():  # run by 1 server.
    """Delete cached OSM data for every city marked "-1" in cityrequest.txt.

    Scans the shared request file under DirOSM, collects addresses whose
    trailing "~|" field is "-1", then for each one purges the osmPipeline
    data, recreates its folder (single server, guarded by a lock), and
    removes the matching cache meta files via a shell glob.
    """
    request_path = DirOSM + os.sep + "cityrequest.txt"
    print("Checking " + request_path)
    if not os.path.exists(request_path):
        return
    to_delete = []
    with open(request_path, "r") as fh:
        for raw in fh:
            line = raw.strip()
            if not line:
                continue
            fields = line.split("~|")
            # A trailing "-1" marks the city for deletion.
            if fields[-1].strip() == "-1":
                to_delete.append(fields[0].strip())
    print("going to delete", to_delete)
    for addr in to_delete:
        if iprint:
            print("Deleting: " + addr)
        osm_folder_path = DirOSM + os.sep + addr
        osm = osmPipeline(folder_path=osm_folder_path)
        purge_all_data = True
        print("\nPurge all data !!! %s\n" % osm_folder_path)

        if purge_all_data:
            # Only one server performs the purge; the rest skip past.
            with AccessRestrictionContext(prefix="purge") as lock:
                lock.Access_Or_Wait_And_Skip("purge")
                osm.purge_all_data()
                print("makedirs " + osm_folder_path)
                os.makedirs(osm_folder_path)
        # delete cache meta file as well.
        CacheMetaDir = "~/cachemeta/osm/"  # You Edit
        # shell=True needed here for ~ expansion and the trailing glob
        cmd = "rm " + CacheMetaDir + "cache-" + addr + "*"
        print(cmd)
        subprocess.call(cmd, shell=True)
Ejemplo n.º 2
0
def test_load_mc(addr_list,
                 overwrite_memory=False,
                 ignore_lock=False,
                 force_set_valid=False):
    """Check (or redo) that all redis k-v caches for the given cities are good.

    - overwrite_memory: if you don't believe mm is loaded but the flag shows
      valid, set this to true.
    - ignore_lock: still load if another server already started loading.
    - force_set_valid: overwrite valid_flags, regard as all loaded without
      checking. Do not use this option with other options. Run on 1 server.
    """
    mm = CacheManager(overwrite_prefix=True,
                      overwrite_redis_servers=False,
                      rt_servers=False)
    Pretend_Valid = force_set_valid  # force change mm valid flag to 1. Careful! <set_to_be_valid>

    # Five cache meta files per address, in fixed order (0..4).
    templates = [
        "osm/cache-%s-nodeid-to-lat-lng.txt",  #0
        "osm/cache-%s-nodeid-to-neighbor-nid.txt",  #1
        "osm/cache-%s-nids-to-speed.txt",  #2
        "osm/cache-%s-nid-to-elevation.txt",  #3
        "osm/cache-%s-nids-to-waytag.txt",  #4
    ]
    tasks = [tpl % addr for addr in addr_list for tpl in templates]

    lock = AccessRestrictionContext(
        prefix="-test_load_mc~",
        persistent_restriction=True,
        persist_seconds=86400 * 3,
        no_restriction=not On_Cluster,
    )
    for task in tasks:
        if Pretend_Valid:
            # Just flip the valid flag — no locking, no actual loading.
            mm.use_cache(meta_file_name=task,
                         overwrite_prefix=True,
                         set_to_be_valid=True,
                         loading_msg=task)
            continue
        with lock:
            # Each meta file is claimed by exactly one server.
            lock.Access_Or_Skip(task)
            print(task, MyIp)
            mm.use_cache(meta_file_name=task,
                         overwrite_prefix=True,
                         overwrite_memory=overwrite_memory,
                         ignore_lock=ignore_lock,
                         loading_msg=task)

    print("Done", addr_list, MyIp)
Ejemplo n.º 3
0
    def load_cache_file_into_mem(self,
                                 cache_file_abs_path,
                                 params,
                                 store_allKeys=False,
                                 msg=''):
        '''Load a pickled cache file into redis under a cluster-wide lock.

        cache_file assumed to be a dict of k:list, with header, dtype,
        keyPos info.

        - cache_file_abs_path: path of the pickle file holding the dict.
        - params: Mem/redis connection params; "overwrite_memory" forces
          a reload even when the valid/end flags say the data is present.
        - store_allKeys: additionally store the full key list under "allKeys".
        - msg: suffix appended to progress prints.
        '''
        if self.my_platform == "mac":
            single_server = True  # not on cluster
        else:
            single_server = False
        assert params["engine"] == "redis"
        Force_Redo = ("overwrite_memory" in params
                      and params["overwrite_memory"])

        with AccessRestrictionContext(prefix=cache_file_abs_path,
                                      no_restriction=single_server) as lock:
            lock.Access_Or_Wait_And_Skip("load_cache_file_into_mem")
            if iprint: print("load_cache_file_into_mem params", params)
            self.mm = Mem(params)
            tmp = self.mm.get(Valid_Flag_Key)
            tmp2 = self.mm.get(End_Flag_Key)
            if tmp == 1 and tmp2 == 1 and not Force_Redo:
                if iprint: print("Redis already cached, skip...")
                return
            print("pickle.load  %s ..." % cache_file_abs_path)
            # FIX: close the file promptly instead of leaking the handle
            # (was: pickle.load(open(path, "rb"))).
            with open(cache_file_abs_path, "rb") as fh:
                da = pickle.load(fh)
            if iprint >= 2:
                try:
                    print(" - Info - ", da["header"], da["dtype"],
                          da["keyPos"])
                except KeyError:  # FIX: was a bare except; only a missing key is expected
                    print(" - missing keyPos or header or dtype -")
            if not Force_Redo: self.mm.set(End_Flag_Key, 0)
            self.mm.set(Valid_Flag_Key, 1)
            time.sleep(0.1)  # redis checks expired at freq 10hz.
            allKeys = []
            # Metadata entries are popped so the k-v loop below sees data only.
            if "header" in da:
                header = da.pop("header")
                self.mm.set("header", header)
            if "dtype" in da:
                dtype = da.pop("dtype")
                self.mm.set("dtype", dtype)
            if "keyPos" in da:
                keyPos = da.pop("keyPos")
                self.mm.set("keyPos", keyPos)
            cnt, thresh = 0, 1024 * 8
            for k, v in da.items():
                self.mm.set(k, v)
                cnt += 1
                if store_allKeys:
                    allKeys.append(k)
                if iprint and cnt >= thresh:
                    thresh *= 2  # progress prints at exponentially spaced counts
                    print("mm key cnt %d. %s" % (cnt, msg))
            if store_allKeys: self.mm.set("allKeys", allKeys)
            self.mm.set(End_Flag_Key, 1)
            if iprint: print("loaded cache key num: %d" % cnt)
Ejemplo n.º 4
0
    def yield_file_into_mem(self,
                            yield_func,
                            yield_args,
                            params,
                            kv_action_type=None,
                            msg=''):
        """Stream (k, v) pairs from yield_func(yield_args) into redis.

        kv_action_type None stores v directly under k; 1 appends v to the
        deduplicated list already stored under k. Guarded by a cluster lock
        unless "ignore_lock" is set in params or running on a single server.
        """
        single_server = (self.my_platform == "mac")  # not on cluster
        assert params["engine"] == "redis"
        Force_Redo = ("overwrite_memory" in params
                      and params["overwrite_memory"])
        ignore_lock = ("ignore_lock" in params and params["ignore_lock"])

        with AccessRestrictionContext(
                prefix=yield_args + self.meta_file_name,
                no_restriction=(ignore_lock or single_server)) as lock:
            lock.Access_Or_Wait_And_Skip("yield_file_into_mem")
            self.mm = Mem(params)
            if iprint:
                print("yield_file_into_mem params", params)
                print('mm.prefix', self.mm.prefix)
            valid_flag = self.mm.get(Valid_Flag_Key)
            end_flag = self.mm.get(End_Flag_Key)
            if valid_flag == 1 and end_flag == 1 and not Force_Redo:
                if iprint: print("Redis already cached, skip...")
                return
            if not Force_Redo:
                self.mm.set(End_Flag_Key, 0)
            self.mm.set(Valid_Flag_Key, 1)
            time.sleep(0.1)
            cnt, thresh = 0, 1024 * 8
            for k, v in yield_func(yield_args):
                if kv_action_type is None:
                    self.mm.set(k, v)
                elif kv_action_type == 1:  # append to vlist
                    vlist = self.mm.get(k)
                    if vlist is None:
                        vlist = []
                    if v not in vlist:
                        vlist.append(v)
                    self.mm.set(k, vlist)
                cnt += 1
                if iprint and cnt >= thresh:
                    thresh *= 2
                    print("mm key cnt %d. %s" % (cnt, msg))
            self.mm.set(End_Flag_Key, 1)
            if iprint: print("yielded key num: %d" % cnt)
Ejemplo n.º 5
0
def gen_place_request():  # not using this.
    ''' run by 1 server. Find new address. I usually manually add city request.

    Walks every account directory, splits each combined trace file into
    segments at large distance/time gaps, reverse-geocodes the segment
    endpoint coordinates to city addresses, and appends any city not yet
    listed in cityrequest.txt as "addr~|0" awaiting manual approval
    ("addr~|1").
    '''
    account_dirs = glob.glob(DirData + "/*")
    for iddir in account_dirs:
        email = iddir.split(os.sep)[-1]
        tmpdir = iddir + "/%s" % gpsfolder
        if not (os.path.exists(tmpdir) and os.path.isdir(tmpdir)):
            if iprint >= 1:
                print(__file__.split(os.sep)[-1], "Empty account", iddir)
            continue
        # gather unix time
        tmpdir = iddir + "/%s" % obdfolder
        time_list = [
            x.strip(os.sep).split(os.sep)[-1].rstrip(EXT)
            for x in glob.glob(tmpdir + "/*%s" % EXT)
        ]
        for truetimestr in time_list:
            tmpf = iddir + os.sep + combinefolder + os.sep + truetimestr + ".txt"
            if not os.path.exists(tmpf):
                if iprint >= 2: print("skip, Not exists: %s " % tmpf)
                continue
            if os.path.exists(tmpf) and get_file_size_bytes(tmpf) < 100:
                if iprint >= 2: print("skip, Too small: %s " % tmpf)
                continue
            if iprint >= 2: print("Proc: %s " % tmpf)
            # lastlat/lastlng/lasttime lag one sample behind lat/lng/gti.
            # loclat/loclng are seeded from the file's first sample only —
            # NOTE(review): they are never re-seeded after a cut, so every
            # segment's recorded "start" is the file's first point; confirm
            # this is intended.
            lastlat = None
            lastlng = None
            loclat = None
            loclng = None
            lasttime = None
            segs = []
            path = []
            sample_loc = []
            with open(tmpf, "r") as f:
                for l in f:
                    dic = convert_line_to_dic(l)
                    if lastlat is not None:
                        # carry the previous sample forward before reading the new one
                        lastlat = lat
                        lastlng = lng
                        lasttime = gti
                    lat = dic[KeyGPSLat]
                    lng = dic[KeyGPSLng]
                    gti = dic[KeyGPSTime]
                    if lastlat is None:
                        # first sample: seed "previous" point and segment start
                        lastlat = lat
                        lastlng = lng
                        loclat = lat
                        loclng = lng
                        lasttime = gti
                    ddif = get_dist_meters_latlng(lastlat, lastlng, lat, lng)
                    dtime = abs(lasttime - gti)
                    # a large jump in space or time ends the current segment
                    if ddif > kCutTraceDistGap or dtime / 1000.0 > kCutTraceTimeGap:
                        if iprint >= 2: print("weird inside cut? " + tmpf)
                        if iprint >= 2: print("path len %d" % len(path))
                        segs.append(path)
                        sample_loc.append([[loclat, loclng],
                                           [lastlat, lastlng]])
                        path = []
                    path.append(dic)
                # flush the trailing segment
                if iprint >= 2: print("path len %d" % len(path))
                segs.append(path)
                sample_loc.append([[loclat, loclng], [lastlat, lastlng]])

            for path in segs:
                # latlngs = [segment start latlng, segment end latlng]
                latlngs = sample_loc.pop(0)
                addr = latlng_to_city_state_country(latlngs[0][0],
                                                    latlngs[0][1]).replace(
                                                        " ", "")
                addr2 = latlng_to_city_state_country(latlngs[1][0],
                                                     latlngs[1][1]).replace(
                                                         " ", "")
                if addr != addr2:
                    dist = get_dist_meters_latlng(latlngs[0][0], latlngs[0][1],
                                                  latlngs[1][0], latlngs[1][1])
                    if dist > 10000:
                        # NOTE(review): this only logs; addr2 is never used
                        # below, so "skip addr2" is implicit.
                        if iprint:
                            print(tmpf, addr, addr2,
                                  "cross region, skip addr2 ...")

                # I need manual approve of cities appearing here, as "addr~|1"
                requestFile = DirOSM + os.sep + "cityrequest.txt"
                appeared = 0
                approved = 0
                if os.path.exists(requestFile):
                    with open(requestFile, "r") as f:
                        for l in f:
                            l = l.strip()
                            if len(l) > 0:
                                st = l.split("~|")
                                if st[0].strip() == addr:
                                    appeared = 1
                                    if st[-1].strip(
                                    ) == "1":  # if want to run gen cache.
                                        approved = 1
                                    elif st[-1].strip(
                                    ) == "-1":  # if purge all data!
                                        approved = 1
                if appeared == 0:
                    # only one server appends the new request line
                    with AccessRestrictionContext(
                            prefix="requestFile") as lock:
                        lock.Access_Or_Skip("requestFile")
                        with open(requestFile, "a") as f:
                            f.write(addr + "~|0\n")
                if approved == 0 or appeared == 0:
                    if iprint:
                        print("Please approve new city: " + addr)
                        print("vim ~/greendrive/osmdata/cityrequest.txt")
Ejemplo n.º 6
0
def gen_osm_cache_from_addr(overwrite=False,
                            addr=None,
                            max_num_servers=16,
                            lock_sfx="",
                            group=None,
                            sync_wait_sec=1,
                            test_try=('try'
                                      in sys.argv)):  # run by multi server.
    ''' Multi servers run, sync and lock. Use cmd-line to give max-server and lock suffix.
    If run on 1 PC: python 3genOsmCache.py gen_osm addr=Indiana,US max=1 s=0 t=0

    For each address: download the .osm file (one server), then generate and
    load the caches (waytag, lat-lng, neighbor, elevation, speed) under
    per-task locks, with cluster-wide barriers between dependent stages.

    - overwrite: re-generate meta files, cache files and redis memory.
    - addr / group: exactly one must be given; with addr=None the approved
      ("~|1") cities are read from cityrequest.txt.
    - max_num_servers, lock_sfx, sync_wait_sec: cluster coordination knobs.
    - test_try: exit right after arg printing (compile/pyc check only).
    '''
    assert addr or group, "Please input either 'addr' or 'group' !!!"
    sync_group_name = addr if addr is not None else group
    sync_group_name += str(max_num_servers) + lock_sfx
    if On_Cluster:
        sync = Synchronizer(prefix=py_fname(__file__, True),
                            sync_group_name=sync_group_name)
        sync.Clear()
    print('addr', addr, 'max_num_servers', max_num_servers, 'lock_sfx',
          lock_sfx, 'sync_group_name', sync_group_name, 'sync_wait_sec',
          sync_wait_sec)
    if test_try: sys.exit(0)  # load pyc and exits.
    time.sleep(
        sync_wait_sec)  # if not start at same time, need time to wait here
    if On_Cluster: sync.Register()
    time.sleep(sync_wait_sec)
    if On_Cluster: sync.Synchronize(print_str="init")
    TAG = addr + " " + MyIp
    if On_Cluster and iprint:
        print("total_server_num", sync.total_server_num)
        time.sleep(2)

    # Build the work list: explicit addr, or cities approved with "~|1".
    requestFile = DirOSM + os.sep + "cityrequest.txt"
    addrlist = []
    if addr is None:
        if os.path.exists(requestFile):
            print("looking at " + requestFile)
            with open(requestFile, "r") as f:
                for l in f:
                    l = l.strip()
                    if len(l) > 0:
                        st = l.split("~|")
                        if st[-1].strip() == "1":
                            addrlist.append(st[0].strip())
    else:
        addrlist.append(addr)

    if iprint: print(addrlist, len(addrlist))

    for addr in addrlist:
        if iprint: print("\nProc addr: " + addr)

        osm_folder_path = DirOSM + os.sep + addr
        print('osm_folder_path ' + osm_folder_path)

        if not os.path.exists(osm_folder_path):
            # one server creates the folder; a lost race just prints and continues
            with AccessRestrictionContext(prefix="mkdir_osm~",
                                          no_restriction=not On_Cluster) as lk:
                lk.Access_Or_Wait_And_Skip("mkdir_osm~")
                try:
                    os.makedirs(osm_folder_path)
                except OSError as e:
                    print(e)
                    print("Already made " + osm_folder_path)
                except Exception as e:
                    print(e)
                    sys.exit(1)

        osm = osmPipeline(folder_path=osm_folder_path)
        overwrite_meta_file = False  #True: if re-run, re-gen cache meta files.
        overwrite_cache_file = False
        overwrite_memory = False
        if overwrite:
            overwrite_meta_file = True
            overwrite_cache_file = True
            overwrite_memory = True
            print("\nOverwrite !!! %s\n" % osm_folder_path)
            time.sleep(3)

        if On_Cluster:
            sync.Synchronize(print_str="before download_osm_given_address")

        params = Global_params  # for Redis

        # Shared per-address lock; completion-file marks survive re-runs.
        lock = AccessRestrictionContext(
            prefix=osm_folder_path + lock_sfx,
            write_completion_file_mark=True,  # prevent re-run, like a mark
            completion_file_dir=osm_folder_path,
            print_str=False,
            no_restriction=not On_Cluster,
        )
        with lock:
            print("lock download_osm ? ")
            lock.Access_Or_Wait_And_Skip("download_osm")

            print("osm.download_osm_given_address() ...", addr)
            osm.download_osm_given_address(addr)

        if On_Cluster:
            sync.Synchronize(
                print_str="downloaded, before running tasks with lock")
            sync.waitForNfsFile(osm_folder_path + os.sep + addr + ".osm",
                                sleeptime=3)
            sync.Synchronize(
                print_str="downloaded, wait till everyone sees the file")

        if iprint: print("\n" + addr)
        NFSsync = 0  # on PC same here:
        while NFSsync == 0:
            try:  # wait for downloaded file on cluster.
                osmname = osm.get_osm_file_path().split(
                    os.sep)[-1].rstrip(".osm")
                NFSsync = 1
            except IOError as e:
                time.sleep(1)  # NFS sync wait.
                print(e)
            except Exception as e:
                print(e)
                sys.exit(1)

        mustSeeFiles = []
        QUOTE = get_osm_file_quote_given_file(osm_folder_path +
                                              "/%s.osm" % osmname)
        allow_dead_num = 0

        # --- cache: nids -> way tags (single server) ---
        lock.max_access_num = 1
        mustSeeFiles.append(osm_folder_path +
                            "/cache-%s-nids-to-waytag.txt" % osmname)
        with lock:
            print("lock nids2waytag ? ")
            lock.Access_Or_Skip("nids2waytag")
            metaf = "osm/cache-%s-nids-to-waytag.txt" % osmname
            print("proc " + metaf)
            outf = osm_folder_path + "/cache-%s-nids-to-waytag.txt" % osmname  # outf not in use
            if not mm.exist_cache(metaf) or overwrite_meta_file:
                mm.create_cache(
                    meta_file_name=metaf,
                    gen_cache_file_cmd="python " + HomeDir +
                    "/syncdir/zyrcode/gen_cache/extractNode2WayTag.py gen_cache_file "
                    + osm_folder_path + "/%s.osm " % osmname + outf,
                    cache_file_abs_path=outf,
                    params=params,
                    overwrite_meta_file=overwrite_meta_file,
                    overwrite_cache_file=overwrite_cache_file,
                    overwrite_memory=overwrite_memory,
                    yield_func=yield_nids2waytag,
                    yield_args=osm_folder_path + "/%s.osm" % osmname + CUT +
                    QUOTE,
                    overwrite_prefix=True,
                )
            lg.lg_str_once(TAG + " cache nids2waytag done")

        # --- cache: node id -> (lat, lng) ---
        mustSeeFiles.append(osm_folder_path +
                            "/cache-%s-nodeid-to-lat-lng.txt" % osmname)
        with lock:
            print("lock nid2latlng ? ")
            lock.Access_Or_Skip("nid2latlng")
            metaf = "osm/cache-%s-nodeid-to-lat-lng.txt" % osmname
            print("proc " + metaf)
            outf = osm_folder_path + "/cache-%s-nodeid-to-lat-lng.txt" % osmname  # not in use
            if not mm.exist_cache(metaf) or overwrite_meta_file:
                mm.create_cache(
                    meta_file_name=metaf,
                    gen_cache_file_cmd="python " + HomeDir +
                    "/syncdir/zyrcode/gen_cache/storeNodeId2LatLng.py gen_cache_file "
                    + "%s " % osm.get_osm_file_path() + outf,
                    cache_file_abs_path=outf,
                    params=params,
                    overwrite_meta_file=overwrite_meta_file,
                    overwrite_cache_file=overwrite_cache_file,
                    overwrite_memory=overwrite_memory,
                    yield_func=yield_nid2latlng,
                    yield_args=osm_folder_path + "/%s.osm" % osmname + CUT +
                    QUOTE,
                    overwrite_prefix=True,
                )
            lg.lg_str_once(TAG + " cache nid2latlng done")

        # --- cache: node id -> neighbor node ids (values appended to a list) ---
        lock.max_access_num = 1
        mustSeeFiles.append(osm_folder_path +
                            "/cache-%s-nodeid-to-neighbor-nid.txt" % osmname)
        with lock:
            print("lock nid2neighbor ? ")
            lock.Access_Or_Skip("nid2neighbor")
            metaf = "osm/cache-%s-nodeid-to-neighbor-nid.txt" % osmname
            print("proc " + metaf)
            outf = osm_folder_path + "/cache-%s-nodeid-to-neighbor-nid.txt" % osmname  # not in use
            if not mm.exist_cache(metaf) or overwrite_meta_file:
                mm.create_cache(
                    meta_file_name=metaf,
                    gen_cache_file_cmd="python " + HomeDir +
                    "/syncdir/zyrcode/gen_cache/genNodeId2NeighborNid.py gen_cache_file "
                    + "%s " % osm.get_osm_file_path() + outf,
                    cache_file_abs_path=outf,
                    params=params,
                    overwrite_meta_file=overwrite_meta_file,
                    overwrite_cache_file=overwrite_cache_file,
                    overwrite_memory=overwrite_memory,
                    yield_func=yield_nid2neighbor,
                    yield_args=osm_folder_path + "/%s.osm" % osmname + CUT +
                    QUOTE,
                    kv_action_type=1,  # 1: append to k= vlist
                    overwrite_prefix=True,
                )
            lg.lg_str_once(TAG + " cache nid2neighbor done")

        if iprint: print("\nMust sync here!!!\n\n" + addr)
        if On_Cluster:
            sync.Synchronize(
                print_str="before write_node_elevation"
            )  # because this depends on finishing the previous.
            for fpath in mustSeeFiles:
                sync.waitForNfsFile(fpath, sleeptime=3)
            sync.Synchronize(
                print_str="mustSeeFiles: wait till everyone sees the file")
        mustSeeFiles = []

        if overwrite:
            lock.max_access_num = 1
            with lock:
                lock.Access_Or_Wait_And_Skip("rm elevation")
                try:
                    osm.remove_file_node_elevation()
                except:
                    print("Exception: osm.remove_file_node_elevation() !")
                lg.lg_str_once(TAG + "rm node_elevation done")

        # --- elevation: query with half the servers, then cache single-server ---
        lock.max_access_num = int(max_num_servers // 2)  # allow more servers.
        done_elevation = 0
        with lock:
            print("lock query elevation ? ")
            lock.Access_Or_Skip("elevation", print_detail=True)
            osm.write_node_elevation(ignore_mc=True,
                                     load_previous=True,
                                     lock_sfx=lock_sfx)
            lg.lg_str_once(TAG + " osm.write_node_elevation done")
            done_elevation = 1

        if done_elevation:  # task depends on previous.
            lock.max_access_num = 1
            with lock:
                print("lock nid2elev yield into mem ? ")
                lock.Access_Or_Skip("nid2elevation")
                if On_Cluster:
                    sync.waitForNfsFile(osm_folder_path +
                                        "/%s-nid-to-elevation.txt" % osmname,
                                        sleeptime=3)
                metaf = "osm/cache-%s-nid-to-elevation.txt" % osmname
                print("proc " + metaf)
                outf = osm_folder_path + "/cache-%s-nid-to-elevation.txt" % osmname
                if not mm.exist_cache(metaf) or overwrite_meta_file:
                    mm.create_cache(
                        meta_file_name=metaf,
                        gen_cache_file_cmd="python " + HomeDir +
                        "/syncdir/zyrcode/gen_cache/storeNodeId2elevation.py gen_cache_file "
                        + osm_folder_path +
                        "/%s-nid-to-elevation.txt " % osmname + outf,
                        cache_file_abs_path=outf,
                        params=params,
                        overwrite_meta_file=overwrite_meta_file,
                        overwrite_cache_file=overwrite_cache_file,
                        overwrite_memory=overwrite_memory,
                        yield_func=yield_nid2elevation,
                        yield_args=osm_folder_path +
                        "/%s-nid-to-elevation.txt" % osmname + CUT + QUOTE,
                        overwrite_prefix=True,
                    )
                lg.lg_str_once(TAG + " cache nid-to-elevation done")

        if overwrite:
            lock.max_access_num = 1  # lock.
            with lock:  # allow 1.
                print("lock remove_file_way_speed ? ")
                lock.Access_Or_Wait_And_Skip("remove_file_way_speed")
                osm.remove_file_way_speed()
                lg.lg_str_once(TAG + " remove_file_way_speed done")

        # --- way speed: all servers may generate; then fix + cache single-server ---
        lock.max_access_num = max_num_servers  # allow more servers.
        done_way_speed = 0
        with lock:
            print("lock query way_speed ? ")
            lock.Access_Or_Skip("way-speed")
            print("start to run write_way_speed()")
            osm.write_way_speed(
            )  # generate nid2spd before storing them. everyone appends to file. Use Mem lock to avoid dup.
            lg.lg_str_once(TAG + " osm.write_way_speed done")
            done_way_speed = 1

        if iprint: print("\n" + addr)
        if On_Cluster:
            allow_dead_num = min(allow_dead_num + 3, sync.total_server_num //
                                 2)  # strict increasing.
            sync.Synchronize(
                print_str="before nids2speed, fix and cache",
                allow_dead_num=allow_dead_num
            )  # because this depends on finishing the previous.

        if done_way_speed:  # task depends. But if allows max_num_servers=all in previous step, then just enter:
            lock.max_access_num = 1  # lock.
            with lock:
                print("lock nids2speed ? ")
                lock.Access_Or_Skip("nids2speed", print_detail=True)
                if On_Cluster:
                    sync.waitForNfsFile(osm_folder_path +
                                        "/%s-nids-to-speed.txt" % osmname,
                                        sleeptime=3)
                fix_no_speed(osmname, mm_loaded='mm_loaded' in sys.argv)
                lg.lg_str_once(TAG + " fix_no_speed done")
                time.sleep(5)

                metaf = "osm/cache-%s-nids-to-speed.txt" % osmname
                print("proc " + metaf)
                outf = osm_folder_path + "/cache-%s-nids-to-speed.txt" % osmname
                if not mm.exist_cache(metaf) or overwrite_meta_file:
                    mm.create_cache(
                        meta_file_name=metaf,
                        gen_cache_file_cmd="python " + HomeDir +
                        "/syncdir/zyrcode/gen_cache/storeNodeId2speed.py gen_cache_file "
                        + osm_folder_path +
                        "/%s-nids-to-speed.txt " % osmname + outf,
                        cache_file_abs_path=outf,
                        params=params,
                        overwrite_meta_file=overwrite_meta_file,
                        overwrite_cache_file=overwrite_cache_file,
                        overwrite_memory=overwrite_memory,
                        yield_func=yield_nids2speed,
                        yield_args=osm_folder_path +
                        "/%s-nids-to-speed.txt" % osmname + CUT + QUOTE,
                        overwrite_prefix=True,
                    )
                lg.lg_str_once(TAG + " cache nids-to-speed done")
Ejemplo n.º 7
0
# Record field/key names pulled from the shared config file.
KeyUserName = get_conf(configfile, "KeyUserName")
UnknownUserEmail = get_conf(configfile, "UnknownUserEmail")
KeySysMs = get_conf(configfile, "KeySysMs")
KeyGPSTime = get_conf(configfile, "KeyGPSTime")
KeyGPSLat = get_conf(configfile, "KeyGPSLat")
KeyGPSLng = get_conf(configfile, "KeyGPSLng")
KeyOriSysMs = get_conf(configfile, "KeyOriSysMs")
PrivacyDist = get_conf_float(configfile, "PrivacyDist")
KeyMAF = get_conf(configfile, "KeyMAF")

# One account directory per email under DirRaw.
rawDirList = glob.glob(DirRaw + os.sep + "*")
emails = [tmp.split(os.sep)[-1] for tmp in rawDirList]

# Cross-server work-allocation lock; claims persist for 100 s.
lock = AccessRestrictionContext(
    prefix=py_fname(__file__, False) + "~mr~",
    persistent_restriction=True,
    persist_seconds=100,
    print_str=False,
)


def find_time_diff(dl, ms):
    """Return the value paired with the first threshold that *ms* reaches.

    dl is a sequence of (start_ms, value) pairs; the first entry whose
    start_ms is <= ms wins. If ms is below every threshold, the last
    entry's value is returned (same fallback the original achieved by
    leaking the loop variable).

    Raises ValueError on an empty dl — the previous version raised an
    accidental UnboundLocalError instead.
    """
    if not dl:
        raise ValueError("find_time_diff: empty (start_ms, value) list")
    for dt in dl:
        if ms >= dt[0]:
            return dt[1]
    return dl[-1][1]  # explicit fallback instead of relying on loop-var leak


with lock:
    ''' make destination directory by 1 server '''
    lock.Access_Or_Wait_And_Skip("makedirs")
    for email in emails:
Ejemplo n.º 8
0
        'tls1': 0,
        'tls2': 0,
        'co': 0,
        'split': 22,
        'pene': 0,
        'coT': 10,
        'coG': 30,
        'coD': 300,
        'sumo_port': 9996
    },
]

# allocate tasks via a redis lock:
# claims persist 10 days (864000 s) so finished tasks stay allocated across runs.
_lock = AccessRestrictionContext(
    prefix="sumo~task1~",
    persistent_restriction=True,
    persist_seconds=864000,
)
_Log_Dir = mypydir + 'logs/'  # root of the per-IP log output folders
R = None  # selected task; stays None until one is claimed later

#  maybe already in log file?
if R is None:
    outdir = _Log_Dir + My_Ip + os.sep
    log_flist = glob.glob(outdir + "*")
    log_keys = set()
    for fn in log_flist:  # previously run
        log_keys.add(fn.rsplit(os.sep, 1)[1])
    for dic in tasks:
        task_key = get_key_from_task_dic(dic)  # sfx
        if task_key in log_keys:
Ejemplo n.º 9
0
        if mindist > dist1: mindist = dist1
        if mindistline > dist12: mindistline = dist12
        if min_angle_diff(sphead, mhead) < minangle:
            minangle = min_angle_diff(sphead, mhead)
        if dist1 < Rough_dist_tolerate and dist12 < Rough_dist_tolerate and min_angle_diff(
                sphead, mhead) < Rough_angle_tolerate:
            return i
    if iprint >= 3:
        print("search_along_path fail: mhead", mhead, "min dist", mindist,
              "min distline", mindistline, "min angle d", minangle)
    return -1


# Cross-server lock for this stage; claims persist for 600 s.
lock = AccessRestrictionContext(
    prefix=py_fname(__file__, False) + "~mn~",
    persistent_restriction=True,
    persist_seconds=600,
    print_str=False,
)

# Single server (max_access_num=1) creates the match output folders;
# other servers wait and then skip.
with AccessRestrictionContext(prefix=py_fname(__file__, False) + "-makedirs",
                              max_access_num=1) as tmplock:
    ''' make destination directory by 1 server '''
    tmplock.Access_Or_Wait_And_Skip("makedirs")
    for iddir in account_dirs:
        matchDir = iddir + os.sep + matchfolder
        if not os.path.exists(matchDir):
            os.makedirs(matchDir)

if False:  # remove old match dir
    ''' remove current match files, start new '''
    with AccessRestrictionContext(prefix=py_fname(__file__, False) + "-mvdirs",