Example no. 1
def open_and_read(filename: Path, should_wait_for_lockfile=False, mode="r"):
    safe_mkdir(FOLDER)
    if not filename.exists():
        return None
    if should_wait_for_lockfile:
        wait_for_lock_file(filename)
    elif _lockfile_exists(filename):
        # Someone else holds the lock; bail out rather than read a partial file.
        return None
    create_lockfile(filename)
    try:
        data = filename.read_text().strip() if mode == "r" else filename.read_bytes()
    finally:
        close_lockfile(filename)
    return data or None
Example no. 2
def divide_cuhk(cuhk_dir):
    safe_mkdir(os.path.join(cuhk_dir, 'probe'))
    safe_mkdir(os.path.join(cuhk_dir, 'test'))
    # Alternate the image files (entries with an extension) between the
    # probe and test splits; directory entries have no '.' and are skipped.
    image_names = [n for n in sorted(os.listdir(cuhk_dir)) if '.' in n]
    for i, image_name in enumerate(image_names):
        dest = 'probe' if i % 2 == 0 else 'test'
        shutil.copyfile(os.path.join(cuhk_dir, image_name),
                        os.path.join(cuhk_dir, dest, image_name))
Example no. 3
def cache_json(key, data):
    fn = get_file_name(key)
    safe_mkdir(CACHE_DIR)
    path = Path(CACHE_DIR, fn)
    file_path = f"{fn}{DATA_SUFFIX}"
    # The metadata file records the write time and the name of the data file.
    js = {"time_stamp": time(), "data": file_path}
    open_and_write(path, dumps(js))
    # Dicts and lists are wrapped under a "data" key; scalars are dumped as-is.
    open_and_write(
        Path(CACHE_DIR, file_path),
        dumps({"data": data} if isinstance(data, (dict, list)) else data),
    )
Example no. 4
def open_and_write(filename: Path,
                   data,
                   should_wait_for_lockfile=False,
                   mode="w"):
    safe_mkdir(FOLDER)
    if should_wait_for_lockfile:
        wait_for_lock_file(filename)
    elif _lockfile_exists(filename):
        return None
    create_lockfile(filename)
    try:
        # Text mode ("w") writes a str; any other mode writes raw bytes.
        if mode == "w":
            filename.write_text(data)
        else:
            filename.write_bytes(data)
    finally:
        close_lockfile(filename)
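Taken together, open_and_read (Example no. 1) and open_and_write form a lock-guarded read/write pair. Below is a minimal round-trip sketch, assuming this module's FOLDER and lock helpers are in scope; the file name and contents are illustrative only.

from pathlib import Path

# Hypothetical target file; adjust to your layout.
target = Path("settings.txt")

# Write, waiting out any concurrent holder of the lock.
open_and_write(target, "theme=dark\n", should_wait_for_lockfile=True)

# Read it back; None means the file is missing or empty.
content = open_and_read(target, should_wait_for_lockfile=True)
print(content)  # "theme=dark" (the read_text() result is stripped)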
Example no. 5
def update_module(path, name, type, url):
    print('update ' + path + '---' + type + '---' + url)
    if type == 'git':
        util.git_update(path, url)
    elif not os.path.exists(path):
        util.safe_mkdir(path)
        download_file = util.download_path(name)
        if not os.path.exists(download_file):
            util.download_http_package(url, download_file)
        if type == 'tar.gz':
            util.extract_tar_gz(download_file, path)
        elif type == 'zip':
            # Zip packages are extracted into the parent of `path`.
            util.extract_zip(download_file, os.path.normpath(path + '/../'))
Example no. 6
    def create_graphs(self):
        fields = [
            'k', 'alg', 'imgs', 'imageId', 'load', 'graph', 'layers', 'hashes',
            'file', 'bins'
        ]
        Args = namedtuple('Arguments', fields)
        Args.__new__.__defaults__ = (None, ) * len(fields)

        basepath = realpath('./precomputed')
        safe_mkdir(basepath)

        # graphpath = '/home/crosleyzack/school/fall18/cse515/repo/project/phaseIII/graph/graph'
        for graph in range(3, 11):
            self.graph(Args(graph=f'{graph}'))

            # Create folder for this graph size.
            working_dir = join(basepath, f'graph{graph}')
            safe_mkdir(working_dir)
            self.task1(Args(k=graph), path=working_dir)
Example no. 7
def main():
    
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument('confocal_image', help='Confocal image to analyse')
    parser.add_argument('output_dir', help='Path to output directory.')
    parser.add_argument('-o', '--only_rna_probe_channel',
            default=False,
            action="store_true",
            help='Only find probes in the RNA channel')
    parser.add_argument('-r', '--rna_probe_channel',
            type=int, default=1, help='RNA probe channel (default 1)')
    parser.add_argument('-u', '--unspliced_probe_channel',
            type=int, default=2, help='Unspliced probe channel (default 2)')
    parser.add_argument('-n', '--nuclear_channel',
            type=int, default=3, help='Nuclear channel (default 3)')
    parser.add_argument('-t', '--rna_probe_channel_threshold',
            type=float, default=0.6,
            help='RNA probe channel threshold (default 0.6)')
    parser.add_argument('-s', '--unspliced_probe_channel_threshold',
            type=float, default=0.8,
            help='Unspliced probe channel threshold (default 0.8)')
    args = parser.parse_args()

    nchannel = human_to_computer_index(args.nuclear_channel)
    pchannels = list(map(human_to_computer_index, [args.rna_probe_channel,
        args.unspliced_probe_channel]))
    thresholds = [args.rna_probe_channel_threshold,
        args.unspliced_probe_channel_threshold]

    if args.only_rna_probe_channel:
        pchannels = pchannels[0:1]
        thresholds = thresholds[0:1]

    if any(c < 0 for c in pchannels):
        parser.error('Probe channel index is one-based; index zero is invalid.')

    safe_mkdir(args.output_dir)

    AutoName.directory = args.output_dir

    count_and_annotate(args.confocal_image, nchannel, pchannels, thresholds,
        imsave_with_outdir)
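For reference, a hypothetical invocation of this script (the actual file name is not shown in this excerpt), using the one-based channel indices and thresholds defined above:

    python count_probes.py confocal.tif results/ -r 1 -u 2 -n 3 -t 0.6 -s 0.8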
Example no. 8
def get_cache(key, timeout):
    fn = get_file_name(key)
    path = Path(CACHE_DIR, fn)
    data = open_and_read(path)
    if data is None:
        return None
    try:
        data = loads(data)
    except ValueError:
        # Corrupt cache entry: drop it so the next call can rebuild it.
        safe_remove(path)
        return None

    ret = data["data"]
    ts = data["time_stamp"]
    if time() - ts > timeout:
        safe_remove(Path(CACHE_DIR, ret))
        return None
    if file_size(Path(CACHE_DIR, ret)):
        return ret
    return None
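A small usage sketch for the cache pair above, assuming CACHE_DIR and the helpers are configured as in the surrounding examples; the key and payload are illustrative.

# Store a payload under a key, then probe it within the timeout window.
cache_json("user-42", {"name": "Ada", "visits": 3})

# get_cache returns the name of the data file (not the payload itself)
# while the entry is fresh and non-empty; otherwise it returns None.
entry = get_cache("user-42", timeout=3600)
if entry is not None:
    payload = open_and_read(Path(CACHE_DIR, entry))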
Example no. 9
def download_chromium():
    SOURCE_CHROMIUM = os.path.join(SOURCE_VENDOR, 'chromium')
    module_list = build_list()
    for module in module_list:
        module_root = os.path.join(SOURCE_CHROMIUM, module['dir'])
        util.safe_mkdir(module_root)
        os.chdir(module_root)
        for submodule_key, submodule_url in module['modules'].items():
            # Keys encode '<name>:<type>', where type is git, tar.gz or zip.
            name_and_type = submodule_key.split(':')
            path = os.path.join(module_root, name_and_type[0])
            update_module(path, name_and_type[0], name_and_type[1], submodule_url)


    # Copy the files the download misses: chromium/VERSION and chromium/src/base/basictypes.h.
    print('3 copy missing files chromium/VERSION and chromium/src/base/basictypes.h')
    # https://chromium.googlesource.com/chromium/src/+/master/chrome/VERSION
    print('3.1 copy script/VERSION to vendor/chromium/')
    shutil.copyfile(os.path.join(SOURCE_ROOT, 'VERSION'),
                    os.path.join(SOURCE_CHROMIUM, 'src', 'chrome', 'VERSION'))
    # https://chromium.googlesource.com/chromium/chromium/+/master/base/basictypes.h
    print('3.2 copy script/basictypes.h to vendor/chromium/src/base/')
    shutil.copyfile(os.path.join(SOURCE_ROOT, 'basictypes.h'),
                    os.path.join(SOURCE_CHROMIUM, 'src', 'base', 'basictypes.h'))
Example no. 10
def make_output_dir(out_dir):
    """creates output folders"""
    util.safe_mkdir(out_dir)
    util.safe_mkdir(os.path.join(out_dir, 'color'))
    util.safe_mkdir(os.path.join(out_dir, 'bone_params'))
Example no. 11
    def run_two_to_three(self):
        """
        Utility to precompute all inputs desired by the professor.
        """
        graphs = [3, 10]  # 10
        task2 = [2, 4, 10]  # 10
        task3 = []  # [1, 5,10]

        basepath = realpath('./precomputed')
        safe_mkdir(basepath)

        # Create named tuple to simulate args.
        fields = [
            'k', 'alg', 'imgs', 'imageId', 'load', 'graph', 'layers', 'hashes',
            'file', 'bins'
        ]
        Args = namedtuple('Arguments', fields)
        Args.__new__.__defaults__ = (None, ) * len(fields)

        # call load once.
        self.load(Args(load='dataset'))

        # graphpath = '/home/crosleyzack/school/fall18/cse515/repo/project/phaseIII/graph/graph'
        for graph in graphs:
            # load graph
            self.graph(Args(graph=f'{graph}'))

            # Create folder for this graph size.
            working_dir = join(basepath, f'graph{graph}')
            safe_mkdir(working_dir)

            print(f'Starting task 2 graph {graph}')

            # task 2.
            task_dir = join(working_dir, 'task2')
            safe_mkdir(task_dir)

            for k in task2:
                print(f'\tTask 2, K = {k}')
                subdir = join(task_dir, f'k{k}')
                safe_mkdir(subdir)
                self.task2(Args(k=k), path=subdir)

            print(f'Starting task 3 graph {graph}')

            # task 3.
            task_dir = join(working_dir, 'task3')
            safe_mkdir(task_dir)

            for k in task3:
                print(f'\tTask 3, K = {k}')
                subdir = join(task_dir, f'k{k}')
                safe_mkdir(subdir)
                self.task3(Args(k=k), path=subdir)
Example no. 12
def create_lockfile(filename: str) -> None:
    safe_mkdir(FOLDER)
    # Touch an empty lock file to mark `filename` as in use.
    open(lockfile_path(filename), "w").close()
Example no. 13
def simulate(override_param, outdir=None, params_file='params', verbose=False):
  #override_param: params to override the defaults from params.py
  #plots: create output plots
  #pause: if creating plots, pause for <Enter> when they are displayed on screen
  #returns metrics of the simulation as a dict, and the full params used
  #writes a log file (and optional plots) into outdir.  use outdir=None for
  #no log file and instead direct all output to stdout.  plots will go into
  #the current directory in that case.
  #params_file is the module to import (default is params)
  #roi is the (t1,t2) time range for the spatial plots
  #also returns out_place: the directory+logcode prefix used for the log file
  global gverbose
  gverbose=verbose
  params=importlib.import_module(params_file)

  np.seterr(all='warn')
  global param
  global norm
  global dg
  global count
  global tstart
  #we will redirect this module's print function into the log file
  global print
  if outdir is None:
    dbg=mylog.Log(outdir=None,descr='psa')
    OUTDIR='./'
  else:
    OUTDIR=outdir  # for log files and png files
    dbg=mylog.Log(outdir=OUTDIR,descr='psa')
  #change the print function in the module to print to the log file
  old_print=print
  print=dbg.print

  param=params.create_param(override_param, dbg.print)
  norm=create_norm()
  
  print('simulation params: time 0 to {}, step {}'.format(param.end_time,param.tstep))
  print('step time might be increased if long cycle time')
  print('normalization velocity {:8.3f} m/s'.format(param.norm_v0))
  print('normalization of time norm.t0 {:8.3f} sec'.format(norm.t0))
  print('starting with params import file: {}'.format(params))
  state1=init(param,norm)
  state2=init(param,norm)
  prod=AttrDict()
  prod.Pprod=1e5/norm.P0          # 1 atm to start
  prod.yprod=param.feed_O2        # fract of O2 in product tank

  #create dimensionless groups for the constants needed in the simulation
  dg=create_dgroups(norm,param,state1)

  np.set_printoptions(precision=5, edgeitems=6, linewidth=95, suppress=False)
  pp=pprint.PrettyPrinter(indent=2)

  print('git revision:{}'.format(util.get_git_commit()))
  print('log code: {}'.format(dbg.log_code))
  sys.stdout.write('log file prefix code: {}\n'.format(dbg.log_code))

  print('version:{}'.format(vinfo))
  print('plt {}'.format(matplotlib.__version__))
  #print out the parameters and numbers for the record
  print('dimensionless groups:\n',pp.pformat(dg.__dict__))
  print('parameters:\n',pp.pformat(param.__dict__))

  #create directory and log_code to prepend to out png files
  util.safe_mkdir(OUTDIR)
  out_place=os.path.join(OUTDIR,dbg.log_code)
  #write to screen, not log file
  sys.stdout.write('out_place={}\n'.format(out_place))
  #output is to 1 ATM pressure (abs)
  x01=np.hstack((state1.P,state1.T,state1.Tw,state1.xA,state1.xB,state1.yA,state1.yB))
  x02=np.hstack((state2.P,state2.T,state2.Tw,state2.xA,state2.xB,state2.yA,state2.yB))
  x03=np.array([prod.Pprod, prod.yprod])
  print(x01.shape,x02.shape,x03.shape)
  x0=np.hstack((x01,x02,x03))
  tstart=time.time()
  #time normalized by norm.t0
  #Vectorized means that myfun can take a matrix of values to evaluate dx/dt at,
  #not that x0 is a matrix
  compute_alpha(verbose=True)  # just print out the values for the log file
  #we add a small number because we want to get full cycle at end
  #also, increase tstep if the cycle time is long:
  param.tstep=max(param.tstep, param.cycle_time/100)
  end_simulation=param.end_time+1e-6
  t_span=(0,end_simulation)
  #we want to include the end_simulation time
  ev=np.arange(0,end_simulation,param.tstep)
  print('t_span {}'.format(t_span))
  #print('t_eval {}'.format(ev))
  sys.stdout.write('Calling ODE solver\n')
  bunch=scipy.integrate.solve_ivp(myfun, t_span, x0, vectorized=False, t_eval=ev,
                                  method='BDF')
###                                  method='Radau')
  if not bunch.success:
    print(bunch.message)
    sys.stdout.write(bunch.message)
    return None
  print('evaluated at {} places'.format(bunch.t.size))
  print('num eval of RHS {}'.format(bunch.nfev))
  print('num eval of Jacobian {}'.format(bunch.njev))
  print('total number of elements in data is {}'.format(bunch.y.size))
  print('number of elements > 2 = {}'.format(np.sum(bunch.y>2)))
  print('number of elements > 1 = {}'.format(np.sum(bunch.y>1)))
  print('number of elements < 0 = {}'.format(np.sum(bunch.y<0)))
  
  #t will be array of time points (t_points)
  #y will be array of (n x t_points) values
  N=param.N
  #take the output data from ODE solver and place in data.<name> for plotting
  data=AttrDict()
  data.data1,data.data2,data.datap=data_to_state(bunch.y)
  data.t=bunch.t
  print('bunch.t',data.t)
  data.param=param
  print_state_ranges(data.data1)
  print_state_ranges(data.data2)
  #compute xAs at each point
  data.data1.xAs,data.data1.xBs=adsorp_equilibrium(data.data1.yA,data.data1.yB,data.data1.P)
  data.data2.xAs,data.data2.xBs=adsorp_equilibrium(data.data2.yA,data.data2.yB,data.data2.P)
  #compute velocity at each point
  data.data1.V=compute_vel(data.data1)
  data.data2.V=compute_vel(data.data2)
  #Output key statistics in log file with keywords
  #get indexes for full cycle
  print('CYCLE {:8.2f}'.format(param.cycle_time))
  print('VENT {:8.2f}'.format(param.vent_time))
  #indexes to select over - the last 2 cycles for container 1
  r=np.argmax(bunch.t>(max(bunch.t)-2*param.cycle_time))
  ret=AttrDict()
  ret.time=datetime.datetime.now().replace(microsecond=0).isoformat()
  ret.end_time=end_simulation
  ret.cycle_time=param.cycle_time
  ret.vent_time=param.vent_time
  ret.norm_t0=norm.t0
  ret.real_cycle_time=param.cycle_time*norm.t0
  ret.real_vent_time=param.vent_time*norm.t0
  #these are all min,max,mean for the last cycle of container 1
  #for container 1, we calculate parameters for all the odd cycles and store in
  # a list.  To get last one, use index [-1]
  idx=0
  ret.container_y=[]
  ret.prod_pressure=[]
  ret.prod_y=[]
  ret.prod_flow_rate=[]
  while True:
    start=param.cycle_time*idx
    stop=param.cycle_time*(idx+1)
    if stop > max(bunch.t):
      break
    #create a mask of the times for this odd cycle (starting with 1st one)
    mask=np.logical_and(start <= bunch.t, bunch.t <= stop)
    ret.container_y.append([np.min(data.data1.yA[-1,mask]),
                            np.max(data.data1.yA[-1,mask]), 
                            np.mean(data.data1.yA[-1,mask])])
    ret.prod_pressure.append([np.min(data.datap.Pprod[0,mask]),
                              np.max(data.datap.Pprod[0,mask]),
                              np.mean(data.datap.Pprod[0,mask])])
    ret.prod_y.append([np.min(data.datap.yprod[0,mask]),
                       np.max(data.datap.yprod[0,mask]),
                       np.mean(data.datap.yprod[0,mask])])
    #compute product flow in LPM
    ret.prod_flow_rate.append([product_flow_rate(np.min(data.datap.Pprod[0,mask])),
                               product_flow_rate(np.max(data.datap.Pprod[0,mask])),
                               product_flow_rate(np.mean(data.datap.Pprod[0,mask]))])
    idx+=2
    
  ret.logcode=dbg.log_code
  print('results:\n',pp.pformat(ret))
  print('These are repr() outputs for programmatic reading')
  print('PARAM {}'.format(repr(param)))
  print('RESULT {}'.format(repr(ret)))
  data.ret=ret
  #save to pickle file
  pickle_name='{}-data.pickle'.format(out_place)
  with open(pickle_name, 'wb') as fp:
    pickle.dump(data,fp)
  #close the log file, because simulate() may be called again before exiting
  #and that run will need a fresh log file
  dbg.close()
  #restore print function
  print=old_print
  return data, ret, param, pickle_name, out_place
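A minimal call sketch for simulate, assuming a params.py module with defaults is importable; the override key and value are illustrative.

# Run one simulation, writing the log and pickle files into ./out.
result = simulate({'end_time': 10.0}, outdir='./out', verbose=True)
if result is not None:  # None signals that the ODE solver failed
    data, ret, param, pickle_name, out_place = result
    print('results written with prefix', out_place)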
Example no. 14
def get_paths(key):
    fn = get_file_name(key)
    safe_mkdir(CACHE_DIR)
    path = Path(CACHE_DIR, fn)
    file_path = f"{fn}{DATA_SUFFIX}"
    return path, file_path
Example no. 15
    def run_four_to_six(self):
        """
        Utility to precompute all inputs desired by the professor.
        """
        graphs = [3, 10]
        task4 = []  # 5,10]
        task5_k = [5, 10]
        task5_l = (1, 5, 10)
        task6_knn = [1, 3, 10]
        task6_files = ['task6sample1', 'task6sample2']

        basepath = realpath('./precomputed')
        safe_mkdir(basepath)

        # Create named tuple to simulate args.
        fields = [
            'k', 'alg', 'imgs', 'imageId', 'load', 'graph', 'layers', 'hashes',
            'file', 'bins'
        ]
        Args = namedtuple('Arguments', fields)
        Args.__new__.__defaults__ = (None, ) * len(fields)

        # call load once.
        self.load(Args(load='dataset'))

        # graphpath = '/home/crosleyzack/school/fall18/cse515/repo/project/phaseIII/graph/graph'
        for graph in graphs:
            # load graph
            self.graph(Args(graph=f'{graph}'))

            # Create folder for this graph size.
            working_dir = join(basepath, f'graph{graph}')
            safe_mkdir(working_dir)

            print(f'Starting task 4 graph {graph}')

            # task 4.
            task_dir = join(working_dir, 'task4')
            safe_mkdir(task_dir)
            task4_images = [[2976144, 3172496917, 2614355710],
                            [27483765, 2492987710, 487287905]]
            # used list from submission sample.

            for k, images in product(task4, task4_images):
                subdir = join(
                    task_dir,
                    f'k{k}img{images[0]}')  # include first image in dir name.
                safe_mkdir(subdir)
                self.task4(Args(k=k, imgs=images), path=subdir)
            """
            # task 5.
            task_dir = join(working_dir, 'task5')
            safe_mkdir(task_dir)
            hashes = 5 # no set value. Just generated one at random.

            for k in task5_k:
                subdir = join(task_dir, f'k{k}l{l}')
                safe_mkdir(subdir)
                self.task5(Args(k=k, layers=l, hashes=hashes))

            print(f'Starting task 6 graph {graph}')
            """

            # task 6.
            task_dir = join(working_dir, 'task6')
            safe_mkdir(task_dir)

            for k, f in product(task6_knn, task6_files):
                subdir = join(task_dir, f'knn_k{k}_file{f}')
                safe_mkdir(subdir)
                self.task6(Args(alg='knn', file=f, k=k), path=subdir)

            for f in task6_files:
                subdir = join(task_dir, 'ppr')
                safe_mkdir(subdir)
                self.task6(Args(alg='ppr', file=f), path=subdir)
Example no. 16
    sanitize,
    validate_email_address,
)

setup_env()


app = Flask(__name__)
app.secret_key = environ.get("FLASK_SECRET")
database_url: str = environ.get("DATABASE_URL")

app.config["SQLALCHEMY_DATABASE_URI"] = database_url
app.config["SQLALCHEMY_TRACK_MODIFICATIONS"] = False

db = SQLAlchemy(app)
safe_mkdir("@cache")

# Not exact, since we have 4 workers running; it's basically a lottery whether you hit the same worker three times.
# gate = Gate(use_heroku_ip_resolver=IS_HEROKU, limit=1, min_requests=3)


@app.before_request
@guard(ban_time=5, ip_resolver="heroku" if IS_HEROKU else None, request_count=3, per=1)
def gate_check():
    pass


@app.route("/robots.txt")
def robots():
    ONE_YEAR_IN_SECONDS = 60 * 60 * 24 * 365
    # we disallow all bots here because we don't want useless crawling over the API
Example no. 17
def close_lockfile(filename: str) -> None:
    safe_mkdir(FOLDER)
    # Remove the lock file to release `filename`.
    rm_path = lockfile_path(filename)
    safe_remove(rm_path)
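
The lockfile examples above rely on a few helpers that are not shown in this section. Below is a minimal sketch of what they might look like; the names match the call sites, but the bodies and the FOLDER location are assumptions, not the original implementation.

import os
import time
from pathlib import Path

FOLDER = Path("locks")  # assumed location for the lock files


def safe_mkdir(folder):
    # Create the folder if needed; ignore the "already exists" case.
    os.makedirs(folder, exist_ok=True)


def lockfile_path(filename) -> Path:
    # One lock file per target file, kept under FOLDER.
    return FOLDER / f"{Path(filename).name}.lock"


def _lockfile_exists(filename) -> bool:
    return lockfile_path(filename).exists()


def wait_for_lock_file(filename, poll=0.05):
    # Block until the current holder releases the lock.
    while _lockfile_exists(filename):
        time.sleep(poll)


def safe_remove(path):
    # Remove the file if present; tolerate a concurrent removal.
    try:
        os.remove(path)
    except FileNotFoundError:
        pass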