Code example #1
 def command(self):
     '''
     Parse command line arguments and call appropriate method.
     '''
     self._load_config()
     embed()
     # type('NewClassName', (BaseClass,), {'a_func': a_func})
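The commented-out line above refers to Python's three-argument form of type(), which builds a class at runtime. A minimal sketch of that pattern (BaseClass and a_func here are illustrative stand-ins, not objects defined in the snippet):

def a_func(self):
    # any callable placed in the namespace dict becomes a method of the new class
    return "hello from a dynamically created class"

BaseClass = object  # stand-in base class for illustration

# type(name, bases, namespace) is equivalent to a class statement with the same body
NewClass = type('NewClassName', (BaseClass,), {'a_func': a_func})

print(NewClass().a_func())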
Code example #2
File: shell.py  Project: frispete/websauna
def main(argv=sys.argv):

    if len(argv) < 2:
        usage(argv)

    config_uri = argv[1]

    request = init_websauna(config_uri)

    imported_objects = OrderedDict()

    imported_objects["request"] = request
    imported_objects["dbsession"] = request.dbsession
    imported_objects["transaction"] = transaction
    imported_objects["redis"] = get_redis(request)
    imported_objects["now"] = now
    imported_objects["datetime"] = datetime

    for name, cls in Base._decl_class_registry.items():

        if name == "_sa_module_registry":
            continue

        imported_objects[name] = cls

    print("")
    print("Following classes and objects are available:")
    for var, val in imported_objects.items():
        print("{:30}: {}".format(var, str(val).replace("\n", " ").replace("\r", " ")))
    print("")

    embed(user_ns=imported_objects)
Code example #3
File: manage.py  Project: yut148/zerodb-server
def console():
    """
    Console for managing users (add, remove, change password)
    """

    def useradd(username, pubkey):
        storage.add_user(username, binascii.unhexlify(pubkey))

    def userdel(username):
        storage.del_user(username)

    def chkey(username, pubkey):
        storage.change_key(username, binascii.unhexlify(pubkey))

    banner = "\n".join([
            "Usage:",
            "========",
            "useradd(username, pubkey) - add user",
            "userdel(username) - remove user",
            "chkey(username, pubkey) - change pubkey",
            "get_pubkey(username, password) - get public key from passphrase",
            "exit() or ^D - exit"])

    db = DB(_sock, username=_username, password=_passphrase, realm=_realm)
    storage = db._storage

    sys.path.append(".")

    embed(banner1=banner)
Code example #4
File: farmtool.py  Project: iceseismic/sito
 def onkey(self, event):
     key = event.key
     if key == 'i':
         from IPython import embed
         embed()
     elif key == 'left' and num_tr is not None:
         if self.ind1 == 0:
             print('reached beginning of file')
         else:
             self.ind2 = self.ind1
             self.ind1 = max(0, self.ind1 - 3 * num_tr)
             self.plot_streams()
     elif key == 'right' and num_tr is not None:
         if self.ind2 == len(self.st2s):
             print('reached end of file')
         else:
             self.ind1 = self.ind2
             self.ind2 = min(self.ind2 + 3 * num_tr, len(self.st2s))
             self.plot_streams()
     elif key == 'o':
         self.open_files()
     elif key == 'w':
         self.save_file()
     elif key == 'm':
         self.pressed_m = True
Code example #5
def get_broadcast_traffic():
    for fname in args.files:
        f = open(fname)
        logging.error("Loading broadcast traffic from %s" % f)
        df = read_pcap(fname,timeseries=True)
        bytes_per_second = df.resample("S",how="sum")
        embed()
Code example #6
File: cmdtool.py  Project: 273k/facepp-python-sdk
def _run():
    global _run
    _run = lambda: None

    msg = """
===================================================
Welcome to Face++ Interactive Shell!
Here, you can explore and play with Face++ APIs :)
---------------------------------------------------
Getting Started:
    0. Register a user and API key on http://www.faceplusplus.com
    1. Write your API key/secret in apikey.cfg
    2. Start this interactive shell and try various APIs
        For example, to find all faces in a local image file, just type:
            api.detection.detect(img = File(r'<path to the image file>'))

Enjoy!
"""

    try:
        from IPython import embed
        embed(banner2 = msg)
    except ImportError:
        import code
        code.interact(msg, local = globals())
Code example #7
File: cli.py  Project: mehdidc/lightjob
def ipython(db_folder):
    """
    launches ipython with the object 'db' loaded
    """
    from IPython import embed
    db = load_db(db_folder)  # NOQA
    embed()
Code example #8
File: shell.py  Project: jclmns/chisubmit
def shell(ctx, course):
    try:
        from IPython import embed, __version__
        from IPython.config.loader import Config
        if __version__ < "1.1.0":
            print "You need IPython (>= 1.1.0) to run the chisubmit shell"
            ctx.exit(CHISUBMIT_FAIL)
    except ImportError:
        print "You need IPython (>= 1.1.0) to run the chisubmit shell"
        ctx.exit(CHISUBMIT_FAIL)

    cfg = Config()
    cfg.TerminalInteractiveShell.banner1 = """
                      WELCOME TO THE CHISUBMIT SHELL
    
    Course: %s
    
    This is an IPython shell with the chisubmit data structures preloaded. 
    You can access the chisubmit objects through variable 'course'.
    
    CAREFUL: Most changes made through the shell will be propagated to the
             database. 
    
    """ % (course.name)
    
    prompt_config = cfg.PromptManager
    prompt_config.in_template = 'chisubmit> '
    prompt_config.in2_template = '   .\\D.> '
    prompt_config.out_template = '         > '
    embed(config = cfg)
        
    return CHISUBMIT_SUCCESS
Code example #9
File: birp.py  Project: sensepost/birp
def menu_screen(transaction, reqres):
	#If reqres is True, we show the request, if False, we show the response
	if reqres: screen = transaction.request
	else: screen = transaction.response
	key = ''
	print screen.colorbuffer
	while key != getch.KEY_x:
		logger(''.join([Fore.CYAN,"Type 'f' to view the screen's fields or 'p' to view the un-markedup screen, 'e' to view the screen as an emulator would, 'r' to switch between the Request/Response, or 's' to export a copy to a text file (.brp/.emu). Type 'x' to go back.",Fore.RESET]),kind='info')
		key = getch()
		if key == getch.KEY_f or key == getch.KEY_F:
			print Fore.BLUE,"View Fields",Fore.RESET
			print Fore.BLUE,"===========",Fore.RESET,"\n"
			pprint(screen.fields)
			logger(''.join([Fore.RED,"Dropping into shell, check the",Fore.BLUE," screen ",Fore.RED,"object. Type quit() to return here.",Fore.RESET,"\n\n"]),kind='info')
			embed()
		elif key == getch.KEY_p or key == getch.KEY_P:
			print '\n',screen
		elif key == getch.KEY_e or key == getch.KEY_E:
			print '\n',screen.emubuffer
		elif key == getch.KEY_r or key == getch.KEY_R:
			reqres = not reqres
			if reqres:
				screen = transaction.request
				print Fore.BLUE,'REQUEST',Fore.RESET
			else:
				screen = transaction.response
				print Fore.BLUE,'RESPONSE',Fore.RESET
			print screen.colorbuffer
		elif key == getch.KEY_s or key == getch.KEY_S:
			filename = transaction.host+'_'+str(transaction.timestamp.date())+'_'+str(transaction.timestamp.time())
			screentofile(screen, filename)
Code example #10
File: buildall.py  Project: Jumpscale/dockers
    def build(self):
        pythonbuild = False
        rc = 0
        if j.sal.fs.exists(path=self._pathPythonBuild):
            pythonbuild = True

            self.log("Python Build:%s"%self._pathPythonBuild)
            # C = j.sal.fs.fileGetContents(self._pathPythonBuild)

            command="cd %s;python3 build.py"%(self.path)
            rc=j.do.executeInteractive(command)
            if rc>0:                
                raise j.exceptions.RuntimeError("could not build %s"%self.name)
            
        else:
            self.log("std docker build:%s" % self.path)
            imageName = 'jumpscale/%s' % self.name
            output = j.sal.docker.build(self.path, imageName, output=True, force=True)
            self.log("build ok")

            if not "Successfully built" in output:
                from IPython import embed
                print ("DEBUG NOW sdsd")
                embed()
                
                raise j.exceptions.RuntimeError("Cannot build %s from dockerfile"%self.name)

        if self.builder.push:
            nopushfile=j.sal.fs.joinPaths(self.path,".nopush")
            if j.sal.fs.exists(nopushfile):
                return
            self.push()
Code example #11
File: main.py  Project: dalexander/PRmm
def main():
    args = docopt(__doc__)
    if args["--debug"] is not False:
        print "Args: \n", args
    fixtureArg = args["--fixture"]
    if "::" not in fixtureArg:
        fixtureIni, fixtureSection = "~/.pacbio/data-fixtures.ini", fixtureArg
    else:
        fixtureIni, fixtureSection = fixtureArg.split("::")
    fixture = Fixture.fromIniFile(fixtureIni, fixtureSection)
    holeNumber = int(args["--hole"])

    if args["--headless"] is not False:
        banner = "Convenient variables available: zmw, fixture"
        zmw = fixture[holeNumber]
        try:
            from IPython import embed
            embed(banner1=banner)
        except ImportError:
            code.InteractiveConsole(locals=locals()).interact(banner=banner)
    else:
        app = QtGui.QApplication([])
        traceViewer = TraceViewer(fixture)
        traceViewer.setFocus(holeNumber)
        if args["--debug"]:
            debug_trace()
        if args["--profile"]:
            import cProfile
            cProfile.runctx("app.exec_()",
                            globals=globals(),
                            locals=locals())
        else:
            app.exec_()
Code example #12
File: opts.py  Project: mkrainin/ecto
def use_ipython(options, sched, plasm, locals={}):
    '''Launch a plasm using ipython, and a scheduler of choice.

       Keyword arguments:
       options -- are from scheduler_options
       sched -- is an already initialized scheduler for plasm.
       plasm -- The graph to execute
       locals -- are a dictionary of locals to forward to the ipython shell, use locals()
    '''
    #expose the locals to the ipython prompt.
    for key, val in locals.items():
        vars()[key] = val

    if type(sched) == ecto.schedulers.Singlethreaded:
        sched.execute_async(options.niter)
    else:
        sched.execute_async(options.niter, options.nthreads)

    import IPython
    if IPython.__version__ < '0.11':
        from IPython.Shell import IPShellEmbed
        #the argv is important so that the IPShellEmbed doesn't use the global
        #Also fancy colors!!!!
        ipython_argv = ['-prompt_in1', 'Input <\\#>', '-colors', 'LightBG']
        ipshell = IPShellEmbed(ipython_argv)
        ipshell()
    else:
        from IPython import embed
        embed() # this call anywhere in your program will start IPython
Code example #13
File: manage.py  Project: v09-software/zerodb-server
def run(username, passphrase, sock):
    logging.basicConfig()
    username = str(username)
    passphrase = str(passphrase)
    sock = str(sock)
    if not sock.startswith("/"):
        sock = (sock.split(":")[0], int(sock.split(":")[1]))
    DB.auth_module.register_auth()
    DB.encrypter.register_class(default=True)
    init_crypto(passphrase=passphrase)

    def useradd(username, password):
        storage.add_user(username, password)

    def userdel(username):
        storage.del_user(username)

    def chpass(username, password):
        storage.change_key(username, password)

    print "Usage:"
    print "========"
    print "useradd(username, password) - add user"
    print "userdel(username) - remove user"
    print "chpass(username, password) - change passphrase"
    print "exit() or ^D - exit"

    storage = client_storage(sock,
            username=username, password=passphrase, realm="ZERO")
    embed(display_banner=False)
Code example #14
def main():
    parser = argparse.ArgumentParser(description='Launch an IPython shell '
                                     'with a MagentoAPI instance, "magento", '
                                     'connected to a given endpoint.')
    parser.add_argument('host', help='The Magento server host.')
    parser.add_argument('port', type=int, default=80,
                        help='The Magento server port.')
    parser.add_argument('api_user', help='The API user to log in as.')
    parser.add_argument('api_key', help='The API key to log in with.')
    parser.add_argument('-p', '--path', default=DEFAULT_XMLRPC_PATH,
                        help='The URL path to the XML-RPC API.')
    parser.add_argument('-v', '--verbose', action='store_true',
                        help='Set the XML-RPC client to verbose.')
    parser.add_argument('--proto', default='http',
                        help='Choose between http or https.')
    args = parser.parse_args()

    url = '{}://{}:{}/{}'.format(args.proto, args.host, args.port,
                                 args.path.strip('/'))

    print('\n\n-- magento-ipython-shell -----------------')
    print('Connecting to "{}"'.format(url))
    print('Using API user/key {}/{}'.format(args.api_user, args.api_key))

    magento = MagentoAPI(args.host, args.port, args.api_user, args.api_key,
                         path=args.path, verbose=args.verbose, proto=args.proto)
    assert magento

    print('Connected! The "magento" variable is bound to a usable MagentoAPI '
          'instance.\n'
          '-- magento-ipython-shell -----------------\n\n')
    embed()  # Shell time!
Code example #15
File: test.py  Project: sergeyk/csrec
def test_predictionerror(fg, sgd, data, allow_rejects=True):
# computes predictionacc or error (getting it exactly right or not) 
  errors = 0
  truenones = 0
  prednones = 0
  N = data.get_nsamples()
  
  indices = range(comm_rank, N, comm_size) 
  update_lookahead_cnt = 0  
  req_ids = data.get_req_ids_for_samples(indices[0:LOOK_AHEAD_LENGTH])
  fg.upt_out_prod_get(req_ids)

  for idx, i in enumerate(indices):
    
    if update_lookahead_cnt == LOOK_AHEAD_LENGTH-2:
      req_ids = data.get_req_ids_for_samples(indices[idx:idx+LOOK_AHEAD_LENGTH+2])
      fg.upt_out_prod_get(req_ids)
      update_lookahead_cnt = 0
    else:
      update_lookahead_cnt += 1
    
    competitorset = data.get_sample(i)
    for l in competitorset.get_surferlist():
      #print l[1]
      try:
        assert(l[1] in fg.outer_product_getter.outer_products.keys())
      except:
        # it is not yet in there! so load by the grace of god!
        fg.outer_product_getter.unsafe_create_outer_prods_from_req_ids(l[1])
    try:
      pred = sgd.predict(
          competitorset, testingphase=False, allow_rejects=allow_rejects)
    except:
        print "Unexpected error:", sys.exc_info()[0]
        if not RON_MODE:
            from IPython import embed
            embed()
        else:
            traceback.print_exc(file=sys.stdout)
            import pdb
            pdb.set_trace()
    true = competitorset.get_winner()
    #if true:
    #  print 'prediction', pred
    #  print 'true val', true
        
    errors += (pred!=true)
    truenones += (true==None)
    prednones += (pred==None)
    
  safebarrier(comm)
  
  errors = comm.allreduce(errors)
  truenones = comm.allreduce(truenones)
  prednones = comm.allreduce(prednones)
    
  errorrate = errors/float(N)  
  truenonerate = truenones/float(N)  
  prednonerate = prednones/float(N) 
  return errorrate, truenonerate, prednonerate
Code example #16
def featureGen(data):
    embed() 
    for x in range(6, 13):
        colList = [col for col in data.columns if ('g' + str(x)) in col and 'mpa' in col]
        yrGPA = 'g' + str(x) + '_gpa'
        data[yrGPA] = data[colList].mean(axis=0)
    return data
Code example #17
File: run.py  Project: TsarN/pysistem
def shell():
    """Run interactive PySistem shell"""
    import pysistem.models
    import code
    modules = ('checkers', 'compilers', 'contests', 'problems', 'users',
               'submissions', 'test_pairs', 'groups', 'lessons', 'settings')

    context = dict(app=_request_ctx_stack.top.app)

    for module in modules:
        mod = getattr(__import__('pysistem.%s.model' % module), module).model
        for obj in dir(mod):
            if not obj.startswith('_'):
                context[obj] = getattr(mod, obj)

    # Try IPython
    try:
        try:
            # 0.10.x
            from IPython.Shell import IPShellEmbed
            ipshell = IPShellEmbed()
            ipshell(global_ns=dict(), local_ns=context, colors='Linux')
        except ImportError:
            # 0.12+
            from IPython import embed
            embed(user_ns=context, colors='Linux')
        return
    except ImportError:
        pass

    code.interact(local=context)
Code example #18
File: manage.py  Project: Marilyna/promua-test
def shell():
    from catalog import app, create

    create(CONFIG)

    with app.test_request_context('/'):
        app.preprocess_request()

        banner = 'Interactive Shell\n'

        try:
            from IPython import embed
        except ImportError:
            pass
        else:
            try:
                import sys
                if sys.platform == 'win32':
                    import pyreadline
            except ImportError:
                banner = ('There is IPython installed on your system, '
                          'but no pyreadline\n' + banner)
            else:
                embed(banner1=banner)
                return
        from code import interact
        interact(banner)
Code example #19
File: Base.py  Project: AlexandrePinheiro/pyload
 def shell(self):
     """ open ipython shell """
     if self.core.debug:
         from IPython import embed
         #noinspection PyUnresolvedReferences
         sys.stdout = sys._stdout
         embed()
Code example #20
File: pspec_cov_cav_v002.py  Project: domagalski/capo
def plot_eig(data,nchan):
    days=data.keys()
    for k in days:
        eig_order=[]
        eigs = []
        eigs_cav = []
        for bl in data[k]:
           c_mat=cov(data[k][bl])
           cav = get_cav(c_mat,nchan,scaling=opts.auto)
           U,S,V= n.linalg.svd(c_mat.conj())
           U_cav,S_cav,V_cav = n.linalg.svd(cav.conj())
           eig_order.append(S[0])
           eigs.append( n.fft.fftshift(n.fft.fft(V.T.conj(),axis=0)))
           eigs_cav.append( n.fft.fftshift(n.fft.fft(V_cav.T.conj(),axis=0)))


        order=n.argsort(eig_order)

        eig_order=n.take(eig_order,order)
        eigs=n.take(eigs,order,axis=0)
        eigs_cav=n.take(eigs_cav,order,axis=0)
        embed()
        fig=p.figure(1)
        for cnt,eig in enumerate(eigs):
            p.plot(eig[0] + cnt*5)
        p.title('Eigenvectors for day {0}'.format(k))
        p.show()
        p.savefig('eigenvectors_{0}.png'.format(k))
        p.clf()
        for cnt,eig in enumerate(eigs_cav):
            p.plot(eig[0] + cnt*5)
        p.title('Eigenvectors of Cav for day {0}'.format(k))
        p.savefig('eigenvectors_cav_{0}.png'.format(k))
        p.clf()
        p.close()
Code example #21
File: from_json_to_Xs.py  Project: franciscovargas/FH
def picture_emotions_x(articles):
    """
    returns X array NxM with emotion values from picture,
    N = number of articles, M = 7 ordered emotions [anger contempt disgust fear happiness sadness surprise]
    """
    articles = articles['articles']

    debug_faces = []
    x = np.zeros([len(articles), 7])
    for i in xrange(len(articles)):
        face_emotion_list = articles[i]['oxford']
        n_faces = len(face_emotion_list)
        if n_faces==0:
            #no faces recognized on the picture
            x[i,:] = 0
        else:
            # array of emotions for each face in the picture which will be averged
            f_emoion_array = np.zeros([n_faces,7])
            for (j, face) in enumerate(face_emotion_list):
                if face=="error" or face=="activityId" or face=="message" or face=="statusCode":
                    x[i,:] = 0
                    debug_faces.append(face)
                else:
                    for (k, e) in enumerate(['anger', 'contempt', 'disgust', 'fear', 'happiness', 'sadness', 'surprise']):
                        try:
                            f_emoion_array[j, k] = face['scores'][e]
                        except Exception, e:
                            embed()
                        # f_emoion_array[j, k] = face['scores'][e]

            x[i,:] = np.mean(f_emoion_array, axis=0)

    return x
Code example #22
File: py-esxi-cmd.py  Project: dvinazza/esxi-tools
def main():
    try:
        print args

        if 'host' in args:
            if args.host in config.creds.keys():
                # keep only the configuration for the host where the guest
                # will be looked up, to speed up the connection process
                config.creds = {args.host: config.creds[args.host]}
            else:
                print "No existe el host %s en la config" % args.host
                exit(1)

        a = Administrador(config)

        if 'iniciar' in args:
            a.iniciarGuest(args.guest, args.host)
        elif 'apagar' in args:
            a.apagarGuest(args.guest, args.host)
        elif 'ver' in args:
            for h in a.hosts.values():
                print h
                if args.ver == "guests":
                    for g in h.guests.values():
                        print "\t", g

        if 'interactivo' in args:
            embed()
            exit(0)

    except Exception:
        print_exc(file=stdout)
        exit(1)
Code example #23
File: Demo_LQGAN.py  Project: all-umass/VI-Solver
def CompileResults_NdLQGAN(root='lqganresults/'):
    import seaborn as sns
    results = np.load(root+'results.npy').squeeze()
    passed = np.logical_and(results[:,:,:,:,-1]<10000,~np.isnan(results[:,:,:,:,-4])).astype('float').mean(axis=(1,2))
    print(passed)
    results = results[:3]
    # dims = np.atleast_2d(np.repeat([1,2,4,6,8,10],10*6)).T
    # trials = np.tile(np.atleast_2d(np.repeat(np.arange(10),6)).T,(6,1))
    # algorithms = np.tile(np.array(['Fcc','Fsim','Feg','Fcon','Freg','EG'])[:,None],(10*6,1))
    # data = np.hstack((dims,trials,algorithms,results.reshape(-1,6)))
    data = results.reshape(-1,10)
    df = pd.DataFrame(data=data,columns=['Dimensionality','Trial','Algorithm','Start','Condition #',r'KL/KL_0',r'Euc/Euc_0',r'||F||/||F_0||','Runtime','Steps'])
    # sns.violinplot(x="Species", y="PetalLengthCm", data=iris, size=6)
    df.replace("nan", np.inf, inplace=True)
    df.replace(inplace=True, to_replace={'Algorithm':dict(zip([0.0,1.0,2.0,3.0,4.0,5.0],['Fcc','Fsim','Feg','Fcon','Freg','EG']))})
    dtypes = ['int','int','str','int','float','float','float','float','float','int']
    for col,dtype in zip(df,dtypes):
        # print(df[col])
        # df.replace("nan", np.inf)
        # print(df[col])
        if dtype != 'str':
            df[col] = pd.to_numeric(df[col])
        df[col] = df[col].astype(dtype)
    # df['Steps'] = np.log(df['Steps'])
    ax = sns.violinplot(x='Dimensionality', y='Steps', hue='Algorithm', data=df, palette="muted", scale="count", inner="stick")
    # ax = sns.violinplot(x='Dimensionality', y='Steps', data=df, palette="muted")
    # ax.set_ylim([0,4])
    # ax.set(yscale="log")
    ax.set_yscale('log')
    fig = ax.get_figure()
    fig.savefig(root+'violin2.png')
    embed()
Code example #24
File: data_process.py  Project: fbailly/expego
def mask_trial(nb_sb,nb_PI) :
	directory = '~/expego/birapp/databirapp/'
	directory = os.path.expanduser(directory)
	mask_trial_1 = np.empty([nb_sb,nb_PI])
	mask_trial_2 = np.empty([nb_sb,nb_PI])
	mask_trial_3 = np.empty([nb_sb,nb_PI])
	i = 0
	for subjects in os.listdir(directory) :
		if os.path.isdir(directory+subjects) :
			sub_directory = directory+subjects+'/'
			labeled_trials = open(sub_directory+'/labeled_trials.txt','r')
			j = 0
			k = 0
			l = 0
			for lines in labeled_trials :
				line_split = lines.split(',')
				embed()
				if line_split[3] == '1' :
					mask_trial_1[i,j] = int(line_split[5][0])
					j += 1
				elif line_split[3] == '2' :
					mask_trial_2[i,k] = int(line_split[5][0])
					k += 1
				elif line_split[3] == '3' :
					mask_trial_3[i,l] = int(line_split[5][0])
					l += 1
				else : print('error split')
			i += 1
	mask_trial_1 = np.where(mask_trial_1==1,mask_trial_1,np.nan)
	mask_trial_2 = np.where(mask_trial_2==1,mask_trial_2,np.nan)
	mask_trial_3 = np.where(mask_trial_3==1,mask_trial_3,np.nan)
	return mask_trial_1,mask_trial_2,mask_trial_3
Code example #25
File: manage.py  Project: davinirjr/zerodb-server
def console():
    """
    Console for managing users (add, remove, change password)
    """

    def useradd(username, password):
        storage.add_user(username, password)

    def userdel(username):
        storage.del_user(username)

    def chpass(username, password):
        storage.change_key(username, password)

    banner = "\n".join([
            "Usage:",
            "========",
            "useradd(username, password) - add user",
            "userdel(username) - remove user",
            "chpass(username, password) - change passphrase",
            "exit() or ^D - exit"
            ])

    DB.auth_module.register_auth()
    DB.encrypter.register_class(default=True)
    init_crypto(passphrase=_passphrase)

    storage = client_storage(_sock,
            username=_username, password=_passphrase, realm="ZERO")
    embed(banner1=banner)
Code example #26
File: ConvertToPCD.py  Project: MerDane/pyKinectTools
def main(visualize=False, save_dir='~/', sparse_pointcloud=False):

	# Create save directory if it doesn't exist
	if not os.path.isdir(save_dir):
		os.mkdir(save_dir)

	cam = KinectPlayer(base_dir='./', device=1, bg_subtraction=True, get_depth=True,
						get_color=True, get_skeleton=False, background_model='box', background_param=3200)
	
	cloud = pcl.PointCloud()

	framerate = 1
	while cam.next(framerate):

		pts = cam.camera_model.im2PosIm(cam.depthIm).reshape([-1,3])
		cloud.from_array(pts.astype(np.float32))
		depth_name = cam.depthFile.split("_")
		filename = "pcl_" + "_".join(depth_name[1:-1]) + "_" + depth_name[-1].split(".")[0] + ".pcd"

		if sparse_pointcloud:
			nonzero_idx = np.nonzero(pts[:,2]>0)
			cloud.from_array(pts[nonzero_idx].astype(np.float32))

		cloud.to_file(save_dir+filename, ascii=True)

		if visualize:
			cam.visualize(color=True, depth=True, text=True, colorize=False, depth_bounds=[500,3500])

	embed()

	print 'Done'
Code example #27
File: main.py  Project: OpenPymeMx/openerp-proxy
def main():
    """ Entry point for running as standalone APP
    """
    from .session import Session
    from .core import Client

    session = Session()

    header_databases = "\n"
    for index, url in session.index.items():
        header_databases += "        - [%3s] %s\n" % (index, url)

    header_aliases = "\n"
    for aliase, url in session.aliases.items():
        header_aliases += "        - %7s: %s\n" % (aliase, url)

    header = HELP_HEADER % {'databases': header_databases, 'aliases': header_aliases}

    _locals = {
        'Client': Client,
        'session': session,
    }
    try:
        from IPython import embed
        embed(user_ns=_locals, header=header)
    except ImportError:
        from code import interact
        interact(local=_locals, banner=header)

    session.save()
Code example #28
 def has_permission(self, request, view):
     from IPython import embed; embed()
     # if request.method in permissions.SAFE_METHODS:
     #     return True
     # print("NO")
     print("WHASDSA")
     return True
Code example #29
File: Button2Time.py  Project: MerDane/pyKinectTools
def main(filename):

	recording_enabled = True
	button_pressed = False

	while 1:

		while ser.inWaiting() > 0:
			button_current = ser.readline()
			print button_current, button_pressed
			if button_pressed != button_current and button_current:
				recording_enabled = False
				recording_time = time.time()
				# Turn on light
				ser.write('4')
				print 'On'

				# Write time to file
				with open(filename, 'a') as f:
					f.write(str(time.gmtime())+'\n')

			button_pressed = button_current

		if not recording_enabled:
			if time.time() - recording_time > 2:
				recording_enabled = True
				ser.write('3')
				print 'Off'
			else:
				continue



	# Pause at the end
	embed()
Code example #30
File: tools.py  Project: sightmachine/python-opcua
def uaclient():
    parser = argparse.ArgumentParser(description="Connect to server and start python shell. root and objects nodes are available. Node specificed in command line is available as mynode variable")
    add_common_args(parser)
    parser.add_argument("-c",
                        "--certificate",
                        help="set client certificate")
    parser.add_argument("-k",
                        "--private_key",
                        help="set client private key")
    args = parse_args(parser)

    client = Client(args.url, timeout=args.timeout)
    _configure_client_with_args(client, args)
    if args.certificate:
        client.load_client_certificate(args.certificate)
    if args.private_key:
        client.load_private_key(args.private_key)
    client.connect()
    try:
        root = client.get_root_node()
        objects = client.get_objects_node()
        mynode = get_node(client, args)
        embed()
    finally:
        client.disconnect()
    sys.exit(0)
Code example #31
        else:

            if 'manual' in message['what']:
                # nonsense global stuff bc i couldn't get embed to work from here
                global do_embed
                global times
                do_embed = True
                times = message['times']
            else:
                plt.hist(message['times'], bins='auto', density=True)
                plt.xlabel('round trip (ms)')
                plt.show()
            tornado.ioloop.IOLoop.current().stop()

    def on_close(self):
        self.connections.remove(self)


def make_app():
    return tornado.web.Application([(r"/", MainHandler),
                                    (r"/websocket", SimpleWebSocket)])


if __name__ == "__main__":
    app = make_app()
    app.listen(9999)
    print('http://localhost:9999/')
    tornado.ioloop.IOLoop.current().start()
    if do_embed:
        embed(colors='neutral')
Code example #32
def train():
    print('[Dataset Configuration]')
    print('\tImageNet training root: %s' % FLAGS.train_image_root)
    print('\tImageNet training list: %s' % FLAGS.train_dataset)
    print('\tImageNet val root: %s' % FLAGS.val_image_root)
    print('\tImageNet val list: %s' % FLAGS.val_dataset)
    print('\tNumber of classes: %d' % FLAGS.num_classes)
    print('\tNumber of training images: %d' % FLAGS.num_train_instance)
    print('\tNumber of val images: %d' % FLAGS.num_val_instance)

    print('[Network Configuration]')
    print('\tBatch size: %d' % FLAGS.batch_size)
    print('\tNumber of GPUs: %d' % FLAGS.num_gpus)
    print('\tNumber of Groups: %d-%d-%d' % (FLAGS.ngroups3, FLAGS.ngroups2, FLAGS.ngroups1))
    print('\tBasemodel file: %s' % FLAGS.basemodel)

    print('[Optimization Configuration]')
    print('\tL2 loss weight: %f' % FLAGS.l2_weight)
    print('\tThe momentum optimizer: %f' % FLAGS.momentum)
    print('\tInitial learning rate: %f' % FLAGS.initial_lr)
    print('\tEpochs per lr step: %s' % FLAGS.lr_step_epoch)
    print('\tLearning rate decay: %f' % FLAGS.lr_decay)

    print('[Training Configuration]')
    print('\tTrain dir: %s' % FLAGS.train_dir)
    print('\tTraining max steps: %d' % FLAGS.max_steps)
    print('\tSteps per displaying info: %d' % FLAGS.display)
    print('\tSteps per validation: %d' % FLAGS.val_interval)
    print('\tSteps during validation: %d' % FLAGS.val_iter)
    print('\tSteps per saving checkpoints: %d' % FLAGS.checkpoint_interval)
    print('\tGPU memory fraction: %f' % FLAGS.gpu_fraction)
    print('\tLog device placement: %d' % FLAGS.log_device_placement)


    with tf.Graph().as_default():
        init_step = 0
        global_step = tf.Variable(0, trainable=False, name='global_step')

        # Get images and labels of ImageNet
        import multiprocessing
        num_threads = multiprocessing.cpu_count() / FLAGS.num_gpus
        print('Load ImageNet dataset(%d threads)' % num_threads)
        with tf.device('/cpu:0'):
            print('\tLoading training data from %s' % FLAGS.train_dataset)
            with tf.variable_scope('train_image'):
                train_images, train_labels = data_input.distorted_inputs(FLAGS.train_image_root, FLAGS.train_dataset
                                               , FLAGS.batch_size, True, num_threads=num_threads, num_sets=FLAGS.num_gpus)
            # tf.summary.image('images', train_images[0])
            print('\tLoading validation data from %s' % FLAGS.val_dataset)
            with tf.variable_scope('test_image'):
                val_images, val_labels = data_input.inputs(FLAGS.val_image_root, FLAGS.val_dataset
                                               , FLAGS.batch_size, False, num_threads=num_threads, num_sets=FLAGS.num_gpus)

        # Get splitted params
        if not FLAGS.basemodel:
            print('No basemodel found to load split params')
            sys.exit(-1)
        else:
            print('Load split params from %s' % FLAGS.basemodel)

            def get_perms(q_name, ngroups):
                split_q = reader.get_tensor(q_name)
                q_amax = np.argmax(split_q, axis=0)
                return [np.where(q_amax == i)[0] for i in range(ngroups)]

            reader = tf.train.NewCheckpointReader(FLAGS.basemodel)
            split_params = {}

            print('\tlogits...')
            base_logits_w = reader.get_tensor('logits/fc/weights')
            base_logits_b = reader.get_tensor('logits/fc/biases')
            split_p1_idxs = get_perms('group/split_p1/q', FLAGS.ngroups1)
            split_q1_idxs = get_perms('group/split_q1/q', FLAGS.ngroups1)

            logits_params = {'weights':[], 'biases':[], 'input_perms':[], 'output_perms':[]}
            for i in range(FLAGS.ngroups1):
                logits_params['weights'].append(base_logits_w[split_p1_idxs[i], :][:, split_q1_idxs[i]])
                logits_params['biases'].append(base_logits_b[split_q1_idxs[i]])
            logits_params['input_perms'] = split_p1_idxs
            logits_params['output_perms'] = split_q1_idxs
            split_params['logits'] = logits_params

            if FLAGS.ngroups2 > 1:
                print('\tconv5_x...')
                base_conv5_1_shortcut_k = reader.get_tensor('conv5_1/shortcut/kernel')
                base_conv5_1_conv1_k = reader.get_tensor('conv5_1/conv_1/kernel')
                base_conv5_1_conv2_k = reader.get_tensor('conv5_1/conv_2/kernel')
                base_conv5_2_conv1_k = reader.get_tensor('conv5_2/conv_1/kernel')
                base_conv5_2_conv2_k = reader.get_tensor('conv5_2/conv_2/kernel')
                split_p2_idxs = get_perms('group/split_p2/q', FLAGS.ngroups2)
                split_q2_idxs = _merge_split_idxs(split_p1_idxs, _get_even_merge_idxs(FLAGS.ngroups1, FLAGS.ngroups2))
                split_r21_idxs = get_perms('group/split_r21/q', FLAGS.ngroups2)
                split_r22_idxs = get_perms('group/split_r22/q', FLAGS.ngroups2)

                conv5_1_params = {'shortcut':[], 'conv1':[], 'conv2':[], 'p_perms':[], 'q_perms':[], 'r_perms':[]}
                for i in range(FLAGS.ngroups2):
                    conv5_1_params['shortcut'].append(base_conv5_1_shortcut_k[:,:,split_p2_idxs[i],:][:,:,:,split_q2_idxs[i]])
                    conv5_1_params['conv1'].append(base_conv5_1_conv1_k[:,:,split_p2_idxs[i],:][:,:,:,split_r21_idxs[i]])
                    conv5_1_params['conv2'].append(base_conv5_1_conv2_k[:,:,split_r21_idxs[i],:][:,:,:,split_q2_idxs[i]])
                conv5_1_params['p_perms'] = split_p2_idxs
                conv5_1_params['q_perms'] = split_q2_idxs
                conv5_1_params['r_perms'] = split_r21_idxs
                split_params['conv5_1'] = conv5_1_params

                conv5_2_params = {'conv1':[], 'conv2':[], 'p_perms':[], 'r_perms':[]}
                for i in range(FLAGS.ngroups2):
                    conv5_2_params['conv1'].append(base_conv5_2_conv1_k[:,:,split_q2_idxs[i],:][:,:,:,split_r22_idxs[i]])
                    conv5_2_params['conv2'].append(base_conv5_2_conv2_k[:,:,split_r22_idxs[i],:][:,:,:,split_q2_idxs[i]])
                conv5_2_params['p_perms'] = split_q2_idxs
                conv5_2_params['r_perms'] = split_r22_idxs
                split_params['conv5_2'] = conv5_2_params


                for i, unit_name in enumerate(['conv5_1', 'conv5_2', 'conv5_3', 'conv5_4', 'conv5_5', 'conv5_6']):
                    print('\t' + unit_name)
                    sp = {}
                    split_params[unit_name] = sp

            if FLAGS.ngroups3 > 1:
                print('\tconv4_x...')
                base_conv4_1_shortcut_k = reader.get_tensor('conv4_1/shortcut/kernel')
                base_conv4_1_conv1_k = reader.get_tensor('conv4_1/conv_1/kernel')
                base_conv4_1_conv2_k = reader.get_tensor('conv4_1/conv_2/kernel')
                base_conv4_2_conv1_k = reader.get_tensor('conv4_2/conv_1/kernel')
                base_conv4_2_conv2_k = reader.get_tensor('conv4_2/conv_2/kernel')
                split_p3_idxs = get_perms('group/split_p3/q', FLAGS.ngroups3)
                split_q3_idxs = _merge_split_idxs(split_p2_idxs, _get_even_merge_idxs(FLAGS.ngroups2, FLAGS.ngroups3))
                split_r31_idxs = get_perms('group/split_r31/q', FLAGS.ngroups3)
                split_r32_idxs = get_perms('group/split_r32/q', FLAGS.ngroups3)

                conv4_1_params = {'shortcut':[], 'conv1':[], 'conv2':[], 'p_perms':[], 'q_perms':[], 'r_perms':[]}
                for i in range(FLAGS.ngroups3):
                    conv4_1_params['shortcut'].append(base_conv4_1_shortcut_k[:,:,split_p3_idxs[i],:][:,:,:,split_q3_idxs[i]])
                    conv4_1_params['conv1'].append(base_conv4_1_conv1_k[:,:,split_p3_idxs[i],:][:,:,:,split_r31_idxs[i]])
                    conv4_1_params['conv2'].append(base_conv4_1_conv2_k[:,:,split_r31_idxs[i],:][:,:,:,split_q3_idxs[i]])
                conv4_1_params['p_perms'] = split_p3_idxs
                conv4_1_params['q_perms'] = split_q3_idxs
                conv4_1_params['r_perms'] = split_r31_idxs
                split_params['conv4_1'] = conv4_1_params

                conv4_2_params = {'conv1':[], 'conv2':[], 'p_perms':[], 'r_perms':[]}
                for i in range(FLAGS.ngroups3):
                    conv4_2_params['conv1'].append(base_conv4_2_conv1_k[:,:,split_q3_idxs[i],:][:,:,:,split_r32_idxs[i]])
                    conv4_2_params['conv2'].append(base_conv4_2_conv2_k[:,:,split_r32_idxs[i],:][:,:,:,split_q3_idxs[i]])
                conv4_2_params['p_perms'] = split_q3_idxs
                conv4_2_params['r_perms'] = split_r32_idxs
                split_params['conv4_2'] = conv4_2_params


        # Build model
        lr_decay_steps = map(float,FLAGS.lr_step_epoch.split(','))
        lr_decay_steps = map(int,[s*FLAGS.num_train_instance/FLAGS.batch_size/FLAGS.num_gpus for s in lr_decay_steps])
        hp = resnet.HParams(batch_size=FLAGS.batch_size,
                            num_gpus=FLAGS.num_gpus,
                            num_classes=FLAGS.num_classes,
                            weight_decay=FLAGS.l2_weight,
                            ngroups1=FLAGS.ngroups1,
                            ngroups2=FLAGS.ngroups2,
                            ngroups3=FLAGS.ngroups3,
                            split_params=split_params,
                            momentum=FLAGS.momentum,
                            finetune=FLAGS.finetune)
        network_train = resnet.ResNet(hp, train_images, train_labels, global_step, name="train")
        network_train.build_model()
        network_train.build_train_op()
        train_summary_op = tf.summary.merge_all()  # Summaries(training)
        network_val = resnet.ResNet(hp, val_images, val_labels, global_step, name="val", reuse_weights=True)
        network_val.build_model()
        print('Number of Weights: %d' % network_train._weights)
        print('FLOPs: %d' % network_train._flops)


        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_fraction),
            # allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))

        '''debugging attempt
        from tensorflow.python import debug as tf_debug
        sess = tf_debug.LocalCLIDebugWrapperSession(sess)
        def _get_data(datum, tensor):
            return tensor == train_images
        sess.add_tensor_filter("get_data", _get_data)
        '''

        sess.run(init)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=10000)
        if FLAGS.checkpoint is not None:
           saver.restore(sess, FLAGS.checkpoint)
           init_step = global_step.eval(session=sess)
           print('Load checkpoint %s' % FLAGS.checkpoint)
        elif FLAGS.basemodel:
            # Define a different saver to save model checkpoints
            # Select only base variables (exclude split layers)
            print('Load parameters from basemodel %s' % FLAGS.basemodel)
            variables = tf.global_variables()
            vars_restore = [var for var in variables
                            if not "Momentum" in var.name and
                               not "logits" in var.name and
                               not "global_step" in var.name]
            if FLAGS.ngroups2 > 1:
                vars_restore = [var for var in vars_restore
                                if not "conv5_" in var.name]
            if FLAGS.ngroups3 > 1:
                vars_restore = [var for var in vars_restore
                                if not "conv4_" in var.name]
            saver_restore = tf.train.Saver(vars_restore, max_to_keep=10000)
            saver_restore.restore(sess, FLAGS.basemodel)
        else:
            print('No checkpoint file of basemodel found. Start from the scratch.')

        # Start queue runners & summary_writer
        tf.train.start_queue_runners(sess=sess)

        if not os.path.exists(FLAGS.train_dir):
            os.mkdir(FLAGS.train_dir)
        summary_writer = tf.summary.FileWriter(os.path.join(FLAGS.train_dir, str(global_step.eval(session=sess))),
                                                sess.graph)

        # Training!
        val_best_acc = 0.0
        for step in xrange(init_step, FLAGS.max_steps):
            # val
            if step % FLAGS.val_interval == 0:
                val_loss, val_acc = 0.0, 0.0
                for i in range(FLAGS.val_iter):
                    loss_value, acc_value = sess.run([network_val.loss, network_val.acc],
                                feed_dict={network_val.is_train:False})
                    val_loss += loss_value
                    val_acc += acc_value
                val_loss /= FLAGS.val_iter
                val_acc /= FLAGS.val_iter
                val_best_acc = max(val_best_acc, val_acc)
                format_str = ('%s: (val)     step %d, loss=%.4f, acc=%.4f')
                print (format_str % (datetime.now(), step, val_loss, val_acc))

                val_summary = tf.Summary()
                val_summary.value.add(tag='val/loss', simple_value=val_loss)
                val_summary.value.add(tag='val/acc', simple_value=val_acc)
                val_summary.value.add(tag='val/best_acc', simple_value=val_best_acc)
                summary_writer.add_summary(val_summary, step)
                summary_writer.flush()

            # Train
            lr_value = get_lr(FLAGS.initial_lr, FLAGS.lr_decay, lr_decay_steps, step)
            start_time = time.time()
            _, loss_value, acc_value, train_summary_str = \
                    sess.run([network_train.train_op, network_train.loss, network_train.acc, train_summary_op],
                            feed_dict={network_train.is_train:True, network_train.lr:lr_value})
            duration = time.time() - start_time

            assert not np.isnan(loss_value)

            # Display & Summary(training)
            if step % FLAGS.display == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)
                format_str = ('%s: (Training) step %d, loss=%.4f, acc=%.4f, lr=%f (%.1f examples/sec; %.3f '
                              'sec/batch)')
                print (format_str % (datetime.now(), step, loss_value, acc_value, lr_value,
                                     examples_per_sec, sec_per_batch))
                summary_writer.add_summary(train_summary_str, step)

            # Save the model checkpoint periodically.
            if (step > init_step and step % FLAGS.checkpoint_interval == 0) or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

            if sys.stdin in select.select([sys.stdin], [], [], 0)[0]:
              char = sys.stdin.read(1)
              if char == 'b':
                embed()
Code example #33
import math
import numpy as np


def gauss_cdf(x, mu, sigma):
    return 0.5 * (1 + math.erf((x - mu) / (math.sqrt(2) * sigma)))


def exgauss_cdf(x, mu, sigma, tau):
    u = (x - mu) / tau
    v = sigma / tau
    return gauss_cdf(u, 0,
                     v) - np.exp(-u + 0.5 * v * v) * (gauss_cdf(u, v * v, v))


def exgauss_cdf_nparray(data, mu, sigma, tau):
    cdf = []
    for x in data:
        u = (x - mu) / tau
        v = sigma / tau
        cdf.append(
            gauss_cdf(u, 0, v) - np.exp(-u + 0.5 * v * v) *
            (gauss_cdf(u, v * v, v)))
    return np.array(cdf)


from IPython import embed
embed()
exit()
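For reference, the exgauss_cdf functions above implement the standard CDF of the exponentially modified Gaussian (ex-Gaussian) distribution. With the substitutions made in the code, the quantity computed is

F(x;\mu,\sigma,\tau) = \Phi(u, 0, v) - e^{-u + \tfrac{1}{2}v^{2}}\,\Phi(u, v^{2}, v),
\qquad u = \frac{x-\mu}{\tau}, \quad v = \frac{\sigma}{\tau},

where \Phi(z, m, s) denotes the CDF of a normal distribution with mean m and standard deviation s, which is the role played by gauss_cdf.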
Code example #34
File: generator.py  Project: iasawseen/au_dl_course
def generate(data_fn, out_fn, N_epochs):
    """ Generates musical sequence based on the given data filename and settings.
        Plays then stores (MIDI file) the generated output. """
    # model settings
    max_len = 20
    max_tries = 1000
    diversity = 0.5

    # musical settings
    bpm = 130

    # get data
    chords, abstract_grammars = get_musical_data(data_fn)
    corpus, values, val_indices, indices_val = get_corpus_data(
        abstract_grammars)
    print('corpus length:', len(corpus))
    print('total # of values:', len(values))

    ###
    embed()
    ###
    # build model
    model = lstm.build_model(corpus=corpus,
                             val_indices=val_indices,
                             max_len=max_len,
                             N_epochs=N_epochs)

    # set up audio stream
    out_stream = stream.Stream()

    # generation loop
    curr_offset = 0.0
    loopEnd = len(chords)
    for loopIndex in range(1, loopEnd):
        # get chords from file
        curr_chords = stream.Voice()
        for j in chords[loopIndex]:
            curr_chords.insert((j.offset % 4), j)

        # generate grammar
        curr_grammar = __generate_grammar(model=model,
                                          corpus=corpus,
                                          abstract_grammars=abstract_grammars,
                                          values=values,
                                          val_indices=val_indices,
                                          indices_val=indices_val,
                                          max_len=max_len,
                                          max_tries=max_tries,
                                          diversity=diversity)

        curr_grammar = curr_grammar.replace(' A', ' C').replace(' X', ' C')

        # Pruning #1: smoothing measure
        curr_grammar = prune_grammar(curr_grammar)

        # Get notes from grammar and chords
        curr_notes = unparse_grammar(curr_grammar, curr_chords)

        # Pruning #2: removing repeated and too close together notes
        curr_notes = prune_notes(curr_notes)

        # quality assurance: clean up notes
        curr_notes = clean_up_notes(curr_notes)

        # print # of notes in curr_notes
        print('After pruning: %s notes' %
              (len([i for i in curr_notes if isinstance(i, note.Note)])))

        # insert into the output stream
        for m in curr_notes:
            out_stream.insert(curr_offset + m.offset, m)
        for mc in curr_chords:
            out_stream.insert(curr_offset + mc.offset, mc)

        curr_offset += 4.0

    out_stream.insert(0.0, tempo.MetronomeMark(number=bpm))

    # Play the final stream through output (see 'play' lambda function above)
    play = lambda x: midi.realtime.StreamPlayer(x).play()
    play(out_stream)

    # save stream
    mf = midi.translate.streamToMidiFile(out_stream)
    mf.open(out_fn, 'wb')
    mf.write()
    mf.close()
Code example #35
    def create_merged_and_cleaned_dataset(self):

        print("creating and storing a merged, cleaned dataset at "
              "cleaned_history_dissertations_dataset.csv")

        theses = []
        raw_df = pd.read_csv(Path(BASE_PATH, 'data', 'dissertations',
                                  'proquest_raw_dataset.csv'),
                             encoding='windows-1252')
        raw_df['ProQuest.Thesis.ID'] = raw_df['ProQuest.Thesis.ID'].astype(
            'str')
        raw_df['AdvisorID'].fillna('unknown', inplace=True)

        weights_df = pd.read_csv(
            Path(BASE_PATH, 'data', 'dissertations',
                 'dissertation_topic_weights.csv'))
        gender_df = pd.read_csv(
            Path(BASE_PATH, 'data', 'dissertations',
                 'author_genders_dissertations.csv'))
        gender_df['assigned'].fillna(value='unknown', inplace=True)

        name_to_gender = {}
        for _, row in gender_df.iterrows():
            name_to_gender[row['name'].lower()] = row['assigned']

        count_found_in_name_to_gender = 0

        for _, pid in weights_df.ProQid.iteritems():

            pid = str(pid)

            raw_row = raw_df[raw_df['ProQuest.Thesis.ID'] == pid]
            if not len(raw_row) == 1:
                print("not 1 row")
                embed()

            raw_row = raw_row.iloc[0]

            thesis = {'m_pid': pid}
            thesis['m_year'] = int(raw_row['ThesisYear'])
            thesis['m_descendants'] = int(raw_row['NumDirectDescendants'])

            thesis['m_title'] = raw_row['ThesisTitle']
            thesis['m_keywords'] = raw_row['ThesisKeywords']
            thesis['m_institution'] = raw_row['ThesisInstitution']
            thesis['m_text'] = raw_row['Abstract']
            thesis['m_text_len'] = len(
                re.findall(WORD_SPLIT_REGEX, thesis['m_text']))

            # Advisee name and gender
            try:
                thesis['m_authors'] = self.proquest_name_parser(
                    raw_row['AdviseeID'])
            except ValueError:
                print('author embed')
                embed()

            assert raw_row['AdviseeGender'] == raw_row['AdviseeGender.1']

            thesis['m_author_genders'] = raw_row['AdviseeGender']
            if thesis['m_authors'].lower() in name_to_gender:
                thesis['m_author_genders'] = name_to_gender[
                    thesis['m_authors'].lower()]
                count_found_in_name_to_gender += 1

            # Advisor name and gender
            if raw_row['AdvisorID'] == 'unknown':
                thesis['m_advisor'] = 'unknown'
            else:
                try:
                    thesis['m_advisor'] = self.proquest_name_parser(
                        raw_row['AdvisorID'])
                except:
                    print("advisor embed")
                    embed()

            assert raw_row['AdvisorGender'] == raw_row['AdvisorGender.1']
            thesis['m_advisor_gender'] = raw_row['AdvisorGender']
            if thesis['m_advisor'].lower() in name_to_gender:
                thesis['m_advisor_gender'] = name_to_gender[
                    thesis['m_advisor'].lower()]

            theses.append(thesis)

            weights_row = weights_df[weights_df['ProQid'] == pid]
            if not len(weights_row) == 1:
                print("weights row not len 1")
                embed()

            weights_row = weights_row.iloc[0]
            for i in range(1, 91):
                thesis[f'topic.{i}'] = weights_row[f'X{i}']

        dissertations_df = pd.DataFrame(theses)
        dissertations_df.to_csv(Path(
            BASE_PATH, 'data', 'dissertations',
            'cleaned_history_dissertations_dataset.csv'),
                                encoding='utf-8')

        return
Code example #36
        return batch_list

    def getTestList(self, batch_size=5000, cuda=False):
        n_batch = self.n_test // batch_size
        batch_list = []
        for i in range(n_batch):
            X = torch.FloatTensor(self.X_test[batch_size*i:batch_size*(i+1)])
            Y = torch.LongTensor(self.Y_test[batch_size*i:batch_size*(i+1)])
            if cuda:
                X = X.cuda()
                Y = Y.cuda()
            batch_list.append((X, Y))
        return batch_list

    def getTrainList(self, batch_size=5000, cuda=False):
        n_batch = self.n_train // batch_size
        batch_list = []
        for i in range(n_batch):
            X = torch.FloatTensor(self.X_train[batch_size*i:batch_size*(i+1)])
            Y = torch.LongTensor(self.Y_train[batch_size*i:batch_size*(i+1)])
            if cuda:
                X = X.cuda()
                Y = Y.cuda()
            batch_list.append((X, Y))
        return batch_list

if __name__ == '__main__':
    datapath = '/home/wjf/datasets/SVHN/train25000_test70000'
    dataset = SVHN(datapath)
    from IPython import embed; embed()
Code example #37
def main():
    try:
        input_function = raw_input
    except NameError:
        input_function = input  #python3
    session_id = uuid.uuid1()
    history = FileHistory('/tmp/.zhimabot_prompt') #InMemoryHistory()
    my_completer = WordCompleter(['!print','response',"!run","!debug"])

    # zhimabot
    '''
    ai = yige.Yige(CLIENT_ACCESS_TOKEN)
    request = ai.text_request()
    '''

    response = None
    while True:
        try:
            # lexer: pull the vocabulary down from the web, cache it locally, then highlight
            # tokenize local input first
            # reading: sqlite
            # https://github.com/eliangcs/http-prompt yige-prompt
            # https://github.com/donnemartin/haxor-news
            # natural language: https://github.com/jonathanslenders/python-prompt-toolkit/blob/master/examples/regular-language.py
            message = prompt(INTERACTIVE_PROMPT,
                             history=history,
                             enable_history_search=True,
                             auto_suggest=AutoSuggestFromHistory(),
                             completer=my_completer,
                             get_bottom_toolbar_tokens=get_bottom_toolbar_tokens,  # bottom status bar
                             style = BOTTOM_TOOLBAR_STYLE,
                             validator = myValidator(),
                             #get_title= fun #"Yige_prompt",
                             ).rstrip()
                             #mouse_support=True).rstrip()  # mouse support causes paging problems
        except (KeyboardInterrupt, EOFError):
            return
        # the user enters a sentence which is then parsed; colors make debugging easier? console, syntax highlighting
        #context = run_actions(session_id, message, context, max_steps)
        if message:
            # at this point we only have the input; what to do with it is up to us
            if message.startswith("!run"):
                # execute python in the current context
                # the result of the previous call is in `response`; grep for keywords?
                code = message.split("run")[-1].strip()
                # turn response into an object-like wrapper
                # for complex debugging use ipython, or drop into an ipython context? carry the current environment along
                # inject the context before ipython?
                try:
                    exec(code)  # execute in the current context; python hints
                except Exception as e :
                    print(str(e))
            elif message.startswith("!debug"):
                # help with installation
                embed(header='zhimabot debug console \n    --  by 『wwj718』(blog.just4fun.site) \n    --  如有建议或bug,欢迎发我邮件:[email protected]', banner1='')

            else:
                #查询
                try:
                    #此处写逻辑
                    #request.query = message #用户输入
                    payload = {}
                    payload["appId"] = CLIENT_ACCESS_TOKEN
                    payload["query"] = message
                    url = "http://dev.zhimabot.com:8080/zhimabot/analysis"
                    response = requests.post(url,json=payload)
                    #response = request.getresponse()  # note the confidence value
                    #request.session_id = session_id
                    #response = request.getresponse() #json
                    print(output_format(response.json()))
                except Exception as e:
                    print(str(e))
Code example #38
def build_template(in_files,
                   slits,
                   wv_cuts,
                   binspec,
                   outroot,
                   normalize=False,
                   subtract_conti=False,
                   wvspec=None,
                   lowredux=True,
                   ifiles=None,
                   det_cut=None,
                   chk=False,
                   miny=None):
    """
    Generate a full_template for a given instrument

    Args:
        in_files (list or str):
            Wavelength solution files, XIDL or PypeIt
        slits (list):
            Slits in the archive files to use
        wv_cuts (list):
            Wavelengths to cut each slit at
        binspec (int):
            Spectral binning of the archived spectrum
        outroot (str):
            Name of output archive
        lowredux (bool, optional):
            If true, in_files are from LowRedux
        wvspec (ndarray, optional):
            Manually input the wavelength values
        ifiles (list, optional):
            Ordering of the in_files.  Default is np.arange(len(in_files))
        det_cut (dict, optional):
            Cut the detector into pieces.  Important for long detectors with wavelengths on one side
        chk (bool, optional):
            Show a plot or two
        miny (float):
            Impose a minimum value
        normalize (bool, optional):
            If provided multiple in_files, normalize each
            snippet to have the same maximum amplitude.
        subtract_conti (bool, optional):
            Subtract the continuum for the final archive
    """
    # Load xidl file
    # Grab it
    # Load and splice
    yvals = []
    lvals = []
    if not isinstance(in_files, list):
        in_files = [in_files]
        ifiles = [0] * len(slits)
    for kk, slit in enumerate(slits):
        if wvspec is None:
            in_file = in_files[ifiles[kk]]
            if lowredux:
                wv_vac, spec = xidl_arcspec(in_file, slit)
            else:
                wv_vac, spec = pypeit_arcspec(in_file, slit)
        else:
            wv_vac, spec = wvspec['wv_vac'], wvspec['spec']
        # Cut
        if len(slits) > 1:
            if kk == 0:
                llow = 0.
                lhi = wv_cuts[0]
            elif kk == len(slits) - 1:
                llow = wv_cuts[kk - 1]
                lhi = 1e9
            else:
                llow = wv_cuts[kk - 1]
                lhi = wv_cuts[kk]
            #
            gdi = (wv_vac > llow) & (wv_vac < lhi)
        else:
            gdi = np.arange(spec.size).astype(int)
        # Append
        yvals.append(spec[gdi])
        lvals.append(wv_vac[gdi])
    # Continuum
    if subtract_conti:
        for kk, spec in enumerate(yvals):
            _, _, _, _, spec_cont_sub = wvutils.arc_lines_from_spec(spec)
            yvals[kk] = spec_cont_sub
    # Normalize?
    if normalize:
        norm_val = 10000.
        # Max values
        maxs = []
        for kk, spec in enumerate(yvals):
            mx = np.max(spec)
            spec = spec * norm_val / mx
            yvals[kk] = spec
    # Concatenate
    nwspec = np.concatenate(yvals)
    nwwv = np.concatenate(lvals)
    # Min y?
    if miny is not None:
        nwspec = np.maximum(nwspec, miny)
    # Check
    if chk:
        debugger.plot1d(nwwv, nwspec)
        embed(header='102')
    # Generate the table
    write_template(nwwv, nwspec, binspec, outpath, outroot, det_cut=det_cut)
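
# A hedged usage sketch (added, not part of the original snippet): how
# build_template() might be invoked, assuming the PypeIt templates module
# context in which pypeit_arcspec(), write_template(), wvutils and the
# module-level `outpath` are defined.  The file names, slit indices and
# wavelength cut below are hypothetical placeholders.
if __name__ == '__main__':
    example_files = ['arc_solution_slit0.fits', 'arc_solution_slit1.fits']  # hypothetical inputs
    build_template(example_files,
                   slits=[0, 1],        # one archived slit per input file
                   ifiles=[0, 1],       # map each slit to its input file
                   wv_cuts=[4500.],     # splice wavelength between the two snippets
                   binspec=1,
                   outroot='my_arc_template.fits',
                   lowredux=False,      # inputs are PypeIt (not LowRedux) solutions
                   normalize=True,      # put the snippets on a common amplitude scale
                   chk=False)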
Code example #39
def main(argv=None, client=None):
    """Entry point.

  :param argv: Arguments list.
  :param client: For testing.

  """
    args = docopt(__doc__, argv=argv, version=__version__)
    if not client:
        client = configure_client('hdfscli', args)
    elif args['--log']:
        raise HdfsError(
            'Logging is only available when no client is specified.')
    hdfs_path = args['HDFS_PATH']
    local_path = args['LOCAL_PATH']
    n_threads = parse_arg(args, '--threads', int)
    force = args['--force']
    silent = args['--silent']
    if args['download']:
        chunk_size = 2**16
        if local_path == '-':
            if not sys.stdout.isatty() and sys.stderr.isatty() and not silent:
                progress = _Progress.from_hdfs_path(client, hdfs_path)
            else:
                progress = None
            with client.read(
                    hdfs_path,
                    chunk_size=chunk_size,
                    progress=progress,
            ) as reader:
                # https://stackoverflow.com/a/23932488/1062617
                stdout = getattr(sys.stdout, 'buffer', sys.stdout)
                for chunk in reader:
                    stdout.write(chunk)
        else:
            if sys.stderr.isatty() and not silent:
                progress = _Progress.from_hdfs_path(client, hdfs_path)
            else:
                progress = None
            client.download(
                hdfs_path,
                local_path,
                overwrite=force,
                n_threads=n_threads,
                chunk_size=chunk_size,
                progress=progress,
            )
    elif args['upload']:
        append = args['--append']
        if local_path == '-':
            client.write(
                hdfs_path,
                (line for line in sys.stdin),  # Doesn't work with stdin.
                append=append,
                overwrite=force,
            )
        else:
            if append:
                # TODO: Add progress tracking here.
                if osp.isfile(local_path):
                    with open(local_path) as reader:
                        client.write(hdfs_path, reader, append=True)
                else:
                    raise HdfsError(
                        'Can only append when uploading a single file.')
            else:
                if sys.stderr.isatty() and not silent:
                    progress = _Progress.from_local_path(local_path)
                else:
                    progress = None
                client.upload(
                    hdfs_path,
                    local_path,
                    overwrite=force,
                    n_threads=n_threads,
                    progress=progress,
                )
    else:
        banner = ('\n'
                  'Welcome to the interactive HDFS python shell.\n'
                  'The HDFS client is available as `CLIENT`.\n')
        namespace = {'CLIENT': client}
        try:
            from IPython import embed
        except ImportError:
            from code import interact
            interact(banner=banner, local=namespace)
        else:
            embed(banner1=banner, user_ns=namespace)
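
# Hedged usage sketch (added, not from the original source): invocations implied
# by the options parsed above.  The exact command-line syntax is defined by the
# module docstring consumed by docopt, which is not shown in this snippet, so the
# paths below are illustrative placeholders only.
#
#   hdfscli download --threads=4 /tmp/remote.csv local.csv   # parallel download
#   hdfscli download /tmp/remote.csv -  > local.csv          # '-' streams to stdout
#   hdfscli upload --append local.csv /tmp/remote.csv        # append a local file
#   hdfscli                                                  # no subcommand: interactive shell with CLIENT bound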
Code example #40
def guid(self):
    print("guid")
    from IPython import embed
    embed(colors='Linux')
Code example #41
    def data_retrievingS(self, win_start, win_end):
        '''

        '''

        # this should be revised depending on the maximum distance of the limits.

        self.time_initial = win_start
        self.time_end = win_end

        id_res=self.session.query(self.ASSOC_results).filter(self.ASSOC_results.net==self.net)\
                                  .filter(self.ASSOC_results.fdid ==-1)\
                                  .filter(self.ASSOC_results.passocid ==self.passocid)\
                                  .filter(self.ASSOC_results.timeini==self.time_initial)\
                                  .filter(self.ASSOC_results.timeend==self.time_end)\
                                  .filter(self.ASSOC_results.qassoc==-1).all()
        if len(id_res) == 1:
            return 1
        else:
            print('start getting data for analysis')
        self.Detection = []
        self.Detection_Q = []
        #embed()
        refSTA = []
        for aai in self.Affiliation_Q:

            try:
                STA_dataM = self.session.query(
                    self.Site).filter(self.Site.sta == aai.sta).one()

            except Exception as ex1:
                #print
                print('there is more than just one station:', aai.sta, '  ',
                      ex1)
                embed()
                exit()
            #embed()
            refSTA.append(STA_dataM.refsta)

        refstations_l = list(set(refSTA))
        refsta = []
        #embed()
        for aai in refstations_l:
            STA_dataM = self.session.query(
                self.Site).filter(self.Site.refsta == str(aai)).all()
            array_lo = []
            array_la = []
            array_el = []
            for sta_i in STA_dataM:
                array_la.append(sta_i.lat)
                array_lo.append(sta_i.lon)
                array_el.append(sta_i.elev)
            array_la = np.asarray(array_la)
            array_lo = np.asarray(array_lo)
            array_el = np.asarray(array_el)
            refsta.append({
                'lon': np.mean(array_lo),
                'lat': np.mean(array_la),
                'elev': np.mean(array_el),
                'name': aai,
                'numsta': len(array_la)
            })
        self.det_tot = []
        self.fdtable_name = []
        #embed()

        for aai in refsta:
            print('getting data from:', aai['name'])
            # here it looks for any detections in all the tables
            for ti in range(self.num_tables):
                try:
                    fd_res = self.session.query(self.fdtables[ti]).filter(
                        self.fdtables[ti].sta == aai['name']).filter(
                            self.fdtables[ti].pfdid == self.pfdid).filter(
                                self.fdtables[ti].pfkid == self.pfkid).filter(
                                    self.fdtables[ti].timeini >=
                                    self.time_initial).all()
                    #times_ini=self.session.query(self.fdtables[ti].timeini).filter(self.fdtables[ti].sta==aai['name']).filter(self.fdtables[ti].pfdid==self.pfdid).filter(self.fdtables[ti].pfkid==self.pfkid).filter(self.fdtables[ti].timeini>=self.time_initial).filter(self.fdtables[ti].timeini<=self.time_end).all()
                    try:

                        class Fk_results(schema.fk_results):
                            __tablename__ = fd_res[0].fktablename
                    except Exception as ex1:
                        pass
                    times_ini = self.session.query(
                        self.fdtables[ti].timeini
                    ).filter(self.fdtables[ti].sta == aai['name']).filter(
                        self.fdtables[ti].pfdid == self.pfdid
                    ).filter(self.fdtables[ti].pfkid == self.pfkid).filter(
                        self.fdtables[ti].timeini >= self.time_initial).all()
                    if len(fd_res) > 0:
                        print('length results:', len(fd_res))
                        for dqi in range(len(fd_res)):
                            if win_start < fd_res[dqi].timeini < win_end:

                                fk_res = self.session.query(Fk_results).filter(
                                    Fk_results.pfkid == fd_res[dqi].pfkid
                                ).filter(Fk_results.sta == aai['name']).filter(
                                    Fk_results.timeini.between(
                                        fd_res[dqi].timeini,
                                        fd_res[dqi].timeend)).all()
                                embed()
                                fval = []
                                bz = []

                                for fk_i in fk_res:
                                    fval.append(fk_i.fval)
                                    bz.append(fk_i.bz)

                                fval = np.asarray(fval)
                                fvalm_i = np.argmax(fval)
                                if not fvalm_i:
                                    continue
                                else:
                                    #det1 = inf_det_global(aai['lat'], aai['lon'], UTCDateTime(fd_res.timeini).datetime, fd_res.bz,  fd_res.fval, aai['numsta'])
                                    det1 = (aai['lat'], aai['lon'],
                                            fd_res[dqi].timeini, bz[fvalm_i],
                                            fval[fvalm_i], aai['numsta'],
                                            fd_res[dqi].fdid, aai['name'],
                                            self.fdtables_names[ti])

                                    self.det_tot.append(det1)
                                    self.fdtable_name.append(
                                        self.fdtables_names[ti])

                except Exception as x1:
                    print('There is an error', x1)
                    embed()
                    exit()
        print('all data retrieved')
        return 2
Code example #42
def main(args):
    logger.debug("Arguments: %r", args)
    tfidf_vect = default_vectorizer()
    tfidf_vect.set_params(
        ngram_range=(args.min_ngrams, args.max_ngrams),
        max_df=args.max_df,
        max_features=args.max_features,
        sublinear_tf=args.sublinear_tf,
        norm=args.norm,
    )

    with LogRuntime("Loaded input data in {elapsed} seconds", logger):
        data = get_data(args)
    if data:
        logger.debug("Corpus size: {0}".format(len(data)))
    else:
        logger.error("Empty data")
        return

    with LogRuntime("Fitted in {0.elapsed} seconds", logger):
        X = tfidf_vect.fit_transform(data)

    logger.debug("Vocabulary size: {}".format(len(tfidf_vect.vocabulary_)))
    logger.debug("Max DF stop words size: {}".format(
        len(tfidf_vect.stop_words_)))
    logger.debug("Stop words size: {}".format(len(tfidf_vect.stop_words)))

    if args.clusters:
        true_k = args.clusters
    else:
        # ref: http://en.wikipedia.org/wiki/Determining_the_number_of_clusters_in_a_data_set#Finding_Number_of_Clusters_in_Text_Databases
        m_docs, n_terms = X.shape
        t_nonzeros = len(X.nonzero()[0])
        true_k = (m_docs * n_terms) // t_nonzeros  # integer cluster count
        logger.debug("Calculated number of clusters: {}".format(true_k))

    if args.minibatch:
        km = MiniBatchKMeans(n_clusters=true_k,
                             init='k-means++',
                             n_init=10,
                             init_size=1000,
                             batch_size=1000,
                             verbose=-1)
    else:
        km = KMeans(n_clusters=true_k,
                    init='random',
                    max_iter=100,
                    n_init=10,
                    verbose=1,
                    n_jobs=-1)

    with LogRuntime("KMeans Fitted in {0.elapsed} seconds", logger):
        km.fit(X)

    if args.sample_random and args.sample_size:
        sample = [
            data[i]
            for i in np.random.random_integers(0, len(data), args.sample_size)
        ]
    elif args.sample_size:
        sample = data[args.sample_skip:args.sample_size]
    else:
        sample = data

    Y = tfidf_vect.transform(sample)
    sample_terms = tfidf_vect.inverse_transform(Y)

    labels = km.predict(Y)
    distances = km.transform(Y)
    center_terms = tfidf_vect.inverse_transform(km.cluster_centers_)

    clusters = defaultdict(list)
    vocabulary = tfidf_vect.vocabulary_

    for i, doc in enumerate(sample):
        clusters[labels[i]].append((i, doc))

    truncate = lambda t: t[:100] + '...' if len(t) > 100 else t

    for label, result in sorted(clusters.items()):
        # skip single results
        if len(result) < args.cluster_minsize:
            continue
        terms_joined = ', '.join(
            sorted(center_terms[label],
                   reverse=True,
                   key=lambda t: km.cluster_centers_[label, vocabulary[t]]))
        print('=' * 79)
        print('=' * 79)
        print('=' * 79)
        print('-> ' + truncate(terms_joined) + '\n\n')
        result = sorted(
            result,
            key=lambda item: distances[item[0], label],
        )

        j = 0
        for i, doc in result:
            j += 1
            doc_terms = ', '.join(
                sorted(
                    sample_terms[i],
                    reverse=True,
                    key=lambda t: Y[i, vocabulary[t]],
                ))
            print(doc['headline'])
            print(get_corpus_key(doc))
            print(doc['url'])
            print('distance:', distances[i, label])
            print(truncate(doc_terms))
            print()
            if j > 10:
                print('...')
                break

        print()

    if args.shell:
        from IPython import embed
        embed()
Code example #43
def authorizeKey(self, keypath="/home/despiegk/.ssh/perftest.pub"):
    from IPython import embed
    print("DEBUG NOW authorizeKey")
    embed()
Code example #44
    def data_processingASSOC(self):
        '''

        '''
        print('data processing', short_time(UTCDateTime(self.time_initial)),
              short_time(UTCDateTime(self.time_end)))

        det_list = lklhds.db2dets(self.det_tot)
        EVIDs = []
        embed()
        if len(det_list) > 1:
            try:
                #EVIDs,DAQ,CAQ=assoc(det_list, self.lims, float(self.assocthresh), show_result=False,parallel=True,num_cores=self.numcores)
                #labels, dists = hjl.run(det_list, self.clusterthresh, dist_max=self.distmax, bm_width=self.beamwidth, rng_max=self.rangemax, trimming_thresh=self.trimthresh, pool=self.pl)
                labels, dists = hjl.run(det_list,
                                        self.clusterthresh,
                                        dist_max=self.distmax,
                                        bm_width=self.beamwidth,
                                        rng_max=self.rangemax,
                                        pool=self.pl)
                clusters, qualities = hjl.summarize_clusters(labels, dists)

                for n in range(len(clusters)):
                    print("Cluster:", clusters[n], '\t', "Cluster Quality:",
                          10.0**(-qualities[n]))
                    lastEVENTIDQ = self.session.query(
                        func.max(self.ASSOC_results.eventid)).all()
                    lastEVENTID = lastEVENTIDQ[0][0]
                    if lastEVENTID is None:
                        lastEVENTID = int(0)
                    lastEVENTID = lastEVENTID + 1
                    #embed()
                    for nn in range(len(clusters[n])):
                        det_id = clusters[n][nn]
                        id_res=self.session.query(self.ASSOC_results).filter(self.ASSOC_results.net==self.net)\
                                                  .filter(self.ASSOC_results.fdid ==self.det_tot[det_id][6])\
                                                  .filter(self.ASSOC_results.passocid ==self.passocid)\
                                                  .filter(self.ASSOC_results.timeini==self.time_initial)\
                                                  .filter(self.ASSOC_results.timeend==self.time_end)\
                                                  .filter(self.ASSOC_results.qdetcluster==10.0**(-qualities[n]))\
                                                  .filter(self.ASSOC_results.fdtable==self.det_tot[det_id][8])\
                                                  .filter(self.ASSOC_results.sta==self.det_tot[det_id][7]).all()
                        id_resC = self.session.query(
                            self.ASSOC_results).count() + 1

                        if not id_res:

                            res=self.ASSOC_results(associd=id_resC,\
                                    fdid=self.det_tot[det_id][6],\
                                    eventid=int(lastEVENTID),\
                                    passocid=self.passocid,\
                                    net=self.net,\
                                    timeini=self.time_initial,\
                                    timeend=self.time_end,\
                                    qdetcluster=10.0**(-qualities[n]),\
                                    fdtable=self.det_tot[det_id][8],\
                                    sta=self.det_tot[det_id][7])
                            self.session.add(res)
                            self.session.commit()
                print('associations written', len(clusters))

            except Exception as ex1:
                print('error running assoc:', ex1)
                embed()
                exit()
            '''
Code example #45
File: show_2dspec.py  Project: Tang-SL/PypeIt
def main(args):

    # List only?
    if args.list:
        hdu = io.fits_open(args.file)
        hdu.info()
        return

    # Load it up -- NOTE WE ALLOW *OLD* VERSIONS TO GO FORTH
    spec2DObj = spec2dobj.Spec2DObj.from_file(args.file,
                                              args.det,
                                              chk_version=False)

    # Setup for PypeIt imports
    msgs.reset(verbosity=2)

    # Init
    # TODO: get_dnum needs to be deprecated...
    sdet = get_dnum(args.det, prefix=False)

    # Find the set of channels to show
    if args.channels is not None:
        show_channels = [int(item) for item in args.channels.split(',')]
    else:
        show_channels = [0, 1, 2, 3]

    # Grab the slit edges
    slits = spec2DObj.slits
    if spec2DObj.sci_spat_flexure is not None:
        msgs.info("Offseting slits by {}".format(spec2DObj.sci_spat_flexure))
    all_left, all_right, mask = slits.select_edges(
        flexure=spec2DObj.sci_spat_flexure)
    # TODO -- This may be too restrictive, i.e. ignore BADFLTCALIB??
    gpm = mask == 0
    left = all_left[:, gpm]
    right = all_right[:, gpm]
    slid_IDs = spec2DObj.slits.slitord_id[gpm]
    maskdef_id = spec2DObj.slits.maskdef_id[
        gpm] if spec2DObj.slits.maskdef_id is not None else None

    bitMask = ImageBitMask()

    # Object traces from spec1d file
    spec1d_file = args.file.replace('spec2d', 'spec1d')
    if args.file[-2:] == 'gz':
        spec1d_file = spec1d_file[:-3]
    if os.path.isfile(spec1d_file):
        sobjs = specobjs.SpecObjs.from_fitsfile(spec1d_file)
    else:
        sobjs = None
        msgs.warn('Could not find spec1d file: {:s}'.format(spec1d_file) +
                  msgs.newline() +
                  '                          No objects were extracted.')

    display.connect_to_ginga(raise_err=True, allow_new=True)

    # Now show each image to a separate channel

    # Show the bitmask?
    mask_in = None
    if args.showmask:
        viewer, ch = display.show_image(spec2DObj.bpmmask,
                                        chname="BPM",
                                        waveimg=spec2DObj.waveimg,
                                        clear=True)

    channel_names = []
    # SCIIMG
    if 0 in show_channels:
        image = spec2DObj.sciimg  # Processed science image
        chname_sciimage = 'sciimg-det{:s}'.format(sdet)
        # Clear all channels at the beginning
        viewer, ch = display.show_image(image,
                                        chname=chname_sciimage,
                                        waveimg=spec2DObj.waveimg,
                                        clear=True)

        if sobjs is not None:
            show_trace(sobjs, args.det, viewer, ch)
        display.show_slits(viewer,
                           ch,
                           left,
                           right,
                           slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_sciimage)

    # SKYSUB
    if 1 in show_channels:
        if args.ignore_extract_mask:
            # TODO -- Is there a cleaner way to do this?
            gpm = (spec2DObj.bpmmask == 0) | (spec2DObj.bpmmask == 2**
                                              bitMask.bits['EXTRACT'])
        else:
            gpm = spec2DObj.bpmmask == 0

        image = (spec2DObj.sciimg - spec2DObj.skymodel) * gpm
        chname_skysub = 'skysub-det{:s}'.format(sdet)
        # Clear all channels at the beginning
        # TODO: JFH For some reason Ginga crashes when I try to put cuts in here.
        viewer, ch = display.show_image(image,
                                        chname=chname_skysub,
                                        waveimg=spec2DObj.waveimg,
                                        bitmask=bitMask,
                                        mask=mask_in)
        if not args.removetrace and sobjs is not None:
            show_trace(sobjs, args.det, viewer, ch)
        display.show_slits(viewer,
                           ch,
                           left,
                           right,
                           slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_skysub)

    # SKRESIDS
    if 2 in show_channels:
        chname_skyresids = 'sky_resid-det{:s}'.format(sdet)
        image = (spec2DObj.sciimg - spec2DObj.skymodel) * np.sqrt(
            spec2DObj.ivarmodel) * gpm
        viewer, ch = display.show_image(image,
                                        chname_skyresids,
                                        waveimg=spec2DObj.waveimg,
                                        cuts=(-5.0, 5.0),
                                        bitmask=bitMask,
                                        mask=mask_in)
        if not args.removetrace and sobjs is not None:
            show_trace(sobjs, args.det, viewer, ch)
        display.show_slits(viewer,
                           ch,
                           left,
                           right,
                           slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_skyresids)

    # RESIDS
    if 3 in show_channels:
        chname_resids = 'resid-det{:s}'.format(sdet)
        # full model residual map
        image = (spec2DObj.sciimg - spec2DObj.skymodel - spec2DObj.objmodel
                 ) * np.sqrt(spec2DObj.ivarmodel) * (spec2DObj.bpmmask == 0)
        viewer, ch = display.show_image(image,
                                        chname=chname_resids,
                                        waveimg=spec2DObj.waveimg,
                                        cuts=(-5.0, 5.0),
                                        bitmask=bitMask,
                                        mask=mask_in)
        if not args.removetrace and sobjs is not None:
            show_trace(sobjs, args.det, viewer, ch)
        display.show_slits(viewer,
                           ch,
                           left,
                           right,
                           slit_ids=slid_IDs,
                           maskdef_ids=maskdef_id)
        channel_names.append(chname_resids)

    # After displaying all the images sync up the images with WCS_MATCH
    shell = viewer.shell()
    shell.start_global_plugin('WCSMatch')
    shell.call_global_plugin_method('WCSMatch', 'set_reference_channel',
                                    [channel_names[-1]], {})

    if args.embed:
        embed()
Code example #46
    def database_connecting(self):
        print('connecting')

        session, tables = load_config(self.db_PARAM)
        self.session = session
        self.Site = tables['site']
        self.Wfdisc = tables['wfdisc']
        self.Affiliation = tables['affiliation']
        import pisces.schema.css3 as kba

        #class FK_results(schema.fk_results):
        #    __tablename__ = 'FK_results'

        class FK_params(schema.fk_params):
            __tablename__ = 'FK_params'

        class FD_params(schema.fd_params):
            __tablename__ = 'FD_params'

        self.dict_namefk = {}

        self.fdtables = []
        for fdi in self.fdtables_names:
            self.fdtables.append(
                type(str(fdi), (schema.fd_results, ),
                     {'__tablename__': str(fdi)}))
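        # (added note) type() builds one ORM class per detection-results table at
        # runtime, equivalent to writing `class <fdi>(schema.fd_results):
        #     __tablename__ = fdi` for every name in self.fdtables_names.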

        class ASSOC_params(schema.ASSOC_params):
            __tablename__ = 'ASSOC_params'

        class ASSOC_results(schema.ASSOC_results):
            __tablename__ = self.resultstable

        self.FK_par = FK_params
        #self.FK_results=FK_results
        self.FD_par = FD_params
        #self.Fd_results=Fd_results

        self.ASSOC_par = ASSOC_params
        self.ASSOC_results = ASSOC_results

        self.ASSOC_par.__table__.create(self.session.bind, checkfirst=True)
        self.ASSOC_results.__table__.create(self.session.bind, checkfirst=True)

        try:
            self.Passoc_Q=self.session.query(self.ASSOC_par). \
                filter(self.ASSOC_par.beamwidth==self.beamwidth).\
                filter(self.ASSOC_par.rangemax==self.rangemax). \
                filter(self.ASSOC_par.clusterthresh==self.clusterthresh).\
                filter(self.ASSOC_par.trimthresh==self.trimthresh).\
                filter(self.ASSOC_par.eventdetmin==self.eventdetmin).\
                filter(self.ASSOC_par.eventarrmin==self.eventarrmin).\
                filter(self.ASSOC_par.duration==self.duration).\
                all()

            if len(self.Passoc_Q) > 1:
                print(
                    'issue with the database too many parameters entries, there should be just one'
                )
                embed()
            if len(self.Passoc_Q) == 1:
                self.Passoc_Q = self.Passoc_Q[0]
        except Exception as x1:
            print("issue with the table or first assoc entered")
            print(x1)
            embed()
            self.Passoc_Q = []
            print(self.Passoc_Q)
        if not self.Passoc_Q:
            print(
                'New process parameters, write process to INFRA_ASSOC_PARAM table'
            )
            new_row = self.session.query(self.ASSOC_par).count()
            try:
                res=self.ASSOC_par(  beamwidth=self.beamwidth,\
                                     rangemax=self.rangemax,\
                                     clusterthresh=self.clusterthresh,\
                                     trimthresh=self.trimthresh,\
                                     eventdetmin=self.eventdetmin,\
                                     algorithm=self.algorithm,\
                                     eventarrmin=self.eventarrmin,\
                                     duration=self.duration,\
                                     passocid=new_row)
            except Exception as x1:
                print('problem writing to the assoc param file')
                print("Unexpected error:", x1)
                embed()

            self.session.add(res)
            self.session.commit()
            self.Passoc_Q=self.session.query(self.ASSOC_par). \
                filter(self.ASSOC_par.beamwidth==self.beamwidth).\
                filter(self.ASSOC_par.rangemax==self.rangemax). \
                filter(self.ASSOC_par.clusterthresh==self.clusterthresh).\
                filter(self.ASSOC_par.trimthresh==self.trimthresh).\
                filter(self.ASSOC_par.eventdetmin==self.eventdetmin).\
                filter(self.ASSOC_par.eventarrmin==self.eventarrmin).\
                filter(self.ASSOC_par.duration==self.duration).\
                one()
            self.passocid = self.Passoc_Q.passocid
            #embed()
        else:
            print('process already in table: Assoc params table')
            self.passocid = self.Passoc_Q.passocid
            print(self.Passoc_Q)
        self.db_connected = True
        return self.db_connected
Code example #47
# CsPbI3 - perfect cubic
#PB_X=0.5*NGX
#PB_Y=0.5*NGY
#PB_Z=0.5*NGZ

# MAPBI3 - pseudo cubic distorted
#PB_X=0.476171*NGX
#PB_Y=0.500031*NGY
#PB_Z=0.475647*NGZ
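# NOTE (added for clarity): one of the PB_X/PB_Y/PB_Z blocks above must be
# uncommented, and the values cast to int, before they can serve as grid
# indices in the look-ups below.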

# Read out massive grad table, in {x,y,z} components
print(grad_x[PB_X][PB_Y][PB_Z], grad_y[PB_X][PB_Y][PB_Z],
      grad_z[PB_X][PB_Y][PB_Z])
# Norm of electric field at this point
print(
    np.linalg.norm([
        grad_x[PB_X][PB_Y][PB_Z], grad_y[PB_X][PB_Y][PB_Z],
        grad_z[PB_X][PB_Y][PB_Z]
    ]))

# OK, let's try this with a Spectral method (FFT)
# JMF - Not currently working; unsure of data formats, need worked example
from scipy import fftpack
V_FFT = fftpack.fftn(grid_pot[:, :, :])
V_deriv = fftpack.diff(grid_pot[:, :, :])  #V_FFT,order=1)
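# Hedged sketch (added, not from the original script): fftpack.diff expects a
# periodic 1-D sequence, so a worked example on a single grid line of the
# potential might look like the following.  `lattice_x` (the cell length along
# x, in the same units as the grid) is a hypothetical name; NGX and grid_pot
# are assumed from the surrounding script.
#
#   line = grid_pot[:, 0, 0]                                   # 1-D cut along x
#   dline_dx = fftpack.diff(line, order=1, period=lattice_x)   # d(potential)/dx along that line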

# Standard catch all to drop into ipython at end of script for variable inspection etc.
from IPython import embed
embed()  # End on an interactive ipython console to inspect variables etc.
Code example #48
def Preprocessing(d, stage='train'):
    height, width = cfg.data_shape
    imgs = []
    labels = []
    valids = []
    if cfg.use_seg:
        segms = []

    vis = False
    # vis = True
    img = cv2.imread(os.path.join(cfg.img_path, d['imgpath']))
    '''
    if d["score"] >= 0.1:
        cv2.imshow("hoge", img)
        cv2.waitKey(0)
    '''
    #hack(multiprocessing data provider)
    while img is None:
        print('read none image')
        time.sleep(np.random.rand() * 5)
        img = cv2.imread(os.path.join(cfg.img_path, d['imgpath']))
    add = max(img.shape[0], img.shape[1])
    bimg = cv2.copyMakeBorder(img,
                              add,
                              add,
                              add,
                              add,
                              borderType=cv2.BORDER_CONSTANT,
                              value=cfg.pixel_means.reshape(-1))

    bbox = np.array(d['bbox']).reshape(4, ).astype(np.float32)
    bbox[:2] += add

    if 'joints' in d:
        joints = np.array(d['joints']).reshape(cfg.nr_skeleton,
                                               3).astype(np.float32)
        joints[:, :2] += add
        inds = np.where(joints[:, -1] == 0)
        joints[inds, :2] = -1000000

    crop_width = bbox[2] * (1 + cfg.imgExtXBorder * 2)
    crop_height = bbox[3] * (1 + cfg.imgExtYBorder * 2)
    objcenter = np.array([bbox[0] + bbox[2] / 2., bbox[1] + bbox[3] / 2.])

    if stage == 'train':
        crop_width = crop_width * (1 + 0.25)
        crop_height = crop_height * (1 + 0.25)

    if crop_height / height > crop_width / width:
        crop_size = crop_height
        min_shape = height
    else:
        crop_size = crop_width
        min_shape = width
    crop_size = min(crop_size, objcenter[0] / width * min_shape * 2. - 1.)
    crop_size = min(
        crop_size, (bimg.shape[1] - objcenter[0]) / width * min_shape * 2. - 1)
    crop_size = min(crop_size, objcenter[1] / height * min_shape * 2. - 1.)
    crop_size = min(crop_size,
                    (bimg.shape[0] - objcenter[1]) / height * min_shape * 2. -
                    1)

    min_x = int(objcenter[0] - crop_size / 2. / min_shape * width)
    max_x = int(objcenter[0] + crop_size / 2. / min_shape * width)
    min_y = int(objcenter[1] - crop_size / 2. / min_shape * height)
    max_y = int(objcenter[1] + crop_size / 2. / min_shape * height)

    x_ratio = float(width) / (max_x - min_x)
    y_ratio = float(height) / (max_y - min_y)

    if 'joints' in d:
        joints[:, 0] = joints[:, 0] - min_x
        joints[:, 1] = joints[:, 1] - min_y

        joints[:, 0] *= x_ratio
        joints[:, 1] *= y_ratio
        label = joints[:, :2].copy()
        valid = joints[:, 2].copy()

    img = cv2.resize(bimg[min_y:max_y, min_x:max_x, :], (width, height))
    '''
    if d["score"] >= 0.1:
        cv2.imshow("hoge", img)
        cv2.waitKey(0)
    '''

    if stage != 'train':
        details = np.asarray(
            [min_x - add, min_y - add, max_x - add, max_y - add])

    if cfg.use_seg is True and 'segmentation' in d:
        seg = get_seg(ori_img.shape[0], ori_img.shape[1], d['segmentation'])
        add = max(seg.shape[0], seg.shape[1])
        bimg = cv2.copyMakeBorder(seg,
                                  add,
                                  add,
                                  add,
                                  add,
                                  borderType=cv2.BORDER_CONSTANT,
                                  value=(0, 0, 0))
        seg = cv2.resize(bimg[min_y:max_y, min_x:max_x], (width, height))
        segms.append(seg)

    if vis:
        tmpimg = img.copy()
        from utils.visualize import draw_skeleton
        # draw_skeleton(tmpimg, label.astype(int))
        draw_skeleton(tmpimg, 0)
        cv2.imwrite('vis.jpg', tmpimg)
        from IPython import embed
        embed()

    img = img - cfg.pixel_means
    if cfg.pixel_norm:
        img = img / 255.

    img = img.transpose(2, 0, 1)
    imgs.append(img)

    if 'joints' in d:
        labels.append(label.reshape(-1))
        valids.append(valid.reshape(-1))

    if stage == 'train':
        imgs, labels, valids = data_augmentation(imgs, labels, valids)
        heatmaps15 = joints_heatmap_gen(imgs,
                                        labels,
                                        cfg.output_shape,
                                        cfg.data_shape,
                                        return_valid=False,
                                        gaussian_kernel=cfg.gk15)
        heatmaps11 = joints_heatmap_gen(imgs,
                                        labels,
                                        cfg.output_shape,
                                        cfg.data_shape,
                                        return_valid=False,
                                        gaussian_kernel=cfg.gk11)
        heatmaps9 = joints_heatmap_gen(imgs,
                                       labels,
                                       cfg.output_shape,
                                       cfg.data_shape,
                                       return_valid=False,
                                       gaussian_kernel=cfg.gk9)
        heatmaps7 = joints_heatmap_gen(imgs,
                                       labels,
                                       cfg.output_shape,
                                       cfg.data_shape,
                                       return_valid=False,
                                       gaussian_kernel=cfg.gk7)

        return [
            imgs.astype(np.float32).transpose(0, 2, 3, 1),
            heatmaps15.astype(np.float32).transpose(0, 2, 3, 1),
            heatmaps11.astype(np.float32).transpose(0, 2, 3, 1),
            heatmaps9.astype(np.float32).transpose(0, 2, 3, 1),
            heatmaps7.astype(np.float32).transpose(0, 2, 3, 1),
            valids.astype(np.float32)
        ]
    else:
        return [np.asarray(imgs).astype(np.float32), details]
Code example #49
def main():
    print("BigMAC Android Policy Processor")
    print(" by Grant Hernandez (https://hernan.de/z)")
    print("")

    parser = argparse.ArgumentParser()
    parser.add_argument('--vendor', required=True)
    parser.add_argument("policy_name")

    parser.add_argument('--debug',
                        action='store_true',
                        help="Enable debug logging.")

    parser.add_argument(
        '--debug-init',
        action='store_true',
        help="Drop into an IPython shell after simulating the boot process.")
    parser.add_argument('--skip-boot',
                        action='store_true',
                        help="Don't simulate the boot process.")

    parser.add_argument('--draw-graph', action='store_true')
    parser.add_argument('--focus-set')

    parser.add_argument('--save',
                        action='store_true',
                        help="Save the instantiated policy graph.")
    parser.add_argument('--load',
                        action='store_true',
                        help="Reload the saved instantiated policy graph.")
    parser.add_argument('--save-policy',
                        action='store_true',
                        help="Generate selinux.txt for debugging.")
    parser.add_argument('--list-objects', action='store_true')

    parser.add_argument('--dont-expand-objects', action='store_true')
    parser.add_argument(
        '--prolog',
        action='store_true',
        help="Compile Prolog helpers and start the query engine")

    args = parser.parse_args()

    if args.load and args.save:
        log.info("--load and --save are exclusive options")
        return 1

    if args.debug:
        logging.getLogger().setLevel(logging.DEBUG)

    if args.policy_name[-1] == "/":
        args.policy_name = args.policy_name[:-1]
    args.policy_name = os.path.basename(args.policy_name)

    policy_results_dir = os.path.join(POLICY_RESULTS_DIR, args.vendor.lower(),
                                      args.policy_name)

    if not os.access(policy_results_dir, os.R_OK):
        log.error("Policy directory does not exist or is not readable")
        return 1

    log.info("Loading android security policy %s (%s)", args.policy_name,
             args.vendor)

    asp = AndroidSecurityPolicy(args.vendor, args.policy_name)
    aspc = ASPCodec(asp)

    try:
        asp = aspc.load()
    except ValueError as e:
        log.error("Saved policy is corrupt: %s", e)
        return 1

    android_version = asp.get_android_version()
    major, minor, revision = android_version
    log.info("Image summary: %s", asp.get_properties()["summary"])

    # Treble handling
    if major == 8:
        file_contexts = read_file_contexts(
            asp.get_saved_file_path("plat_file_contexts"))
        file_contexts += read_file_contexts(
            asp.get_saved_file_path("nonplat_file_contexts"))
    elif major >= 9:
        # find_cp_redundancy(asp)
        file_contexts = read_file_contexts(
            asp.get_saved_file_path("plat_file_contexts"))
        file_contexts += read_file_contexts(
            asp.get_saved_file_path("vendor_file_contexts"))
    else:
        file_contexts = read_file_contexts(
            asp.get_saved_file_path("file_contexts"))

    log.info("Loaded %d file contexts", len(file_contexts))
    primary_filesystem = asp.fs_policies[0]

    if args.load:
        try:
            inst = aspc._load_db("inst")
        except ValueError as e:
            log.error("Unable to load saved instantiation: %s", e)
            return 1
    else:
        inst, graph = main_process(args, asp, aspc, file_contexts,
                                   primary_filesystem, android_version)

    if inst is None:
        return

    if args.debug:
        from IPython import embed
        oldlevel = logging.getLogger().getEffectiveLevel()
        logging.getLogger().setLevel(logging.INFO)
        embed()
        logging.getLogger().setLevel(oldlevel)

    if args.list_objects:
        output_filename = '%s-files.txt' % (args.policy_name.replace(
            "..", "_").replace(os.sep, ""))
        log.info("Saving file list to %s", output_filename)

        with open(output_filename, 'w') as fp:
            fp.write(primary_filesystem.list_path("*"))

        output_filename = '%s-processes.txt' % (args.policy_name.replace(
            "..", "_").replace(os.sep, ""))
        log.info("Saving process list to %s", output_filename)

        with open(output_filename, 'w') as fp:
            fp.write(inst.list_processes())

    if args.prolog:
        G = inst.fully_instantiate()

        pl = Prolog(G, aspc.db_dir, inst, asp)

        if pl.compile_all():
            pl.interact()

    if args.draw_graph:
        GDF = graph["graphs"]["dataflow"]
        GP = graph["graphs"]["process"]
        GSUB = graph["graphs"]["subject"]

        focus_set = set()
        if args.focus_set:
            focus_names = args.focus_set.split(",")
            focus_set = set(focus_names)

        plot(GDF, "dataflow.svg", prune=True, debug=False, focus_set=focus_set)
        plot(GP, "process.svg", debug=False)
        plot(GSUB, "subject.svg", debug=False)

    return 0
Code example #50
def train():

    train_dir = DATA_DIR + 'trainset/'
    train_label_dir = DATA_DIR + 'train_label.csv'
    test_dir = DATA_DIR + 'testset/'
    test_label_dir = DATA_DIR + 'test_label.csv'

    train_log_dir = DATA_DIR + 'logs/train/'
    val_log_dir = DATA_DIR + 'logs/val/'

    tra_image_batch, tra_label_batch = input_data.read_galaxy11(
        data_dir=train_dir, label_dir=train_label_dir, batch_size=BATCH_SIZE)
    val_image_batch, val_label_batch = input_data.read_galaxy11_test(
        data_dir=test_dir, label_dir=test_label_dir, batch_size=BATCH_SIZE)
    embed()

    x = tf.placeholder(tf.float32, [BATCH_SIZE, 64, 64, 3])
    y_ = tf.placeholder(tf.float32, [BATCH_SIZE, N_CLASSES])
    #    keep_prob=tf.placeholder(tf.float32)

    with slim.arg_scope(resnet_v2.resnet_arg_scope()):
        logits, end_points, output0, output1 = resnet_v2.resnet_v2_26_2(
            x, N_CLASSES, is_training=True)

    loss = resnet_v2.loss(logits, y_)
    #    rmse=resnet_v2.compute_rmse(logits, y_)
    accuracy = resnet_v2.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = resnet_v2.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)

    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run(
                [tra_image_batch, tra_label_batch])
            _, tra_loss, tra_acc, summary_str = sess.run(
                [train_op, loss, accuracy, summary_op],
                feed_dict={
                    x: tra_images,
                    y_: tra_labels
                })

            if step % 50 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, tra_loss: %.4f, tra_accuracy: %.2f%%' %
                      (step, tra_loss, tra_acc))
                #                summary_str = sess.run(summary_op,feed_dict={x:tra_images, y_:tra_labels})
                tra_summary_writer.add_summary(summary_str, step)

            if step % 200 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [val_image_batch, val_label_batch])
                val_loss, val_acc, summary_str = sess.run(
                    [loss, accuracy, summary_op],
                    feed_dict={
                        x: val_images,
                        y_: val_labels
                    })
                print(
                    '**  Step %d, test_loss = %.4f, test_accuracy = %.2f%%  **'
                    % (step, val_loss, val_acc))
                #                summary_str = sess.run([summary_op],feed_dict={x:val_images,y_:val_labels})
                val_summary_writer.add_summary(summary_str, step)

            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)

    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Code example #51
    def cangoto(self, node2, obs):
        # 1. Rotate to target orientation. Note that ori and ori + 180 are both acceptable targets
        rotationflag = -1
        try:
            nearestpoints = reduce(lambda x, y: x + y,
                                   [x.nearestpoints(self.pos) for x in obs])
        except AssertionError as e:
            print(e)
            embed()
        nearestpoints = list(
            filter(lambda x: linalg.norm(x - self.pos) <= RRTNode._r,
                   nearestpoints))
        nporis = [self.vec2angle(x - self.pos) for x in nearestpoints]

        oldoris = [
            self.ori - RRTNode._theta, self.ori + RRTNode._theta,
            self.ori + 180 - RRTNode._theta, self.ori + 180 + RRTNode._theta
        ]

        doris = []
        target = node2.ori
        now = self.ori
        if target < now:
            target += 180.
        dori1 = target - now
        target -= 180.
        dori2 = now - target

        oldposs = self.getvertices(dori=dori1)
        newposs = node2.getvertices()

        collide11 = any(
            [any([(x - y) % 360 < dori1 for y in oldoris]) for x in nporis])
        collide21 = any(
            [any([(y - x) % 360 < dori2 for y in oldoris]) for x in nporis])
        if debug:
            embed()
        if collide11 and collide21:
            print("\tCollide 1")
            return False
        """
        collide2 = any([Obj.pintriangle(x, self.pos, oldposs[0], oldposs[1]) for x in nearestpoints])
        if collide2:
            print("\tCollode 2")
            return False

        collide3 = any([Obj.pintriangle(x, self.pos, oldposs[2], oldposs[3]) for x in nearestpoints])
        if collide3:
            print("\tCollide 3")
            return False
        """
        if not collide11:
            doris.append(dori1)
        if not collide21:
            doris.append(dori2)

        # 2. Translation
        oldposs = self.getvertices(dori=dori1)
        # oldposs = oldposs if rotationflag == 1 else [
        # oldposs[2], oldposs[3], oldposs[0], oldposs[1]]    # Counterclockwise. Swap vertice idx
        points = np.vstack([oldposs, newposs])
        hull = spatial.ConvexHull(points)
        points = points[hull.vertices]
        tris = Obj.triangulate(points)
        for ob in obs:
            for obtri in ob.tri:
                for tri in tris:
                    p1, p2, p3, q1, q2, q3 = ob.vertices[
                        obtri[0]], ob.vertices[obtri[1]], ob.vertices[obtri[
                            2]], points[tri[0]], points[tri[1]], points[tri[2]]
                    if debug:
                        embed()
                    if Obj.triangleoverlap(p1, p2, p3, q1, q2, q3):
                        print("\tCollide 4")
                        return False

        print("\tNo collision detected.")
        return True
Code example #52
def main(unused_argv=()):

    # load default data
    data = utils.get_data_retina()

    # verify utils
    utils.verify_data(data)

    #########################################################################

    ## Try some architecture.

    embedx = 20
    embedy = 10
    stim_history = 30
    batch_size = 1000
    batch_neg_resp = 100
    beta = 10
    is_training = True
    with tf.Session() as sess:
        ei_embedding, ei_tf = em.embed_ei(embedx,
                                          embedy,
                                          data['eix'],
                                          data['eiy'],
                                          data['n_elec'],
                                          data['ei_embedding_matrix'],
                                          is_training=is_training)

        responses_embedding, responses_tf = em.embed_responses(
            embedx, embedy, ei_embedding, is_training=is_training)

        stimulus_embedding, stim_tf = em.embed_stimulus(
            embedx,
            embedy,
            data['stimx'],
            data['stimy'],
            stim_history=stim_history,
            is_training=is_training)

        responses_embedding_pos = tf.gather(
            responses_embedding,
            np.arange(batch_size).astype(np.int))

        responses_embedding_neg = tf.gather(
            responses_embedding,
            np.arange(batch_size, batch_neg_resp + batch_size).astype(np.int))
        d_pos = tf.reduce_sum(
            (stimulus_embedding - responses_embedding_pos)**2, [1, 2])
        d_neg_pairs = tf.reduce_sum(
            (tf.expand_dims(responses_embedding_pos, 1) -
             tf.expand_dims(responses_embedding_neg, 0))**2, [2, 3])
        d_neg = -tf.reduce_logsumexp(-d_neg_pairs / beta, 1)
        loss = tf.reduce_sum(tf.nn.relu(d_pos - d_neg + 1))

        train_op = tf.train.AdamOptimizer(0.01).minimize(loss)
        sess.run(tf.global_variables_initializer())

        from IPython import embed
        embed()

        for _ in range(10000):
            stim_batch, resp_batch, ei_batch, resp_batch_neg = get_train_batch(
                data,
                batch_size=batch_size,
                batch_neg_resp=batch_neg_resp,
                stim_history=stim_history)
            feed_dict = {
                ei_tf: ei_batch,
                responses_tf: np.append(resp_batch, resp_batch_neg, 0),
                stim_tf: stim_batch
            }
            loss_np, _ = sess.run([loss, train_op], feed_dict=feed_dict)
            print(loss_np)
Code example #53
def chirp_analysis(dataset, dataset_dict, keys, id):
    # load nix files, eod_array consists of amplitude values
    nf = nix.File.open(dataset, nix.FileMode.ReadOnly)
    b = nf.blocks[0]
    eod_array = b.data_arrays['EOD-1']
    dt = eod_array.dimensions[0].sampling_interval

    for k in keys:
        print(k)
        # stripped key for file names
        k_str = str(k).replace(' ', '')

        loops_frequency = []
        loops_time = []
        loops_raw_eod = []
        loops_raw_time = []
        loops_valid_eod = []

        start = []
        stop = []
        timespan = []

        for mt_id, position in dataset_dict[k]:  # look here: weird mt_ids and positions
            # retrieve eod trace
            mt = b.multi_tags[mt_id]
            # eod = mt.references("EOD-1")

            # retrieve pre_data (before stimulus onset to get reference)
            di = int(10.0 / dt)
            i0 = int(mt.positions[position][0] / dt)
            i1 = i0 + int(mt.extents[position][0] / dt)
            pre_data = eod_array[i0 - di:i0]

            # 'glue' eod and pre_data together
            trace = np.hstack((pre_data, eod_array[i0:i1]))


            trace = trace - np.median(trace)

            # time for the length of whole data, including start/stop/timespan
            time = np.arange(0, len(trace), 1, dtype=np.double) * dt

            start.append(time[0])
            stop.append(time[-1])
            timespan.append((np.arange(0, len(trace)) * dt)[-1])

            # smooth the data with a small box filter
            print('smoothing..')
            kernel_core = 5
            kernel = np.ones(kernel_core) / kernel_core

            # create head and tail filled with mean
            head_mean = np.mean(trace[:kernel_core])
            head = np.full((1, kernel_core), head_mean)
            head = head[0]

            tail_mean = np.mean(trace[-kernel_core:])  # mean of the last kernel_core samples
            tail = np.full((1, kernel_core), tail_mean)
            tail = tail[0]

            # prolong trace with head and tail
            prolonged_trace = np.hstack((head, trace, tail))

            # use new freq with mode same, boundarys effects are outside of wanted time now
            prolonged_convolve_trace = np.convolve(kernel, prolonged_trace, mode='same')

            # remove head and tail and therefore boundaries
            prolonged_convolve_trace = prolonged_convolve_trace[kernel_core:]
            prolonged_convolve_trace = prolonged_convolve_trace[:-kernel_core]

            trace = prolonged_convolve_trace - np.mean(prolonged_convolve_trace)

            # duration (extents is only for eod data), onset und offset of stimulus
            offset = (np.arange(0, len(trace)) * dt)[-1]
            onset = (np.arange(0, len(pre_data) * dt))[-1]

            frequency, eod_times = detect_eod_frequency(time, trace)
            eodf = np.median(frequency)

            # times, frequency = detect_eod_frequency_spectrum(time, trace, nfft=2**15, overlap=0.999)

            print('validating..')
            period = 1 / eodf
            segment = 1.25 * period

            # filter eod_times bigger than the last eod_time
            for idx, e in enumerate(eod_times):
                if e > eod_times[-1]:
                    eod_times[idx] = eod_times[idx + 1] - period

            # make window for every eod_time, get max out of it
            eod_max = []
            for et in eod_times:
                start_segment = et - segment/2
                stop_segment = et + segment/2
                window_eod = trace[(time >= start_segment) & (time < stop_segment)]
                if len(window_eod) == 0:
                    print('window_eod empty')
                    embed()
                max_eod = np.max(window_eod)
                eod_max.append(max_eod)

            # valid eod = eod values bigger than the half of the maximum eod (to filter out fish turns)
            valid_eod = eod_max > (0.6 * np.max(eod_max))
            valid_eod = valid_eod[:-1]

            # valid eod contains either True (bigger than 0.5*max) or False (smaller than 0.5*max)
            # values smaller (False) will be but as NaN, next step in chirp_analysis2
            frequency[valid_eod < 1.0] = np.nan

            eod_times = eod_times[:-1]

            # append the whole data to lists
            loops_valid_eod.append(valid_eod)
            loops_time.append(eod_times)
            loops_frequency.append(frequency)
            loops_raw_eod.append(trace)
            loops_raw_time.append(time)

            # plt.plot(time, trace)
            # plt.scatter(eod_times, valid_eod, color='orange')
            # plt.show()

        # save data to npy files at the following places
        savepath = '/home/localadmin/PycharmProjects/hilfloser_hiwi_project/saves/%s/%s' % (dataset[-17:-4], k_str)
        key_savepath = '/home/localadmin/PycharmProjects/hilfloser_hiwi_project/saves/%s/keys' % dataset[-17:-4]

        np.save(key_savepath + '/%s_key.npy' % k_str, k_str)
        np.save(savepath + '/%s_loops_frequency.npy' % k_str, loops_frequency)
        np.save(savepath + '/%s_loops_time.npy' % k_str, loops_time)
        np.save(savepath + '/%s_loops_raw_eod.npy' % k_str, loops_raw_eod)
        np.save(savepath + '/%s_loops_raw_time.npy' % k_str, loops_raw_time)
        np.save(savepath + '/%s_loops_valid_eod.npy' % k_str, loops_valid_eod)
        np.save(savepath + '/%s_stop.npy' % k_str, stop)
        np.save(savepath + '/%s_start.npy' % k_str, start)
        np.save(savepath + '/%s_timespan.npy' % k_str, timespan)

    pass
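
The smoothing step above pads the trace with its local mean at both ends before convolving, so the moving-average filter does not drag boundary artifacts into the analysed window. A minimal, self-contained sketch of that idea (function name and default kernel size are illustrative, not taken from the original project):

import numpy as np

def smooth_edge_padded(trace, kernel_size=5):
    """Moving average that pads both ends with the local mean so that
    boundary effects stay outside the returned data."""
    kernel = np.ones(kernel_size) / kernel_size
    head = np.full(kernel_size, trace[:kernel_size].mean())
    tail = np.full(kernel_size, trace[-kernel_size:].mean())
    padded = np.concatenate((head, trace, tail))
    smoothed = np.convolve(padded, kernel, mode='same')
    # drop the artificial padding again
    return smoothed[kernel_size:-kernel_size]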
Code example #54
def main_process(args, asp, aspc, file_contexts, primary_filesystem,
                 android_version):
    init = AndroidInit(aspc.results_dir, asp.properties, primary_filesystem)

    determine_hardware(asp, primary_filesystem, init)

    init.read_configs("/init.rc")

    if not args.skip_boot:
        init.boot_system()

    if args.debug_init:
        from IPython import embed
        oldlevel = logging.getLogger().getEffectiveLevel()
        logging.getLogger().setLevel(logging.INFO)
        embed()
        logging.getLogger().setLevel(oldlevel)

    ################################
    # Parse SEPolicy file
    ################################

    try:
        sepolicy = None

        if "sepolicy" in asp.policy_files:
            sepolicy = asp.get_saved_file_path("sepolicy")
        elif "precompiled_sepolicy" in asp.policy_files:
            sepolicy = asp.get_saved_file_path("precompiled_sepolicy")

        if not sepolicy:
            log.error("No compiled sepolicy found. Cannot continue")
            return 1

        if args.save_policy:
            policy = SELinuxPolicyDump(sepolicy)
            log.info("Saving sepolicy.txt")
            policy_fp = open(os.path.join(aspc.results_dir, "sepolicy.txt"),
                             'w')
            policy_fp.write(str(policy))
            policy_fp.close()

        policy_graph = SELinuxPolicyGraph(sepolicy)
    except OSError:
        log.error(
            "Unable to load SEAndroid policy file. Use --debug for more details"
        )
        return 1

    log.info("Building SEPolicy graph")
    policy_graph.find_useless_type()
    graph = policy_graph.build_graph()

    log.info("Created SEPolicy graph with %d nodes and %d edges",
             len(graph["graphs"]["allow"].nodes()),
             len(graph["graphs"]["allow"].edges()))

    log.info("Overlaying policy to filesystems")

    # Overlay DAC/filesystem data to the SEAndroid policy
    inst = SEPolicyInst(primary_filesystem, graph, file_contexts, init,
                        android_version)
    result = inst.instantiate(draw_graph=args.draw_graph,
                              expand_obj=not args.dont_expand_objects,
                              skip_fileless=True)

    if not result:
        log.error("Unable to instantiate the SEPolicy")
        return None

    if args.save:
        #inst.subjects = None
        #inst.processes = None
        #inst.objects = None
        #inst.subject_groups = None
        #inst.domain_attributes = None
        #inst.android_version = None
        #inst.filesystem = None
        #inst.sepolicy = None
        #inst.file_contexts = None
        #inst.init = None
        #inst.file_mapping = {}
        aspc._save_db(inst, "inst")

    return inst, graph
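
Example #54 temporarily raises the root logger level around its interactive embed() so the shell is not flooded with debug output, then restores it afterwards. The same pattern, written as a reusable context manager (a sketch; the name quiet_logging is not part of the original tool):

import logging
from contextlib import contextmanager

@contextmanager
def quiet_logging(level=logging.INFO):
    """Temporarily raise the root logger level, e.g. around an interactive embed()."""
    root = logging.getLogger()
    old_level = root.getEffectiveLevel()
    root.setLevel(level)
    try:
        yield
    finally:
        root.setLevel(old_level)

# usage:
#     with quiet_logging():
#         embed()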
Code example #55
File: pyt_utils.py  Project: dontLoveBugs/MyTorch
def _dbg_interactive(var, value):
    from IPython import embed
    embed()
Code example #56
                                             bins) * multiplicity
    elif mean_intensity_method == "anisotropic":
        Sigma = mean_intensity_by_miller_index(I / multiplicity, ds.get_hkls(),
                                               bw) * multiplicity

    # Initialize outputs
    ds[outputI] = 0.
    ds[outputSigI] = 0.

    mean_I, std_I, mean_F, std_F = _french_wilson_posterior_quad(
        ds[intensity_key].to_numpy(), ds[sigma_key].to_numpy(), Sigma,
        ds.CENTRIC.to_numpy())

    # Convert dtypes of columns to MTZDtypes
    ds[outputI] = rs.DataSeries(mean_I, index=ds.index, dtype="Intensity")
    ds[outputSigI] = rs.DataSeries(std_I, index=ds.index, dtype="Stddev")
    ds[outputF] = rs.DataSeries(mean_F, index=ds.index, dtype="SFAmplitude")
    ds[outputSigF] = rs.DataSeries(std_F, index=ds.index, dtype="Stddev")

    return ds


if __name__ == "__main__":  # pragma: no cover
    import reciprocalspaceship as rs
    from sys import argv
    ds = rs.read_mtz(argv[1]).dropna()
    ds = ds.stack_anomalous()
    ds = scale_merged_intensities(ds, "IMEAN", "SIGIMEAN")
    from IPython import embed
    embed(colors='Linux')
Code example #57
def get_data_for_singlegpu(batch_lines):
    batch_records = []
    hw_stat = np.zeros((config.train_batch_per_gpu, 2), np.int32)
    batch_per_gpu = config.train_batch_per_gpu
    short_size = config.image_short_size
    max_size = config.image_max_size

    for i in range(len(batch_lines)):
        raw_line = batch_lines[i]
        record = json.loads(raw_line)
        batch_records.append(record)
        hw_stat[i, :] = record['height'], record['width']

    if config.batch_image_preprocess == 'pad':
        batch_image_height = np.max(hw_stat[:, 0])
        batch_image_width = np.max(hw_stat[:, 1])
    else:
        from IPython import embed
        print("other type is not implemented")
        embed()

    # from IPython import embed;
    # embed()
    is_batch_ok = True
    filter_box_size = config.batch_filter_box_size
    batch_resized_height, batch_resized_width = get_hw_by_short_size(
        batch_image_height, batch_image_width, short_size, max_size)

    batch_images = np.zeros(
        (batch_per_gpu, batch_resized_height, batch_resized_width, 3),
        dtype=np.float32)
    batch_gts = np.zeros(
        (batch_per_gpu, config.max_boxes_of_image, config.nr_box_dim),
        dtype=np.float32)
    batch_info = np.zeros((batch_per_gpu, config.nr_info_dim),
                          dtype=np.float32)

    for i in range(batch_per_gpu):
        record = batch_records[i]
        # process the images
        image_path = config.train_root_folder + record['fpath']
        img = cv2.imread(image_path, cv2.IMREAD_COLOR)
        while img is None:
            img = cv2.imread(image_path, cv2.IMREAD_COLOR)

        gtboxes = record['gtboxes']
        gt_boxes = BoxUtil.parse_gt_boxes(gtboxes)
        gt = np.zeros((len(gt_boxes), 5))
        gt_idx = 0
        for j, gb in enumerate(gt_boxes):
            if gb.ign != 1 or not config.filter_gt_ignore_label:
                gt[gt_idx, :] = [
                    gb.x, gb.y, gb.x1, gb.y1,
                    config.class_names.index(gb.tag)
                ]
                gt_idx += 1
            elif config.train_gt_ignore_label:
                gt[gt_idx, :] = [
                    gb.x, gb.y, gb.x1, gb.y1, config.anchor_ignore_label
                ]
                gt_idx += 1

        if gt_idx == 0:
            is_batch_ok = False
            break
        gt = gt[:gt_idx, :]

        padded_image, padded_gt = pad_image_and_boxes(img, batch_image_height,
                                                      batch_image_width,
                                                      config.image_mean, gt)
        # filter the images with box_size < config.train_min_box_size
        hs_gt = padded_gt[:, 3] - padded_gt[:, 1] + 1
        ws_gt = padded_gt[:, 2] - padded_gt[:, 0] + 1
        keep = (ws_gt >= filter_box_size) * (hs_gt >= filter_box_size)
        if keep.sum() == 0:
            is_batch_ok = False
            break
        else:
            padded_gt = padded_gt[keep, :]

        original_height, original_width, channels = padded_image.shape
        resized_image, scale = resize_img_by_short_and_max_size(
            padded_image, short_size, max_size)
        padded_gt[:, 0:4] *= scale
        resized_gt = padded_gt

        nr_gtboxes = resized_gt.shape[0]

        if np.random.randint(2) == 1:
            resized_image, resized_gt = flip_image_and_boxes(
                resized_image, resized_gt)

        resized_image = resized_image.astype(np.float32) - config.image_mean
        batch_images[i] = resized_image[:, :, [2, 1, 0]]
        batch_gts[i, :nr_gtboxes] = resized_gt
        batch_info[i, :] = (resized_image.shape[0], resized_image.shape[1],
                            scale, original_height, original_width, nr_gtboxes)

    return dict(data=batch_images,
                boxes=batch_gts,
                im_info=batch_info,
                is_valid=is_batch_ok)
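
The batching code above depends on get_hw_by_short_size / resize_img_by_short_and_max_size, whose implementations are not shown here. A common detection-pipeline convention, and a reasonable assumption for what they do, is to scale the short side to short_size while capping the long side at max_size, then rescale the boxes by the same factor:

def resize_hw_by_short_side(height, width, short_size, max_size):
    """Return (new_h, new_w, scale): short side scaled to short_size,
    long side capped at max_size (assumed behaviour, illustrative only)."""
    scale = short_size / min(height, width)
    if round(scale * max(height, width)) > max_size:
        scale = max_size / max(height, width)
    return int(round(height * scale)), int(round(width * scale)), scale

# the ground-truth boxes are then rescaled with the same factor, as in the
# snippet above: padded_gt[:, 0:4] *= scale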
Code example #58
File: trends.py  Project: CacheMoneyBot/gtrends-beta
def quarterly_queries(keywords,
                      category,
                      cookies,
                      session,
                      domain,
                      throttle,
                      filing_date,
                      ggplot,
                      month_offset=[-12, 12],
                      trends_url=DEFAULT_TRENDS_URL):
    """Gets interest data (quarterly) for the 12 months before and 12 months after specified date, then gets interest data for the whole period and merges this data.

		month_offset: [no. month back, no. months forward] to query
	Returns daily data over the period.
	"""

    aw_range = arrow.Arrow.range
    begin_period = aget(filing_date).replace(months=month_offset[0])
    ended_period = aget(filing_date).replace(months=month_offset[1])

    # Set up date ranges to iterate queries across
    start_range = aw_range('month', YYYY_MM(begin_period),
                           YYYY_MM(ended_period))
    ended_range = aw_range('month',
                           YYYY_MM(begin_period).replace(months=3),
                           YYYY_MM(ended_period).replace(months=3))

    start_range = [r.datetime for r in start_range][::3]
    ended_range = [r.datetime for r in ended_range][::3]

    # Fix last date if incomplete quarter (offset -1 week from today)
    last_week = arrow.utcnow().replace(weeks=-1).datetime
    start_range = [d for d in start_range if d < last_week]
    ended_range = [d for d in ended_range if d < last_week]
    if len(ended_range) < len(start_range):
        ended_range += [last_week]

    # Iterate attention queries through each quarter
    all_data = []
    missing_queries = []  # use this to scale IoT later.
    for start, end in zip(start_range, ended_range):
        if start > last_week:
            break

        print("Querying period: {s} ~ {e}".format(s=start.date(),
                                                  e=end.date()))
        throttle_rate(throttle)

        response_args = {
            'url': trends_url.format(domain=domain),
            'params': _query_parameters(start, end, keywords, category),
            'cookies': cookies,
            'session': session
        }

        query_data = _check_data(
            keywords, _process_response(_get_response(**response_args)))

        # from IPython import embed; embed()
        if query_data[1] == '':
            query_data = [[date, '0']
                          for date in arrow.Arrow.range('day', start, end)]
            missing_queries.append('missing')
        if all(int(vals) == 0 for date, vals in query_data):
            query_data = [[date, '0']
                          for date in arrow.Arrow.range('day', start, end)]
            missing_queries.append('missing')
        elif len(query_data[0][0]) > 10:
            missing_queries.append('weekly')
        else:
            missing_queries.append('daily')

        try:
            if not aligned_weekly(query_data, all_data):
                ## Workaround: shift filing date
                q1 = weekly_date(all_data[-1][-1][0])
                q2 = weekly_date(query_data[0][0])

                if q1 < q2:
                    start = arrow.get(start).replace(months=-1)
                    response_args['params'] = _query_parameters(
                        start, end, keywords, category)
                    ## Do a new 4-month query and overlap/replace the previous month.
                    query_data = _check_data(
                        keywords,
                        _process_response(_get_response(**response_args)))
                    if all_data[:-1] != []:
                        q2 = weekly_date(query_data[0][0], 'start')
                        all_data[-1] = [
                            d for d in all_data[-1] if q2 > weekly_date(d[0])
                        ]

                elif q1 >= q2:
                    # if q1 > 1st date in query_data, remove the first few entries
                    query_data = [
                        d for d in query_data if q1 < weekly_date(d[0])
                    ]

        except IndexError:
            pass
        except:
            from IPython import embed
            embed()

        finally:
            all_data.append(query_data)

    # Get overall long-term trend data across entire queried period
    s = begin_period.replace(weeks=-2).datetime
    e1 = arrow.get(ended_range[-1]).replace(months=+1).datetime
    e2 = arrow.utcnow().replace(weeks=-1).datetime
    e = min(e1, e2)
    print("\n=> Merging with overall period: {s} ~ {e}".format(s=s.date(),
                                                               e=e.date()))

    response_args = {
        'url': trends_url.format(domain=domain),
        'params': _query_parameters(s, e, keywords, category),
        'cookies': cookies,
        'session': session
    }

    query_data = _check_data(keywords,
                             _process_response(_get_response(**response_args)))

    if query_data[1] == '':
        adj_all_data = [[
            str(date.date()), int(zero)
        ] for date, zero in zip(*interpolate_ioi(*zip(*sum(all_data, []))))]

    elif len(query_data) > 1:
        # compute changes in IoI (interest over time) per quarter
        # and merge the quarters together after interpolating the
        # weekly data to daily resolution.
        # We cannot mix quarters directly because Google normalizes each query separately.
        all_ioi_delta = []
        qdat_interp = []
        for quarter_data in all_data:
            if quarter_data != []:
                quarter_data = [x for x in quarter_data if x[1] != '']
                all_ioi_delta += list(zip(*change_in_ioi(*zip(*quarter_data))))

                if ggplot:
                    qdat_interp += interpolate_ioi(*zip(*quarter_data))[1]
                    # for plotting only

        qdate = [date for date, delta_ioi in all_ioi_delta]
        delta_ioi = [delta_ioi for date, delta_ioi in all_ioi_delta]

        try:
            ydate = [
                date[-10:] if len(date) > 10 else date
                for date, ioi in query_data
            ]
            yIoI = [float(ioi) for date, ioi in query_data]
        except:
            from IPython import embed
            embed()
            yIoI = [float(ioi) for date, ioi in query_data[:-1]]
        ydate, yIoI = interpolate_ioi(ydate, yIoI)

        # match quarterly and yearly dates and get correct delta IoI
        # common_date = [x for x in ydate+qdate if x in ydate and x in qdate]
        common_date = sorted(set(ydate) & set(qdate))

        delta_ioi = [
            delta_ioi for date, delta_ioi in zip(qdate, delta_ioi)
            if date in common_date
        ]
        y_ioi = [y for x, y in zip(ydate, yIoI) if x in common_date]

        # calculate daily %change in IoI and adjust weekly values
        adj_IoI = [ioi * mult for ioi, mult in zip(y_ioi, delta_ioi)]

        adj_all_data = [[str(date.date()), round(ioi, 2)]
                        for date, ioi in zip(common_date, adj_IoI)]
    else:
        adj_all_data = [[
            str(date.date()), int(zero)
        ] for date, zero in zip(*interpolate_ioi(*zip(*sum(all_data, []))))]

    # from IPython import embed; embed()
    heading = ["Date", keywords[0].title]
    querycounts = list(zip((d.date() for d in start_range), missing_queries))
    keywords[0].querycounts = querycounts

    if not ggplot:
        return [heading] + adj_all_data

    ## GGplot Only
    else:
        # GGPLOT MERGED GTRENDS PLOTS:
        import pandas as pd
        from ggplot import ggplot, geom_line, ggtitle, ggsave, scale_colour_manual, ylab, xlab, aes
        try:
            ydat = pd.DataFrame(list(zip(common_date, y_ioi)),
                                columns=["Date", 'Weekly series'])
            mdat = pd.DataFrame(list(zip(common_date, adj_IoI)),
                                columns=['Date', 'Merged series'])
            qdat = pd.DataFrame(list(zip(common_date, qdat_interp)),
                                columns=['Date', 'Daily series'])
            ddat = ydat.merge(mdat, on='Date').merge(qdat, on='Date')
            ddat['Date'] = list(map(pd.to_datetime, ddat['Date']))

            ydat['Date'] = list(map(pd.to_datetime, ydat['Date']))
            mdat['Date'] = list(map(pd.to_datetime, mdat['Date']))
            qdat['Date'] = list(map(pd.to_datetime, qdat['Date']))
        except UnboundLocalError as e:
            raise UnboundLocalError("No Interest-over-time to plot")

        # meltkeys = ['Date','Weekly series','Merged series','Daily series']
        # melt = pd.melt(ddat[meltkeys], id_vars='Date')

        colors = [
            '#77bde0',  # blue
            '#b47bc6',  # purple
            '#d55f5f'  # red
        ]

        entity_type = keywords[0].desc

        g = ggplot(aes(x='Date', y='Daily series' ), data=ddat) + \
         geom_line(aes(x='Date', y='Daily series'), data=qdat, alpha=0.5, color=colors[0]) + \
         geom_line(aes(x='Date', y='Merged series'), data=mdat, alpha=0.9, color=colors[1]) + \
         geom_line(aes(x='Date', y='Weekly series'), data=ydat, alpha=0.5, color=colors[2], size=1.5) + \
         ggtitle("Interest over time for '{}' ({})".format(keywords[0].keyword, entity_type)) + \
         ylab("Interest Over Time") + xlab("Date")

        # from IPython import embed; embed()

        print(g)
        # ggsave(BASEDIR + "/iot_{}.png".format(keywords[0].keyword), width=15, height=5)
        return [heading] + adj_all_data
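
quarterly_queries stitches the daily quarterly series onto the long-run weekly series with the helpers interpolate_ioi and change_in_ioi, which are imported elsewhere in this project. As a rough sketch of the interpolation half (assumed behaviour, not the project's actual helper), weekly interest points can be resampled onto a daily grid linearly:

import pandas as pd

def interpolate_to_daily(dates, values):
    """Linearly interpolate a weekly interest-over-time series onto a daily grid."""
    s = pd.Series(values, index=pd.to_datetime(list(dates)))
    return s.resample('D').interpolate('linear')

# e.g. interpolate_to_daily(['2020-01-05', '2020-01-12'], [10, 20])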
Code example #59
File: SemRec.py  Project: wpaladins/RSBook
    def fit(self, loaders, epochs=5):
        # training cycle
        best_score = 0.
        for epoch in range(epochs):
            losses = {'train': 0., 'valid': 0}

            for phase in ['train', 'valid']:

                if phase == 'train':
                    self.train()
                else:
                    self.eval()
                pbar = tqdm(enumerate(loaders[phase]),
                            total=len(loaders[phase]),
                            desc='({0}:{1:^3})'.format(phase, epoch + 1))
                for batch_idx, ((batch_U, batch_I), batch_y) in pbar:
                    self.optimizer.zero_grad()

                    batch_U = batch_U.long()
                    batch_I = batch_I.long()
                    batch_y = batch_y.float().to(self.device)
                    y_pred = self.forward(batch_U, batch_I)
                    loss = nn.MSELoss(reduction='sum')(y_pred, batch_y)

                    try:
                        loss_PI = 0

                        for p in self.metaPath['UU']:
                            s_p_k = self.hinSim[p][batch_U]  # U*K -> B*K: similarity values of the top-K neighbours
                            hin_index = self.hinSimI[p][
                                batch_U]  # U*K -> B*K: indices of the top-K neighbours
                            w_p_k = self.W_U(
                                hin_index)[:, :, self.path2id[p]].reshape(
                                    s_p_k.shape)  # B,K,1
                            w_p_i = self.W_U(batch_U)[:,
                                                      self.path2id[p]]  # B,1
                            hin_reg = (w_p_i.reshape(-1) -
                                       (w_p_k * s_p_k).sum(-1)).pow(2).sum()
                            loss_PI = loss_PI + (self.lambda_I * hin_reg).sum()
                    except Exception as ex:
                        print(ex)
                        from IPython import embed
                        embed()
                    loss += loss_PI
                    losses[phase] += loss.item()
                    batch_loss = loss.item() / batch_y.shape[0]
                    pbar.set_postfix(train_loss=batch_loss)

                    with torch.set_grad_enabled(phase == 'train'):
                        if phase == 'train':
                            loss.backward()
                            #                             scheduler.step()
                            self.optimizer.step()

                losses[phase] /= len(loaders[phase].dataset)

            with torch.no_grad():
                self.eval()

                y_pred, y_true = [], []

                for ((row, col), val) in loaders['valid']:
                    row = row.long()
                    col = col.long()
                    val = val.float()
                    preds = self.forward(row, col)
                    if IMPLICT:
                        preds = sigmoid(preds.cpu().numpy())
                    y_pred += preds.tolist()
                    y_true += val.tolist()
                y_true, y_pred = np.array(y_true), np.array(y_pred)
                if IMPLICT:
                    epoch_score = roc_auc_score(y_true, y_pred)
                    score = 'auc'
                else:
                    epoch_score = sum([
                        (y - x)**2 for x, y in zip(y_true, y_pred)
                    ]) / len(y_pred)
                    score = 'mse'

                user_item = loaders['valid'].dataset.user_item
                items = torch.arange(self.n_items).long()
                hit, rec_count, test_count, all_rec_items = 0, 0, 0, set()
                train_ui = loaders['train'].dataset.user_item
                for u in user_item:
                    target_items = user_item[u]

                    users = [int(u)] * self.n_items
                    users = torch.Tensor(users).long()
                    scores = self.forward(users, items)
                    if u in train_ui:
                        seen_items = np.array(list(train_ui[u].keys()))
                        scores[seen_items] = -1e9
                    recs = np.argsort(scores)[-10:].tolist()

                    for item in recs:  # iterate over the items recommended to this user
                        if item in target_items:  # the item appears in the test set
                            hit += 1  # count a recommendation hit
                        all_rec_items.add(item)
                    rec_count += 10
                    test_count += len(target_items)
                    precision = hit / (1.0 * rec_count)
                    recall = hit / (1.0 * test_count)
                    coverage = len(all_rec_items) / (1.0 * self.n_items)

            if ((epoch + 1) % 1) == 0:
                print(
                    f'epoch {epoch + 1} train loss: {losses["train"]:.3f} valid loss {losses["valid"]:.3f} {score} {epoch_score:.3f}'
                )
                print('precision=%.4f\trecall=%.4f\tcoverage=%.4f' %
                      (precision, recall, coverage))
        return
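
The evaluation block at the end of fit() accumulates hits over each user's top-10 list to report precision, recall and coverage. The same bookkeeping, pulled out into a small standalone helper (names and dict layout are illustrative, not taken from the original class):

def topn_metrics(recommendations, ground_truth, n_items, n=10):
    """Precision/recall/coverage of top-n recommendation lists.

    recommendations: {user: ranked list of item ids}
    ground_truth:    {user: set of relevant item ids}
    """
    hit, rec_count, test_count, all_rec_items = 0, 0, 0, set()
    for user, recs in recommendations.items():
        targets = ground_truth.get(user, set())
        top = recs[:n]
        hit += sum(1 for item in top if item in targets)
        all_rec_items.update(top)
        rec_count += n
        test_count += len(targets)
    precision = hit / max(rec_count, 1)
    recall = hit / max(test_count, 1)
    coverage = len(all_rec_items) / n_items
    return precision, recall, coverage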
Code example #60
def main():
    args = sys.argv[1:]
    Magneto.configure(*args)
    m = magneto = Magneto.instance()

    embed()