Example #1
def winpdb(depth=0):
    import rpdb2
    depth += 1
    if rpdb2.g_debugger is not None:
        return rpdb2.setbreak(depth)
    script = rpdb2.calc_frame_path(sys._getframe(depth))
    pwd = str(os.getpid()) + os.getcwd().replace('/', '_').replace('-', '_')
    pid = os.fork()
    if pid:
        try:
            rpdb2.start_embedded_debugger(pwd, depth=depth)
        finally:
            os.waitpid(pid, 0)
    else:
        try:
            os.execlp('python', 'python', '-c', """import os\nif not os.fork():
                import rpdb2, winpdb
                rpdb2_raw_input = rpdb2._raw_input
                rpdb2._raw_input = lambda s: \
                    s == rpdb2.STR_PASSWORD_INPUT and %r or rpdb2_raw_input(s)
                winpdb.g_ignored_warnings[winpdb.STR_EMBEDDED_WARNING] = True
                winpdb.main()
            """ % pwd, '-a', script)
        finally:
            os.abort()
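Note on the pattern above: the parent embeds the debugger while the forked child exec()s a fresh interpreter for the GUI, and os.abort() guarantees the child never falls back into the parent's code if the exec fails. A minimal Python 3 sketch of that fork/exec/abort skeleton (Unix only; the '-c' payload is a placeholder):

import os
import sys

pid = os.fork()
if pid:
    try:
        print('parent: forked child', pid)
    finally:
        os.waitpid(pid, 0)  # always reap the child
else:
    try:
        # replace this child's image; on success execlp never returns
        os.execlp(sys.executable, sys.executable, '-c', 'print("child")')
    finally:
        # reached only if exec failed: die hard instead of running
        # the parent's code paths inside a forked copy
        os.abort()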
Example #2
    def __createLHLL__(self):
        """
        Build a simple five-element listing for later use.  Each entry
        looks like [pipeName, prettyH, prettyL, H, L] and spans the
        options in [all], LH and LL.
        """
        i = 0
        h = float(self.LH[2])
        l = float(self.LL[2])
        deltah = float(self.LH[1])
        deltal = float(self.LL[1])
        stoph = float(self.LH[0])
        stopl = float(self.LL[0])
        if h > stoph:
            print "Error in config, inconsistent LH options."
            os.abort()
        if l > stopl:
            print "Error in config, inconsistent LL options."
            os.abort()
        while h <= stoph:
            l = float(self.LL[2])
            while l <= stopl:
                coord = [h, h * l]
                # Truncate to three decimals, then left-pad to eight chars.
                pH = str(coord[0])[:str(coord[0]).index(".") + 4].zfill(8)
                pL = str(coord[1])[:str(coord[1]).index(".") + 4].zfill(8)
                pipeIniName = self.installIni + '/' + self.batchMask + ':' + pH + ':' + pL + ':' + '.ini'
                self.FApipeNames.append([pipeIniName, pH, pL, coord[0], coord[1]])
                l = l + deltal
                i = i + 1
            h = h + deltah
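A note on the pH/pL formatting above: slicing at the decimal point truncates the float at three decimal places before zero-padding, whereas standard "%.3f" formatting rounds. A tiny sketch of the difference (hypothetical value):

h = 12.34567
rounded = ('%.3f' % h).zfill(8)                      # rounds:    '0012.346'
truncated = str(h)[:str(h).index('.') + 4].zfill(8)  # truncates: '0012.345'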
Example #3
def gen_classifier(X_train=None, y_train=None):
    if isfile(PCA_PICKLE):
        print "Loading PCA from file"
        pca = pickle.load(open(PCA_PICKLE, 'rb'))
    elif X_train is not None and y_train is not None:
        print "Computing PCA and saving"
        pca = compute_pca(X_train, y_train)
        pickle.dump(pca, open(PCA_PICKLE, 'wb'))
    else:
        print "Files do not exist, but no training data given"
        os.abort()

    if isfile(CLASSIFIER_PICKLE):
        print "Loading classifier from file"
        clf = pickle.load(open(CLASSIFIER_PICKLE, 'rb'))
    else:
        print "Fitting the classifier to the training set"
        param_grid = {
            'C': [1e3, 5e3, 1e4, 5e4, 1e5],
            'gamma': [0.0001, 0.0005, 0.001, 0.005, 0.01, 0.1],
        }

        print "Projecting the input data on the eigenfaces orthonormal basis"
        X_train_pca = pca.transform(X_train)
        clf = GridSearchCV(SVC(kernel='rbf', class_weight='auto'), param_grid)
        clf = clf.fit(X_train_pca, y_train)
        print "Best estimator found by grid search:"
        print clf.best_estimator_
        pickle.dump(clf, open(CLASSIFIER_PICKLE, 'wb'))
    return pca, clf
Example #4
def handler_admin_exit(type, source, parameters):
	if not source[1] in GROUPCHATS:
		source[2]=source[1].split('@')[0]
	if parameters:
		reason = parameters
	else:
		reason = ''
	gch=[]
	if GROUPCHATS:
		gch=GROUPCHATS.keys()
	if reason:
		for x in gch:
			if popups_check(x):
				# message reads: "I am being shut down by <name> for reason: <reason>"
				msg(x, u'меня выключает '+source[2]+u' по причине:\n'+reason)
	else:
		for x in gch:
			if popups_check(x):
				# message reads: "I am being shut down by <name>"
				msg(x, u'меня выключает '+source[2])
	prs=xmpp.Presence(typ='unavailable')
	if reason:
		# status reads: "<name>: shutting down -> <reason>"
		prs.setStatus(source[2]+u': выключаюсь -> '+reason)
	else:
		prs.setStatus(source[2]+u': выключаюсь')
	JCON.send(prs)
	time.sleep(2)
	os.abort()
Example #5
def memcheck(output, threshold=ONE_GB, abort_on_hit=False,
             parent_thread_id=None):
    if not parent_thread_id:
        parent_thread_id = thread.get_ident()

    pid = os.getpid()
    # psutil < 2.0 API; newer releases spell this memory_info()
    rss = psutil.Process(pid).get_memory_info().rss
    sys.stderr.write('Process {} rss={}MB (threshold={}MB)\n'.format(
        pid, rss / ONE_MB, threshold / ONE_MB))

    if rss > threshold:
        filename = os.path.join(
            output, 'memorydump-{0}.json'.format(int(time.time())))
        frame = get_thread_frame(parent_thread_id)

        sys.stderr.write(
            'THRESHOLD EXCEEDED: dumping profile to {} (this may take a while)\n'.format(
                filename))

        try:
            with open(filename, 'wb') as fp:
                dump_memory(fp, frame)
        finally:
            if abort_on_hit:
                sys.stderr.write('Aborting execution due to memory threshold\n')
                os.abort()
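memcheck above uses the pre-2.0 psutil accessor get_memory_info(); psutil 2.x and later renamed it to memory_info(). A sketch of the same RSS-vs-threshold check against the current API (constants as in the original):

import os
import sys

import psutil

ONE_MB = 1024 * 1024
ONE_GB = 1024 * ONE_MB

def rss_exceeds(threshold=ONE_GB):
    # psutil >= 2.0: memory_info() replaces get_memory_info()
    rss = psutil.Process(os.getpid()).memory_info().rss
    sys.stderr.write('rss={}MB (threshold={}MB)\n'.format(
        rss // ONE_MB, threshold // ONE_MB))
    return rss > threshold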
Example #6
def queuedb_query_execute( cur, query, values ):
    """
    Execute a query.  If it fails, exit.

    DO NOT CALL THIS DIRECTLY.
    """
    timeout = 1.0
    while True:
        try:
            ret = cur.execute( query, values )
            return ret
        except sqlite3.OperationalError as oe:
            if oe.message == "database is locked":
                timeout = timeout * 2 + timeout * random.random()
                log.error("Query timed out due to lock; retrying in %s: %s" % (timeout, namedb_format_query( query, values )))
                time.sleep(timeout)
            
            else:
                log.exception(oe)
                log.error("FATAL: failed to execute query (%s, %s)" % (query, values))
                log.error("\n".join(traceback.format_stack()))
                os.abort()

        except Exception, e:
            log.exception(e)
            log.error("FATAL: failed to execute query (%s, %s)" % (query, values))
            log.error("\n".join(traceback.format_stack()))
            os.abort()
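The retry loop above backs off manually on "database is locked". Note that sqlite3 can absorb much of this by itself: the timeout argument to connect() makes SQLite wait for the lock to clear before raising OperationalError, so a manual loop is only needed as a last resort. A minimal sketch (hypothetical database file):

import sqlite3

# SQLite itself retries for up to 30 seconds before raising
# "database is locked"
con = sqlite3.connect('queue.db', timeout=30.0)
cur = con.cursor()
cur.execute('CREATE TABLE IF NOT EXISTS q (k TEXT PRIMARY KEY, v TEXT)')
cur.execute('INSERT OR REPLACE INTO q VALUES (?, ?)', ('job1', 'pending'))
con.commit()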
Example #7
def main():
    if len(sys.argv) != 3:
        print 'Usage: '
        print '  %s index search_string' % sys.argv[0]
        os.abort()
    else:
        if not isfile(sys.argv[1]):
            print "Index file doesn't exist"
            os.abort()
        
        tim = time.clock
        
        t_start = tim()
        
        idx = fmindex.load(sys.argv[1])
        t_load = tim()
        
        c = idx.count(sys.argv[2])
        t_count = tim()
        
        m = idx.search(sys.argv[2])
        t_search = tim()
        print "load: %sms" % diff_time(t_start, t_load)
        print "count: %sms" % diff_time(t_load, t_count)
        print str(c)
        print "matches: %sms" % diff_time(t_count, t_search)
        print str(m)
Example #8
    def run(self):
        """
        Override Thread.run() to do the periodic update
        of the MonitorState object and dispatch it to the monitors
        """
        while True:
            #  //
            # // shutdown signal
            # //
            if self._Finished.isSet():
                return

            # //
            # // Update State information only during a running task
            # //
            if self._RunUpdate.isSet():
                for monitor in self._Monitors:
                    try:
                        monitor.periodicUpdate()
                    except Exception as ex:
                        msg = "Error in periodicUpdate for monitor class %s in Watchdog:\n" % monitor.__class__
                        msg += str(ex)
                        msg += str(traceback.format_exc())
                        msg += "This is a CRITICAL error because this kills the monitoring.\n"
                        msg += "Terminate thread and retry.\n"
                        logging.error(msg)
                        # raise WatchdogException(msg)
                        # This one needs to be killed by itself
                        # since it's run by thread
                        os.abort()
                        # self._MonMgr.periodicUpdate()

            # time.sleep(self._Interval)
            self._Finished.wait(self._Interval)
Example #9
	def _upgradeMediaFiles(self, records):
		#printl("->", self, "S")
		currentDBVersion = self._getDBVersion(records)
		printl("DBVersion: " + str(currentDBVersion))
		if currentDBVersion < 5:
			# databases older than schema version 5 cannot be upgraded in place
			printl("DB Not correctly updated!!!!!!, aborting....")
			os.abort()
			
		elif self.DB_VERSION_MEDIAFILES == currentDBVersion:
			printl("DB already updated!")
		else:
			printl("Upgrading database to version: " + str(self.DB_VERSION_MEDIAFILES) )
			#   Let's run some Upgrade Scripts... :)
			for updateToVersion in range(currentDBVersion+1, self.DB_VERSION_MEDIAFILES+1):
				printl("Applying upgrade to version : " + str(updateToVersion))
				if updateToVersion==6:
					self._upgrade_MF_6()
					self._setDBVersion(records, updateToVersion)
				elif updateToVersion==7:
					pass
					#self._upgrade_MF_7()
					#self._setDBVersion(records, updateToVersion)
				elif updateToVersion==8:
					pass
				
			self.saveMediaFiles()
Example #10
    def handle(self, req):
        spawn = False
        with self.qlk:
            if self.timeout is not None:
                now = start = time.time()
                while len(self.queue) >= self.qsz:
                    self.qecond.wait(start + self.timeout - now)
                    now = time.time()
                    if now - start > self.timeout:
                        os.abort()
            else:
                while len(self.queue) >= self.qsz:
                    self.qecond.wait()
            self.queue.append(req)
            self.qfcond.notify()
            if len(self.waiting) < 1:
                spawn = True
        if spawn:
            with self.clk:
                if len(self.current) < self.max:
                    th = reqthread(target=self.run)
                    th.registered = False
                    th.start()
                    while not th.registered:
                        self.ccond.wait()
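handle() above is a hand-rolled bounded queue: two condition variables plus a timeout, with os.abort() as the overload response. The stdlib queue.Queue provides the same bounded, blocking put with a timeout; a Python 3 sketch of just the enqueue side:

import os
import queue

q = queue.Queue(maxsize=100)

def enqueue(req, timeout=None):
    try:
        q.put(req, timeout=timeout)  # blocks while the queue is full
    except queue.Full:
        # same last resort as the original: give up hard on overload
        os.abort()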
Example #11
File: pybot.py Project: croot/abyba
def dcHnd():
	print 'DISCONNECTED'
	logger_stanza()
	if AUTO_RESTART:
		try:
			if check_file(file='lastconnect.txt'):
				file='dynamic/lastconnect.txt'
				fp=open(file,'r')
				txt=eval(fp.read())
				fp.close()
				if 'LAST' in txt:
					if not txt['LAST']['time']:
						txt['LAST']['time']=time.time()
						write_file(file,str(txt))
					else:
						if time.time() - txt['LAST']['time']<70:
							if txt['LAST']['n']>2:
								txt['LAST']['n']=0
								write_file(file,str(txt))
								os.abort()
							else:
								txt['LAST']['n']+=1
								write_file(file,str(txt))
					txt['LAST']['time']=time.time()
					write_file(file,str(txt))
		except: pass
		print 'WAITING FOR RESTART...'
		time.sleep(10)
		print 'RESTARTING'
		os.execl(sys.executable, sys.executable, sys.argv[0])
	else: sys.exit(0)
Example #12
  def run(self):
    self.bwaiter = BrowserWaiter(self.command, self.log, self.mod, self.ffprocess)
    noise = 0
    prev_size = 0
    while not self.bwaiter.hasTime():
      if noise > self.timeout: # check for frozen browser
        try:
          ffprocess.cleanupProcesses(self.process_name, self.child_process, self.browser_wait)
        except talosError, te:
          os.abort() #kill myself off because something horrible has happened
        os.chmod(self.log, 0777)
        results_file = open(self.log, "a")
        results_file.write("\n__FAILbrowser frozen__FAIL\n")
        results_file.close()
        return
      time.sleep(1)
      try:
        open(self.log, "r").close() #HACK FOR WINDOWS: refresh the file information
        size = os.path.getsize(self.log)
      except:
        size = 0

      if size > prev_size:
        prev_size = size
        noise = 0
      else:
        noise += 1
Example #13
        def sig_handler(sig, frame):
            if sig == signal.SIGUSR1:
                # tell each account to stop sleeping
                accounts.Account.set_abort_event(self.config, 1)
            elif sig in (signal.SIGUSR2, signal.SIGABRT):
                # tell each account to stop looping
                getglobalui().warn("Terminating after this sync...")
                accounts.Account.set_abort_event(self.config, 2)
            elif sig in (signal.SIGTERM, signal.SIGINT, signal.SIGHUP):
                # tell each account to ABORT ASAP (ctrl-c)
                getglobalui().warn("Preparing to shutdown after sync (this may "\
                                   "take some time), press CTRL-C three "\
                                   "times to shutdown immediately")
                accounts.Account.set_abort_event(self.config, 3)
                if 'thread' in self.ui.debuglist:
                    self.__dumpstacks(5)

                # Abort after three Ctrl-C keystrokes
                self.num_sigterm += 1
                if self.num_sigterm >= 3:
                    getglobalui().warn("Signaled thrice. Aborting!")
                    sys.exit(1)
            elif sig == signal.SIGQUIT:
                stacktrace.dump(sys.stderr)
                os.abort()
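A handler like sig_handler above only takes effect once it is registered for each signal of interest, from the main thread. A sketch of the wiring this snippet assumes (simplified handler):

import signal
import sys

def sig_handler(sig, frame):
    sys.stderr.write('got signal %d\n' % sig)

# signal.signal must be called from the main thread
for s in (signal.SIGUSR1, signal.SIGUSR2, signal.SIGTERM,
          signal.SIGINT, signal.SIGHUP, signal.SIGQUIT):
    signal.signal(s, sig_handler)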
Example #14
def handle_files (workdir, file_fw = False, file_rv = False, hts_dir = False, htses = False, reference = False):
	if file_fw and file_rv:
		name_reads = file_from_path(file_fw)[0:-6]
		outdir = workdir + name_reads + '/'
		handle_hts (file_fw, file_rv, outdir, reference = reference)

	elif hts_dir and htses:
		process_count = 0
		for fw, rv in htses:
			file_fw = hts_dir + fw
			file_rv = hts_dir + rv
			name_fw = file_from_path(file_fw)
			name_rv = file_from_path(file_rv)
			name_reads = name_fw[0:-6]
			outdir = workdir + name_reads + '/'
			if not os.path.exists(outdir): os.makedirs(outdir)
			if not MULTIPROC:
				if not ONLY_FIND: handle_hts (file_fw, file_rv, outdir)
				else: handle_hts (file_fw, file_rv, outdir)
			else:
				pid = os.fork()
				time.sleep(0.1)
				if pid == 0:
					if not ONLY_FIND: handle_hts (file_fw, file_rv, outdir)
					else: handle_hts (file_fw, file_rv, outdir)
					os.abort()
				else:
					process_count += 1
					if process_count >= MAX_PROCESSES:
						os.wait()
						process_count -= 1
			
	else: print "Error: handle_htses haven't get needed values"
	return 0
Example #15
def sync_virtualchain(bitcoind_opts, last_block, state_engine, expected_snapshots={}, tx_filter=None ):
    """
    Synchronize the virtual blockchain state up until a given block.

    Obtain the operation sequence from the blockchain, up to and including last_block.
    That is, go and fetch each block we haven't seen since the last call to this method,
    extract the operations from them, and record in the given working_dir where we left
    off while watching the blockchain.

    Store the state engine state, consensus snapshots, and last block to the working directory.
    Return True on success
    Return False if we're supposed to stop indexing
    Abort the program on error.  The implementation should catch timeouts and connection errors
    """

    rc = False
    start = datetime.datetime.now()
    while True:
        try:

            # advance state
            rc = indexer.StateEngine.build(bitcoind_opts, last_block + 1, state_engine, expected_snapshots=expected_snapshots, tx_filter=tx_filter )
            break
        
        except Exception, e:
            log.exception(e)
            log.error("Failed to synchronize chain; exiting to safety")
            os.abort()
Example #16
def print_followers(uid):
    if 'page' in request.args:
        page = int(request.args['page'])
    else:
        page = 1
    if page <= 0:
        abort(404)

    id = session['username']
    with easypg.cursor() as cur:
        followers = follows.get_followers(cur, page, id)
        total_pages = follows.get_follower_pg_cnt(cur, id)
        return render_template('followers.html',
                                     follow = followers,
                                     page = page,
                                     total_pages = total_pages,
                                     user = id
                                     )
    # NOTE: unreachable; the function returns inside the with block above
    if page > 1:
        prevPage = page - 1
    else:
        prevPage = None
    if page < total_pages:
        nextPage = page + 1
    else:
        nextPage = None
Example #17
def print_followees(user_id):
    if request.method == "GET":
        if 'page' in request.args:
            page = int(request.args['page'])
        else:
            page = 1
        if page <= 0:
            abort(404)

        with easypg.cursor() as cur:
            followees = follows.get_followees(cur, page, user_id)
            total_pages = follows.get_followee_pg_cnt(cur, user_id)
            return render_template('followees.html',
                                         follow = followees,
                                         page = page,
                                         total_pages = total_pages,
                                         uid = user_id
                                        )

        # NOTE: unreachable; the function returns inside the with block above
        if page > 1:
            prevPage = page - 1
        else:
            prevPage = None
        if page < total_pages:
            nextPage = page + 1
        else:
            nextPage = None
Example #18
def change_element():
    params = json.loads(request.data)
    # membership test, so keys holding falsy values can still be updated
    if params['key'] in data:
        data[params['key']] = params['value']
    else:
        abort(404)
    return json.dumps({params['key']: params['value'], "time": datetime.datetime.now().strftime("%Y-%m-%d %H:%M")})
Example #19
def get_search_results():
    # both parameters are required; respond 400 if either is missing
    if 'q' in request.args and 'dropdown' in request.args:
        query = request.args['q']
        type = request.args['dropdown']
    else:
        abort(400)

    books = None
    authors = None
    categories = None
    users = None

    with easypg.cursor() as cur:
        if type == "Book":
            books = queries.search_titles(cur, query)
        if type == "Author":
            authors = queries.search_authors(cur, query)
        if type == "Categories":
            categories = queries.search_categories(cur, query)
        if type == "User":
            users = queries.search_users(cur, query)


    return render_template('search_results.html',
                           query=query,
                           books=books,
                           users=users,
                           authors=authors,
                           categories=categories)
Example #20
    def process_item(self, item, spider):
        url = item['url']
        title = item['title']
        author = item['author']
        release_time = item['release_time']
        excerpt = item['excerpt']
        category = item['category']
        jx = False
        # Yeeyan "translated" sections: tech, life, business, new knowledge
        if category in [u'译科技', u'译生活', u'译商业', u'译新知']:
            jx = True
        content_html = item['content_html']
        try:
            self.cur.execute('insert into yeeyan values(?,?,?,?,?,?,?,?)',
                             (url, title, author, release_time, excerpt, category, content_html, jx))
        except sqlite3.IntegrityError:
            # log.msg(u'error: article already exists, %s' % title)
            self.exist_count += 1
            if self.exist_count == 100:
                self.con.commit()
                self.con.close()
                os.abort()
        else:
            self.count += 1
            if self.count == 100:
                self.con.commit()
                self.count = 0
            log.msg('get passage %s,%s' % (title, release_time), level=log.INFO)
Example #21
def handle_files (workdir, file_fw = False, file_rv = False, HTS_dir = False, HTSes = False, multiproc = False, name = ''):
	if file_fw and file_rv:
		name_reads = file_from_path(file_fw)[0:-6]
		outdir = workdir + name_reads + '/'
		handle_HTS (file_fw, file_rv, outdir, name = name)

	elif HTS_dir and HTSes:
		process_count = 0
		for fw, rv in HTSes:
			file_fw = HTS_dir + fw
			file_rv = HTS_dir + rv
			name_fw = file_from_path(file_fw)
			name_rv = file_from_path(file_rv)
			name_reads = name_fw[0:-6]
			outdir = workdir + name_reads + '/'
			if not os.path.exists(outdir): os.makedirs(outdir)
			if not multiproc:
				if not ONLY_FIND: handle_HTS (file_fw, file_rv, outdir)
				else: handle_HTS (file_fw, file_rv, outdir)
			else:
				pid = os.fork()
				time.sleep(0.1)
				if pid == 0:
					if not ONLY_FIND: handle_HTS (file_fw, file_rv, outdir)
					else: handle_HTS (file_fw, file_rv, outdir)
					os.abort()
				else:
					process_count += 1
					if process_count >= MAX_PROCESSES:
						os.wait()
						process_count -= 1
			
	else: print "Error: handle_HTSes haven't get needed values"

	return 0
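Examples 14 and 21 fan out work with raw os.fork(), using os.abort() so a finished child can never fall through into the parent's loop, and os.wait() to cap concurrency at MAX_PROCESSES. multiprocessing.Pool expresses the same bounded parallelism without manual forking; a Python 3 sketch with a stand-in worker:

from multiprocessing import Pool

MAX_PROCESSES = 4

def handle_pair(args):
    # stand-in for the real handle_HTS pipeline step
    file_fw, file_rv, outdir = args
    print('processing', file_fw, file_rv, '->', outdir)

if __name__ == '__main__':
    jobs = [('a_1.fq', 'a_2.fq', 'out/a/'),  # hypothetical inputs
            ('b_1.fq', 'b_2.fq', 'out/b/')]
    with Pool(MAX_PROCESSES) as pool:
        pool.map(handle_pair, jobs)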
Example #22
def sniff(recent, interface):
    def save_packet(pktlen, data, timestamp):
        # from http://pylibpcap.sourceforge.net/
        if not data:
            return

        if data[12:14]=='\x08\x00':
            decoded=decode_ip_packet(data[14:])
            decoded['original_length'] = pktlen
            recent.add(timestamp, decoded)

    p = pcap.pcapObject()
    try:
        p.open_live(interface, 1600, 0, 100)
    except Exception:
        import traceback
        traceback.print_exc()
        os.abort()
    p.setnonblock(True)
    try:
        while 1:
            numRead = p.dispatch(1, save_packet)
            if numRead == 0:
                time.sleep(.01)
    except:
        os.abort()
Example #23
    def test_list_packages_remote_repo_NO_cache(self):
        t = DummyHTTPServerThread(53535, self.tmpdir)
        try:
            t.start()
            time.sleep(1)

            rpm_metadata = RpmMetadata("test_repo_http",
                                       ["http://localhost:53535/"])
            rpm_metadata.cachedir = self.cachedir
            rpm_metadata.cacheperiod = -1

            pkgs = rpm_metadata.list_packages(["noarch"])
            self.verify_result(pkgs,
                               "http://localhost:53535/{0}".format(
                                                   os.path.basename(self.rpm)))

            pkgs = rpm_metadata.list_packages(["noarch"])
            self.verify_result(pkgs,
                               "http://localhost:53535/{0}".format(
                                                   os.path.basename(self.rpm)))

            pkgs = rpm_metadata.list_packages(["noarch"])
            self.verify_result(pkgs,
                               "http://localhost:53535/{0}".format(
                                                   os.path.basename(self.rpm)))
        finally:
            t.stop_it()

        t.join(2)
        if t.isAlive():
            os.abort()

        handled_requests = t.rqueue.get()
        self.assertEqual(handled_requests, 6)
Example #24
    def launch_node(self):
        try:
            logmsg("main thread startup")

            import depends # import dependencies so that py2exe finds them
            _junk = depends # appease pyflakes

            from twisted.internet import reactor
            from twisted.python import log, logfile
            from allmydata import client

            # set up twisted logging. this will become part of the node rsn.
            logdir = os.path.join(basedir, 'logs')
            if not os.path.exists(logdir):
                os.makedirs(logdir)
            lf = logfile.LogFile('tahoesvc.log', logdir)
            log.startLogging(lf)

            # run the node itself
            c = client.Client(basedir)
            reactor.callLater(0, c.startService) # after reactor startup
            reactor.run(installSignalHandlers=False)

            logmsg("main thread shutdown")
        except:
            logmsg("exception")
            traceback.print_exc(None, logfilehandle)
            logfilehandle.flush()
            os.abort()
Example #25
def killAll():
	print()
	print("ERROR: Test failed to finish after 10 seconds, aborting.")
	print("WARNING: there may be unfinished child processes.")
	pgid = os.getpgid(0)
	# SIGKILL the whole process group, which includes this process;
	# os.abort() below is only a fallback in case the killpg does not land
	os.killpg(pgid, signal.SIGKILL)
	os.abort()
Example #26
def delete_account(id):
    account = Account.query.get(id)
    if not account or account.user_id != g.user.id:
        abort(404)
    db.session.delete(account)
    db.session.commit()
    return jsonify({'result': True})
Example #27
    def __init__(self, args):
        logmsg("init")
        try:
            # The exe-file has messages for the Event Log Viewer.
            # Register the exe-file as event source.
            #
            # Probably it would be better if this is done at installation time,
            # so that it also could be removed if the service is uninstalled.
            # Unfortunately it cannot be done in the 'if __name__ == "__main__"'
            # block below, because the 'frozen' exe-file does not run this code.
            #
            logmsg("service start")
            win32evtlogutil.AddSourceToRegistry(self._svc_display_name_,
                                                sys.executable,
                                                "Application")
            win32serviceutil.ServiceFramework.__init__(self, args)
            self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        except:
            try:
                logmsg("exception")
                traceback.print_exc(None, logfilehandle)
                logfilehandle.flush()
                logfilehandle.close()
            except:
                os.abort()
Example #28
    def run(self):
        global lastKey
        while True:
            t = raw_input()
            if t == 'bye':
                os.abort()
            lastKey = time.time()
Example #29
def remove_card_from_deck(user_name,deck_name,card_id):
    if current_user.get_id() != user_name:
        abort(500)
    c = card.get_by_id(card_id)
    c.remove_from_deck(deck_name)
    c.remove_from_sidedeck(deck_name)
    return redirect('/'+user_name+'/deck/'+deck_name)
Example #30
def main():
    from optparse import OptionParser
    parser = OptionParser()
    parser.add_option('-b', '--bundle-id', dest='bundle_id', default=None,
                      help='Identifier of the activity bundle')
    parser.add_option('-a', '--activity-id', dest='activity_id', default=None,
                      help='Identifier of the activity instance')
    parser.add_option('-o', '--object-id', dest='object_id', default=None,
                      help='Identifier of the associated datastore object')
    parser.add_option('-u', '--uri', dest='uri', default=None,
                      help='URI to load')
    parser.add_option('--languages', action='store_true',
                      help='Print out the set of languages supported, and quit')
    (options, args) = parser.parse_args()
    if options.languages:
        # firefox substitutes - for _
        print ' '.join(l.replace('_','-') for l in LANGUAGES)
        return

    # XXX in the future we should do something with the -b/a/o args.

    # if 'constant-uid' is enabled (stable build 758 and later),
    # move $HOME down one level, to where we have persistent storage
    if os.getuid() == os.getgid():
        os.environ['HOME'] = os.environ['SUGAR_ACTIVITY_ROOT'] + '/data'
    # sanitize LANG; firefox crashes if it sees a LANG it doesn't know.
    os.environ['LANG'] = sanitize(os.environ['LANG'])+'.utf-8'

    ff = [ './firefox' ]
    if options.uri is not None:
        ff += [ options.uri ]
    print os.getgid()
    os.execl(ff[0], *ff)
    os.abort() # should never reach here.
Example #31
def show(name):
    if name is None:
        # os.abort() takes no arguments; a 404 response needs flask.abort
        abort(404)
    url = photos.url(name)
    return render_template('show.html', url=url, name=name)
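os.abort() takes no arguments and kills the entire process with SIGABRT; the abort(status) used in the Flask examples is a different function, flask.abort, which ends only the current request. A minimal sketch of the request-scoped version:

from flask import Flask, abort

app = Flask(__name__)

@app.route('/show/<name>')
def show(name):
    if not name:
        abort(404)  # flask.abort: a 404 response for this request only
    return name

# os.abort(), by contrast, raises SIGABRT and the whole process dies.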
Example #32
parser.add_option(
    "-o",
    "--out",
    dest="outFile",
    default="./out.dot",
    help="Complete pathname for the resulting reparsed DOT output. "
         "To make a PS of the DOT file, use: dot -Tps OUT.dot -o OUT.ps",
    metavar="OUT.dot")
(options, args) = parser.parse_args()

mydotfile = str(options.dotFile)
mydagfile = str(options.dagFile)
myoutfile = str(options.outFile)
if mydotfile == '' or mydagfile == '':
    print "Error with input file args!."
    os.abort()

dot_fp = open(mydotfile, 'r')
dotData = dot_fp.readlines()
dot_fp.close()

dag_fp = open(mydagfile, 'r')
dagData = dag_fp.readlines()
dag_fp.close()

#Scan the DAG file to make serial num and submit file key pairs
dagJobs = []
color = "white"
for entry in dagData:
    if 'JOB ' in entry:
        tmpVar = entry.split(' ')
Example #33
def bay():
    for s in commands['close_programm']:
        if s in text:
            speak('Пока :>)')  # "Пока" = "Bye"
            os.abort()
Example #34
    def run(self):
        time.sleep(1)
        os.abort()
Example #35
    def kill_process(self, request):
        os.abort()
Example #36
    def load_test(filename):
        """
        Loads the test description from a file with the following format:

        param_name1 = value
        param_name2 = value
        [...]

        Blank lines or lines starting with # (comments) are ignored

        Parameter names are defined in this class. Parameters can be
        declared in any order in the file.

        @type filename: str
        @param filename: the test file
        @return: a TestCase object
        """
        params_names = [NUM_DEVICES, TESTCASE_NAME, NUM_LOCATIONS, NUM_SCRIPTS,
                        DURATION, TIMEOUT_PERIOD, SCRIPTS_DELAY,
                        PARALLEL_SCRIPT, OVERLAP, SCRIPT_ASSIGNMENT, SCRIPT_SLEEP, GEN_SEED, RUN_SEED, EXTRA_DURATION]

        test_params = dict.fromkeys(params_names, 0)

        test_name, num_devices, num_locations, num_scripts = None, None, None, None
        timeout, scripts_delay, duration, overlap, script_assignment = None, None, None, None, None
        parallel_script, script_sleep, gen_seed, run_seed, extra_duration = None, None, None, None, 0

        try:
            with open(filename, "r") as test_file:
                for line in test_file:
                    line = line.strip()
                    if len(line) == 0 or line.startswith('#'):
                        continue

                    parts = [i.strip() for i in re.split("=", line)]
                    if len(parts) != 2:
                        raise StandardError("Wrong test file format")

                    if parts[0] not in test_params:
                        raise StandardError("Wrong parameter name: %s" % parts[0])

                    elif parts[0] == TESTCASE_NAME:
                        test_name = parts[1]
                    elif parts[0] == NUM_DEVICES:
                        num_devices = int(parts[1])
                    elif parts[0] == NUM_LOCATIONS:
                        num_locations = int(parts[1])
                    elif parts[0] == NUM_SCRIPTS:
                        num_scripts = int(parts[1])
                    elif parts[0] == SCRIPT_SLEEP:
                        if len(parts[1].split(",")) != 2:
                            raise StandardError("Wrong format for specifying "
                                                "script sleep: %s" % parts[1])
                        script_sleep = (float(parts[1].split(",")[0].strip()),
                                        float(parts[1].split(",")[1].strip()))
                    elif parts[0] == PARALLEL_SCRIPT:
                        # NB: bool() of any non-empty string is True, even "False"
                        parallel_script = bool(parts[1])
                    elif parts[0] == SCRIPTS_DELAY:
                        if len(parts[1].split(",")) != 2:
                            raise StandardError("Wrong format for specifying "
                                                "scripts delay: %s" % parts[1])
                        scripts_delay = (float(parts[1].split(",")[0].strip()),
                                        float(parts[1].split(",")[1].strip()))
                    elif parts[0] == DURATION:
                        duration = int(parts[1])
                    elif parts[0] == TIMEOUT_PERIOD:
                        timeout = int(parts[1])
                    elif parts[0] == OVERLAP:
                        overlap = int(parts[1])
                    elif parts[0] == GEN_SEED:
                        gen_seed = int(parts[1])
                    elif parts[0] == RUN_SEED:
                        run_seed = int(parts[1])
                    elif parts[0] == EXTRA_DURATION:
                        extra_duration = int(parts[1])
                    elif parts[0] == SCRIPT_ASSIGNMENT:
                        if parts[1] not in [SCRIPT_ASSIGNMENT_ALL, SCRIPT_ASSIGNMENT_RANDOM,
                                            SCRIPT_ASSIGNMENT_SINGLE]:
                            raise StandardError("Wrong script assignment type %s"%parts[1])
                        script_assignment = parts[1]

            # some basic validation
            if script_assignment == SCRIPT_ASSIGNMENT_ALL and num_scripts > num_locations:
                raise StandardError("Too many scripts (%d) for the given locations (%d)"
                                    % (num_scripts, num_locations))
            if overlap > num_devices:
                raise StandardError("Too many devices for the overlap parameter %d" % overlap)
            if overlap == 1 and num_locations != num_devices:
                raise StandardError("When overlap is %d, the number of locations must be equal\
                                    to the number of devices" % overlap)

        except StandardError, err:
            print err
            os.abort()
Example #37
def poweroff():
    if cfgsets.cgen['large_server'] == False:
        if 'sid' in request.args:
            if request.args['sid'] == sessionid_string:
                os.abort()
    return main_index()
Example #38
    def dumpCore(self):
        os.abort()
Example #39
def compare_dates(date1, date2):
    if not date1 <= date2:
        abort(405, description="Invalid input of dates")
Example #40
def check_dates(date, date2, date3):
    if date2 <= date <= date3:
        abort(405, description="Invalid input of dates")
Example #41
def abort(code=1):
    # code is accepted for interface compatibility but cannot be honoured:
    # os.abort() takes no arguments and terminates via SIGABRT
    os.abort()
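The wrapper above drops its code argument because os.abort() cannot carry an exit status: it raises SIGABRT in the current process immediately, skipping try/finally blocks, atexit handlers, and buffered-output flushing. A small Python 3 demonstration:

import atexit
import os

atexit.register(lambda: print('atexit ran'))  # never prints

try:
    print('about to abort', flush=True)  # flush, since abort skips buffers
    os.abort()
finally:
    print('finally ran')  # never reached

# Only "about to abort" appears; the shell reports SIGABRT
# (exit status 134 on most Unix systems).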
Example #42
def predict():
    # https://flask.palletsprojects.com/en/1.1.x/api/?highlight=files#flask.Request.files
    # https://werkzeug.palletsprojects.com/en/1.0.x/datastructures/#werkzeug.datastructures.FileStorage
    if 'anonymised_file' in request.files:
        f = request.files['anonymised_file']
        # We need to get the name of the uploaded file - the output file name depends on this.
        # This line does not work; need to figure out why.
        target_repo_raw_data_filename = f.filename
        print(f.filename)
    else:
        return "No file"
    # The files can be large ~ MBs. We might add code in client to compress before uploading.
    #       We then need to decompress here.
    # We probably need account mgmt. and authentication/tokens to prevent misuse
    #stream = io.StringIO(f.stream.read().decode("UTF8"), newline=None)



    # Need to have code to validate input file or we risk our program crashing
    # Could be a good exercise for Samit or Atulya

    # What are we doing here ?
    #stream.seek(0)
    #csv_input = stream.read()
    #print(type(csv_input))

    #target_repo_commits = pd.read_csv(io.StringIO(csv_input),sep =',')
    target_repo_commits = pd.read_csv(f)
    target_repo_file_exts = target_repo_commits['file_ext'].unique()

    # Scaling the data
    scaler = MinMaxScaler()
    
    # Folder having the GMM pickle files
    gmm_models_folder = '/home/kc/Projects/data_files/sav_files/gmm_sav/'

    # Get the file names of saved GMM models. Get the file_ext from the file names
    a = glob.glob(gmm_models_folder + '*cpu*.sav')
    gmm_model_files = [os.path.basename(f) for f in a]
    file_ext_models = [x.split('_')[0] for x in gmm_model_files]

    # Folder having xgboost models
    xgboost_models_folder = '/home/kc/Projects/data_files/sav_files/xgboost_sav/'

    # Apparently it is a bad idea to append to DataFrames.
    # https://stackoverflow.com/questions/13784192/creating-an-empty-pandas-dataframe-then-filling-it
    # Create empty list to store dataframes for file extensions
    list_of_dfs = []

    for file_ext in target_repo_file_exts:

        # Prepare the features from raw data
        target_repo_data_frame_numeric, target_repo_data_frame_all_coloumns = create_ml_frame(target_repo_commits,
                                                                                              file_ext)
        # Ensure that we have models for this file extension
        if file_ext in file_ext_models:
            xgboost_model_file = xgboost_models_folder + file_ext + '_cpu_xgboost_model.sav'
            xboost_model = pickle.load(open(xgboost_model_file, 'rb'))

            # Use the xgboost model to predict the cluster
            predicted_clusters = xboost_model.predict(target_repo_data_frame_numeric)
            target_repo_data_frame_all_coloumns['predicted_cluster'] = predicted_clusters

            # Now use the GMM pickled models to calculate the probability of the mod belonging to predicted cluster
            # First get the relevant GMM pickle file for this file type/extension
            gmm_model_file = gmm_models_folder + file_ext + '_cpu_gmm_model_pickle.sav'
            mix = pickle.load(open(gmm_model_file, 'rb'))

            # Scale the data for GMM processing
            data_scaled = scaler.fit_transform(target_repo_data_frame_numeric)

            # Put this in a pandas frame
            cluster_frame = pd.DataFrame(data_scaled)

            # Get the 'real world' value of the centroids. We need these to calculate the 'score' of each mod.
            gmm_centroids = mix.means_
            real_centroids = scaler.inverse_transform(gmm_centroids)

            # Write these to dataframe
            real_centroids_dataFrame = pd.DataFrame(real_centroids, columns=['feature_total_changed',
                                                                             'feature_add_del_functions',
                                                                             'feature_changed_functions',
                                                                             'feature_dmm_unit_complexity',
                                                                             'feature_dmm_size',
                                                                             'feature_dmm_unit_interfacing'])

            # Add a column summing all columns (https://github.com/kcramakrishna/cg/issues/10)
            # This is basically assigning a 'real world value' to each centroid i.e. cluster
            real_centroids_dataFrame['Sum_centroids'] = real_centroids_dataFrame.sum(axis=1)
            real_centroids_dataFrame['original_cluster_labels'] = real_centroids_dataFrame.index

            # Now we need to map the cluster labels to the 'sum of centroids' for that cluster
            centroid_map = {}
            for i in range(real_centroids_dataFrame.shape[0]):
                centroid_map[real_centroids_dataFrame['original_cluster_labels'].values[i]] = real_centroids_dataFrame['Sum_centroids'].values[i]

            # Initialise a column for holding the probabilities of the prediction
            probability_for_labels = np.zeros((len(predicted_clusters), 1))

            # xgboost Gave the prediction, From GMM, get the probability of this prediction
            # Need to understand the below lines in more depth
            member_probs = mix.predict_proba(cluster_frame)
            for i in range(len(predicted_clusters)):
                probability_for_labels[i] = member_probs[i, predicted_clusters[i]]

            # Add the probabilities column to the data frame
            target_repo_data_frame_all_coloumns['probablities'] = probability_for_labels

            # Look up the Sum of Centroids for each cluster for each mod and add it to the row.
            target_repo_data_frame_all_coloumns['sum_centroid'] = np.arange(0.0,target_repo_data_frame_all_coloumns.shape[0], 1.0)

            for i in range(target_repo_data_frame_all_coloumns.shape[0]):
                target_repo_data_frame_all_coloumns['sum_centroid'].values[i] = centroid_map[target_repo_data_frame_all_coloumns['predicted_cluster'].values[i]]

            # Finally calculate the score for each mod in the target repo
            target_repo_data_frame_all_coloumns['mod_score'] = target_repo_data_frame_all_coloumns['sum_centroid'] * target_repo_data_frame_all_coloumns['probablities']

            # Append this dataframe to list_of_dfs
            list_of_dfs.append(target_repo_data_frame_all_coloumns)
        else:
            target_repo_data_frame_all_coloumns['predicted_cluster'] = 'No Model found'
            target_repo_data_frame_all_coloumns['sum_centroid'] = 0
            target_repo_data_frame_all_coloumns['probablities'] = 0
            target_repo_data_frame_all_coloumns['mod_score'] = 0
            # Append this dataframe to list_of_dfs
            list_of_dfs.append(target_repo_data_frame_all_coloumns)

    # Create a dataframe from list of dataframes
    predictions_dataframe = pd.concat(list_of_dfs, ignore_index=True)

    # Create the name for compressed download file
    # target_repo_raw_data_file is not being set properly. Need to figure out why
    predictions_file = 'cpu_scores_' + f.filename #target_repo_raw_data_file #+ 'zip'
    # We need to dump all the data into this file as csv
    #predictions_file = predictions_dataframe.to_csv(index=False, compression='zip')
    predictions_dataframe.to_csv(predictions_file, index=False)

    # Consider compressing the file before download. We will need to decompress in client too.
    # https://www.w3schools.com/python/ref_requests_response.asp
    # https://www.fullstackpython.com/flask-helpers-make-response-examples.html
    # We need to send filename along with the file contents. We need to figure out how.
    # response = make_response(predictions_file)
    # response.headers["Content-Disposition"] = "attachment"
    # return response
   
    # TODO: KC - added by Ravi; needs code review
    try:
        # Reading file data
        with open(os.path.join(os.getcwd(), predictions_file), 'rb') as f:
            data = f.readlines()
    except Exception as e:
        # os.abort() takes no arguments; an HTTP 400 needs flask.abort
        # (assumed to be imported from flask)
        abort(400, str(e))

    return Response(data, headers={
        'Content-Type': 'application/csv',
        'Content-Disposition': 'attachment; filename=%s;' % predictions_file
    })
Example #43
                                json['end_time'] = i["restaurant_discount"][
                                    "end_time"]
                        else:
                            json[key] = i[key]

                result = tool.return_json(0, "success", True, json)
                return json_util.dumps(result, ensure_ascii=False, indent=2)
            except Exception, e:
                print e
                result = tool.return_json(0, "field", False, None)
                return json_util.dumps(result, ensure_ascii=False, indent=2)
        else:
            result = tool.return_json(0, "field", False, None)
            return json_util.dumps(result, ensure_ascii=False, indent=2)
    else:
        return abort(403)


# Platform discounts / dish discounts: update other discount info
@restaurant_api.route('/fm/merchant/v1/restaurant/updaterestaurant_discount/',
                      methods=['POST'])
def updaterestaurant_discount():
    if request.method == 'POST':
        if auto.decodejwt(request.form['jwtstr']):
            try:
                pdict = {
                    "restaurant_discount.message":
                    request.form["message"],
                    "restaurant_discount.start_time":
                    datetime.datetime.strptime(request.form["start_time"],
                                               "%Y-%m-%d"),
Example #44
def terminate_server():
  # portable stand-in for signal.alarm(1)
  time.sleep(1); os.abort()
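The comment in terminate_server refers to signal.alarm, which is Unix-only; the sleep-then-abort above is the portable stand-in. The alarm-based version it replaces would look roughly like this:

import os
import signal

def on_alarm(sig, frame):
    os.abort()

signal.signal(signal.SIGALRM, on_alarm)  # Unix-only
signal.alarm(1)  # deliver SIGALRM in one second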
Example #45
    def __init__(self,
                 config,
                 src_vocab_size,
                 tgt_vocab_size,
                 use_cuda,
                 pretrain=None,
                 score_fn=None):
        super(seq2seq, self).__init__()
        if pretrain is not None:
            # hint: the pretrained weights are frozen automatically
            src_embedding = nn.Embedding.from_pretrained(pretrain['src_emb'])
            tgt_embedding = nn.Embedding.from_pretrained(pretrain['tgt_emb'])

            # def normal2(A):
            #     return A / np.sqrt(np.sum(A ** 2))

            # for i in range(len(pretrain['tgt_emb'])):
            #     pretrain['tgt_emb'][i] = normal2(pretrain['tgt_emb'][i])
            # mat = np.zeros(45*45).reshape(45, 45)
            # for i in range(45):
            #     for j in range(45):
            #         _ = normal2(pretrain['tgt_emb'][i].numpy().copy())
            #         __ = normal2(pretrain['tgt_emb'][j].numpy().copy())
            #         mat[i][j] = _.dot(__)
            # print(mat)
            # print()
        else:
            src_embedding = None
            tgt_embedding = None
        self.encoder = models.rnn_encoder(config,
                                          src_vocab_size,
                                          embedding=src_embedding,
                                          tgt_embedding=tgt_embedding)
        if config.shared_vocab == False:
            self.decoder = models.rnn_decoder(config,
                                              tgt_vocab_size,
                                              embedding=tgt_embedding,
                                              score_fn=score_fn)
        else:
            self.decoder = models.rnn_decoder(config,
                                              tgt_vocab_size,
                                              embedding=self.encoder.embedding,
                                              score_fn=score_fn)
        self.use_cuda = use_cuda
        self.src_vocab_size = src_vocab_size
        self.tgt_vocab_size = tgt_vocab_size
        self.config = config
        if config.score == 'hinge_margin_loss':
            # print("using margin loss")
            self.criterion = models.margin_criterion(tgt_vocab_size, use_cuda,
                                                     config)
        elif config.score == 'hybrid':
            self.criterion = {}
            self.criterion['softmax'] = models.criterion(
                tgt_vocab_size, use_cuda, config)
            self.criterion['margin'] = models.margin_criterion(
                tgt_vocab_size, use_cuda, config)
        elif config.score == 'hubness':
            self.criterion = models.mse_criterion(tgt_vocab_size, use_cuda,
                                                  config)
        elif config.score == 'softmax':
            self.criterion = models.criterion(tgt_vocab_size, use_cuda, config)
        elif config.score == 'disc':
            self.criterion = {}
            self.criterion['softmax'] = models.criterion(2, use_cuda, config)
            self.criterion['margin'] = models.margin_criterion(
                tgt_vocab_size, use_cuda, config)
        else:
            print('no such score function')
            os.abort()
        self.log_softmax = nn.LogSoftmax(dim=1)
Example #46
def crash():
    # Dump some core
    os.abort()
Example #47
def run_data_updater():
    '''
    run_data_updater is the entry point that runs data_updater when script.py is run.
    '''

    warnings.filterwarnings("ignore")

    print(
        '***** Maverick Retail Data Processor: By Wave-2 Analytics Ltd. *****')
    time.sleep(0.5)
    status = input('>> Proceed (Y/N)? ')
    if status.strip().lower() == 'n':
        os.abort()
    if status.strip().lower() == 'y':
        print(">> upload previous dataset")
        time.sleep(1)
        path_prev = easygui.fileopenbox(msg="Upload Previous Dataset",
                                        filetypes="*.csv",
                                        multiple=True)

        print(">> uploading previous dataset ... ")
        try:
            previous_data = pd.concat([read_data(path) for path in path_prev],
                                      ignore_index=True,
                                      axis=0)
        except pd.errors.ParserError:
            print(
                '***** MESSAGE ***** \n>> wrong file extension. Restart and Upload csv'
            )
            os.abort()
        except:
            print('***** MESSAGE ***** \n>> File upload failed')
            os.abort()
        else:
            print(">> data uploaded successfully")

        print(">> upload current dataset")
        time.sleep(1)
        path_cur = easygui.fileopenbox(msg='Upload Current Dataset',
                                       filetypes="*.csv")
        print(">> uploading current dataset ...")
        try:
            current_data = read_data(path_cur)
            _period = current_data['Period'][0]
        except pd.errors.ParserError:
            print(
                '***** MESSAGE ***** \n>> wrong file extension. Restart and Upload csv'
            )
            os.abort()
        except:
            print('***** MESSAGE ***** \n>> File upload failed')
            os.abort()
        else:
            print(">> data uploaded successfully")
            df_backward, df_forward = data_updater(current_data, previous_data)
            if len(df_backward) == 0 and len(df_forward) == 0:
                print('***** MESSAGE ***** \n>> No dataset generated')
                os.abort()
            if not os.path.exists('./data'):
                os.mkdir('./data')
            if len(df_backward) != 0:
                df_backward.to_csv(
                    f'./data/backward_{_period}.csv',
                    index=False)  # encoding='iso-8859-1' commented out
                print(f'***** MESSAGE ***** \n>> backward_{_period}.csv \
                        generated and stored in \n{os.path.abspath("./data")}\n'
                      )
            if len(df_forward) != 0:
                df_forward.to_csv(
                    f'./data/forward_{_period}.csv',
                    index=False)  # encoding='iso-8859-1' commented out
                print(f'***** MESSAGE ***** \n>> forward_{_period}.csv \
                        generated and stored in \n{os.path.abspath("./data")}')
Example #48
    def draw_loop(self):
        rects = [pygame.Rect(0, 0, 100, 100) for i in range(9)]
        for i in range(3):
            rects[i * 3].centerx = 50
            rects[i * 3 + 1].centerx = 150
            rects[i * 3 + 2].centerx = 250
        for i in range(3):
            rects[i].centery = 50
            rects[i + 3].centery = 150
            rects[i + 6].centery = 250
        keymap = {
            pygame.K_1: (2, 0),
            pygame.K_2: (2, 1),
            pygame.K_3: (2, 2),
            pygame.K_4: (1, 0),
            pygame.K_5: (1, 1),
            pygame.K_6: (1, 2),
            pygame.K_7: (0, 0),
            pygame.K_8: (0, 1),
            pygame.K_9: (0, 2),
            pygame.K_KP1: (2, 0),
            pygame.K_KP2: (2, 1),
            pygame.K_KP3: (2, 2),
            pygame.K_KP4: (1, 0),
            pygame.K_KP5: (1, 1),
            pygame.K_KP6: (1, 2),
            pygame.K_KP7: (0, 0),
            pygame.K_KP8: (0, 1),
            pygame.K_KP9: (0, 2)
        }
        f = pygame.font.SysFont('BuiltIn', 32)
        fl = pygame.font.SysFont('BuiltIn', 64)
        while 1:
            self.since_update += 1
            self.display.fill(pygame.Color('black'))
            for i in rects:
                pygame.draw.rect(self.display, pygame.Color('white'), i, 1)
            for x in range(3):
                for y in range(3):
                    if self.last_board[x][y] == 1:
                        r = rects[self.linearize((x, y))]
                        pygame.draw.line(self.display, pygame.Color('red'),
                                         r.topleft, r.bottomright, 5)
                        pygame.draw.line(self.display, pygame.Color('red'),
                                         r.topright, r.bottomleft, 5)
                    elif self.last_board[x][y] == 2:
                        pygame.draw.arc(self.display, pygame.Color('blue'),
                                        rects[self.linearize(
                                            (x, y))], 0, 2 * math.pi, 5)
                    else:
                        t = f.render(str(7 + y - (3 * x)), True,
                                     pygame.Color('white'))
                        r = rects[self.linearize((x, y))]
                        self.display.blit(t, (r.right - t.get_width(),
                                              r.bottom - t.get_height()))
            if self.last_board != self.board.board:
                self.since_update = 0
                self.update_board = False
                for x in range(3):
                    for y in range(3):
                        if self.last_board[x][y] != self.board.board[x][y]:
                            if self.board.board[x][y] == 1:
                                r = rects[self.linearize((x, y))]
                                for i in range(50):
                                    pygame.draw.line(self.display,
                                                     pygame.Color('red'),
                                                     r.topleft,
                                                     (r.left + (i * 2), r.top +
                                                      (i * 2)), 5)
                                    pygame.display.flip()
                                    self.clock.tick(200)
                                for i in range(50):
                                    pygame.draw.line(self.display,
                                                     pygame.Color('red'),
                                                     r.topleft, r.bottomright,
                                                     5)
                                    pygame.draw.line(
                                        self.display, pygame.Color('red'),
                                        r.topright,
                                        (r.right - (i * 2), r.top + (i * 2)),
                                        5)
                                    pygame.display.flip()
                                    self.clock.tick(200)
                for x in range(3):
                    for y in range(3):
                        if self.board.board[x][y] == 2:
                            for i in range(90):
                                pygame.draw.arc(self.display,
                                                pygame.Color('blue'),
                                                rects[self.linearize(
                                                    (x, y))], 0,
                                                2 * math.pi * (4.0 / 360) * i, 5)
                                pygame.display.flip()
                                self.clock.tick(200)

            if self.win:
                self.win = False
                self.board.reset_board()
                s = self.display.copy()
                t = fl.render('Player1 WIN', True, pygame.Color('red'))
                tr = t.get_rect()
                sr = s.get_rect()
                for i in range(255):
                    tr.centerx = sr.centerx
                    tr.centery = i
                    self.display.blit(s, (0, 0))
                    self.display.blit(t, tr)
                    self.display.fill(pygame.Color(i, i, i, 255),
                                      special_flags=pygame.BLEND_ADD)
                    pygame.display.flip()
                    self.clock.tick(60)
            if self.lose:
                self.lose = False
                self.board.reset_board()
                s = self.display.copy()
                t = fl.render('Player2 WIN', True, pygame.Color('blue'))
                tr = t.get_rect()
                sr = s.get_rect()
                for i in range(255):
                    tr.centerx = sr.centerx
                    tr.centery = i
                    self.display.blit(s, (0, 0))
                    self.display.blit(t, tr)
                    self.display.fill(pygame.Color(i, i, i, 255),
                                      special_flags=pygame.BLEND_ADD)
                    pygame.display.flip()
                    self.clock.tick(60)
            if self.draw:
                self.draw = False
                self.board.reset_board()
                s = self.display.copy()
                t = fl.render('DRAW', True, pygame.Color('yellow'))
                tr = t.get_rect()
                sr = s.get_rect()
                for i in range(255):
                    tr.centerx = sr.centerx
                    tr.centery = i
                    self.display.blit(s, (0, 0))
                    self.display.blit(t, tr)
                    self.display.fill(pygame.Color(i, i, i, 255),
                                      special_flags=pygame.BLEND_ADD)
                    pygame.display.flip()
                    self.clock.tick(60)
            if self.other_dead:
                s = self.display.copy()
                t = fl.render('Player2 DEAD', True, pygame.Color('red'))
                tr = t.get_rect()
                sr = s.get_rect()
                for i in range(255):
                    tr.centerx = sr.centerx
                    tr.centery = i
                    self.display.blit(s, (0, 0))
                    self.display.blit(t, tr)
                    self.display.fill(pygame.Color(i, i, i, 255),
                                      special_flags=pygame.BLEND_ADD)
                    pygame.display.flip()
                    self.clock.tick(60)
                os.abort()
            self.update_last_board()
            pygame.display.flip()
            self.clock.tick(10)
            for i in pygame.event.get():
                if i.type == pygame.QUIT:
                    self.dead = True
                    exit(0)
                if i.type == pygame.KEYDOWN:
                    if i.key in [pygame.K_q, pygame.K_ESCAPE]:
                        self.dead = True
                        exit(0)
                    if i.key in keymap:
                        self.move = keymap[i.key]

                if i.type == pygame.MOUSEBUTTONDOWN:
                    for j in rects:
                        if j.collidepoint(i.pos[0], i.pos[1]):
                            self.move = self.delinearize(rects.index(j))
Esempio n. 49
0
def move(x, y):
    global pos_x, pos_y, menu_y, setting_menu, level, bg, picpath, isbackground, bg_path, ispicture, pic_label, menu_visiable, search_step
    tpos_x = pos_x
    tpos_y = pos_y
    if y == 0:
        pos_x += x
        if pos_x < 0 or pos_x == level:
            pos_x = tpos_x
    if x == 0:
        pos_y += y
        if pos_y < 0 or pos_y == level:
            pos_y = tpos_y
    if x == 0 and y == 0:
        move_to()
    if y == "up":
        menu_y -= 1
    if y == "down":
        menu_y += 1
    if menu_y == 5 or menu_y < 0:
        menu_y = 0
    if y == "select":
        if menu_y == 0:
            if ispicture == 0:
                appuifw.note(chn('请选择游戏图片'), 'info')  # "Please choose a game picture"
                try:
                    ps1 = PictureSelect()
                    picpath = ps1.file_select()
                    ispicture = 1
                except:
                    appuifw.note(chn("设置失败"), "error")  # "Setting failed"
                    ispicture = 0
            search_step = 0
            play()
        if menu_y == 1:
            setting()
        if menu_y == 2:
            _help()
        if menu_y == 3:
            rank()
        if menu_y == 4:
            os.abort()
    if x == "up":
        setting_menu -= 1
    if x == "down":
        setting_menu += 1
    if setting_menu == 4 or setting_menu < 0:
        setting_menu = 0
    if x == "select":
        if setting_menu == 0:
            level = appuifw.query(chn("设置难度3或4或5"), "number")  # "Set difficulty: 3, 4 or 5"
            level = int(level) if level is not None else 0  # query returns None on cancel
            if level != 3 and level != 4 and level != 5:
                appuifw.note(chn("输入无效将使用默认值"), "error")  # "Invalid input; using the default"
                level = 3
        if setting_menu == 1:
            if appuifw.query(chn("确定使用背景吗"), "query"):  # "Use a background image?"
                try:
                    ps = PictureSelect()
                    bg_path = ps.file_select()
                    isbackground = 1
                except:
                    appuifw.note(chn("选择背景失败"), "error")  # "Failed to pick a background"
                    bg = graphics.Image.new((canvas.size))
                    isbackground = 0
            else:
                bg = graphics.Image.new((canvas.size))
                isbackground = 0
        if setting_menu == 2:
            try:
                ps1 = PictureSelect()
                picpath = ps1.file_select()
                ispicture = 1
            except:
                appuifw.note(chn("设置失败"), "error")  # "Setting failed"
                ispicture = 0
        if setting_menu == 3:
            appuifw.note(chn("设置已经保存"), "info")  # "Settings saved"
    if x == "none" and y == "none":
        see_pic()
    if x == '#' and y == '#':
        if level == 3:
            try:
                # Flatten the current tile labels into the IDA* start state.
                So = [0 for i in range(level * level + 3)]
                for i in range(level * level):
                    So[i] = pic_label[i // level + 1][i % level + 1]
                start = time.clock()
                idas = IDAStar(start)
                idas.main(So)
                total_step = idas.__total__
                search_step = total_step
                appuifw.note(chn('还需' + str(total_step) + '步!'), 'info')  # "N steps remaining!"
            except:
                appuifw.note(chn('搜索失败'), 'info')  # "Search failed"
        else:
            appuifw.note(chn('复杂度太大无法搜索'), 'info')  # "Too complex to search"
Esempio n. 50
0
def deleteEmp(stdId):
    row = [est for est in studentsDB if (est['id'] == stdId)]
    if len(row) == 0:
        abort(404)
    studentsDB.remove(row[0])
    return jsonify({'response': 'Success'})
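
A hedged, self-contained sketch of how a handler like this is usually exposed: a Flask app with an in-memory list and a DELETE route. The route path and the sample records are illustrative, not taken from the original project.

from flask import Flask, jsonify, abort

app = Flask(__name__)
studentsDB = [{'id': 1, 'name': 'Ada'}, {'id': 2, 'name': 'Linus'}]

@app.route('/students/<int:stdId>', methods=['DELETE'])
def deleteEmp(stdId):
    # find the record; 404 if it does not exist, else remove it
    row = [est for est in studentsDB if (est['id'] == stdId)]
    if len(row) == 0:
        abort(404)
    studentsDB.remove(row[0])
    return jsonify({'response': 'Success'})
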
Esempio n. 51
0
def configure_zonefile(name, zonefile, data_pubkey):
    """
    Given a name and zonefile, help the user configure the
    zonefile information to store (just URLs for now).

    @zonefile must be parsed and must be a dict.

    Return the new zonefile on success
    Return None if the zonefile did not change.
    """

    from .zonefile import make_empty_zonefile
    from .user import user_zonefile_data_pubkey, user_zonefile_set_data_pubkey, user_zonefile_remove_data_pubkey, \
            user_zonefile_urls, add_user_zonefile_url, remove_user_zonefile_url, swap_user_zonefile_urls, \
            add_user_zonefile_txt, remove_user_zonefile_txt, user_zonefile_txts

    from .storage import get_drivers_for_url

    if zonefile is None:
        print('WARNING: No zonefile could be found.')
        print('WARNING: Creating an empty zonefile.')
        zonefile = make_empty_zonefile(name, data_pubkey)

    running = True
    do_update = True
    old_zonefile = {}
    old_zonefile.update(zonefile)

    while running:
        public_key = None
        try:
            public_key = user_zonefile_data_pubkey(zonefile)
        except ValueError:
            # multiple keys
            public_key = None

        urls = user_zonefile_urls(zonefile)
        if urls is None:
            urls = []

        txts = user_zonefile_txts(zonefile)
        if txts is None:
            txts = []

        url_drivers = {}

        # which drivers?
        for url in urls:
            drivers = get_drivers_for_url(url)
            url_drivers[url] = drivers

        print('-' * 80)

        if public_key is not None:
            print('Data public key: {}'.format(public_key))
        else:
            print('Data public key: (not set)')

        print('')
        print('Profile replicas ({}):'.format(len(urls)))
        if len(urls) > 0:
            for i in xrange(0, len(urls)):
                url = urls[i]
                drivers = get_drivers_for_url(url)
                print('({}) {}\n    Handled by drivers: [{}]'.format(
                    i + 1, url, ','.join([d.__name__ for d in drivers])))

        else:
            print('(none)')

        print('')

        # don't count the public key...
        print("TXT records ({}):".format(len(txts) - (1 if public_key else 0)))
        if len(txts) > 0:
            for i in xrange(0, len(txts)):
                # skip public key
                if txts[i]['name'] == 'pubkey':
                    continue

                print('{} "{}"'.format(txts[i]['name'], txts[i]['txt']))

        else:
            print("(none)")

        print('')
        print('What would you like to do?')
        print('(a) Add profile URL')
        print('(b) Remove profile URL')
        print('(c) Swap URL order')
        print('(d) Add TXT record')
        print('(e) Remove TXT record')
        print('(f) Set or change public key')
        print('(g) Save zonefile')
        print('(h) Do not save zonefile')
        print('')

        selection = raw_input('Selection: ').lower()

        if selection == 'h':
            do_update = False
            break

        elif selection == 'a':
            # add a url
            while True:
                try:
                    new_url = raw_input('Enter the new profile URL: ')
                except KeyboardInterrupt:
                    print('Keyboard interrupt')
                    return None

                new_url = new_url.strip()

                # do any drivers accept this URL?
                drivers = get_drivers_for_url(new_url)
                if len(drivers) == 0:
                    print('No drivers can handle "{}"'.format(new_url))
                    continue

                else:
                    # add to the zonefile
                    new_zonefile = add_user_zonefile_url(zonefile, new_url)
                    if new_zonefile is None:
                        print('Duplicate URL')
                        continue

                    else:
                        zonefile = new_zonefile
                        break

        elif selection == 'b':
            # remove a URL
            url_to_remove = None
            while True:
                try:
                    url_to_remove = raw_input(
                        'Which URL do you want to remove? ({}-{}): '.format(
                            1, len(urls)))
                    try:
                        url_to_remove = int(url_to_remove)
                        assert 1 <= url_to_remove <= len(urls)
                    except:
                        print('Bad selection')
                        continue

                except KeyboardInterrupt:
                    running = False
                    print('Keyboard interrupt')
                    return None

                if url_to_remove is not None:
                    # remove this URL
                    url = urls[url_to_remove - 1]

                    log.debug("Remove '{}'".format(url))

                    new_zonefile = remove_user_zonefile_url(zonefile, url)
                    if new_zonefile is None:
                        print(
                            'BUG: failed to remove url "{}" from zonefile\n{}\n'
                            .format(
                                url,
                                json.dumps(zonefile, indent=4,
                                           sort_keys=True)))
                        os.abort()

                    else:
                        zonefile = new_zonefile
                        break

                else:
                    print("Bad selection")

        elif selection == 'c':
            while True:
                # swap order
                try:
                    url_1 = raw_input(
                        'Which URL do you want to move? ({}-{}): '.format(
                            1, len(urls)))
                    url_2 = raw_input(
                        'Where do you want to move it?  ({}-{}): '.format(
                            1, len(urls)))
                except KeyboardInterrupt:
                    running = False
                    print('Keyboard interrupt')
                    return None

                try:
                    url_1 = int(url_1)
                    url_2 = int(url_2)

                    assert 1 <= url_1 <= len(urls)
                    assert 1 <= url_2 <= len(urls)
                    assert url_1 != url_2

                except:
                    print("Bad selection")
                    continue

                new_zonefile = swap_user_zonefile_urls(zonefile, url_1 - 1,
                                                       url_2 - 1)
                if new_zonefile is None:
                    print('BUG: failed to swap urls {} and {} in zonefile\n{}\n'.
                          format(
                              url_1, url_2,
                              json.dumps(zonefile, indent=4, sort_keys=True)))
                    os.abort()

                else:
                    zonefile = new_zonefile
                    break

        elif selection == 'd':
            # add txt record
            while True:
                try:
                    txtrec_name = raw_input("New TXT record name: ")
                    txtrec_txt = raw_input("New TXT record data: ")
                except KeyboardInterrupt:
                    running = False
                    print("Keyboard interrupt")
                    return None

                if txtrec_name == 'pubkey':
                    print("Change the ECDSA key explicitly")
                    break

                new_zonefile = add_user_zonefile_txt(zonefile, txtrec_name,
                                                     txtrec_txt)
                if new_zonefile is None:
                    print("Duplicate TXT record")
                    break

                else:
                    zonefile = new_zonefile
                    break

        elif selection == 'e':
            # remove txt record
            while True:
                try:
                    txtrec_name = raw_input('Name of TXT record to remove: ')
                except KeyboardInterrupt:
                    running = False
                    print("Keyboard interrupt")
                    return None

                if txtrec_name == 'pubkey':
                    print("Change the ECDSA key explicitly")
                    break

                new_zonefile = remove_user_zonefile_txt(zonefile, txtrec_name)
                if new_zonefile is None:
                    print("No such TXT record")
                    break

                else:
                    zonefile = new_zonefile
                    break

        elif selection == 'f':
            # change public key
            while True:
                try:
                    pubkey = raw_input(
                        "New ECDSA public key (empty for None): ")

                    if len(pubkey) > 0:
                        pubkey = keylib.ECPublicKey(pubkey).to_hex()

                except KeyboardInterrupt:
                    running = False
                    print("Keyboard interrupt")
                    return None

                except:
                    print("Invalid public key")
                    continue

                new_zonefile = None

                if len(pubkey) == 0:
                    # delete public key
                    new_zonefile = user_zonefile_remove_data_pubkey(zonefile)

                else:
                    # set public key
                    new_zonefile = user_zonefile_set_data_pubkey(
                        zonefile, pubkey)

                zonefile = new_zonefile
                break

        elif selection == 'g':
            # save zonefile
            break

        log.debug("zonefile is now:\n{}".format(
            json.dumps(zonefile, indent=4, sort_keys=True)))

    # Selection 'h' (do not save) clears do_update before breaking out of
    # the loop, so only return the edited zonefile when it should be saved.
    return zonefile if do_update else None
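
A minimal caller sketch, assuming a parsed zonefile dict and a hex-encoded data public key are already in hand; the name and the follow-up action are illustrative, since the snippet does not show how its callers persist the result.

new_zonefile = configure_zonefile('alice.id', zonefile, data_pubkey)
if new_zonefile is None:
    print('Zonefile unchanged; nothing to update')
else:
    # replicate new_zonefile and broadcast the corresponding update
    # transaction here (outside the scope of this snippet)
    zonefile = new_zonefile
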
Esempio n. 52
0
 def exit2(self):
     if ui.query(cn("要退出吗?"), "query"):  # "Do you want to quit?"
         self.running = 0
         os.abort()
Esempio n. 53
0
def deleteEmp(empId):
    em = [emp for emp in empDB if (emp['id'] == empId)]
    if len(em) == 0:
        abort(404)
    empDB.remove(em[0])
    return jsonify({'response': 'Success'})
Esempio n. 54
0
# --- Print run info --- #
print('''proxy:{0}
OS:{1}
thread:{2}'''.format(cfg.proxy_onner, lib.what_is_my_platform(),
                     cfg.max_thread))

# --- Start the worker threads --- #
for x in range(cfg.max_thread):
    threading.Thread(target=lib.potok, args=(
        how,
        bar_finish,
    )).start()
    if lib.local_how >= how:
        break

print('\n\n')

# Wait until only the main thread remains
while len(threading.enumerate()) != 1:
    time.sleep(0.1)

    if lib.local_how >= how:
        break

# --- Print the elapsed time --- #
print(Fore.RESET + '\n\n[Finished in ' + Style.BRIGHT + Fore.YELLOW +
      str(round(time.time() - start, 2)) + Fore.RESET + 's]')

os.abort()  # Terminate
Esempio n. 55
0
def db_commit(block_id, op, op_data, txid, vtxindex, db_state=None):
    """
    (required by virtualchain state engine)

    Advance the state of the state engine: get a list of all
    externally visible state transitions.
   
    Given a block ID and checked opcode, record it as 
    part of the database.  This does *not* need to write 
    the data to persistent storage, since save() will be 
    called once per block processed.
  
    Returns one or more new name operations on success, which will 
    be fed into virtualchain to translate into a string
    to be used to generate this block's consensus hash.
    """
    try:
        assert db_state is not None
    except:
        log.error("FATAL: no state given")
        os.abort()

    if op != 'virtualchain_final':
        # ongoing processing.
        # do sanity checks
        try:
            assert '__original_op_data__' in op_data, 'BUG: no __original_op_data__'
            assert 'txid' in op_data, "BUG: No txid given"
            assert 'vtxindex' in op_data, "BUG: No vtxindex given"
            assert op_data['txid'] == txid, "BUG: txid mismatch"
            assert op_data['vtxindex'] == vtxindex, "BUG: vtxindex mismatch"

            opcode = op_data.get('opcode', None)
            assert opcode in OPCODE_PREORDER_OPS + OPCODE_CREATION_OPS + OPCODE_TRANSITION_OPS + OPCODE_STATELESS_OPS + OPCODE_TOKEN_OPS, \
                            "BUG: uncategorized opcode '%s'" % opcode

        except Exception as e:
            log.exception(e)
            log.error("FATAL: failed to commit operation")
            os.abort()

        # from db_parse
        original_op_data = op_data['__original_op_data__']
        del op_data['__original_op_data__']

        # save, and get the sequence of committed operations
        consensus_ops = []
        if opcode in OPCODE_STATELESS_OPS:
            # state-less operation
            consensus_ops = []

        else:
            consensus_op = db_state.commit_operation(original_op_data, op_data,
                                                     block_id)

            # make sure compatibility quirks are preserved
            check_quirks(block_id, consensus_op, db_state)

            consensus_ops = [consensus_op]

        return consensus_ops
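
To make the sanity checks above concrete, here is a hedged sketch of the minimal shape op_data must have on entry. The key names come straight from the asserts; the opcode string and the surrounding variables (raw_op, block_id, txid, vtxindex, db) are assumptions for illustration.

raw_op = {'opcode': 'NAME_REGISTRATION'}  # pretend output of db_parse
op_data = dict(raw_op)
op_data.update({
    'txid': txid,                    # must equal the txid argument
    'vtxindex': vtxindex,            # must equal the vtxindex argument
    '__original_op_data__': raw_op,  # stashed raw parse; stripped before commit
})
consensus_ops = db_commit(block_id, op_data['opcode'], op_data,
                          txid, vtxindex, db_state=db)
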
Esempio n. 56
0
def defineParams(tree, op):
    root = tree.getroot()

    verbSelected = list(filter(lambda verb: verb == HTTP_VERB, HTTP_VERBS))

    if verbSelected == []:
        print('ERROR:', 'Use an HTTP verb supported by the Orion-LD Broker.')
        os.abort()

    for item in root.iter('collectionProp'):
        if item.attrib['name'] == 'HeaderManager.headers':
            for key, value in HEADERS.items():
                header = setHeaders(key, value)
                item.append(header)
        if PERFORMANCE_METRICS != {} and item.attrib['name'] == 'samplers':
            if "CPU" in PERFORMANCE_METRICS:
                for props in PERFORMANCE_METRICS["CPU"]:
                    index = PERFORMANCE_METRICS["CPU"].index(props)
                    performanceProps = addPerformanceMetricsProps(
                        'cpu', index, props)
                    item.append(performanceProps)

            if "RAM" in PERFORMANCE_METRICS:
                for props in PERFORMANCE_METRICS["RAM"]:
                    index = PERFORMANCE_METRICS["RAM"].index(props)
                    performanceProps = addPerformanceMetricsProps(
                        'ram', index, props)
                    item.append(performanceProps)

    for item in root.iter('stringProp'):
        if item.attrib['name'] == "ThreadGroup.num_threads":
            item.text = str(NUM_OF_THREADS)
        if item.attrib['name'] == "LoopController.loops":
            item.text = str(LOOPS)
        if item.attrib['name'] == "ThreadGroup.ramp_time":
            item.text = str(RAMP_TIME)
        if item.attrib['name'] == "HTTPSampler.domain":
            item.text = str(DOMAIN_IP)
        if item.attrib['name'] == "HTTPSampler.port":
            item.text = str(PORT)
        if item.attrib['name'] == "HTTPSampler.path":
            item.text = str(PATH_SERVICE)
        if item.attrib['name'] == "HTTPSampler.method":
            item.text = str(HTTP_VERB)
        if item.attrib['name'] == "filename" and item.text:
            if GENERATE_DATA_OF_CPU_RAM:
                dirpath = os.getcwd()
                if int(op) == GENERATE_COMPLETE_REPORT:
                    item.text = dirpath + '/output/' + OUTPUT_FILE_NAME + '_Report/' + item.text
                else:
                    item.text = dirpath + '/output/' + OUTPUT_FILE_NAME + '/' + item.text
            else:
                item.text = ''

        if HTTP_VERB == "GET":
            if item.attrib['name'] == "Argument.value":
                item.text = ''
        elif HTTP_VERB == "POST":
            if item.attrib['name'] == "Argument.value":
                item.text = BODY_DATA
        elif HTTP_VERB == "PATCH" or HTTP_VERB == "DELETE" or HTTP_VERB == "PUT":
            if item.attrib['name'] == "Argument.value":
                item.text = BODY_DATA
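
A possible driver for this function, assuming the JMeter test plan lives in a .jmx file on disk; the filenames are placeholders, and the op value only matters in comparison to GENERATE_COMPLETE_REPORT above.

import xml.etree.ElementTree as ET

tree = ET.parse('test_plan.jmx')        # hypothetical input plan
defineParams(tree, op=0)                # rewrite headers, thread counts, paths in place
tree.write('test_plan_configured.jmx')  # persist the modified plan
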
Esempio n. 57
0
def db_save(block_height,
            consensus_hash,
            ops_hash,
            accepted_ops,
            virtualchain_ops_hints,
            db_state=None):
    """
   (required by virtualchain state engine)
   
   Save all persistent state to stable storage.
   Called once per block.

   In Blockstack's case, we save transactions as we process them.
   The only thing to do here is to synchronize the Atlas DB and clean up the 
   BlockstackDB instance in preparation for receiving the next blocks' transactions.

   Return True on success
   Return False on failure.
   """
    from ..atlas import atlasdb_sync_zonefiles

    if db_state is not None:

        blockstack_opts = get_blockstack_opts()
        new_zonefile_infos = None

        # vest any tokens for the next block (so they'll be immediately usable in the next block)
        try:
            db_state.commit_account_vesting(block_height + 1)
        except Exception as e:
            log.exception(e)
            log.fatal("Failed to vest accounts at {}+1".format(block_height))
            os.abort()

        try:
            # flush the database
            db_state.commit_finished(block_height)
        except Exception as e:
            log.exception(e)
            log.error("FATAL: failed to commit at block %s" % block_height)
            os.abort()

        try:
            atlas_state = None
            if hasattr(db_state,
                       'atlas_state') and db_state.atlas_state is not None:
                # normal course of action
                atlas_state = db_state.atlas_state

            # sync block data to atlas, if enabled
            if is_atlas_enabled(blockstack_opts):
                log.debug("Synchronize Atlas DB for {}".format(block_height))
                zonefile_dir = blockstack_opts['zonefiles']
                atlasdb_path = blockstack_opts['atlasdb_path']

                # NOTE: set end_block explicitly since db_state.lastblock still points to the previous block height
                gc.collect()
                new_zonefile_infos = atlasdb_sync_zonefiles(
                    db_state,
                    block_height,
                    zonefile_dir,
                    atlas_state,
                    path=atlasdb_path,
                    end_block=block_height + 1)
                gc.collect()

        except Exception as e:
            log.exception(e)
            log.error("FATAL: failed to update Atlas db at %s" % block_height)
            os.abort()

        try:
            # sync subdomain state for this block range, if enabled
            if is_subdomains_enabled(blockstack_opts):
                subdomain_index = None
                instantiated = False

                if hasattr(db_state, 'subdomain_index'
                           ) and db_state.subdomain_index is not None:
                    # normal course of action
                    subdomain_index = db_state.subdomain_index
                else:
                    # verifying a database
                    from ..subdomains import SubdomainIndex
                    log.warning("Instantiating subdomain index")
                    subdomain_index = SubdomainIndex(
                        blockstack_opts['subdomaindb_path'],
                        blockstack_opts=blockstack_opts)
                    instantiated = True

                log.debug(
                    "Synchronize subdomain index for {}".format(block_height))

                gc.collect()
                subdomain_index.index(block_height, block_height + 1)
                gc.collect()

                if instantiated:
                    # invalidate
                    subdomain_index.close()
                    subdomain_index = None

        except Exception as e:
            log.exception(e)
            log.error("FATAL: failed to update subdomains db at {}".format(
                block_height))
            os.abort()

        return True

    else:
        log.error("FATAL: no state engine given")
        os.abort()
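
For symmetry with db_commit, a hedged sketch of how the state engine presumably invokes this hook once per block; every argument value here is assumed. Note that despite the docstring, the code above never actually returns False: every failure path calls os.abort() directly.

ok = db_save(block_height, consensus_hash, ops_hash,
             accepted_ops, virtualchain_ops_hints, db_state=db)
assert ok  # reaching this line at all implies success
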
Esempio n. 58
0
def ValueProcess(Value):
    encoding = 'ascii'

    NewValue = Value.decode(encoding)  # decode the raw bytes first

    FinalBuf = '======>>start \n\r'

    # First locate ======>>start / ======>>end to bound the search region
    Start = re.search(KeyValue[0], NewValue)
    if Start is None:
        return    # start marker not found
    End = re.search(KeyValue[5], NewValue)
    if End is None:
        return    # end marker not found

    # All further matching happens inside one ========>>start ... ========>>end region

    # Do one search up front to confirm a jet_malloc: is present

    StartEndBuf = NewValue[Start.end():End.start()].lstrip('\r\n')

    MallocStart = re.search(KeyValue[1], StartEndBuf)

    if MallocStart is None:
        print('start none \n')

    # loop initial state

    buf = NewValue[Start.end():End.start()]
    count = 0

    while True:  # process matches in a loop
        Newbuf, err = FindAndDeleteKeyValue(buf)

        print('NewBuf:', len(Newbuf), 'err:', err)

        if err != 5:
            if err == 1:    # no jet_malloc found
                # leave the loop
                print("summary: may have %d memory leak positions\n" % count)
                break
            elif err == 2:  # found a jet_malloc but no matching jet_free
                # locate the jet_malloc and narrow the search range
                TempMalloc = re.search(KeyValue[1], buf[0:len(buf)])
                FinalBuf += buf[0:TempMalloc.end()]
                buf = Newbuf[TempMalloc.end():]
                count += 1
            elif err == 3:  # error deleting the line before jet_malloc
                # unrecoverable
                print('error 3')
                os.abort()
            elif err == 4:  # error deleting the line before jet_free
                # unrecoverable
                print('error 4')
                print(Newbuf)
                os.abort()
        else:
            buf = Newbuf

    FinalBuf += '\r\n======>>end \r\n'
    print('FinalBuf:\n', FinalBuf)
    return FinalBuf
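
A minimal driver sketch, assuming the allocator trace has been captured to a file; the filename is hypothetical, and KeyValue / FindAndDeleteKeyValue are taken as given from the snippet's surrounding module.

with open('jet_malloc.log', 'rb') as f:  # hypothetical capture file
    report = ValueProcess(f.read())      # bytes in, annotated report out
if report is None:
    print('no ======>>start / ======>>end region found')
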
Esempio n. 59
0
def stop():
    root.destroy()  # tear down the Tk window
    client.close()  # close the network socket
    os.abort()      # hard-exit; skips interpreter cleanup, unlike sys.exit()
Esempio n. 60
0
def class_code(class_code, user_code, day):
    if day > 365:
        flash('Day number out of range')
        abort(404)
    subject = SubjectCode.query.filter_by(code=class_code).first()
    prev_next = {'next_day':url_for('class_code', class_code=class_code, user_code=user_code, day=day+1),
                 'prev_day':url_for('class_code', class_code=class_code, user_code=user_code, day=day-1),
                 'today':url_for('class_code', class_code=class_code, user_code=user_code, day=datetime.date.today().timetuple().tm_yday)}
    if not subject:
        flash('Class could not be found')
        return redirect(url_for('classes', user_code=current_user.user_code))
    user = User.query.filter_by(user_code=user_code).first()
    if user:
        total_days = day_num() - day
        current_date = datetime.date.today() - datetime.timedelta(days=total_days)
        
        # A teacher or admin viewing their own class
        if (current_user.auth in ['teacher', 'admin'] and class_code in
            [x.subject.code for x in current_user.subjects]) and user == current_user:

            # Checking whether the class runs on the particular date
            check = check_class_date(subject_date=current_date, subject=subject)
            student_times = [] # Variable for the times of the students in the class
            students_in_class = 0 # Variable used to check if there are students in the class
           
            if 'student' in [user.user_type for user in subject.users]:
                students_in_class = 1
                # Getting the attendance status of each student in the class for a specific date
                for user in subject.users:
                    added_time = False
                    if user.user_type == 'student':
                        if user.user.attnd_times:
                            for t in user.user.attnd_times:
                                if t.time.date() == current_date and t.subject == subject.id:
                                    student_times.append((user.user, t.attnd_status))
                                    added_time = True
                                    break
                            if not added_time:
                                student_times.append((user.user, "N/A"))
                        else:
                            student_times.append((user.user, "N/A"))
            if check:
                return render_template("teacherclass.html", day_num=day_num(), subject=subject, user=current_user,
                                        days=CONSTANT_DAYS, students_in_class=students_in_class,
                                        current_date=current_date.strftime('%d/%m/%y'), student_times=student_times,
                                        time=check, week=current_week(current_date),
                                        day=CONSTANT_DAYS[current_date.isoweekday()-1], prev_next=prev_next,
                                        viewing_day=day)
            return render_template("teacherclass.html", day_num=day_num(), subject=subject, user=current_user,
                                    days=CONSTANT_DAYS, students_in_class=students_in_class,
                                    current_date=current_date.strftime('%d/%m/%y'), student_times=None,
                                    week=current_week(current_date), day=CONSTANT_DAYS[current_date.isoweekday()-1],
                                    prev_next=prev_next, viewing_day=day)
            
            
        # User is a teacher viewing the class of a student
        # For this one, show the student's attendance and allow changing it
        elif current_user.auth in ['teacher', 'admin'] and user.auth == 'student':
            form = AddStudentAttndTime(day=current_date.day, month=current_date.month)

            if request.method == "POST" and form.validate_on_submit():
                date = datetime.date(year=datetime.date.today().year, month=form.month.data, day=form.day.data)
                subject_datetime = check_class_date(date, subject)
                if subject_datetime:
                    duplicate_attendance_time = AttendanceTime.query.filter_by(time=datetime.datetime(year=date.year, month=date.month, day=date.day,
                                                                            hour=subject_datetime['subject'].time.start_time.hour,
                                                                            minute=subject_datetime['subject'].time.start_time.minute,
                                                                            second=subject_datetime['subject'].time.start_time.second),
                                                                            user=user.id, subject=subject.id).first()
                    if duplicate_attendance_time: # checks if there is already an attendance time for this subject and user on a particular day
                        # Updates the attendance status instead of adding a new entry to the database for this subject, user and time of attendance
                        if duplicate_attendance_time.attnd_status != form.status.data:
                            duplicate_attendance_time.attnd_status = form.status.data
                            try:
                                db.session.flush()
                            except:
                                db.session.rollback()
                                flash('Something went wrong, attendance status was not updated')
                            else:
                                db.session.commit()
                                flash('Successfully updated attendance for this date')
                        else:
                            flash('Attendance was not updated')
                    else:
                        # if there is no duplicate of the attendance time, user, and class then add a new entry for that attendance
                        # into the database
                        try:
                            new_time = AttendanceTime(time=datetime.datetime(year=date.year, month=date.month, day=date.day,
                                                      hour=subject_datetime['subject'].time.start_time.hour,
                                                      minute=subject_datetime['subject'].time.start_time.minute,
                                                      second=subject_datetime['subject'].time.start_time.second),
                                                      user=user.id, attnd_status=form.status.data, subject=subject.id)
                            db.session.add(new_time)
                            db.session.flush()
                        except:
                            db.session.rollback()
                        else:
                            db.session.commit()
                            flash('Successfully added new attendance to the database')
                else:
                    flash('Date was not a valid date for the subject')
            class_times = get_class_dates(subject)
            # This checks if the user has an attendance for a particular day
            user_attendance_times = AttendanceTime.query.filter_by(user=user.id, subject=subject.id).all()
            user_attendance_today = None
            for i in user_attendance_times:
                if i.time.date() == current_date:
                    user_attendance_today = i
            return render_template("teacherstudentclass.html", subject=subject, user=user, days=CONSTANT_DAYS, student_times=user_attendance_times, form=form,
                                    class_times=class_times, current_date=current_date.strftime('%d/%m/%y'), day=day, today_date=datetime.date.today().strftime('%d/%m/%y'),
                                    teacher=current_user, user_attendance_today=user_attendance_today)
        
        # User is a student and they're viewing their class
        elif (current_user.auth == 'student' and class_code in
            [x.subject.code for x in current_user.subjects]):
            # Gets the user's attendance times from the database
            student_attnd_times = AttendanceTime.query.filter_by(user=user.id, subject=subject.id).all()
            student_attnd_times_weeks = []
            for i in student_attnd_times:
                if ((ceil((i.time.timetuple().tm_yday)/7))) % 2 == 0:
                    student_attnd_times_weeks.append((i, i.time.timetuple().tm_wday, "A"))
                else:
                    student_attnd_times_weeks.append((i, i.time.timetuple().tm_wday, "B"))
            attendance_on_day = None
            for time in student_attnd_times:
                if time.time.date() == (datetime.date(2019, 12, 31) + datetime.timedelta(days=day)):
                    attendance_on_day = time
            return render_template("studentclass.html", subject=subject, days=CONSTANT_DAYS, attnd_times=student_attnd_times_weeks,
                                    attnd_day=attendance_on_day, current_date=current_date)
        
        # User is a student but they're not viewing one of their classes
        else:
            flash('You do not have access to this page')
            return redirect(url_for('classes', user_code=current_user.user_code))
    return render_template("teacherclass.html", subject=subject)