def endGame(bot, currentMessage, chat_id): # /quit behavior ends the game for everyone
    gameRecords = Base("chatStorage/records.pdl")
    gameRecords.open()
    rec = gameRecords._groupChatID[str(chat_id)] # Select the records matching chat_id (at most one)
    if not rec: # No game registered for this chat
        botSendFunctions.sendText(bot, chat_id, "Invalid command format")
        return
    rec = rec[-1] # Take the most recent record

    pointsBoard = "Here are the scores\n" # Send the final scores to the group
    for name, points in zip(rec['memberUsernames'].split(), rec['memberPoints'].split()):
        pointsBoard += str(name) + ": " + str(points) + " point(s)\n"
    botSendFunctions.sendText(bot, chat_id, pointsBoard)

    for player in rec['memberChatIDs'].split(): # Clean out the playerCards global
        globalVars.playerCards.pop(player, None) # pop() ignores players that are already gone
    # Remove the game's per-game state; pop() avoids one missing key
    # aborting the cleanup of the others
    globalVars.resp.pop(rec['gameID'], None) # The game's responses
    globalVars.currBlackCard.pop(rec['gameID'], None) # The game's current black card
    globalVars.whiteCards.pop(rec['gameID'], None) # The game's white cards
    gameRecords.delete(rec) # Remove the database record
    gameRecords.commit() # Save the changes
    botSendFunctions.sendText(bot, chat_id, "Goodbye!")
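# Note: the `_groupChatID` attribute used above is PyDbLite's secondary-index
# accessor; it only exists if an index was created when the base was set up.
# A minimal sketch of that setup (field names are assumed from the record keys
# used above; the import path varies across PyDbLite versions):
from pydblite.pydblite import Base

gameRecords = Base("chatStorage/records.pdl")
gameRecords.create('groupChatID', 'gameID', 'memberUsernames', 'memberPoints',
                   'memberChatIDs', mode="open") # "open" reuses an existing file
gameRecords.create_index('groupChatID') # enables gameRecords._groupChatID[value] lookups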
Example #2
    def do_Delete(self, result, request, args):
        def transformType(x): # Coerce Python 2 unicode to str; defined but unused here
            if isinstance(x, unicode): return str(x)
            else: return x
    
        #######    Replace this section by your logic   #######
        db = Base('database_service6.pdl')
        db.create('testId', 'testMessage', mode="open") # Open the base if the file already exists
        result = db(testId=int(args['testId'])) # Keyword select on the testId field
        
        if len(result) == 0:
            responseCode = 404 #ResponseCode.NotFound
            responseBody = json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '))
        else:
            responseCode = 200 #ResponseCode.Ok
            responseBody = json.dumps(result[0], sort_keys=True, indent=4, separators=(',', ': '))
            db.delete(result[0])
            db.commit()
        #######    Replace this section by your logic   #######


        request.setResponseCode(responseCode)
        resp = utils.serviceResponse(responseCode, responseBody)
        
        return resp
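# For reference, the select-then-delete round trip the handler performs can be
# exercised standalone; a minimal sketch assuming the same 'testId' and
# 'testMessage' fields (the import path varies across PyDbLite versions):
from pydblite.pydblite import Base

db = Base('database_service6.pdl')
db.create('testId', 'testMessage', mode="open")
db.insert(testId=42, testMessage="hello") # insert() returns the new record's id
matches = db(testId=42) # keyword select returns a list of matching records
if matches:
    db.delete(matches[0]) # delete() accepts a record or a list of records
    db.commit() # persist the change to disk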
Example #3
class YahoourlsearcherPipeline(object):
    def open_spider(self, spider):

        filename = "urls_log.txt"
        self.log_target = codecs.open(filename, 'a+', encoding='utf-8')
        self.log_target.truncate() # Clear any previous log contents

        self.db = Base('URL_database.pdl')
        self.db.create('url', 'date', mode="open") # Open the base if the file already exists
        session_start = datetime.datetime.now().strftime(' %Y-%m-%d %H:%M:%S ')
        self.log_target.write("*** New url scraping session started at: " + session_start + " ***" + "\n")
        print("*** New url scraping session started at: " + session_start + " ***" + "\n")
        self.log_target.write("*** Total url in the Database BEFORE new search: " + str(len(self.db)) + " ***" + "\n")


        dispatcher.connect(self.spider_closed, signals.spider_closed)


    def process_item(self, item, spider):
        self.db.insert(url=item['url'],
                       date=item['date']
                       )
        self.log_target.write(item['url'] + "  " + item['date'] + "\n")
        self.db.commit()
        return item

    def spider_closed(self, spider):
        url_structure = []
        print("End of database")
        for r in self.db:
            #print(str(r["url"]) + " " + str(r["date"]) + " \n")
            url_structure.append(url_date(r["url"], r["date"]))
        print(str(len(url_structure)) + " urls in the DB\n")
        session_end = datetime.datetime.now().strftime(' %Y-%m-%d %H:%M:%S ')
        self.log_target.write("Session ends at: " + session_end + "\n")
        print("Session ends at: " + session_end + "\n")
        self.log_target.write("*** Total url in the Database AFTER the search: " + str(len(self.db)) + " ***" + "\n")

        print("Records in the database: " + str(len(self.db)) + " in structure: " + str(len(url_structure)))
        all_record = [r for r in self.db]
        self.db.delete(all_record) # Empty the base before re-inserting the deduplicated urls
        print("Records in the database: " + str(len(self.db)))

        # Set-style deduplication: keying by url keeps one record per url (the last seen)
        url_structure = {x.url: x for x in url_structure}.values()

        for any_url in url_structure:
            self.db.insert(any_url.url, any_url.date) # Positional insert follows the field order

        print("Records in the database: " + str(len(self.db)))
        self.db.commit()
        self.log_target.write("--- After SET operation: " + str(len(self.db)) + " --- " + "\n" + "\n" + "\n" + "\n")

        self.log_target.close()
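# The dict comprehension at the end of spider_closed is set-style
# deduplication keyed on the url; the same idiom in isolation, with a
# hypothetical url_date namedtuple standing in for the one the pipeline uses:
from collections import namedtuple

url_date = namedtuple('url_date', ['url', 'date'])
records = [url_date('a.com', '2020-01-01'),
           url_date('b.com', '2020-01-02'),
           url_date('a.com', '2020-01-03')]
deduped = list({x.url: x for x in records}.values())
# One entry per url survives, and a later record replaces an earlier one:
# [url_date('a.com', '2020-01-03'), url_date('b.com', '2020-01-02')]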
Example #4
def yolo(request):
    db = Base('backendDB.pdl')
    # mode="override" recreates the base from scratch, so it starts empty and
    # the delete loop below only matters if the mode is later changed
    db.create('Type', 'Log', 'Run', 'Prefix', 'Rule', 'Threshold', 'TimeStamp', 'Status', mode="override")

    # if db.exists():
    #     db.open()
    records = []
    for r in db:
        records.append(r)
    db.delete(records)
    db.commit()
    # encoding.encode("SepsisCasesEventLog.xes", 8)
    
    # prediction.regressior("SepsisCasesEventLog.xes", 8, 'complexIndex', "Kmeans", 'linear')
    # prediction.classifier("SepsisCasesEventLog.xes", 8, 'simpleIndex', "None", 'DecisionTree', 'duration', 'default')
    # prediction.classifier("SepsisCasesEventLog.xes", 8, 'simpleIndex', "Kmeans", 'DecisionTree', 'duration', 'default')
    # prediction.classifier("SepsisCasesEventLog.xes", 8, 'boolean', "Kmeans", 'DecisionTree', 'duration', 'default')

    # prediction.classifier("SepsisCasesEventLog.xes", 8, 'simpleIndex', "None", 'DecisionTree', 'remainingTime', 'default')

    # prediction.classifier("Production.xes", 5, 'simpleIndex', "Kmeans", 'RandomForest')
    # prediction.classifier("Production.xes", 5, 'simpleIndex', "Kmeans", 'DecisionTree')

    # prediction.classifier("Production.xes", 13, 'simpleIndex', "None", 'KNN')
    # prediction.classifier("Production.xes", 13, 'simpleIndex', "None", 'RandomForest')
    # prediction.classifier("Production.xes", 3, 'complexIndex', "Kmeans", 'DecisionTree')



    #regression.linear()

    # df = pd.read_csv(filepath_or_buffer='core_encodedFiles/simpleIndex_Production.xes_16.csv', header=0)
    # data_ = df[["Id", "remainingTime"]]

    # estimator = DBSCAN(eps=0.3, min_samples=10,  metric='haversine')    
    # estimator.fit(data_)
    # print estimator
    # labels = estimator.labels_
    # n_clusters = len(set(labels)) - (1 if -1 in labels else 0)
    # print n_clusters
    # cluster_lists = {i: df.iloc[np.where(estimator.labels_ == i)[0]] for i in range(n_clusters)}
    # print len(cluster_lists)    
    # writeHeader = True
    # for cluster_list in cluster_lists:
    #     clusterd_data = cluster_lists[cluster_list]
    #     original_cluster_data = cluster_lists[cluster_list]
    #     lm = Lasso(fit_intercept=True, warm_start=True)
    #     y = clusterd_data['remainingTime']
    #     clusterd_data = clusterd_data.drop('remainingTime', 1)
    #     lm.fit(clusterd_data, y)
    #     original_cluster_data['prediction'] = lm.predict(clusterd_data)
    #     if writeHeader is True:
    #         original_cluster_data.to_csv('core_results/cluster.csv', sep=',', mode='a', header=True, index=False)
    #         writeHeader = False
    #     else:
    #         original_cluster_data.to_csv('core_results/cluster.csv', sep=',', mode='a', header=False, index=False)
            

    # 

    #regression.linear("Production.xes", 9, 'xg', "sd")
    # # regression.linear("Production.xes", 5, 'simple_index', "sd")
    # regression.xgboost("Production.xes", 9, 'simpleIndex', "sd")
    # # fileName, prefix, encoding, cluster, regression
    # # django_rq.enqueue(tasks.regressionTask,"Production.xes", 5, 'simple_index', "sd", "xgboost")
    return HttpResponse("YOLO")
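# Aside: delete() takes a list (or, as Example #5 below shows, a generator) of
# records, so the wipe at the top of yolo() collapses to two lines:
db.delete(list(db)) # iterating a base yields all of its records
db.commit()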
Example #5
for r in db('age') >= 20: # field selectors support comparison operators
    print(r)
print('#######')
print('shortguy')
for r in db('name').ilike('jeremy'):
    print(r)
print('#######')
# the base supports generator/list comprehensions
really_really_really_short = next(r for r in db('size') < 1.0) # first record with size < 1.0

# update() takes record(s) and new value(s), and keeps the indices in sync
db.update(really_really_really_short, size=0.1)
fp()  # even shorter
db.update(db, age='23') # passing the whole base updates every record
fp()

# delete supports single and multiple records
db.delete(r for r in db('size') >= 0.2)
fp()
del db[next(r for r in db('size') < 0.2)['__id__']] # delete by record __id__
fp()

#useful utility functions
db.add_field('mood',
             default='catcucumber')  # adds field, with optional default value
db.drop_field('mood')  # drops field
db.path  # path of db, can be changed
db.name  # name of db, stripped of path
db.fields  # fields of db, excludes __id__ & __version__
len(db)  # number of records in db
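# The fp() helper called throughout this example is not defined in the
# excerpt; a plausible stand-in that simply dumps every record:
def fp():
    for r in db: # iterate all records currently in the base
        print(r)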
Example #6
class DataBase:
    def __init__(self, mode):
        self.db = Base("./mlfv_hosts.pdl")
        self.db.create('ip',
                       'port',
                       'libs',
                       'cpu',
                       'mem',
                       'net',
                       'runs',
                       mode=mode)

    def insert_reg(self, r):
        if len(r) < 6: # A host record needs all six fields
            print("Bad insertion")
            return False
        if self.db.exists():
            print(self.db.insert(ip=r['ip'],
                                 port=r['port'],
                                 libs=r['libs'],
                                 cpu=r['cpu'],
                                 mem=r['mem'],
                                 net=r['net'],
                                 runs=0))
            self.db.commit()
            return True
        return False

    def remove_reg(self, r):
        if self.db.exists():
            for i in self.db:
                if i['ip'] == r['ip'] and i['port'] == r['port']:
                    self.db.delete(i)
                    self.db.commit()
                    print(str(i['ip']) + " removed\n")
                    return True
        return False

    def print_all(self):
        if self.db.exists():
            for r in self.db:
                print(r)

    def get_less_runs(self):
        if self.db.exists():
            less_runs = sys.maxsize
            new_host = None
            for r in self.db:
                if r['runs'] < less_runs:
                    new_host = r
                    less_runs = r['runs']
            if new_host is None: # Empty base
                return None
            self.db.update(new_host, runs=new_host['runs'] + 1) # update() keeps indices in sync
            self.db.commit()
            return new_host

    def get_hosts_cpu_mem(self, cpu, mem):
        print("cpu", cpu, mem)
        ret = []
        if self.db.exists():
            for r in (self.db("cpu") >= cpu) & (self.db("mem") >= mem):
                ret.append([
                    r['ip'], r['port'], r['libs'], r['cpu'], r['mem'], r['net']
                ])
            #ret = [r for r in self.db if r['cpu'] >= cpu and r['mem'] >= mem]
            return ret
        else:
            print("Error: no client!")
            return None

    def get_registers_values(self):
        if self.db.exists():
            l = []
            for r in self.db:
                l.append([
                    r['ip'], r['port'], r['libs'], r['cpu'], r['mem'], r['net']
                ])
            a = np.array(l, dtype=object)
            return a
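# A hypothetical round trip through the wrapper above; the host values are
# illustrative only:
db = DataBase(mode="open") # "open" reuses mlfv_hosts.pdl if it exists
db.insert_reg({'ip': '10.0.0.1', 'port': 5000, 'libs': 'numpy,sklearn',
               'cpu': 4, 'mem': 8192, 'net': 100})
print(db.get_hosts_cpu_mem(cpu=2, mem=4096)) # hosts with >= 2 CPUs and 4096 MB
host = db.get_less_runs() # least-used host; its run counter is incremented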