Example #1
def __init__(self):
    # Initialize
    self.model = None
    self.optimizer = None
    self.scheduler = None
    self.solver = None
    self.opts, self.args = parser.parse_args()
    opts = self.opts
    timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M')
    self.name = ('%s N-%d ep-%d lr-%f geom-%s' %
                 (timestamp, opts.train_size or 0,
                  opts.num_epochs, opts.lr, opts.geometry))
    # Set logger
    if self.opts.no_prefix:
        log_name = self.name + '.log'
    else:
        log_name = 'out.log'
        self.opts.outdir = os.path.join(self.opts.outdir or '', self.name)
    if self.opts.logfile:
        logger.Logger(self.opts.logfile)
    elif self.opts.outdir:
        logger.Logger(self.opts.outdir, log_name)
    print(sys.argv)
    print(opts)
    print('PID %d' % (os.getpid(),))
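A side note on the `self.opts, self.args = parser.parse_args()` line: the two-value unpacking is the legacy `optparse` API, which returns an `(options, args)` pair rather than argparse's single namespace. A minimal, self-contained sketch of that pattern (the option names here are illustrative stand-ins, not the project's actual flags):

# optparse sketch; these options are hypothetical examples
from optparse import OptionParser

parser = OptionParser()
parser.add_option('--lr', type='float', default=1e-3, help='learning rate')
parser.add_option('--num-epochs', dest='num_epochs', type='int', default=10)

# parse_args() returns (options, positional_args) as a 2-tuple,
# unlike argparse's single Namespace
opts, args = parser.parse_args()
print(opts.lr, opts.num_epochs, args)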
Example #2
def startup(settingsFile, debugMode=False):
    """tiBot startup: checks the database, twitter and imgur."""
    setting = settings.Settings(settingsFile)
    if setting.check_all_settings() is False:
        # The logger is not configured yet at this point, so fall back to print
        print('Failed to load settings')
        return
    if debugMode:
        logSettings = logger.Logger(setting.logFileName,
                                    setting.logFolder,
                                    maxSize=setting.logSize,
                                    printLevel=logger.LogLevel.INFO)
    else:
        logSettings = logger.Logger(setting.logFileName,
                                    setting.logFolder,
                                    maxSize=setting.logSize,
                                    printLevel=setting.LOGGER_PRINT_LEVEL)
    logSettings.log(logger.LogLevel.INFO,
                    'tiBot is starting up',
                    forcePrint=True)

    db = check_database(logSettings, setting.database)
    if db is None:
        logSettings.log(logger.LogLevel.CRITICAL,
                        'Database is not set up correctly. Exiting..')
        return

    twit = twitter.Twitter(logSettings, setting)
    twit.authenticated = twit.authenticate()
    if twit.authenticated is False:
        logSettings.log(logger.LogLevel.CRITICAL,
                        'Twitter is not authenticated. Exiting..')
        return

    imgr = imgur.Imgur(logSettings, setting)
    imgr.authenticated = imgr.authenticate()
    if imgr.authenticated is False:
        logSettings.log(logger.LogLevel.CRITICAL,
                        'Imgur is not authenticated. Exiting..')
        return
    if debugMode:
        logSettings.log(
            logger.LogLevel.INFO,
            'Everything is set up, starting tiBot in debug mode..')
        debug.debug(logSettings, setting, db, twit, imgr)
    else:
        logSettings.log(logger.LogLevel.INFO,
                        'Everything is set up, starting tiBot..')
        main.start(logSettings, setting, db, twit, imgr)
Example #3
File: rl_agent.py Project: justinjfu/awr
    def train(self, max_iter, test_episodes, output_dir, output_iters):
        log_file = os.path.join(output_dir, "log.txt")
        self._logger = logger.Logger()
        self._logger.configure_output_file(log_file)

        model_file = os.path.join(output_dir, "model.ckpt")

        iter = 0
        total_train_return = 0
        total_train_path_count = 0
        test_return = 0
        test_path_count = 0
        start_time = time.time()

        while (iter < max_iter):
            train_return, train_path_count, new_sample_count = self._rollout_train(
                self._samples_per_iter)

            total_train_return += train_path_count * train_return
            total_train_path_count += train_path_count
            avg_train_return = total_train_return / total_train_path_count

            total_samples = self.get_total_samples()
            wall_time = time.time() - start_time
            wall_time /= 60 * 60  # store time in hours

            self._logger.log_tabular("Iteration", iter)
            self._logger.log_tabular("Wall_Time", wall_time)
            self._logger.log_tabular("Samples", total_samples)
            self._logger.log_tabular("Train_Return", avg_train_return)
            self._logger.log_tabular("Train_Paths", total_train_path_count)
            self._logger.log_tabular("Test_Return", test_return)
            self._logger.log_tabular("Test_Paths", test_path_count)

            if (self._need_normalizer_update() and iter == 0):
                self._update_normalizers()

            self._update(iter, new_sample_count)

            if (self._need_normalizer_update()):
                self._update_normalizers()

            if (iter % output_iters == 0):
                test_return, test_path_count = self._rollout_test(
                    test_episodes, print_info=False)
                self._logger.log_tabular("Test_Return", test_return)
                self._logger.log_tabular("Test_Paths", test_path_count)

                self.save_model(model_file)
                self._logger.print_tabular()
                self._logger.dump_tabular()

                #total_train_return = 0
                #total_train_path_count = 0
            else:
                self._logger.print_tabular()

            iter += 1

        return
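The `log_tabular` / `print_tabular` / `dump_tabular` trio above is the rllab-style tabular logging interface: values are staged per iteration under string keys, printed as a table, then flushed to the output file. As a rough sketch of that contract (an illustrative reimplementation, not the actual `logger` module from justinjfu/awr):

# Illustrative rllab-style tabular logger; not the awr implementation
class TabularLogger:
    def __init__(self):
        self._row = {}       # values staged for the current iteration
        self._file = None

    def configure_output_file(self, path):
        self._file = open(path, 'w')

    def log_tabular(self, key, value):
        # later calls with the same key overwrite the staged value,
        # which is how Test_Return above keeps its latest measurement
        self._row[key] = value

    def print_tabular(self):
        for key, value in self._row.items():
            print('{}: {}'.format(key, value))

    def dump_tabular(self):
        # append one record per iteration, then start a fresh row
        if self._file is not None:
            self._file.write(str(self._row) + '\n')
            self._file.flush()
        self._row = {}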
Example #4
def __init__(self, urls):
    self.urls = urls
    self.headers = config.DOWNLOADER_HEADERS
    self.timeout = config.DOWNLOADER_TIMEOUT
    self.size = config.DOWNLOADER_SIZE
    self.proxy_count = config.DOWNLOADER_PROXY_COUNT
    self.dbredis = dbredis.DBRedis()
    self.logger = logger.Logger()
Example #5
def __init__(self):
    self.dbredis = dbredis.DBRedis()
    self.logger = logger.Logger()
    self.url = config.URL
    with open(config.DATA_FILE, 'r') as datafile:
        self.datas = [
            data.replace('\n', '') for data in datafile.readlines()
        ]
Example #6
def __init__(self):
    self.host = config.REDIS_HOST
    self.port = config.REDIS_PORT
    self.redis = redis.StrictRedis(host=self.host, port=int(self.port),
                                   db=0, decode_responses=True)
    self.proxy_name = config.REDIS_PROXY_NAME
    self.start_name = config.REDIS_START_NAME
    self.index_name = config.REDIS_INDEX_NAME
    self.start_count = config.START_COUNT
    self.index_count = config.INDEX_COUNT
    self.logger = logger.Logger()
Example #7
File: avapi.py Project: jkmadsen/fintech
    def __init__(self, key, headers=None, queries=None, verbose=False):

        # required
        self.key = key

        # options (None defaults avoid shared mutable default arguments)
        self.headers = headers if headers is not None else {}
        self.queries = queries if queries is not None else {}
        self.verbose = verbose

        # config
        self.logger = logger.Logger()
Example #8
def startup(args):
    log = logger.Logger('yiasa_log', 'logs')
    log.log(logger.LogLevel.INFO, 'Yiasabot is starting. DebugMode: %s' % args.debug, force_print=True)

    db = check_database(log)
    if db is None:
        return

    if args.debug:
        debug.debug(log, db)
    else:
        main.start(log, db, args)
Example #9
def __init__(self, timestamp):
    self.timestamp = timestamp
    self.host = config.POSTGRE_HOST
    self.port = config.POSTGRE_PORT
    self.user = config.POSTGRE_USER
    self.passwd = config.POSTGRE_PASSWD
    self.dbname = config.POSTGRE_DBNAME
    self.conn = psycopg2.connect(host=self.host,
                                 port=self.port,
                                 user=self.user,
                                 password=self.passwd,
                                 dbname=self.dbname)
    self.logger = logger.Logger()
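A hedged usage sketch for the connection created above (the class name `DBPostgresql` is taken from Example #20, which instantiates this same wrapper; the query is a placeholder):

# Hypothetical usage; DBPostgresql wraps the psycopg2 connection shown above
db = DBPostgresql(timestamp='20170101_000000')
cur = db.conn.cursor()
cur.execute('select 1')
print(cur.fetchone())
cur.close()
db.conn.close()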
Example #10
    def __init__(self, sock, addr, command_class):
        # start a thread
        super().__init__(None, daemon=True)

        # if the class logger has not been initialized yet, create it
        try:
            self.__class__.logger
        except AttributeError:
            self.__class__.logger = logger.Logger("ClientHandler")

        self.active_connections.append(self)
        self.s = sock
        self.addr = addr
        self.commands = command_class(self)
        self.sessid = None
        self.connected = True
Example #11
    def __init__(self, args):
        super(Trainer, self).__init__(args)

        #### 0. Setup
        self.save_dir = tools.set_save_dir(args)
        with open(os.path.join(self.save_dir, "args.json"), "w") as j:
            json.dump(vars(args), j)

        #### 1. Models
        model = getattr(models, args.model)(args)
        print(
            "Model param nums: ",
            sum(p.numel() for p in model.parameters() if p.requires_grad),
        )

        self.model = model.cuda()

        #### 2. Opt
        self.optimizer = opts.get_optimizer(args, self.model)
        self.scheduler = None
        if self.args.lr_scheduler is not None:
            self.scheduler = opts.get_scheduler(args, self.optimizer)

        #### 3. Data

        if args.augment is not None:
            augmentation = getattr(augmentations, args.augment)
        else:
            augmentation = None
        self.train_loader, self.val_loader = inputs.get_dataloader(
            args, transform=augmentation)

        #### 4. Logger
        self.writer = writer.Writer(log_dir=self.save_dir)
        self.logger = logger.Logger()
        self.logger.open(os.path.join(self.save_dir, "log.train.txt"),
                         mode="a")
        self.logger.write("\n>> Pytorch version: {}".format(torch.__version__))
        self.logger.write("\n>> Args: {}".format(args))

        # Validator
        self.validator = Validator(
            args,
            is_trainval=True,
            writer=self.writer,
            val_loader=self.val_loader,
        )
Example #12
    def __init__(self,
                 command_class,
                 host=socket.gethostbyname(socket.gethostname()),
                 port=5000,
                 max_clients=5):
        # if the class logger has not been initialized yet, create it
        try:
            self.__class__.logger
        except AttributeError:
            self.__class__.logger = logger.Logger("Server")

        self.hostname = host
        self.port = port
        self.command_class = command_class
        self.max_clients = max_clients
        self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.__running = True
        self.start()
Example #13
    def __init__(self, args):
        self.args = args

        #### 0. Setup
        self.save_dir = tools.set_save_dir(args)
        with open(os.path.join(self.save_dir, "args.json"), "w") as j:
            json.dump(vars(args), j)

        #### 1. Data
        # TODO: augmentation
        augmentation = getattr(augmentations, args.augment)
        self.train_loader, self.val_loader = my_input.get_dataloader(
            args, transform=augmentation)

        #### 2. Model
        model = models.PENetClassifier(**vars(args))
        model.load_pretrained(PRETRAINED_WEIGHTS, "0")
        self.model = model.cuda()

        #### 3. Opt
        self.optimizer = opts.get_optimizer(args, self.model)
        self.scheduler = None
        if self.args.lr_scheduler is not None:
            self.scheduler = opts.get_scheduler(args, self.optimizer)

        #### 4. Logger
        self.writer = writer.Writer(log_dir=self.save_dir)
        self.logger = logger.Logger()
        self.logger.open(os.path.join(self.save_dir, "log.train.txt"),
                         mode="a")
        self.logger.write("\n>> Pytorch version: {}".format(torch.__version__))
        self.logger.write("\n>> Args: {}".format(args))

        # self.visualizer = visualizer.Visualizer(
        #     args, "train", self.save_dir, self.writer
        # )

        # Validator
        self.validator = Validator(
            args,
            is_trainval=True,
            writer=self.writer,
            val_loader=self.val_loader,
        )
Example #14
# -*- coding: utf-8 -*-

import os
import sys
import traceback
ROOT = os.getcwd()
sys.path.append(ROOT)

from util import logger

if __name__ == "__main__":
    log = logger.Logger()
    try:
        print(10 / 0)
    except Exception:
        error = traceback.format_exc()
        log.error(message=error)
Example #15
                      dest="NoTimer",
                      action='store_true',
                      help="no timestamp should be displayed")

    (options, args) = parser.parse_args()
    return options


def detect_os():
    operating_system = platform.system()
    LOGGER.info("Detected " + operating_system + " as Operating System")
    return operating_system


options = parse_start_arguments()
LOGGER = logger.Logger(options.NoTimer)
# store the detected OS name under current_os so the os module is not shadowed
current_os = detect_os()


def start_unity_build_command():
    LOGGER.info("Start Unity Build")
    try:
        build_command = options.UnityPath + " -projectPath " + options.ProjectPath + \
                       " -logfile " + options.LogPath + \
                       " -buildTarget " + options.Target + \
                       " -quit " \
                       "-batchmode " \
                       "-nographics " \
                       "-executeMethod " + options.ExecutionMethod
        if os != "Windows":
            process = subprocess.Popen(build_command,
Example #16
def __init__(self, credential_path):
    self.logger = logger.Logger(name=__name__)
    self.twitter_client = self.build_twitter_client(credential_path)
Example #17
    args = my_args

    np.random.seed(args.seed)
    outputpath = os.path.join(args.output, args.id)

    if not os.path.exists(args.output):
        os.mkdir(args.output)

    if not os.path.exists(outputpath):
        os.mkdir(outputpath)

    files = glob.glob(os.path.join(outputpath, '*'))
    for f in files:
        os.remove(f)
    sys.stdout = logger.Logger(os.path.join(outputpath, 'out.log'))

    print('-' * 50)
    print('Run ID:', args.id)
    print('Output directory:', os.path.join(args.output, args.id))
    print('Solver:', args.solver)
    print('Rounds:', args.rounds)
    print('Maximum tries:', args.limit)
    print('-' * 50)

    if args.complexity:
        complexityAnalysis(limit=args.limit,
                           matches=args.matches,
                           outputpath=outputpath,
                           rounds=args.rounds,
                           solver=args.solver,
Example #18
import json
import time
from datetime import datetime as dt
from util import logger as Logger
from google.cloud import language
from google.cloud.language_v1 import enums
from google.cloud.language_v1 import types

logger = Logger.Logger(__name__)


def estimate_sentiment(client, sentence):
    document = types.Document(content=sentence,
                              type=enums.Document.Type.PLAIN_TEXT,
                              language="ja_JP")
    sentiment = client.analyze_sentiment(document=document).document_sentiment
    logger.info("score: {}, magnitude: {}".format(sentiment.score,
                                                  sentiment.magnitude))

    return sentiment


def build_client():
    return language.LanguageServiceClient()


client = build_client()
with open("../zombie_2018-11-30.json.senti", encoding="utf8") as f:
    lines = f.readlines()
    result_set = []
    try:
Example #19
from util.doc_process import get_postgredb_query
from util.doc_process import filter_html_stopwords_pos
from util.doc_process import filter_tags
from util.doc_process import get_nids_sentences
import subject_queue

from util import simhash
import datetime
import os
import traceback
from multiprocessing import Pool
import jieba
from util import logger

real_dir_path = os.path.split(os.path.realpath(__file__))[0]
logger_9965 = logger.Logger('9965', os.path.join(real_dir_path,  'log/log_9965.txt'))
logger_9966 = logger.Logger('9966', os.path.join(real_dir_path,  'log/log_9966.txt'))


channel_for_multi_vp = ('科技', '外媒', '社会', '财经', '体育', '国际',
                        '娱乐', '养生', '育儿', '股票', '互联网', '健康',
                        '影视', '军事', '历史', '点集', '自媒体')
exclude_chnl = ['搞笑', '趣图', '美女', '萌宠', '时尚', '美食', '美文', '奇闻',
                '旅游', '汽车', '游戏', '科学', '故事', '探索']


insert_sentence_hash = "insert into news_sentence_hash_cache (nid, sentence, sentence_id, hash_val, first_16, second_16, third_16, fourth_16, ctime, first2_16, second2_16, third2_16, fourth2_16) VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
query_sen_sql = "select ns.nid, ns.hash_val from news_sentence_hash_cache ns inner join newslist_v2 nl on ns.nid=nl.nid " \
                "where (first_16=%s or second_16=%s or third_16=%s or fourth_16=%s) and " \
                "(first2_16=%s or second2_16=%s or third2_16=%s or fourth2_16=%s) and " \
                "nl.state=0 group by ns.nid, ns.hash_val "
Example #20
def __init__(self):
    self.timestamp = time.strftime("%Y%m%d_%H%M%S",
                                   time.localtime(time.time()))
    self.dbredis = dbredis.DBRedis()
    self.dbpostgresql = dbpostgresql.DBPostgresql(timestamp=self.timestamp)
    self.logger = logger.Logger()
Example #21
def run_auto(nav):
    log = logger.Logger('Autonomous')
    log.i("Starting autonomous path")
    nav.default_speed = 0.65

    nav.actuator('turret', {'action': 'home'})

    # Request camera data
    nav.actuator('webcamDetect', {})
    time.sleep(0.5)
    data = nav.sense('ringsSeen')
    if len(data) < 1:
        log.e("No ring data!")
        return

    rings_seen = data[0]
    log.d("Got ring data: %d", rings_seen)

    # Wobble 1
    nav.move(-6,    0) # Move off the wall so we don't hit it
    if rings_seen != 0:
        # Go around the ring stack
        nav.move(-28, -12)

    if rings_seen == 0:
        nav.move(-59, -15)
        nav.turn(30)
    elif rings_seen == 1:
        nav.move(-84, 0)
        nav.turn(0)
    elif rings_seen == 4:
        nav.move(-105, -13, speed=0.9)
        nav.turn(25)

    nav.actuator('shooter', {'action': 'start', 'speed': SHOOT_SPEED})
    place_wobble(nav)

    if rings_seen == 4:
        nav.move(-60, 0, reverse=False, speed=0.9)

    # Line up for shooting
    nav.move(-58.5, 0, reverse=False)
    nav.actuator('turret', {'action': 'rotate', 'angle': 0.16})
    nav.turn(90)
    if rings_seen == 0:
        time.sleep(3)
    else:
        time.sleep(1.5)

    shoot_rings(nav, 3)

    if rings_seen == 1:
        # Intake [experimental]
        nav.actuator('turret', {'action': 'home'})
        nav.actuator('intake', {'action': 'intake'})
        nav.turn(10)
        nav.move(-41, 5, reverse=False, speed=0.5)
        time.sleep(1)
        nav.actuator('intake', {'action': 'stop'})
        # nav.actuator('turret', {'action': 'home'})
        nav.move(-58.5, 1)
        nav.turn(3)
        shoot_rings(nav, 1)
        nav.turn(90)
    elif rings_seen == 4:
        nav.actuator('turret', {'action': 'home'})
        # nav.turn(-3)
        nav.actuator('intake', {'action': 'intake', 'speed': -0.75})
        nav.move(-46, 5, reverse=False, speed=0.85)
        nav.actuator('intake', {'action': 'intake', 'speed': 0.82})
        time.sleep(0.15)
        nav.move(-37, 12, reverse=False, speed=0.45)
        time.sleep(1)
        nav.move(-58, 2)
        nav.turn(0)
        nav.actuator('intake', {'action': 'stop'})
        shoot_rings(nav, 3)
        nav.actuator('wobble', {'action': 'down'})

    nav.actuator('shooter', {'action': 'stop'})

    #if rings_seen == 4:
        # second wobble
        # nav.move(-41, 26)
        #nav.move(-27.5, 16)
        #nav.turn(-195)
    #else:
        # second wobble
    if rings_seen == 0 or rings_seen == 1:
        nav.move(-27.5, 17)
        nav.turn(192)
        time.sleep(0.75)
    elif rings_seen == 4:
        nav.move(-27, 18)
        time.sleep(0.25)

    pick_wobble(nav)

    if rings_seen == 0:
        nav.move(-55, -10)
        nav.turn(50)
    elif rings_seen == 1:
        nav.move(-74, -1)
        nav.turn(-9)
    elif rings_seen == 4:
        # nav.move(-47, 18)
        nav.move(-103, -13, speed=0.9)
        # nav.turn(-323)
    place_wobble(nav)

    nav.move(-76, 0, reverse=(rings_seen != 4))

    log.i("Path complete")
Example #22
# -*- coding: utf-8 -*-
# @Time    : 16/12/23 10:37 AM
# @Author  : liulei
# @Brief   : Scheduled task
# @File    : timed_task.py
# @Software: PyCharm Community Edition

from util.doc_process import get_postgredb
from redis_process import nid_queue
from graphlab_kmeans.kmeans_for_update import chnl_k_dict
import datetime
from datetime import timedelta
import os
from util import logger
real_dir_path = os.path.split(os.path.realpath(__file__))[0]
logger_9981 = logger.Logger('process9981',  os.path.join(real_dir_path,  'log/log_9981.txt'))


# Loop period for fetching user clicks
period = 3
click_sql = "select c.uid, c.nid, c.ctime, c.itime from newsrecommendclick c \
inner join newslist_v2 nl  on c.nid=nl.nid \
INNER JOIN channellist_v2 cl on nl.chid = cl.id \
where cname in ({0}) and c.itime > '{1}' order by c.itime "
#where cname in ({0}) and c.ctime > now() - INTERVAL '{1} second' "

#last_time = (datetime.datetime.now() - timedelta(seconds=3)).strftime('%Y-%m-%d %H:%M:%S.%f')
last_time = datetime.datetime.now() - timedelta(seconds=3)

channels = ', '.join("\'" + ch+"\'" for ch in chnl_k_dict.keys())
def get_clicks_5m():
Example #23
# -*- coding: utf-8 -*-
# @Time    : 16/12/23 10:37 AM
# @Author  : liulei
# @Brief   : Scheduled task
# @File    : timed_task.py
# @Software: PyCharm Community Edition

from util.doc_process import get_postgredb
from redis_process import nid_queue
from data_process import channel_for_topic_dict
import datetime
from datetime import timedelta
from util import logger
import os
real_dir_path = os.path.split(os.path.realpath(__file__))[0]
logger_9989 = logger.Logger('process9989_3',  os.path.join(real_dir_path,  'log/log_9989_3.txt'))
# Loop period for fetching user clicks
period = 3
#click_sql = "select uid, nid, ctime from newsrecommendclick where ctime > now() - INTERVAL '5 minute'"
click_sql = "select c.uid, c.nid, c.ctime, c.itime from newsrecommendclick c \
inner join newslist_v2 nl  on c.nid=nl.nid \
INNER JOIN channellist_v2 cl on nl.chid = cl.id \
where cname in ({0}) and c.itime > '{1}' order by c.itime"
#where cname in ({0}) and c.ctime > now() - INTERVAL '{1} second' and c.stime>0"

#last_time = (datetime.datetime.now() - timedelta(seconds=3)).strftime('%Y-%m-%d %H:%M:%S.%f')
last_time = datetime.datetime.now() - timedelta(seconds=3)

channels = ', '.join("\'" + ch+"\'" for ch in channel_for_topic_dict.keys())
def get_clicks_5m():
    logger_9989.info('news epoch...')
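Both timed-task snippets keep an `itime` watermark (`last_time`) instead of querying a sliding `now() - INTERVAL` window, so a late cycle neither misses nor double-counts clicks. A sketch of the polling loop this sets up (hedged: `get_postgredb` is assumed to return a `(connection, cursor)` pair like the `get_postgredb_query` helper used elsewhere in these examples):

# Illustrative watermark polling loop built from the names defined above
import time
import datetime
from datetime import timedelta

def poll_clicks():
    last_time = datetime.datetime.now() - timedelta(seconds=3)
    while True:
        conn, cursor = get_postgredb()    # assumed (conn, cursor) helper
        cursor.execute(click_sql.format(channels, last_time))
        rows = cursor.fetchall()
        conn.close()
        if rows:
            # itime is the fourth selected column; advancing the watermark
            # means the next cycle fetches only strictly newer clicks
            last_time = max(row[3] for row in rows)
        time.sleep(period)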
Example #24
import os
import traceback
from util import logger
import pandas as pd
from util.doc_process import get_postgredb_query
from util.doc_process import get_postgredb
import datetime
import math
import json
from heapq import nlargest
from operator import itemgetter

TEST_FLAG = False

real_dir_path = os.path.split(os.path.realpath(__file__))[0]  # directory of this file
log_cf = logger.Logger('log_cf', os.path.join(real_dir_path, 'log', 'log.txt'))
log_cf_clear_data = logger.Logger('log_cf_clear_data', os.path.join(real_dir_path, 'log', 'log_clear_data.txt'))


################################################################################
# @brief: fetch the newest topic model version
################################################################################
def get_newest_topic_v():
    topic_sql = "select model_v from user_topics_v2 group by model_v"
    conn, cursor = get_postgredb_query()
    cursor.execute(topic_sql)
    rows = cursor.fetchall()
    topic_vs = []
    for row in rows:
        topic_vs.append(row[0])
    conn.close()
    return topic_vs

Example #25
import pickle

FIXED_PARAMETERS, config = params.load_parameters()
modname = FIXED_PARAMETERS["model_name"]

if not os.path.exists(FIXED_PARAMETERS["log_path"]):
    os.makedirs(FIXED_PARAMETERS["log_path"])
if not os.path.exists(config.tbpath):
    os.makedirs(config.tbpath)
    config.tbpath = FIXED_PARAMETERS["log_path"]

if config.test:
    logpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + "_test.log"
else:
    logpath = os.path.join(FIXED_PARAMETERS["log_path"], modname) + ".log"
logger = logger.Logger(logpath)

model = FIXED_PARAMETERS["model_type"]

module = importlib.import_module(".".join(['models', model]))
MyModel = getattr(module, 'MyModel')

# Logging parameter settings at each launch of training script
# This will help ensure nothing goes awry in reloading a model and we consistently use the same hyperparameter settings.
logger.Log("FIXED_PARAMETERS\n %s" % FIXED_PARAMETERS)

######################### LOAD DATA #############################

if config.debug_model:
    # training_snli, dev_snli, test_snli, training_mnli, dev_matched, dev_mismatched, test_matched, test_mismatched = [],[],[],[],[],[], [], []
    test_matched = load_nli_data(FIXED_PARAMETERS["dev_matched"],
Example #26
class Commands:
    class Endpoints:
        endpoints = {}

        @classmethod
        def route(cls, route):
            def wrapper(func):
                cls.endpoints[route] = func
                return func
            return wrapper
    
    logged_in = {}
    #init logger
    logger = logger.Logger("Commands", file=True, loglevel=logging.INFO, fileformatter=logging.Formatter("[%(asctime)s] %(user)s(%(uname)s) requested %(msg)s"))

    def __init__(self, socket):
        #read in dataset
        self.dataset = pd.read_csv(path.join(path.dirname(__file__), "data/kepler.csv"))
        self.socket = socket
    
    @staticmethod
    def get_endpoint_counts():
        return {k: v.counter for k,v in Commands.Endpoints.endpoints.items()}

    #return all koi object with a confirmed disposition
    @Endpoints.route("confirmed")
    @count_function
    def get_confirmed(self):
        return pickle.dumps(self.dataset[self.dataset['koi_disposition'] == 'CONFIRMED'])
    
    #get koi objects by search query on the name. May be incomplete name
    @Endpoints.route("kepler_name")
    @count_function
    def get_kepler_name(self, name):
        return pickle.dumps(self.dataset[self.dataset['kepler_name'].str.contains(name, na=False, regex=False)])


    #login the user and send back the session id
    @Endpoints.route("login")
    @count_function
    def login(self, uname, fullname, email):
        sessid = str(uuid.uuid4())
        self.logged_in[sessid] = dict(username = uname, fullname = fullname, email = email, socket=self.socket)
        return sessid

    #log the user out, and remove the session from logged in
    @Endpoints.route("logout")
    @count_function
    def logout(self, session_id):
        if session_id in self.logged_in.keys():
            self.logged_in.pop(session_id)
            return 200
        return 404
    
    #filter by koi score
    @Endpoints.route("koi_score")
    @count_function
    def get_koi_score(self, score, operand='lt'):
        try:
            #operands to be used
            ops = {
                'lt': operator.lt,
                'le': operator.le,
                'eq': operator.eq,
                'ge': operator.ge,
                'gt': operator.gt
            }
            score = float(score)

            #if score out of range, raise error
            if score > 1 or score < 0:
                raise ValueError()
            #return the filtered data as a pickled pandas dataframe
            return pickle.dumps(self.dataset[ops[operand.lower()](self.dataset['koi_score'], score)])
        except Exception as e:
            logging.error(e)
            return 400 #will mainly trigger if operand is not defined

    #get a countplot of the koi dispositions
    @Endpoints.route("countplot")
    @count_function
    def countplot(self):

        #generate a graph, and save as a temporary image
        temp_id = uuid.uuid4()
        dirpath = path.dirname(__file__) + f"/temp"
        fp = dirpath + f"/{temp_id}.png"
        
        dataset = self.dataset.copy()
        # Series.dropna(inplace=True) would not remove rows from the frame,
        # so drop rows with a missing disposition on the DataFrame itself
        dataset.dropna(subset=['koi_disposition'], inplace=True)
        sns.countplot(data=dataset, x='koi_disposition')
        if not path.exists(dirpath):
            os.mkdir(dirpath)
        del dataset
        #reset pyplot and save the graph
        plt.savefig(fp)
        plt.close('all')

        #keep trying to open the image, may cause errors because pyplot is still saving it
        while True:
            try:
                with open(fp, 'rb') as f:
                    data = f.readlines()
                    f.close()
                if data: #once the image is read, exit loop
                    break
            except OSError:
                pass

        #try to remove the temp image to save memory on disk
        while True:
            try:
                os.remove(fp)
                print(os.path.exists(fp))
                if not path.exists(fp):
                    break
            except Exception as e:
                print(e)
        #return image bytes
        return data

    #get all possible column filters for scatterplot
    @Endpoints.route("column_names")
    @count_function
    def get_columns(self):
        return list(self.dataset.columns)

    #plot 2 columns in a scatterplot to analyze correlation
    @Endpoints.route("scatterplot")
    @count_function
    def scatterplot(self, x='koi_teq', y='koi_srad'):

        #same method of working as countplot
        temp_id = uuid.uuid4()
        dirpath = path.dirname(__file__) + f"/temp"
        fp = dirpath + f"/{temp_id}.png"
        
        dataset = self.dataset[[x, y]].dropna(axis=0, inplace=False)
        plt.scatter(dataset[x], dataset[y])
        plt.xlabel(x)
        plt.ylabel(y)
        if not path.exists(dirpath):
            os.mkdir(dirpath)
        
        plt.savefig(fp)
        plt.close('all')

        while True:
            try:
                with open(fp, 'rb') as f:
                    data = f.readlines()
                    f.close()
                if data:
                    break
            except OSError:
                pass

        while True:
            try:
                os.remove(fp)
                print(os.path.exists(fp))
                if not path.exists(fp):
                    break
            except Exception as e:
                print(e)
        return data
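Example #26's `Endpoints.route` decorator fills a plain dict mapping route names to the registered functions (already wrapped by `count_function`), so the server side can dispatch an incoming command with a single lookup. A hypothetical dispatcher, not from this project, showing how such a registry is typically consumed:

# Hypothetical dispatcher for the Endpoints registry above
def dispatch(commands, route, *args):
    func = Commands.Endpoints.endpoints.get(route)
    if func is None:
        return 404  # unknown route, mirroring the snippet's status codes
    # registered functions are unbound, so pass the instance explicitly
    return func(commands, *args)

# e.g. dispatch(commands_instance, "kepler_name", "Kepler-22")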