Example No. 1
def init():
    global DB_DOCKER_PORTS
    global SERVICE_INSTANCES_LIST
    try:
        # SERVICE_INSTANCES_LIST => "MEMORY DB"
        LOG.info(
            '[lifecycle.data.app.db] [init] Initializing SERVICE_INSTANCES_LIST ...'
        )
        SERVICE_INSTANCES_LIST = []

        # DB_DOCKER_PORTS: PORTS DATABASE for each of the Lifecycles / agents => "PHYSICAL DB"
        LOG.info(
            '[lifecycle.data.app.db] [init] Initializing DB_DOCKER_PORTS ...')
        DB_DOCKER_PORTS = Base(config.dic['LM_WORKING_DIR_VOLUME'] +
                               config.dic['DB_DOCKER_PORTS'])
        # create new base with field names
        if not DB_DOCKER_PORTS.exists():
            DB_DOCKER_PORTS.create('port', 'mapped_to')
        else:
            DB_DOCKER_PORTS.open()
            records = DB_DOCKER_PORTS()  # load existing records (result unused here)
    except Exception:
        LOG.exception(
            '[lifecycle.data.app.db] [init] Exception: Error while initializing db components'
        )
Example No. 2
 def __init__(self):
     db = Base('rjdb.pdl')
     if db.exists():
         db.open()
     else:
         db.create(*FIELDS)
     self.db = db
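
The open-or-create pattern above only prepares the base; a minimal sketch of what typically follows, using pydblite's documented insert/query/commit calls (the file name and fields here are illustrative, not from the example):

# import path per the pydblite 3.x docs
from pydblite.pydblite import Base

db = Base('demo.pdl')
if db.exists():
    db.open()
else:
    db.create('name', 'age')

rec_id = db.insert(name='alice', age=30)  # insert() returns the new record id
db.commit()                               # persist the base to disk

for rec in db(name='alice'):              # calling the base filters by field
    print(rec['__id__'], rec['name'], rec['age'])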
Example No. 3
def init():
    global DB_SHARING_MODEL
    global DB_USER_PROFILE
    try:
        # DB_SHARING_MODEL:
        LOG.info(
            "[usermgnt.data.standalone.db] [init] Initializing DB_SHARING_MODEL ["
            + config.dic['UM_WORKING_DIR_VOLUME'] +
            config.dic['DB_SHARING_MODEL'] + "] ...")
        DB_SHARING_MODEL = Base(config.dic['UM_WORKING_DIR_VOLUME'] +
                                config.dic['DB_SHARING_MODEL'])
        if not DB_SHARING_MODEL.exists():
            # create new base with field names
            DB_SHARING_MODEL.create('id', 'user_id', 'device_id', 'max_apps',
                                    'battery_limit')
        else:
            DB_SHARING_MODEL.open()

        # DB_USER_PROFILE:
        LOG.info(
            "[usermgnt.data.standalone.db] [init] Initializing DB_USER_PROFILE ["
            + config.dic['UM_WORKING_DIR_VOLUME'] +
            config.dic['DB_USER_PROFILE'] + "] ...")
        DB_USER_PROFILE = Base(config.dic['UM_WORKING_DIR_VOLUME'] +
                               config.dic['DB_USER_PROFILE'])
        if not DB_USER_PROFILE.exists():
            # create new base with field names
            DB_USER_PROFILE.create('id', 'user_id', 'device_id',
                                   'service_consumer', 'resource_contributor')
        else:
            DB_USER_PROFILE.open()
    except Exception:
        LOG.exception(
            '[usermgnt.data.standalone.db] [init] Exception: Error while initializing db components'
        )
Example No. 4
 def state_extraction():
     db = Base("scnscraper/abap.pydb")
     if db.exists():
         db.open()
         record = db(type="Question")
         print("# discussion scraped: " + str(len(record)))
         print("Answered: " + str(len(db(resolve="Answered."))))
         print("Answered with solution: " + str(len(db(resolve="solution"))))
         print("Not Answered: " + str(len(db(resolve="Not Answered."))))
         print("Assumed Answered: " + str(len(db(resolve="Assumed Answered."))))
Example No. 5
 def load_db(self, check=True, fix=False, save_to_file=False):
     self.log('Opening {0} database(s)'.format(len(self.db_files)), logging.INFO)
     for db_name, db_file in self.db_files.items():
         _db = Base(db_file, save_to_file=save_to_file)
         _db.open()
         self.log('Database {0} opened, records #: {1}'.format(db_name, len(_db)), logging.DEBUG)
         self.db.update({db_name: _db})
         _db.create_index('uid')
         _db.create_index('type')
     if check is True:
         self.check_db(fix)
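
create_index() also exposes each index as a _<field> attribute on the base, so indexed lookups avoid a full scan. A standalone sketch under that documented behavior (names and values are illustrative):

from pydblite.pydblite import Base

db = Base('index_demo.pdl', save_to_file=False)
db.create('uid', 'type', mode="override")
db.insert(uid='u1', type='Question')
db.insert(uid='u2', type='Answer')
db.create_index('uid')
db.create_index('type')

print(db._uid['u1'])         # records whose uid == 'u1'
print(db._type['Question'])  # records whose type == 'Question'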
Example No. 6
    def test_open_memory_with_existing_filename(self):
        self.filter_db = Base(test_db_name, save_to_file=True)
        self.filter_db.create('unique_id', 'name', "active", mode="override")
        self.filter_db.commit()

        db = Base(test_db_name, save_to_file=False)
        db.open()
        self.assertEqual(db.fields, ['unique_id', 'name', "active"])

        db = Base(test_db_name, save_to_file=False)
        db.create('unique_id2', 'name2', "active2", mode="override")
        self.assertEqual(db.fields, ['unique_id2', 'name2', "active2"])
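
The test above leans on create()'s mode argument. A short sketch of the documented modes (file name illustrative):

from pydblite.pydblite import Base

db = Base('modes_demo.pdl')
db.create('a', 'b', mode="override")  # erase any existing base and start fresh
db = Base('modes_demo.pdl')
db.create('a', 'b', mode="open")      # reuse the existing base instead of failing
# with no mode argument, create() raises IOError if the file already exists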
Example No. 7
 def state_extraction():
     db = Base("scnscraper/abap.pydb")
     if db.exists():
         db.open()
         record = db(type="Question")
         print("# discussion scraped: " + str(record.__len__()))
         print("Answered: " + str(db(resolve="Answered.").__len__()))
         print("Answered with solution: " +
               str(db(resolve="solution").__len__()))
         print("Not Answered: " +
               str(db(resolve="Not Answered.").__len__()))
         print("Assumed Answered: " +
               str(db(resolve="Assumed Answered.").__len__()))
Example No. 8
 def load_db(self, check=True, fix=False, save_to_file=False):
     self.log('Opening {0} database(s)'.format(len(self.db_files)), logging.INFO)
     for db_name, db_file in self.db_files.items():
         _db = Base(db_file, save_to_file=save_to_file)
         _db.open()
         self.log('Database {0} opened, records #: {1}'.format(db_name, len(_db)), logging.DEBUG)
         self.db.update({db_name: _db})
         _db.create_index('uid')
         _db.create_index('type')
         self.log("Db {0}: printing simple strawman prediction accuracy for answers with max upvotes as best answer:".format(db_name), logging.INFO)
         self._strawman(_db)
     if check is True:
         self.check_db(fix)
Example No. 9
 def load_db(self, check=True, fix=False, save_to_file=False):
     self.log('Opening {0} database(s)'.format(len(self.db_files)),
              logging.INFO)
     for db_name, db_file in self.db_files.items():
         _db = Base(db_file, save_to_file=save_to_file)
         _db.open()
         self.log(
             'Database {0} opened, records #: {1}'.format(
                 db_name, len(_db)), logging.DEBUG)
         self.db.update({db_name: _db})
         _db.create_index('uid')
         _db.create_index('type')
     if check is True:
         self.check_db(fix)
Example No. 10
class ZipcodeDB(object):
    PYDBLITE_DB_FILE = './zipcode.db'

    def __init__(self, pydblite_db_file=None):
        if pydblite_db_file is None:
            pydblite_db_file = ZipcodeDB.PYDBLITE_DB_FILE
        try:
            self._db = Base(pydblite_db_file)
            self._db.open()
        except Exception:
            self._db = None

    def query_by_zip_code(self, zip_cd):
        logger.debug(
            "**************** entering ZipcodeDB.query_zipcode_db_by_zip_code")
        record = {}  # Empty dict if not found
        if self._db is not None:
            records = self._db(zip_cd=zip_cd)
            if len(records) == 1:
                record = records[0]
        return record

    def get_timezone_for_zip_code(self, zip_code):
        logger.debug("**************** entering ZipcodeDB.get_timezone")

        ret_val = 'NoTZ/' + zip_code
        data = self.query_by_zip_code(zip_code)
        if data:
            # timezone is really just an offset
            timezone = data['data']['timezone']
            dst = data['data']['dst']
            tz_dic = {
                '-5+1': 'US/Eastern',
                '-5+0': 'US/East-Indiana',
                '-6+1': 'US/Central',
                '-7+1': 'US/Mountain',
                '-7+0': 'US/Arizona',
                '-8+1': 'US/Pacific',
                '-9+1': 'US/Alaska',
                '-10+0': 'US/Hawaii',
                '-10+1': 'US/Aleutian'
            }
            key = timezone + '+' + dst
            if key in tz_dic:
                ret_val = tz_dic[key]

        return ret_val
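
A hedged usage sketch for the wrapper above, assuming './zipcode.db' exists and its records carry a 'zip_cd' field plus a nested 'data' dict holding 'timezone' and 'dst' offsets as strings:

zdb = ZipcodeDB()
record = zdb.query_by_zip_code('94105')        # {} when the zip code is unknown
print(zdb.get_timezone_for_zip_code('94105'))  # e.g. 'US/Pacific', or 'NoTZ/94105' as the fallback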
Example No. 11
def init():
    global DB_LM_SERVICE_INSTANCES
    try:
        # DB_LM: LM DATABASE ("PHYSICAL DB")
        LOG.info('[lifecycle.data.app.lm_db] [init] Initializing DB_LM ...')
        DB_LM_SERVICE_INSTANCES = Base(config.dic['LM_WORKING_DIR_VOLUME'] +
                                       config.dic['DB_STANDALONE_MODE'] +
                                       "_service_instances")
        # create new base with field names
        if not DB_LM_SERVICE_INSTANCES.exists():
            DB_LM_SERVICE_INSTANCES.create('id', 'service_instance')
        else:
            DB_LM_SERVICE_INSTANCES.open()
    except Exception:
        LOG.exception(
            '[lifecycle.data.app.lm_db] [init] Exception: Error while initializing db components'
        )
Example No. 12
 def load_db(self, check=True, fix=False, save_to_file=False):
     self.log('Opening {0} database(s)'.format(len(self.db_files)),
              logging.INFO)
     for db_name, db_file in self.db_files.items():
         _db = Base(db_file, save_to_file=save_to_file)
         _db.open()
         self.log(
             'Database {0} opened, records #: {1}'.format(
                 db_name, len(_db)), logging.DEBUG)
         self.db.update({db_name: _db})
         _db.create_index('uid')
         _db.create_index('type')
         self.log(
             "Db {0}: printing simple strawman prediction accuracy for answers with max upvotes as best answer:"
             .format(db_name), logging.INFO)
         self._strawman(_db)
     if check is True:
         self.check_db(fix)
Example No. 13
# The fragment begins mid-script: `instances` (state code -> name), `start_date`,
# `num_of_year` and `date_table` are defined earlier in the original source.
from datetime import timedelta
from dateutil.relativedelta import relativedelta

# Prompt until a valid state is chosen; the enclosing loop and the prompt are
# inferred from the fragment's `break` / `except KeyError` pattern.
while True:
    state = input("State code (leave empty for none): ")
    try:
        if state:
            state_name = instances[state]
        else:
            state_name = None
        break
    except KeyError:
        print("State {} is not available!".format(state))

# calculate end date.
end_date = start_date + relativedelta(years=int(num_of_year))

# day number between start and end date.
day_num = (end_date - start_date).days
# open the table (pydblite's open() returns the base itself)
date_table = date_table.open()
# iterate date and process data.
for item in range(day_num):
    date = (start_date + timedelta(days=item)).date()
    # check date is exists or not.If exists then continue.
    if len(date_table(date=date)) > 0:
        continue
    sequence = len(date_table) + 1
    week_day_num = date.weekday() + 1
    day_name = date.strftime("%A")
    day_short_name = date.strftime("%a")
    month_day_num = date.day
    month_name = date.strftime("%B")
    month_short_name = date.strftime("%b")
    year_num = date.year
    month_num = date.month
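    # --- hedged continuation (not in the original fragment) ---
    # assuming `date_table` was created with matching field names, persist the row:
    date_table.insert(date=date, sequence=sequence, week_day_num=week_day_num,
                      day_name=day_name, day_short_name=day_short_name,
                      month_day_num=month_day_num, month_name=month_name,
                      month_short_name=month_short_name, year_num=year_num,
                      month_num=month_num)
date_table.commit()  # persist the generated calendar rows once after the loop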
Example No. 14
class DataStoring():

    # Initialize the object: touch the json output file and open (or create) the database
    def __init__(self):
        self.out_file = open("scnscraper/abap.json", "a")
        self.out_file.close()
        self.db = Base("scnscraper/abap.pydb")
        if self.db.exists():
            self.db.open()
        else:
            self.db.create('url', 'uid', 'type', 'author', 'title', 'date_time', 'tags',
                           'views', 'answers', 'resolve', 'upvotes', 'text')

    # for each thread scraped, insert it into the db
    def insert_items_into_db(self, threads):
        for thread in threads:
            item = SapItem()  # New Item instance
            item = thread
            try:
                # Insert into db
                self.db.insert(url=str(item["url"]), uid=str(item["uid"]), type=str(item["type"]),
                               author=str(item["author"]), title=str(item["title"]),
                               date_time=str(item["date_time"]), tags=str(item["tags"]),
                               views=str(item["views"]), answers=str(item["answers"]),
                               resolve=str(item["resolve"]), upvotes=str(item["upvotes"]),
                               text=str(item["text"]))
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")
        # save changes on disk
        self.db.commit()

    # for each thread scraped, initialize the string to insert into json file
    def threads_to_str(self, threads):
        out_string = "[ "
        if len(threads) == 0:
            return ""
        for thread in threads:
            item = SapItem()
            item = thread
            try:
                out_string += "{ url: '" + str(item["url"] ) + "', " + "uid: '" + str(item["uid"] ) + "', "\
                                "type: '" + str(item["type"] )  + "', "\
                                "author: '"+ str(item["author"])  + "', "  \
                                "title: '"+ str(item["title"])  + "', "\
                                "date_time: '"+ str(item["date_time"] )  + "', " \
                                "tags: '"+ str(item["tags"] )  + "', " \
                                "views: '"+ str(item["views"] )  + "', "\
                                "answers: '"+ str(item["answers"] )  + "', " \
                                "resolve: '"+ str(item["resolve"] )  + "', " \
                                "upvotes: '"+ str(item["upvotes"] )  + "', "\
                                "text: '" + str(item["text"]) + "' }\n"
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")

        out_string += " ]\n\n"
        return out_string


    #for each thread scraped, insert it into json file
    def insert_items_into_file(self, threads):
        try:
            self.out_file = open("scnscraper/abap.json", "a") # open in append mode
            #convert into string and insert into file
            self.out_file.write(self.threads_to_str(threads))
            self.out_file.close()
        except Exception:
            print('Exception in writing file')
            self.out_file.close()


    # read the web page index
    def read_index_from_file(self):
        if os.path.exists('scnscraper/index.txt'):
            with open('scnscraper/index.txt') as f:
                index = int(f.readline())  # the with block closes the file
        else:
            f = open('scnscraper/index.txt', 'w')
            index = 2
            f.write(str(index))
            f.close()
        return index

    # Write the web page index
    def write_index_into_file(self, i):
        f = open('scnscraper/index.txt', 'w')
        f.write(str(i))
        f.close()


    # Convert the content of json file into a new db
    def from_json_to_db(self):
        thread = ''
        db = Base("scnscraper/abap.pydb", save_to_file= True)
        # create new base with field names
        db.create('url', 'uid', 'type', 'author',
                       'title', 'date_time', 'tags', 'views',
                       'answers', 'resolve', 'upvotes', 'text', mode='override')
        i=0
        with open('scnscraper/threads.json', 'r') as file:
            for line in file:
                if(line.endswith(" }\n")):
                    thread += line
                    tokens = re.search(r"url:\s'(.*?)',\suid:\s'(.*?)',\stype:\s'(.*?)',\sauthor:\s'(.*?)',\stitle:\s'(.*?)',\sdate_time:\s'(.*?)',\stags:\s'(.*?)',\sviews:\s'(.*?)',\sanswers:\s'(.*?)',\sresolve:\s'(.*?)',\supvotes:\s'(.*?)', text:\s'((.|\n)*)'\s}", str(thread))
                    if tokens is not None:
                        db.insert(url = tokens.group(1), uid = tokens.group(2), type= tokens.group(3),
                                author=tokens.group(4), title = tokens.group(5), date_time = tokens.group(6),
                                tags = tokens.group(7), views = tokens.group(8), answers = tokens.group(9),
                                resolve = tokens.group(10), upvotes = tokens.group(11), text = tokens.group(12))
                        db.commit()
                    print ('\n--------------------------------------------\n')
                    thread = ''
                if(line.startswith(" ]")):
                    print("new page")
                    thread = ''
                if(line.endswith('\n') and (not line.startswith(" ]\n\n")) and (not line.endswith(" }\n"))):
                    thread += line


    def state_extraction():
        db = Base("scnscraper/abap.pydb")
        if db.exists():
            db.open()
            record = db(type="Question")
            print("# discussion scraped: " + str(len(record)))
            print("Answered: " + str(len(db(resolve="Answered."))))
            print("Answered with solution: " + str(len(db(resolve="solution"))))
            print("Not Answered: " + str(len(db(resolve="Not Answered."))))
            print("Assumed Answered: " + str(len(db(resolve="Assumed Answered."))))

    state_extraction = staticmethod(state_extraction)
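
The scraper above only ever inserts; for completeness, a hedged sketch of pydblite's update/delete side (call names per the pydblite docs; the base and values are illustrative):

from pydblite.pydblite import Base

db = Base('threads_demo.pdl')
db.create('uid', 'resolve', mode="override")
rec_id = db.insert(uid='42', resolve='Not Answered.')

rec = db[rec_id]                     # fetch a record by its id
db.update(rec, resolve='Answered.')  # update fields of an existing record
db.delete(db(uid='42'))              # delete every record matching a query
db.commit()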
Example No. 15
class DataStoring():

    # Initialize the object: touch the json output file and open (or create) the database
    def __init__(self):
        self.out_file = open("scnscraper/abap.json", "a")
        self.out_file.close()
        self.db = Base("scnscraper/abap.pydb")
        if self.db.exists():
            self.db.open()
        else:
            self.db.create('url', 'uid', 'type', 'author', 'title',
                           'date_time', 'tags', 'views', 'answers', 'resolve',
                           'upvotes', 'text')

    #for each thread scraped, insert it into db
    def insert_items_into_db(self, threads):
        for thread in threads:
            item = SapItem()  # New Item instance
            item = thread
            try:
                # Insert into db
                self.db.insert(url=str(item["url"]),
                               uid=str(item["uid"]),
                               type=str(item["type"]),
                               author=str(item["author"]),
                               title=str(item["title"]),
                               date_time=str(item["date_time"]),
                               tags=str(item["tags"]),
                               views=str(item["views"]),
                               answers=str(item["answers"]),
                               resolve=str(item["resolve"]),
                               upvotes=str(item["upvotes"]),
                               text=str(item["text"]))
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")
        #save changes on disk
        self.db.commit()

    # for each thread scraped, initialize the string to insert into json file
    def threads_to_str(self, threads):
        out_string = "[ "
        if len(threads) == 0:
            return ""
        for thread in threads:
            item = SapItem()
            item = thread
            try:
                out_string += "{ url: '" + str(item["url"] ) + "', " + "uid: '" + str(item["uid"] ) + "', "\
                                "type: '" + str(item["type"] )  + "', "\
                                "author: '"+ str(item["author"])  + "', "  \
                                "title: '"+ str(item["title"])  + "', "\
                                "date_time: '"+ str(item["date_time"] )  + "', " \
                                "tags: '"+ str(item["tags"] )  + "', " \
                                "views: '"+ str(item["views"] )  + "', "\
                                "answers: '"+ str(item["answers"] )  + "', " \
                                "resolve: '"+ str(item["resolve"] )  + "', " \
                                "upvotes: '"+ str(item["upvotes"] )  + "', "\
                                "text: '" + str(item["text"]) + "' }\n"
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")

        out_string += " ]\n\n"
        return out_string

    #for each thread scraped, insert it into json file
    def insert_items_into_file(self, threads):
        try:
            self.out_file = open("scnscraper/abap.json",
                                 "a")  # open in append mode
            #convert into string and insert into file
            self.out_file.write(self.threads_to_str(threads))
            self.out_file.close()
        except Exception:
            print('Exception in writing file')
            self.out_file.close()

    # read the web page index
    def read_index_from_file(self):
        if os.path.exists('scnscraper/index.txt'):
            with open('scnscraper/index.txt') as f:
                index = int(f.readline())  # the with block closes the file
        else:
            f = open('scnscraper/index.txt', 'w')
            index = 2
            f.write(str(index))
            f.close()
        return index

    # Write the web page index
    def write_index_into_file(self, i):
        f = open('scnscraper/index.txt', 'w')
        f.write(str(i))
        f.close()

    # Convert the content of json file into a new db
    def from_json_to_db(self):
        thread = ''
        db = Base("scnscraper/abap.pydb", save_to_file=True)
        # create new base with field names
        db.create('url',
                  'uid',
                  'type',
                  'author',
                  'title',
                  'date_time',
                  'tags',
                  'views',
                  'answers',
                  'resolve',
                  'upvotes',
                  'text',
                  mode='override')
        i = 0
        with open('scnscraper/threads.json', 'r') as file:
            for line in file:
                if (line.endswith(" }\n")):
                    thread += line
                    tokens = re.search(
                        r"url:\s'(.*?)',\suid:\s'(.*?)',\stype:\s'(.*?)',\sauthor:\s'(.*?)',\stitle:\s'(.*?)',\sdate_time:\s'(.*?)',\stags:\s'(.*?)',\sviews:\s'(.*?)',\sanswers:\s'(.*?)',\sresolve:\s'(.*?)',\supvotes:\s'(.*?)', text:\s'((.|\n)*)'\s}",
                        str(thread))
                    if tokens is not None:
                        db.insert(url=tokens.group(1),
                                  uid=tokens.group(2),
                                  type=tokens.group(3),
                                  author=tokens.group(4),
                                  title=tokens.group(5),
                                  date_time=tokens.group(6),
                                  tags=tokens.group(7),
                                  views=tokens.group(8),
                                  answers=tokens.group(9),
                                  resolve=tokens.group(10),
                                  upvotes=tokens.group(11),
                                  text=tokens.group(12))
                        db.commit()
                    print('\n--------------------------------------------\n')
                    thread = ''
                if (line.startswith(" ]")):
                    print("new page")
                    thread = ''
                if (line.endswith('\n') and (not line.startswith(" ]\n\n"))
                        and (not line.endswith(" }\n"))):
                    thread += line

    def state_extraction():
        db = Base("scnscraper/abap.pydb")
        if db.exists():
            db.open()
            record = db(type="Question")
            print("# discussion scraped: " + str(record.__len__()))
            print("Answered: " + str(db(resolve="Answered.").__len__()))
            print("Answered with solution: " +
                  str(db(resolve="solution").__len__()))
            print("Not Answered: " +
                  str(db(resolve="Not Answered.").__len__()))
            print("Assumed Answered: " +
                  str(db(resolve="Assumed Answered.").__len__()))

    state_extraction = staticmethod(state_extraction)
Example No. 16
import datetime
import os
import time

# Import path per the pydblite 3.x docs; assumed by the Base(...) call below.
from pydblite.pydblite import Base

# Older kiteconnect releases exposed WebSocket (newer ones use KiteTicker).
from kiteconnect import WebSocket

from readinstrument import MyTrade
import persist_last_value

api_key = os.getenv("API_KEY")
token = os.getenv("PUB_TOKEN")
user = os.getenv("USER_ID")
# Initialise the ticker with credentials from the environment instead of the
# hard-coded literals that appeared in the original snippet.
# kws = WebSocket("your_api_key", "your_public_token", "logged_in_user_id")
kws = WebSocket(api_key, token, user)

# Initialize DB (`db_name` is defined elsewhere in the original script).
db = Base(db_name, sqlite_compat=True)
if db.exists():
    db.open()
else:
    db.create('time', 'instrument_token', 'last_price', 'mode', 'tradeable')



# Save the initial time (`PREVIOUS_TIME` is a filename prefix defined elsewhere).
now = datetime.datetime.now()
tmp_filename = PREVIOUS_TIME + str(int(time.time()))
persist_last_value.save_object(tmp_filename, now)

factor = 1
minutes = 5
trade = MyTrade(fac=factor, c_min=minutes)
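
The snippet ends after constructing MyTrade; presumably a tick callback later fills the fields created above. A hedged sketch of that wiring, with the callback name and signature taken from old kiteconnect WebSocket examples (treat both as assumptions, along with the tick dict keys):

def on_tick(tick, ws):  # assumed pre-3.0 kiteconnect callback signature
    for t in tick:
        db.insert(time=str(datetime.datetime.now()),
                  instrument_token=t['instrument_token'],
                  last_price=t['last_price'],
                  mode=t.get('mode'),
                  tradeable=t.get('tradable'))
    db.commit()  # persist the batch of ticks

kws.on_tick = on_tick
kws.connect()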