Example #1
def init():
    global DB_SHARING_MODEL
    global DB_USER_PROFILE
    try:
        # DB_SHARING_MODEL:
        LOG.info(
            "[usermgnt.data.standalone.db] [init] Initializing DB_SHARING_MODEL ["
            + config.dic['UM_WORKING_DIR_VOLUME'] +
            config.dic['DB_SHARING_MODEL'] + "] ...")
        DB_SHARING_MODEL = Base(config.dic['UM_WORKING_DIR_VOLUME'] +
                                config.dic['DB_SHARING_MODEL'])
        if not DB_SHARING_MODEL.exists():
            # create new base with field names
            DB_SHARING_MODEL.create('id', 'user_id', 'device_id', 'max_apps',
                                    'battery_limit')
        else:
            DB_SHARING_MODEL.open()

        # DB_USER_PROFILE:
        LOG.info(
            "[usermgnt.data.standalone.db] [init] Initializing DB_USER_PROFILE ["
            + config.dic['UM_WORKING_DIR_VOLUME'] +
            config.dic['DB_USER_PROFILE'] + "] ...")
        DB_USER_PROFILE = Base(config.dic['UM_WORKING_DIR_VOLUME'] +
                               config.dic['DB_USER_PROFILE'])
        if not DB_USER_PROFILE.exists():
            # create new base with field names
            DB_USER_PROFILE.create('id', 'user_id', 'device_id',
                                   'service_consumer', 'resource_contributor')
        else:
            DB_USER_PROFILE.open()
    except Exception:
        LOG.exception(
            '[usermgnt.data.standalone.db] [init] Exception: Error while initializing db components'
        )
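
All of the snippets on this page share the same pydblite idiom: construct a Base on a file path, create() it with field names on first use, and open() it on later runs. Below is a minimal, self-contained sketch of that pattern; the file name and field names are illustrative, not taken from any example above.

from pydblite import Base

db = Base('users.pdl')  # illustrative on-disk path
if not db.exists():
    # first run: define the schema as a flat list of field names
    db.create('user_id', 'device_id', 'max_apps')
else:
    # later runs: load the existing records from disk
    db.open()

db.insert(user_id='u1', device_id='d1', max_apps=3)
db.commit()  # pydblite only writes changes to disk on commit()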
Example #2
def init():
    global DB_DOCKER_PORTS
    global SERVICE_INSTANCES_LIST
    try:
        # SERVICE_INSTANCES_LIST => "MEMORY DB"
        LOG.info(
            '[lifecycle.data.app.db] [init] Initializing SERVICE_INSTANCES_LIST ...'
        )
        SERVICE_INSTANCES_LIST = []

        # DB_DOCKER_PORTS: PORTS DATABASE for each of the Lifecycles / agents => "PHYSICAL DB"
        LOG.info(
            '[lifecycle.data.app.db] [init] Initializing DB_DOCKER_PORTS ...')
        DB_DOCKER_PORTS = Base(config.dic['LM_WORKING_DIR_VOLUME'] +
                               config.dic['DB_DOCKER_PORTS']
                               )  #Base(config.dic['DB_DOCKER_PORTS'])
        # create new base with field names
        if not DB_DOCKER_PORTS.exists():
            DB_DOCKER_PORTS.create('port', 'mapped_to')
        else:
            DB_DOCKER_PORTS.open()
            records = DB_DOCKER_PORTS()
    except Exception:
        LOG.exception(
            '[lifecycle.data.app.db] [init] Exception: Error while initializing db components'
        )
Example #3
def __init__(self):
    db = Base('rjdb.pdl')
    if db.exists():
        db.open()
    else:
        db.create(*FIELDS)
    self.db = db
Example #4
def state_extraction():
    db = Base("scnscraper/abap.pydb")
    if db.exists():
        db.open()
        record = db(type="Question")
        print("# discussion scraped: " + str(record.__len__()))
        print("Answered: " + str(db(resolve="Answered.").__len__()))
        print("Answered with solution: " + str(db(resolve="solution").__len__()))
        print("Not Answered: " + str(db(resolve="Not Answered.").__len__()))
        print("Assumed Answered: " + str(db(resolve="Assumed Answered.").__len__()))
Example #5
 def state_extraction():
     db = Base("scnscraper/abap.pydb")
     if db.exists():
         db.open()
         record = db(type="Question")
         print("# discussion scraped: " + str(record.__len__()))
         print("Answered: " + str(db(resolve="Answered.").__len__()))
         print("Answered with solution: " +
               str(db(resolve="solution").__len__()))
         print("Not Answered: " +
               str(db(resolve="Not Answered.").__len__()))
         print("Assumed Answered: " +
               str(db(resolve="Assumed Answered.").__len__()))
Example #6
def init():
    global DB_LM_SERVICE_INSTANCES
    try:
        # DB_LM: LM DATABASE ("PHYSICAL DB")
        LOG.info('[lifecycle.data.app.lm_db] [init] Initializing DB_LM ...')
        DB_LM_SERVICE_INSTANCES = Base(config.dic['LM_WORKING_DIR_VOLUME'] +
                                       config.dic['DB_STANDALONE_MODE'] +
                                       "_service_instances")
        # create new base with field names
        if not DB_LM_SERVICE_INSTANCES.exists():
            DB_LM_SERVICE_INSTANCES.create('id', 'service_instance')
        else:
            DB_LM_SERVICE_INSTANCES.open()
    except Exception:
        LOG.exception(
            '[lifecycle.data.app.lm_db] [init] Exception: Error while initializing db components'
        )
Example #7
        return cal.get_holiday_label(date)


# get country_name or state_name
def get_country_or_state(country_name, state_name=None):
    if state_name is None:
        return country_name
    else:
        return state_name


# create pydblite database table
date_table = Base('temporal_data.pdl')

# check whether the db table exists; if not, create it.
if not date_table.exists():
    # create the table with its field names.
    date_table.create(
        'date', 'julian_date_num', 'sequence', 'week_day_num', 'day_name',
        'day_short_name', 'month_week_num', 'month_week_begin_date',
        'month_week_end_date', 'quarter_week_num', 'quarter_week_begin_date',
        'quarter_week_end_date', 'year_week_num', 'year_week_begin_date',
        'year_week_end_date', 'month_day_num', 'month_num', 'month_name',
        'month_short_name', 'month_begin_date', 'month_end_date',
        'quarter_day_num', 'quarter_num', 'quarter_name', 'quarter_begin_date',
        'quarter_end_date', 'year_day_num', 'year_num', 'year_begin_date',
        'year_end_date', 'dd_mon_yyyy', 'dd_month_yyyy', 'mon_dd_yyyy',
        'month_dd_yyyy', 'dd_mm_yyyy', 'mm_dd_yyyy', 'mm_dd_yy', 'dd_mm_yy',
        'm_d_yy', 'd_m_yy', 'weekday_flag', 'week_first_day_flag',
        'week_last_day_flag', 'month_first_day_flag', 'month_last_day_flag',
        'quarter_first_day_flag', 'quarter_last_day_flag',
        # ... (remaining field names truncated in the source excerpt)
)
Example #8
class DataStoring():

    #Initialize an instantiated object by opening the json file and the database
    def __init__(self):
        self.out_file = open("scnscraper/abap.json", "a")
        self.out_file.close()
        self.db = Base("scnscraper/abap.pydb")
        if self.db.exists():
            self.db.open()
        else:
            self.db.create('url', 'uid', 'type', 'author', 'title', 'date_time', 'tags',
                           'views', 'answers', 'resolve', 'upvotes', 'text')

    #for each thread scraped, insert it into db
    def insert_items_into_db(self, threads):
            for thread in threads:
                item = SapItem() # New Item instance
                item = thread
                try:
                    # Insert into db
                    self.db.insert(url = str(item["url"]), uid = str(item["uid"]), type= str(item["type"] ),
                                   author=str(item["author"]), title = str(item["title"]),
                                   date_time = str(item["date_time"] ),tags = str(item["tags"] ),
                                   views = str(item["views"] ), answers = str(item["answers"] ),
                                   resolve = str(item["resolve"] ), upvotes = str(item["upvotes"] ),
                                   text = str(item["text"]))
                except UnicodeEncodeError:
                    print("Unicode Encode Exception!")
            #save changes on disk
            self.db.commit()

    # for each thread scraped, initialize the string to insert into json file
    def threads_to_str(self, threads):
        out_string = "[ "
        if threads.__len__() == 0:
            return ""
        for thread in threads:
            item = SapItem()
            item = thread
            try:
                out_string += "{ url: '" + str(item["url"] ) + "', " + "uid: '" + str(item["uid"] ) + "', "\
                                "type: '" + str(item["type"] )  + "', "\
                                "author: '"+ str(item["author"])  + "', "  \
                                "title: '"+ str(item["title"])  + "', "\
                                "date_time: '"+ str(item["date_time"] )  + "', " \
                                "tags: '"+ str(item["tags"] )  + "', " \
                                "views: '"+ str(item["views"] )  + "', "\
                                "answers: '"+ str(item["answers"] )  + "', " \
                                "resolve: '"+ str(item["resolve"] )  + "', " \
                                "upvotes: '"+ str(item["upvotes"] )  + "', "\
                                "text: '" + str(item["text"]) + "' }\n"
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")

        out_string += " ]\n\n"
        return out_string


    #for each thread scraped, insert it into json file
    def insert_items_into_file(self, threads):
        try:
            self.out_file = open("scnscraper/abap.json", "a") # open in append mode
            #convert into string and insert into file
            self.out_file.write(self.threads_to_str(threads))
            self.out_file.close()
        except Exception:
            print('Exception in writing file')
            self.out_file.close()


    # read the web page index
    def read_index_from_file(self):
        if os.path.exists('scnscraper/index.txt'):
            with open('scnscraper/index.txt') as f:
                index = int(f.readline())
                f.close()
        else:
            f = open('scnscraper/index.txt', 'w')
            index = 2
            f.write(str(index))
            f.close()
        return index

    # Write the web page index
    def write_index_into_file(self, i):
        f = open('scnscraper/index.txt', 'w')
        f.write(str(i))
        f.close()


    # Convert the content of json file into a new db
    def from_json_to_db(self):
        thread = ''
        db = Base("scnscraper/abap.pydb", save_to_file= True)
        # create new base with field names
        db.create('url', 'uid', 'type', 'author',
                       'title', 'date_time', 'tags', 'views',
                       'answers', 'resolve', 'upvotes', 'text', mode='override')
        i=0
        with open('scnscraper/threads.json', 'r') as file:
            for line in file:
                if(line.endswith(" }\n")):
                    thread += line
                    tokens = re.search(r"url:\s'(.*?)',\suid:\s'(.*?)',\stype:\s'(.*?)',\sauthor:\s'(.*?)',\stitle:\s'(.*?)',\sdate_time:\s'(.*?)',\stags:\s'(.*?)',\sviews:\s'(.*?)',\sanswers:\s'(.*?)',\sresolve:\s'(.*?)',\supvotes:\s'(.*?)', text:\s'((.|\n)*)'\s}", str(thread))
                    if tokens is not None:
                        db.insert(url = tokens.group(1), uid = tokens.group(2), type= tokens.group(3),
                                author=tokens.group(4), title = tokens.group(5), date_time = tokens.group(6),
                                tags = tokens.group(7), views = tokens.group(8), answers = tokens.group(9),
                                resolve = tokens.group(10), upvotes = tokens.group(11), text = tokens.group(12))
                        db.commit()
                    print ('\n--------------------------------------------\n')
                    thread = ''
                if(line.startswith(" ]")):
                    print("new page")
                    thread = ''
                if(line.endswith('\n') and (not line.startswith(" ]\n\n")) and (not line.endswith(" }\n"))):
                    thread += line


    def state_extraction():
        db = Base("scnscraper/abap.pydb")
        if db.exists():
            db.open()
            record = db(type = "Question")
            print("# discussion scraped: " + str(record.__len__()))
            print("Answered: " + str(db(resolve = "Answered.").__len__()))
            print("Answered with solution: "+ str(db(resolve = "solution").__len__()))
            print("Not Answered: " + str(db(resolve = "Not Answered.").__len__()))
            print("Assumed Answered: " + str(db(resolve = "Assumed Answered.").__len__()))

    state_extraction = staticmethod(state_extraction)
Example #9
class DataStoring():

    #Initialize an instantiated object by opening the json file and the database
    def __init__(self):
        self.out_file = open("scnscraper/abap.json", "a")
        self.out_file.close()
        self.db = Base("scnscraper/abap.pydb")
        if self.db.exists():
            self.db.open()
        else:
            self.db.create('url', 'uid', 'type', 'author', 'title',
                           'date_time', 'tags', 'views', 'answers', 'resolve',
                           'upvotes', 'text')

    #for each thread scraped, insert it into db
    def insert_items_into_db(self, threads):
        for thread in threads:
            item = SapItem()  # New Item instance
            item = thread
            try:
                # Insert into db
                self.db.insert(url=str(item["url"]),
                               uid=str(item["uid"]),
                               type=str(item["type"]),
                               author=str(item["author"]),
                               title=str(item["title"]),
                               date_time=str(item["date_time"]),
                               tags=str(item["tags"]),
                               views=str(item["views"]),
                               answers=str(item["answers"]),
                               resolve=str(item["resolve"]),
                               upvotes=str(item["upvotes"]),
                               text=str(item["text"]))
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")
        #save changes on disk
        self.db.commit()

    # for each thread scraped, initialize the string to insert into json file
    def threads_to_str(self, threads):
        out_string = "[ "
        if threads.__len__() == 0:
            return ""
        for thread in threads:
            item = SapItem()
            item = thread
            try:
                out_string += "{ url: '" + str(item["url"] ) + "', " + "uid: '" + str(item["uid"] ) + "', "\
                                "type: '" + str(item["type"] )  + "', "\
                                "author: '"+ str(item["author"])  + "', "  \
                                "title: '"+ str(item["title"])  + "', "\
                                "date_time: '"+ str(item["date_time"] )  + "', " \
                                "tags: '"+ str(item["tags"] )  + "', " \
                                "views: '"+ str(item["views"] )  + "', "\
                                "answers: '"+ str(item["answers"] )  + "', " \
                                "resolve: '"+ str(item["resolve"] )  + "', " \
                                "upvotes: '"+ str(item["upvotes"] )  + "', "\
                                "text: '" + str(item["text"]) + "' }\n"
            except UnicodeEncodeError:
                print("Unicode Encode Exception!")

        out_string += " ]\n\n"
        return out_string

    #for each thread scraped, insert it into json file
    def insert_items_into_file(self, threads):
        try:
            self.out_file = open("scnscraper/abap.json",
                                 "a")  # open in append mode
            #convert into string and insert into file
            self.out_file.write(self.threads_to_str(threads))
            self.out_file.close()
        except Exception:
            print('Exception in writing file')
            self.out_file.close()

    # read the web page index
    def read_index_from_file(self):
        if os.path.exists('scnscraper/index.txt'):
            with open('scnscraper/index.txt') as f:
                index = int(f.readline())
                f.close()
        else:
            f = open('scnscraper/index.txt', 'w')
            index = 2
            f.write(str(index))
            f.close()
        return index

    # Write the web page index
    def write_index_into_file(self, i):
        f = open('scnscraper/index.txt', 'w')
        f.write(str(i))
        f.close()

    # Convert the content of json file into a new db
    def from_json_to_db(self):
        thread = ''
        db = Base("scnscraper/abap.pydb", save_to_file=True)
        # create new base with field names
        db.create('url',
                  'uid',
                  'type',
                  'author',
                  'title',
                  'date_time',
                  'tags',
                  'views',
                  'answers',
                  'resolve',
                  'upvotes',
                  'text',
                  mode='override')
        i = 0
        with open('scnscraper/threads.json', 'r') as file:
            for line in file:
                if (line.endswith(" }\n")):
                    thread += line
                    tokens = re.search(
                        r"url:\s'(.*?)',\suid:\s'(.*?)',\stype:\s'(.*?)',\sauthor:\s'(.*?)',\stitle:\s'(.*?)',\sdate_time:\s'(.*?)',\stags:\s'(.*?)',\sviews:\s'(.*?)',\sanswers:\s'(.*?)',\sresolve:\s'(.*?)',\supvotes:\s'(.*?)', text:\s'((.|\n)*)'\s}",
                        str(thread))
                    if tokens is not None:
                        db.insert(url=tokens.group(1),
                                  uid=tokens.group(2),
                                  type=tokens.group(3),
                                  author=tokens.group(4),
                                  title=tokens.group(5),
                                  date_time=tokens.group(6),
                                  tags=tokens.group(7),
                                  views=tokens.group(8),
                                  answers=tokens.group(9),
                                  resolve=tokens.group(10),
                                  upvotes=tokens.group(11),
                                  text=tokens.group(12))
                        db.commit()
                    print('\n--------------------------------------------\n')
                    thread = ''
                if (line.startswith(" ]")):
                    print("new page")
                    thread = ''
                if (line.endswith('\n') and (not line.startswith(" ]\n\n"))
                        and (not line.endswith(" }\n"))):
                    thread += line

    def state_extraction():
        db = Base("scnscraper/abap.pydb")
        if db.exists():
            db.open()
            record = db(type="Question")
            print("# discussion scraped: " + str(record.__len__()))
            print("Answered: " + str(db(resolve="Answered.").__len__()))
            print("Answered with solution: " +
                  str(db(resolve="solution").__len__()))
            print("Not Answered: " +
                  str(db(resolve="Not Answered.").__len__()))
            print("Assumed Answered: " +
                  str(db(resolve="Assumed Answered.").__len__()))

    state_extraction = staticmethod(state_extraction)
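
from_json_to_db() passes mode='override' to create(), which discards any existing base at that path and starts empty; the other recognized mode, 'open', reuses the existing file and its records. A minimal sketch of the difference, with an illustrative path:

from pydblite import Base

db = Base('rebuild.pdl')  # illustrative path
db.create('url', 'uid', mode='override')  # wipe and recreate even if the file exists
db.insert(url='http://example.com', uid='1')
db.commit()

same = Base('rebuild.pdl')
same.create('url', 'uid', mode='open')  # reopen the existing base instead
print(len(same))  # -> 1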
Example #10
import os
import time
import datetime

from pydblite import Base  # assumed import; every example on this page uses pydblite's Base
from kiteconnect import WebSocket  # assumed: pre-v3 kiteconnect exposed a WebSocket client
from constants import *
from readinstrument import MyTrade
import persist_last_value

api_key = os.getenv("API_KEY")
token = os.getenv("PUB_TOKEN")
user = os.getenv("USER_ID")
# Initialise.
# kws = WebSocket("your_api_key", "your_public_token", "logged_in_user_id")
# kws = WebSocket(api_key, token, user)
kws = WebSocket("lzxojcmp16le5ep8", "9dd813876abe0c06dfca066221f5d1da", "DD1846")

# Initialize DB.

db = Base(db_name, sqlite_compat=True)
if db.exists():
    db.open()
else:
    db.create('time', 'instrument_token', 'last_price', 'mode', 'tradeable')



# Save Initial Time
now = datetime.datetime.now()
tmp_filename = PREVIOUS_TIME+str(int(time.time()))
persist_last_value.save_object(tmp_filename, now)

factor = 1
minutes = 5
trade = MyTrade(fac=factor, c_min=minutes)
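
The examples above only ever insert and select. pydblite's Base also supports updating and deleting records, which code tracking last prices like this would typically need; a hedged sketch with an illustrative path and fields:

from pydblite import Base

db = Base('ticks.pdl')  # illustrative path
db.create('instrument_token', 'last_price', mode='open')
rec_id = db.insert(instrument_token=408065, last_price=1024.5)
db.commit()

rec = db[rec_id]                   # fetch a record by the id insert() returned
db.update(rec, last_price=1025.0)  # change fields in place
db.delete(rec)                     # remove the record
db.commit()                        # persist both operations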