Example #1
def process(input_dir, output_dir, parts, length):
    replace_spaces(input_dir)
    original_mp3_files = glob.glob(input_dir + "/*.mp3")

    for input_file_path in original_mp3_files:
        mp3files = split_mp3.split_file(input_file_path, output_dir, parts, length)
        random.shuffle(mp3files)
        for filepath in mp3files:
            print "Processing {}".format(filepath)
            print "Check if already in database"
            analyze = database.doesnt_exist(filepath)
            if analyze:
                print "Analyze"
                try:
                    key, tempo, right_key  = sonic_functions.analyze(filepath)
                    if key > 11:
                        key = ((key - 9) % 12)
                    right_key = right_key.replace(" ", "").title()
                    name, cover, artist = echo_functions.retrieve_inf(filepath)
                    for sc in "!@#$%^&*()[]{}';:,<>?|`~-=+":
                        name = name.replace(sc, "")
                        artist = artist.replace(sc, "")
                    # fade the edges in and out
                    print "Fade the edges"
                    song = pydub.AudioSegment.from_mp3(filepath).fade_in(10000).fade_out(10000)
                    song.export(filepath, format="mp3")
                    duration = song.duration_seconds
                    database.insert(filepath, key, tempo, name, cover, artist, right_key, duration)
                except (KeyError, xml.etree.ElementTree.ParseError, IndexError):
                    pass
            else:
                print "Skipping..."
Example #2
File: chat.py Project: nnamdiib/chatroom
 def on_message(self, message):      
     msg = json.loads(message)
     msg['username'] = self.__rh.client_info[self.__clientID]['nick']
     msg['room'] = self.__rh.client_info[self.__clientID]['room']
     pmessage = json.dumps(msg)
     rconns = self.__rh.roomate_cwsconns(self.__clientID)
     for conn in rconns:
         conn.write_message(pmessage)
     database.insert(pmessage, db_filename)
Example #3
def addAlarm():
	description = request.form['description']
	time		= request.form['time']
	# Format: HH:MM
	minute		= time.split(':')[-1]
	hour 		= time.split(':')[0]
	day 		= request.form['day']
	db.insert(description, hour, minute, day)
	return redirect("/")
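A small aside, not part of the original project: the two `split(':')` calls above can be collapsed into a single unpacking, which also makes the expected HH:MM format explicit.

# equivalent parsing in one step; assumes the form field is always "HH:MM"
hour, minute = request.form['time'].split(':')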
Example #4
def main():
    post_title = request.args.get("post_title",None)
    post_body = request.args.get("post_body",None)
    post_button = request.args.get("post_button",None)

    if post_button == None:
        titles = database.getTitles()
        return render_template("home.html", titles=titles)
    else:
        database.insert('posts',post_title,post_body)
        return redirect(url_for('post',post_title=post_title))
Example #5
def update():
	print('Initializing database...')
	database.initialize()

	print('Getting available terms...')
	terms = scrape.getTerms()

	print('Terms retrieved. Updating table...')
	database.insert('terms', terms)

	print('Getting faculty information and dates for available terms...')
	termsInfo = {
		'faculties' : [],
		'dates' : []
	}
	for term in terms:
		termInfo = scrape.getFacultiesAndDates(term['id'])
		termsInfo['faculties'].extend(termInfo['faculties'])
		termsInfo['dates'].extend(termInfo['dates'])

	print('Faculties and dates retrieved. Updating tables...')
	database.insert('faculties', termsInfo['faculties'])
	database.insert('dates', termsInfo['dates'])

	print('Checking the database for new stats to retrieve...')
	missingStats = database.getMissingEmploymentStats()
	count = len(missingStats)

	print('{count} records to retrieve. Starting retrieval...'.format(count=count))
	employment = []
	termsMap = processTerms(database.getTerms())
	facultiesMap = {}
	for termKey in termsMap:
		facultiesMap[termKey] = processFaculties(database.getFaculties(termKey))
	current = 1
	for entry in missingStats:
		delay = random.random()*.5
		time.sleep(delay)
		print('({percent:5.2f}%):{current}/{count} - Fetching info on {faculty} from {date} in {term}...'.format(percent=float(current)/count * 100, current=current, count=count, faculty=facultiesMap[entry[0]][entry[2]], date=str(entry[1]), term=termsMap[entry[0]]))
		employment.extend(scrape.getEmploymentStats(*entry))
		if current % 25 == 0:
			print('Saving new entries...')
			database.insert('employment', employment)
			employment = []
		current+=1
	database.insert('employment', employment)
	
	print('Update complete.')
Example #6
File: product.py Project: lunayo/Brazaar
def addProduct():
    requestBody = loads(request.body.read())
    token = requestBody['token']
    product = requestBody['product']
    name = product['name']
    images = product['images']
    product.setdefault('quantity', 1)
    # delete images key
    del product['images']

    # # send notification to followers
    # user = "******"
    # content = user + " has posted new product : " + name + "!"

    # #  simulate 10 followers
    # token = randomword(15)
    # for i in range(1,10) :
        # sendNotification(token=token, content=content)

    productID = database.insert(database="brazaar2", 
                                collection="products",
                                data=dict(product))

    if productID :
        greenlet = gevent.spawn(uploadImages, str(productID), images)
        greenlet.ready()
        greenlet.get()

        return dumps(product)
    else :
        response.status = 300
        return {'error': 'insert to collection error'}
Example #7
def post(post_title):
    posts = database.getPost(post_title)
    comment_button = request.args.get("comment_button",None)

    if comment_button == None:
        comments = database.getComments(post_title)
        return render_template("post.html",
                               post_title=post_title, 
                               posts=posts,comments=comments)
    else:
        newComment = request.args.get("comment",None)
        database.insert('comments',post_title,newComment)
        comments = database.getComments(post_title)
        return render_template("post.html",
                               post_title=post_title,
                               posts=posts,comments=comments)
Example #8
File: reader.py Project: jkominek/reader
 def NewFolder(self, evt):
     dialog = wx.TextEntryDialog(self, "Name of folder...", "New Folder")
     res = dialog.ShowModal()
     if res == wx.ID_CANCEL:
         return
     name = dialog.GetValue().strip()
     if len(name)>0:
         id = database.insert("insert into folders (name, parent, ordering) values (?, ?, (select 1+max(ordering) from folders where parent=?))", (name, 1, 1))
         self.AddFolderToTree(id, 1, name)
Example #9
File: entry.py Project: kkeelltt/kelt
def finish(key=None):
    session = bottle.request.environ.get('beaker.session')

    # Check that this is not an unauthorized access
    if not key == session.id:
        return bottle.template('error', error=valid.state('lost_key'))

    # Push the session onto the pending-approval list
    database.insert(session)

    # Send the application request email to the operations team
    subject = 'Request for account ({club_account})'.format(**session)
    for_admin = message.write_second(session)
    msg = message.create_msg(FROM_ADDR, ADMIN_ADDR, subject, for_admin)
    message.send_msg(SMTP_SVR, msg)

    # Delete the session
    session.delete()

    return bottle.template('finish')
Example #10
File: routes.py Project: dieanne/WebApp
def upload():
    form = UploadForm(request.form)
    if request.method == 'POST':     #  and form.validate():
        name = form.name.data
        subject = form.subject.data
        homework = form.homework.data
        file = request.files['file'] 
        date = dt.now()
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
            entry = Entry(name, subject, homework, filename, date)
            insert(entry)
            return redirect(url_for('uploaded_file', filename=filename))
        else:
            return render_template('upload.html',
                                   title='Upload',
                                   form=form)
    else:
        return render_template('upload.html',
                               title='Upload',
                               form=form)
Example #11
File: reader.py Project: jkominek/reader
    def NewFeed(self, evt):
        dialog = wx.TextEntryDialog(self, "Feed URL...", "New Feed")
        res = dialog.ShowModal()
        if res == wx.ID_CANCEL:
            return
        url = dialog.GetValue().strip()

        selected_item = self.feed_list_ctrl.GetSelection()
        if selected_item.is_folder:
            dest_folder = selected_item.db_id
        else:
            dest_folder = selected_item.in_folder
            
        if len(url)>0:
            id = database.insert("insert into feeds (name, folder, ordering, url) values (?, ?, (select 1+max(ordering) from feeds where folder=?), ?)", ("feed", dest_folder, dest_folder, url))
            self.AddFeedToTree(id, dest_folder, "feed", url)
Example #12
File: bot.py Project: 0x3c3e/BookmarkBot
def upd(bot, update):
    text = hanler(update.message.text)
    if " " not in text:
        bot.sendMessage(update.message.chat_id, text=translation.split_char_error)
        return
    message_tmp = text.split(" ")
    if not message_tmp[-1].strip().isdigit():
        bot.sendMessage(update.message.chat_id, text=translation.page_number_error)
        return
    if len(message_tmp)<2:
        bot.sendMessage(update.message.chat_id, text=translation.not_enougth_params)
        return
    page = int(message_tmp[-1])
    title = " ".join(message_tmp[:-1]).strip()
    msg = database.insert(update.message.chat_id, title, page)
    bot.sendMessage(update.message.chat_id, text=msg)
Example #13
File: product.py Project: lunayo/Brazaar
def createUser():
    user = {}
    requestBody = loads(request.body.read())
    token = requestBody['token']
    user['username'] = requestBody['username']
    user['firstName'] = requestBody['firstName']
    user['lastName'] = requestBody['lastName']

    userID = database.insert(database="brazaar2", 
                            collection="users",
                            data=dict(user))
        
    if userID :
        return dumps(user)
    else :
        response.status = 300
        return {'error': 'insert to collection error'}
Example #14
def create_message():
    estimate = hulop.localize_image(
        request.files['image'],
        request.form['user'],
        request.form['map']
    )
    if estimate:
        loc = estimate['t']
        new_id = database.insert(
            'hotspot_messages',
            ('message','x','y','z'),
            (request.form['message'], loc[0], loc[1], loc[2])
        )
        hotspot = database.query('select * from hotspot_messages where id=?', [new_id], one=True)
        return json.dumps(hotspot), 201
    else:
        return json.dumps({'error': 'could not localize'}), 400
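Example #14 relies on a project-specific `database` module whose `insert(table, columns, values)` helper is not shown. Purely as a hypothetical sketch (assuming a sqlite3 backend, which the `id=?` placeholder in the follow-up query suggests), such a helper might look roughly like this:

import sqlite3

# Hypothetical helper matching the (table, columns, values) call shape above.
# The project's real `database` module may differ; this is only an illustration.
_conn = sqlite3.connect('app.db')

def insert(table, columns, values):
    placeholders = ', '.join('?' for _ in values)
    sql = 'insert into {} ({}) values ({})'.format(table, ', '.join(columns), placeholders)
    cur = _conn.execute(sql, values)
    _conn.commit()
    return cur.lastrowid  # the caller uses this id to select the new row back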
Example #15
File: product.py Project: lunayo/Brazaar
def registerDeviceToken() :
    requestBody = loads(request.body.read())
    token = requestBody['token']
    
    query = {'token' : token}
    tokenID = database.find(database="brazaar2",
                            collection="devices",
                            query=query)
    if not tokenID :
        tokenID = database.insert(database="brazaar2",
                                    collection="devices",
                                    data=requestBody)

    if tokenID :
        return dumps(requestBody)
    else :
        response.status = 300
        return {'error': 'register token error'}

    return 
Example #16
File: runner.py Project: gfechio/python
def blacklist(options, issue, summary):
    options = options.split(":")[2]
    non_pci_server = config.servers["squid"] 
    vanIndex = random.randrange(0,len(non_pci_server))
    van_server = non_pci_server[vanIndex]
    sys.stdout.write("%s used as vanguard server "+van_server+"\n") 
    del non_pci_server[vanIndex]
    sys.stdout.write("Removing "+van_server+" from list (To not deploy twice)\n")
    
    for url in block_urls_parse(options):
        sys.stdout.write("Blocking folllowing URLs -> "+url) 
        jira_con.comment(issue,"Blocking folllowing URLs -> "+url)
        put_file = puppet.put_file(config.puppet["server"], config.conn["user"], config.conn["password"],config.puppet["dir"], config.puppet["file"], url) 

        if put_file == "OK":

            if db.insert(config.conn["user"], url) == "OK":
                system_test.run(van_server, config.conn["user"], config.conn["password"])
                web_test.run(van_server, url)

            else:
                jira_con.comment(issue, "Automation error, please check")
                db.inactive(config.conn["user"], url)
                sys.exit()

        elif put_file == "NOK":
            jira_con.comment(issue, "URL already blacklisted")
            jira_con.transition_close(issue)

        else:
            db.inactive(config.conn["user"], url)
            system_test.run(van_server, config.conn["user"], config.conn["password"])
            sys.stderr.write("Problem during execution please check!")
            jira_con.comment(issue, 'Problem during execution, bot could not solve') 

    jira_con.comment(issue, 'Tests passed successfully, applying to the pool')
    puppet.kick(config.puppet["server"], config.conn["user"], config.conn["password"])

    jira_con.comment(issue, 'Everything done, closing ticket. Run puppet deploy manually')
    jira_con.transition_close(issue)
Example #17
File: product.py Project: lunayo/Brazaar
def addFollowing():
    requestBody = loads(request.body.read())
    token = requestBody['token']
    relationship = requestBody['relationship']
    username = relationship['username']
    following = relationship['following']
    # check if user exists
    query = {"username" : username}
    user = database.find(database="brazaar2",
                         collection="users",
                         query=query)

    if user :
        followingID = database.insert(database="brazaar2",
                                        collection="followings",
                                        data=relationship)
        if followingID :
            return dumps(relationship)
        else :
            response.status = 300
            return {'error': 'Relationship exists'}
    else :
        response.status = 300
        return {'error': 'User does not exist'}
Example #18
    def setUpClass(cls) -> None:
        """Create the expected schema in the test database"""

        drop_all_user_databases()
        database_name = "data_types_database"
        schema_name = "dbo"

        query_list = list(create_database(database_name, "Latin1_General_CS_AS"))
        query_list.extend(enable_database_tracking(database_name))

        text_values = [
            (0, None, None, None),
            (1, "abc", "def", "ghi".encode('utf-8'))
        ]
        text_schema = {
            'selected': True,
            'properties': {
                'pk': {
                    'inclusion': 'automatic',
                    'maximum': 2147483647,
                    'minimum': -2147483648,
                    'type': ['integer'],
                    'selected': True},
                'rowversion_synonym_timestamp': {'inclusion': 'available', 'selected': True, 'type': ['string', 'null']},
                'varchar_text': {},
                'nvarchar_text': {},
                'varbinary_data': {},
                "_sdc_deleted_at": {'format': 'date-time', 'type': ['string', 'null']}},
            'type': 'object'}

        other_values = [
            (0, None, None, None, "827376B0-AEF4-11E9-8002-0800276BC1DF", None, None, None),
            (1, None, None, None, "ACC9A986-AEF4-11E9-8002-0800276BC1DF", None, None, None),
            (2, None, None, None, "B792681C-AEF4-11E9-8002-0800276BC1DF", None, None, None)
        ]
        other_schema = {
            'selected': True,
            'properties': {
                'markup': {},
                'variant': {},
                'geospacial': {},
                'SpecialPurposeColumns': {},
                'tree': {},
                'guid': {
                    'inclusion': 'available',
                    'selected': True,
                    'pattern': '[A-F0-9]{8}-([A-F0-9]{4}-){3}[A-F0-9]{12}',
                    'type': ['string', 'null']},
                'geospacial_map': {},
                'pk': {
                    'inclusion': 'automatic',
                    'maximum': 2147483647,
                    'minimum': -2147483648,
                    'type': ['integer'],
                    'selected': True},
                'version': {'inclusion': 'available', 'selected': True, 'type': ['string', 'null']},
                "_sdc_deleted_at": {'format': 'date-time', 'type': ['string', 'null']}},
            'type': 'object'}

        comp_values = [
            (0, datetime(1970, 7, 8, 3), datetime.now()),
            (1, datetime(1970, 1, 1, 0), datetime.now())
        ]
        comp_schema = {
            'selected': True,
            'properties': {
                'started_at': {
                    'selected': False,
                    'type': ['string', 'null'],
                    'inclusion': 'available',
                    'format': 'date-time'},
                'durations_days': {
                    'inclusion': 'available',
                    'maximum': 2147483647,
                    'minimum': -2147483648,
                    'type': ['integer', 'null'],
                    'selected': True},
                'ended_at': {
                    'format': 'date-time',
                    'inclusion': 'available',
                    'type': ['string', 'null'],
                    'selected': False},
                'pk': {
                    'inclusion': 'automatic',
                    'maximum': 2147483647,
                    'minimum': -2147483648,
                    'type': ['integer'],
                    'selected': True},
                "_sdc_deleted_at": {'format': 'date-time', 'type': ['string', 'null']}},
            'type': 'object'}

        cls.EXPECTED_METADATA = {
            'data_types_database_dbo_text_and_image_deprecated_soon': {
                'is-view': False,
                'schema-name': schema_name,
                'row-count': 0,
                'values': text_values,
                'table-key-properties': {'pk'},
                'selected': None,
                'database-name': database_name,
                'stream_name': 'text_and_image_deprecated_soon',
                'fields': [
                    {"pk": {'sql-datatype': 'int', 'selected-by-default': True, 'inclusion': 'automatic'}},
                    {"nvarchar_text": {'sql-datatype': 'ntext', 'selected-by-default': False,
                                       'inclusion': 'unavailable'}},
                    {"varchar_text": {'sql-datatype': 'text', 'selected-by-default': False,
                                      'inclusion': 'unavailable'}},
                    {"varbinary_data": {'sql-datatype': 'image', 'selected-by-default': False,
                                        'inclusion': 'unavailable'}},
                    {"rowversion_synonym_timestamp": {'sql-datatype': 'timestamp', 'selected-by-default': True,
                                                      'inclusion': 'available'}}],
                'schema': text_schema},
            'data_types_database_dbo_weirdos': {
                'is-view': False,
                'schema-name': schema_name,
                'row-count': 0,
                'values': other_values,
                'table-key-properties': {'pk'},
                'selected': None,
                'database-name': database_name,
                'stream_name': 'weirdos',
                'fields': [
                    {"pk": {'sql-datatype': 'int', 'selected-by-default': True, 'inclusion': 'automatic'}},
                    {"geospacial": {'sql-datatype': 'geometry', 'selected-by-default': False,
                                    'inclusion': 'unavailable'}},
                    {"geospacial_map": {'sql-datatype': 'geography', 'selected-by-default': False,
                                        'inclusion': 'unavailable'}},
                    {"markup": {'sql-datatype': 'xml', 'selected-by-default': False, 'inclusion': 'unavailable'}},
                    {"guid": {'sql-datatype': 'uniqueidentifier', 'selected-by-default': True,
                              'inclusion': 'available'}},
                    {"tree": {'sql-datatype': 'hierarchyid', 'selected-by-default': False, 'inclusion': 'unavailable'}},
                    {"variant": {'sql-datatype': 'sql_variant', 'selected-by-default': False,
                                 'inclusion': 'unavailable'}},
                    {"SpecialPurposeColumns": {'sql-datatype': 'xml', 'selected-by-default': False,
                                               'inclusion': 'unavailable'}},
                    {"version": {'sql-datatype': 'timestamp', 'selected-by-default': True, 'inclusion': 'available'}}],
                'schema': other_schema},
            'data_types_database_dbo_computed_columns': {
                'is-view': False,
                'schema-name': schema_name,
                'row-count': 0,
                'values': comp_values,
                'table-key-properties': {'pk'},
                'selected': None,
                'database-name': database_name,
                'stream_name': 'computed_columns',
                'fields': [
                    {"pk": {'sql-datatype': 'int', 'selected-by-default': True, 'inclusion': 'automatic'}},
                    {"started_at": {'sql-datatype': 'datetimeoffset', 'selected-by-default': True,
                                    'inclusion': 'available'}},
                    {"ended_at": {'sql-datatype': 'datetimeoffset', 'selected-by-default': True,
                                  'inclusion': 'available'}},
                    {"durations_days": {'sql-datatype': 'int', 'selected-by-default': True,
                                        'inclusion': 'unavailable'}}],
                'schema': comp_schema},
        }

        # test timestamp and unsupported data types
        table_name = "text_and_image_deprecated_soon"
        column_name = ["pk", "nvarchar_text", "varchar_text", "varbinary_data",
                       "rowversion_synonym_timestamp"]
        column_type = ["int", "ntext", "text", "image", "timestamp"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(create_table(database_name, schema_name, table_name, column_def,
                                       primary_key=primary_key, tracking=True))
        query_list.extend(insert(database_name, schema_name, table_name, text_values, column_name[:-1]))

        # test uniqueidentifier and rowversion
        table_name = "weirdos"
        column_name = [
            "pk", "geospacial", "geospacial_map", "markup", "guid", "tree",
            "variant", "SpecialPurposeColumns", "version"
        ]
        column_type = [
            "int", "geometry", "geography", "xml", "uniqueidentifier", "hierarchyid",
            "sql_variant", "xml COLUMN_SET FOR ALL_SPARSE_COLUMNS", "rowversion"
        ]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(create_table(database_name, schema_name, table_name, column_def,
                                       primary_key=primary_key, tracking=True))
        # insert these rows one at a time; inserting them in a single statement raises
        # "Parameter information is missing from a user-defined type."
        for value in other_values:
            query_list.extend(insert(database_name, schema_name, table_name, [value], column_name[:-1]))

        table_name = "computed_columns"
        column_name = ["pk", "started_at", "ended_at", "durations_days"]
        column_type = ["int", "datetimeoffset", "datetimeoffset", "AS DATEDIFF(day, started_at, ended_at)"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(create_table(database_name, schema_name, table_name, column_def,
                                       primary_key=primary_key, tracking=True))
        query_list.extend(insert(database_name, schema_name, table_name, comp_values, column_name[:-1]))
        mssql_cursor_context_manager(*query_list)

        # update values with rowversions
        rows = mssql_cursor_context_manager(*["select version from data_types_database.dbo.weirdos order by pk"])
        rows = ["0x{}".format(value.hex().upper()) for value, in rows]
        cls.EXPECTED_METADATA['data_types_database_dbo_weirdos']['values'] = \
            [other_values[row] + (version,) for row, version in enumerate(rows)]

        rows = mssql_cursor_context_manager(*[
            "select rowversion_synonym_timestamp from data_types_database.dbo.text_and_image_deprecated_soon order by pk"])
        rows = ["0x{}".format(value.hex().upper()) for value, in rows]
        cls.EXPECTED_METADATA['data_types_database_dbo_text_and_image_deprecated_soon']['values'] = \
            [text_values[row] + (version,) for row, version in enumerate(rows)]

        rows = mssql_cursor_context_manager(
            *["select durations_days from data_types_database.dbo.computed_columns order by pk"])
        cls.EXPECTED_METADATA['data_types_database_dbo_computed_columns']['values'] = \
            [comp_values[row] + tuple(version) for row, version in enumerate(rows)]

        cls.expected_metadata = cls.discovery_expected_metadata
Example #19
File: test.py Project: yanghonggang/cbt
        # make readahead into an int
        params[3] = int(params[3][7:])

        # Make op_size into an int
        params[4] = int(params[4][8:])

        # Make cprocs into an int
        params[5] = int(params[5][17:])

        # Make io_depth into an int
        params[6] = int(params[6][9:])

        params_hash = mkhash(params)
        params = [params_hash] + params
        params.extend([0, 0])
        database.insert(params)

        for line in open(inputname):
            if "aggrb" in line:
                bw = getbw(splits(line, 'aggrb=', ','))
                if "READ" in line:
                    database.update_readbw(params_hash, bw)
                elif "WRITE" in line:
                    database.update_writebw(params_hash, bw)
    html = HTMLGenerator()
    html.add_html(html.read_file('/home/nhm/src/cbt/include/html/table.html'))
    html.add_style(html.read_file('/home/nhm/src/cbt/include/css/table.css'))
    html.add_script(
        html.read_file('/home/nhm/src/cbt/include/js/jsxcompressor.min.js'))
    html.add_script(html.read_file('/home/nhm/src/cbt/include/js/d3.js'))
    html.add_script(html.read_file('/home/nhm/src/cbt/include/js/d3var.js'))
Example #20
File: sites-add.py Project: sr/beadmin
if len(userfromdb) == 0:
	print 'Votre utilisateur n\'a pas été autorisé à avoir un site.'
	print 'Merci de contacter l\'administrateur.'
	sys.exit()

id_user = list(userfromdb)[0].id

while True:
	domain = text('Nom de domaine du site :')
	if re.match(r'^([a-zA-Z0-9_\-]+\.)+(fr|cc|com|org|net|info|name|be|eu)$', domain):
		break

domains = db.select('domains', where="name = '%s'" % domain)

if len(domains) == 0:
	id_domain = db.insert('domains', name=domain)
else:
	id_domain = list(domains)[0].id
	if len(db.select('websites', where="id_domains = '%s'" % id_domain)) == 1:
		print 'Ce domaine possède déjà une configuration sur Lighttpd'
		sys.exit()

msg = '''# Configurez ici votre hôte virtuel avec les instructions de
# configuration de lighttpd (voir la doc).
# Si le site est un site PHP, cette étape est souvent inutile.
# En cas de doutes, contactez un administrateur.'''

if len(db.select('websites', where="id_domains = '%s'" % id_domain)) == 1:
	print 'Le domaine %s possède déjà une configuration sur Lighttpd' % domain
	sys.exit()
Example #21
def add_command():
    database.insert(date_text.get(), earning_text.get(), exercise_text.get(), study_text.get(), diet_text.get(), python_text.get())
    list.delete(0,END)
    list.insert(END,(date_text.get(), earning_text.get(), exercise_text.get(), study_text.get(), diet_text.get(), python_text.get()))
Example #22
 def insert_data(self, query, val):
     return insert(query, val, self.mydb)
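Example #22 is only a thin wrapper; the `insert(query, val, self.mydb)` helper it delegates to is not shown. Purely as an assumption about the likely pattern (not the project's actual code), the helper could run a parameterized statement against a DB-API style connection:

# Hypothetical sketch of the wrapped helper; the real signature is unknown.
def insert(query, val, connection):
    cursor = connection.cursor()
    cursor.execute(query, val)  # e.g. "INSERT INTO t (a, b) VALUES (%s, %s)" with val=(1, 2)
    connection.commit()
    return cursor.lastrowid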
Example #23
    def setUpClass(cls) -> None:
        """Create the expected schema in the test database"""
        drop_all_user_databases()
        database_name = "data_types_database"
        schema_name = "dbo"

        # use all valid unicode characters
        chars = list(range(0, 55296))
        chars.extend(
            range(57344, 65534)
        )  # TODO - BUG https://stitchdata.atlassian.net/browse/SRCE-1217
        chars.extend(range(65535, sys.maxunicode))
        chars.reverse()  # pop starting with ascii characters

        char_values = [(pk, "".join([chr(chars.pop()) for _ in range(2)]))
                       for pk in range(16)]
        char_values.extend([
            (16, None),
        ])
        char_schema = {
            'type': 'object',
            'selected': True,
            'properties': {
                'char_2': {
                    'type': ['string', 'null'],
                    'maxLength': 2,
                    'inclusion': 'available',
                    'selected': True
                },
                # 'minLength': 2},
                'pk': {
                    'maximum': 2147483647,
                    'type': ['integer'],
                    'inclusion': 'automatic',
                    'selected': True,
                    'minimum': -2147483648
                }
            }
        }

        varchar_values = [(pk, chr(chars.pop()), "".join([
            chr(chars.pop()) for _ in range(15)
        ]), "".join([chr(chars.pop()) for _ in range(randint(1, 16))]))
                          for pk in range(3)]
        varchar_values.extend([
            (3, None, None, None),
        ])
        varchar_schema = {
            'type': 'object',
            'selected': True,
            'properties': {
                'pk': {
                    'maximum': 2147483647,
                    'type': ['integer'],
                    'inclusion': 'automatic',
                    'selected': True,
                    'minimum': -2147483648
                },
                'varchar_8000': {
                    'type': ['string', 'null'],
                    'maxLength': 8000,
                    'inclusion': 'available',
                    'selected': True
                },
                # 'minLength': 0},
                'varchar_5': {
                    'type': ['string', 'null'],
                    'maxLength': 5,
                    'inclusion': 'available',
                    'selected': True
                },
                # 'minLength': 0},
                'varchar_max': {
                    'type': ['string', 'null'],
                    'maxLength': 2147483647,
                    'inclusion': 'available',
                    'selected': True
                }
            }
        }
        # 'minLength': 0}}}

        chars.reverse()
        nchar_values = [(pk, "".join([chr(chars.pop()) for _ in range(4)]))
                        for pk in range(3)]
        #  expect that values are right padded with spaces in the db.
        nchar_values = [
            (x, "{}{}".format(y,
                              " " * ((16 - len(y.encode('utf-16-le'))) // 2)))
            for x, y in nchar_values
        ]
        nchar_values.extend([
            (3, None),
        ])
        nchar_schema = {
            'type': 'object',
            'selected': True,
            'properties': {
                'nchar_8': {
                    'type': ['string', 'null'],
                    'maxLength': 8,
                    'inclusion': 'available',
                    'selected': True
                },
                # 'minLength': 8},  # length is based on bytes, not characters
                'pk': {
                    'maximum': 2147483647,
                    'type': ['integer'],
                    'inclusion': 'automatic',
                    'selected': True,
                    'minimum': -2147483648
                }
            }
        }

        nvarchar_values = [(pk, chr(chars.pop()), "".join([
            chr(chars.pop()) for _ in range(8)
        ]), "".join([chr(chars.pop()) for _ in range(randint(1, 8))]))
                           for pk in range(50)]
        nvarchar_values.extend([
            (50, None, None, None),
        ])

        pk = 51
        while len(chars):
            #  Use the rest of the characters
            nvarchar_values.extend([(pk, chr(chars.pop()), "".join([
                chr(chars.pop()) for _ in range(min(len(chars), 800))
            ]) if len(chars) else "", "".join([
                chr(chars.pop())
                for _ in range(min(len(chars), randint(1, 800)))
            ]) if len(chars) else "")])
            pk += 1

        nvarchar_schema = {
            'type': 'object',
            'selected': True,
            'properties': {
                'nvarchar_max': {
                    'type': ['string', 'null'],
                    'maxLength': 2147483647,
                    'inclusion': 'available',
                    'selected': True
                },
                # 'minLength': 0},
                'pk': {
                    'maximum': 2147483647,
                    'type': ['integer'],
                    'inclusion': 'automatic',
                    'selected': True,
                    'minimum': -2147483648
                },
                'nvarchar_4000': {
                    'type': ['string', 'null'],
                    'maxLength': 4000,
                    'inclusion': 'available',
                    'selected': True
                },
                # 'minLength': 0},
                'nvarchar_5': {
                    'type': ['string', 'null'],
                    'maxLength': 5,
                    'inclusion': 'available',
                    'selected': True
                }
            }
        }
        # 'minLength': 0}}}

        cls.EXPECTED_METADATA = {
            'data_types_database_dbo_char_data': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                char_values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'char_data',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'char_2': {
                        'sql-datatype': 'char',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                char_schema
            },
            'data_types_database_dbo_varchar_data': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                varchar_values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'varchar_data',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'varchar_5': {
                        'sql-datatype': 'varchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'varchar_8000': {
                        'sql-datatype': 'varchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'varchar_max': {
                        'sql-datatype': 'varchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                varchar_schema
            },
            'data_types_database_dbo_nchar_data': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                nchar_values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'nchar_data',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'nchar_8': {
                        'sql-datatype': 'nchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                nchar_schema
            },
            'data_types_database_dbo_nvarchar_data': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                nvarchar_values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'nvarchar_data',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'nvarchar_5': {
                        'sql-datatype': 'nvarchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'nvarchar_4000': {
                        'sql-datatype': 'nvarchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'nvarchar_max': {
                        'sql-datatype': 'nvarchar',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                nvarchar_schema
            },
        }
        query_list = list(
            create_database(database_name, "Latin1_General_CS_AS"))

        table_name = "char_data"
        column_name = ["pk", "char_2"]  # , "char_8000"]
        column_type = ["int", "char(2)"]  # , "char(8000)"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(database_name, schema_name, table_name, char_values))

        table_name = "varchar_data"
        column_name = ["pk", "varchar_5", "varchar_8000", "varchar_max"]
        column_type = ["int", "varchar(5)", "varchar(8000)", "varchar(max)"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(database_name, schema_name, table_name, varchar_values))

        table_name = "nchar_data"
        column_name = ["pk", "nchar_8"]  # , "nchar_4000"]
        column_type = ["int", "nchar(8)"]  # , "nchar(4000)"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        # strip padding off query data
        nchar_query_values = [(x, y.rstrip() if isinstance(y, str) else y)
                              for x, y in nchar_values]
        query_list.extend(
            insert(database_name, schema_name, table_name, nchar_query_values))

        table_name = "nvarchar_data"
        column_name = ["pk", "nvarchar_5", "nvarchar_4000", "nvarchar_max"]
        column_type = ["int", "nvarchar(5)", "nvarchar(4000)", "nvarchar(max)"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))

        query_list.extend(
            insert(database_name, schema_name, table_name, nvarchar_values))
        query_list.extend(
            ['-- there are {} characters left to test'.format(len(chars))])

        cls.expected_metadata = cls.discovery_expected_metadata

        mssql_cursor_context_manager(*query_list)
Example #24
import sqlite3
import time
import database

conn = sqlite3.connect("dns.db")
database.setup('sqlite3', conn, False)
database.execute(
    "create table keyvalue (key varchar(10) PRIMARY key, value varchar(128))")
if 0:
    for i in xrange(1000000):
        database.insert([['key', str(i).rjust(10, '0')],
                         ['value', str(i).rjust(10, '0')]], "keyvalue", False)
        if i % 5000 == 0:
            conn.commit()
else:
    for i in xrange(10000, 20000):
        t1 = time.time()
        result = database.select(
            ["count(1)"], [["key", "=", str(i).rjust(10, '0')]], "keyvalue", 1)
        t2 = time.time()
        print(t2 - t1)
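For comparison only (not part of Example #24), the same insert-then-lookup pattern can be written directly against the sqlite3 connection with parameterized statements, bypassing the custom `database` module; this sketch reuses the `conn` and `keyvalue` table created above:

# Plain-sqlite3 equivalent of the pattern above, shown only for comparison.
cur = conn.cursor()
for i in range(1000):
    cur.execute("insert into keyvalue (key, value) values (?, ?)",
                (str(i).rjust(10, '0'), str(i).rjust(10, '0')))
conn.commit()
cur.execute("select count(1) from keyvalue where key = ?", (str(42).rjust(10, '0'),))
print(cur.fetchone()[0])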
Example #25
while (1):

    system("raspistill -o Img.png")

    user = face_recognition.recognize("Img.png")
    if user == -2:
        print "LIFT is Idle No face Detected"
        continue
    entry = int(raw_input("Enter Current Floor No :"))

    if user == -1:
        #	print "AAya"
        exit = int(raw_input("Enter Destination :"))
        user = int(time.time())
        database.insert(user, entry, exit)

        if abs(entry - cur_floor_lift1) < abs(entry - cur_floor_lift2):
            cur_floor_lift1 = move_lift1(cur_floor_lift1, entry)
            cur_floor_lift1 = move_lift1(entry, exit)

        else:
            cur_floor_lift2 = move_lift2(cur_floor_lift2, entry)
            cur_floor_lift2 = move_lift2(entry, exit)

        face_recognition.post("Img.png", user)

    elif database.find(user, entry) == -1:
        #	print "Pss"
        exit = int(raw_input("Enter Destination :"))
Example #26
    def test_run(self):
        """stream_expected_data[self.VALUES]
        Verify that a full sync can send capture all data and send it in the correct format
        for integer and boolean (bit) data.
        Verify that the fist sync sends an activate immediately.
        Verify that the table version is incremented up
        """
        print("running test {}".format(self.name()))

        conn_id = self.create_connection()

        # run in check mode
        check_job_name = runner.run_check_mode(self, conn_id)

        # verify check  exit codes
        exit_status = menagerie.get_exit_status(conn_id, check_job_name)
        menagerie.verify_check_exit_status(self, exit_status, check_job_name)

        # get the catalog information of discovery
        found_catalogs = menagerie.get_catalogs(conn_id)
        additional_md = [{
            "breadcrumb": [],
            "metadata": {
                'replication-method': 'INCREMENTAL',
                'replication-key': 'replication_key_column'
            }
        }]

        BaseTapTest.select_all_streams_and_fields(conn_id,
                                                  found_catalogs,
                                                  additional_md=additional_md)

        # clear state
        menagerie.set_state(conn_id, {})
        sync_job_name = runner.run_sync_mode(self, conn_id)

        # verify tap and target exit codes
        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)

        # verify record counts of streams
        record_count_by_stream = runner.examine_target_output_file(
            self, conn_id, self.expected_streams(),
            self.expected_primary_keys_by_stream_id())
        expected_count = {
            k: len(v['values'])
            for k, v in self.expected_metadata().items()
        }
        self.assertEqual(record_count_by_stream, expected_count)

        # verify records match on the first sync
        records_by_stream = runner.get_records_from_target_output()

        non_selected_properties = []

        table_version = dict()
        for stream in self.expected_streams():
            with self.subTest(stream=stream):
                stream_expected_data = self.expected_metadata()[stream]
                table_version[stream] = records_by_stream[stream][
                    'table_version']

                # verify on the first sync you get
                # activate version message before and after all data for the full table
                # and before the logical replication part
                self.assertEqual(
                    records_by_stream[stream]['messages'][0]['action'],
                    'activate_version')
                self.assertEqual(
                    records_by_stream[stream]['messages'][-1]['action'],
                    'activate_version')
                self.assertTrue(
                    all([
                        m["action"] == "upsert"
                        for m in records_by_stream[stream]['messages'][1:-1]
                    ]),
                    msg="Expect all but the first message to be upserts")
                self.assertEqual(len(
                    records_by_stream[stream]['messages'][1:-1]),
                                 len(stream_expected_data[self.VALUES]),
                                 msg="incorrect number of upserts")

                column_names = [
                    list(field_data.keys())[0]
                    for field_data in stream_expected_data[self.FIELDS]
                ]

                expected_messages = [{
                    "action": "upsert",
                    "data": {
                        column: value
                        for column, value in list(zip(column_names,
                                                      row_values))
                        if column not in non_selected_properties
                    }
                } for row_values in sorted(stream_expected_data[self.VALUES],
                                           key=lambda row:
                                           (row[1] is not None, row[1]))]

                # Verify all data is correct for incremental
                for expected_row, actual_row in list(
                        zip(expected_messages,
                            records_by_stream[stream]['messages'][1:-1])):
                    with self.subTest(expected_row=expected_row):
                        self.assertEqual(actual_row["action"], "upsert")
                        self.assertEqual(
                            len(expected_row["data"].keys()),
                            len(actual_row["data"].keys()),
                            msg="there are not the same number of columns")
                        for column_name, expected_value in expected_row[
                                "data"].items():
                            if isinstance(expected_value, datetime):
                                # sql server only keeps milliseconds not microseconds
                                self.assertEqual(
                                    expected_value.isoformat().replace(
                                        '000+00:00',
                                        'Z').replace('+00:00', 'Z'),
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat().replace(
                                            '000+00:00',
                                            'Z').replace('+00:00', 'Z'),
                                        actual_row["data"][column_name]))
                            elif isinstance(expected_value, time):
                                # sql server time has second resolution only
                                self.assertEqual(
                                    expected_value.replace(
                                        microsecond=0).isoformat().replace(
                                            '+00:00', ''),
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat().replace(
                                            '+00:00', 'Z'),
                                        actual_row["data"][column_name]))
                            elif isinstance(expected_value, date):
                                # sql server date values are emitted as midnight datetimes
                                self.assertEqual(
                                    expected_value.isoformat() +
                                    'T00:00:00+00:00',
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat() +
                                        'T00:00:00+00:00',
                                        actual_row["data"][column_name]))
                            else:
                                self.assertEqual(
                                    expected_value,
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value,
                                        actual_row["data"][column_name]))
                print("records are correct for stream {}".format(stream))

                # verify state and bookmarks
                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                self.assertIsNone(
                    state.get('currently_syncing'),
                    msg="expected state's currently_syncing to be None")
                self.assertIsNone(bookmark.get('current_log_version'),
                                  msg="no log_version for incremental")
                self.assertIsNone(bookmark.get('initial_full_table_complete'),
                                  msg="no full table for incremental")
                # find the max value of the replication key
                expected_bookmark = max([
                    row[1] for row in stream_expected_data[self.VALUES]
                    if row[1] is not None
                ])
                self.assertEqual(bookmark['replication_key_value'],
                                 expected_bookmark.isoformat())
                # self.assertEqual(bookmark['replication_key'], 'replication_key_value')

                self.assertEqual(
                    bookmark['version'],
                    table_version[stream],
                    msg="expected bookmark for stream to match version")

                expected_schemas = self.expected_metadata()[stream]['schema']
                self.assertEqual(records_by_stream[stream]['schema'],
                                 expected_schemas,
                                 msg="expected: {} != actual: {}".format(
                                     expected_schemas,
                                     records_by_stream[stream]['schema']))

        # ----------------------------------------------------------------------
        # invoke the sync job AGAIN after inserting, updating, and deleting rows
        # ----------------------------------------------------------------------

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "dates_and_times"
        column_name = [
            "pk", "replication_key_column", "date_and_time",
            "bigger_range_and_precision_datetime", "datetime_with_timezones",
            "datetime_no_seconds", "its_time"
        ]
        insert_value = [
            (5, date(9999, 12, 30),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=14))).isoformat(),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc)),
            (6, date(2018, 12, 29),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=14))).isoformat(),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc))
        ]
        update_value = [
            (3, date(9999, 12, 31),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=10))).isoformat(),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc)),
            (4, date(2018, 12, 30),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=6))).isoformat(),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc))
        ]
        delete_value = [(2, )]
        query_list = (insert(database_name, schema_name, table_name,
                             insert_value))
        query_list.extend(
            delete_by_pk(database_name, schema_name, table_name, delete_value,
                         column_name[:1]))
        query_list.extend(
            update_by_pk(database_name, schema_name, table_name, update_value,
                         column_name))
        mssql_cursor_context_manager(*query_list)

        insert_value = [
            (5, date(9999, 12, 30),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=14))).astimezone(
                          timezone.utc),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc)),
            (6, date(2018, 12, 29),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=14))).astimezone(
                          timezone.utc),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc))
        ]
        update_value = [
            (3, date(9999, 12, 31),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=10))).astimezone(
                          timezone.utc),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc)),
            (4, date(2018, 12, 30),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=6))).astimezone(
                          timezone.utc),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc))
        ]

        insert_value = insert_value[:-1]  # only rows with a replication key >= the bookmark get included
        update_value = update_value[:-1]
        self.EXPECTED_METADATA["data_types_database_dbo_dates_and_times"][
            "values"] = [(
                1, date(9999, 12, 29),
                datetime(9999, 12, 31, 23, 59, 59, 997000,
                         tzinfo=timezone.utc),
                datetime(9999, 12, 31, 23, 59, 59, 999000,
                         tzinfo=timezone.utc),
                datetime(
                    9999, 12, 31, 10, 14, tzinfo=timezone(
                        timedelta(hours=14))).astimezone(timezone.utc),
                datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
                time(23, 59, 59, tzinfo=timezone.utc))
                         ] + update_value + insert_value

        sync_job_name = runner.run_sync_mode(self, conn_id)

        # verify tap and target exit codes
        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)
        record_count_by_stream = runner.examine_target_output_file(
            self, conn_id, self.expected_streams(),
            self.expected_primary_keys_by_stream_id())
        expected_count = {
            k: len(v['values'])
            for k, v in self.expected_metadata().items()
        }
        self.assertEqual(record_count_by_stream, expected_count)
        records_by_stream = runner.get_records_from_target_output()

        for stream in self.expected_streams():
            with self.subTest(stream=stream):
                stream_expected_data = self.expected_metadata()[stream]
                new_table_version = records_by_stream[stream]['table_version']

                # verify on a subsequent sync you get activate version message only after all data
                self.assertEqual(
                    records_by_stream[stream]['messages'][0]['action'],
                    'activate_version')
                self.assertEqual(
                    records_by_stream[stream]['messages'][-1]['action'],
                    'activate_version')
                self.assertTrue(
                    all([
                        message["action"] == "upsert" for message in
                        records_by_stream[stream]['messages'][1:-1]
                    ]))
                self.assertEqual(len(
                    records_by_stream[stream]['messages'][1:-1]),
                                 len(stream_expected_data[self.VALUES]),
                                 msg="incorrect number of upserts")

                column_names = [
                    list(field_data.keys())[0]
                    for field_data in stream_expected_data[self.FIELDS]
                ]

                expected_messages = [{
                    "action": "upsert",
                    "data": {
                        column: value
                        for column, value in list(zip(column_names,
                                                      row_values))
                        if column not in non_selected_properties
                    }
                } for row_values in sorted(stream_expected_data[self.VALUES],
                                           key=lambda row:
                                           (row[1] is not None, row[1]))]

                # remove sequences from actual values for comparison
                [
                    message.pop("sequence")
                    for message in records_by_stream[stream]['messages'][1:-1]
                ]

                # Verify all data is correct
                for expected_row, actual_row in list(
                        zip(expected_messages,
                            records_by_stream[stream]['messages'][1:-1])):
                    with self.subTest(expected_row=expected_row):
                        self.assertEqual(actual_row["action"], "upsert")

                        # we only send the _sdc_deleted_at column for deleted rows
                        self.assertEqual(
                            len(expected_row["data"].keys()),
                            len(actual_row["data"].keys()),
                            msg="there are not the same number of columns")
                        for column_name, expected_value in expected_row[
                                "data"].items():
                            if isinstance(expected_value, datetime):
                                # sql server only keeps milliseconds not microseconds
                                self.assertEqual(
                                    expected_value.isoformat().replace(
                                        '000+00:00',
                                        'Z').replace('+00:00', 'Z'),
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat().replace(
                                            '000+00:00',
                                            'Z').replace('+00:00', 'Z'),
                                        actual_row["data"][column_name]))
                            elif isinstance(expected_value, time):
                                # sql server time has second resolution only
                                self.assertEqual(
                                    expected_value.replace(
                                        microsecond=0).isoformat().replace(
                                            '+00:00', ''),
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat().replace(
                                            '+00:00', 'Z'),
                                        actual_row["data"][column_name]))
                            elif isinstance(expected_value, date):
                                # sql server dates come back as datetime strings at midnight UTC
                                self.assertEqual(
                                    expected_value.isoformat() +
                                    'T00:00:00+00:00',
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat() +
                                        'T00:00:00+00:00',
                                        actual_row["data"][column_name]))
                            else:
                                self.assertEqual(
                                    expected_value,
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value,
                                        actual_row["data"][column_name]))
                print("records are correct for stream {}".format(stream))

                # verify state and bookmarks
                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                self.assertIsNone(
                    state.get('currently_syncing'),
                    msg="expected state's currently_syncing to be None")
                self.assertIsNone(bookmark.get('current_log_version'),
                                  msg="no log_version for incremental")
                self.assertIsNone(bookmark.get('initial_full_table_complete'),
                                  msg="no full table for incremental")
                # find the max value of the replication key
                expected_bookmark = max([
                    row[1] for row in stream_expected_data[self.VALUES]
                    if row[1] is not None
                ])
                self.assertEqual(bookmark['replication_key_value'],
                                 expected_bookmark.isoformat())
                # self.assertEqual(bookmark['replication_key'], 'replication_key_value')

                self.assertEqual(
                    bookmark['version'],
                    table_version[stream],
                    msg="expected bookmark for stream to match version")
                self.assertEqual(
                    bookmark['version'],
                    new_table_version,
                    msg="expected bookmark for stream to match version")

                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                expected_schemas = self.expected_metadata()[stream]['schema']
                self.assertEqual(records_by_stream[stream]['schema'],
                                 expected_schemas,
                                 msg="expected: {} != actual: {}".format(
                                     expected_schemas,
                                     records_by_stream[stream]['schema']))
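The assertions above hinge on how Python normalises SQL Server datetimeoffset values to UTC before comparing them with the tap's output. A standalone sketch of that conversion, using the same edge-case value as the rows above (illustration only, not part of the test suite):

from datetime import datetime, timedelta, timezone

# the +14:00 edge case used in the rows above
dto = datetime(9999, 12, 31, 10, 14, tzinfo=timezone(timedelta(hours=14)))

# value formatted for the INSERT statement (offset preserved)
print(dto.isoformat())                            # 9999-12-31T10:14:00+14:00
# value the expected metadata holds after normalising to UTC
print(dto.astimezone(timezone.utc).isoformat())   # 9999-12-30T20:14:00+00:00
# the assertion above then swaps '+00:00' for 'Z' to match the tap's serialisation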
示例#27
0
def become_merchant():
    if 'user_id' in session and db.exists('USER', 'id', session['user_id']):
        client = db.get('CLIENT', 'id', session['user_id'])
        db.insert('MERCHANT', ('id', 'name'), (session['user_id'], client['name']))
    return jsonify({'SUCCESS': True}), 200
示例#28
0
def create_checkout():
    ''' Create/Update Checkout
    ---
    put:
        description: Updates the Checkout information. Updating the information will replace all the original information.
        tags:
            - Payment
        parameters:
            - in: path
              name: checkout_token
              schema:
                type: string
              required: true
              description: Checkout's ID value given when it is created.
        requestBody:
            required: true
            content:
              application/json:
                schema:
                  type: object
                  properties:
                    AMOUNT:
                      type: number
                      description: Total amount to be paid by the client. This value must be equal to the sum of the items' prices.
                    MERCHANT:
                      type: string
                      description: ID that identifies merchant in the system.
                    RETURN_URL:
                      type: string
                      description: URL to which the client is redirected if the payment is successful
                    CANCEL_URL:
                      type: string
                      description: URL to which the client is redirected if the payment is cancelled
                    CURRENCY:
                      type: string
                      description: Three-character currency code. Default value is 'EUR'. [https://www.xe.com/iso4217.php]
                      default : EUR
                    ITEMS:
                      type: array
                      items:
                            type : object
                            properties:
                                NAME:
                                    type : string
                                    description: Checkout item's name. Default value is "Item". This parameter is required if you fill any other item parameter.
                                    default : Item
                                PRICE:
                                    type : number
                                    description: Checkout item's price. Default value is the one given in 'AMOUNT'. This parameter is required if you fill any other item parameter.
                                QUANTITY:
                                    type : integer
                                    description: Checkout item's quantity. Default value is 1. This parameter is never required.
                                    default : 1
                                IMAGE:
                                    type : string
                                    description: Checkout item's image URL. It must be from your domain. This parameter is never required.
                                URL:
                                    type : string
                                    description: Checkout item's URL on your domain. This parameter is never required.
                  required:
                    - AMOUNT
                    - MERCHANT
                    - RETURN_URL
                    - CANCEL_URL
        responses:
            201:
                description: A JSON containing the result of the process
                content:
                    application/json:
                      schema:
                        properties:
                          SUCCESS:
                            type: boolean
            400:
                description: A JSON containing an ERROR that identifies the problem
                content:
                    application/json:
                      schema:
                        properties:
                          ERROR:
                            type: string
    post:
        description: Creates a checkout.
        tags:
            - Payment
        requestBody:
            required: true
            content:
              application/json:
                schema:
                  type: object
                  properties:
                    AMOUNT:
                      type: number
                      description: Total amount to be paid by the client. This value must be equal to the sum of the items' prices.
                    MERCHANT:
                      type: string
                      description: ID that identifies merchant in the system.
                    RETURN_URL:
                      type: string
                      description: URL to which the client is redirected if the payment is successful
                    CANCEL_URL:
                      type: string
                      description: URL to which the client is redirected if the payment is cancelled
                    CURRENCY:
                      type: string
                      description: Three-character currency code. Default value is 'EUR'. [https://www.xe.com/iso4217.php]
                      default : EUR
                    ITEMS:
                      type: array
                      items:
                            type : object
                            properties:
                                NAME:
                                    type : string
                                    description: Checkout item's name. Default value is "Item". This parameter is required if you fill any other item parameter.
                                    default : Item
                                PRICE:
                                    type : number
                                    description: Checkout item's price. Default value is the one given in 'AMOUNT'. This parameter is required if you fill any other item parameter.
                                QUANTITY:
                                    type : integer
                                    description: Checkout item's quantity. Default value is 1. This parameter is never required.
                                    default : 1
                                IMAGE:
                                    type : string
                                    description: Checkout item's image URL. It must be from your domain. This parameter is never required.
                                URL:
                                    type : string
                                    description: Checkout item's URL on your domain. This parameter is never required.
                  required:
                    - AMOUNT
                    - MERCHANT
                    - RETURN_URL
                    - CANCEL_URL
        responses:
            201:
                description: A JSON containing a TOKEN that identifies the Checkout
                content:
                    application/json:
                      schema:
                        properties:
                          TOKEN:
                            type: string
            400:
                description: A JSON containing an ERROR that identifies the problem
                content:
                    application/json:
                      schema:
                        properties:
                          ERROR:
                            type: string
    '''
    # request.form looks ugly and takes too much space...
    param = request.json
    keys = param.keys()
    expected_keys = ['AMOUNT', 'RETURN_URL', 'CANCEL_URL', 'MERCHANT', 'CURRENCY', 'ITEMS']

    # Checking for required parameters
    if not param or not check_keys(expected_keys[:-2], keys):
        return jsonify({'ERROR': error_message('invalid_request')}), 400

    # Checking if the URLs are valid
    if not uri_validator(param['RETURN_URL']) or not uri_validator(param['CANCEL_URL']):
        return jsonify({'ERROR': error_message('invalid_url')}), 400

    # Checking if amount is a valid number
    if not is_number(param['AMOUNT']):
        return jsonify({'ERROR': error_message('invalid_amount')}), 400

    # Checking if merchant exists
    if not db.exists('MERCHANT', 'id', param['MERCHANT']):
        return jsonify({'ERROR': error_message('invalid_merchant')}), 400

    # If request is POST a.k.a creating a new checkout
    if request.method == 'POST':
        while True:
            token = secrets.token_urlsafe(16)
            if not db.exists('CHECKOUT', 'id', token):
                break
    # Else updating existing one
    else:
        if delete_checkout()[1] == 200:
            token = request.args['checkout_token']
        else:
            return jsonify({'ERROR': error_message('invalid_checkout')}), 400

    # Sort keys according to db insertion order (sorted() returns a new list, so keep the result)
    keys = sorted(keys, key=lambda x: expected_keys.index(x))

    # Checking for optional parameters
    if 'CURRENCY' not in keys:
        param['CURRENCY'] = None

    # Inserting new checkout to database
    try:
        db.insert('CHECKOUT', \
            ('id', 'amount', 'return_url', 'cancel_url', 'merchant', 'currency'), \
            tuple( [token] + [param[k] for k in expected_keys[:-1]] ) )
    except Exception as e:
        print(e)
        return jsonify({'ERROR': error_message('db_error')}), 500

    # Adding items to checkout if given by the merchant
    if 'ITEMS' in keys and not add_items(param['ITEMS'], token, param['AMOUNT']):
        delete_checkout()
        return jsonify({'ERROR': error_message('add_items')}), 400

    # Everything went well, returning token for new checkout or true if it was an update
    return (jsonify({'CHECKOUT_TOKEN': token}), 201) if request.method == 'POST' else (jsonify({'SUCCESS': True}), 200)
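A hedged sketch of how a client might exercise this endpoint; the base URL, the /checkout route, and the merchant id below are illustrative assumptions, not part of the code above:

import requests

BASE_URL = "http://localhost:5000"   # hypothetical deployment
payload = {
    "AMOUNT": 19.99,
    "MERCHANT": "some-merchant-id",  # must already exist in the MERCHANT table
    "RETURN_URL": "https://shop.example.com/return",
    "CANCEL_URL": "https://shop.example.com/cancel",
    "CURRENCY": "EUR",
    "ITEMS": [{"NAME": "Item", "PRICE": 19.99, "QUANTITY": 1}],
}

# POST creates a checkout and returns its token
resp = requests.post(BASE_URL + "/checkout", json=payload)   # route name assumed
print(resp.status_code, resp.json())   # expect 201 and {"CHECKOUT_TOKEN": "..."}

# PUT with ?checkout_token=... replaces the stored checkout information
token = resp.json()["CHECKOUT_TOKEN"]
resp = requests.put(BASE_URL + "/checkout", params={"checkout_token": token}, json=payload)
print(resp.status_code, resp.json())   # expect 200 and {"SUCCESS": true}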
示例#29
0
def create_partner(partner):
    database.insert(partner)
示例#30
0
    def setUpClass(cls) -> None:
        """Create the expected schema in the test database"""
        drop_all_user_databases()
        database_name = "data_types_database"
        schema_name = "dbo"

        numeric_values = [
            (0, Decimal('-99999.9999'), Decimal('-9999999.999999999999'),
             Decimal('-999999.9999999999999999999999'),
             Decimal('-99999999999999999999999999999999999.999')),
            (1, 0, 0, 0, 0), (2, None, None, None, None),
            (3, Decimal('99999.9999'), Decimal('9999999.999999999999'),
             Decimal('999999.9999999999999999999999'),
             Decimal('99999999999999999999999999999999999.999')),
            (4, Decimal('96701.9382'), Decimal('-4371716.186100650268'),
             Decimal('-367352.306093776232045517794'),
             Decimal('-81147872128956247517327931319278572.985')),
            (5, Decimal('-73621.9366'), Decimal('2564047.277589545531'),
             Decimal('336177.4754683699464233786667'),
             Decimal('46946462608534127558389411015159825.758')),
            (6, Decimal('-3070.7339'), Decimal('6260062.158440967433'),
             Decimal('-987006.0035971607740533206418'),
             Decimal('95478671259010046866787754969592794.61'))
        ]
        numeric_precision_scale = [(9, 4), (19, 12), (28, 22), (38, 3)]

        # TODO - Remove this workaround and re-test once decimal precision is fixed
        # numeric_values = [
        #     (0, Decimal('-9999999.99'), Decimal('-9999999999999.99')),
        #     (1, 0, 0),
        #     (2, None, None),
        #     (3, Decimal('9999999.99'), Decimal('9999999999999.99')),
        #     (4, Decimal('-4133076.27'), Decimal('8499042653781.28')),
        #     (5, Decimal('-8629188.35'), Decimal('-4589639716080.97')),
        #     (6, Decimal('-9444926.01'), Decimal('7151189415270.4'))]
        # numeric_precision_scale = [(9, 2), (15, 2)]
        numeric_schema = {
            'type': 'object',
            'properties': {
                'numeric_9_4': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 0.0001,
                    'maximum': 1e5,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e5
                },
                'numeric_19_12': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 1e-12,
                    'maximum': 1e7,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e7
                },
                'numeric_28_22': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 1e-22,
                    'maximum': 1e6,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e6
                },
                'numeric_38_3': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': .001,
                    'maximum': 1e35,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e35
                },
                'pk': {
                    'maximum': 2147483647,
                    'type': ['integer'],
                    'inclusion': 'automatic',
                    'minimum': -2147483648,
                    'selected': True
                }
            },
            'selected': True
        }
        # numeric_schema = {
        #     'type': 'object',
        #     'properties': {
        #         'numeric_9_2': {
        #             'exclusiveMaximum': True,
        #             'type': ['number', 'null'],
        #             'selected': True,
        #             'multipleOf': 0.01,
        #             'maximum': 10000000,
        #             'inclusion': 'available',
        #             'exclusiveMinimum': True,
        #             'minimum': -10000000},
        #         'numeric_15_2': {
        #             'exclusiveMaximum': True,
        #             'type': ['number', 'null'],
        #             'selected': True,
        #             'multipleOf': 0.01,
        #             'maximum': 10000000000000,
        #             'inclusion': 'available',
        #             'exclusiveMinimum': True,
        #             'minimum': -10000000000000},
        #         'pk': {
        #             'maximum': 2147483647,
        #             'type': ['integer'],
        #             'inclusion': 'automatic',
        #             'minimum': -2147483648,
        #             'selected': True}},
        #     'selected': True}

        decimal_values = [
            (0, Decimal('-99999.9999'), Decimal('-9999999999999.999999'),
             Decimal('-9999999999999999999999.999999'),
             Decimal('-9999999999999999999999999.9999999999999')),
            (1, 0, 0, 0, 0), (2, None, None, None, None),
            (3, Decimal('99999.9999'), Decimal('9999999999999.999999'),
             Decimal('9999999999999999999999.999999'),
             Decimal('9999999999999999999999999.9999999999999')),
            (4, Decimal('-92473.8401'), Decimal('-4182159664734.645653'),
             Decimal('6101329656084900380190.268036'),
             Decimal('4778017533841887320066645.9761464001349')),
            (5, Decimal('-57970.8157'), Decimal('7735958802279.086687'),
             Decimal('4848737828398517845540.057905'),
             Decimal('2176036096567853905237453.5152648989022')),
            (6, Decimal('57573.9037'), Decimal('5948502499261.181557'),
             Decimal('-6687721783088280707003.076638'),
             Decimal('-6264019242578746090842245.3746225058202'))
        ]
        decimal_precision_scale = [(9, 4), (19, 6), (28, 6), (38, 13)]

        # TODO - Remove this workaround and re-test once decimal precision is fixed
        # decimal_values = [
        #     (0, Decimal('-9999.99999'), Decimal('-999999999999.999')),
        #     (1, 0, 0),
        #     (2, None, None),
        #     (3, Decimal('9999.99999'), Decimal('999999999999.999')),
        #     (4, Decimal('7191.0647'), Decimal('284159490729.628')),
        #     (5, Decimal('6470.19405'), Decimal('-631069143780.173')),
        #     (6, Decimal('4708.67525'), Decimal('-570692336616.609'))]
        # decimal_precision_scale = [(9, 5), (15, 3)]
        decimal_schema = {
            'type': 'object',
            'properties': {
                'decimal_9_4': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 0.0001,
                    'maximum': 1e5,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e5
                },
                'decimal_19_6': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 1e-6,
                    'maximum': 1e13,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e13
                },
                'decimal_28_6': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 1e-6,
                    'maximum': 1e22,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e22
                },
                'decimal_38_13': {
                    'exclusiveMaximum': True,
                    'type': ['number', 'null'],
                    'selected': True,
                    'multipleOf': 1e-13,
                    'maximum': 1e25,
                    'inclusion': 'available',
                    'exclusiveMinimum': True,
                    'minimum': -1e25
                },
                'pk': {
                    'maximum': 2147483647,
                    'type': ['integer'],
                    'inclusion': 'automatic',
                    'minimum': -2147483648,
                    'selected': True
                }
            },
            'selected': True
        }
        # decimal_schema = {
        #     'type': 'object',
        #     'properties': {
        #         'decimal_15_3': {
        #             'exclusiveMaximum': True,
        #             'type': ['number', 'null'],
        #             'selected': True,
        #             'multipleOf': 0.001,
        #             'maximum': 1000000000000,
        #             'inclusion': 'available',
        #             'exclusiveMinimum': True,
        #             'minimum': -1000000000000},
        #         'decimal_9_5': {
        #             'exclusiveMaximum': True,
        #             'type': ['number', 'null'],
        #             'selected': True,
        #             'multipleOf': 1e-05,
        #             'maximum': 10000,
        #             'inclusion': 'available',
        #             'exclusiveMinimum': True, 'minimum': -10000},
        #         'pk': {
        #             'maximum': 2147483647,
        #             'type': ['integer'],
        #             'inclusion': 'automatic',
        #             'minimum': -2147483648,
        #             'selected': True}},
        #     'selected': True}

        cls.EXPECTED_METADATA = {
            'data_types_database_dbo_numeric_precisions': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                numeric_values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'numeric_precisions',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'numeric_9_4': {
                        'sql-datatype': 'numeric(9,4)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'numeric_19_12': {
                        'sql-datatype': 'numeric(19,12)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'numeric_28_22': {
                        'sql-datatype': 'numeric(28,22)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'numeric_38_3': {
                        'sql-datatype': 'numeric(38,3)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                numeric_schema
            },
            'data_types_database_dbo_decimal_precisions': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                decimal_values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'decimal_precisions',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'decimal_9_4': {
                        'sql-datatype': 'decimal(9,4)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'decimal_19_6': {
                        'sql-datatype': 'decimal(19,6)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'decimal_28_6': {
                        'sql-datatype': 'decimal(28,6)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'decimal_38_13': {
                        'sql-datatype': 'decimal(38,13)',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                decimal_schema
            }
        }
        query_list = list(
            create_database(database_name, "Latin1_General_CS_AS"))
        # query_list.extend(create_schema(database_name, schema_name))

        # TODO - BUG https://stitchdata.atlassian.net/browse/SRCE-1075
        table_name = "numeric_precisions"
        precision_scale = numeric_precision_scale
        column_type = [
            "numeric({},{})".format(precision, scale)
            for precision, scale in precision_scale
        ]
        column_name = ["pk"] + [
            x.replace("(", "_").replace(",", "_").replace(")", "")
            for x in column_type
        ]
        column_type = ["int"] + column_type
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(
                database_name, schema_name, table_name, cls.EXPECTED_METADATA[
                    "data_types_database_dbo_numeric_precisions"]["values"]))

        table_name = "decimal_precisions"
        precision_scale = decimal_precision_scale
        column_type = [
            "decimal({},{})".format(precision, scale)
            for precision, scale in precision_scale
        ]
        column_name = ["pk"] + [
            x.replace("(", "_").replace(",", "_").replace(")", "")
            for x in column_type
        ]
        column_type = ["int"] + column_type
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(
                database_name, schema_name, table_name, cls.EXPECTED_METADATA[
                    "data_types_database_dbo_decimal_precisions"]["values"]))

        mssql_cursor_context_manager(*query_list)

        cls.expected_metadata = cls.discovery_expected_metadata
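The multipleOf / maximum / minimum bounds in the expected schemas above follow directly from each column's precision and scale. A small sketch of that mapping (illustration only, not part of the test suite):

def numeric_bounds(precision, scale):
    """JSON-schema style bounds for a SQL Server numeric/decimal(precision, scale) column."""
    limit = 10 ** (precision - scale)   # exclusive bound on the magnitude
    return {'multipleOf': 10 ** -scale, 'maximum': limit, 'minimum': -limit}

# matches the schemas above, e.g.:
print(numeric_bounds(9, 4))    # multipleOf 0.0001, maximum 100000 (1e5), minimum -100000
print(numeric_bounds(38, 13))  # multipleOf 1e-13,  maximum 1e25,        minimum -1e25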
示例#31
0
                     is_primary=False,
                     is_foreign='IMAGES',
                     is_done=True)
new_table.commit(conn)

#################

IMAGE = [df.columns[-1].upper() + '_STRING']
FACIAL_KEYSET = list(map(lambda x: x.upper(), df.columns[:-1]))
FACIAL_KEYSET.append('IMAGE_ID')

for i in range(0, len(df)):
    image_string = df.iloc[i][-1]
    facial_keypoint = list(df.iloc[i][:-1].values)

    ins = db.insert('IMAGES')
    ins.insert_into_table((image_string, ), IMAGE)
    cursor = ins.commit_one(conn)
    image_id = cursor.lastrowid  # grab the new image id before the cursor is closed
    cursor.close()

    facial_keypoint.append(image_id)
    ins = db.insert('FACIAL_KEYPOINTS')
    ins.insert_into_table(tuple(facial_keypoint), FACIAL_KEYSET)

    cursor = ins.commit_one(conn)
conn.commit()

#################

df = pd.read_csv(testing_dataset)
示例#32
0
文件: createuser.py 项目: sr/beadmin
#!/usr/bin/env python
#-*- coding: utf8 -*-

from dialog import *
import database as db
import dbconf
import re
import sys
import os
import pwgen

login = sys.argv[1]
password = pwgen.Pwgen().returnPwgen(8, useCapital=1, useNumeral=1, notAmbiguousChars=1)

os.system('usermod -p `mkpasswd %s` %s' % (password, login))
db.insert('users', login=login)

print db.query("""GRANT USAGE
	ON * . *
	TO '""" + login + """' @'%%'
	IDENTIFIED BY '""" + password + """' 
	WITH MAX_QUERIES_PER_HOUR 0 
	MAX_CONNECTIONS_PER_HOUR 0 
	MAX_UPDATES_PER_HOUR 0 
	MAX_USER_CONNECTIONS 0 ;""")

print 'Utilisateur %s créé' % login
print 'Mot de passe : "%s"' % password
示例#33
0
文件: hello.py 项目: dmydlarz/ztis
	def post(self):
		parser = reqparse.RequestParser()
		parser.add_argument('data', type = str)
		args = parser.parse_args()
		data_id = database.insert(args['data'])
		return flask.json.dumps(data_id, default=json_util.default), 201
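The resource's route registration is not shown; assuming it is mounted at /data, a request against it might look like this (host and endpoint are assumptions):

import requests

# hypothetical host and route -- only the handler body is shown above
resp = requests.post("http://localhost:5000/data", data={"data": "hello world"})
print(resp.status_code, resp.text)   # expect 201 and the JSON-encoded id of the inserted record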
示例#34
0
        # cv2.imshow('image', nimg)
        # cv2.waitKey(0)

    face_embedding = model_faceanalysis.get_embedding(nimg)
    print(face_embedding.shape)

    # test code
    print(face_embedding)

    # add the name of the person + corresponding face
    # embedding to their respective list
    knownNames.append(name)
    knownEmbeddings.append(face_embedding)
    total += 1

print(total, " faces embedded")

# test code
print(knownNames)
print('----------')
print(knownEmbeddings)

# save to output
# data = {"embeddings": knownEmbeddings, "names": knownNames}
# f = open(args.embeddings, "wb")
# f.write(pickle.dumps(data))
# f.close()

# insert into database
insert(knownNames, knownEmbeddings, args.key)
示例#35
0
import database
import hulop

USER = '******'
MAP = 'cole-qolt'

IMAGE_DIR = './examples/'

with app.app_context():
    for s in database.query('select session from answers_label group by session'):
        filename = "{0}{1}.png".format(IMAGE_DIR, s['session'])
        bounding_boxes = []
        answer_ids = []
        for answer in database.query('select * from answers_label where session = ?', [s['session']]):
            if answer['x1']:
                bounding_boxes.append([answer['x1'],answer['y1'],answer['x2'],answer['y2']])
                answer_ids.append(answer['id'])
        with open(filename, 'rb') as image:
            points = hulop.bounding_to_3d(image, USER, MAP, bounding_boxes)
        for i, box in enumerate(points):
            if len(box) < 1:
                x1,y1,x2,y2 =  bounding_boxes[i]
                print "No points found for image '{0}' ({1},{2},{3},{4})".format(filename,x1,y1,x2,y2)
            for p in box:
                print filename, p['x'],p['y'],p['z']
                database.insert(
                    'answer_to_3d_point',
                    ('answer_id','x','y','z'),
                    (answer_ids[i], p['x'],p['y'],p['z'])
                )
示例#36
0
main_content = soup.find('div', id='mainColumn')\
    .find('div', id="fpMainContent")\
    .find('div', attrs={"class": "gridCategory removeHidden", "data-blockname": "best"})



products = main_content.find_all('div', 'fpGridBox grid  frontpage firedeal')

for product in products:
    print(product.find('img')['title'])



print(len(products))

existed_items = []

while 1:
    # if the program is run for the first time, add all deals to the database without any notification
    if database.check_empty():
        for product in products:
            database.insert(product.find('img')['title'])
    else:
        for product in products:
            firedeal = product.find('img')['title']
            if not database.find(firedeal):
                notification.balloon_tip("Slickdeal Firedeal", firedeal)
                database.insert(firedeal)

    time.sleep(300)
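The database helper this script relies on is not shown. A minimal sqlite3-backed sketch that satisfies the three calls used above (check_empty, find, insert) could look like this; the file name and table layout are assumptions:

# database.py -- hypothetical minimal implementation
import sqlite3

_conn = sqlite3.connect("deals.db")
_conn.execute("CREATE TABLE IF NOT EXISTS deals (title TEXT PRIMARY KEY)")

def check_empty():
    """True when no deals have been stored yet."""
    return _conn.execute("SELECT COUNT(*) FROM deals").fetchone()[0] == 0

def find(title):
    """True if this deal title is already stored."""
    return _conn.execute("SELECT 1 FROM deals WHERE title = ?", (title,)).fetchone() is not None

def insert(title):
    """Store a new deal title."""
    _conn.execute("INSERT OR IGNORE INTO deals (title) VALUES (?)", (title,))
    _conn.commit()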
示例#37
0
        time.sleep(0.02)


if __name__ == "__main__":

    logger.app_logger.info('Application Start')
    print(console.Fore.RED + "Red")
    print(console.Color.RED + "BLACK" + console.Color.END)
    # rsyslog.open()
    # rsyslog.logging(syslog.LOG_ALERT, 'Application started')
    now = datetime.now()
    start_time = time.time()

    os.system('cls')  # clear the console
    # database.create_table()  # create the table
    logger.logging_environment()  # log the execution environment
    config.logging_paramter()  # load parameters

    translation.init()
    print(_('*****************************This is sample message.'))
    print('This string isn\'t translated.')

    proc()

    # line.send_message("Application End")
    proc_time = (time.time() - start_time)
    database.insert(now, proc_time)  # insert into the database
    logger.app_logger.info('Application End(%.6lf[sec])', proc_time)
    # rsyslog.logging(syslog.LOG_ALERT, 'Application End')
    # rsyslog.close()
示例#38
0
config.read('../config.cfg')

#with open('location.json') as data_file:    
#   data = json.load(data_file)
#company = data["places"][0] # for test 


# read table name
tablename = "uber_weather"

# create database if not exist
#dynamodb_table = database.create_database(tablename)

dynamodb_table = database.get_table(tablename)
order = 0
while True:
    order = order + 1
    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_query = "select item.condition from weather.forecast where woeid = 12761478"
    yql_url = baseurl + urllib.urlencode({'q':yql_query}) + "&format=json"
    result = urllib2.urlopen(yql_url).read()
    preprocess_data = json.loads(result)
    weather = preprocess_data['query']['results']['channel']['item']['condition']['text'].encode('utf-8')
    weather_time = preprocess_data['query']['results']['channel']['item']['condition']['date'].encode('utf-8')  # renamed so it does not shadow the time module
    
    # insert into dynamodb 
    item = {"weather":weather,"time":weather_time}
    print(item)
    database.insert(dynamodb_table, item)            

    time.sleep(2)
示例#39
0
def update():
    """
    更新数据库,异步函数
    :return: 无返回
    """
    global update_process
    global update_process_percent

    update_process = "INITIATING"
    update_process_percent = 0.0
    con = sqlite3.connect('essay.db')
    database.init(con)  # initialize the database

    fetch_status, total_essay = fetch.total_essay_number()  # total number of papers currently under the cs.AI category
    if not fetch_status:  # if the fetch failed, raise a server error
        raise Exception("Cannot get the count of total essay number")
    start_offset = (total_essay - 1) // request_max_results * request_max_results  # paging runs from the last page backwards, so compute the starting offset
    last_updated = database.latest_update_time(con)  # timestamp of the most recently updated paper in the database; anything updated later has not been inserted yet

    update_process = "GETTING ESSAYS INFO"
    essay_to_insert = []
    pdf_to_fetch = []
    break_flag = False
    for i in range(start_offset, -1, -request_max_results):
        update_process_percent = 1 - (i / total_essay)
        essays = list()  # papers fetched for this page
        trail_counter = 0  # retry counter; frequent fetching here warrants several attempts
        while essays is None or len(essays) == 0:
            if trail_counter >= 5:  # retries exhausted; give up (server error)
                return
            status, essays = fetch.fetch_data(i, request_max_results)  # attempt the fetch
            trail_counter = trail_counter + 1
        for essay in essays:
            # papers to insert: updated later than the newest paper in the database, or not yet present in the database
            if essay["updated"] > last_updated or len(database.query(con, "id", essay["id"])) == 0:
                essay_to_insert.append(essay)
                if pdf_end_time > essay["updated"] >= pdf_start_time:  # published after 2020-10-01 and before 2021-01-01; record the PDFs to download first
                    pdf_to_fetch.append((essay["pdf"], essay["id"]))
            else:
                break_flag = True  # results run from newest to oldest, so a repeated paper must already be in the database
                break
        if break_flag:
            break

    update_process = "INSERT INTO DATABASE"
    database.insert(con, essay_to_insert)  # push the data into the database

    if os.path.exists("pdf_list.tmp"):  # load the previously cached list of PDFs to fetch
        temp_file = open("pdf_list.tmp")
        pdf_to_fetch.extend(json.loads(temp_file.read()))
        temp_file.close()
    temp_file = open("pdf_list.tmp", "w")  # cache the current list of PDFs to fetch in pdf_list.tmp
    temp_file.write(json.dumps(pdf_to_fetch))
    temp_file.close()

    update_process = "DOWNLOADING PDF"
    count = 1
    for essay in pdf_to_fetch:  # start downloading the PDFs
        update_process_percent = count / len(pdf_to_fetch)
        fetch.download_pdf(essay[0], essay[1])
        count = count + 1

    if os.path.exists("pdf_list.tmp"):  # download finished, remove pdf_list.tmp
        os.remove("pdf_list.tmp")
    con.close()
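A quick check of the offset arithmetic used above, with made-up numbers (12,345 papers and pages of 100):

total_essay, request_max_results = 12345, 100   # hypothetical values
start_offset = (total_essay - 1) // request_max_results * request_max_results
assert start_offset == 12300                    # offset of the last (possibly partial) page
# range(start_offset, -1, -request_max_results) then walks back through 12300, 12200, ..., 0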
示例#40
0
# read table name
tablename = "uber_weather"

# create database if not exist
# dynamodb_table = database.create_database(tablename)

dynamodb_table = database.get_table(tablename)  # first, get the table
order = 0  # initialize order to 0
# start the loop

while True:
    order = order + 1  # update order 
    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_query = "select item.condition from weather.forecast where woeid = 12761478"   # Yahoo YQL query for the Manhattan weather
    yql_url = baseurl + urllib.urlencode({'q':yql_query}) + "&format=json"   # build the url to request
    result = urllib2.urlopen(yql_url).read()  # get the result returned by yahoo_weather api
    preprocess_data = json.loads(result)   # transform result from json format to python dictionary format
    try:
        weather = preprocess_data['query']['results']['channel']['item']['condition']['text'].encode('utf-8')  # try to get weather result
    except:
        continue
	
    Time = preprocess_data['query']['results']['channel']['item']['condition']['date'].encode('utf-8')   # extract time information
    
    # insert into dynamodb 
    item = {"weather":weather,"time":Time,"order":order}   # create new item 
    print(item)  
    database.insert(dynamodb_table, item) # insert new item into our dynamodb--uber_weather            

    time.sleep(600) # wait 10 minutes, then fetch and insert again; the weather data is normally updated about once an hour
示例#41
0
    def setUpClass(cls) -> None:
        """Create the expected schema in the test database"""
        drop_all_user_databases()
        database_name = "data_types_database"
        schema_name = "dbo"

        values = [
            (0, date(1, 1, 1), datetime(1753, 1, 1, 0, 0, tzinfo=timezone.utc),
             datetime(1, 1, 1, 0, 0, tzinfo=timezone.utc),
             datetime(1, 1, 1, 13, 46,
                      tzinfo=timezone(timedelta(hours=-14))).isoformat(),
             datetime(1900, 1, 1, 0, 0,
                      tzinfo=timezone.utc), time(0, 0, tzinfo=timezone.utc)),
            (1, date(9999, 12, 31),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=14))).isoformat(),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc)),
            (2, None, None, None, None, None, None),
            (3, date(4533, 6, 9),
             datetime(3099, 2, 6, 4, 27, 37, 983000, tzinfo=timezone.utc),
             datetime(9085, 4, 30, 21, 52, 57, 492920, tzinfo=timezone.utc),
             datetime(5749,
                      4,
                      3,
                      1,
                      47,
                      47,
                      110809,
                      tzinfo=timezone(timedelta(hours=10,
                                                minutes=5))).isoformat(),
             datetime(2031, 4, 30, 19, 32, tzinfo=timezone.utc),
             time(21, 9, 56, 0, tzinfo=timezone.utc)),
            (4, date(3476, 10, 14),
             datetime(7491, 4, 5, 8, 46, 0, 360000, tzinfo=timezone.utc),
             datetime(8366, 7, 13, 17, 15, 10, 102386, tzinfo=timezone.utc),
             datetime(2642,
                      6,
                      19,
                      21,
                      10,
                      28,
                      546280,
                      tzinfo=timezone(timedelta(hours=6,
                                                minutes=15))).isoformat(),
             datetime(2024, 6, 22, 0, 36, tzinfo=timezone.utc),
             time(2, 14, 4, 0, tzinfo=timezone.utc))
        ]

        schema = {
            'selected': True,
            'properties': {
                'its_time': {
                    'selected': True,
                    'inclusion': 'available',
                    'type': ['string', 'null']
                },
                'pk': {
                    'maximum': 2147483647,
                    'selected': True,
                    'inclusion': 'automatic',
                    'type': ['integer'],
                    'minimum': -2147483648
                },
                'just_a_date': {
                    'selected': True,
                    'inclusion': 'available',
                    'type': ['string', 'null'],
                    'format': 'date-time'
                },
                'date_and_time': {
                    'selected': True,
                    'inclusion': 'available',
                    'type': ['string', 'null'],
                    'format': 'date-time'
                },
                "bigger_range_and_precision_datetime": {
                    'selected': True,
                    'inclusion': 'available',
                    'type': ['string', 'null'],
                    'format': 'date-time'
                },
                "datetime_with_timezones": {
                    'selected': True,
                    'inclusion': 'available',
                    'type': ['string', 'null'],
                    'format': 'date-time'
                },
                "datetime_no_seconds": {
                    'selected': True,
                    'inclusion': 'available',
                    'type': ['string', 'null'],
                    'format': 'date-time'
                }
            },
            'type': 'object'
        }

        fields = [{
            'pk': {
                'sql-datatype': 'int',
                'selected-by-default': True,
                'inclusion': 'automatic'
            }
        }, {
            'just_a_date': {
                'sql-datatype': 'date',
                'selected-by-default': True,
                'inclusion': 'available'
            }
        }, {
            'date_and_time': {
                'sql-datatype': 'datetime',
                'selected-by-default': True,
                'inclusion': 'available'
            }
        }, {
            'bigger_range_and_precision_datetime': {
                'sql-datatype': 'datetime2',
                'selected-by-default': True,
                'inclusion': 'available'
            }
        }, {
            'datetime_with_timezones': {
                'sql-datatype': 'datetimeoffest',
                'selected-by-default': True,
                'inclusion': 'available'
            }
        }, {
            'datetime_no_seconds': {
                'sql-datatype': 'smalldatetime',
                'selected-by-default': True,
                'inclusion': 'available'
            }
        }, {
            'its_time': {
                'sql-datatype': 'time',
                'selected-by-default': True,
                'inclusion': 'available'
            }
        }]

        query_list = list(
            create_database(database_name, "Latin1_General_CS_AS"))
        # query_list.extend(create_schema(database_name, schema_name))

        table_name = "dates_and_times"
        primary_key = {"pk"}

        column_name = [
            "pk", "just_a_date", "date_and_time",
            "bigger_range_and_precision_datetime", "datetime_with_timezones",
            "datetime_no_seconds", "its_time"
        ]
        column_type = [
            "int", "date", "datetime", "datetime2", "datetimeoffset",
            "smalldatetime", "time"
        ]

        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(database_name, schema_name, table_name, values))

        mssql_cursor_context_manager(*query_list)

        values = [
            (0, date(1, 1, 1), datetime(1753, 1, 1, 0, 0, tzinfo=timezone.utc),
             datetime(1, 1, 1, 0, 0, tzinfo=timezone.utc),
             datetime(1, 1, 1, 13, 46, tzinfo=timezone(
                 timedelta(hours=-14))).astimezone(timezone.utc),
             datetime(1900, 1, 1, 0, 0,
                      tzinfo=timezone.utc), time(0, 0, tzinfo=timezone.utc)),
            (1, date(9999, 12, 31),
             datetime(9999, 12, 31, 23, 59, 59, 997000, tzinfo=timezone.utc),
             datetime(9999, 12, 31, 23, 59, 59, 999000, tzinfo=timezone.utc),
             datetime(9999,
                      12,
                      31,
                      10,
                      14,
                      tzinfo=timezone(timedelta(hours=14))).astimezone(
                          timezone.utc),
             datetime(2079, 6, 6, 23, 59, tzinfo=timezone.utc),
             time(23, 59, 59, tzinfo=timezone.utc)),
            (2, None, None, None, None, None, None),
            (3, date(4533, 6, 9),
             datetime(3099, 2, 6, 4, 27, 37, 983000, tzinfo=timezone.utc),
             datetime(9085, 4, 30, 21, 52, 57, 492920, tzinfo=timezone.utc),
             datetime(5749,
                      4,
                      3,
                      1,
                      47,
                      47,
                      110809,
                      tzinfo=timezone(timedelta(
                          hours=10, minutes=5))).astimezone(timezone.utc),
             datetime(2031, 4, 30, 19, 32, tzinfo=timezone.utc),
             time(21, 9, 56, 0, tzinfo=timezone.utc)),
            (4, date(3476, 10, 14),
             datetime(7491, 4, 5, 8, 46, 0, 360000, tzinfo=timezone.utc),
             datetime(8366, 7, 13, 17, 15, 10, 102386, tzinfo=timezone.utc),
             datetime(2642,
                      6,
                      19,
                      21,
                      10,
                      28,
                      546280,
                      tzinfo=timezone(timedelta(
                          hours=6, minutes=15))).astimezone(timezone.utc),
             datetime(2024, 6, 22, 0, 36, tzinfo=timezone.utc),
             time(2, 14, 4, 0, tzinfo=timezone.utc))
        ]

        cls.EXPECTED_METADATA = {
            '{}_{}_{}'.format(database_name, schema_name, table_name): {
                'is-view': False,
                'schema-name': schema_name,
                'row-count': 0,
                'values': values,
                'table-key-properties': primary_key,
                'selected': None,
                'database-name': database_name,
                'stream_name': table_name,
                'fields': fields,
                'schema': schema
            }
        }

        cls.expected_metadata = cls.discovery_expected_metadata
示例#42
0
文件: mysql-add.py 项目: sr/beadmin
	print 'Merci de contacter l\'administrateur.'
	sys.exit()

id_user = list(userfromdb)[0].id

while True:
	dbname = text('Suffixe du nom de la base de donnée :')
	if valid_dbname(dbname): break
	else:
		print 'Le suffixe doit utiliser les caractères `a-z A-Z 0-9 _ et -`, et'
		print 'ne doit pas dépasser les 30 caractères.'

dbs = db.select('bases', where="name = '%s' AND id_users='%s'" % (dbname, id_user))

if len(dbs) == 0:
	id_base = db.insert('bases', name=dbname, id_users=id_user)
	db.query('CREATE DATABASE `%s_%s`;' % (user, dbname))
	db.query("GRANT ALL PRIVILEGES ON `" + user + "_" + dbname + "` . * TO '" + user + "'@'%%' WITH GRANT OPTION;")
else:
	print 'Cette base de donnée existe déjà !'
	sys.exit()

print
print 'La base de donnée a été crée sur MySQL.'
print
print 'Le serveur est accessible via `localhost` et `mysql.bearnaise.net`'
print 'ou `mysql.votredomaine.tld`.'
print 'Le nom de la base de donnée est `%s_%s`.' % (user, dbname)
print 'Votre nom d\'utilisateur mysql est `%s`.' % (user)
print 'Votre mot de passe est celui qui vous a été envoyé précédement.'
print
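The script above relies on a valid_dbname helper that is not included in the excerpt. A minimal sketch consistent with the constraints in the error message (letters, digits, underscore, hyphen, at most 30 characters) could look like the following; the body is an assumption, not the project's actual code.

import re

def valid_dbname(dbname):
    # hypothetical validator matching the rules printed above:
    # only a-z A-Z 0-9 _ - and no more than 30 characters
    return bool(re.match(r'^[a-zA-Z0-9_-]{1,30}$', dbname))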
Example #43
0
    def setUpClass(cls) -> None:
        """Create the expected schema in the test database"""
        drop_all_user_databases()
        database_name = "data_types_database"
        schema_name = "dbo"

        values = [
            (0, 1.1754944e-38, 2.2250738585072014e-308, 1.1754944e-38),
            (1, 3.4028235e+38, 1.7976931348623157e+308, 3.4028235e+38),
            (2, -1.1754944e-38, -2.2250738585072014e-308, -1.1754944e-38),
            (3, -3.4028235e+38, -1.7976931348623157e+308, -3.4028235e+38),
            (4, 0.0, 0.0, 0.0), (5, None, None, None),
            (6, 7.830105e-33, 6.46504535047369e-271, 4.0229383e-27),
            (7, 4.4540307e-21, 7.205251086772512e-202, 7.196247e-19),
            (8, 647852.6, 2.1597057137884757e+40, 8.430207e+34),
            (9, 3603.407, 8.811948588549982e+23, 9.1771755e+35),
            (10, -8.451405e-24, -1.783306877438393e-178, -2.2775854e-31),
            (11, -5.8271772e-27, -9.344274532947989e-227, -3.5728205e-18),
            (12, -8.519153e+23, -2.3035944912603858e+241, -5.7120217e+35),
            (13, -30306750.0, -5.222263032559684e+106, -1.9535917e+27)
        ]

        schema = {
            'selected': True,
            'type': 'object',
            'properties': {
                'float_24': {
                    'selected': True,
                    'type': ['number', 'null'],
                    'inclusion': 'available'
                },
                'float_53': {
                    'selected': True,
                    'type': ['number', 'null'],
                    'inclusion': 'available'
                },
                'real_24_bits': {
                    'selected': True,
                    'type': ['number', 'null'],
                    'inclusion': 'available'
                },
                'pk': {
                    'selected': True,
                    'type': ['integer'],
                    'maximum': 2147483647,
                    'minimum': -2147483648,
                    'inclusion': 'automatic'
                }
            }
        }

        cls.EXPECTED_METADATA = {
            'data_types_database_dbo_float_precisions': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values':
                values,
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'float_precisions',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'float_24': {
                        'sql-datatype': 'real',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'float_53': {
                        'sql-datatype': 'float',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'real_24_bits': {
                        'sql-datatype': 'real',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema':
                schema
            }
        }

        query_list = list(
            create_database(database_name, "Latin1_General_CS_AS"))
        # query_list.extend(create_schema(database_name, schema_name))

        table_name = "float_precisions"
        column_name = ["pk", "float_24", "float_53", "real_24_bits"]
        column_type = ["int", "float(24)", "float(53)", "real"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(
                database_name, schema_name, table_name, cls.EXPECTED_METADATA[
                    "data_types_database_dbo_float_precisions"]["values"]))

        mssql_cursor_context_manager(*query_list)

        cls.expected_metadata = cls.discovery_expected_metadata
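The create_table and insert helpers used by these MSSQL test fixtures are not shown in this listing. A rough sketch of what insert is assumed to return, a list of SQL strings that mssql_cursor_context_manager can execute in order, is below; the real helper presumably quotes and converts values far more carefully.

def insert(database_name, schema_name, table_name, values, column_names=None):
    # sketch: one INSERT statement per row of values (assumed behaviour)
    target = "{}.{}.{}".format(database_name, schema_name, table_name)
    columns = "({}) ".format(", ".join(column_names)) if column_names else ""
    queries = []
    for row in values:
        rendered = ", ".join(
            "NULL" if value is None else "'{}'".format(value) for value in row)
        queries.append(
            "INSERT INTO {} {}VALUES ({})".format(target, columns, rendered))
    return queries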
Example #44
0
def insert_raid_comment(comment, username, raid_id, comment_id):
    return db.insert('raid_comments', { 'comment_id': comment_id, 'raid_id': raid_id, 'username': escape(username, 32), 'comment': escape(comment, 200) } )
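Example #44, like Examples #46 and #48, passes a table name and a dict of column/value pairs to db.insert. A minimal sqlite3-backed sketch of such a wrapper, written as a standalone function that takes an explicit connection, is shown here; the sqlite backend and the truncating escape helper are assumptions about that project rather than its real code.

import sqlite3

def escape(value, max_length):
    # assumed helper: coerce to text and truncate to a maximum length
    return str(value)[:max_length]

def insert(connection, table, row):
    # build a parameterized INSERT from a dict of column -> value;
    # table and column names must come from trusted code, only values are bound
    columns = ", ".join(row.keys())
    placeholders = ", ".join("?" for _ in row)
    sql = "INSERT INTO {} ({}) VALUES ({})".format(table, columns, placeholders)
    cursor = connection.execute(sql, tuple(row.values()))
    connection.commit()
    return cursor.lastrowid

# usage sketch:
# connection = sqlite3.connect("raids.db")
# insert(connection, "raid_comments", {"raid_id": 1, "comment": escape("hi", 200)})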
Example #45
0
    def test_run(self):
        """
        Verify that a full sync can capture all data and send it in the correct format
        for integer and boolean (bit) data.
        Verify that the first sync sends an activate version message immediately.
        Verify that the table version is incremented.
        """
        print("running test {}".format(self.name()))

        conn_id = self.create_connection()

        # run in check mode
        check_job_name = runner.run_check_mode(self, conn_id)

        # verify check  exit codes
        exit_status = menagerie.get_exit_status(conn_id, check_job_name)
        menagerie.verify_check_exit_status(self, exit_status, check_job_name)

        # get the catalog information of discovery
        found_catalogs = menagerie.get_catalogs(conn_id)
        additional_md = [{"breadcrumb": [], "metadata": {'replication-method': 'LOG_BASED'}}]

        # Don't select unsupported data types
        non_selected_properties = ["nvarchar_text", "varchar_text", "varbinary_data",
                                   "geospacial", "geospacial_map", "markup", "tree",
                                   "variant", "SpecialPurposeColumns", "started_at", "ended_at"]
        BaseTapTest.select_all_streams_and_fields(
            conn_id, found_catalogs, additional_md=additional_md,
            non_selected_properties=non_selected_properties)

        # clear state
        menagerie.set_state(conn_id, {})
        sync_job_name = runner.run_sync_mode(self, conn_id)

        # verify tap and target exit codes
        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)

        # verify record counts of streams
        record_count_by_stream = runner.examine_target_output_file(
            self, conn_id, self.expected_streams(), self.expected_primary_keys_by_stream_id())
        expected_count = {k: len(v['values']) for k, v in self.expected_metadata().items()}
        # self.assertEqual(record_count_by_stream, expected_count)

        # verify records match on the first sync
        records_by_stream = runner.get_records_from_target_output()

        table_version = dict()
        for stream in self.expected_streams():
            with self.subTest(stream=stream):
                stream_expected_data = self.expected_metadata()[stream]
                table_version[stream] = records_by_stream[stream]['table_version']

                # verify on the first sync you get
                # activate version message before and after all data for the full table
                # and before the logical replication part
                if records_by_stream[stream]['messages'][-1].get("data"):
                    last_row_data = True
                else:
                    last_row_data = False

                self.assertEqual(
                    records_by_stream[stream]['messages'][0]['action'],
                    'activate_version')
                self.assertEqual(
                    records_by_stream[stream]['messages'][-2]['action'],
                    'activate_version')
                if last_row_data:
                    self.assertEqual(
                        records_by_stream[stream]['messages'][-3]['action'],
                        'activate_version')
                else:
                    self.assertEqual(
                        records_by_stream[stream]['messages'][-1]['action'],
                        'activate_version')
                self.assertEqual(
                    len([m for m in records_by_stream[stream]['messages'][1:] if m["action"] == "activate_version"]),
                    2,
                    msg="Expect 2 more activate version messages for end of full table and beginning of log based")

                column_names = [
                    list(field_data.keys())[0] for field_data in stream_expected_data[self.FIELDS]
                ]

                expected_messages = [
                    {
                        "action": "upsert", "data":
                        {
                            column: value for column, value
                            in list(zip(column_names, stream_expected_data[self.VALUES][row]))
                            if column not in non_selected_properties
                        }
                    } for row in range(len(stream_expected_data[self.VALUES]))
                ]

                # Verify all data is correct for the full table part
                if last_row_data:
                    final_row = -3
                else:
                    final_row = -2

                for expected_row, actual_row in list(
                        zip(expected_messages, records_by_stream[stream]['messages'][1:final_row])):
                    with self.subTest(expected_row=expected_row):

                        self.assertEqual(actual_row["action"], "upsert")
                        self.assertEqual(len(expected_row["data"].keys()), len(actual_row["data"].keys()),
                                         msg="there are not the same number of columns")
                        for column_name, expected_value in expected_row["data"].items():
                            self.assertEqual(expected_value, actual_row["data"][column_name],
                                             msg="expected: {} != actual {}".format(
                                                 expected_row, actual_row))

                # Verify all data is correct for the log replication part if sent
                if records_by_stream[stream]['messages'][-1].get("data"):
                    for column_name, expected_value in expected_messages[-1]["data"].items():
                        self.assertEqual(expected_value,
                                         records_by_stream[stream]['messages'][-1]["data"][column_name],
                                         msg="expected: {} != actual {}".format(
                                             expected_row, actual_row))

                print("records are correct for stream {}".format(stream))

                # verify state and bookmarks
                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                self.assertIsNone(state.get('currently_syncing'), msg="expected state's currently_syncing to be None")
                self.assertIsNotNone(
                    bookmark.get('current_log_version'),
                    msg="expected bookmark to have current_log_version because we are using log replication")
                self.assertTrue(bookmark['initial_full_table_complete'], msg="expected full table to be complete")
                inital_log_version = bookmark['current_log_version']

                self.assertEqual(bookmark['version'], table_version[stream],
                                 msg="expected bookmark for stream to match version")

                expected_schemas = self.expected_metadata()[stream]['schema']
                self.assertEqual(records_by_stream[stream]['schema'],
                                 expected_schemas,
                                 msg="expected: {} != actual: {}".format(expected_schemas,
                                                                         records_by_stream[stream]['schema']))

        # ----------------------------------------------------------------------
        # invoke the sync job AGAIN after inserting, updating, and deleting rows
        # ----------------------------------------------------------------------

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "text_and_image_deprecated_soon"
        column_name = ["pk", "nvarchar_text", "varchar_text", "varbinary_data",
                       "rowversion_synonym_timestamp"]
        insert_value = [(2, "JKL", "MNO", "PQR".encode('utf-8'))]
        update_value = [(1, "JKL", "MNO", "PQR".encode('utf-8'))]
        delete_value = [(0, )]
        query_list = (insert(database_name, schema_name, table_name, insert_value, column_name[:-1]))
        query_list.extend(delete_by_pk(database_name, schema_name, table_name, delete_value, column_name[:1]))
        query_list.extend(update_by_pk(database_name, schema_name, table_name, update_value, column_name))
        mssql_cursor_context_manager(*query_list)
        values = insert_value + update_value
        rows = mssql_cursor_context_manager(*[
            "select rowversion_synonym_timestamp from data_types_database.dbo.text_and_image_deprecated_soon "
            "where pk in (0,1,2) order by pk desc"])
        rows = [tuple(row) for row in rows]
        rows = [("0x{}".format(value.hex().upper()), ) for value, in rows]
        row_with_version = [x[0] + x[1] + (None, ) for x in zip(values, rows)]
        row_with_version.append((0, None, None, None, None, datetime.utcnow()))
        row_with_version[1], row_with_version[2] = row_with_version[2], row_with_version[1]
        self.EXPECTED_METADATA['data_types_database_dbo_text_and_image_deprecated_soon']['values'] = row_with_version
        self.EXPECTED_METADATA["data_types_database_dbo_text_and_image_deprecated_soon"]["fields"].append(
            {"_sdc_deleted_at": {
                'sql-datatype': 'datetime', 'selected-by-default': True, 'inclusion': 'automatic'}}
        )

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "weirdos"
        column_name = [
            "pk", "geospacial", "geospacial_map", "markup", "guid", "tree",
            "variant", "SpecialPurposeColumns", "version"]
        insert_value = [(3, None, None, None, str(uuid.uuid1()).upper(), None, None, None)]
        update_value = [(1, None, None, None, str(uuid.uuid1()).upper(), None, None, None)]
        delete_value = [(0,)]
        query_list = (insert(database_name, schema_name, table_name, insert_value, column_name[:-1]))
        query_list.extend(delete_by_pk(database_name, schema_name, table_name, delete_value, column_name[:1]))
        query_list.extend(update_by_pk(database_name, schema_name, table_name, update_value, column_name))
        mssql_cursor_context_manager(*query_list)
        values = insert_value + update_value
        rows = mssql_cursor_context_manager(*[
            "select version from data_types_database.dbo.weirdos "
            "where pk in (0,1,3) order by pk desc"])
        rows = [tuple(row) for row in rows]
        rows = [("0x{}".format(value.hex().upper()), ) for value, in rows]
        row_with_version = [x[0] + x[1] + (None, ) for x in zip(values, rows)]
        row_with_version.append((0, None, None, None, None, None, None, None, None, datetime.utcnow()))
        row_with_version[1], row_with_version[2] = row_with_version[2], row_with_version[1]
        self.EXPECTED_METADATA['data_types_database_dbo_weirdos']['values'] = row_with_version
        self.EXPECTED_METADATA["data_types_database_dbo_weirdos"]["fields"].append(
            {"_sdc_deleted_at": {
                'sql-datatype': 'datetime', 'selected-by-default': True, 'inclusion': 'automatic'}}
        )

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "computed_columns"
        column_name = ["pk", "started_at", "ended_at", "durations_days"]
        insert_value = [(2, datetime(1980, 5, 30, 16), datetime.now())]
        update_value = [(1, datetime(1942, 11, 30), datetime(2017, 2, 12))]
        delete_value = [(0,)]
        query_list = (insert(database_name, schema_name, table_name, insert_value, column_name[:-1]))
        query_list.extend(delete_by_pk(database_name, schema_name, table_name, delete_value, column_name[:1]))
        query_list.extend(update_by_pk(database_name, schema_name, table_name, update_value, column_name))
        mssql_cursor_context_manager(*query_list)
        values = insert_value + update_value  # + [delete_value[0] + (None, None)]
        rows = mssql_cursor_context_manager(
            *["select durations_days from data_types_database.dbo.computed_columns "
              "where pk in (0,1,2) order by pk desc"])
        rows = [tuple(row) for row in rows]
        row_with_duration = [x[0] + x[1] + (None, ) for x in zip(values, rows)]
        row_with_duration.append((0, None, None, None, datetime.utcnow()))
        row_with_duration[1], row_with_duration[2] = row_with_duration[2], row_with_duration[1]
        self.EXPECTED_METADATA['data_types_database_dbo_computed_columns']['values'] = row_with_duration
        self.EXPECTED_METADATA["data_types_database_dbo_computed_columns"]["fields"].append(
            {"_sdc_deleted_at": {
                'sql-datatype': 'datetime', 'selected-by-default': True, 'inclusion': 'automatic'}}
        )

        sync_job_name = runner.run_sync_mode(self, conn_id)

        # verify tap and target exit codes
        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)
        record_count_by_stream = runner.examine_target_output_file(
            self, conn_id, self.expected_streams(), self.expected_primary_keys_by_stream_id())
        expected_count = {k: len(v['values']) for k, v in self.expected_metadata().items()}
        self.assertEqual(record_count_by_stream, expected_count)
        records_by_stream = runner.get_records_from_target_output()

        for stream in self.expected_streams():
            with self.subTest(stream=stream):
                stream_expected_data = self.expected_metadata()[stream]
                new_table_version = records_by_stream[stream]['table_version']

                # verify on a subsequent sync you get activate version message only after all data
                self.assertEqual(
                    records_by_stream[stream]['messages'][0]['action'],
                    'activate_version')
                self.assertTrue(all(
                    [message["action"] == "upsert" for message in records_by_stream[stream]['messages'][1:]]
                ))

                column_names = [
                    list(field_data.keys())[0] for field_data in stream_expected_data[self.FIELDS]
                ]

                expected_messages = [
                    {
                        "action": "upsert", "data":
                        {
                            column: value for column, value
                            in list(zip(column_names, stream_expected_data[self.VALUES][row]))
                            if column not in non_selected_properties
                        }
                    } for row in range(len(stream_expected_data[self.VALUES]))
                ]

                # remove sequences from actual values for comparison
                [message.pop("sequence") for message
                 in records_by_stream[stream]['messages'][1:]]

                # Verify all data is correct
                for expected_row, actual_row in list(
                        zip(expected_messages, records_by_stream[stream]['messages'][1:])):
                    with self.subTest(expected_row=expected_row):
                        self.assertEqual(actual_row["action"], "upsert")

                        # we only send the _sdc_deleted_at column for deleted rows
                        self.assertGreaterEqual(len(expected_row["data"].keys()), len(actual_row["data"].keys()),
                                         msg="there are not the same number of columns")

                        for column_name, expected_value in expected_row["data"].items():
                            if column_name != "_sdc_deleted_at":
                                self.assertEqual(expected_value, actual_row["data"][column_name],
                                                 msg="expected: {} != actual {}".format(
                                                     expected_row, actual_row))
                            elif expected_value:
                                # we have an expected value for a deleted row
                                try:
                                    actual_value = datetime.strptime(actual_row["data"][column_name],
                                                                     "%Y-%m-%dT%H:%M:%S.%fZ")
                                except ValueError:
                                    actual_value = datetime.strptime(actual_row["data"][column_name],
                                                                     "%Y-%m-%dT%H:%M:%SZ")
                                self.assertGreaterEqual(actual_value, expected_value - timedelta(seconds=15))
                                self.assertLessEqual(actual_value, expected_value + timedelta(seconds=15))
                            else:
                                # the row wasn't deleted so we can either not pass the column or it can be None
                                self.assertIsNone(actual_row["data"].get(column_name))

                print("records are correct for stream {}".format(stream))

                # verify state and bookmarks
                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                self.assertIsNone(state.get('currently_syncing'), msg="expected state's currently_syncing to be None")
                self.assertIsNotNone(
                    bookmark.get('current_log_version'),
                    msg="expected bookmark to have current_log_version because we are using log replication")
                self.assertTrue(bookmark['initial_full_table_complete'], msg="expected full table to be complete")
                new_log_version = bookmark['current_log_version']
                self.assertGreater(new_log_version, inital_log_version,
                                   msg='expected log version to increase')

                self.assertEqual(bookmark['version'], table_version[stream],
                                 msg="expected bookmark for stream to match version")
                self.assertEqual(bookmark['version'], new_table_version,
                                 msg="expected bookmark for stream to match version")

                expected_schemas = self.expected_metadata()[stream]['schema']
                self.assertEqual(records_by_stream[stream]['schema'],
                                 expected_schemas,
                                 msg="expected: {} != actual: {}".format(expected_schemas,
                                                                         records_by_stream[stream]['schema']))
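Example #45 also depends on delete_by_pk and update_by_pk helpers that are not part of the excerpt. Under the same assumption as the insert sketch earlier (each helper returns a list of SQL strings), they might look roughly like this:

def delete_by_pk(database_name, schema_name, table_name, pk_values, pk_columns):
    # sketch: one DELETE per row of primary-key values (assumption)
    target = "{}.{}.{}".format(database_name, schema_name, table_name)
    return [
        "DELETE FROM {} WHERE {}".format(
            target,
            " AND ".join("{} = '{}'".format(column, value)
                         for column, value in zip(pk_columns, row)))
        for row in pk_values
    ]

def update_by_pk(database_name, schema_name, table_name, rows, column_names):
    # sketch: the first column is treated as the primary key (assumption)
    target = "{}.{}.{}".format(database_name, schema_name, table_name)
    queries = []
    for row in rows:
        assignments = ", ".join(
            "{} = {}".format(column,
                             "NULL" if value is None else "'{}'".format(value))
            for column, value in zip(column_names[1:], row[1:]))
        queries.append("UPDATE {} SET {} WHERE {} = '{}'".format(
            target, assignments, column_names[0], row[0]))
    return queries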
Example #46
0
def insert_raid_participation(raid_id, raider_id, participation_type_id):
    return db.insert('raid_participants', { 'raid_id': raid_id, 'raider_id': raider_id, 'participation_type_id': participation_type_id, 'party_count': 1 } )
Example #47
0
#with open('location.json') as data_file:
#   data = json.load(data_file)
#company = data["places"][0] # for test

# read table name
tablename = "uber_weather"

# create database if not exist
#dynamodb_table = database.create_database(tablename)

dynamodb_table = database.get_table(tablename)
order = 0
while True:
    order = order + 1
    baseurl = "https://query.yahooapis.com/v1/public/yql?"
    yql_query = "select item.condition from weather.forecast where woeid = 12761478"
    yql_url = baseurl + urllib.urlencode({'q': yql_query}) + "&format=json"
    result = urllib2.urlopen(yql_url).read()
    preprocess_data = json.loads(result)
    weather = preprocess_data['query']['results']['channel']['item'][
        'condition']['text'].encode('utf-8')
    # use a distinct name so the time module is not shadowed (time.sleep below)
    observation_time = preprocess_data['query']['results']['channel']['item'][
        'condition']['date'].encode('utf-8')

    # insert into dynamodb
    item = {"weather": weather, "time": observation_time}
    print(item)
    database.insert(dynamodb_table, item)

    time.sleep(2)
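The database module used in Example #47 is not shown. A minimal sketch of get_table and insert on top of boto3's DynamoDB resource follows; region and credential handling are assumptions about that project.

import boto3

# assumed wrapper around the boto3 DynamoDB resource API
_dynamodb = boto3.resource("dynamodb")

def get_table(table_name):
    # return a handle to an existing DynamoDB table
    return _dynamodb.Table(table_name)

def insert(table, item):
    # put_item writes the item, replacing any existing item with the same key
    table.put_item(Item=item)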
Example #48
0
def insert_raider(telegram_id, username, nickname=None):
    if nickname:
        return db.insert('raiders', { 'telegram_id': telegram_id, 'username': escape(username, 32), 'nickname': escape(nickname, 32) } )
    else:
        return db.insert('raiders', { 'telegram_id': telegram_id, 'username': escape(username, 32) } )
Example #49
0
File: module.py  Project: NullMode/zarp
 def _insert(self, query, parameters=None):
     if parameters is None:
         return database.insert(query)
     else:
         return database.insert(query, parameters)
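The zarp wrapper above simply forwards to database.insert, with or without bound parameters. A sqlite3-backed sketch of what that call is assumed to do:

import sqlite3

_connection = sqlite3.connect("zarp.db")  # storage location is an assumption

def insert(query, parameters=None):
    # run a write query, binding parameters only when they are supplied
    cursor = (_connection.execute(query, parameters)
              if parameters is not None else _connection.execute(query))
    _connection.commit()
    return cursor.lastrowid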
Example #50
0
def interacao(frase):
    #re1 = re.search(r'Firefox', self.frase)
    #keyW = re1.group(0);
    comando = frase
    if 'abra o firefox' in comando:
        frase = 'Abrindo Firefox'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('firefox')
    elif 'abra o google' in comando:
        frase = 'Abrindo Google.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('firefox https://www.google.com.br')
    elif 'tudo bem' in comando:
        frase = 'Estou sim e você?'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
    elif 'quando o mundo vai acabar' in comando:
        frase = 'Conforme o site Abril, por volta do ano 7500000000'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
    elif 'abra o facebook' in comando or 'abra o meu facebook' in comando:
        frase = 'Abrindo Facebook.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('firefox https://www.facebook.com')
    elif 'abra o whatsapp' in comando or 'abra o meu whatsapp' in comando:
        frase = 'Abrindo WhatsApp.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('firefox https://web.whatsapp.com')
    elif 'abra o github' in comando:
        frase = 'Abrindo GitHub.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('firefox https://www.github.com')
    elif 'abra o meu github' in comando and 'meu' in comando and 'repositório' in comando:
        frase = 'Abrindo seu GitHub.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('firefox https://www.github.com/ProfessorJamesBach/')
    elif 'abra o visual studio code' in comando:
        frase = 'Abrindo Visual Studio Code.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
        os.system('code')
    elif 'Que horas sao' in comando or 'que horas sao' in comando:
        try:
            horas = datetime.now()
            hora = horas.hour
            min = horas.minute
            print str(hora) + ':' + str(min)
            frase = 'Sao exatamente ' + str(hora) + ' horas e ' + str(
                min) + ' minutos.'
            a = Pesquise(frase)
            a.fala(frase)
        except Exception as err:
            print 'Erro: ' + str(err)
    elif 'eu te amo' in comando:
        frase = 'Nossa relacao de IA para Humano ja e otima.'
        a = Pesquise(frase)
        a.fala(frase)
        sleep(1)
    elif 'pesquise' in comando or 'pesquisa' in comando:
        pesquisa = comando[15:]
        p = Pesquise(pesquisa)
        p.pesquise(pesquisa)
    elif 'toque' in comando or 'Toque' in comando:
        mu = comando[12:]  #Alexa Toque --
        m = Pesquise(mu)
        m.tocar(mu)
    elif 'quem te criou' in comando or 'quem e seu criador' in comando:
        frase = 'Eu fui criada por Hugo Henrique'
        a = Pesquise(frase)
        a.fala(frase)
    elif 'qual o ip de' in comando or 'qual ip de' in comando:
        if 'qual o ip de' in comando:
            ip = comando[12:]
            print 'Pingando: {}'.format(ip)
            m = Pesquise(ip)
            m.ping(ip)
        elif 'qual ip de' in comando:
            ip = comando[10:]
            print 'Pingando: {}'.format(ip)
            m = Pesquise(ip)
            m.ping(ip)
        elif 'qual o ip do' in comando:
            ip = comando[12:]
            print 'Pingando: {}'.format(ip)
            m = Pesquise(ip)
            m.ping(ip)
    elif 'qual o preco do' in comando or 'quanto custa um' in comando or 'quanto custa o' in comando or 'qual preco do' in comando:
        try:
            coin = Cotacao(comando)
            coin.preco(comando)
        except Exception as err:
            f = 'Ocorreu algum erro.'
            a = Pesquise(f)
            a.fala(f)
            print 'Erro: ' + str(err)
    elif 'meu nome e' in comando or 'eu sou o' in comando or 'eu me chamo' in comando:
        try:
            if "meu nome e" in comando:
                horas = datetime.now()
                hora = horas.hour
                min = horas.minute
                seg = horas.second
                data = str(hora) + ':' + str(min) + ':' + str(seg)
                #alexa meu nome e hugo
                #01234567890123456
                nome = comando[17:]
                print 'Cadastrando usuário: {}'.format(nome)
                sql = 'INSERT INTO alexa_nomes(nome, data, hertz) VALUES (%s,%s,%s)'
                sql_data = (nome, data, '150Hz')  # three values to match the three placeholders
                database.insert(sql, sql_data)
            elif "eu sou o" in comando:
                #alexa eu sou o hugo
                #012345678901234
                nome = comando[15:]
                print 'Cadastrando usuário: {}'.format(nome)
            elif "eu me chamo" in comando:
                #alexa eu me chamo hugo
                #012345678901234567
                nome = comando[18:]
                print 'Cadastrando usuário: {}'.format(nome)
            else:
                pass
        except Exception as err:
            f = 'Ocorreu algum erro.'
            a = Pesquise(f)
            a.fala(f)
            print 'Erro: ' + str(err)
    elif 'como esta o clima' in comando or 'qual a temperatura atual' in comando:
        tokenIP = '08e7b32026b7f15eb659455c1eaa09c6'
        tokenCLIMA = 'a6fe231cc95a95d6f6800c29573ba2ac'
        import ip
        import json
        meuIP = ip.meuIp()
        urlIP = 'http://api.ipstack.com/{}?access_key={}&format=1'.format(
            meuIP, tokenIP)
        #print '[+] URL IP: ' + urlIP
        try:
            request = requests.get(urlIP)
            request = request.json()
            cidade = request['city']
            estado = request['region_code']
        except Exception as err:
            print 'Erro: ' + str(err) + '.'
        urlCLIMA = "http://apiadvisor.climatempo.com.br/api/v1/locale/city?name={}&state={}&token={}".format(
            cidade, estado, tokenCLIMA)
        #print '[+] URL CLIMA (CIDADE ID): ' + urlCLIMA
        try:
            cidadeID = requests.get(urlCLIMA)
            cidadeID = cidadeID.json()
            cidadeID = cidadeID[0]
            cidadeID = cidadeID['id']
            #print '[+] Cidade ID: '+str(cidadeID)
        except Exception as err:
            print 'Erro: ' + str(err) + '!'
        urlCLIMA = "http://apiadvisor.climatempo.com.br/api/v1/weather/locale/{}/current?token={}".format(
            str(cidadeID), tokenCLIMA)
        print '[+] URL CLIMA (DADOS): ' + urlCLIMA
        try:
            restCLIMA = requests.get(urlCLIMA)
            restCLIMA = restCLIMA.json()
            restCLIMA2 = restCLIMA['data']
            frase = 'Para ' + str(
                restCLIMA['name']) + ' a temperatura atual e de ' + str(
                    restCLIMA2['temperature']) + ' graus, sensacao de ' + str(
                        restCLIMA2['sensation']) + ', previsao de ' + str(
                            restCLIMA2['condition'])
            a = Pesquise(frase)
            a.fala(frase)
        except Exception as err:
            print 'Erro: ' + str(err) + '!!'
    elif 'gere um cpf' in comando or 'gerar cpf' in comando or 'gerar um cpf' in comando or 'gera um cpf' in comando or 'gera cpf' in comando:
        import json
        from alexa import remover_acentos
        tokenCPF = '995db8c466c13d70b0ff99f922655921'
        urlCPF = 'http://geradorapp.com/api/v1/cpf/generate?token={}'.format(
            tokenCPF)
        request = requests.get(urlCPF)
        resposta = request.json()
        data = resposta['data']
        if resposta['status'] == "0":
            print '[-] {}'.format(remover_acentos(data['message']))
        elif resposta['status'] == "1":
            print '[+] {}'.format(remover_acentos(data['message']))
            print '[+] Número: {}'.format(data['number'])
            print '[+] Número Formatado: {}'.format(data['number_formatted'])
        else:
            frase = 'Ocorreu algum erro.'
            print '[196] {}'.format(frase)
            a = Pesquise(frase)
            a.fala(frase)
    elif 'aonde fica o cep' in comando or 'onde fica o cep' in comando:
        from cep import buscaCEP
        buscaCEP(comando)
    elif 'localize um ip' in comando or 'localizacao do ip' in comando or 'onde fica o ip' in comando or 'aonde fica o ip' in comando or 'localize o ip' in comando:
        import ip
        tokenIP = '08e7b32026b7f15eb659455c1eaa09c6'
        ip2 = raw_input('Digite o IP: ')
        ip2 = str(ip2)
        print '[+] IP de Busca: \33[32m{}\33[0;0m'.format(ip2)
        ip.localizeIP(ip2, tokenIP)
    elif 'que dia e hoje' in comando or 'qual a data de hoje' in comando:
        from datetime import date
        data_atual = date.today()
        ano = data_atual.year
        mes = data_atual.month
        dia = data_atual.day
        frase = '{} do {} de {}'.format(str(dia), str(mes), str(ano))
        a = Pesquise(frase)
        a.fala(frase)
    elif 'configure um despertador' in comando or 'me lembre de uma coisa' in comando or 'crie um alarme' in comando or 'me lembre de uma coisa' in comando or 'configure o despertador' in comando:
        nome_alarme = raw_input('Digite o nome do alarme: ')
        frase = 'Digite o dia do evento'
        data_alarme = raw_input('Digite o dia: ')
        hora_alarme = raw_input('Digite a hora: ')
        sql = 'INSERT INTO alexa_alarmes(nome, data, hora) VALUES (%s, %s, %s)'
        sql_data = (nome_alarme, data_alarme, hora_alarme)
        database.insert(sql, sql_data)
        frase = 'Alarme configurado'
        a = Pesquise(frase)
        a.fala(frase)
    elif 'ultimas noticias' in comando or 'noticias do dia' in comando or 'me fale as ultimas noticias' in comando:
        A = Pesquise(comando)
        A.noticias()
    elif 'abra o jogo da velha' in comando or 'jogar jogo da velha' in comando:
        import subprocess
        cmd = ['xterm']
        cmd.extend(['-e', 'cd .. && cd inys_jogos && ./forca; exec $SHELL'])
        subprocess.Popen(cmd, stdout=subprocess.PIPE)
    elif 'abra o alarme' in comando or 'abra o script do alarme' in comando or 'abrir alarme' in comando or 'abrir o alarme' in comando:
        import subprocess
        cmd = ['xterm']
        cmd.extend(['-e', 'python alarmes.py; exec $SHELL'])
        subprocess.Popen(cmd, stdout=subprocess.PIPE)
    elif 'mandar um e-mail' in comando or 'mande um e-mail' in comando or 'escrever um e-mail' in comando:
        import subprocess
        cmd = ['xterm']
        cmd.extend(['-e', 'python enviarEmail.py; exec $SHELL'])
        subprocess.Popen(cmd, stdout=subprocess.PIPE)
        cdm = ['xterm']
        cdm.extend(['-e', 'nano mensagem.html; exec $SHELL'])
        subprocess.Popen(cdm, stdout=subprocess.PIPE)
# elif 'chame um taxi' in comando:
#
    else:
        frase = 'Eu ainda nao fui programada para isso ... voce poderia me ensinar.'
        a = Pesquise(frase)
        a.fala(frase)
        os.system('python inys.py')
Example #51
0
def addTask():
    database.insert(taskVar.get(), assignVar.get())
    ViewList()
Example #52
0
def inputdata(qbox, abox, entry):
    q = qbox.get()
    a = abox.get()
    d = entry.get()
    database.insert(q, a, d)
    deckedit(entry)
Example #53
0
File: domains-add.py  Project: sr/beadmin
import dbconf
import re
import sys
import os

if os.environ.has_key('SUDO_USER'):
        user =  os.environ['SUDO_USER']
else:
        user = '******'

userfromdb = db.select('users', where="login = '******'" % user)
if len(userfromdb) == 0:
	print 'Votre utilisateur n\'a pas été autorisé à utiliser cet outil.'
	print 'Merci de contacter l\'administrateur.'
	sys.exit()

id_user = list(userfromdb)[0].id

while True:
	domain = text('Nom de domaine du site :')
	if re.match(r'^([a-zA-Z0-9_\-]+\.)+(fr|com|org|net|info|name|be|eu)$', domain):
		break

domains = db.select('domains', where="name = '%s'" % domain)

if len(domains) == 0:
	db.insert('domains', name=domain)
	print 'Domaine ajouté !'
else:
	print 'Ce domaine existe déjà.'
Example #54
0
    def setUpClass(cls) -> None:
        """Create the expected schema in the test database"""

        database_name = "data_types_database"
        schema_name = "dbo"

        cls.EXPECTED_METADATA = {
            'data_types_database_dbo_integers': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values': [(0, -9223372036854775808, -2147483648, -32768),
                           (1, 0, 0, 0),
                           (2, 9223372036854775807, 2147483647, 32767),
                           (3, None, None, None),
                           (4, 5603121835631323156, 9665315, 11742),
                           (5, -4898597031243117659, 140946744, -16490),
                           (6, -5168593529138936444, -1746890910, 2150),
                           (7, 1331162887494168851, 1048867088, 12136),
                           (8, -4495110645908459596, -1971955745, 18257),
                           (9, -1575653240237191360, -533282078, 22022),
                           (10, 6203877631305833079, 271324086, -18782),
                           (11, 7293147954924079156, 1003163272, 3593),
                           (12, -1302715001442736465, -1626372079, 3788),
                           (13, -9062593720232233398, 1646478731, 17621)],
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'integers',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'MyBigIntColumn': {
                        'sql-datatype': 'bigint',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'MyIntColumn': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'MySmallIntColumn': {
                        'sql-datatype': 'smallint',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema': {
                    'type': 'object',
                    'properties': {
                        'MySmallIntColumn': {
                            'type': ['integer', 'null'],
                            'minimum': -32768,
                            'maximum': 32767,
                            'inclusion': 'available',
                            'selected': True
                        },
                        'pk': {
                            'type': ['integer'],
                            'minimum': -2147483648,
                            'maximum': 2147483647,
                            'inclusion': 'automatic',
                            'selected': True
                        },
                        'MyBigIntColumn': {
                            'type': ['integer', 'null'],
                            'minimum': -9223372036854775808,
                            'maximum': 9223372036854775807,
                            'inclusion': 'available',
                            'selected': True
                        },
                        'MyIntColumn': {
                            'type': ['integer', 'null'],
                            'minimum': -2147483648,
                            'maximum': 2147483647,
                            'inclusion': 'available',
                            'selected': True
                        }
                    },
                    'selected': True
                }
            },
            'data_types_database_dbo_tiny_integers_and_bools': {
                'is-view':
                False,
                'schema-name':
                schema_name,
                'row-count':
                0,
                'values': [(0, 0, False), (1, 255, True), (2, None, None),
                           (3, 230, False), (4, 6, True), (5, 236, True),
                           (6, 27, True), (7, 132, True), (8, 251, False),
                           (9, 187, True), (10, 157, True), (11, 51, True),
                           (12, 144, True)],
                'table-key-properties': {'pk'},
                'selected':
                None,
                'database-name':
                database_name,
                'stream_name':
                'tiny_integers_and_bools',
                'fields': [{
                    'pk': {
                        'sql-datatype': 'int',
                        'selected-by-default': True,
                        'inclusion': 'automatic'
                    }
                }, {
                    'MyTinyIntColumn': {
                        'sql-datatype': 'tinyint',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }, {
                    'my_boolean': {
                        'sql-datatype': 'bit',
                        'selected-by-default': True,
                        'inclusion': 'available'
                    }
                }],
                'schema': {
                    'type': 'object',
                    'properties': {
                        'MyTinyIntColumn': {
                            'type': ['integer', 'null'],
                            'minimum': 0,
                            'maximum': 255,
                            'inclusion': 'available',
                            'selected': True
                        },
                        'pk': {
                            'type': ['integer'],
                            'minimum': -2147483648,
                            'maximum': 2147483647,
                            'inclusion': 'automatic',
                            'selected': True
                        },
                        'my_boolean': {
                            'type': ['boolean', 'null'],
                            'inclusion': 'available',
                            'selected': True
                        }
                    },
                    'selected': True
                }
            }
        }

        drop_all_user_databases()

        query_list = list(
            create_database(database_name, "Latin1_General_CS_AS"))

        table_name = "integers"
        column_name = [
            "pk", "MyBigIntColumn", "MyIntColumn", "MySmallIntColumn"
        ]
        column_type = ["int", "bigint", "int", "smallint"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(
                database_name, schema_name, table_name,
                cls.EXPECTED_METADATA["data_types_database_dbo_integers"]
                ["values"]))

        table_name = "tiny_integers_and_bools"
        column_name = ["pk", "MyTinyIntColumn", "my_boolean"]
        column_type = ["int", "tinyint", "bit"]
        primary_key = {"pk"}
        column_def = [" ".join(x) for x in list(zip(column_name, column_type))]
        query_list.extend(
            create_table(database_name,
                         schema_name,
                         table_name,
                         column_def,
                         primary_key=primary_key))
        query_list.extend(
            insert(
                database_name, schema_name, table_name, cls.EXPECTED_METADATA[
                    "data_types_database_dbo_tiny_integers_and_bools"]
                ["values"]))

        mssql_cursor_context_manager(*query_list)
        cls.expected_metadata = cls.discovery_expected_metadata
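Every fixture in these MSSQL examples funnels its SQL through mssql_cursor_context_manager, which is not part of the listing. A pyodbc-based sketch is given below; the connection string is a placeholder and the real helper may manage transactions differently.

import pyodbc

def mssql_cursor_context_manager(*queries):
    # sketch: run each statement in order on one connection and return the
    # rows of the last statement that produced a result set
    connection = pyodbc.connect(
        "DRIVER={ODBC Driver 17 for SQL Server};SERVER=localhost;"
        "UID=sa;PWD=<placeholder>", autocommit=True)
    results = []
    try:
        cursor = connection.cursor()
        for query in queries:
            cursor.execute(query)
            if cursor.description is not None:  # only SELECTs produce rows
                results = cursor.fetchall()
    finally:
        connection.close()
    return results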
Example #55
0
File: test.py  Project: ASBishop/cbt
        # make readahead into an int
        params[3] = int(params[3][7:])

        # Make op_size into an int
        params[4] = int(params[4][8:])

        # Make cprocs into an int
        params[5] = int(params[5][17:])

        # Make io_depth int an int
        params[6] = int(params[6][9:])

        params_hash = mkhash(params)
        params = [params_hash] + params
        params.extend([0,0])
        database.insert(params)

        for line in open(inputname):
            if "aggrb" in line:
                 bw = getbw(splits(line, 'aggrb=', ','))
                 if "READ" in line:
                     database.update_readbw(params_hash, bw)
                 elif "WRITE" in line:
                     database.update_writebw(params_hash, bw)
    html = HTMLGenerator()
    html.add_html(html.read_file('/home/nhm/src/cbt/include/html/table.html'))
    html.add_style(html.read_file('/home/nhm/src/cbt/include/css/table.css'))
    html.add_script(html.read_file('/home/nhm/src/cbt/include/js/jsxcompressor.min.js'))
    html.add_script(html.read_file('/home/nhm/src/cbt/include/js/d3.js'))
    html.add_script(html.read_file('/home/nhm/src/cbt/include/js/d3var.js'))
    html.add_script(html.format_data(database.fetch_table(['opsize', 'testtype'])))
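Example #55 keys each row on mkhash(params), which is not included in the excerpt. A plausible sketch, assuming the helper just derives a stable digest from the ordered parameter list (the real cbt helper may differ):

import hashlib

def mkhash(params):
    # derive a stable key from the ordered parameter list (assumption)
    joined = "|".join(str(param) for param in params)
    return hashlib.md5(joined.encode("utf-8")).hexdigest()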
Example #56
0
    def test_run(self):
        """
        Verify that a full sync can capture all data and send it in the correct format
        for integer and boolean (bit) data.
        Verify that the first sync sends an activate version message immediately.
        Verify that the table version is incremented.
        """
        print("running test {}".format(self.name()))

        conn_id = self.create_connection()

        # run in check mode
        check_job_name = runner.run_check_mode(self, conn_id)

        # verify check  exit codes
        exit_status = menagerie.get_exit_status(conn_id, check_job_name)
        menagerie.verify_check_exit_status(self, exit_status, check_job_name)

        # get the catalog information of discovery
        found_catalogs = menagerie.get_catalogs(conn_id)
        # TODO - change the replication key back to replication_key_column when rowversion is supported
        additional_md = [{
            "breadcrumb": [],
            "metadata": {
                'replication-method': 'INCREMENTAL',
                'replication-key': 'temp_replication_key_column'
            }
        }]

        non_selected_properties = [
            "nvarchar_text", "varchar_text", "varbinary_data", "geospacial",
            "geospacial_map", "markup", "tree", "variant",
            "SpecialPurposeColumns", "started_at", "ended_at"
        ]
        BaseTapTest.select_all_streams_and_fields(
            conn_id,
            found_catalogs,
            non_selected_properties=non_selected_properties,
            additional_md=additional_md)

        # clear state
        menagerie.set_state(conn_id, {})
        sync_job_name = runner.run_sync_mode(self, conn_id)

        # verify tap and target exit codes
        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)

        # verify record counts of streams
        record_count_by_stream = runner.examine_target_output_file(
            self, conn_id, self.expected_streams(),
            self.expected_primary_keys_by_stream_id())
        expected_count = {
            k: len(v['values'])
            for k, v in self.expected_metadata().items()
        }
        self.assertEqual(record_count_by_stream, expected_count)

        # verify records match on the first sync
        records_by_stream = runner.get_records_from_target_output()

        table_version = dict()
        for stream in self.expected_streams():
            with self.subTest(stream=stream):
                stream_expected_data = self.expected_metadata()[stream]
                table_version[stream] = records_by_stream[stream][
                    'table_version']

                # verify on the first sync you get
                # activate version message before and after all data for the full table
                # and before the logical replication part
                self.assertEqual(
                    records_by_stream[stream]['messages'][0]['action'],
                    'activate_version')
                self.assertEqual(
                    records_by_stream[stream]['messages'][-1]['action'],
                    'activate_version')
                self.assertTrue(
                    all([
                        m["action"] == "upsert"
                        for m in records_by_stream[stream]['messages'][1:-1]
                    ]),
                    msg="Expect all but the first and last message to be upserts")
                self.assertEqual(len(
                    records_by_stream[stream]['messages'][1:-1]),
                                 len(stream_expected_data[self.VALUES]),
                                 msg="incorrect number of upserts")

                column_names = [
                    list(field_data.keys())[0]
                    for field_data in stream_expected_data[self.FIELDS]
                ]

                expected_messages = [
                    {
                        "action": "upsert",
                        "data": {
                            column: value
                            for column, value in list(
                                zip(column_names, row_values))
                            if column not in non_selected_properties
                        }  # TODO - change to -1 for using rowversion for replication key
                    } for row_values in sorted(
                        stream_expected_data[self.VALUES],
                        key=lambda row: (row[1] is not None, row[1]))
                ]

                # Verify all data is correct for incremental
                for expected_row, actual_row in zip(
                        expected_messages,
                        records_by_stream[stream]['messages'][1:-1]):
                    with self.subTest(expected_row=expected_row):
                        self.assertEqual(actual_row["action"], "upsert")
                        self.assertEqual(
                            len(expected_row["data"].keys()),
                            len(actual_row["data"].keys()),
                            msg="there are not the same number of columns")
                        for column_name, expected_value in expected_row[
                                "data"].items():
                            if isinstance(expected_value, datetime):
                                # sql server only keeps milliseconds not microseconds
                                self.assertEqual(
                                    expected_value.isoformat().replace(
                                        '000+00:00',
                                        'Z').replace('+00:00', 'Z'),
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat().replace(
                                            '000+00:00',
                                            'Z').replace('+00:00', 'Z'),
                                        actual_row["data"][column_name]))
                            else:
                                self.assertEqual(
                                    expected_value,
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_row, actual_row))
                print("records are correct for stream {}".format(stream))

                # verify state and bookmarks
                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                self.assertIsNone(
                    state.get('currently_syncing'),
                    msg="expected state's currently_syncing to be None")
                self.assertIsNone(bookmark.get('current_log_version'),
                                  msg="no log_version for incremental")
                self.assertIsNone(bookmark.get('initial_full_table_complete'),
                                  msg="no full table for incremental")
                # find the max value of the replication key TODO - change to -1 for using rowversion for replication key
                self.assertEqual(
                    bookmark['replication_key_value'],
                    re.sub(
                        r'\d{3}Z', "Z",
                        max([
                            row[1] for row in stream_expected_data[self.VALUES]
                        ]).strftime("%Y-%m-%dT%H:%M:%S.%fZ")))
                # self.assertEqual(bookmark['replication_key'], 'replication_key_value')

                self.assertEqual(
                    bookmark['version'],
                    table_version[stream],
                    msg="expected bookmark for stream to match version")

                expected_schemas = self.expected_metadata()[stream]['schema']
                self.assertEqual(records_by_stream[stream]['schema'],
                                 expected_schemas,
                                 msg="expected: {} != actual: {}".format(
                                     expected_schemas,
                                     records_by_stream[stream]['schema']))

        # ----------------------------------------------------------------------
        # invoke the sync job AGAIN after inserting, updating, and deleting rows
        # ----------------------------------------------------------------------

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "text_and_image_deprecated_soon"
        column_name = [
            "pk", "temp_replication_key_column", "nvarchar_text",
            "varchar_text", "varbinary_data", "replication_key_column"
        ]
        insert_value = [(3,
                         datetime(2018,
                                  12,
                                  31,
                                  23,
                                  59,
                                  59,
                                  993000,
                                  tzinfo=timezone.utc), "JKL", "MNO",
                         "PQR".encode('utf-8'))]
        update_value = [(0,
                         datetime(2018,
                                  12,
                                  31,
                                  23,
                                  59,
                                  59,
                                  997000,
                                  tzinfo=timezone.utc), "JKL", "MNO",
                         "PQR".encode('utf-8'))]
        query_list = (insert(database_name,
                             schema_name,
                             table_name,
                             insert_value,
                             column_names=column_name[:-1]))
        query_list.extend(
            update_by_pk(database_name, schema_name, table_name, update_value,
                         column_name))
        mssql_cursor_context_manager(*query_list)

        values = insert_value + [(
            1, datetime(2018, 12, 31, 23, 59, 59, 987000, tzinfo=timezone.utc),
            "abc", "def", "ghi".encode('utf-8'))] + update_value
        rows = mssql_cursor_context_manager(*[
            "select replication_key_column from data_types_database.dbo.text_and_image_deprecated_soon "
            "where pk in (0, 1,3) order by pk desc"
        ])
        rows = [tuple(row) for row in rows]
        rows = [("0x{}".format(value.hex().upper()), ) for value, in rows]
        row_with_version = [x[0] + x[1] for x in zip(values, rows)]
        self.EXPECTED_METADATA[
            'data_types_database_dbo_text_and_image_deprecated_soon'][
                'values'] = row_with_version

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "weirdos"
        column_name = [
            "pk", "temp_replication_key_column", "geospacial",
            "geospacial_map", "markup", "guid", "tree", "variant",
            "SpecialPurposeColumns", "replication_key_column"
        ]
        insert_value = [(3,
                         datetime(9999,
                                  12,
                                  31,
                                  23,
                                  59,
                                  59,
                                  993000,
                                  tzinfo=timezone.utc), None, None, None,
                         str(uuid.uuid1()).upper(), None, None, None)]
        update_value = [(1,
                         datetime(9999,
                                  12,
                                  31,
                                  23,
                                  59,
                                  59,
                                  997000,
                                  tzinfo=timezone.utc), None, None, None,
                         str(uuid.uuid1()).upper(), None, None, None)]
        delete_value = [(0, )]
        query_list = (insert(database_name, schema_name, table_name,
                             insert_value, column_name[:-1]))
        query_list.extend(
            delete_by_pk(database_name, schema_name, table_name, delete_value,
                         column_name[:1]))
        query_list.extend(
            update_by_pk(database_name, schema_name, table_name, update_value,
                         column_name))
        mssql_cursor_context_manager(*query_list)

        values = insert_value + [
            (2, datetime(9999, 12, 31, 23, 59, 59, 990000,
                         tzinfo=timezone.utc), None, None, None,
             "B792681C-AEF4-11E9-8002-0800276BC1DF", None, None, None)
        ] + update_value
        rows = mssql_cursor_context_manager(*[
            "select replication_key_column from data_types_database.dbo.weirdos "
            "where pk in (1, 2, 3) order by pk desc"
        ])
        rows = [tuple(row) for row in rows]
        rows = [("0x{}".format(value.hex().upper()), ) for value, in rows]
        row_with_version = [x[0] + x[1] for x in zip(values, rows)]
        self.EXPECTED_METADATA['data_types_database_dbo_weirdos'][
            'values'] = row_with_version

        database_name = "data_types_database"
        schema_name = "dbo"
        table_name = "computed_columns"
        column_name = [
            "pk", "temp_replication_key_column", "started_at", "ended_at",
            "replication_key_column"
        ]
        insert_value = [(2,
                         datetime(9998,
                                  12,
                                  31,
                                  23,
                                  59,
                                  59,
                                  990000,
                                  tzinfo=timezone.utc),
                         datetime(1980, 5, 30, 16), datetime.now())]
        update_value = [(0,
                         datetime(9998,
                                  12,
                                  31,
                                  23,
                                  59,
                                  59,
                                  997000,
                                  tzinfo=timezone.utc), datetime(1942, 11, 30),
                         datetime(2017, 2, 12))]
        query_list = (insert(database_name, schema_name, table_name,
                             insert_value, column_name[:-1]))
        query_list.extend(
            update_by_pk(database_name, schema_name, table_name, update_value,
                         column_name))
        mssql_cursor_context_manager(*query_list)
        values = insert_value + [(
            1, datetime(9998, 12, 31, 23, 59, 59, 987000, tzinfo=timezone.utc),
            datetime(1970, 1, 1, 0), datetime.now())] + update_value
        rows = mssql_cursor_context_manager(*[
            "select replication_key_column from data_types_database.dbo.computed_columns "
            "where pk in (0, 1, 2) order by pk desc"
        ])
        rows = [tuple(row) for row in rows]
        row_with_duration = [x[0] + x[1] for x in zip(values, rows)]
        self.EXPECTED_METADATA['data_types_database_dbo_computed_columns'][
            'values'] = row_with_duration

        sync_job_name = runner.run_sync_mode(self, conn_id)

        # verify tap and target exit codes
        exit_status = menagerie.get_exit_status(conn_id, sync_job_name)
        menagerie.verify_sync_exit_status(self, exit_status, sync_job_name)
        record_count_by_stream = runner.examine_target_output_file(
            self, conn_id, self.expected_streams(),
            self.expected_primary_keys_by_stream_id())
        expected_count = {
            k: len(v['values'])
            for k, v in self.expected_metadata().items()
        }
        self.assertEqual(record_count_by_stream, expected_count)
        records_by_stream = runner.get_records_from_target_output()

        for stream in self.expected_streams():
            with self.subTest(stream=stream):
                stream_expected_data = self.expected_metadata()[stream]
                new_table_version = records_by_stream[stream]['table_version']

                # verify that on a subsequent sync the records are bracketed by activate_version messages (first and last message)
                self.assertEqual(
                    records_by_stream[stream]['messages'][0]['action'],
                    'activate_version')
                self.assertEqual(
                    records_by_stream[stream]['messages'][-1]['action'],
                    'activate_version')
                self.assertTrue(
                    all([
                        message["action"] == "upsert" for message in
                        records_by_stream[stream]['messages'][1:-1]
                    ]))
                self.assertEqual(len(
                    records_by_stream[stream]['messages'][1:-1]),
                                 len(stream_expected_data[self.VALUES]),
                                 msg="incorrect number of upserts")

                column_names = [
                    list(field_data.keys())[0]
                    for field_data in stream_expected_data[self.FIELDS]
                ]

                expected_messages = [{
                    "action": "upsert",
                    "data": {
                        column: value
                        for column, value in list(zip(column_names,
                                                      row_values))
                        if column not in non_selected_properties
                    }
                } for row_values in sorted(stream_expected_data[self.VALUES],
                                           key=lambda row:
                                           (row[1] is not None, row[1]))]

                # remove sequences from actual values for comparison
                [
                    message.pop("sequence")
                    for message in records_by_stream[stream]['messages'][1:-1]
                ]

                # Verify all data is correct
                for expected_row, actual_row in list(
                        zip(expected_messages,
                            records_by_stream[stream]['messages'][1:-1])):
                    with self.subTest(expected_row=expected_row):
                        self.assertEqual(actual_row["action"], "upsert")

                        # we only send the _sdc_deleted_at column for deleted rows
                        self.assertEqual(
                            len(expected_row["data"].keys()),
                            len(actual_row["data"].keys()),
                            msg="there are not the same number of columns")
                        for column_name, expected_value in expected_row[
                                "data"].items():
                            if isinstance(expected_value, datetime):
                                # sql server only keeps milliseconds not microseconds
                                self.assertEqual(
                                    expected_value.isoformat().replace(
                                        '000+00:00',
                                        'Z').replace('+00:00', 'Z'),
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_value.isoformat().replace(
                                            '000+00:00',
                                            'Z').replace('+00:00', 'Z'),
                                        actual_row["data"][column_name]))
                            else:
                                self.assertEqual(
                                    expected_value,
                                    actual_row["data"][column_name],
                                    msg="expected: {} != actual {}".format(
                                        expected_row, actual_row))
                print("records are correct for stream {}".format(stream))

                # verify state and bookmarks
                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                self.assertIsNone(
                    state.get('currently_syncing'),
                    msg="expected state's currently_syncing to be None")
                self.assertIsNone(bookmark.get('current_log_version'),
                                  msg="no log_version for incremental")
                self.assertIsNone(bookmark.get('initial_full_table_complete'),
                                  msg="no full table for incremental")
                # find the max value of the replication key
                self.assertEqual(
                    bookmark['replication_key_value'],
                    re.sub(
                        r'\d{3}Z', "Z",
                        max([
                            row[1] for row in stream_expected_data[self.VALUES]
                        ]).strftime("%Y-%m-%dT%H:%M:%S.%fZ")))
                # self.assertEqual(bookmark['replication_key'], 'replication_key_value')

                self.assertEqual(
                    bookmark['version'],
                    table_version[stream],
                    msg="expected bookmark for stream to match version")
                self.assertEqual(
                    bookmark['version'],
                    new_table_version,
                    msg="expected bookmark for stream to match version")

                state = menagerie.get_state(conn_id)
                bookmark = state['bookmarks'][stream]

                expected_schemas = self.expected_metadata()[stream]['schema']
                self.assertEqual(records_by_stream[stream]['schema'],
                                 expected_schemas,
                                 msg="expected: {} != actual: {}".format(
                                     expected_schemas,
                                     records_by_stream[stream]['schema']))
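# ---------------------------------------------------------------------------
# Note: insert(), update_by_pk() and delete_by_pk() above are helper functions
# used by this test; their implementation is not shown in this snippet. The
# sketch below is only an illustrative reconstruction of query builders with
# the same calling convention (they return a list of T-SQL strings for
# mssql_cursor_context_manager to execute) and is not the tap's actual code.
# ---------------------------------------------------------------------------
from datetime import datetime


def _literal(value):
    # render a Python value as a T-SQL literal (illustrative only, not injection-safe)
    if value is None:
        return "NULL"
    if isinstance(value, bytes):
        return "0x" + value.hex().upper()
    if isinstance(value, datetime):
        return "'{}'".format(value.strftime("%Y-%m-%d %H:%M:%S.%f")[:-3])
    if isinstance(value, str):
        return "'{}'".format(value.replace("'", "''"))
    return str(value)


def insert(database, schema, table, rows, column_names):
    # one INSERT statement per row; callers pass column_name[:-1] to skip the
    # server-maintained replication_key_column
    target = "{}.{}.{}".format(database, schema, table)
    return [
        "insert into {} ({}) values ({})".format(
            target, ", ".join(column_names),
            ", ".join(_literal(value) for value in row)) for row in rows
    ]


def update_by_pk(database, schema, table, rows, column_names):
    # assumes the first column is the primary key; zip() drops any trailing
    # column (such as a rowversion) for which the row supplies no value
    target = "{}.{}.{}".format(database, schema, table)
    queries = []
    for row in rows:
        assignments = ", ".join(
            "{} = {}".format(column, _literal(value))
            for column, value in zip(column_names[1:], row[1:]))
        queries.append("update {} set {} where {} = {}".format(
            target, assignments, column_names[0], _literal(row[0])))
    return queries


def delete_by_pk(database, schema, table, rows, column_names):
    # rows holds primary-key tuples such as [(0, )]
    target = "{}.{}.{}".format(database, schema, table)
    return [
        "delete from {} where {} = {}".format(target, column_names[0],
                                              _literal(row[0])) for row in rows
    ]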
示例#57
0
			for i in range(len(params.X)):
				params.X[i] = params.X[i] + params.dX[i] 
			# copy the computed values back
			C1.przepisz(C1_new)
			C2.przepisz(C2_new)
			C3.przepisz(C3_new)
			C4.przepisz(C4_new)
			C5.przepisz(C5_new)
			C_ir.przepisz(C5)
			C_r.przepisz(C_u)
			C_r.s_o = 0.0

			# write the results to the database
			r = C1.stworzRekord(params)
			db.insert('C1', r)
			r = C2.stworzRekord(params)
			db.insert('C2', r)
			r = C3.stworzRekord(params)
			db.insert('C3', r)
			r = C4.stworzRekord(params)
			db.insert('C4', r)
			r = C5.stworzRekord(params)
			db.insert('C5', r)
			r = C_ir.stworzRekord(params)
			db.insert('internal_recycle', r)
			r = C_r.stworzRekord(params)
			db.insert('external_recycle', r)

			db.insertSettler('settler', params.X)
			db.commitChanges()
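# The db object used above is created elsewhere in the program and is not shown
# in this snippet. Below is a minimal, hypothetical sketch of a sqlite3-backed
# wrapper with the same interface; the table layouts and the shape of the
# records returned by stworzRekord() are assumptions, not the original code.
import sqlite3


class ResultsDatabase(object):

    def __init__(self, path):
        # assumes the target tables already exist in the sqlite file
        self.conn = sqlite3.connect(path)

    def insert(self, table, record):
        # record is assumed to be a flat sequence of values matching the table's columns
        placeholders = ", ".join("?" for _ in record)
        self.conn.execute(
            "INSERT INTO {} VALUES ({})".format(table, placeholders), list(record))

    def insertSettler(self, table, values):
        self.insert(table, values)

    def commitChanges(self):
        self.conn.commit()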
示例#58
0
# import database
#
#
# print(database.insert())
#
# print(database.update())
#
# print(database.select())

from database import insert, update
from demo import insert as ins
print(insert())
print(ins())
print(update())
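# The snippet above relies on local database.py and demo.py modules that are
# not included here. Hypothetical minimal stubs that would satisfy the imports
# are sketched below; the "as ins" alias exists so that demo.insert does not
# shadow database.insert.

# --- database.py (stub) ---
def insert():
    return "database.insert"


def update():
    return "database.update"


# --- demo.py (stub) ---
def insert():
    return "demo.insert"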
示例#59
0
# -*- coding: utf-8 -*-

import action
import random
import os
import filecmp
import sys
import time
import database
import thread
import threading
import achievement as achi

mysql = database.insert()
dbtemphax = database.temphax()
dbworld = database.world()
dbgateway = database.gateway()

#### Actual commands ####

def sheen():
    sheenstuff = ["Winning", "Bi-winning", "Win Win everywhere", "Your face will melt off!", "Whats not to love?", "Epic Winning!", "Win here, win there, win everywhere!", "Absolute victory!", "That's how I roll", "No pants? - Winning!", "Duuh, WINNING! WINNING!", "The only thing Im addicted to right now is winning."]
    win = random.randint(0, 11)
    bro = (sheenstuff[win])
    action.say(bro, 0)

def version(v):
    action.say("Running MineMon version: " + v+" by Oscar Carlberg", 0.2)
    action.say("New in this release:", 0.5)
    changes = database.get_changelog()
    changes = changes[0]['changes']
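# Picking an index with random.randint(0, 11) in sheen() means the upper bound
# has to be kept in sync with len(sheenstuff) by hand; random.choice works for
# any list length. An equivalent sketch (phrase list shortened for illustration):
def sheen_choice():
    sheenstuff = ["Winning", "Bi-winning", "Epic Winning!"]
    action.say(random.choice(sheenstuff), 0)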
示例#60
0
def insert_message_tracking(raid_id, chat_id, message_id):
    return db.insert('message_tracking', {'raid_id': raid_id, 'chat_id': chat_id, 'message_id': message_id})
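
# Usage sketch with illustrative values; db.insert is assumed to take a table
# name plus a dict of column values, exactly as in the function above.
tracking_id = insert_message_tracking(raid_id=42, chat_id=-1001234567890, message_id=981)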