Example #1
    def setCustomFields(self, key, updates):
        """Set a list of fields for this issue in Jira

        The updates parameter is a dictionary mapping each custom field name to the
        new value to set.

        /!\ This only works for fields of type text.
        """

        issue = self.getIssue(key)
        update = {'fields': {}}

        for updatename, updatevalue in updates.items():
            remotevalue = issue.get('named').get(updatename)
            if not remotevalue or remotevalue != updatevalue:
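                # reverse-lookup the field id from its display name via the 'names' map returned by Jira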
                fieldkey = [
                    k for k, v in issue.get('names').items()
                    if v == updatename
                ][0]
                update['fields'][fieldkey] = updatevalue

        if not update['fields']:
            # No fields to update
            debug('No updates required')
            return True

        resp = self.request('issue/%s' % (str(key)),
                            method='PUT',
                            data=json.dumps(update))

        if resp['status'] != 204:
            raise JiraException('Issue was not updated: %s' %
                                (str(resp['status'])))

        return True
Example #2
    def decommission_test(self):
        cluster = self.cluster

        tokens = cluster.balanced_tokens(4)
        cluster.populate(4, tokens=tokens).start()
        node1, node2, node3, node4 = cluster.nodelist()

        session = self.patient_cql_connection(node1)
        self.create_ks(session, 'ks', 2)
        self.create_cf(session, 'cf', columns={'c1': 'text', 'c2': 'text'})

        insert_c1c2(session, n=30000, consistency=ConsistencyLevel.QUORUM)

        cluster.flush()
        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        init_size = sizes[0]
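        # with balanced tokens and RF=2, every node should hold roughly the same amount of data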
        assert_almost_equal(*sizes)

        time.sleep(.5)
        node4.decommission()
        node4.stop()
        cluster.cleanup()
        time.sleep(.5)

        # Check we can get all the keys
        for n in xrange(0, 30000):
            query_c1c2(session, n, ConsistencyLevel.QUORUM)

        sizes = [node.data_size() for node in cluster.nodelist() if node.is_running()]
        debug(sizes)
        assert_almost_equal(sizes[0], sizes[1])
        assert_almost_equal((2.0 / 3.0) * sizes[0], sizes[2])
        assert_almost_equal(sizes[2], init_size)
Example #3
    def setCustomFields(self, key, updates):
        """Set a list of fields for this issue in Jira

        The updates parameter is a dictionary mapping each custom field name to the
        new value to set.

        /!\ This only works for fields of type text.
        """

        issue = self.getIssue(key)
        update = {'fields': {}}

        for updatename, updatevalue in updates.items():
            remotevalue = issue.get('named').get(updatename)
            if not remotevalue or remotevalue != updatevalue:
                fieldkey = [k for k, v in issue.get('names').items() if v == updatename][0]
                update['fields'][fieldkey] = updatevalue

        if not update['fields']:
            # No fields to update
            debug('No updates required')
            return True

        resp = self.request('issue/%s' % (str(key)), method='PUT', data=json.dumps(update))

        if resp['status'] != 204:
            raise JiraException('Issue was not updated: %s' % (str(resp['status'])))

        return True
Example #4
def createReceivingReport():
    try:
        devicesToSend = []
        strToSend = []
        gps_stats = 66
        bat = tools.getBatteryPercentage()
        st_bat = struct.pack(">I", bat)
        st_gps_stats = struct.pack(">I", gps_stats)
        whiteLen = struct.pack(">I", len(globalVars.devices_whitelist))
        blackLen = struct.pack(">I", len(globalVars.devices_blacklist))
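        # struct.pack('>I', n) yields 4 big-endian bytes: index [3] is the least
        # significant byte, and indexes [2] and [3] together carry the low 16 bits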
        strToSend.append(struct.pack(">I", 173)[3]) # Protocol
        # strToSend.append(struct.pack(">I", 0)) # Command ID
        strToSend.append(globalVars.longitude[3]) # Gateway Longitude
        strToSend.append(globalVars.longitude[2]) # Gateway Longitude
        strToSend.append(globalVars.longitude[1]) # Gateway Longitude
        strToSend.append(globalVars.longitude[0]) # Gateway Longitude
        strToSend.append(globalVars.latitude[3]) # Gateway Latitude 
        strToSend.append(globalVars.latitude[2]) # Gateway Latitude 
        strToSend.append(globalVars.latitude[1]) # Gateway Latitude 
        strToSend.append(globalVars.latitude[0]) # Gateway Latitude 
        strToSend.append(st_gps_stats[3]) # Gateway GPS Status & Report type HARDCODE
        strToSend.append(st_bat[3])
        strToSend.append(whiteLen[2])
        strToSend.append(whiteLen[3])
        strToSend.append(blackLen[2])
        strToSend.append(blackLen[3])
        tools.debug("Step 8 - Creating ack report: " + str(strToSend),'v')
        globalVars.lora_sent_acks.append(Device(addr="ackresp",raw=strToSend))
        # tools.manage_devices_send(devicesToSend)
    except BaseException as e1:
        checkError("Step 5 - Error creating ack report", e1)
        return [] 
Example #5
    def resolveMultiple(self, names = []):
        """Return multiple instances"""
        if type(names) != list:
            if type(names) == str:
                # wrap a single name in a list; list(names) would split the string into characters
                names = [names]
            else:
                raise Exception('Unexpected variable type')

        # Nothing has been passed, we use resolve()
        if len(names) < 1:
            M = self.resolve()
            if M:
                return [ M ]
            else:
                return [ ]

        # Try to resolve each instance
        result = []
        for name in names:
            M = self.resolve(name = name)
            if M:
                result.append(M)
            else:
                debug('Could not find instance called %s' % name)
        return result
Example #6
 def save(self):
     current_tab = self.tabs[self.tabWidget.currentIndex()]
     with open(current_tab._filepath, "w") as f:
         f.write(current_tab.QSciEditor.text())
     self.refresh_outline()
     debug(current_tab._filepath + " is saved...")
Example #7
def BlackListNewDevice(devString):
    try:
        print("##### Blacklist new device ")
        if globalVars.MAC_TYPE == "LORA":
            listDevices = [devString[i:i+16] for i in range(0, len(devString), 16)]
        elif globalVars.MAC_TYPE == "BLE":
            listDevices = [devString[i:i+12] for i in range(0, len(devString), 12)]

        print("##### Creating registers in blacklist: " + str(listDevices))
        f = open('/sd/blacklist.csv', 'r')
        black_lines = f.readlines()
        f.close()
        lstDummy = []
        with open('/sd/blacklist.csv', 'w') as out:
            for dev in listDevices:
                exists = False
                for wht in black_lines:
                    if dev in wht.split(',')[0]:
                        exists = True
                        break
                if not exists:
                    black_lines.append(str(dev).lower() + "," + str(int(utime.time())) + "\r\n")

            for ln in black_lines:
                out.write(ln)
                lstDummy.append(ln.split(',')[0].lower())

            globalVars.devices_blacklist = lstDummy

        tools.debug("New blacklist inserted: " + str(globalVars.devices_blacklist),'vv')
    except Exception as e:
        print("##### - Error writing new device in blacklist: " + str(e))  
Example #8
def WhiteListDeleteSpecificDevice(devString):
    try:
        # print("In deleted specific device method - Step 1")
        listDevices = [
            devString[i:i + 12] for i in range(0, len(devString), 12)
        ]
        lstToSave = []
        f = open('/sd/whitelist.csv', 'r')
        sent_lines = f.readlines()
        f.close()
        # print("In deleted specific device method - Step 2")
        with open('/sd/whitelist.csv', 'w') as out:
            for ln in sent_lines:
                if ln.split(',')[0] not in listDevices:
                    out.write(ln)
                    lstToSave.append(ln.split(',')[0])

        globalVars.devices_whitelist = lstToSave

        tools.debug(
            "New whitelist deleted: " + str(globalVars.devices_whitelist),
            'vv')
    except Exception as e:
        print("##### - Error cleaning whitelist: " + str(e))
        return 6, "##### - Error cleaning whitelist: " + str(e)
Example #9
def send_MQTT_message(device):
    global client
    try:
        tools.debug("MQTT Sending message", "v")
        mac_pos = [
            str(device.addr)[i:i + 2]
            for i in range(0, len(str(device.addr)), 2)
        ]
        dev_addr = ""
        for num, a in enumerate(mac_pos):
            dev_addr = dev_addr + a
            if num < (len(mac_pos) - 1):
                dev_addr = dev_addr + ":"

        msg_to = {
            "gpsQuality": 10,
            "lastEventDate": int(utime.time()) * 1000,
            "latitude": 40.34155561314692,
            "login": "",
            "longitude": -3.8205912308152103,
            "numSerie": str(dev_addr).upper()
        }
        client.publish(topic=globalVars.mqtt_topic,
                       msg=ujson.dumps(msg_to),
                       retain=False,
                       qos=1)
        return 1
    except BaseException as e:
        checkError("Error sending POST - ", e)
        client.disconnect()
        check_connectivity(e)
        return 0
Example #10
def mqtt_disconnect():
    global client
    try:
        tools.debug("Disconnecting MQTT", "v")
        client.disconnect()
    except BaseException as e:
        checkError("Error connecting to MQTT", e)
Example #11
 def drop():
     result = (None, None, None)
     try:
         debug('Dropping database')
         result = self.cli('/admin/tool/behat/cli/util.php', args='--drop', stdout=None, stderr=None)
     except:
         pass
     return result
Example #12
 def install():
     result = (None, None, None)
     try:
         debug('Installing Behat tables')
         result = self.cli('/admin/tool/behat/cli/util.php', args='--install', stdout=None, stderr=None)
     except:
         pass
     return result
Example #13
 def enable():
     result = (None, None, None)
     try:
         debug('Enabling Behat')
         result = self.cli('/admin/tool/behat/cli/util.php', args='--enable')
     except:
         pass
     return result
Example #14
def run_chrome(profile, output, refresh, undupe, clean, import_txt,
               get_hostname, output_name, list_profile, x_org_gui):
    tools.debug("profile       [", str(profile), "]\noutput        [",
                str(output), "]\nrefresh       [", str(refresh),
                "]\nundupe        [", str(undupe), "]\nclean         [",
                str(clean), "]\nimport_txt    [", str(import_txt),
                "]\nget_hostname  [", str(get_hostname), "]\noutput_name   [",
                str(output_name),
                "]\nlist_profiles [" + str(list_profile) + "]")
    if x_org_gui:
        import chromeExport
        chromeExport.main()
    elif list_profile:
        if not list_profile.isdigit():
            list_profile = preset.all_profiles
        tools.list_profiles(list_profile)
    else:
        if import_txt == preset.none and profile == preset.none:
            tools.underline()
            tools.display(preset.message["missing_parameter"])
            tools.overline()
        else:
            tools.display(preset.new_line + preset.new_line)
            tools.underline()
            tools.display(preset.message["starting_export"])

            if profile:
                email, full, name = get_profile(profile)
                bookmarks = bookMarks.generate_bookmarks(profile)
                tools.underline()
                tools.display(preset.message["process_user"] + ": {", full,
                              "} [" + email + "]")
                tools.overline()
                bookmarks_data = bookMarks.generate_data(bookmarks)
            else:
                bookmarks_data = []

            if import_txt:
                bookmarks_data = append_data_table(
                    bookmarks_data, import_text_file(import_txt))

            if not output_name:
                if output == "xlsx":
                    output_name = preset.xlsx_filename
                else:
                    output_name = preset.html_filename

            refresh = normalize(refresh)
            undupe = normalize(undupe)
            clean = normalize(clean)
            get_hostname = normalize(get_hostname)

            if output == "xlsx":
                generate_work_book(output_name, bookmarks_data, refresh,
                                   undupe, clean, get_hostname)
            else:
                generate_web_page(output_name, bookmarks_data, refresh, undupe,
                                  clean, get_hostname)
Example #15
 def enable():
     result = (None, None, None)
     try:
         debug('Enabling Behat')
         result = self.cli('/admin/tool/behat/cli/util.php',
                           args='--enable')
     except:
         pass
     return result
Example #16
    def get_outline(self, file, kinds):
        opts = ['-x', 'c-header', '-std=c++11', '-D__CODE_GENERATOR__']
        tu = self.index.parse(file, opts)
        if not tu:
            debug("Unable to analyse file")
            return None

        root = QStandardItemModel()
        self.append(root, tu.cursor, kinds)
        return root
Example #17
 def export_products(self, cr, uid, ids, context=None):
     if context is None: context = {}
     shop_ids = self.pool.get('sale.shop').search(cr, uid, [])
     for referential_id in ids:
         for shop in self.pool.get('sale.shop').browse(cr, uid, shop_ids, context):
             context['conn_obj'] = self.external_connection(cr, uid, referential_id, context=context)
             #shop.export_catalog
             tools.debug((cr, uid, shop, context,))
             shop.export_products(cr, uid, shop, context)
     return True
Example #18
def setSubProject(index, single):
	if single:
		pid = ''  # exec.background("sh %s/%s/run.sh" % (PROJHOME, index))
	else:
		pid = shell_exec("cd %s/%s;qsub lammps.pbs;" % (PROJHOME, index))
		from tools import debug
		debug("pbsid=%s" % pid)
		debug(time.strftime('%Y-%m-%d %H:%M:%S'))
		print "submit: %s\t%s/%s" % (pid, PROJHOME, index)
	# sleep(1)
	return pid
Example #19
 def create(self, cr, uid, vals, context=None):
     sentCode = vals['code']
     debug(sentCode)
     if sentCode == self.get_search(cr, uid, 'account.asset.asset'):
         codep = self.pool.get('ir.sequence').get(cr, uid,
                                                  'account.asset.asset')
     return super(ccorp_addons_account_assets, self).create(cr,
                                                            uid,
                                                            vals,
                                                            context=context)
Example #20
 def outline_clicked(self, index):
     loc = (self.treeOutline.model().itemFromIndex(index).location)
     debug("Node Location: " + str(loc.line) + "," + str(loc.column))
     self.tabs[self.tabWidget.currentIndex()].QSciEditor.setCursorPosition(
         loc.line - 1, loc.column - 1)
     self.tabs[self.tabWidget.currentIndex()].QSciEditor.ensureLineVisible(
         loc.line - 1)
     self.tabs[
         self.tabWidget.currentIndex()].QSciEditor.ensureCursorVisible()
     self.tabs[self.tabWidget.currentIndex()].QSciEditor.setFocus()
Example #21
 def drop():
     result = (None, None, None)
     try:
         debug('Dropping database')
         result = self.cli('/admin/tool/behat/cli/util.php',
                           args='--drop',
                           stdout=None,
                           stderr=None)
     except:
         pass
     return result
Example #22
 def install():
     result = (None, None, None)
     try:
         debug('Installing Behat tables')
         result = self.cli('/admin/tool/behat/cli/util.php',
                           args='--install',
                           stdout=None,
                           stderr=None)
     except:
         pass
     return result
Example #23
 def checkCachedClones(self, stable = True, integration = True):
     """Clone the official repository in a local cache"""
     cacheStable = os.path.join(self.cache, 'moodle.git')
     cacheIntegration = os.path.join(self.cache, 'integration.git')
     if not os.path.isdir(cacheStable) and stable:
         debug('Cloning stable repository into cache...')
         result = process('%s clone %s %s' % (C.get('git'), C.get('remotes.stable'), cacheStable))
         result = process('%s fetch -a' % C.get('git'), cacheStable)
     if not os.path.isdir(cacheIntegration) and integration:
         debug('Cloning integration repository into cache...')
         result = process('%s clone %s %s' % (C.get('git'), C.get('remotes.integration'), cacheIntegration))
         result = process('%s fetch -a' % C.get('git'), cacheIntegration)
Example #24
    def _sale_shop(self, cr, uid, callback, context=None):
        if context is None:
            context = {}
        proxy = self.pool.get('sale.shop')
        domain = [ ('magento_shop', '=', True), ('auto_import', '=', True) ]

        ids = proxy.search(cr, uid, domain, context=context)
        if ids:
            callback(cr, uid, ids, context=context)

        tools.debug(callback)
        tools.debug(ids)
        return True
Example #25
def sendLoRaWANMessage():
    global lora_socket
    try:
        if lora.has_joined():
            sendAckMessageThread(lora_socket)
        else:
            tools.debug("Impossible to send because device is not joined", 'v')
            join_lora()
            if lora.has_joined():
                sendAckMessageThread(lora_socket)
    except BaseException as eee:
        checkError("Error sending LoRaWAN message", eee)
Example #26
def generate_data(instance):
    data_header = []

    for folder in instance.folders:

        folder_item = None
        folder_date_added = preset.no_date
        folder_date_modified = preset.no_date
        folder_guid = preset.empty
        folder_id = preset.empty
        folder_last_visited = preset.no_date
        folder_name = preset.empty
        folder_sync_transaction_version = preset.empty
        folder_type = preset.empty
        folder_url = preset.empty

        for item in folder:
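            # dispatch on each key of the Chrome bookmarks JSON folder node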
            if item == preset.children:
                folder_item = read_content(folder[item])
            elif item == preset.meta_info:
                for element in folder[item]:
                    if element == preset.last_visited:
                        folder_last_visited = folder[item][element]
            elif item == preset.date_added:
                folder_date_added = folder[item]
            elif item == preset.date_modified:
                folder_date_modified = folder[item]
            elif item == preset.guid:
                folder_guid = folder[item]
            elif item == preset.item_id:
                folder_id = folder[item]
            elif item == preset.item_name:
                folder_name = folder[item]
            elif item == preset.sync_transaction_version:
                folder_sync_transaction_version = folder[item]
            elif item == preset.item_type:
                folder_type = folder[item]
            elif item == preset.url:
                folder_url = folder[item]
            else:
                tools.debug(preset.message["warning"] + str(item))

        folder_data = (folder_guid, utils.to_number(folder_id),
                       utils.to_number(folder_sync_transaction_version),
                       folder_type, utils.to_date(folder_date_added),
                       utils.to_date(folder_date_modified),
                       utils.to_date(folder_last_visited), folder_name,
                       folder_url)
        for item in folder_item:
            data_header.append(folder_data + item + preset.trail)
    return data_header
Example #27
def forceRTC(dt, type_dt):
    global rtc
    try:
        # dt = pycom.nvs_get('rtc')

        if type_dt == "tuple":
            print("Step RTC - Forcing Tuple RTC to " + str(dt))
            rtc.init(dt)
        elif type_dt == "epoch":
            print("Step RTC - Forcing Epoch RTC to " + str(int(dt)))
            rtc.init(utime.gmtime(int(dt)))

        utime.sleep(3)
        tools.debug("Setting time: " + str(int(utime.time())), "v")
        try:
            pycom.nvs_set('clock', str(int(utime.time())))
        except OSError as err:
            tools.debug("Error setting RTC: " + str(err), "v")

        utime.sleep(5)
        try:
            dt_current = pycom.nvs_get('clock')
        except OSError as err:
            dt_current = -1
            tools.debug("Error getting RTC: " + str(err), "v")

        tools.debug(
            "Current time: " + str(int(dt_current)) + " - RTC: " +
            str(getDatetime()), "v")
    except Exception as e1:
        checkError("Step RTC - Error initializing parametetr", e1)
 def checkNextReset(self):
     dt = utime.gmtime()
     tools.debug(
         "Scheduler: " + str(dt[6]) + " " + str(dt[2]) + "-" + str(dt[1]) +
         "-" + str(dt[0]) + " " + str(dt[3]) + ":" + str(dt[4]) + ":" +
         str(dt[5]), 'v')
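     # fire only inside a 15-second window right after the configured HH:MM:SS so the reset triggers once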
     if dt[3] == int(globalVars.dailyreset.split(":")[0]) and dt[4] == int(
             globalVars.dailyreset.split(":")[1]) and dt[5] > int(
                 globalVars.dailyreset.split(":")[2]) and dt[5] < (
                     int(globalVars.dailyreset.split(":")[2]) + 15):
         tools.debug(
             "Scheduler - Reseting module because of daily schedule", "v")
         utime.sleep(2)
         machine.reset()
Example #29
    def crash_during_decommission_test(self):
        """
        If a node crashes whilst another node is being decommissioned,
        upon restarting, the crashed node should not have invalid entries
        for the decommissioned node
        @jira_ticket CASSANDRA-10231
        """
        cluster = self.cluster
        cluster.populate(3).start(wait_other_notice=True)

        node1, node2 = cluster.nodelist()[0:2]

        t = DecommissionInParallel(node1)
        t.start()

        null_status_pattern = re.compile(".N(?:\s*)127\.0\.0\.1(?:.*)null(?:\s*)rack1")
        while t.is_alive():
            out = self.show_status(node2)
            if null_status_pattern.search(out):
                debug("Matched null status entry")
                break
            debug("Restarting node2")
            node2.stop(gently=False)
            node2.start(wait_for_binary_proto=True, wait_other_notice=False)

        debug("Waiting for decommission to complete")
        t.join()
        self.show_status(node2)

        debug("Sleeping for 30 seconds to allow gossip updates")
        time.sleep(30)
        out = self.show_status(node2)
        self.assertFalse(null_status_pattern.search(out))
Example #30
    def crash_during_decommission_test(self):
        """
        If a node crashes whilst another node is being decommissioned,
        upon restarting, the crashed node should not have invalid entries
        for the decommissioned node
        @jira_ticket CASSANDRA-10231
        """
        cluster = self.cluster
        cluster.populate(3).start(wait_other_notice=True)

        node1, node2 = cluster.nodelist()[0:2]

        t = DecommissionInParallel(node1)
        t.start()

        null_status_pattern = re.compile(
            ".N(?:\s*)127\.0\.0\.1(?:.*)null(?:\s*)rack1")
        while t.is_alive():
            out = self.show_status(node2)
            if null_status_pattern.search(out):
                debug("Matched null status entry")
                break
            debug("Restarting node2")
            node2.stop(gently=False)
            node2.start(wait_for_binary_proto=True, wait_other_notice=False)

        debug("Waiting for decommission to complete")
        t.join()
        self.show_status(node2)

        debug("Sleeping for 30 seconds to allow gossip updates")
        time.sleep(30)
        out = self.show_status(node2)
        self.assertFalse(null_status_pattern.search(out))
Example #31
    def checkDutyCycle(self):
        try:
            dt = utime.gmtime()
            if globalVars.flag_rtc_syncro == False:
                tools.debug(
                    "Scheduler - RTC is not syncronized, so not possible to check the DutyCycle properly",
                    "v")
                return
            dt_dm = str(dt[6]) + " " + str(dt[2]) + "-" + str(
                dt[1]) + "-" + str(dt[0]) + " " + str(dt[3]) + ":" + str(
                    dt[4]) + ":" + str(dt[5])
            tools.debug(
                "Scheduler - Current date: " + str(dt_dm) +
                " - Daily ends at: " + globalVars.dailyStandBy +
                "- Downlinks begin at: " + globalVars.startDownlink, "v")
            # --------- S1 (Sleep Cycle 1) - From End of the day to first Downlink message ---------------
            if dt[3] == int(
                    globalVars.dailyStandBy.split(":")
                [0]) and dt[4] == int(
                    globalVars.dailyStandBy.split(":")[1]) and dt[5] > int(
                        globalVars.dailyStandBy.split(":")[2]) and dt[5] < (
                            int(globalVars.dailyStandBy.split(":")[2]) + 60):
                rnd_tmp_1 = tools.calculateSleepTime(globalVars.dailyStandBy,
                                                     globalVars.startDownlink)
                tools.debug(
                    "Scheduler - DutyCycle - Going to sleep because the day ends and until the downlinks begin: "
                    + str(rnd_tmp_1), "v")
                tools.deepSleepWiloc(rnd_tmp_1)

            # --------- S2 - Backup sleeping process in case the device is still on when passing the maximum downlink time  ---------------
            if dt[3] == int(
                    globalVars.endDownlink.split(":")[0]) and dt[4] == int(
                        globalVars.endDownlink.split(":")[1]) and dt[5] > int(
                            globalVars.endDownlink.split(":")[2]) and dt[5] < (
                                int(globalVars.endDownlink.split(":")[2]) +
                                60):
                rnm_tmp = tools.calculateSleepTime(globalVars.endDownlink,
                                                   globalVars.dailyStart)
                tools.debug(
                    "Scheduler - DutyCycle - Going to sleep until the day begins: "
                    + str(rnm_tmp), "v")
                tools.deepSleepWiloc(rnm_tmp)
            # ---------- Check if today is the day OFF --------------
            if dt[6] in globalVars.dayOff:
                tools.debug(
                    "Scheduler - Going to sleep because is the day OFF", "v")
                tools.deepSleepWiloc(86460)

        except BaseException as e:
            checkError("Error on scheduler", e)
 def _run(self, word, index, stack):
     if index == len(word):
         return stack.empty()
     symbol = word[index]
     for transition in self.transitions:
         t, V, S = transition
         if t == symbol and stack.head(V):
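             # copy the stack so each nondeterministic branch explores its own configuration:
             # pop the matched head V, push the replacement S, then consume the next input symbol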
             s = Stack(stack.items[::-1])
             s.pop()
             s.push(S)
             debug(index, transition, stack)
             if self._run(word, index + 1, s):
                 return True
     return False
Example #33
def read_content(folder_items):
    url_list = []
    for folder_item in folder_items:
        url_date_added = preset.empty
        url_date_modified = preset.empty
        url_guid = preset.empty
        url_item_id = preset.empty
        url_last_visited = preset.empty
        url_name = preset.empty
        url_sync_transaction_version = preset.empty
        url_item_type = preset.empty
        url_address = preset.empty
        url_icon = preset.empty
        for item in folder_item:
            if item == preset.children:
                # recurse into nested folders and collect their URLs as well
                url_list.extend(read_content(folder_item[item]))
            elif item == preset.meta_info:
                for element in folder_item[item]:
                    if element == preset.last_visited:
                        url_last_visited = folder_item[item][element]
            elif item == preset.date_added:
                url_date_added = folder_item[item]
            elif item == preset.date_modified:
                url_date_modified = folder_item[item]
            elif item == preset.guid:
                url_guid = folder_item[item]
            elif item == preset.icon:
                url_icon = folder_item[item]
            elif item == preset.item_id:
                url_item_id = folder_item[item]
            elif item == preset.item_name:
                url_name = folder_item[item]
            elif item == preset.sync_transaction_version:
                url_sync_transaction_version = folder_item[item]
            elif item == preset.item_type:
                url_item_type = folder_item[item]
            elif item == preset.url:
                url_address = folder_item[item]
            else:
                tools.debug(preset.message["warning"] + str(item))
        url_data = (url_guid, utils.to_number(url_item_id),
                    utils.to_number(url_sync_transaction_version),
                    url_item_type, utils.to_date(url_date_added),
                    utils.to_date(url_date_modified),
                    utils.to_date(url_last_visited), url_name,
                    htmlSupport.clean_url(url_address), url_address, url_icon)
        parsed_url = htmlSupport.parse_url(url_address)
        url_list.append(url_data + parsed_url)
    return url_list
Example #34
def mqtt_connect():
    global client
    try:
        tools.debug("Connecting MQTT", "v")
        client = MQTTClient("gtw001_" + str(int(utime.time())),
                            globalVars.mqtt_url,
                            user=globalVars.mqtt_user,
                            password=globalVars.mqtt_psw,
                            port=1883)
        client.set_callback(response_callback)
        client.connect()
        client.subscribe(topic=globalVars.mqtt_topic)
        return client
    except BaseException as e:
        checkError("Error connecting to MQTT", e)
Example #35
    def _zoook_sale_shop(self, cr, uid, callback, context=None):
        """
        Sale Shop Schedules
        """
        if context is None:
            context = {}

        ids = self.pool.get('sale.shop').search(cr, uid, [('zoook_shop', '=', True),('zoook_automatic_export', '=', True)], context=context)

        if ids:
            callback(cr, uid, ids, context=context)

        tools.debug(callback)
        tools.debug(ids)
        return True
Example #36
    def _zoook_sale_shop(self, cr, uid, callback, context=None):
        """
        Sale Shop Schedules
        """
        if context is None:
            context = {}

        ids = self.pool.get('sale.shop').search(cr, uid, [('zoook_shop', '=', True),('zoook_automatic_export', '=', True)], context=context)

        if ids:
            callback(cr, uid, ids, context=context)

        tools.debug(callback)
        tools.debug(ids)
        return True
Example #37
 def export_products(self, cr, uid, ids, context):
     shop_ids = self.pool.get('sale.shop').search(cr, uid, [])
     for inst in self.browse(cr, uid, ids, context):
         for shop in self.pool.get('sale.shop').browse(
                 cr, uid, shop_ids, context):
             context['conn_obj'] = self.external_connection(cr, uid, inst)
             #shop.export_catalog
             tools.debug((
                 cr,
                 uid,
                 shop,
                 context,
             ))
             shop.export_products(cr, uid, shop, context)
     return True
Example #38
def bluetooth_scanner():
    global ble_thread
    try:
        while True:
            try:
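                # RSSI_NEAR_THRESHOLD is stored as an unsigned hex byte; int(x, 16) - 256 recovers the signed dBm value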
                tools.debug('BLE - Starting BLE scanner, RSSI: ' + str(int(globalVars.RSSI_NEAR_THRESHOLD,16) - 256) + " - RTC: " + str(int(utime.time())) + " - REFRESH: " + str(globalVars.MAX_REFRESH_TIME) + " - SCAN: " + str(int(globalVars.BLE_SCAN_PERIOD)) + " - SLEEP: " + str(int(globalVars.STANDBY_PERIOD)) + " - DEBUG: " + str(globalVars.debug_cc) ,'v')
                ble_thread = True
                bluetooth = Bluetooth()
                bluetooth.tx_power(Bluetooth.TX_PWR_SCAN, Bluetooth.TX_PWR_P9)
                bluetooth.start_scan(int(globalVars.BLE_SCAN_PERIOD))
                while bluetooth.isscanning():
                    adv = bluetooth.get_adv()
                    if adv:
                        if 'WILOC_01' in str(bluetooth.resolve_adv_data(adv.data, Bluetooth.ADV_NAME_CMPL)):
                            data_raw = str(ubinascii.hexlify(adv.data).decode('utf-8'))
                            if globalVars.MAC_TYPE == "LORA":
                                mac_proc = data_raw[34:50] # LoRa MAC
                            elif globalVars.MAC_TYPE == "BLE":
                                mac_proc = str(ubinascii.hexlify(adv.mac).decode('utf-8')) # MAC BLE
                            tools.debug('Name: '+ str(bluetooth.resolve_adv_data(adv.data, Bluetooth.ADV_NAME_CMPL)) +' MAC: '+ str(mac_proc)+ ' RSSI: ' + str(adv.rssi) + ' DT: '+ str(int(utime.time())) +' RAW: ' + data_raw,'vvv')
                            if mac_proc not in globalVars.mac_scanned:
                                tools.debug('Step 1 - New device detected: ' + str(mac_proc),'vv')
                                globalVars.mac_scanned.append(mac_proc)
                            if adv.rssi >= (int(globalVars.RSSI_NEAR_THRESHOLD,16) - 256):  
                                wilocMain.checkListType(str(mac_proc), globalVars.ALARM_LIST_TYPE)
                            globalVars.scanned_frames.append(Device(addr=mac_proc,rssi=adv.rssi, raw=data_raw))
                        elif 'WIL_C01' in str(bluetooth.resolve_adv_data(adv.data, Bluetooth.ADV_NAME_CMPL)):
                            data_raw = str(ubinascii.hexlify(adv.data).decode('utf-8'))
                            mac_proc = str(ubinascii.hexlify(adv.mac).decode('utf-8')) # MAC BLE
                            #tools.debug('BLE Name: '+ str(bluetooth.resolve_adv_data(adv.data, Bluetooth.ADV_NAME_CMPL)) +' - BLE MAC: '+ str(mac_proc)+ ' - RSSI: ' + str(adv.rssi) + ' - DT: '+ str(int(utime.time())) +' - RAW: ' + data_raw,'vvv')
                            if mac_proc not in globalVars.mac_scanned:
                                tools.debug('Step 1 - BLE New device detected: ' + str(mac_proc),'vv')
                                globalVars.mac_scanned.append(mac_proc)
                            if adv.rssi >= (int(globalVars.RSSI_NEAR_THRESHOLD,16) - 256):  
                                wilocMain.checkListType(str(mac_proc), globalVars.ALARM_LIST_TYPE)
                            globalVars.scanned_frames.append(Device(addr=mac_proc,rssi=adv.rssi, raw=data_raw))

                tools.debug('BLE - Stopping BLE scanner ' + str(int(utime.time())),'v')
                tools.sleepWiloc(int(globalVars.STANDBY_PERIOD))
            except BaseException as ee1:
                checkError("Error scanning Bluetooth",ee1)
                tools.sleepWiloc(int(globalVars.STANDBY_PERIOD))
    except BaseException as e:
        checkError("Error thread Bluetooth", e)
        ble_thread = False
    finally:
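        # keep the scanner alive: whatever ends the loop, relaunch this function in a fresh thread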
        ble_thread = False
        _thread.start_new_thread(bluetooth_scanner,())
Example #39
    def index_query_test(self):
        """
        Check that a secondary index query times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])  # see above for explanation
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test3 (
                id int PRIMARY KEY,
                col int,
                val text
            );
        """)

        session.execute("CREATE INDEX ON test3 (col)")

        for i in xrange(500):
            session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))

        mark = node.mark_log()
        assert_unavailable(lambda c: debug(c.execute("SELECT * from test3 WHERE col < 50 ALLOW FILTERING")), session)
        node.watch_log_for("<SELECT \* FROM ks.test3 WHERE col < 50 (.*)> timed out", from_mark=mark, timeout=30)
    def local_query_test(self):
        """
        Check that a query running on the local coordinator node times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
        # iteration of non system queries, so that these queries take much longer to complete,
        # see ReadCommand.withStateTracking()
        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test1 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in xrange(500):
            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node.mark_log()
        assert_unavailable(lambda c: debug(c.execute("SELECT * from test1")), session)
        node.watch_log_for("<SELECT \* FROM ks.test1 (.*)> timed out", from_mark=mark, timeout=30)
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, jvm_args=["-Djoin_ring=false"])  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.test.read_iteration_delay_ms=100"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in xrange(500):
            session.execute("INSERT INTO test2 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node2.mark_log()
        assert_unavailable(lambda c: debug(c.execute("SELECT * from test2")), session)
        node2.watch_log_for("<SELECT \* FROM ks.test2 (.*)> timed out", from_mark=mark, timeout=30)
    def materialized_view_test(self):
        """
        Check that a materialized view query times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                          "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test4 (
                id int PRIMARY KEY,
                col int,
                val text
            );
        """)

        session.execute(("CREATE MATERIALIZED VIEW mv AS SELECT * FROM test4 "
                         "WHERE col IS NOT NULL AND id IS NOT NULL PRIMARY KEY (col, id)"))

        for i in xrange(50):
            session.execute("INSERT INTO test4 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))

        mark = node2.mark_log()
        statement = SimpleStatement("SELECT * FROM mv WHERE col = 50", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)
        node2.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
    def index_query_test(self):
        """
        Check that a secondary index query times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(1).start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                                        "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test3 (
                id int PRIMARY KEY,
                col int,
                val text
            );
        """)

        session.execute("CREATE INDEX ON test3 (col)")

        for i in xrange(500):
            session.execute("INSERT INTO test3 (id, col, val) VALUES ({}, {}, 'foo')".format(i, i // 10))

        mark = node.mark_log()
        statement = session.prepare("SELECT * from test3 WHERE col < ? ALLOW FILTERING")
        statement.consistency_level = ConsistencyLevel.ONE
        statement.retry_policy = FallthroughRetryPolicy()
        assert_unavailable(lambda c: debug(c.execute(statement, [50])), session)
        node.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True, jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                                          "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int,
                col int,
                val text,
                PRIMARY KEY(id, col)
            );
        """)

        for i in xrange(500):
            for j in xrange(10):
                session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))

        mark = node2.mark_log()

        statement = SimpleStatement("SELECT * from test2", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id = 1", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id IN (1, 10,  20) AND col < 10", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING", consistency_level=ConsistencyLevel.ONE, retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        node2.watch_log_for("Some operations timed out", from_mark=mark, timeout=60)
	def execute(self, cr, uid, ids, context=None):

		res = super(WizardMultiChartsAccounts, self).execute(cr, uid, ids, context)
		
		#getting the wizard object
		obj_self = self.browse(cr,uid,ids[0])
		
		#getting the id of the company from the wizard
		id_company = obj_self.company_id.id

		#getting the chart_template_id from the wizard object
		id_char_template = obj_self.chart_template_id
		
		#getting the list of journal_templates
		lista = self.pool.get('account.journal.template').search(cr, uid, [('chart_template_id','=',id_char_template)])

		for x in lista:
			obj = self.pool.get('account.journal.template').browse(cr,uid,x)
			vals = {
				'name' : obj.name,
				'code' : obj.code,
				'type' : obj.type,
				'currency' : obj.currency,
				'allow_date' : obj.allow_date,
				'centralisation' : obj.centralisation,
				'entry_posted' : obj.entry_posted,
				'update_posted' : obj.update_posted,
				'group_invoice_lines' : obj.group_invoice_lines,
				'sequence_id' : obj.sequence_id.id,
				'view_id' : obj.view_id.id,
				'default_credit_account_id': obj.default_credit_account_id.id,
				'default_debit_account_id' : obj.default_debit_account_id.id,
				'company_id': id_company,
			}
			debug(obj.view_id.id)
			journal = self.pool.get('account.journal')
			journal.create(cr, uid, vals, context=context)

		return res
Example #46
    def updateCachedClones(self, integration = True, stable = True, verbose = True):
        """Update the cached clone of the repositories"""

        caches = []
        remote = 'origin'

        if integration:
            caches.append(os.path.join(self.cache, 'integration.git'))
        if stable:
            caches.append(os.path.join(self.cache, 'moodle.git'))

        for cache in caches:
            if not os.path.isdir(cache):
                continue

            repo = git.Git(cache, C.get('git'))

            verbose and debug('Working on %s...' % cache)
            verbose and debug('Fetching %s' % remote)
            if not repo.fetch(remote):
                raise Exception('Could not fetch %s in repository %s' % (remote, cache))

            remotebranches = repo.remoteBranches(remote)
            for hash, branch in remotebranches:
                verbose and debug('Updating branch %s' % branch)
                track = '%s/%s' % (remote, branch)
                if not repo.hasBranch(branch) and not repo.createBranch(branch, track = track):
                    raise Exception('Could not create branch %s tracking %s in repository %s' % (branch, track, cache))

                if not repo.checkout(branch):
                    raise Exception('Error while checking out branch %s in repository %s' % (branch, cache))

                if not repo.reset(to = track, hard = True):
                    raise Exception('Could not hard reset to %s in repository %s' % (branch, cache))

            verbose and debug('')

        return True
Example #47
	def change_currency(self, cr, uid, ids, context=None):
		obj_so = self.pool.get('sale.order')
		obj_so_line = self.pool.get('sale.order.line')
		obj_currency = self.pool.get('res.currency')
		
		data = self.read(cr, uid, ids)[0]
		new_pricelist_id = data['pricelist_id']
		
		sorder = obj_so.browse(cr, uid, context['active_id'], context=context)
		#new_pricelist_id = sorder.pricelist_id and sorder.pricelist_id.id or False
		
		new_currency = self.pool.get('product.pricelist').browse(cr,uid,new_pricelist_id).currency_id.id
		
		if sorder.pricelist_id.currency_id.id == new_currency:
			return {}
		rate = obj_currency.browse(cr, uid, new_currency).rate
		debug(sorder.order_line)
		for line in sorder.order_line:
			new_price = 0
			if sorder.company_id.currency_id.id == sorder.pricelist_id.currency_id.id:
				new_price = line.price_unit * rate
				if new_price <= 0:
					raise osv.except_osv(_('Error'), _('New currency is not configured properly!'))

			if sorder.company_id.currency_id.id != sorder.pricelist_id.currency_id.id and sorder.company_id.currency_id.id == new_currency:
				old_rate = sorder.pricelist_id.currency_id.rate
				if old_rate <= 0:
					raise osv.except_osv(_('Error'), _('Current currency is not configured properly!'))
				new_price = line.price_unit / old_rate

			if sorder.company_id.currency_id.id != sorder.pricelist_id.currency_id.id and sorder.company_id.currency_id.id != new_currency:
				old_rate = sorder.pricelist_id.currency_id.rate
				if old_rate <= 0:
					raise osv.except_osv(_('Error'), _('Current currency is not configured properly!'))
				new_price = (line.price_unit / old_rate ) * rate
			obj_so_line.write(cr, uid, [line.id], {'price_unit': new_price})
		obj_so.write(cr, uid, [sorder.id], {'pricelist_id': new_pricelist_id})
		return {'type': 'ir.actions.act_window_close'}
Example #48
 def run(self):
     node = self.node
     mark = node.mark_log()
     try:
         out, err = node.nodetool("decommission")
         node.watch_log_for("DECOMMISSIONED", from_mark=mark)
         debug(out)
         debug(err)
     except NodetoolError as e:
         debug("Decommission failed with exception: " + str(e))
Example #49
    def install(self, dbname = None, engine = None, dataDir = None, fullname = None, dropDb = False):
        """Launch the install script of an Instance"""

        if self.isInstalled():
            raise Exception('Instance already installed!')

        if dataDir == None or not os.path.isdir(dataDir):
            raise Exception('Cannot install instance without knowing where the data directory is')
        if dbname == None:
            dbname = re.sub(r'[^a-zA-Z0-9]', '', self.identifier).lower()[:28]
        if engine == None:
            engine = C.get('defaultEngine')
        if fullname == None:
            fullname = self.identifier.replace('-', ' ').replace('_', ' ').title()
            fullname = fullname + ' ' + C.get('wording.%s' % engine)

        debug('Creating database...')
        db = DB(engine, C.get('db.%s' % engine))
        if db.dbexists(dbname):
            if dropDb:
                db.dropdb(dbname)
                db.createdb(dbname)
            else:
                raise Exception('Cannot install an instance on an existing database (%s)' % dbname)
        else:
            db.createdb(dbname)
        db.selectdb(dbname)

        # Defining wwwroot.
        wwwroot = 'http://%s/' % C.get('host')
        if C.get('path') != '' and C.get('path') != None:
            wwwroot = wwwroot + C.get('path') + '/'
        wwwroot = wwwroot + self.identifier

        debug('Installing %s...' % self.identifier)
        cli = 'admin/cli/install.php'
        params = (wwwroot, dataDir, engine, dbname, C.get('db.%s.user' % engine), C.get('db.%s.passwd' % engine), C.get('db.%s.host' % engine), fullname, self.identifier, C.get('login'), C.get('passwd'))
        args = '--wwwroot="%s" --dataroot="%s" --dbtype="%s" --dbname="%s" --dbuser="******" --dbpass="******" --dbhost="%s" --fullname="%s" --shortname="%s" --adminuser="******" --adminpass="******" --allow-unstable --agree-license --non-interactive' % params
        result = self.cli(cli, args, stdout=None, stderr=None)
        if result[0] != 0:
            raise Exception('Error while running the install, please manually fix the problem.\n- Command was: %s %s %s' % (C.get('php'), cli, args))

        configFile = os.path.join(self.path, 'config.php')
        os.chmod(configFile, 0666)
        try:
            self.addConfig('sessioncookiepath', '/%s/' % self.identifier)
        except Exception as e:
            debug('Could not append $CFG->sessioncookiepath to config.php')

        self.reload()
Example #50
    def create(self, M):
        """Creates a new backup of M"""

        if M.isInstalled() and M.get('dbtype') != 'mysqli':
            raise BackupDBEngineNotSupported('Cannot backup database engine %s' % M.get('dbtype'))

        name = M.get('identifier')
        if name == None:
            raise Exception('Cannot backup instance without identifier!')

        now = int(time.time())
        backup_identifier = self.createIdentifier(name)
        Wp = Workplace()

        # Copy whole directory, shutil will create topath
        topath = os.path.join(self.path, backup_identifier)
        path = Wp.getPath(name)
        debug('Copying instance directory')
        copy_tree(path, topath, preserve_symlinks = 1)

        # Dump the whole database
        if M.isInstalled():
            debug('Dumping database')
            dumpto = os.path.join(topath, sqlfile)
            fd = open(dumpto, 'w')
            M.dbo().selectdb(M.get('dbname'))
            M.dbo().dump(fd)
        else:
            debug('Instance not installed. Do not dump database.')

        # Create a JSON file containing all known information
        debug('Saving instance information')
        jsonto = os.path.join(topath, jason)
        info = M.info()
        info['backup_origin'] = path
        info['backup_identifier'] = backup_identifier
        info['backup_time'] = now
        json.dump(info, open(jsonto, 'w'), sort_keys = True, indent = 4)

        return True
Example #51
    def local_query_test(self):
        """
        Check that a query running on the local coordinator node times out:

        - set a 1-second read timeout
        - start the cluster with read_iteration_delay set to 1.5 seconds
            - (this will cause read queries to take longer than the read timeout)
        - CREATE and INSERT into a table
        - SELECT * from the table using a retry policy that never retries, and assert it times out

        @jira_ticket CASSANDRA-7392
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        # cassandra.test.read_iteration_delay_ms causes the state tracking read iterators
        # introduced by CASSANDRA-7392 to pause by the specified amount of milliseconds during each
        # iteration of non system queries, so that these queries take much longer to complete,
        # see ReadCommand.withStateTracking()
        cluster.populate(1).start(wait_for_binary_proto=True,
                                  jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                                            "-Dcassandra.test.read_iteration_delay_ms=1500"])
        node = cluster.nodelist()[0]
        session = self.patient_cql_connection(node)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test1 (
                id int PRIMARY KEY,
                val text
            );
        """)

        for i in range(500):
            session.execute("INSERT INTO test1 (id, val) VALUES ({}, 'foo')".format(i))

        mark = node.mark_log()
        statement = SimpleStatement("SELECT * from test1",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)
        node.watch_log_for("operations timed out", from_mark=mark, timeout=60)
    def decommissioned_node_cant_rejoin_test(self):
        '''
        @jira_ticket CASSANDRA-8801

        Test that a decommissioned node can't rejoin the cluster by:

        - creating a cluster,
        - decommissioning a node, and
        - asserting that the "decommissioned node won't rejoin" error is in the
        logs for that node and
        - asserting that the node is not running.
        '''
        rejoin_err = 'This node was decommissioned and will not rejoin the ring'
        try:
            self.ignore_log_patterns = list(self.ignore_log_patterns)
        except AttributeError:
            self.ignore_log_patterns = []
        self.ignore_log_patterns.append(rejoin_err)

        self.cluster.populate(3).start(wait_for_binary_proto=True)
        node1, node2, node3 = self.cluster.nodelist()

        debug('decommissioning...')
        node3.decommission()
        debug('stopping...')
        node3.stop()
        debug('attempting restart...')
        node3.start(wait_other_notice=False)
        try:
            # usually takes 3 seconds, so give it a generous 15
            node3.watch_log_for(rejoin_err, timeout=15)
        except TimeoutError:
            # TimeoutError is not very helpful to the reader of the test output;
            # let that pass and move on to string assertion below
            pass

        self.assertIn(rejoin_err,
                      '\n'.join(['\n'.join(err_list)
                                 for err_list in node3.grep_log_for_errors()]))

        # Give the node some time to shut down once it has detected
        # its invalid state. If it doesn't shut down in the 30 seconds,
        # consider filing a bug. It shouldn't take more than 10, in most cases.
        start = time.time()
        while start + 30 > time.time() and node3.is_running():
            time.sleep(1)

        self.assertFalse(node3.is_running())
    def decommissioned_node_cant_rejoin_test(self):
        '''
        @jira_ticket CASSANDRA-8801

        Test that a decommissioned node can't rejoin the cluster by:

        - creating a cluster,
        - decommissioning a node, and
        - asserting that the "decommissioned node won't rejoin" error is in the
        logs for that node and
        - asserting that the node is not running.
        '''
        rejoin_err = 'This node was decommissioned and will not rejoin the ring'
        try:
            self.ignore_log_patterns = list(self.ignore_log_patterns)
        except AttributeError:
            self.ignore_log_patterns = []
        self.ignore_log_patterns.append(rejoin_err)

        self.cluster.populate(3).start(wait_for_binary_proto=True)
        [node1, node2, node3] = self.cluster.nodelist()

        debug('decommissioning...')
        node3.decommission()
        debug('stopping...')
        node3.stop()
        debug('attempting restart...')
        node3.start()
        try:
            # usually takes 3 seconds, so give it a generous 15
            node3.watch_log_for(rejoin_err, timeout=15)
        except TimeoutError:
            # TimeoutError is not very helpful to the reader of the test output;
            # let that pass and move on to string assertion below
            pass

        self.assertIn(rejoin_err,
                      '\n'.join(['\n'.join(err_list)
                                 for err_list in node3.grep_log_for_errors()]))
        self.assertFalse(node3.is_running())
    def show_status(self, node):
        out, err = node.nodetool('status')
        debug("Status as reported by node {}".format(node.address()))
        debug(out)
        return out
    def begin_test(self):
        """
        @jira_ticket CASSANDRA-7436
        This test measures the values of MBeans before and after running a load. We expect
        each value to change in a certain way, expressed as 'MBeanEqual', 'MBeanDecrement',
        'MBeanIncrement', or a constant. If a value does not reflect its expected change,
        the test raises an AssertionError.

        @jira_ticket CASSANDRA-9448
        This test also covers all metrics that were renamed by CASSANDRA-9448; on 3.0+
        we additionally check that the old alias names report the same values as the new names.
        """
        cluster = self.cluster
        cluster.populate(1)
        node = cluster.nodelist()[0]
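        # Re-enable JVM perf shared memory so the Jolokia agent can attach
        # to the node and read MBean values over JMX.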
        remove_perf_disable_shared_mem(node)
        cluster.start(wait_for_binary_proto=True)
        session = self.patient_cql_connection(node)
        self.create_ks(session, 'keyspace1', 1)
        session.execute("""
                        CREATE TABLE keyspace1.counter1 (
                            key blob,
                            column1 ascii,
                            value counter,
                            PRIMARY KEY (key, column1)
                        ) WITH COMPACT STORAGE
                            AND CLUSTERING ORDER BY (column1 ASC)
                            AND caching = {'keys': 'ALL', 'rows_per_partition': 'NONE'}
                            AND comment = ''
                            AND compaction = {'min_threshold': '4', 'class': 'org.apache.cassandra.db.compaction.SizeTieredCompactionStrategy', 'max_threshold': '32'}
                            AND compression = {}
                            AND dclocal_read_repair_chance = 0.1
                            AND default_time_to_live = 0
                            AND gc_grace_seconds = 864000
                            AND max_index_interval = 2048
                            AND memtable_flush_period_in_ms = 0
                            AND min_index_interval = 128
                            AND read_repair_chance = 0.0
                            AND speculative_retry = 'NONE';
                        """)

        with JolokiaAgent(node) as jmx:
            debug("Cluster version {}".format(cluster.version()))
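            # Pre-3.0 clusters only expose the old metric names; on 3.0+ the
            # renamed metrics are used and the old names remain as aliases.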
            if cluster.version() <= '2.2.X':
                mbean_values = MBEAN_VALUES_PRE('keyspace1', 'counter1')
                mbean_aliases = None
            else:
                mbean_values = MBEAN_VALUES_POST('keyspace1', 'counter1')
                mbean_aliases = MBEAN_VALUES_PRE('keyspace1', 'counter1')

            before = []
            for package, bean, bean_args, attribute, expected in mbean_values:
                mbean = make_mbean(package, type=bean, **bean_args)
                debug(mbean)
                before.append(jmx.read_attribute(mbean, attribute))

            if mbean_aliases:
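                # Each pre-CASSANDRA-9448 alias must report the same value as
                # its renamed counterpart.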
                alias_counter = 0
                for package, bean, bean_args, attribute, expected in mbean_aliases:
                    mbean = make_mbean(package, type=bean, **bean_args)
                    debug(mbean)
                    self.assertEqual(before[alias_counter], jmx.read_attribute(mbean, attribute))
                    alias_counter += 1

            node.stress(['write', 'n=100K'])
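            # The stress run above applies the write load that should move the
            # load-sensitive MBean values between the two snapshots.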

            errors = []
            after = []
            attr_counter = 0
            for package, bean, bean_args, attribute, expected in mbean_values:
                mbean = make_mbean(package, type=bean, **bean_args)
                a_value = jmx.read_attribute(mbean, attribute)
                after.append(a_value)
                b_value = before[attr_counter]
                if expected == 'MBeanIncrement':
                    if b_value >= a_value:
                        errors.append("{} has a before value of {} and after value of {} and did not increment\n".format(mbean, b_value, a_value))
                elif expected == 'MBeanDecrement':
                    if b_value <= a_value:
                        errors.append("{} has a before value of {} and after value of {} and did not decrement\n".format(mbean, b_value, a_value))
                elif expected == 'MBeanEqual':
                    if b_value != a_value:
                        errors.append("{} has a before value of {} and after value of {}, which are not equal\n".format(mbean, b_value, a_value))
                elif expected == 'MBeanZero':
                    if not (b_value == 0 and a_value == 0):
                        errors.append("{} has a before value of {} and after value of {} and they do not equal zero\n".format(mbean, b_value, a_value))
                # If expected is none of the above, it is a literal value the attribute must match.
                else:
                    if a_value != expected:
                        errors.append("{} has an after value of {} which does not equal {}\n".format(mbean, a_value, expected))
                attr_counter += 1

            self.assertEqual(len(errors), 0, "\n" + "\n".join(errors))

            if mbean_aliases:
                alias_counter = 0
                for package, bean, bean_args, attribute, expected in mbean_aliases:
                    mbean = make_mbean(package, type=bean, **bean_args)
                    self.assertEqual(after[alias_counter], jmx.read_attribute(mbean, attribute))
                    alias_counter += 1
Exemple #56
0
addon_name    = addon.getAddonInfo('name')
addon_version = addon.getAddonInfo('version')

# Source server
if addon.getSetting('av_source_server') == "0":
  parserJsonUrl = "https://avezy.tk/json.php"
elif addon.getSetting('av_source_server') == "1":
  parserJsonUrl = "http://arenavision.esy.es/json.php"
else:
  parserJsonUrl = "https://avezy.tk/json.php"

# Devel
#parserJsonUrl = "http://localhost/arena/json.php"

# Log the selected server for debugging
tools.debug("arenavisionezy Servidor: " + addon.getSetting('av_source_server'))
tools.debug("arenavisionezy Json: " + parserJsonUrl)

# Entry point
def run():
    #plugintools.log("arenavisionezy.run")

    # Get params
    params = plugintools.get_params()
    plugintools.log("arenavisionezy.run " + repr(params))

    if params.get("action") is None:
        plugintools.log("arenavisionezy.run no action given")
        listado_categorias(params)
    else:
        action = params.get("action")
Exemple #57
0
    def remote_query_test(self):
        """
        Check that a query running on a node other than the coordinator times out:

        - populate the cluster with 2 nodes
        - set a 1-second read timeout
        - start one node without having it join the ring
        - start the other node with read_iteration_delay set to 1.5 seconds
            - (this will cause read queries to take longer than the read timeout)
        - CREATE a table
        - INSERT 5000 rows on a session on the node that is not a member of the ring
        - run SELECT statements and assert they fail
        # TODO refactor SELECT statements:
        #        - run the statements in a loop to reduce duplication
        #        - watch the log after each query
        #        - assert we raise the right error
        """
        cluster = self.cluster
        cluster.set_configuration_options(values={'read_request_timeout_in_ms': 1000})

        cluster.populate(2)
        node1, node2 = cluster.nodelist()

        node1.start(wait_for_binary_proto=True, join_ring=False)  # ensure other node executes queries
        node2.start(wait_for_binary_proto=True,
                    jvm_args=["-Dcassandra.monitoring_check_interval_ms=50",
                              "-Dcassandra.test.read_iteration_delay_ms=1500"])  # see above for explanation

        session = self.patient_exclusive_cql_connection(node1)

        self.create_ks(session, 'ks', 1)
        session.execute("""
            CREATE TABLE test2 (
                id int,
                col int,
                val text,
                PRIMARY KEY(id, col)
            );
        """)

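        # 500 partitions x 10 rows each gives the delayed read iterators
        # enough work that every scan exceeds the 1-second timeout.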
        for i, j in itertools.product(range(500), range(10)):
            session.execute("INSERT INTO test2 (id, col, val) VALUES ({}, {}, 'foo')".format(i, j))

        mark = node2.mark_log()

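        # Each SELECT below exercises a different read path (full scan, single
        # partition, multi-partition IN, filtered scan); all should time out.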
        statement = SimpleStatement("SELECT * from test2",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id = 1",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where id IN (1, 10,  20) AND col < 10",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        statement = SimpleStatement("SELECT * from test2 where col > 5 ALLOW FILTERING",
                                    consistency_level=ConsistencyLevel.ONE,
                                    retry_policy=FallthroughRetryPolicy())
        assert_unavailable(lambda c: debug(c.execute(statement)), session)

        node2.watch_log_for("operations timed out", from_mark=mark, timeout=60)