Example 1
    def post2fmpi(self):
        # connection parameters to FMPI
        url = self.company_id.api_host.strip()
        db = self.company_id.fmpi_pgn
        username = self.company_id.api_user
        password = self.company_id.api_pass
        # attempt to connect
        logger.info("Connecting to " + url)
        common = xmlrpclib.ServerProxy('{}/xmlrpc/2/common'.format(url))
        uid = common.authenticate(db, username, password, {})
        if not uid:
            raise exceptions.except_orm(
                _('Remote Authentication Failed'),
                _(url + " failed to authenticate " + username))
        models = xmlrpclib.ServerProxy('{}/xmlrpc/2/object'.format(url))
        if not models:
            raise exceptions.except_orm(
                _('Models: Remote Authentication Failed'),
                _(url + " failed to authenticate " + username))
        # sync from dealer-to-fmpi
        fmpi = self.env['fmpi.service.history'].search([('fmpi_history_id',
                                                         '=', False)])
        if fmpi:
            for line in fmpi:
                logger.info("Posting ID#" + str(line.id))
                new_id = models.execute_kw(
                    db, uid, password, 'fmpi.service.history', 'create',
                    [{
                        'dealer_id': line.dealer_id.id,
                        'name': line.name,
                        'parts_and_jobs': line.parts_and_jobs,
                        'customer_name': line.customer_name,
                        'charged_to': line.charged_to,
                        'run_km': line.run_km,
                        'one_fu_id': line.one_fu_id.id,
                        'confirmation_date': line.confirmation_date
                    }])
                if not new_id:
                    raise exceptions.except_orm(
                        _('Failed to post to FMPI'),
                        _("Posting of transaction to FMPI has failed"))
                line.write({'fmpi_history_id': new_id})
            # sync from fmpi-to-dealer

#     d_ids = fmpi.search([('fmpi_history_id', '!=', False)])
#     if d_ids:
#       aID = [d_id.fmpi_history_id for d_id in d_ids]
#       f_ids = models.execute_kw(db, uid, password, 'fmpi.service.history', 'search',
#         [[['id', 'not in', aID]]])
#       # 'read' returns a list of dicts; many2one fields come back as [id, display_name]
#       records = models.execute_kw(db, uid, password, 'fmpi.service.history', 'read', [f_ids])
#       for newH in records:
#         fmpi.create({
#           'dealer_id': newH['dealer_id'] and newH['dealer_id'][0],
#           'name': newH['name'],
#           'parts_and_jobs': newH['parts_and_jobs'],
#           'customer_name': newH['customer_name'],
#           'charged_to': newH['charged_to'],
#           'run_km': newH['run_km'],
#           'one_fu_id': newH['one_fu_id'] and newH['one_fu_id'][0],
#           'confirmation_date': newH['confirmation_date'],
#           'fmpi_history_id': newH['id']})
        return
Example 2
 def test_path3(self):
     p = xmlrpclib.ServerProxy(URL + "/is/broken")
     self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
Example 3
 def test_transport(self):
     t = xmlrpclib.Transport()
     p = xmlrpclib.ServerProxy(self.url, transport=t)
     self.assertEqual(p('transport'), t)
Example 4
 def test_allow_dotted_names_true(self):
     # XXX also need allow_dotted_names_false test.
     server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
     data = server.Fixture.getData()
     self.assertEqual(data, '42')
Example 5
 def test_path1(self):
     p = xmlrpclib.ServerProxy(URL + "/foo")
     self.assertEqual(p.pow(6, 8), 6**8)
     self.assertRaises(xmlrpclib.Fault, p.add, 6, 8)
Example 6
    def schedule_backup(self):
        conf_ids = self.search([])

        for rec in conf_ids:
            db_list = self.get_db_list(rec.host, rec.port)

            if rec.name in db_list:
                if not os.path.isdir(rec.folder):
                    os.makedirs(rec.folder)
                # Create name for dumpfile.
                bkp_file = '%s_%s.%s' % (time.strftime('%Y_%m_%d_%H_%M_%S'),
                                         rec.name, rec.backup_type)
                file_path = os.path.join(rec.folder, bkp_file)
                uri = 'http://' + rec.host + ':' + rec.port
                conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
                try:
                    # try to back up the database and write it to disk
                    with open(file_path, 'wb') as fp:
                        odoo.service.db.dump_db(rec.name, fp, rec.backup_type)
                except Exception as error:
                    _logger.debug(
                        "Couldn't back up database %s on server http://%s:%s"
                        % (rec.name, rec.host, rec.port))
                    _logger.debug("Exact error from the exception: " +
                                  str(error))
                    continue

            else:
                _logger.debug("database %s doesn't exist on http://%s:%s" %
                              (rec.name, rec.host, rec.port))

            # Check if user wants to write to SFTP or not.
            if rec.sftp_write:
                try:
                    # Store all values in variables
                    folder = rec.folder
                    pathToWriteTo = rec.sftp_path
                    ipHost = rec.sftp_host
                    portHost = rec.sftp_port
                    usernameLogin = rec.sftp_user
                    passwordLogin = rec.sftp_password
                    _logger.debug('sftp remote path: %s' % pathToWriteTo)

                    try:
                        s = paramiko.SSHClient()
                        s.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                        s.connect(ipHost,
                                  portHost,
                                  usernameLogin,
                                  passwordLogin,
                                  timeout=20)
                        sftp = s.open_sftp()
                    except Exception as error:
                        _logger.critical(
                            'Error connecting to remote server! Error: ' +
                            str(error))
                        raise

                    try:
                        sftp.chdir(pathToWriteTo)
                    except IOError:
                        # Create directory and subdirs if they do not exist.
                        currentDir = ''
                        for dirElement in pathToWriteTo.split('/'):
                            currentDir += dirElement + '/'
                            try:
                                sftp.chdir(currentDir)
                            except IOError:
                                _logger.info(
                                    '(Part of the) path didn\'t exist. Creating it now at '
                                    + currentDir)
                                # Make directory and then navigate into it
                                sftp.mkdir(currentDir, mode=0o777)
                                sftp.chdir(currentDir)
                    sftp.chdir(pathToWriteTo)
                    # Loop over all files in the directory.
                    for f in os.listdir(folder):
                        if rec.name in f:
                            fullpath = os.path.join(folder, f)
                            if os.path.isfile(fullpath):
                                try:
                                    sftp.stat(os.path.join(pathToWriteTo, f))
                                    _logger.debug(
                                        'File %s already exists on the remote SFTP server ------ skipped'
                                        % fullpath)
                                # This means the file does not exist (remote) yet!
                                except IOError:
                                    try:
                                        # sftp.put(fullpath, pathToWriteTo)
                                        sftp.put(
                                            fullpath,
                                            os.path.join(pathToWriteTo, f))
                                        _logger.info(
                                            'Copying File %s ------ success' %
                                            fullpath)
                                    except Exception as err:
                                        _logger.critical(
                                            'We couldn\'t write the file to the remote server. Error: '
                                            + str(err))

                    # Navigate into the correct folder.
                    sftp.chdir(pathToWriteTo)

                    # Loop over all files in the directory from the back-ups.
                    # We will check the creation date of every back-up.
                    for file in sftp.listdir(pathToWriteTo):
                        if rec.name in file:
                            # Get the full path
                            fullpath = os.path.join(pathToWriteTo, file)
                            # Get the timestamp from the file on the external server
                            timestamp = sftp.stat(fullpath).st_atime
                            createtime = datetime.datetime.fromtimestamp(
                                timestamp)
                            now = datetime.datetime.now()
                            delta = now - createtime
                            # If the file is older than days_to_keep_sftp (the retention
                            # period the user filled in on the Odoo form), it will be removed.
                            if delta.days >= rec.days_to_keep_sftp:
                                # Only delete regular files, no directories!
                                # (SFTPClient has no isfile(); check st_mode via the stat module)
                                if stat.S_ISREG(sftp.stat(fullpath).st_mode) and (
                                        ".dump" in file or '.zip' in file):
                                    _logger.info(
                                        "Delete too old file from SFTP servers: "
                                        + file)
                                    sftp.unlink(file)
                    # Close the SFTP session.
                    sftp.close()
                except Exception as e:
                    _logger.debug(
                        'Exception! We couldn\'t back up to the SFTP server: ' + str(e))
                    # At this point the SFTP backup failed. We will now check if the user wants
                    # an e-mail notification about this.
                    if rec.send_mail_sftp_fail:
                        try:
                            ir_mail_server = self.env['ir.mail_server']
                            message = "Dear,\n\nThe backup for the server " + rec.host + " (IP: " + rec.sftp_host + ") failed.Please check the following details:\n\nIP address SFTP server: " + rec.sftp_host + "\nUsername: "******"\nPassword: "******"\n\nError details: " + tools.ustr(
                                e) + "\n\nWith kind regards"
                            msg = ir_mail_server.build_email(
                                "auto_backup@" + rec.name + ".com",
                                [rec.email_to_notify], "Backup from " +
                                rec.host + "(" + rec.sftp_host + ") failed",
                                message)
                            ir_mail_server.send_email(self._cr, self._uid, msg)
                        except Exception:
                            pass
            """
            Remove all old files (on local server) in case this is configured..
            """
            if rec.autoremove:
                folder = rec.folder
                # Loop over all files in the directory.
                for f in os.listdir(folder):
                    fullpath = os.path.join(folder, f)
                    # Only delete the ones which are from the current database
                    # (makes it possible to save different databases in the same folder)
                    if rec.name in fullpath:
                        timestamp = os.stat(fullpath).st_ctime
                        createtime = datetime.datetime.fromtimestamp(timestamp)
                        now = datetime.datetime.now()
                        delta = now - createtime
                        if delta.days >= rec.days_to_keep:
                            # Only delete files (which are .dump and .zip), no directories.
                            if os.path.isfile(fullpath) and (".dump" in f
                                                             or '.zip' in f):
                                _logger.info(
                                    "Delete local out-of-date file: " +
                                    fullpath)
                                os.remove(fullpath)
Example 7
def rpc() -> client.ServerProxy:
    return client.ServerProxy(
        "http://localhost",
        transport=UnixStreamTransport("/var/run/supervisor.sock"))
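The UnixStreamTransport referenced above is defined elsewhere in that project. A hedged sketch of what such a transport typically looks like (class and attribute names here are assumptions), routing xmlrpc.client's HTTP traffic over a Unix domain socket such as supervisord's:

import http.client
import socket
from xmlrpc import client


class UnixStreamHTTPConnection(http.client.HTTPConnection):
    """An HTTPConnection that connects to a Unix socket instead of TCP."""

    def __init__(self, socket_path):
        super().__init__('localhost')  # host is a placeholder, never resolved
        self.socket_path = socket_path

    def connect(self):
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(self.socket_path)


class UnixStreamTransport(client.Transport):
    """Hand ServerProxy a connection bound to the Unix socket path."""

    def __init__(self, socket_path):
        super().__init__()
        self.socket_path = socket_path

    def make_connection(self, host):
        # 'host' is ignored; the socket path decides where we connect
        return UnixStreamHTTPConnection(self.socket_path)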
Example 8
    def connect(self):
        if not self.use_rest and not self.use_xmlrpc:
            raise ConfluenceConfigurationError(
                """Both REST and XML-RPC """
                """options have been explicitly disabled. Unable to publish."""
            )

        if self.use_rest:
            self.rest_client = Rest(self.config)
            try:
                rsp = self.rest_client.get('space', {
                    'spaceKey': self.space_name,
                    'limit': 1
                })
                if rsp['size'] == 0:
                    raise ConfluenceBadSpaceError(self.space_name)
                self.space_display_name = rsp['results'][0]['name']
                self.use_xmlrpc = False
            except ConfluenceBadApiError:
                if not self.use_xmlrpc:
                    raise
                self.use_rest = False
            except ConfluenceBadServerUrlError:
                if not self.use_xmlrpc:
                    raise
                self.use_rest = False

        if self.use_xmlrpc:
            try:
                transport = None
                if self.proxy or self.timeout:
                    transport = ConfluenceTransport()
                    if self.proxy:
                        transport.set_proxy(self.proxy)
                    if self.timeout:
                        transport.set_timeout(self.timeout)

                self.xmlrpc = xmlrpclib.ServerProxy(self.server_url +
                                                    '/rpc/xmlrpc',
                                                    transport=transport,
                                                    allow_none=True)
            except IOError as ex:
                raise ConfluenceBadServerUrlError(self.server_url, ex)

            if self.server_user:
                try:
                    token = self.xmlrpc.confluence1.login(
                        self.server_user, self.server_pass)
                    try:
                        self.token = self.xmlrpc.confluence2.login(
                            self.server_user, self.server_pass)
                        self.xmlrpc.confluence1.logout(token)
                        self.xmlrpc = self.xmlrpc.confluence2
                    except xmlrpclib.Error:
                        self.token = None
                except xmlrpclib.ProtocolError as ex:
                    if ex.errcode == 403:
                        raise ConfluenceRemoteApiDisabledError(self.server_url)
                    raise ConfluenceBadServerUrlError(self.server_url, ex)
                except (httplib.InvalidURL, socket.error) as ex:
                    raise ConfluenceBadServerUrlError(self.server_url, ex)
                except xmlrpclib.Fault as ex:
                    if ex.faultString.find('AuthenticationFailed') != -1:
                        raise ConfluenceAuthenticationFailedUrlError
                    raise
                if not self.token:
                    raise ConfluenceLegacyError
            else:
                self.token = ''  # Anonymous.

            if self.token:
                try:
                    self.xmlrpc.getSpace(self.token, self.space_name)
                except xmlrpclib.Fault as ex:
                    self.xmlrpc.logout(self.token)
                    raise ConfluenceBadSpaceError(self.space_name)
            else:
                try:
                    self.xmlrpc.confluence2.getSpace(None, self.space_name)
                    self.xmlrpc = self.xmlrpc.confluence2
                except xmlrpclib.Fault as ex:
                    try:
                        self.xmlrpc.confluence1.getSpace(None, self.space_name)
                        self.xmlrpc = self.xmlrpc.confluence1
                    except xmlrpclib.Fault as ex:
                        raise ConfluenceBadSpaceError(self.space_name)
Example 9
from xmlrpc import client

# The common XML-RPC endpoint

srv = "http://localhost:8069"
common = client.ServerProxy("%s/xmlrpc/2/common" % srv)
common.version()
# Result: {'server_version': '14.0', 'server_version_info': [14, 0, 0, 'final', 0, ''], 'server_serie': '14.0', 'protocol_version': 1}

db, user, password = "******", "admin", "admin"
uid = common.authenticate(db, user, password, {})
print(uid)

# The object XML-RPC endpoint

api = client.ServerProxy('%s/xmlrpc/2/object' % srv)
api.execute_kw(db, uid, password, "res.users", "search_count", [[]])

api.execute_kw(db, uid, password, "res.users", "read",
               [2, ["login", "name", "company_id"]])
# Result: [{'id': 2, 'login': '******', 'name': 'Mitchell Admin', 'company_id': [1, 'YourCompany']}]

domain = [("login", "=", "admin")]
api.execute_kw(db, uid, password, "res.users", "search", [domain])
# Result: [2]

api.execute_kw(db, uid, password, "res.users", "search_read",
               [domain, ["login", "name"]])
# Result: [{'id': 2, 'login': '******', 'name': 'Mitchell Admin'}]

# Same call, using keyword arguments instead of positional arguments
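api.execute_kw(db, uid, password, "res.users", "search_read",
               [], {"domain": domain, "fields": ["login", "name"]})
# Result: [{'id': 2, 'login': '******', 'name': 'Mitchell Admin'}]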
Example 10
File: fos_vqir.py Project: zfis/fos
  def action_submit_api(self):
    # 1. make connection to FMPI
    dealer_id = self.company_id.dealer_id.id

    # FMPI's API Connection Parameters
    url = self.company_id.fmpi_host.strip()
    db = self.company_id.fmpi_pgn
    username = self.company_id.fmpi_pgu
    password = self.company_id.fmpi_pgp
    vqir_number = self.name

    # connect to FMPI
    logger.info("Connecting to " + url)
    common = xmlrpclib.ServerProxy('{}/xmlrpc/2/common'.format(url))
    uid = common.authenticate(db, username, password, {})
    if not uid:
      raise exceptions.except_orm(_('Remote Authentication Failed'), _(url + " failed to authenticate " + username))
    models = xmlrpclib.ServerProxy('{}/xmlrpc/2/object'.format(url))
    cur_stamp = fields.datetime.now()

    # Query existence of dealer's VQIR from FMPI Database
    fmpi_existing_vqir = models.execute_kw(db, uid, password,
        'fmpi.vqir', 'search',
        [[['name', '=', vqir_number], ['dealer_id', '=', dealer_id], ['vqir_date', '=', self.vqir_date]]])
    if fmpi_existing_vqir:
      vqir_state_logs = "Document:" + (self.name or 'Empty Document') + "\n" + \
        "Re-submitted by: " + (self.env.user.name or 'No User Name specified') + "\n" + \
        "Re-submitted at: " + datetime.datetime.now().strftime("%m/%d/%Y") + "\n" + \
        "--------------------------------------------------\n"
      self.write({'vqir_state': 'submit',
        'vqir_state_logs': vqir_state_logs + str(self.vqir_state_logs or ''),
        "submitted_date":cur_stamp})

      for existingID in fmpi_existing_vqir:
        fmpi_vqir_id = models.execute_kw(db, uid, password, 'fmpi.vqir', 'write', [[existingID], {
          'vqir_state': 'submit',
          'date_occur': self.date_occur,
          'vqir_type': self.vqir_type,
          'vqir_city': self.vqir_city,
          'place_of_incident': self.place_of_incident,
          'km_1st_trouble': self.km_1st_trouble,
          'run_km': self.run_km,
          'person': self.person,
          'others': self.others,
          'trouble_explanation': self.trouble_explanation,
          'trouble_cause_analysis': self.trouble_cause_analysis,
          'disposal_measures': self.disposal_measures,
          'proposal_for_improvement': self.proposal_for_improvement,
          'driver_name': self.driver_name,
          'ss_name': self.ss_name,
          'ss_street1': self.ss_street1,
          'ss_street2': self.ss_street2,
          'ss_city': self.ss_city,
          'ss_phone': self.ss_phone,
          'ss_mobile': self.ss_mobile,
          'ss_fax': self.ss_fax,
          'ss_email': self.ss_email,
          'users_name': self.users_name,
          'users_street1': self.users_street1,
          'users_street2': self.users_street2,
          'users_city': self.users_city,
          'users_phone': self.users_phone,
          'users_mobile': self.users_mobile,
          'users_fax': self.users_fax,
          'users_email': self.users_email,
          'reps_name': self.reps_name,
          'reps_street1': self.reps_street1,
          'reps_street2': self.reps_street2,
          'reps_city': self.reps_city,
          'reps_phone': self.reps_phone,
          'reps_mobile': self.reps_mobile,
          'reps_fax': self.reps_fax,
          'reps_email': self.reps_email,
          'remarks': self.remarks,
          'fos_fu_id': self.fos_fu_id.id,
          'vqir_state_logs': self.vqir_state_logs,
          'submitted_date': fields.datetime.now(),
          'declined_date': self.declined_date,
          'disapproved_date': self.disapproved_date,
          'ack_date': self.ack_date,
          'paid_date': self.paid_date,
          'url': self.company_id.dealer_host.strip(),
          'db' : self.company_id.dealer_pgn,
          'username' : self.company_id.dealer_pgu,
          'password' : self.company_id.dealer_pgp}])
        # query IDs of parts and jobs from FMPI Database
        fmpi_existing_vqir_pj_ids = models.execute_kw(db, uid, password,
          'fmpi.vqir.parts.and.jobs', 'search', 
            [[['fmpi_vqir_id', '=', existingID]]])
        # delete parts and jobs from FMPI Database
        models.execute_kw(db, uid, password, 'fmpi.vqir.parts.and.jobs', 'unlink', [fmpi_existing_vqir_pj_ids])
        # re-create parts and jobs record based on Dealer's Database
        for ji in self.fos_vqir_parts_and_jobs_line:
          models.execute_kw(db, uid, password, 'fmpi.vqir.parts.and.jobs', 'create', [{
            'name': ji.name,
            'fmpi_vqir_id': existingID,
            'si_number':ji.si_number,
            'si_date':ji.si_date,
            'parts_number': ji.part_id.name,
            'parts_desc': ji.parts_desc,
            'parts_qty': ji.parts_qty,
            'parts_cost': ji.parts_cost,
            'parts_with_fee': ji.parts_with_fee,
            'parts_total': ji.parts_total,
            'job_code': ji.job_code,
            'job_code_desc': ji.job_code_desc,
            'job_qty': ji.job_qty,
            'job_cost': ji.job_cost,
            'dealer_pj_id': ji.id }])
        # query IDs of VQIR Images from FMPI Database
        fmpi_existing_vqir_images_ids = models.execute_kw(db, uid, password,
          'fmpi.vqir.images', 'search', 
            [[['fmpi_vqir_id', '=', existingID]]])
        # delete VQIR Images from FMPI Database
        models.execute_kw(db, uid, password, 'fmpi.vqir.images', 'unlink', [fmpi_existing_vqir_images_ids])
        # re-create VQIR Images record based on Dealer's Database
        for img in self.fos_vqir_images_line:
          models.execute_kw(db, uid, password, 'fmpi.vqir.images', 'create', [{
            'name': img.name,
            'fmpi_vqir_id': existingID,
            'image': img.image,
            'image_medium': img.image_medium,
            'image_small': img.image_small,
            'image_remarks': img.image_remarks,
            }])
    else:
      # 2. call fmpi.vqir model
      vqir_state_logs = "Document:" + (self.name or 'Empty Document') + "\n" + \
        "Submitted by: " + (self.env.user.name or 'No User Name specified') + "\n" + \
        "Submitted at: " + datetime.datetime.now().strftime("%m/%d/%Y") + "\n" + \
        "--------------------------------------------------\n"
      self.write({'vqir_state': 'submit',
        'vqir_state_logs': vqir_state_logs + str(self.vqir_state_logs or ''),
        "submitted_date":cur_stamp})
      fmpi_vqir_id = models.execute_kw(db, uid, password, 'fmpi.vqir', 'create', [{
        'name': self.name,
        'vqir_date':self.vqir_date,
        'preapproved_date': self.preapproved_date,
        'payment_receipt': self.payment_receipt,
        'vqir_type': self.vqir_type,
        'vqir_service_type': self.vqir_service_type,
        'vqir_state': 'submit',
        'date_occur': self.date_occur,
        'vqir_city': self.vqir_city,
        'place_of_incident': self.place_of_incident,
        'km_1st_trouble': self.km_1st_trouble,
        'run_km': self.run_km,
        'part': self.part,
        'person': self.person,
        'others': self.others,
        'trouble_explanation': self.trouble_explanation,
        'trouble_cause_analysis': self.trouble_cause_analysis,
        'disposal_measures': self.disposal_measures,
        'proposal_for_improvement': self.proposal_for_improvement,
        'driver_name': self.driver_name,
        'ss_name': self.ss_name,
        'ss_street1': self.ss_street1,
        'ss_street2': self.ss_street2,
        'ss_city': self.ss_city,
        'ss_phone': self.ss_phone,
        'ss_mobile': self.ss_mobile,
        'ss_fax': self.ss_fax,
        'ss_email': self.ss_email,
        'users_name': self.users_name,
        'users_street1': self.users_street1,
        'users_street2': self.users_street2,
        'users_city': self.users_city,
        'users_phone': self.users_phone,
        'users_mobile': self.users_mobile,
        'users_fax': self.users_fax,
        'users_email': self.users_email,
        'date_released': self.date_released,
        'reps_name': self.reps_name,
        'reps_street1': self.reps_street1,
        'reps_street2': self.reps_street2,
        'reps_city': self.reps_city,
        'reps_phone': self.reps_phone,
        'reps_mobile': self.reps_mobile,
        'reps_fax': self.reps_fax,
        'reps_email': self.reps_email,
        'remarks': self.remarks,
        'fos_fu_id': self.fos_fu_id.id,
        'dealer_id': dealer_id,
        'dealer_vqir_id': self.id,
        'dealer_host': self.company_id.dealer_host,
        'dealer_db': self.company_id.dealer_pgn,
        'dealer_port': self.company_id.dealer_port,
        'dealer_pgu': self.company_id.dealer_pgu,
        'dealer_pgp': self.company_id.dealer_pgp,
        'vqir_state_logs': self.vqir_state_logs,
        'approved_date': self.approved_date,
        'submitted_date': fields.datetime.now(),
        'declined_date': self.declined_date,
        'disapproved_date': self.disapproved_date,
        'ack_date': self.ack_date,
        'paid_date': self.paid_date,
        'url': self.company_id.dealer_host.strip(),
        'db' : self.company_id.dealer_pgn,
        'username' : self.company_id.dealer_pgu,
        'password' : self.company_id.dealer_pgp
          }])

      if fmpi_vqir_id:
        logger.info("VQIR ID " + str(fmpi_vqir_id))
        # vqir jobs and parts
        for ji in self.fos_vqir_parts_and_jobs_line:
          fmpi_vqir_parts_and_jobs_id = models.execute_kw(db, uid, password, 'fmpi.vqir.parts.and.jobs', 'create', [{
            'name': ji.name,
            'fmpi_vqir_id': fmpi_vqir_id,
            'si_number':ji.si_number,
            'si_date':ji.si_date,
            'parts_number': ji.part_id.name,
            'parts_desc': ji.parts_desc,
            'parts_qty': ji.parts_qty,
            'parts_cost': ji.parts_cost,
            'parts_with_fee': ji.parts_with_fee,
            'parts_total': ji.parts_total,
            'job_code': ji.job_code,
            'job_code_desc': ji.job_code_desc,
            'job_qty': ji.job_qty,
            'job_cost': ji.job_cost,
            'approved_amount': ji.approved_amount,
            'dealer_pj_id': ji.id }])

        # vqir images
        for img in self.fos_vqir_images_line:
          fmpi_vqir_images_id = models.execute_kw(db, uid, password, 'fmpi.vqir.images', 'create', [{
            'name': img.name,
            'fmpi_vqir_id': fmpi_vqir_id,
            'image': img.image,
            'image_medium': img.image_medium,
            'image_small': img.image_small,
            'image_remarks': img.image_remarks,
            }])
Example 11
def list_pypi_packages():
    """
    Returns a sorted list of all packages on PyPI using the xmlrpc interface
    """
    client = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
    return sorted(client.list_packages())
Example 12
def call_task(self, url, db, user_id, task_uuid, model, method, **kwargs):
    odoo = xmlrpc_client.ServerProxy('{}/xmlrpc/2/object'.format(url))
    args = [task_uuid, model, method]
    _kwargs = copy.deepcopy(kwargs)

    # Needed in the retry (call), to hide _password.
    _kwargsrepr = copy.deepcopy(kwargs)

    password = _kwargs.get('_password')
    del _kwargs['_password']
    celery_params = _kwargs.get('celery', {})

    logger.info('{model} {method} - celery.task uuid: {uuid}'.format(
        model=model, method=method, uuid=task_uuid))
    logger.info('{model} {method} - kwargs: {kwargs}'.format(
        model=model, method=method, kwargs=_kwargs))

    try:
        logger.info(
            'XML-RPC to Odoo server:\n\n'
            '- url: {url}\n'
            '- db: {db}\n'
            '- user_id: {user_id}\n'
            '- task_uuid: {task_uuid}\n'
            '- model: celery.task\n'
            '- method: rpc_run_task\n'
            '- args: {args}\n'
            '- kwargs {kwargs}\n'.format(
                url=url, db=db, user_id=user_id, task_uuid=task_uuid, model=model, method=method, args=args, kwargs=_kwargs))
        response = odoo.execute_kw(db, user_id, password, 'celery.task', 'rpc_run_task', args, _kwargs)

        if isinstance(response, (tuple, list)) and len(response) == 2:
            code = response[0]
            result = response[1]
        else:
            code = OK_CODE
            result = response

        if code == TASK_NOT_FOUND:
            msg = "%s, database: %s" % (result, db)
            raise TaskNotFoundInOdoo(msg)
        elif code in (STATE_RETRY, STATE_FAILURE):
            retry = celery_params.get('retry')
            countdown = celery_params.get('countdown', 1)
            retry_countdown_setting = celery_params.get('retry_countdown_setting')
            retry_countdown_add_seconds = celery_params.get('retry_countdown_add_seconds', 0)
            retry_countdown_multiply_retries_seconds = celery_params.get('retry_countdown_multiply_retries_seconds', 0)

            # (Optionally) increase the countdown either by:
            # - add seconds
            # - countdown * retry requests
            # - retry requests * a given seconds
            if retry and retry_countdown_setting:
                if retry_countdown_setting == RETRY_COUNTDOWN_ADD_SECONDS:
                    countdown = countdown + retry_countdown_add_seconds
                elif retry_countdown_setting == RETRY_COUNTDOWN_MULTIPLY_RETRIES:
                    countdown = countdown * self.request.retries
                elif retry_countdown_setting == RETRY_COUNTDOWN_MULTIPLY_RETRIES_SECCONDS \
                     and retry_countdown_multiply_retries_seconds > 0:
                    countdown = self.request.retries * retry_countdown_multiply_retries_seconds
            celery_params['countdown'] = countdown
            
            if retry:
                msg = 'Retry task... Failure in Odoo {db} (task: {uuid}, model: {model}, method: {method}).'.format(
                    db=db, uuid=task_uuid, model=model, method=method)
                logger.info(msg)

                # Notify the worker to retry.
                logger.info('{task_name} retry params: {params}'.format(task_name=self.name, params=celery_params))
                _kwargsrepr['_password'] = '******'
                _kwargsrepr = repr(_kwargsrepr)
                raise self.retry(kwargsrepr=_kwargsrepr, **celery_params)
            else:
                msg = 'Exit task... Failure in Odoo {db} (task: {uuid}, model: {model}, method: {method})\n'\
                      '  => Check task log/info in Odoo'.format(db=db, uuid=task_uuid, model=model, method=method)
                logger.info(msg)
        else:
            return (code, result)
    except Exception as e:
        """ A rather picky workaround to ignore/silence following exceptions.
        Only logs in case of other Exceptions.
        
        This also prevents concurrent retries causing troubles like
        concurrent DB updates (shall rollback) etc.

        - xmlrpc_client.Fault: Catches exception TypeError("cannot
        marshal None unless allow_none is enabled").  Setting
        allowd_none on the ServcerProxy won't work like expected and
        seems vague.
        - Retry: Celery exception notified to tell worker the task has
        been re-sent for retry.  We don't want to re-retry (double
        trouble here).

        See also odoo/service/wsgi_server.py for xmlrpc.client.Fault
        (codes), e.g: RPC_FAULT_CODE_CLIENT_ERROR = 1
        """
        if isinstance(e, MaxRetriesExceededError):
            # TODO
            # After implementation of "Hide sensitive data (password) by argspec/kwargspec, a re-raise should happen.
            # For now it shows sensitive data in the logs.
            msg = '[TODO] Failure (caught) MaxRetriesExceededError: db: {db}, task: {uuid}, model: {model}, method: {method}.'.format(
                db=db, uuid=task_uuid, model=model, method=method)
            logger.error(msg)
            # Task is probably in state RETRY. Now set it to FAILURE.
            args = [task_uuid, 'FAILURE']
            odoo.execute_kw(db, user_id, password, 'celery.task', 'rpc_set_state', args)
        elif not isinstance(e, Retry):
            # Maybe there's also a way to store a xmlrpc.client.Fault in the Odoo exc_info field, e.g.:
            # args = [xmlrpc_client.Fault.faultCode, xmlrpc_client.Fault.faultString]
            # odoo.execute_kw(db, user_id, password, 'celery.task', 'rpc_set_exception', args)
            #
            # Necessary to implement/call a retry() for other exceptions ?
            msg = '{exception}\n'\
                  '  => SUGGESTIONS: Check former XML-RPC log messages.\n'.format(exception=e)
            logger.error(msg)
            raise e
Example 13
    except Exception:
        return None
    filenames = tar.getnames()
    py_files = [elem for elem in filenames if elem.endswith('.py')]
    for py_file in py_files:
        try:
            content = tar.extractfile(py_file).read().decode()
            yield content
        except Exception:
            yield None
    pyx_files = [elem for elem in filenames if elem.endswith('.pyx')]
    if len(pyx_files) > 0:
        yield 'import cython'


DEFAULT_CLIENT = xmlrpclib.ServerProxy('http://pypi.python.org/pypi')


def extract_package(name, to='pypi-deps.txt', client=DEFAULT_CLIENT):
    tmpfilename = '/tmp/temp_py_package.tar.gz'
    with open(to, 'a') as fout:
        try:
            releases = client.package_releases(name)
        except Exception as e:
            print(e, "internet connection issues?")
            return
        if len(releases) == 0:
            return
        release = releases[0]  # use only the latest release (avoids a second network call)
        docs = client.release_urls(name, release)
        if len(docs) > 0:
Example 14
import xmlrpc.client as xmlrpclib  # For the PlanetLab Central API
from geopy.distance import geodesic
import math
import matplotlib.pyplot as plt
# import localization as lx
# from sklearn import datasets, linear_model
# from .ipGeolocator import ipGeolocator as lx
import netmetGeolocator as lx

from sklearn.model_selection import train_test_split, cross_validate
from sklearn.linear_model import LinearRegression, Ridge, Lasso
from sklearn.metrics import mean_squared_error, r2_score

#%%
############ Planetlab
api_server = xmlrpclib.ServerProxy('https://www.planet-lab.eu/PLCAPI/',
                                   allow_none=True)
auth = {}
auth['AuthMethod'] = 'password'
auth['Username'] = '******'
auth['AuthString'] = 'xxxx'
authorized = api_server.AuthCheck(auth)
#%%
plslice = api_server.GetSlices(auth)[0]
slice_node_ids = plslice["node_ids"]
slice_nodes = api_server.GetNodes(auth, slice_node_ids)
all_nodes = api_server.GetNodes(auth)
#%%
################ GET ALL NODES OF THE SLICE #####################
boot_nodes = list()
all_boot_nodes = list()
i = 0
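# The snippet is cut off here; a plausible continuation (an assumption, not
# the verified original) filters the nodes fetched above by their PLCAPI
# 'boot_state' field to keep only nodes that are up:
boot_nodes = [n for n in slice_nodes if n.get('boot_state') == 'boot']
all_boot_nodes = [n for n in all_nodes if n.get('boot_state') == 'boot']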
Example 15
def mapReduce(input_data, map_fn, reduce_fn, output_location):
    task_completed = False  # set to True only when the whole pipeline succeeds
    try:
        files = []
        result = ""
        if (map_fn == "word_count_map"):
            response = requests.get(input_data)
            soup = BeautifulSoup(response.text, 'html.parser')
            logging.info('Parsing the input html file %s', input_data)
            s = ""
            count = 0
            for link in soup.find_all('p'):
                s += link.text
            s_len = len(s)
            length = math.ceil(s_len / 3)  # split the text into three roughly equal chunks
            file_input = textwrap.wrap(s, length)
            print(file_input)
            print(len(file_input))
            logging.debug(
                'Calling map_input to store input data in key-value store')
            result = map_input(file_input, map_fn)

        elif (map_fn == "inverted_index_map"):
            for i in input_data:
                response = requests.get(i)
                soup = BeautifulSoup(response.text, 'html.parser')
                s = ""
                for i in soup.find_all('p'):
                    s += i.text
                s = textwrap.fill(s)
                files.append(s)
            result = map_input(files, map_fn)
        if (result.strip() == "STORED"):
            if (map_fn == "word_count_map"):
                creation_status, mapper_ips = create_instances("map")
                logging.info(creation_status)
                if (creation_status):
                    time.sleep(80)
                    logging.info('RPC call to word_count_mapper')
                    logging.info(str(mapper_ips[0]))
                    proxy_1 = xc.ServerProxy("http://%s:%s" %
                                             (str(mapper_ips[0]), str(9000)),
                                             allow_none=True)
                    map_result_1 = proxy_1.spool(map_fn)
                    print(map_result_1)
                    proxy_2 = xc.ServerProxy("http://%s:%s" %
                                             (str(mapper_ips[1]), str(9001)),
                                             allow_none=True)
                    map_result_2 = proxy_2.spool(map_fn)
                    print(map_result_2)
                    proxy_3 = xc.ServerProxy("http://%s:%s" %
                                             (str(mapper_ips[2]), str(9002)),
                                             allow_none=True)
                    map_result_3 = proxy_3.spool(map_fn)
                    print(map_result_3)
                else:
                    logging.error("Mapper instance creation failed")
                    exit(0)
            elif (map_fn == "inverted_index_map"):
                creation_status, mapper_ips = create_instances("map")
                if (creation_status):
                    time.sleep(80)
                    logging.info('RPC call to inverted_index_mapper')
                    proxy_1 = xc.ServerProxy("http://%s:%s" %
                                             (str(mapper_ips[0]), str(9000)),
                                             allow_none=True)
                    map_result_1 = proxy_1.spool(map_fn)
                    print(map_result_1)
                    proxy_2 = xc.ServerProxy("http://%s:%s" %
                                             (str(mapper_ips[1]), str(9001)),
                                             allow_none=True)
                    map_result_2 = proxy_2.spool(map_fn)
                    print(map_result_2)
                    proxy_3 = xc.ServerProxy("http://%s:%s" %
                                             (str(mapper_ips[2]), str(9002)),
                                             allow_none=True)
                    map_result_3 = proxy_3.spool(map_fn)
                    print(map_result_3)
                else:
                    logging.error("Mapper instance creation failed")
                    exit(0)
            if (map_result_1 and map_result_2 and map_result_3):
                result = terminate_instances("map")
                if (result):
                    logging.info("Termination of mapper instances successful")
                else:
                    logging.error("Instances termination failed")
                logging.info('Map task successfully completed')
                logging.debug('Calling shuffle in master')
                shuffle_result = shuffle(map_fn)
                logging.info('The response of shuffle in master is %s',
                             str(shuffle_result))
                shuffle_result = shuffle_result.strip()
                if (shuffle_result == "STORED"):
                    logging.info('Shuffle task successfully completed')
                    if (reduce_fn == "word_count_reduce"):
                        creation_status, reducer_ips = create_instances(
                            "reduce")
                        if (creation_status):
                            time.sleep(80)
                            logging.info('RPC call to word_count reducer')
                            proxy = xc.ServerProxy(
                                "http://%s:%s" %
                                (str(reducer_ips[0]), str(9005)),
                                allow_none=True)
                            reducer_result_1 = proxy.spool(reduce_fn)
                            print(reducer_result_1)
                            proxy = xc.ServerProxy(
                                "http://%s:%s" %
                                (str(reducer_ips[1]), str(9006)),
                                allow_none=True)
                            reducer_result_2 = proxy.spool(reduce_fn)
                            print(reducer_result_2)
                            proxy = xc.ServerProxy(
                                "http://%s:%s" %
                                (str(reducer_ips[2]), str(9007)),
                                allow_none=True)
                            reducer_result_3 = proxy.spool(reduce_fn)
                            print(reducer_result_3)
                        else:
                            logging.error("Reducer instance creation failed")
                            exit(0)
                    elif (reduce_fn == "inverted_index_reduce"):
                        creation_status, reducer_ips = create_instances(
                            "reduce")
                        if (creation_status):
                            time.sleep(80)
                            logging.info('RPC call to inverted_index_reducer')
                            proxy_1 = xc.ServerProxy(
                                "http://%s:%s" %
                                (str(reducer_ips[0]), str(9005)),
                                allow_none=True)
                            reducer_result_1 = proxy_1.spool(reduce_fn)
                            proxy_2 = xc.ServerProxy(
                                "http://%s:%s" %
                                (str(reducer_ips[1]), str(9006)),
                                allow_none=True)
                            reducer_result_2 = proxy_2.spool(reduce_fn)
                            proxy_3 = xc.ServerProxy(
                                "http://%s:%s" %
                                (str(reducer_ips[2]), str(9007)),
                                allow_none=True)
                            reducer_result_3 = proxy_3.spool(reduce_fn)
                            print(reducer_result_1)
                            print(reducer_result_2)
                            print(reducer_result_3)
                        else:
                            logging.error("Reducer instance creation failed")
                            exit(0)
                    if (reducer_result_1 and reducer_result_2
                            and reducer_result_3):
                        result = terminate_instances("reduce")
                        if (result):
                            logging.info(
                                "Termination of reducer instances successful")
                        else:
                            logging.error("Instances termination failed")
                        logging.info('Reduce task successfully completed')
                        res = store_output_file(reduce_fn, output_location)
                        if (res):
                            logging.info(
                                'The output of the map reduce task has been stored in the file %s',
                                output_location)
                            res = backup()
                            if (res):
                                logging.info(
                                    'Map reduce task successfully completed')
                                print("Map reduce task successfully completed")
                                task_completed = True
                            else:
                                logging.error("Backup failed")
                    else:
                        logging.error('Reducer task %s failed' %
                                      str(reduce_fn))
            else:
                logging.error('Mapper task %s failed' % str(map_fn))

        else:
            logging.error('Storing input data in server failed')

    except Exception as e:
        logging.error("Exception occurred", exc_info=True)
    return task_completed
Example 16
from pathos import logger
logger(level=20, name='pathos.xmlrpc')   # logging.INFO
logger(level=20, name='pathos.selector') # logging.INFO

if __name__ == '__main__':
    
    import os, time
    try:
        import xmlrpc.client as client
    except ImportError:
        import xmlrpclib as client

    s = XMLRPCServer('', 0)
    print('port=%d' % s.port)
    port = s.port

    pid = os.fork()
    if pid > 0: #parent
        def add(x, y): return x + y
        s.register_function(add)
        s.activate()
        #s._selector._info.activate()
        s.serve()
    else: #child
        time.sleep(1)
        s = client.ServerProxy('http://localhost:%d' % port)
        print('1 + 2 = %s' % s.add(1, 2))
        print('3 + 4 = %s' % s.add(3, 4))

# End of file
if __name__ == "__main__":

    print(
        '*************************\nRpcServer Startup\n**************************'
    )
    start = time.perf_counter()  # time.clock() was removed in Python 3.8
    proc = mp.Process(target=RpcServer)
    proc.start()
    end = time.perf_counter()
    print('Elapsed Time - RpcServer Startup: %.3f\n' % (end - start))
    time.sleep(1)

    hostname = '127.0.0.1'
    port = 9000
    server = xrc.ServerProxy('http://' + hostname + ':' + str(port),
                             allow_none=True,
                             use_builtin_types=True)

    print('*************************\nGetHauls\n**************************')
    start = time.perf_counter()
    hauls = server.get_hauls()
    if hauls:
        print('count: ' + str(len(hauls)))
    for haul in hauls:
        # haul = [x.decode('utf-8') if isinstance(x, bytes) else x for x in haul]
        print(str(haul))
    end = time.perf_counter()
    print('Elapsed Time - GetHauls: %.3f\n' % (end - start))

    # RpcServer - Shutdown
    print(
        '*************************\nRpcServer Shutdown\n**************************'
    )
Example 18
 def __init__(self):
     """Initialize the BQValidation object."""
     pypi_org = xmlrpclib.ServerProxy('https://pypi.python.org/pypi')
     self.pypi_org_packages = {nn(p) for p in pypi_org.list_packages()}
Example 19
 def get_db_list(self, host, port, context=None):
     uri = 'http://' + host + ':' + port
     conn = xmlrpclib.ServerProxy(uri + '/xmlrpc/db')
     db_list = execute(conn, 'list')
     return db_list
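The execute() helper used above is defined elsewhere in the module; a minimal sketch of what it plausibly does (an assumption, not the verified original):

def execute(connector, method, *args):
    # dispatch an XML-RPC method on the ServerProxy, e.g. connector.list()
    return getattr(connector, method)(*args)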
Example 20
def schedule(worker_ips,
             train_partitions,
             valid_partitions,
             train_availability,
             valid_availability,
             input_fn,
             model_fn,
             train_fn,
             initial_msts,
             mst_eval_fn,
             ckpt_root='/tmp',
             preload_data_to_mem=True,
             backend='tf'):
    """
    :param workers:
    :param train_partitions:
    :param valid_partitions:    
    :param train_availability:
    :param valid_availability:    
    :param input_fn:
    :param model_fn:
    :param train_fn:
    :param initial_msts:
    :param mst_eval_fn:
    :param ckpt_root:
    :param preload_data_to_mem:
    """
    begin_time = time.time()

    print('Starting HT job: ' +
          datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))

    if os.path.exists("./logs"):
        shutil.rmtree("./logs")
    os.makedirs("./logs")
    scheduler_log_file = open("./logs/scheduler.log", "w")

    workers = {i: xc.ServerProxy(ip) for i, ip in enumerate(worker_ips)}

    current_msts = [(mst_id, mst) for mst_id, mst in enumerate(initial_msts)]
    mst_evaluation_state = {}

    if os.path.exists(ckpt_root):
        shutil.rmtree(ckpt_root)

    for mst_id, mst in current_msts:
        ckpt_path = ckpt_root + "/" + str(mst_id) + "_" + uuid()
        if not os.path.exists(ckpt_path):
            os.makedirs(ckpt_path)
        ckpt_path = ckpt_path + "/model"
        mst_evaluation_state[mst_id] = {
            "state": "RUNNING",
            "epoch": -1,
            "train_error": [],
            "train_loss": [],
            "valid_error": [],
            "valid_loss": [],
            "ckpt_path": ckpt_path,
            "mst": mst
        }
        log_file = open("./logs/" + str(mst_id) + ".log", 'a')
        log_message(log_file, "Checkpoint Path: " + ckpt_path + "\n")
        log_message(log_file, "MST: " + mst_identifier(mst) + "\n")

    if backend == 'tf':
        exec_fn_string = base64.b64encode(
            dill.dumps(tf_execute_helper, byref=False)).decode("ascii")
    elif backend == 'pytorch':
        exec_fn_string = base64.b64encode(
            dill.dumps(pytorch_execute_helper, byref=False)).decode("ascii")

    preload_fn_string = base64.b64encode(
        dill.dumps(preload_data_helper, byref=False)).decode("ascii")

    input_fn_string = base64.b64encode(dill.dumps(input_fn,
                                                  byref=False)).decode("ascii")
    model_fn_string = base64.b64encode(dill.dumps(model_fn,
                                                  byref=False)).decode("ascii")
    train_fn_string = base64.b64encode(dill.dumps(train_fn,
                                                  byref=False)).decode("ascii")

    if preload_data_to_mem:
        # preload data into the worker memory
        preload_data(workers, input_fn_string, preload_fn_string,
                     train_partitions, valid_partitions, train_availability,
                     valid_availability, scheduler_log_file, begin_time)

    # assume the number of train partitions equals the number of valid partitions
    P = len(train_partitions)
    W = len(workers)

    random.seed(2019)

    def _get_runnable_unit(epoch_units, w, availability,
                           epoch_mst_execution_state):
        random.shuffle(epoch_units)
        for idx, (mst_id, mst, partition) in enumerate(epoch_units):
            if availability[w][partition] == 1 and (
                    not epoch_mst_execution_state[mst_id]):
                del epoch_units[idx]
                return mst_id, mst, partition
        return -1, -1, -1

    iteration = 0
    while len(current_msts) > 0:

        epoch_mst_evaluation_state = {
            mst_id: {
                "train_error": [],
                "train_loss": [],
                "valid_error": [],
                "valid_loss": []
            }
            for mst_id, mst in current_msts
        }

        for mode, availability, partitions in zip(
            ["TRAIN", "VALID"], [train_availability, valid_availability],
            [train_partitions, valid_partitions]):

            epoch_units = [(mst_id, mst, partition) for partition in range(P)
                           for (mst_id, mst) in current_msts]
            epoch_mst_execution_state = {
                mst_id: False
                for mst_id, _ in current_msts
            }
            epoch_machine_state = [None for _ in range(W)]

            epoch_begin_time = time.time()
            while len(epoch_units) > 0 or sum(
                [1 for x in epoch_machine_state if x is not None]) > 0:
                for w in [w for w in range(W) if w in workers]:

                    try:
                        if epoch_machine_state[w] is None:
                            mst_id, mst, p = _get_runnable_unit(
                                epoch_units, w, availability,
                                epoch_mst_execution_state)
                            if mst_id != -1:
                                exec_id = launch_job(
                                    workers[w],
                                    mst_evaluation_state[mst_id]['epoch'] + 1,
                                    [p],
                                    mst_evaluation_state[mst_id]['ckpt_path'],
                                    [partitions[p]], input_fn_string,
                                    model_fn_string, train_fn_string,
                                    exec_fn_string, mst, mode == "TRAIN")
                                epoch_mst_execution_state[mst_id] = True
                                epoch_machine_state[w] = (mst_id, mst, p,
                                                          exec_id)

                                message = "TIME: %d, EVENT: %s_LAUNCHED, ITERATION: %d, WORKER: %d, MST: %d, PARTITIONS: %s, EPOCH: %d, %s\n" % (
                                    time.time() - begin_time, mode, iteration,
                                    w, mst_id, "/".join([str(x) for x in [p]]),
                                    mst_evaluation_state[mst_id]['epoch'] + 1,
                                    mst_identifier(mst))
                                log_message(scheduler_log_file,
                                            message,
                                            print_message=True)
                        elif epoch_machine_state[w] is not None:
                            mst_id, mst, p, exec_id = epoch_machine_state[w]
                            completed, status = check_finished(
                                workers[w], exec_id)
                            if completed:
                                epoch_mst_execution_state[mst_id] = False
                                epoch_machine_state[w] = None

                                log_file = open(
                                    "./logs/" + str(mst_id) + ".log", 'a')
                                log_message(log_file,
                                            status["result"]["message"])

                                loss = status["result"]["loss"]
                                error = status["result"]["error"]

                                if mode == "TRAIN":
                                    epoch_mst_evaluation_state[mst_id][
                                        'train_loss'].extend(loss)
                                    epoch_mst_evaluation_state[mst_id][
                                        'train_error'].extend(error)
                                else:
                                    epoch_mst_evaluation_state[mst_id][
                                        'valid_loss'].extend(loss)
                                    epoch_mst_evaluation_state[mst_id][
                                        'valid_error'].extend(error)

                                message = "TIME: %d, EVENT: %s_COMPLETED, ITERATION: %d, WORKER: %d, MST: %d, PARTITIONS: %s, EPOCH: %d, %s\n" % (
                                    time.time() - begin_time, mode, iteration,
                                    w, mst_id, "/".join([str(x) for x in [p]]),
                                    mst_evaluation_state[mst_id]['epoch'] + 1,
                                    mst_identifier(mst))
                                log_message(scheduler_log_file,
                                            message,
                                            print_message=True)
                    except Exception as e:
                        print(e)
                        print('Worker {0} failure detected....'.format(str(w)))
                        # removing w from available workers
                        workers.pop(w, None)

                        # if an MST unit was running on the failed worker, move it back to the queue
                        if epoch_machine_state[w] is not None:
                            mst_id, mst, p, exec_id = epoch_machine_state[w]
                            print(
                                'MST {0} partition {1} moved back to queue....'
                                .format(str(mst_id), str(p)))
                            epoch_units.append((mst_id, mst, p))
                            epoch_machine_state[w] = None
                            epoch_mst_execution_state[mst_id] = False

                        # restart the worker scan from the beginning
                        break

                # check whether failed workers are back up
                for w in range(W):
                    if w not in workers:
                        try:
                            #print('Checking worker {0}....'.format(str(w)))
                            con = xc.ServerProxy(worker_ips[w])
                            con.is_live()
                            workers[w] = con
                            epoch_machine_state[w] = None
                            print('Worker {0} back online....'.format(str(w)))

                            if preload_data_to_mem:
                                # preload data into the worker memory
                                preload_data([workers[w]], input_fn_string,
                                             preload_fn_string,
                                             train_partitions,
                                             valid_partitions,
                                             [train_availability[w]],
                                             [valid_availability[w]],
                                             scheduler_log_file, begin_time)

                        except Exception:
                            # worker is still down; retry on the next pass
                            continue

                sys.stdout.flush()
                time.sleep(config.POLLING_PERIOD)

            message = 'Iteration: {}, {} Elapsed Time: {}\n'.format(
                iteration, mode,
                time.time() - epoch_begin_time)
            log_message(scheduler_log_file, message, print_message=True)

        # update mst evaluation state
        mst_evaluation_state = update_mst_evaluation_state(
            epoch_mst_evaluation_state, mst_evaluation_state)

        # mst evaluation
        current_msts, mst_evaluation_state = evaluate_msts(
            mst_eval_fn, mst_evaluation_state, current_msts, ckpt_root)
        iteration += 1

    print('Total HT job time: ' + str(time.time() - begin_time))
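
# A minimal sketch (an assumption, not part of the original scheduler) of the
# worker-side XML-RPC endpoint polled above: is_live is the method invoked via
# con.is_live(); the host and port are placeholders.
from xmlrpc.server import SimpleXMLRPCServer

def is_live():
    # the scheduler only needs this call to return without raising
    return True

worker_server = SimpleXMLRPCServer(('0.0.0.0', 8000), allow_none=True)
worker_server.register_function(is_live, 'is_live')
worker_server.serve_forever()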
Example n. 21
0
from xmlrpc import client as xmlrpclib

######################################################
#  XML-RPC client used to connect to an Odoo server  #
######################################################

#  DB info

url = 'http://127.0.0.1:8071'
db = 'pos-db'
username = '******'
password = '******'

common = xmlrpclib.ServerProxy('{}/xmlrpc/2/common'.format(url))
models = xmlrpclib.ServerProxy('{}/xmlrpc/2/object'.format(url))

uid = common.login(db, username, password)

#  Search and read records

result = models.execute(db, uid, password, 'res.partner', 'search_read',
                        [['id', '=', 1]])
number_of_customers = models.execute(db, uid, password, 'res.partner',
                                     'search_count', [])

print('Number of customers: ' + str(number_of_customers))
print('result: ' + str(result[0].get('name')))

#  Create records

#  the values dict below is illustrative
new_id = models.execute_kw(db, uid, password, 'res.partner', 'create',
                           [{'name': 'New Partner'}])
print('Created partner id: ' + str(new_id))
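
#  execute_kw also accepts a trailing dict of keyword arguments; a small
#  hedged example (the domain, fields and limit are illustrative):
companies = models.execute_kw(db, uid, password, 'res.partner', 'search_read',
                              [[['is_company', '=', True]]],
                              {'fields': ['name'], 'limit': 5})
print('Companies: ' + str(companies))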
Example n. 22
0
try:
    from xmlrpc import client as xmlrpclib
except ImportError:
    import xmlrpclib

DATABASE = 'andy_5_10'
USERNAME = '******'
PASSWORD = '******'

common = xmlrpclib.ServerProxy('http://emipro:8998/xmlrpc/2/common')
user_id = common.login(DATABASE, USERNAME, PASSWORD)
print("========user_id", user_id)

server = xmlrpclib.ServerProxy('http://emipro:8998/xmlrpc/2/object')

aws_iot = server.execute_kw(DATABASE, user_id, PASSWORD, 'aws.iot.log.ept',
                            'check_iot', ['G030JF0524415HFV', 'DOUBLE'])

print("********", aws_iot)
Example n. 23
0
 def test_unicode_host(self):
     server = xmlrpclib.ServerProxy("http://%s:%d/RPC2" % (ADDR, PORT))
     self.assertEqual(server.add("a", "\xe9"), "a\xe9")
Example n. 24
0
#import xmlrpclib
import xmlrpc.client as xmlrpclib

HOST = 'localhost'
PORT = 8011
DB = 'uber'
USER = '******'
PASS = '******'

root = 'http://%s:%d/xmlrpc/' % (HOST, PORT)

userid = xmlrpclib.ServerProxy(root + 'common').login(DB, USER, PASS)
print("Logged in as %s (Userid: %d)" % (USER, userid))

#Create
socket = xmlrpclib.ServerProxy(root + 'object')
args = {'name': 'DemoAbc', 'email': '*****@*****.**'}

#data_id = socket.execute(DB,userid,PASS,'guest.user','create',args)
res = socket.execute(DB, userid, PASS, 'guest.user', 'write', [1], args)
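
#  A hedged read-back to confirm the write (the field names are assumptions
#  about the custom guest.user model):
rec = socket.execute(DB, userid, PASS, 'guest.user', 'read', [1],
                     ['name', 'email'])
print('Updated record: ' + str(rec))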
Example n. 25
0
 def test_path2(self):
     p = xmlrpclib.ServerProxy(URL + "/foo/bar")
     self.assertEqual(p.add(6, 8), 6 + 8)
     self.assertRaises(xmlrpclib.Fault, p.pow, 6, 8)
Example n. 26
0
def sync_products():
    manager = mp.Manager()
    data_pool = manager.list()
    create_ids = manager.dict()
    write_ids = manager.dict()
    uom_ids = manager.dict()
    category_ids = manager.dict()
    location_ids = manager.dict()

    process_Q = []

    fp = open('files/iclitem1.csv', 'r')
    csv_reader = csv.DictReader(fp)

    fp1 = open('files/ivlioh.csv', 'r')
    csv_reader1 = csv.DictReader(fp1)
    sock = xmlrpclib.ServerProxy(URL, allow_none=True)

    all_locations = sock.execute(DB, UID, PSW, 'stock.location', 'search_read',
                                 [('usage', '=', 'internal')], ['id', 'name'])
    all_locations = {ele['name']: ele['id'] for ele in all_locations}

    for vals in csv_reader1:
        if vals['BIN-CODE'] and vals['BIN-CODE'] in all_locations:
            location_ids[vals['ITEM-CODE']] = all_locations[vals['BIN-CODE']]

    default_codes = []
    for vals in csv_reader:
        data_pool.append(vals)
        default_code = vals['ITEM-CODE']
        default_codes.append(default_code)

    sale_uoms = {}

    fp2 = open('files/ivlitum1.csv', 'r')
    csv_reader2 = csv.DictReader(fp2)
    for line in csv_reader2:
        product = line.get('ITEM-CODE', '')
        code = str(line.get('UOM')) + '_' + str(line.get('QTY'))
        if product in sale_uoms:
            sale_uoms[product].append(code)
        else:
            sale_uoms[product] = [code]

    sale_uoms = manager.dict(sale_uoms)

    fp.close()
    fp1.close()
    fp2.close()

    domain = [('default_code', 'in', default_codes), '|',
              ('active', '=', False), ('active', '=', True)]

    res = sock.execute(DB, UID, PSW, 'product.product', 'search_read', domain,
                       ['default_code'])
    # rebinds write_ids from the manager.dict() to a plain dict; the workers
    # only read it, so a per-process copy is sufficient
    write_ids = {rec['default_code']: rec['id'] for rec in res}

    uoms = sock.execute(DB, UID, PSW, 'uom.uom', 'search_read', [],
                        ['id', 'name'])
    uom_ids = {uom['name']: uom['id'] for uom in uoms}

    categories = sock.execute(DB, UID, PSW, 'product.category', 'search_read',
                              [], ['id', 'categ_code'])
    category_ids = {
        category['categ_code']: category['id']
        for category in categories
    }

    res = None
    default_codes = None
    uoms = None
    categories = None

    for i in range(WORKERS):
        pid = "Worker-%d" % (i + 1)
        worker = mp.Process(name=pid,
                            target=update_product,
                            args=(pid, data_pool, create_ids, write_ids,
                                  uom_ids, category_ids, location_ids,
                                  sale_uoms))
        process_Q.append(worker)
        worker.start()

    for worker in process_Q:
        worker.join()
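
# sync_products assumes these module-level settings and imports exist; a
# minimal sketch with placeholder values (UID would normally come from a
# common.authenticate() call):
import csv
import logging
import multiprocessing as mp
from xmlrpc import client as xmlrpclib

URL = 'http://localhost:8069'
DB = 'odoo-db'
UID = 1
PSW = 'admin'
WORKERS = 4

logger = logging.getLogger(__name__)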
Example n. 27
0
 def test_close(self):
     p = xmlrpclib.ServerProxy(self.url)
     self.assertEqual(p('close')(), None)
Example n. 28
0
def update_product(pid, data_pool, create_ids, write_ids, uom_ids,
                   category_ids, location_ids, sale_uoms):
    sock = xmlrpclib.ServerProxy(URL, allow_none=True)
    while data_pool:
        vals = {}  # defined up front so the error handler below can log it
        try:
            data = data_pool.pop()

            default_code = data.get('ITEM-CODE')
            code = str(data.get('ITEM-STOCK-UOM')) + '_' + str(
                data.get('ITEM-QTY-IN-STOCK-UM'))
            active = True
            purchase_ok = True
            #  Discontinued items ('D') stop being purchasable, but are only
            #  deactivated once stock runs out: inactive items break order imports
            if data.get('ITEM-STATUS') == 'D':
                purchase_ok = False
                if float(data.get('ITEM-QTY-ON-HAND')) <= 0.0:
                    active = False

            sale_uom_ids = []
            if default_code in sale_uoms:
                for uom in sale_uoms[default_code]:
                    sale_uom_id = uom_ids.get(uom)
                    if sale_uom_id:
                        sale_uom_ids.append(sale_uom_id)
                    else:
                        logger.debug(
                            'SALE UOM missing uom:{0} product:{1}'.format(
                                uom, default_code))
            uom_id = uom_ids.get(code)
            if uom_id:
                sale_uom_ids.append(uom_id)
            else:
                logger.error('UOM missing uom:{0} product:{1}'.format(
                    code, default_code))
                continue

            vals = {
                'name': data.get('ITEM-DESC').title(),
                'description_sale': data.get('ITEM-DESC').lower(),
                'description_purchase': data.get('ITEM-DESCR2').lower(),
                'default_code': default_code,
                'categ_id': category_ids.get(data.get('PROD-CODE')),
                'active': active,
                'type': 'product',
                'standard_price': data.get('ITEM-UNIT-COST'),
                'sale_ok': True,
                'taxes_id': [(6, 0, [3])],
                'lst_price': data.get('ITEM-AVG-SELL-PRC'),
                'purchase_ok': purchase_ok,
                'sale_uoms': [(6, 0, sale_uom_ids)],
                'uom_id': uom_ids.get(code),
                'uom_po_id': uom_ids.get(code),
                'volume': data.get('ITEM-CUBE'),
                'weight': data.get('ITEM-WEIGHT'),
                'property_stock_location': location_ids.get(default_code)
            }

            res = write_ids.get(default_code, [])
            if res:
                sock.execute(DB, UID, PSW, 'product.product', 'write', res,
                             vals)
                print(pid, 'UPDATE - PRODUCT', res)
            else:
                res = sock.execute(DB, UID, PSW, 'product.product', 'create',
                                   vals)
                print(pid, 'CREATE - PRODUCT', res)

        except Exception as e:
            logger.error('Error {0} {1}'.format(vals, e))
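
# Note on the (6, 0, ids) triplets in vals above: this is Odoo's relational
# "replace" command, which overwrites the many2many relation with exactly the
# given ids; e.g. 'taxes_id': [(6, 0, [3])] links tax id 3 and unlinks any
# other taxes.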
Example n. 29
0
 def __init__(self):
     self.rat_client = client.ServerProxy("http://{0}:{1}".format(server_ip, server_port))
    #
    # Make the call.
    #
    try:
        response = server.raw_request(stuff)
    except xmlrpclib.Fault as e:
        print(e.faultString)
        sys.exit(-1)

    print(str(response))
    sys.exit(0)
elif len(req_args):
    # Get a handle on the server.
    server = xmlrpclib.ServerProxy(URI, context=ctx)
    # Method and args are on the command line.
    sys.exit(do_method(server, req_args))
else:
    # Get a handle on the server.
    server = xmlrpclib.ServerProxy(URI, context=ctx)
    # Prompt the user for input.
    try:
        while True:
            line = input("$ ")
            tokens = line.split(" ")
            if len(tokens) >= 1 and len(tokens[0]) > 0:
                print(str(do_method(server, tokens)))