Ejemplo n.º 1
0
    def close(self):
        """Close the underlying file and persist its metadata.

        For write modes ('w', 'w+', 'r+') the file is re-indexed and the
        matching ``ir_attachment`` row is updated with index content, mime
        type and size; for append modes ('a', 'a+') only the size is
        updated.  All database work runs on a dedicated cursor.
        """
        # TODO: locking in init, close()
        fname = self.__file.name
        self.__file.close()

        if self.mode in ('w', 'w+', 'r+'):
            par = self._get_parent()
            cr = pooler.get_db(par.context.dbname).cursor()
            try:
                icont = ''
                mime = ''
                filename = par.path
                if isinstance(filename, (tuple, list)):
                    filename = '/'.join(filename)

                try:
                    mime, icont = cntIndex.doIndex(None,
                                                   filename=filename,
                                                   content_type=None,
                                                   realfname=fname)
                except Exception:
                    # Indexing is best-effort: keep going with empty content.
                    logging.getLogger('document.storage').debug(
                        'Cannot index file:', exc_info=True)

                try:
                    icont_u = ustr(icont)
                except UnicodeError:
                    icont_u = ''

                try:
                    fsize = os.stat(fname).st_size
                    cr.execute("UPDATE ir_attachment " \
                                " SET index_content = %s, file_type = %s, " \
                                " file_size = %s " \
                                "  WHERE id = %s",
                                (icont_u, mime, fsize, par.file_id))
                    par.content_length = fsize
                    par.content_type = mime
                    cr.commit()
                except Exception:
                    logging.getLogger('document.storage').warning(
                        'Cannot save file indexed content:', exc_info=True)
            finally:
                # Fix: always release the cursor; the original closed it only
                # on the success path, leaking a connection on exceptions.
                cr.close()

        elif self.mode in ('a', 'a+'):
            try:
                par = self._get_parent()
                cr = pooler.get_db(par.context.dbname).cursor()
                try:
                    fsize = os.stat(fname).st_size
                    cr.execute("UPDATE ir_attachment SET file_size = %s " \
                                "  WHERE id = %s",
                                (fsize, par.file_id))
                    par.content_length = fsize
                    cr.commit()
                finally:
                    cr.close()
            except Exception:
                logging.getLogger('document.storage').warning(
                    'Cannot save file appended content:', exc_info=True)
    def close(self):
        """Close the underlying file and persist its metadata.

        Write modes re-index the file and update index content, mime type
        and size on the ``ir_attachment`` row; append modes update only the
        size.  Uses a dedicated cursor that is always released.
        """
        # TODO: locking in init, close()
        fname = self.__file.name
        self.__file.close()

        if self.mode in ('w', 'w+', 'r+'):
            par = self._get_parent()
            cr = pooler.get_db(par.context.dbname).cursor()
            try:
                icont = ''
                mime = ''
                filename = par.path
                if isinstance(filename, (tuple, list)):
                    filename = '/'.join(filename)

                try:
                    mime, icont = cntIndex.doIndex(None, filename=filename,
                            content_type=None, realfname=fname)
                except Exception:
                    # Best-effort indexing; an empty index is acceptable.
                    _logger.debug('Cannot index file:', exc_info=True)

                try:
                    icont_u = ustr(icont)
                except UnicodeError:
                    icont_u = ''

                try:
                    fsize = os.stat(fname).st_size
                    cr.execute("UPDATE ir_attachment " \
                                " SET index_content = %s, file_type = %s, " \
                                " file_size = %s " \
                                "  WHERE id = %s",
                                (icont_u, mime, fsize, par.file_id))
                    par.content_length = fsize
                    par.content_type = mime
                    cr.commit()
                except Exception:
                    _logger.warning('Cannot save file indexed content:', exc_info=True)
            finally:
                # Fix: always release the cursor (it leaked on exceptions).
                cr.close()

        elif self.mode in ('a', 'a+' ):
            try:
                par = self._get_parent()
                cr = pooler.get_db(par.context.dbname).cursor()
                try:
                    fsize = os.stat(fname).st_size
                    cr.execute("UPDATE ir_attachment SET file_size = %s " \
                                "  WHERE id = %s",
                                (fsize, par.file_id))
                    par.content_length = fsize
                    cr.commit()
                finally:
                    cr.close()
            except Exception:
                _logger.warning('Cannot save file appended content:', exc_info=True)
Ejemplo n.º 3
0
    def close(self):
        """Close the underlying file and persist its metadata.

        Write modes re-index the file and update index content, mime type
        and size on the ``ir_attachment`` row; append modes update only the
        size.  Uses a dedicated cursor that is always released.
        """
        # TODO: locking in init, close()
        fname = self.__file.name
        self.__file.close()

        if self.mode in ("w", "w+", "r+"):
            par = self._get_parent()
            cr = pooler.get_db(par.context.dbname).cursor()
            try:
                icont = ""
                mime = ""
                filename = par.path
                if isinstance(filename, (tuple, list)):
                    filename = "/".join(filename)

                try:
                    mime, icont = cntIndex.doIndex(None, filename=filename, content_type=None, realfname=fname)
                except Exception:
                    # Best-effort indexing; an empty index is acceptable.
                    logging.getLogger("document.storage").debug("Cannot index file:", exc_info=True)

                try:
                    icont_u = ustr(icont)
                except UnicodeError:
                    icont_u = ""

                try:
                    fsize = os.stat(fname).st_size
                    cr.execute(
                        "UPDATE ir_attachment "
                        " SET index_content = %s, file_type = %s, "
                        " file_size = %s "
                        "  WHERE id = %s",
                        (icont_u, mime, fsize, par.file_id),
                    )
                    par.content_length = fsize
                    par.content_type = mime
                    cr.commit()
                except Exception:
                    logging.getLogger("document.storage").warning("Cannot save file indexed content:", exc_info=True)
            finally:
                # Fix: always release the cursor (it leaked on exceptions).
                cr.close()

        elif self.mode in ("a", "a+"):
            try:
                par = self._get_parent()
                cr = pooler.get_db(par.context.dbname).cursor()
                try:
                    fsize = os.stat(fname).st_size
                    cr.execute("UPDATE ir_attachment SET file_size = %s " "  WHERE id = %s", (fsize, par.file_id))
                    par.content_length = fsize
                    cr.commit()
                finally:
                    cr.close()
            except Exception:
                logging.getLogger("document.storage").warning("Cannot save file appended content:", exc_info=True)
Ejemplo n.º 4
0
    def _start_job_process(self,
                           cr,
                           uid,
                           ids=None,
                           use_new_cursor=False,
                           context=None):
        """Send the 'start_process' workflow signal to every open
        ``etl.job.process`` whose schedule date has been reached.

        :param ids: process ids to consider; all 'open' ones when falsy
        :param use_new_cursor: when truthy, it is the *database name* to
            open a dedicated cursor on; that cursor is committed and (fix)
            closed before returning — the original leaked it
        """
        if not context:
            context = {}
        maxdate = DateTime.now()

        if use_new_cursor:
            # ``use_new_cursor`` doubles as the database name here.
            cr = pooler.get_db(use_new_cursor).cursor()
        try:
            wf_service = netsvc.LocalService("workflow")

            process_obj = self.pool.get('etl.job.process')
            if not ids:
                ids = process_obj.search(cr, uid, [('state', '=', 'open')])
            for process in process_obj.browse(cr, uid, ids):
                if process.schedule_date:
                    if maxdate.strftime(
                            '%Y-%m-%d %H:%M:%S') >= process.schedule_date:
                        wf_service.trg_validate(uid, 'etl.job.process',
                                                process.id,
                                                'start_process', cr)
            if use_new_cursor:
                cr.commit()
        finally:
            if use_new_cursor:
                cr.close()
Ejemplo n.º 5
0
    def run(self, cr, uid, ids=None, context=None):
        """Execute every queued workflow job, committing after each one.

        Each job runs on its own cursor: on success the job is removed and
        the cursor committed, on failure the cursor is rolled back and the
        error logged, and the cursor is closed either way.

        :param ids: job id(s) to process (list, int or long); when None,
            every job is processed
        :return: True
        """
        if ids is None:
            ids = self.search(cr, uid, [], context=context)
        elif isinstance(ids, (int, long)):
            ids = [ids]

        for job in self.browse(cr, uid, ids, context=context):
            job_cr = pooler.get_db(cr.dbname).cursor()
            try:
                done = self._call_action(job_cr, uid, job, context=context)
                if done:
                    self.unlink(job_cr, uid, job.id, context=context)
            except Exception:
                job_cr.rollback()
                _logger.exception(
                    "Failed to execute automatic workflow job %s"
                    "on %s with id %s", job.action, job.res_model, job.res_id)
            else:
                job_cr.commit()
            finally:
                job_cr.close()
        return True
Ejemplo n.º 6
0
 def check(self, db, uid, passwd):
     """Validate the SSO session of user ``uid`` on database ``db``.

     ``passwd`` is the per-session password issued at login.  When it does
     not match the cached value, the pair is re-checked against
     ``res_users``/``res_users_expiry``; a valid session is then extended
     with a fresh expiry date (SSO users only).

     :raises OpenERPException: when no password is given or the server
         session has expired.
     """
     logger = logging.getLogger('smile_sso')
     if not passwd:
         error_msg = "No password authentication not supported!"
         logger.error(error_msg)
         raise OpenERPException(error_msg, ('', '', ''))
     cr = pooler.get_db(db).cursor()
     try:
         # Autocommit: each statement below is committed immediately, so
         # the expiry refresh survives regardless of the caller's
         # transaction.
         cr.autocommit(True)
         if self._uid_cache.get(db, {}).get(uid) != passwd:
             # Cache miss: re-validate password and expiry in the database.
             cr.execute("SELECT u.id, u.password FROM res_users u LEFT JOIN res_users_expiry e ON u.id = e.user_id "
                        "WHERE u.id=%s AND u.password=%s AND u.active=TRUE "
                        "AND (e.expiry_date IS NULL OR e.expiry_date>=now() AT TIME ZONE 'UTC') "
                        "LIMIT 1", (uid, passwd))
             res = cr.fetchone()
             if not res:
                 error_msg = "Server session expired for the user [uid=%s]" % uid
                 logger.error(error_msg)
                 raise OpenERPException(error_msg, ('', '', ''))
             self._uid_cache.setdefault(db, {}).update({uid: passwd})
         expiry_date = self.pool.get('res.users.expiry').get_expiry_date()
         cr.execute("SELECT u.login, e.login, u.sso FROM res_users u LEFT JOIN res_users_expiry e ON u.id = e.user_id "
                    "WHERE u.id=%s LIMIT 1", (uid,))
         user_info = cr.fetchone()
         # user_info = (users.login, expiry-row login or NULL, users.sso)
         if user_info[2]:
             if user_info[1]:
                 # An expiry row already exists for this user: refresh it.
                 cr.execute("UPDATE res_users_expiry SET expiry_date=%s WHERE user_id=%s", (expiry_date, int(uid)))
             else:
                 cr.execute("INSERT INTO res_users_expiry (user_id, login, expiry_date) VALUES (%s, %s, %s AT TIME ZONE 'UTC')",
                            (int(uid), user_info[0], expiry_date))
             logger.debug("Server session extended for the user [uid=%s]", uid)
     finally:
         cr.close()
Ejemplo n.º 7
0
 def sso_login(self, db, login, length=64, context=None):
     """Open (or reuse) an SSO session for ``login`` on database ``db``.

     A random session password of ``length`` characters is generated; when
     the user has no currently valid password, it is stored together with
     a fresh expiry date (if one applies).

     :return: dict with the user's ``id`` and session ``password``, or
         None (implicitly) when the login matches no row.
     """
     password = generate_random_password(length)
     expiry_date = self.get_expiry_date()
     # Build the UPDATE dynamically: expiry_date is set only when one
     # applies.  The interpolated fragments are constants containing %s
     # placeholders, so the final query stays fully parameterized.
     set_clause = 'date=now(), password=%s'
     params = [password]
     if expiry_date:
         set_clause += ', expiry_date=%s'
         params.append(expiry_date)
     where_clause = 'login=%s'
     params.append(login)
     cr = pooler.get_db(db).cursor()
     try:
         cr.execute('SELECT id, password FROM res_users WHERE login=%s AND password IS NOT NULL '
                    'AND active=TRUE AND (expiry_date IS NULL OR expiry_date>=now()) LIMIT 1', (login,))
         res = cr.dictfetchone()
         if not res or not res['password']:
             # No live session: install the new password/expiry and commit.
             query = 'UPDATE res_users SET %s WHERE %s RETURNING id, password' % (set_clause, where_clause)
             cr.execute(query, params)
             res = cr.dictfetchone()
             cr.commit()
         if res:
             netsvc.Logger().notifyChannel('smile_sso', netsvc.LOG_DEBUG, "Login of the user [login=%s]" % login)
             return res
     finally:
         cr.close()
    def generate_invoices_threaded(self, cr, uid, ids, context=None):
        """Background worker: render the wizard's invoices, the address CSV
        and the receipts file, zip everything and store the result (plus a
        human-readable summary) back on the wizard.

        Runs on a dedicated cursor so it can commit independently of the
        caller's transaction; the original neither committed nor closed it,
        so the results were lost and the connection leaked.
        """
        if not context:
            context = {}

        cursor = pooler.get_db(cr.dbname).cursor()
        try:
            wiz = self.browse(cursor, uid, ids[0], context=context)
            fact_ids = json.loads(wiz.invoice_ids)
            tmp_dir = tempfile.mkdtemp()

            failed_invoices, info_inv = self.generate_inv(cursor, uid, wiz,
                                                          fact_ids, tmp_dir,
                                                          context)
            # Only invoices that rendered correctly go to the CSV/receipts.
            clean_invoices = list(set(fact_ids) - set(failed_invoices))
            info_csv = self.generate_csv(cursor, uid, wiz, clean_invoices,
                                         tmp_dir, 'Adreces.csv', context)
            info_reb = self.generate_reb(cursor, uid, wiz, clean_invoices,
                                         tmp_dir, context)

            wiz.write({
                'state':
                'done',
                'file':
                self.get_zip_from_directory(tmp_dir, True),
                'info':
                wiz.info + "\n" + info_inv + "\n" + info_csv + "\n" + info_reb,
            })
            # Fix: persist the wizard update on the dedicated cursor.
            cursor.commit()
        except Exception:
            cursor.rollback()
            raise
        finally:
            # Fix: always release the cursor.
            cursor.close()
Ejemplo n.º 9
0
 def action_start_component(self, key, signal_data=None, data=None):
     """Record the start of an ETL component run.

     Creates a statistics row and/or a log line for the job process
     (depending on the process flags), then commits.

     :param key: dict with the component 'id' and 'instance'
     :param signal_data: unused; kept for signature compatibility
     :param data: dict with 'dbname', 'uid' and 'process_id'
     :return: True
     """
     # Fix: no shared mutable default arguments.
     if signal_data is None:
         signal_data = {}
     if data is None:
         data = {}
     cr = pooler.get_db(data['dbname']).cursor()
     try:
         pool = pooler.get_pool(cr.dbname)
         uid = data['uid']
         process_obj = pool.get('etl.job.process')
         process = process_obj.browse(cr, uid, data['process_id'], context={})
         if process.statistics:
             pool.get('etl.job.process.statistics').create(
                 cr, uid, {
                     'name': key['id'],
                     'signal': 'start',
                     'start_date': time.strftime('%Y-%m-%d %H:%M:%S'),
                     'state': 'start',
                     'job_process_id': data['process_id']
                 })
         if process.log:
             pool.get('etl.job.process.log').create(
                 cr, uid, {
                     'date_time':
                     time.strftime('%Y-%m-%d %H:%M:%S'),
                     'desc':
                     str(key['instance']) + str(key['id']) +
                     'component is started...'
                 })
         cr.commit()
     finally:
         # Fix: release the cursor (the original leaked it).
         cr.close()
     return True
Ejemplo n.º 10
0
    def exec_workflow(self, db, uid, model, method, *args, **argv):
        """Audit-trail wrapper around workflow execution.

        When a subscribed ``audittrail.rule`` with ``log_workflow`` matches
        this model (and the current user, if the rule restricts users), the
        signal is routed through ``self.log_fct``; otherwise execution falls
        through to the regular implementation.
        """
        pool = pooler.get_pool(db)
        logged_uids = []
        fct_src = super(audittrail_objects_proxy, self).exec_workflow
        field = method
        rule = False
        model_pool = pool.get('ir.model')
        rule_pool = pool.get('audittrail.rule')
        cr = pooler.get_db(db).cursor()
        # Immediate commits: audit lookups/logging must not depend on the
        # workflow transaction.
        cr.autocommit(True)
        try:
            model_ids = model_pool.search(cr, uid, [('model', '=', model)])
            # Delegate straight to the parent when the audittrail.rule model
            # is not registered in this pool.
            for obj_name in pool.obj_list():
                if obj_name == 'audittrail.rule':
                    rule = True
            if not rule:
                return super(audittrail_objects_proxy, self).exec_workflow(db, uid, model, method, *args, **argv)
            if not model_ids:
                return super(audittrail_objects_proxy, self).exec_workflow(db, uid, model, method, *args, **argv)

            rule_ids = rule_pool.search(cr, uid, [('object_id', 'in', model_ids), ('state', '=', 'subscribed')])
            if not rule_ids:
                return super(audittrail_objects_proxy, self).exec_workflow(db, uid, model, method, *args, **argv)

            for thisrule in rule_pool.browse(cr, uid, rule_ids):
                for user in thisrule.user_id:
                    logged_uids.append(user.id)
                if not logged_uids or uid in logged_uids:
                    if thisrule.log_workflow:
                        return self.log_fct(db, uid, model, method, fct_src, *args)
                # NOTE(review): this return is inside the loop, so only the
                # first rule is ever evaluated — confirm that is intended.
                return super(audittrail_objects_proxy, self).exec_workflow(db, uid, model, method, *args, **argv)

            return True
        finally:
            cr.close()
Ejemplo n.º 11
0
    def retry_failed_lines(self, cr, uid, ids, context=None):
        """Re-run the failed lines of the given external reports.

        For each report a fresh reporting cycle is started, its failed
        lines are retried, and the report is closed again.  Work happens on
        a dedicated cursor, committed once at the end and (fix) always
        closed — the original leaked it whenever a retry raised.

        :param ids: report id or list of report ids
        :return: True
        """
        retry_cr = pooler.get_db(cr.dbname).cursor()
        try:
            self._logger.info("retry the failed lines of the reports ids %s" %
                              (ids, ))
            if isinstance(ids, int):
                ids = [ids]
            if not context:
                context = {}
            # NOTE(review): this mutates the caller-supplied context dict.
            context['origin'] = 'retry'
            for report in self.read(retry_cr,
                                    uid,
                                    ids, ['failed_line_ids'],
                                    context=context):
                failed_line_ids = report['failed_line_ids']
                if failed_line_ids:
                    context['external_report_id'] = report['id']
                    self.start_report(retry_cr, uid, report['id'],
                                      context=context)
                    self.pool.get('external.report.line').retry(
                        retry_cr, uid, failed_line_ids, context=context)
                    self.end_report(retry_cr, uid, report['id'],
                                    context=context)
            retry_cr.commit()
        finally:
            retry_cr.close()
        return True
 def _inventory_export(self, db_name, uid, ids, esale_products_ids=None, *args):
     """Export current stock of e-sale products to the Spree API.

     For each ``esale.spree.web`` website in ``ids`` the forecasted
     quantity of every mapped product is PUT to
     ``/api/variants/<spree_id>`` and a ``res.request`` report is created
     summarising any HTTP errors.

     :param db_name: database name; a dedicated cursor is opened on it
     :param esale_products_ids: optional subset of ``esale.spree.product``
         ids; all products of each website are used when empty
     """
     if esale_products_ids is None:
         esale_products_ids = []
     # NOTE(review): cr is closed only on the success path; an exception
     # anywhere below leaks the cursor — consider try/finally.
     cr = pooler.get_db(db_name).cursor()
     pool = pooler.get_pool(cr.dbname)
     for website in pool.get('esale.spree.web').browse(cr, uid, ids):
         exceptions = ""
         if not esale_products_ids:
             esale_products_ids = pool.get('esale.spree.product').search(cr, uid, [('website_id', '=', website.id)])
         if esale_products_ids:
             for esale_product_id in esale_products_ids:
                 esale_product = pool.get('esale.spree.product').browse(cr, uid, esale_product_id)
                 product = pool.get('product.product').browse(cr, uid, esale_product.product_id.id)
                 # Forecasted quantity is pushed as Spree's count_on_hand.
                 qty = product.virtual_available
                 params = {'variant': {'count_on_hand': qty}}                        
                 url = "/api/variants/%s" % (esale_product.spree_id)
                 response = get_spree_list(website.url, url, website.api_key, "PUT", params)
                 if response[0] != 200:
                    exceptions += "Error Ocurred HTTP response: %s:%s " % (response[0], response[1])                    
                 # NOTE(review): this runs inside the product loop, so later
                 # errors are appended AFTER "No exceptions" — confirm it was
                 # not meant to run once after the loop.
                 if not exceptions:
                     exceptions += "No exceptions"
         request_body = """
             Inventory Export End                
             
             End Date: %s       
             
             Exceptions: 
             %s
         """ % (time.strftime('%Y-%m-%d %H:%M:%S'), exceptions)
         pool.get('res.request').create(cr, uid, {'name': 'Spree Inventory Export report',
                                                  'act_from': uid,
                                                  'act_to': uid,
                                                  'body': request_body})
         cr.commit()
     cr.close()
Ejemplo n.º 13
0
 def run(self):
     """Import all data into OpenERP.

     Entry point of the import process: every table in
     ``self.table_list`` is imported once (dependencies first), all work
     is committed on a dedicated cursor, and a notification e-mail
     summarising the per-table results is sent at the end.
     """
     self.data_started = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     # NOTE(review): start is stored as ``data_started`` but end as
     # ``date_ended`` — the asymmetric names look like a typo; check which
     # one external readers expect before renaming either.
     self.cr = pooler.get_db(self.cr.dbname).cursor()
     try:
         result = []
         imported = set()  # avoid importing the same table twice
         for table in self.table_list:
             to_import = self.get_mapping()[table].get('import', True)
             if not table in imported:
                 # Dependencies are imported first; their results are
                 # collected too.
                 res = self._resolve_dependencies(self.get_mapping()[table].get('dependencies', []), imported)
                 result.extend(res)
                 if to_import:
                     (position, warning) = self._import_table(table)
                     result.append((table, position, warning))
                 imported.add(table)
         self.cr.commit()

     finally:
         self.cr.close()
     self.date_ended = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     self._send_notification_email(result)
Ejemplo n.º 14
0
 def mkdir(self, node, basename, attr):
     """Create directory ``basename`` under ``node`` (SFTP handler).

     Maps the SFTP mkdir onto a ``document.directory`` record; returns
     ``paramiko.SFTP_OK`` on success or a converted errno on failure.
     """
     try:
         if not node:
             raise OSError(1, 'Operation not permited.')
         uid = node.context.uid
         pool = pooler.get_pool(node.context.dbname)
         cr = pooler.get_db(node.context.dbname).cursor()
         try:
             basename = _to_unicode(basename)
             object2 = False
             if isinstance(node, node_res_obj):
                 object2 = node and pool.get(
                     node.context.context['res_model']).browse(
                         cr, uid, node.context.context['res_id']) or False
             obj = node.context._dirobj.browse(cr, uid,
                                               node.context.context['dir_id'])
             if obj and (obj.type == 'ressource') and not node.object2:
                 raise OSError(1, 'Operation not permited.')
             val = {
                 'name': basename,
                 # Fix: the original tested the *builtin* ``object`` here,
                 # which is always truthy; ``obj`` is what was meant.
                 'ressource_parent_type_id': obj and obj.ressource_type_id.id
                 or False,
                 'ressource_id': object2 and object2.id or False
             }
             if (obj and (obj.type in ('directory'))) or not object2:
                 val['parent_id'] = obj and obj.id or False
             # Check if it already exists!
             pool.get('document.directory').create(cr, uid, val)
             cr.commit()
         finally:
             # Fix: release the cursor even when creation fails (it leaked).
             cr.close()
         return paramiko.SFTP_OK
     except Exception as err:
         # Fix: the original wrote ``e.errno`` with an undefined ``e``,
         # raising NameError instead of returning the converted errno.
         return paramiko.SFTPServer.convert_errno(err.errno)
Ejemplo n.º 15
0
    def list_folder(self, node):
        """List the contents of a folder node.

        When ``node`` is None, one pseudo-node per accessible database is
        returned; otherwise the node's children are fetched on a dedicated
        cursor (fix: the cursor is now closed even when ``children``
        raises).  OSErrors are converted to SFTP error codes.
        """
        try:
            class false_node:
                # Placeholder object representing a database root entry.
                write_date = None
                create_date = None
                type = 'database'

                def __init__(self, db):
                    self.path = '/' + db

            if node is None:
                result = []
                for db in self.db_list():
                    uid = self.server.check_security(db, self.server.username,
                                                     self.server.key)
                    if uid:
                        result.append(false_node(db))
                return result
            cr = pooler.get_db(node.context.dbname).cursor()
            try:
                return node.children(cr)
            finally:
                cr.close()
        except OSError as e:
            return paramiko.SFTPServer.convert_errno(e.errno)
Ejemplo n.º 16
0
        def go(id, uid, ids, datas, context):
            """Render report ``id`` in this background thread and stash the
            outcome (result/path/format or exception) in ``self._reports``."""
            cr = pooler.get_db(db).cursor()
            import traceback
            import sys
            try:
                obj = netsvc.LocalService('report.'+object)
                bg_obj = pooler.get_pool(cr.dbname).get('memory.background.report')
                (result, format) = obj.create(cr, uid, ids, datas, context)
                if not result:
                    # NOTE(review): sys.exc_info() is empty when create()
                    # merely returned a falsy result — confirm intent.
                    tb = sys.exc_info()
                    self._reports[id]['exception'] = ExceptionWithTraceback('RML is not available at specified location or not enough data to print!', tb)
                if context.get('background_id'):
                    bg_obj.update_percent(cr, uid, context['background_id'], {'percent': 1.00}, context=context)
                if isinstance(result, tools.misc.Path):
                    self._reports[id]['path'] = result.path
                    self._reports[id]['result'] = ''
                    self._reports[id]['delete'] = result.delete
                else:
                    self._reports[id]['result'] = result
                self._reports[id]['format'] = format
                self._reports[id]['state'] = True
            except Exception as exception:
                # Fix: py3-compatible except syntax (valid from Python 2.6).
                tb = sys.exc_info()
                tb_s = "".join(traceback.format_exception(*tb))
                logger = netsvc.Logger()
                logger.notifyChannel('web-services', netsvc.LOG_ERROR,
                        'Exception: %s\n%s' % (str(exception), tb_s))
                if hasattr(exception, 'name') and hasattr(exception, 'value'):
                    self._reports[id]['exception'] = ExceptionWithTraceback(tools.ustr(exception.name), tools.ustr(exception.value))
                else:
                    self._reports[id]['exception'] = ExceptionWithTraceback(tools.exception_to_unicode(exception), tb)
                self._reports[id]['state'] = True
            finally:
                # Fix: release the thread's cursor (the original leaked it).
                cr.close()
Ejemplo n.º 17
0
 def action_end_component(self, key, signal_data=None, data=None):
     """Record the end of an ETL component run.

     Closes the matching statistics row and/or appends a log line
     (depending on the process flags), then commits.

     :param key: dict with the component 'id' and 'instance'
     :param signal_data: unused; kept for signature compatibility
     :param data: dict with 'dbname', 'uid' and 'process_id'
     :return: True
     """
     # Fix: no shared mutable default arguments.
     if signal_data is None:
         signal_data = {}
     if data is None:
         data = {}
     cr = pooler.get_db(data['dbname']).cursor()
     try:
         pool = pooler.get_pool(cr.dbname)
         uid = data['uid']
         process_obj = pool.get('etl.job.process')
         process = process_obj.browse(cr, uid, data['process_id'], context={})
         if process.statistics:
             comp_obj = pool.get('etl.job.process.statistics')
             comp_ids = comp_obj.search(
                 cr, uid, [('job_process_id', '=', data['process_id']),
                           ('name', '=', key['id'])])
             comp_obj.write(
                 cr, uid, comp_ids, {
                     'end_date': time.strftime('%Y-%m-%d %H:%M:%S'),
                     'signal': 'end',
                     'state': 'end'
                 })
         if process.log:
             pool.get('etl.job.process.log').create(
                 cr, uid, {
                     'date_time': time.strftime('%Y-%m-%d %H:%M:%S'),
                     'desc': 'Component' + str(key['instance']) +
                     str(key['id']) + ' is ended...',
                     'job_process_id': data['process_id']
                 })
         cr.commit()
     finally:
         # Fix: release the cursor (the original leaked it).
         cr.close()
     return True
Ejemplo n.º 18
0
    def __call__(self, source):
        """Translate ``source`` using the calling frame's ``cr``/``lang``.

        Inspects the caller's locals for a cursor and a language (falling
        back to the last positional arg and the first pooled database),
        then looks ``source`` up in ``ir_translation``.  Returns ``source``
        unchanged whenever no cursor/language can be found.
        """
        try:
            frame = inspect.stack()[1][0]
        except Exception:
            # Fix: bare except also swallowed SystemExit/KeyboardInterrupt.
            return source

        cr = frame.f_locals.get('cr')
        try:
            lang = (frame.f_locals.get('context') or {}).get('lang', False)
            if not (cr and lang):
                args = frame.f_locals.get('args', False)
                if args:
                    lang = args[-1].get('lang', False)
                    if frame.f_globals.get('pooler', False):
                        # NOTE(review): this cursor is never closed (leak);
                        # confirm ownership before adding a close() here.
                        cr = pooler.get_db(
                            frame.f_globals['pooler'].pool_dic.keys()
                            [0]).cursor()
            if not (lang and cr):
                return source
        except Exception:
            # Fix: narrowed from a bare except for the same reason.
            return source

        cr.execute(
            'select value from ir_translation where lang=%s and type IN (%s,%s) and src=%s',
            (lang, 'code', 'sql_constraint', source))
        res_trans = cr.fetchone()
        return res_trans and res_trans[0] or source
Ejemplo n.º 19
0
    def pull_continue_thread(self, cr, uid, ids, context=None):
        """Background thread body for a USB/RW pull.

        Imports the uploaded zip, runs the pulled updates and messages, and
        stores a human-readable result (or the error) on the wizard.  Runs
        on a dedicated cursor which is now committed and closed in all
        cases — the original neither committed nor closed it, so the
        recorded results were lost and the connection leaked.
        """
        _logger = logging.getLogger('pull.rw')
        cr = pooler.get_db(cr.dbname).cursor()
        try:
            wizard = self.browse(cr, uid, ids[0])
            #US-26: Added a check if the zip file has already been imported before
            syncusb = self.pool.get('sync.usb.files')
            md5 = syncusb.md5(wizard.pull_data)
            self.write(cr, uid, ids, {'in_progress': True})
            updates_pulled = update_pull_error = updates_ran = update_run_error = \
            messages_pulled = message_pull_error = messages_ran = message_run_error = 0
            try:
                updates_pulled, update_pull_error, updates_ran, update_run_error, \
                messages_pulled, message_pull_error, messages_ran, message_run_error = self.pool.get('sync.client.entity').usb_pull(cr, uid, wizard.pull_data, context=context)
            except zipfile.BadZipfile:
                raise osv.except_osv(_('Not a Zip File'), _('The file you uploaded was not a valid .zip file'))

            #Update list of pulled files
            syncusb.create(cr, uid, {
                'sum': md5,
                'date': datetime.datetime.now().isoformat(),
            }, context=context)

            # handle returned values
            pull_result = ''
            if not update_pull_error:
                pull_result += 'Pulled %d update(s)' % updates_pulled
                if not update_run_error:
                    pull_result += '\nRan %s update(s)' % updates_ran
                else:
                    pull_result += '\nError while executing %s update(s): %s' % (updates_ran, update_run_error)
            else:
                pull_result += 'Got an error while pulling %d update(s): %s' % (updates_pulled, update_pull_error)

            if not message_pull_error:
                pull_result += '\nPulled %d message(s)' % messages_pulled
                if not message_run_error:
                    pull_result += '\nRan %s message(s)' % messages_ran
                else:
                    pull_result += '\nError while executing %s message(s): %s' % (messages_ran, message_run_error)
            else:
                pull_result += '\nGot an error while pulling %d message(s): %s' % (messages_pulled, message_pull_error)

            # If the correct sequence is received, then update this value into the DB for this instance, and inform in the RW sync dialog
            rw_pull_sequence = context.get('rw_pull_sequence', -1)
            if rw_pull_sequence != -1:
                entity = self._get_entity(cr, uid, context)
                self.pool.get('sync.client.entity').write(cr, uid, entity.id, {'rw_pull_sequence': rw_pull_sequence}, context)
                pull_result += '\n\nThe pulling file sequence is updated. The next expected sequence is %d' % (rw_pull_sequence + 1)

            vals = {
                'pull_result': pull_result,
                'usb_sync_step': self._get_usb_sync_step(cr, uid, context=context),
                'push_file_visible': False,
            }

            self.write(cr, uid, ids, vals, context=context)
        except osv.except_osv as e:
            # Fix: py3-compatible except syntax (valid from Python 2.6).
            self.write(cr, uid, ids, {'pull_result': "Error: %s" % e.value})
            _logger.error("%s : %s" % (tools.ustr(e.value), tools.ustr(traceback.format_exc())))
        finally:
            # Persist whichever outcome was written (result or error
            # message) and release the thread-local cursor.
            cr.commit()
            cr.close()
Ejemplo n.º 20
0
    def db_list(self):
        """Get the list of available databases, with FTPd support

        Only databases whose ``ir_module_module`` table exists and where
        the ``document_ftp`` module is installed (or being installed or
        upgraded) are kept.  The result is cached on ``self.db_name_list``.
        """
        db_service = netsvc.ExportService.getService('db')
        candidates = db_service.exp_list(document=True)
        self.db_name_list = []
        for candidate in candidates:
            db, cr = None, None
            try:
                try:
                    db = pooler.get_db(candidate)
                    cr = db.cursor()
                    # Skip databases that were never initialised for OpenERP.
                    cr.execute("SELECT 1 FROM pg_class WHERE relkind = 'r' AND relname = 'ir_module_module'")
                    if not cr.fetchone():
                        continue

                    cr.execute("SELECT id FROM ir_module_module WHERE name = 'document_ftp' AND state IN ('installed', 'to install', 'to upgrade') ")
                    row = cr.fetchone()
                    if row and len(row):
                        self.db_name_list.append(candidate)
                    cr.commit()
                except Exception:
                    self._log.warning('Cannot use db "%s"', candidate)
            finally:
                if cr is not None:
                    cr.close()
        return self.db_name_list
Ejemplo n.º 21
0
    def exp_report_get(self, db, uid, report_id):
        """Fetch a finished report and, when its configured action is not
        'client', send the rendered result straight to the configured
        printer before delegating to the parent implementation."""
        cr = pooler.get_db(db).cursor()
        try:
            pool = pooler.get_pool(cr.dbname)
            # Load the report's defaults: name, action and printer.
            report_obj = pool.get('ir.actions.report.xml')
            found = report_obj.search(
                cr, uid,
                [('report_name', '=', self._reports[report_id]['report_name'])])
            if found:
                report = report_obj.browse(cr, uid, found[0])
                name = report.name
                behaviour = report.behaviour()[report.id]
                action = behaviour['action']
                printer = behaviour['printer']
                if action != 'client':
                    entry = self._reports and self._reports.get(report_id, False)
                    if entry and entry.get('result', False) and entry.get('format', False):
                        report_obj.print_direct(
                            cr, uid,
                            base64.encodestring(entry['result']),
                            entry['format'], printer)
        except:
            cr.rollback()
            raise
        finally:
            cr.close()

        return super(virtual_report_spool, self).exp_report_get(db, uid, report_id)
Ejemplo n.º 22
0
 def check(self, db, uid, passwd):
     """Validate the SSO session of user ``uid`` on database ``db``.

     ``passwd`` is the per-session password issued at login.  When it does
     not match the cached value, the pair is re-checked against
     ``res_users``/``res_users_expiry``; a valid session is then extended
     with a fresh expiry date (SSO users only).

     :raises OpenERPException: when no password is given or the server
         session has expired.
     """
     if not passwd:
         error_msg = "No password authentication not supported!"
         _logger.error(error_msg)
         raise OpenERPException(error_msg, ('', '', ''))
     cr = pooler.get_db(db).cursor()
     try:
         # Autocommit: each statement below is committed immediately, so
         # the expiry refresh survives regardless of the caller's
         # transaction.
         cr.autocommit(True)
         if self._uid_cache.get(db, {}).get(uid) != passwd:
             # Cache miss: re-validate password and expiry in the database.
             cr.execute("SELECT u.id, u.password FROM res_users u LEFT JOIN res_users_expiry e ON u.id = e.user_id "
                        "WHERE u.id=%s AND u.password=%s AND u.active=TRUE "
                        "AND (e.expiry_date IS NULL OR e.expiry_date>=now() AT TIME ZONE 'UTC') "
                        "LIMIT 1", (uid, passwd))
             res = cr.fetchone()
             if not res:
                 error_msg = "Server session expired for the user [uid=%s]" % uid
                 _logger.error(error_msg)
                 raise OpenERPException(error_msg, ('', '', ''))
             self._uid_cache.setdefault(db, {}).update({uid: passwd})
         expiry_date = self.pool.get('res.users.expiry').get_expiry_date()
         cr.execute("SELECT u.login, e.login, u.sso FROM res_users u LEFT JOIN res_users_expiry e ON u.id = e.user_id "
                    "WHERE u.id=%s LIMIT 1", (uid,))
         user_info = cr.fetchone()
         # user_info = (users.login, expiry-row login or NULL, users.sso)
         if user_info[2]:
             if user_info[1]:
                 # An expiry row already exists for this user: refresh it.
                 cr.execute("UPDATE res_users_expiry SET expiry_date=%s WHERE user_id=%s", (expiry_date, int(uid)))
             else:
                 cr.execute("INSERT INTO res_users_expiry (user_id, login, expiry_date) VALUES (%s, %s, %s AT TIME ZONE 'UTC')",
                            (int(uid), user_info[0], expiry_date))
             _logger.debug("Server session extended for the user [uid=%s]", uid)
     finally:
         cr.close()
Ejemplo n.º 23
0
 def open(self, node, flags, attr):
     """SFTP 'open' handler: build an in-memory file object for *node*.

     NOTE(review): this excerpt only shows the failure path (OSError
     mapped to an SFTP errno); returning the built file object ``f`` and
     closing ``cr`` appear to happen past the end of the snippet —
     confirm against the full source.
     """
     try:
         if not node:
             raise OSError(1, 'Operation not permited.')
         cr = pooler.get_db(node.context.dbname).cursor()
         uid = node.context.uid
         if node.type == 'file':
             if not self.isfile(node):
                 raise OSError(1, 'Operation not permited.')
             att_obj = node.context._dirobj.pool.get('ir.attachment')
             fobj = att_obj.browse(cr, uid, node.file_id, \
                               context=node.context.context)
             # Filesystem-stored attachments are streamed via get_data();
             # otherwise the payload is base64-encoded in 'datas'.
             if fobj.store_method and fobj.store_method == 'fs':
                 f = StringIO.StringIO(node.get_data(cr, fobj))
             else:
                 f = StringIO.StringIO(base64.decodestring(fobj.datas
                                                           or ''))
         elif node.type == 'content':
             # Virtual content node: render it through process_read().
             pool = pooler.get_pool(cr.dbname)
             res = getattr(pool.get('document.directory.content'),
                           'process_read')(cr, uid, node)
             f = StringIO.StringIO(res)
         else:
             raise OSError(1, 'Operation not permited.')
     except OSError, e:
         return paramiko.SFTPServer.convert_errno(e.errno)
Ejemplo n.º 24
0
    def report_get(self, db, uid, passwd, report_id):
        security.check(db, uid, passwd)

        cr = pooler.get_db(db).cursor()
        pool = pooler.get_pool(cr.dbname)
        report = pool.get('printjob.job').browse(cr, uid, report_id)

        if not report:
            cr.close()
            raise Exception, 'ReportNotFound'

        if report.create_uid.id != uid:
            cr.close()
            raise Exception, 'AccessDenied'

        res = {'state': report.state in ('ready', 'done')}
        if res['state']:
            res['result'] = report.result
            res['format'] = report.format
            if report.state == 'ready':
                pool.get('printjob.job').write(cr, uid, report_id, {
                    'state': 'done',
                })
                cr.commit()
        cr.close()
        return res
Ejemplo n.º 25
0
 def log_success(self,
                 cr,
                 uid,
                 model,
                 action,
                 referential_id,
                 res_id=None,
                 external_id=None,
                 context=None):
     """Remove the log entries matching a previously failed operation,
     now that it succeeded.

     At least one of ``res_id`` / ``external_id`` must be given.
     """
     if res_id is None and external_id is None:
         raise ValueError('Missing ext_id or external_id')
     search_domain = [
         ('res_model', '=', model),
         ('action', '=', action),
         ('referential_id', '=', referential_id),
     ]
     if res_id is not None:
         search_domain.append(('res_id', '=', res_id))
     if external_id is not None:
         search_domain.append(('external_id', '=', external_id))
     # Dedicated cursor: the cleanup commits independently of the
     # caller's transaction.
     log_cr = pooler.get_db(cr.dbname).cursor()
     try:
         matching_ids = self.search(log_cr, uid, search_domain, context=context)
         self.unlink(log_cr, uid, matching_ids, context=context)
     except:
         log_cr.rollback()
         raise
     else:
         log_cr.commit()
     finally:
         log_cr.close()
     return True
Ejemplo n.º 26
0
    def run(self, cr, uid, ids=None, context=None):
        """Execute every workflow job on its own cursor, committing (or
        rolling back) after each one so a failure never poisons the rest.

        :param list/int/long ids: id of workflow jobs to process, if None
            they will all be processed
        :return: True
        """
        if isinstance(ids, (int, long)):
            ids = [ids]
        elif ids is None:
            ids = self.search(cr, uid, [], context=context)

        for job in self.browse(cr, uid, ids, context=context):
            job_cr = pooler.get_db(cr.dbname).cursor()
            try:
                done = self._call_action(job_cr, uid, job, context=context)
                if done:
                    # the action succeeded: the job record is consumed
                    self.unlink(job_cr, uid, job.id, context=context)
            except Exception:
                job_cr.rollback()
                _logger.exception(
                    "Failed to execute automatic workflow job %s"
                    "on %s with id %s", job.action, job.res_model, job.res_id)
            else:
                job_cr.commit()
            finally:
                job_cr.close()
        return True
Ejemplo n.º 27
0
 def email_task():
     """Background task: e-mail an EDI notification for the exported
     records, using a fresh cursor on its own connection.

     Closure over the enclosing scope: ``cr``, ``self``, ``uid``, ``ids``,
     ``context`` and ``template_ext_id`` are captured from the caller.
     """
     db = pooler.get_db(cr.dbname)
     local_cr = None
     try:
         time.sleep(3) # lame workaround to wait for commit of parent transaction
         # grab a fresh browse_record on local cursor
         local_cr = db.cursor()
         web_root_url = self.pool.get('ir.config_parameter').get_param(local_cr, uid, 'web.base.url')
         if not web_root_url:
             _logger.warning('Ignoring EDI mail notification, web.base.url not defined in parameters')
             return
         mail_tmpl = self._edi_get_object_by_external_id(local_cr, uid, template_ext_id, 'email.template', context=context)
         if not mail_tmpl:
             # skip EDI export if the template was not found
             _logger.warning('Ignoring EDI mail notification, template %s cannot be located', template_ext_id)
             return
         for edi_record in self.browse(local_cr, uid, ids, context=context):
             edi_context = dict(context, edi_web_url_view=self._edi_get_object_web_url_view(local_cr, uid, edi_record, context=context))
             # force_send=False: the mail is queued, not sent inline
             self.pool.get('email.template').send_mail(local_cr, uid, mail_tmpl.id, edi_record.id,
                                                       force_send=False, context=edi_context)
             _logger.info('EDI export successful for %s #%s, email notification sent.', self._name, edi_record.id)
     except Exception:
         _logger.warning('Ignoring EDI mail notification, failed to generate it.', exc_info=True)
     finally:
         # NOTE(review): commit happens even after a swallowed exception —
         # presumably intentional (notification is best-effort); confirm.
         if local_cr:
             local_cr.commit()
             local_cr.close()
Ejemplo n.º 28
0
def auth(db, identifier, password, mode='login'):
    """Do the General OTP autentification stuff.

    ``password`` is expected to carry the real password and the one-time
    password joined by '--otpsep--'.

    :param db: database name
    :param identifier: login (mode 'login') or user id
    :param mode: 'login' validates login/password against res_users first
    :return: the user id on success, False on any failure
    """
    if not password:
        return False
    passes = password.split('--otpsep--')
    password = passes[0]
    if len(passes) > 1:
        otp = passes[1]
    else:
        # no OTP part supplied: refuse authentication
        return False
    pool = pooler.get_pool(db)
    res_user_obj = pool.get('res.users')
    user_id = identifier
    cr = pooler.get_db(db).cursor()
    # try/finally so the cursor is released on every return path
    # (the original never closed it).
    try:
        if mode == 'login':
            cr.execute('select id from res_users where login=%s and password=%s and active', (tools.ustr(identifier), tools.ustr(password)))
            res = cr.fetchone()
            if not res:
                return False
            user_id = res[0]
        user = res_user_obj.browse(cr, user_id, user_id)
        if not user.company_id.otp_active:
            # OTP disabled for the company: plain credentials suffice
            return user_id
        handle_login_session(user, res_user_obj)
        # reuse the first result instead of calling check_otp_timeout
        # twice as the original did
        timeoutcheck = check_otp_timeout(otp, user, res_user_obj)
        if not timeoutcheck:
            return False
        if timeoutcheck == 'valid':
            return user_id
        return check_otp(otp, user, res_user_obj)
    finally:
        cr.close()
Ejemplo n.º 29
0
    def sync_all_thread(self, cr, uid, ids, context={}):
        """Run the full synchronisation sequence (configuration, products,
        partners, sale orders, images) on a thread-local cursor.

        :return: True
        """
        # NOTE: the mutable default for ``context`` is kept for interface
        # compatibility; it is only passed along, never mutated here.
        if cr:
            # Fresh cursor for this thread: pooler.get_db() expects the
            # database *name* — the original passed the cursor object
            # itself, which cannot work.
            cr = pooler.get_db(cr.dbname).cursor()

        try:
            self.import_language(cr, uid, ids, context=context)
            self.import_addresses_config(cr, uid, ids, context=context)

            self.import_account_config(cr, uid, ids, context=context)
            self.import_product_categories(cr, uid, ids, context=context)
            self.import_product_attributes(cr, uid, ids, context=context)

            # pause before importing products, as in the original flow
            time.sleep(3)
            self.import_product(cr, uid, ids, context=context)

            self.import_partner_addresses(cr, uid, ids, context=context)
            self.export_product(cr, uid, ids, context=context)
            self.import_sale_order(cr, uid, ids, context=context)
            self.import_sale_order_update(cr, uid, ids, context=context)

            self.import_images(cr, uid, ids, context)
            self.export_images(cr, uid, ids, context)

            cr.commit()
        finally:
            # best-effort close: the cursor may already be unusable
            try:
                cr.close()
            except Exception:
                pass

        return True
Ejemplo n.º 30
0
 def log_success(self, cr, uid, model, action, referential_id,
         res_id=None, external_id=None, context=None):
     """Delete the log entries recording a previous failure of this
     (model, action, referential) operation, now that it succeeded.

     At least one of ``res_id`` / ``external_id`` must be given.
     """
     if res_id is None and external_id is None:
         raise ValueError('Missing ext_id or external_id')
     domain = [
         ('res_model', '=', model),
         ('action', '=', action),
         ('referential_id', '=', referential_id),
     ]
     # The trailing comma wraps each condition in a 1-tuple so ``+=``
     # appends it to the domain list.
     if res_id is not None:
         domain += ('res_id', '=', res_id),
     if external_id is not None:
         domain += ('external_id', '=', external_id),
     # Dedicated cursor so the cleanup commits independently of the
     # caller's transaction.
     log_cr = pooler.get_db(cr.dbname).cursor()
     try:
         log_ids = self.search(
             log_cr, uid, domain, context=context)
         self.unlink(log_cr, uid, log_ids, context=context)
     except:
         log_cr.rollback()
         raise
     else:
         log_cr.commit()
     finally:
         log_cr.close()
     return True
Ejemplo n.º 31
0
    def __init__(self, cr, uid, ids, context):
        """Set up the partner-import worker thread."""
        # Initialize the parent Thread class
        threading.Thread.__init__(self)

        # Initialize the ImportPartner state
        self.uid = uid
        self.dbname = cr.dbname
        self.pool = pooler.get_pool(cr.dbname)
        self.partner_obj = self.pool['res.partner']
        self.category_obj = self.pool['res.partner.category']
        self.address_obj = self.pool['res.partner.address']
        self.city_obj = self.pool['res.city']
        self.province_obj = self.pool['res.province']
        self.state_obj = self.pool['res.country.state']
        self.account_fiscal_position_obj = self.pool['account.fiscal.position']
        self.partner_template = self.pool['partner.import.template']
        # A new cursor is required for the thread: the one supplied by the
        # calling method is closed at the end of that method and becomes
        # unusable inside the thread.
        self.cr = pooler.get_db(self.dbname).cursor()

        self.partnerImportID = ids[0]

        self.context = context
        self.error = []
        self.warning = []
        self.first_row = True

        # Counters of newly inserted and updated partners, used to build
        # the report when the import process terminates
        self.uo_new = 0
        self.updated = 0
        self.problems = 0
Ejemplo n.º 32
0
    def ext_import(self, cr, uid, data, external_referential_id, defaults=None, context=None):
        """Import a Magento sale order, then flag it "imported" on Magento.

        If the order already exists locally the import itself is skipped,
        but the "imported" flag is still pushed back to Magento so it is
        not offered again.
        """

        # This check should be done by a decorator
        if context is None: context = {}
        if not (context.get('external_referential_type', False) and 'Magento' in context['external_referential_type']):
            # Not a Magento referential: fall back to the generic import.
            return super(sale_order, self).ext_import(cr, uid, data, external_referential_id, defaults=defaults, context=context)

        res = {'create_ids': [], 'write_ids': []}
        ext_order_id = data[0]['increment_id']
        # the new cursor should be replaced by a beautiful decorator on ext_import
        order_cr = pooler.get_db(cr.dbname).cursor()
        try:
            if not self.extid_to_existing_oeid(order_cr, uid, ext_order_id, external_referential_id, context):
                res = super(sale_order, self).ext_import(order_cr, uid, data, external_referential_id, defaults=defaults, context=context)

                # if a created order has a relation_parent_real_id, the new one replaces the original, so we have to cancel the old one
                if data[0].get('relation_parent_real_id', False): # data[0] because orders are imported one by one so data always has 1 element
                    self._chain_cancel_orders(order_cr, uid, ext_order_id, external_referential_id, defaults=defaults, context=context)

            # set the "imported" flag to true on Magento
            self.ext_set_order_imported(order_cr, uid, ext_order_id, external_referential_id, context)
            order_cr.commit()
        finally:
            order_cr.close()
        return res
Ejemplo n.º 33
0
    def __init__(self, cr, uid, ids, context):
        """Set up the METEL product-import worker thread."""

        # Initialize the parent Thread class
        threading.Thread.__init__(self)

        # Initialize the MetelImport state
        self.uid = uid

        self.dbname = cr.dbname
        self.pool = pooler.get_pool(cr.dbname)

        # A new cursor is required for the thread: the one provided by
        # the calling method is closed when that method returns and
        # becomes unusable inside the thread.
        self.cr = pooler.get_db(self.dbname).cursor()

        self.productMetelImportID = ids[0]
        self.importedLines = 0

        self.context = context

        self.manufacturerID = None

        # Counters of newly inserted and updated products, used to build
        # the report when the import process terminates
        self.uo_new = 0
        self.uo_update = 0
Ejemplo n.º 34
0
    def authenticate(self, db, login, password, user_agent_env):
        """Check ``login``/``password`` on database ``db`` and return the
        matching user ID, or False when authentication fails.

        As a side effect, a successful admin login records the client's
        ``base_location`` as the ``web.base.url`` config parameter.

        :param str db: the database on which user is trying to authenticate
        :param str login: username
        :param str password: user password
        :param dict user_agent_env: environment dictionary describing any
            relevant environment attributes
        """
        uid = self.login(db, login, password)
        if uid != openerp.SUPERUSER_ID:
            return uid
        # Successfully logged in as admin: attempt to guess the web base url
        if not (user_agent_env and user_agent_env.get('base_location')):
            return uid
        cr = pooler.get_db(db).cursor()
        try:
            param_obj = self.pool.get('ir.config_parameter')
            param_obj.set_param(cr, uid, 'web.base.url',
                                user_agent_env['base_location'])
            cr.commit()
        except Exception:
            # best effort: failing to store the URL must not break login
            _logger.exception(
                "Failed to update web.base.url configuration parameter"
            )
        finally:
            cr.close()
        return uid
Ejemplo n.º 35
0
    def run(self):
        """
            Import all data into openerp,
            this is the Entry point to launch the process of import

            NOTE(review): the excerpt ends inside the except block; the
            handling of ``error``/``result`` presumably continues past it.
        """
        self.data_started = datetime.datetime.now().strftime(
            "%Y-%m-%d %H:%M:%S")
        # Thread entry point: open a fresh cursor — the one stored by the
        # spawning request is closed once that request returns.
        self.cr = pooler.get_db(self.cr.dbname).cursor()
        error = False
        result = []
        try:
            self.init_run()
            imported = set()  #to invoid importing 2 times the sames modules
            for table in self.table_list:
                to_import = self.get_mapping()[table].get('import', True)
                if not table in imported:
                    # Resolve and import dependencies first; 'imported'
                    # ensures each table is processed at most once.
                    res = self._resolve_dependencies(
                        self.get_mapping()[table].get('dependencies', []),
                        imported)
                    result.extend(res)
                    if to_import:
                        (position, warning) = self._import_table(table)
                        result.append((table, position, warning))
                    imported.add(table)
            self.cr.commit()

        except Exception, err:
            # Capture the full traceback as text for later reporting.
            sh = StringIO.StringIO()
            traceback.print_exc(file=sh)
            error = sh.getvalue()
            print error
Ejemplo n.º 36
0
    def authenticate(self, db, login, password, user_agent_env):
        """Verifies and returns the user ID corresponding to the given
          ``login`` and ``password`` combination, or False if there was
          no matching user.

           :param str db: the database on which user is trying to authenticate
           :param str login: username
           :param str password: user password
           :param dict user_agent_env: environment dictionary describing any
               relevant environment attributes
        """
        uid = self.login(db, login, password)
        if uid == openerp.SUPERUSER_ID:
            # Successfully logged in as admin!
            # Attempt to guess the web base url...
            if user_agent_env and user_agent_env.get('base_location'):
                cr = pooler.get_db(db).cursor()
                try:
                    self.pool.get('ir.config_parameter').set_param(cr, uid, 'web.base.url',
                                                                   user_agent_env['base_location'])
                    cr.commit()
                except Exception:
                    # Best effort only: a failure to store the URL must
                    # not break the login itself.
                    _logger.exception("Failed to update web.base.url configuration parameter")
                finally:
                    cr.close()
        return uid
Ejemplo n.º 37
0
    def __init__(self, cr, uid, ids, context):
        """Set up the BOM/product import worker thread."""
        # Initialize the parent Thread class
        threading.Thread.__init__(self)

        # Initialize instance state
        self.uid = uid
        self.start_time = datetime.now()
        self.dbname = cr.dbname
        self.pool = pooler.get_pool(cr.dbname)
        self.product_obj = self.pool['product.product']
        self.supplierinfo_obj = self.pool['product.supplierinfo']
        self.bom_obj = self.pool['mrp.bom']

        # A new cursor is required for the thread: the one provided by the
        # calling method is closed at the end of that method and becomes
        # unusable inside the thread.
        self.cr = pooler.get_db(self.dbname).cursor()

        self.bomImportID = ids[0]

        self.context = context
        self.error = []
        self.warning = []
        self.first_row = True

        # Counters of newly inserted and updated products, used to build
        # the report when the import process terminates
        self.uo_new = 0
        self.updated = 0
        self.problems = 0
        self.cache = []
Ejemplo n.º 38
0
    def __init__(self, cr, uid, ids, context):
        """Set up the partner-import worker thread."""
        # Initialize the parent Thread class
        threading.Thread.__init__(self)

        # Initialize the ImportPartner state
        self.uid = uid
        self.dbname = cr.dbname
        self.start_time = datetime.now()
        self.pool = pooler.get_pool(cr.dbname)
        self.partner_obj = self.pool['res.partner']
        self.category_obj = self.pool['res.partner.category']
        self.address_obj = self.pool['res.partner.address']
        self.city_obj = self.pool['res.city']
        self.province_obj = self.pool['res.province']
        self.state_obj = self.pool['res.country.state']
        self.account_fiscal_position_obj = self.pool['account.fiscal.position']
        self.partner_template = self.pool['partner.import.template']
        # A new cursor is required for the thread: the one supplied by the
        # calling method is closed at the end of that method and becomes
        # unusable inside the thread.
        self.cr = pooler.get_db(self.dbname).cursor()

        self.partnerImportID = ids[0]

        self.context = context
        self.error = []
        self.warning = []
        self.first_row = True

        # Counters of newly inserted and updated partners, used to build
        # the report when the import process terminates
        self.uo_new = 0
        self.updated = 0
        self.problems = 0
    def create(self, node, objname, flags):
        """Create (or reuse) an ir.attachment named *objname* under the
        directory/resource represented by *node*.

        NOTE(review): the excerpt ends inside the ``try`` block — the
        matching except/finally (and the return of the file handle) lie
        past the end of this snippet; confirm against the full source.
        """
        objname=_to_unicode(objname)
        cr = None
        try:
            uid = node.context.uid
            pool = pooler.get_pool(node.context.dbname)
            cr = pooler.get_db(node.context.dbname).cursor()
            child = node.child(cr, objname)
            f = None
            if child:
                # Cannot overwrite directories/databases; content nodes
                # get a writable wrapper instead of a plain attachment.
                if child.type in ('collection','database'):
                    raise OSError(1, 'Operation not permited.')
                if child.type=='content':
                    f = content_wrapper(cr.dbname, uid, pool, child)
            fobj = pool.get('ir.attachment')
            # crude extension extraction: text after the first dot
            ext = objname.find('.') >0 and objname.split('.')[1] or False

            # TODO: test if already exist and modify in this case if node.type=file
            ### checked already exits
            object2 = False
            if isinstance(node, node_res_obj):
                object2 = node and pool.get(node.context.context['res_model']).browse(cr, uid, node.context.context['res_id']) or False

            cid = False
            object = node.context._dirobj.browse(cr, uid, node.context.context['dir_id'])

            # Look up an existing attachment with the same name/parent
            # (and resource link, when the node is bound to a record).
            where=[('name','=',objname)]
            if object and (object.type in ('directory')) or object2:
                where.append(('parent_id','=',object.id))
            else:
                where.append(('parent_id','=',False))

            if object2:
                where +=[('res_id','=',object2.id),('res_model','=',object2._name)]
            cids = fobj.search(cr, uid,where)
            if len(cids):
                cid=cids[0]

            if not cid:
                # No existing attachment: create an empty placeholder.
                val = {
                    'name': objname,
                    'datas_fname': objname,
                    'datas': '',
                    'file_size': 0L,
                    'file_type': ext,
                }
                if object and (object.type in ('directory')) or not object2:
                    val['parent_id']= object and object.id or False
                partner = False
                if object2:
                    # Link the attachment to the related record/partner.
                    if 'partner_id' in object2 and object2.partner_id.id:
                        partner = object2.partner_id.id
                    if object2._name == 'res.partner':
                        partner = object2.id
                    val.update( {
                        'res_model': object2._name,
                        'partner_id': partner,
                        'res_id': object2.id
                    })
                cid = fobj.create(cr, uid, val, context={})
Ejemplo n.º 40
0
    def __init__(self, cr, uid, ids, context):
        """Set up the stock-picking import worker thread."""
        # Initialize the parent Thread class
        threading.Thread.__init__(self)

        # Initialize instance state (the original comment said
        # "ImportPricelist", though the objects below deal with stock
        # pickings — TODO confirm the class name)
        self.uid = uid

        self.dbname = cr.dbname
        self.pool = pooler.get_pool(cr.dbname)
        self.product_obj = self.pool['product.product']
        self.picking_obj = self.pool['stock.picking']
        self.move_obj = self.pool['stock.move']
        self.location_obj = self.pool['stock.location']

        # A new cursor is required for the thread: the one provided by
        # the calling method is closed when that method returns and
        # becomes unusable inside the thread.
        self.cr = pooler.get_db(self.dbname).cursor()

        self.pickingImportID = ids[0]

        self.context = context
        self.error = []
        self.warning = []
        self.first_row = True

        # Counters of newly inserted and updated products, used to build
        # the report when the import process terminates
        self.uo_new = 0
        self.updated = 0
        self.problems = 0
        self.cache = {}
        self.cache_product = {}
Ejemplo n.º 41
0
 def __init__(self, cr, uid, ids, context):
     """Set up the METEL import worker thread."""

     # Initialize the parent Thread class
     threading.Thread.__init__(self)

     # Initialize the MetelImport state
     self.uid = uid

     self.dbname = cr.dbname
     self.pool = pooler.get_pool(cr.dbname)

     # A new cursor is required for the thread: the one provided by the
     # calling method is closed when that method returns and becomes
     # unusable inside the thread.
     self.cr = pooler.get_db(self.dbname).cursor()

     self.productMetelImportID = ids[0]
     self.importedLines = 0

     self.context = context

     self.manufacturerID = None

     # Counters of newly inserted and updated products, used to build
     # the report when the import process terminates
     self.uo_new = 0
     self.uo_update = 0
Ejemplo n.º 42
0
 def action_start_job(self, key, signal_data={}, data={}):
     """Mark the process identified by ``data['process_id']`` as started.

     :param key: workflow signal key (unused here)
     :param signal_data: extra signal payload (unused here)
     :param data: dict carrying 'dbname', 'uid' and 'process_id'
     :return: True
     """
     # NOTE: the mutable defaults are kept for interface compatibility;
     # neither dict is mutated here.
     cr = pooler.get_db(data['dbname']).cursor()
     # try/finally so the cursor is always released — the original never
     # closed it.
     try:
         uid = data['uid']
         process = self.browse(cr, uid, data['process_id'], context={})
         self.write(cr, uid, process.id,
                    {'state': 'start',
                     'start_date': time.strftime('%Y-%m-%d %H:%M:%S')})
         cr.commit()
     finally:
         cr.close()
     return True
Ejemplo n.º 43
0
 def check(self, db, uid, passwd):
     """Fall back to LDAP authentication when the regular password check
     raises AccessDenied.

     NOTE(review): in this excerpt the cursor opened below is only closed
     on the successful-bind path; the remaining paths appear to continue
     past the end of the snippet — confirm against the full source.
     """
     try:
         return super(users,self).check(db, uid, passwd)
     except security.ExceptionNoTb: # AccessDenied
         pass
     cr = pooler.get_db(db).cursor()
     user = self.browse(cr, 1, uid)
     logger = logging.getLogger('orm.ldap')
     if user and user.company_id.ldaps:
         for res_company_ldap in user.company_id.ldaps:
             try:
                 # Bind with the configured service account, then search
                 # for the user's entry.
                 l = ldap.open(res_company_ldap.ldap_server, res_company_ldap.ldap_server_port)
                 if l.simple_bind_s(res_company_ldap.ldap_binddn,
                         res_company_ldap.ldap_password):
                     base = res_company_ldap.ldap_base
                     scope = ldap.SCOPE_SUBTREE
                     filter = filter_format(res_company_ldap.ldap_filter, (user.login,))
                     retrieve_attributes = None
                     result_id = l.search(base, scope, filter, retrieve_attributes)
                     timeout = 60
                     result_type, result_data = l.result(result_id, timeout)
                     # Exactly one match is required before re-binding as
                     # the user with the supplied password.
                     if result_data and result_type == ldap.RES_SEARCH_RESULT and len(result_data) == 1:
                         dn = result_data[0][0]
                         if l.bind_s(dn, passwd):
                             l.unbind()
                             self._uid_cache.setdefault(db, {})[uid] = passwd
                             cr.close()
                             return True
                     l.unbind()
             except Exception, e:
                 # try the next configured LDAP server
                 logger.warning('cannot check', exc_info=True)
                 pass
Ejemplo n.º 44
0
    def generate_report_bkg(self, cr, uid, ids, datas, context=None):
        """
        Generate the report in background

        Spools a 'stock.move.xls' report, polls until it is rendered,
        stores the result as an ir.attachment on the wizard record and
        marks the wizard ready.
        """
        if context is None:
            context = {}

        if isinstance(ids, (int, long)):
            ids = [ids]

        import pooler
        # Dedicated cursor: the attachment/state change is committed
        # independently of the caller's transaction.
        new_cr = pooler.get_db(cr.dbname).cursor()

        rp_spool = report_spool()
        result = rp_spool.exp_report(cr.dbname, uid, 'stock.move.xls', ids, datas, context)
        file_res = {'state': False}
        # Poll the spool every 0.5 s until the report is rendered.
        while not file_res.get('state'):
            file_res = rp_spool.exp_report_get(cr.dbname, uid, result)
            time.sleep(0.5)
        attachment = self.pool.get('ir.attachment')
        attachment.create(new_cr, uid, {
            'name': 'move_analysis_%s.xls' % time.strftime('%Y_%m_%d_%H_%M'),
            'datas_fname': 'move_analysis_%s.xls' % time.strftime('%Y_%m_%d_%H_%M'),
            'description': 'Move analysis',
            'res_model': 'export.report.stock.move',
            'res_id': ids[0],
            'datas': file_res.get('result'),
        })
        self.write(new_cr, uid, ids, {'state': 'ready'}, context=context)

        new_cr.commit()
        # close(True) — presumably a "hard close" flag on this project's
        # cursor wrapper; confirm against the sql_db API.
        new_cr.close(True)

        return True
Ejemplo n.º 45
0
 def email_task():
     """Background task: export the records over EDI and e-mail a
     notification carrying the EDI view URL, on a fresh cursor.

     Closure over the enclosing scope: ``cr``, ``self``, ``uid``, ``ids``,
     ``context`` and ``template_ext_id`` are captured from the caller.
     """
     db = pooler.get_db(cr.dbname)
     local_cr = None
     try:
         time.sleep(3) # lame workaround to wait for commit of parent transaction
         # grab a fresh browse_record on local cursor
         local_cr = db.cursor()
         web_root_url = self.pool.get('ir.config_parameter').get_param(local_cr, uid, 'web.base.url')
         if not web_root_url:
             _logger.warning('Ignoring EDI mail notification, web.base.url is not defined in parameters.')
             return
         mail_tmpl = self._edi_get_object_by_external_id(local_cr, uid, template_ext_id, 'email.template', context=context)
         if not mail_tmpl:
             # skip EDI export if the template was not found
             _logger.warning('Ignoring EDI mail notification, template %s cannot be located.', template_ext_id)
             return
         for edi_record in self.browse(local_cr, uid, ids, context=context):
             # export first, then build the public view URL from the token
             edi_token = self.pool.get('edi.document').export_edi(local_cr, uid, [edi_record], context = context)[0]
             edi_context = dict(context, edi_web_url_view=EDI_VIEW_WEB_URL % (web_root_url, local_cr.dbname, edi_token))
             self.pool.get('email.template').send_mail(local_cr, uid, mail_tmpl.id, edi_record.id,
                                                       force_send=False, context=edi_context)
             _logger.info('EDI export successful for %s #%s, email notification sent.', self._name, edi_record.id)
     except Exception:
         _logger.warning('Ignoring EDI mail notification, failed to generate it.', exc_info=True)
     finally:
         # NOTE(review): commit happens even after a swallowed exception —
         # presumably intentional (notification is best-effort); confirm.
         if local_cr:
             local_cr.commit()
             local_cr.close()
Ejemplo n.º 46
0
    def create(self, cr, uid, ids, data, context):
        """Render the report and, when the report definition requests it,
        store the rendered output as an attachment.

        :return: (rendered_report, output_type) tuple
        """
        name = self.name
        report_instance = Report(name, cr, uid, ids, data, context)

        pool = pooler.get_pool(cr.dbname)
        ir_pool = pool.get('ir.actions.report.xml')
        report_xml_ids = ir_pool.search(cr, uid,
                [('report_name', '=', name[7:])], context=context)

        rendered_report, output_type = report_instance.execute()
        if report_xml_ids:
            report_xml = ir_pool.browse(cr, uid, report_xml_ids[0], context=context)
            model = context.get('active_model')
            if report_xml.attachment and model:
                # Creating new cursor to prevent TransactionRollbackError
                # when creating attachments, concurrency update have place
                # otherwise.
                crtemp = pooler.get_db(cr.dbname).cursor()
                # TODO: remodel the report re-registration that used to be
                # sketched here; it returned the previous filename and
                # caused bugs.
                try:
                    self.create_attachment(crtemp, uid, ids,
                                           report_xml.attachment,
                                           rendered_report, output_type,
                                           model, context=context)
                    # attachment persists even if the caller rolls back
                    crtemp.commit()
                finally:
                    # always release the temporary cursor — the original
                    # leaked it when create_attachment raised
                    crtemp.close()
        return rendered_report, output_type
Ejemplo n.º 47
0
    def run(self):
        """
            Import all data into openerp,
            this is the Entry point to launch the process of import

            NOTE(review): the excerpt ends inside the except block; the
            handling of ``error``/``result`` presumably continues past it.
        """
        self.data_started = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        # Thread entry point: open a fresh cursor — the one stored by the
        # spawning request is closed once that request returns.
        self.cr = pooler.get_db(self.cr.dbname).cursor()
        error = False
        result = []
        try:
            self.init_run()
            imported = set() #to invoid importing 2 times the sames modules
            for table in self.table_list:
                to_import = self.get_mapping()[table].get('import', True)
                if not table in imported:
                    # Resolve and import dependencies first; 'imported'
                    # ensures each table is processed at most once.
                    res = self._resolve_dependencies(self.get_mapping()[table].get('dependencies', []), imported)
                    result.extend(res)
                    if to_import:
                        (position, warning) = self._import_table(table)
                        result.append((table, position, warning))
                    imported.add(table)
            self.cr.commit()

        except Exception, err:
            # Capture the full traceback as text for later reporting.
            sh = StringIO.StringIO()
            traceback.print_exc(file=sh)
            error = sh.getvalue()
            print error
 def _reschedule_procurement(self, cr, uid, use_new_cursor=False, context=None):
     """Recompute procurement scheduling for every company, out to each
     company's configured rescheduling horizon.

     :param use_new_cursor: when truthy it is taken as a database name and
         a dedicated cursor is opened, committed and closed for the run
     :return: {}
     """
     if context is None:
         context = {}
     if use_new_cursor:
         cr = pooler.get_db(use_new_cursor).cursor()
     try:
         company_obj = self.pool['res.company']
         product_obj = self.pool['product.product']
         company_ids = company_obj.search(cr, uid, [], context=context)
         for company in company_obj.browse(cr, uid, company_ids, context=context):
             # renamed from 'range' to avoid shadowing the builtin
             reschedule_days = company.reschedule_range
             maxdate = datetime.today() + relativedelta(days=reschedule_days)
             maxdate = maxdate.strftime(DEFAULT_SERVER_DATE_FORMAT)
             recompute_prod_ids = product_obj._get_product_ids_to_recompute(
                 cr, uid, maxdate, company, context=context)
             if recompute_prod_ids:
                 context['company_id'] = company.id
                 product_obj.reschedule_all_procurement(cr, uid,
                                                        recompute_prod_ids,
                                                        maxdate,
                                                        context=context)
         if use_new_cursor:
             cr.commit()
     finally:
         if use_new_cursor:
             # best-effort close of the dedicated cursor
             try:
                 cr.close()
             except Exception:
                 pass
     return {}
Ejemplo n.º 49
0
def import_with_try(self, cr, uid, callback, data_record, external_referential_id, defaults, context=None):
    """Run *callback* on a dedicated cursor, logging success or failure to
    an external report line.

    The report line is pre-created in the 'fail' state; it is flipped to
    'success' (and the private cursor committed) only when the callback
    completes without raising.  Mapping errors are recorded on the report
    line; other errors are rolled back and re-raised.

    :return: the callback's result, or ``{}`` when a MappingError occurred
    """
    if not context:
        context = {}
    res = {}
    report_line_obj = self.pool.get('external.report.line')
    report_line_id = report_line_obj._log_base(cr, uid, self._name, callback.im_func.func_name,
                                    state='fail', external_id=context.get('external_object_id', False),
                                    defaults=defaults, data_record=data_record,
                                    context=context)
    context['report_line_id'] = report_line_id
    import_cr = pooler.get_db(cr.dbname).cursor()
    try:
        # BUGFIX: the callback must run INSIDE the try block so a failure
        # rolls back import_cr and is reported.  It had drifted above the
        # try, leaving a dead ``pass`` body and unhandled exceptions.
        res = callback(import_cr, uid, data_record, external_referential_id, defaults, context=context)
    except MappingError as e:
        import_cr.rollback()
        report_line_obj.write(cr, uid, report_line_id, {
                        'error_message': 'Error with the mapping : %s. Error details : %s'%(e.mapping_name, e.value),
                        }, context=context)
    except osv.except_osv as e:
        import_cr.rollback()
        raise osv.except_osv(*e)
    except Exception as e:
        import_cr.rollback()
        raise Exception(e)
    else:
        report_line_obj.write(cr, uid, report_line_id, {
                    'state': 'success',
                    }, context=context)
        import_cr.commit()
    finally:
        import_cr.close()
    return res
Ejemplo n.º 50
0
 def process_thread(self, cr, uid, ids, context=None):
     """Apply mass account reallocation for the given wizards on a private cursor.

     Flags each wizard as in-progress, updates every distribution line and
     its analytic lines for each distribution found on the wizard's
     process lines, then commits.  The in-progress flag is cleared in the
     ``finally`` block even on failure.

     :raises osv.except_osv: when a wizard has no process lines
     """
     # The caller's cursor dies with the launching request, so this
     # background method opens its own.
     cr = pooler.get_db(cr.dbname).cursor()
     # Browse all given wizard
     try:
         for wiz in self.browse(cr, uid, ids, context=context):
             values = {'process_in_progress': True}
             super(mass_reallocation_verification_wizard, self).write(cr, uid, [wiz.id], values, context=context)
             # If no supporteds_ids, raise an error
             if not wiz.process_ids:
                 raise osv.except_osv(_('Error'), _('No lines to be processed.'))
             # Prepare some values
             account_id = wiz.account_id and wiz.account_id.id
             # Sort by distribution
             lines = defaultdict(list)
             for line in wiz.process_ids:
                 lines[line.distribution_id.id].append(line)
             # Process each distribution
             for distrib_id in lines:
                 # UF-2205: fix problem with lines that does not have any distribution line or distribution id (INTL engagement lines)
                 if not distrib_id:
                     continue
                 for line in lines[distrib_id]:
                     # Update distribution
                     self.pool.get('analytic.distribution').update_distribution_line_account(cr, uid, line.distrib_line_id.id, account_id, context=context)
                 # Then update analytic line
                 self.pool.get('account.analytic.line').update_account(cr, uid, [x.id for x in lines[distrib_id]], account_id, wiz.date, context=context)
         cr.commit()
     finally:
         # Always clear the in-progress flag, even if processing failed.
         values = {'process_in_progress': False}
         super(mass_reallocation_verification_wizard, self).write(cr, uid, ids, values, context=context)
         # NOTE(review): close() is called with an argument — presumably a
         # project-specific cursor API; confirm before porting elsewhere.
         cr.close(True)
Ejemplo n.º 51
0
        def print_thread(id, uid, ids, datas, context, printer):
            """Worker thread: render the report service ``'report.' + object``
            for ``ids`` and, on failure, publish the exception under this
            job ``id`` in ``self.exceptions``.

            NOTE(review): ``db``, ``object`` and ``self`` are captured from
            the enclosing scope; ``result``/``state``/``format`` are only
            bound locally in the visible code — confirm how they are
            consumed further down.
            """
            logger = netsvc.Logger()
            logger.notifyChannel("report", netsvc.LOG_DEBUG, "Printing thread started")

            # Dedicated cursor/pool: the spawning request's cursor cannot be
            # used from this thread.
            cr = pooler.get_db(db).cursor()
            pool = pooler.get_pool(cr.dbname)

            exception_string = False
            exception_backtrace = False
            state = 'ready'
            result = False
            format = False

            service = netsvc.LocalService('report.'+object)
            try:
                (result, format) = service.create(cr, uid, ids, datas, context)
                # Report payloads are shipped around base64-encoded.
                result = base64.encodestring(result)
            except Exception, exception:
                import traceback
                import sys
                tb = sys.exc_info()
                tb_s = "".join(traceback.format_exception(*tb))
                logger = netsvc.Logger()
                logger.notifyChannel('web-services', netsvc.LOG_ERROR, 'Exception: %s\n%s' % (str(exception), tb_s))

                # Wrap the exception with its traceback and publish it under
                # this job id, guarded by the shared lock.
                exception = ExceptionWithTraceback(tools.exception_to_unicode(exception), tb)
                self.exceptions_protect.acquire()
                self.exceptions[id] = exception
                self.exceptions_protect.release()

                state = 'error'
 def mkdir(self, node, basename, attr):
     """Create directory *basename* under *node* (SFTP server callback).

     :return: ``paramiko.SFTP_OK`` on success, otherwise the paramiko
         errno translation of the failure
     """
     cr = None
     try:
         if not node:
             raise OSError(1, 'Operation not permited.')
         uid = node.context.uid
         pool = pooler.get_pool(node.context.dbname)
         cr = pooler.get_db(node.context.dbname).cursor()
         basename = _to_unicode(basename)
         object2 = False
         if isinstance(node, node_res_obj):
             object2 = node and pool.get(node.context.context['res_model']).browse(cr, uid, node.context.context['res_id']) or False
         obj = node.context._dirobj.browse(cr, uid, node.context.context['dir_id'])
         if obj and (obj.type == 'ressource') and not node.object2:
             raise OSError(1, 'Operation not permited.')
         val = {
             # NOTE(review): ``object`` here is the builtin (always truthy) —
             # presumably ``object2`` was meant; kept as-is to preserve
             # behavior, confirm before changing.
             'ressource_parent_type_id': object and obj.ressource_type_id.id or False,
             'ressource_id': object2 and object2.id or False
         }
         if (obj and (obj.type in ('directory'))) or not object2:
             val['parent_id'] = obj and obj.id or False
         # Check if it already exists !
         pool.get('document.directory').create(cr, uid, val)
         cr.commit()
         return paramiko.SFTP_OK
     except Exception as err:
         # BUGFIX: the handler bound the exception to ``err`` but read
         # ``e.errno``, raising a NameError instead of reporting the error.
         return paramiko.SFTPServer.convert_errno(err.errno)
     finally:
         # BUGFIX: close the cursor on every path (it leaked on failure).
         if cr is not None:
             cr.close()
Ejemplo n.º 53
0
    def sync_all_thread(self, cr, uid, ids, context=None):
        """Run the full synchronisation sequence on a dedicated cursor and
        commit once at the end.

        The steps are order-dependent: configuration first, then product
        data, then orders and images.  A short sleep separates the
        product-attribute import from the product import.

        :return: True
        """
        # Avoid the shared mutable-default-argument pitfall (was ``context={}``).
        if context is None:
            context = {}
        if cr:
            # BUGFIX: pooler.get_db() takes a database name, not a cursor.
            cr = pooler.get_db(cr.dbname).cursor()

        self.import_language(cr, uid, ids, context=context)
        self.import_addresses_config(cr, uid, ids, context=context)

        self.import_account_config(cr, uid, ids, context=context)
        self.import_product_categories(cr, uid, ids, context=context)
        self.import_product_attributes(cr, uid, ids, context=context)

        # Pause before pulling products — presumably lets the remote
        # backend settle; TODO confirm why 3 seconds.
        time.sleep(3)
        self.import_product(cr, uid, ids, context=context)

        self.import_partner_addresses(cr, uid, ids, context=context)
        self.export_product(cr, uid, ids, context=context)
        self.import_sale_order(cr, uid, ids, context=context)
        self.import_sale_order_update(cr, uid, ids, context=context)

        self.import_images(cr, uid, ids, context)
        self.export_images(cr, uid, ids, context)

        cr.commit()

        # Best-effort close: the cursor may already be unusable.
        try:
            cr.close()
        except Exception:
            pass

        return True
Ejemplo n.º 54
0
        def go(id, uid, ids, datas, context):
            """Worker: render the report service ``'report.' + object`` and
            publish result/format/exception into ``self._reports[id]``.

            ``db``, ``object`` and ``self._reports`` are captured from the
            enclosing scope.
            """
            cr = pooler.get_db(db).cursor()
            import traceback
            import sys
            try:
                obj = netsvc.LocalService('report.'+object)
                (result, format) = obj.create(cr, uid, ids, datas, context)
                if not result:
                    tb = sys.exc_info()
                    self._reports[id]['exception'] = ExceptionWithTraceback('RML is not available at specified location or not enough data to print!', tb)
                self._reports[id]['result'] = result
                self._reports[id]['format'] = format
                self._reports[id]['state'] = True
            except Exception as exception:
                tb = sys.exc_info()
                tb_s = "".join(traceback.format_exception(*tb))
                logger = netsvc.Logger()
                logger.notifyChannel('web-services', netsvc.LOG_ERROR,
                        'Exception: %s\n%s' % (str(exception), tb_s))
                if hasattr(exception, 'name') and hasattr(exception, 'value'):
                    self._reports[id]['exception'] = ExceptionWithTraceback(tools.ustr(exception.name), tools.ustr(exception.value))
                else:
                    self._reports[id]['exception'] = ExceptionWithTraceback(tools.exception_to_unicode(exception), tb)
                self._reports[id]['state'] = True
            finally:
                # BUGFIX: the cursor was never closed, leaking one database
                # connection per report rendered.
                cr.close()
Ejemplo n.º 55
0
    def run_auto_import(self, cr, uid, automatic=False, use_new_cursor=False, context=None):
        """Import/refresh products from every supplier CSV file found in the
        drop directory, then e-mail a run log.

        Each file is named ``<supplier_ref>.csv``; it is removed after
        processing whether or not the supplier was found.

        :param use_new_cursor: when truthy, taken to be a database name; a
            dedicated cursor is opened, committed and closed here.
        """
        pool = pooler.get_pool(cr.dbname)
        testo_log = """Inizio procedura di aggiornamento/inserimento articoli """+time.ctime()+'\n'
        percorso = '/home/openerp/filecsv'  # TODO(review): hard-coded drop dir — make configurable
        partner_obj = pool.get('res.partner')
        if use_new_cursor:
            cr = pooler.get_db(use_new_cursor).cursor()
        try:
            elenco_csv = os.listdir(percorso)
            for filecsv in elenco_csv:
                # File name (without extension) is the supplier reference.
                codfor = filecsv.split(".")
                testo_log = testo_log + " analizzo file "+codfor[0]+".csv \n"
                fornitore_ids = partner_obj.search(cr, uid, [('ref', '=', codfor[0])])
                if fornitore_ids:
                    fornitore_id = fornitore_ids[0]
                    # BUGFIX: close the CSV file handle instead of leaking it.
                    csv_file = open(percorso+'/'+filecsv, 'rb')
                    try:
                        lines = csv.reader(csv_file, delimiter=";")
                        res = self._import_product_func(cr, uid, lines, fornitore_id, context)
                    finally:
                        csv_file.close()
                    testo_log = testo_log + " Inseriti "+str(res[0])+" Aggiornati "+str(res[1]) +" Articoli \n"
                else:
                    testo_log = testo_log + " fornitore "+codfor[0]+" non trovato  \n"
                os.remove(percorso+'/'+filecsv)
            testo_log = testo_log + " Operazione Teminata  alle "+time.ctime()+"\n"
            if use_new_cursor:
                # BUGFIX: commit the work done on the private cursor.
                cr.commit()
        finally:
            if use_new_cursor:
                # BUGFIX: the private cursor was never closed.
                try:
                    cr.close()
                except Exception:
                    pass
        # Send the run log by e-mail.
        type_ = 'plain'
        tools.email_send('*****@*****.**',
                         ['*****@*****.**'],
                         'Import Automatico Articoli',
                         testo_log,
                         subtype=type_,
                         )
        return
Ejemplo n.º 56
0
    def __init__(self, cr, uid, ids, context):
        """Set up the product-import thread: own cursor, model objects,
        counters.

        A fresh cursor is required because the caller's cursor is closed
        when the launching method returns and becomes unusable inside the
        thread.
        """
        # Initialise the Thread superclass.
        threading.Thread.__init__(self)

        self.uid = uid
        self.dbname = cr.dbname
        self.start_time = datetime.now()
        self.pool = pooler.get_pool(cr.dbname)
        # BUGFIX: product_obj was assigned twice; a single assignment suffices.
        self.product_obj = self.pool['product.product']
        self.inventory_line_obj = self.pool['stock.inventory.line']

        # Dedicated cursor for the thread (see docstring).
        self.cr = pooler.get_db(self.dbname).cursor()

        self.productImportID = ids[0]

        self.context = context
        self.error = []
        self.warning = []
        self.first_row = True

        # Counters for inserted/updated products and problems, used to
        # build the report when the import finishes.
        self.uo_new = 0
        self.updated = 0
        self.problems = 0
        self.cache = []
        self.cache_product = {}
Ejemplo n.º 57
0
 def action_end_job(self, key, signal_data=None, data=None):
     """Stamp the process record described by *data* as ended.

     :param key: signal key from the job framework (unused here)
     :param signal_data: unused; kept for interface compatibility
     :param data: dict with 'dbname', 'uid' and 'process_id'
     :return: True
     """
     # Avoid the shared mutable-default-argument pitfall (was ``={}``).
     if signal_data is None:
         signal_data = {}
     if data is None:
         data = {}
     cr = pooler.get_db(data['dbname']).cursor()
     try:
         self.write(cr, data['uid'], data['process_id'], {
             'state': 'end',
             'end_date': time.strftime('%Y-%m-%d %H:%M:%S')
         })
         cr.commit()
     finally:
         # BUGFIX: the cursor was never closed (connection leak).
         cr.close()
     return True
Ejemplo n.º 58
0
        def __init__(self, cr, uid, product_product_obj, split_ids, context=None):
            """Prepare the worker thread: give it its own database cursor
            and record the product ids it must process."""
            threading.Thread.__init__(self)
            # The thread needs a cursor of its own — the caller's cursor is
            # closed once the launching request returns.
            self.cr = pooler.get_db(cr.dbname).cursor()
            self.uid = uid
            self.context = context
            self.product_product_obj = product_product_obj
            self.product_ids = split_ids
Ejemplo n.º 59
0
 def sso_logout(self, db, login, context=None):
     """Clear the SSO session expiry for *login* on database *db*."""
     logger = logging.getLogger('smile_sso')
     cursor = pooler.get_db(db).cursor()
     try:
         query = "UPDATE res_users_expiry SET expiry_date=NULL WHERE login=%s AND sso=TRUE"
         cursor.execute(query, (login,))
         cursor.commit()
         logger.debug("Logout of the user [login=%s]", login)
     finally:
         cursor.close()