Example 1
def getRefreshImage():
    stream = cStringIO.StringIO(getRefreshData())
    return ImageFromStream(stream)
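
These get*Image helpers (Examples 1, 10, 12, 13, and 14) all follow the wxPython img2py pattern: a get*Data() function returns image bytes embedded in the module, and cStringIO.StringIO wraps them so ImageFromStream can decode the image without touching disk. A minimal self-contained sketch of the pattern; the embedded PNG payload is a hypothetical stand-in:

import cStringIO
from wx import BitmapFromImage, ImageFromStream

# Hypothetical payload; real img2py output embeds the full image bytes.
_REFRESH_PNG = '\x89PNG\r\n\x1a\n...'

def getRefreshData():
    return _REFRESH_PNG

def getRefreshImage():
    stream = cStringIO.StringIO(getRefreshData())
    return ImageFromStream(stream)

def getRefreshBitmap():
    return BitmapFromImage(getRefreshImage())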
Example 2
    def VTiter(self, *args, **formatArgs):
        import bz2
        import msgpack

        serializer = msgpack
        largs, dictargs = self.full_parse(args)
        where = None
        mode = 'row'
        input = cStringIO.StringIO()

        if 'file' in dictargs:
            where = dictargs['file']
        else:
            raise functions.OperatorError(
                __name__.rsplit('.')[-1], "No destination provided")
        col = 0

        if 'cols' in dictargs:
            a = re.split(' |,| , |, | ,', dictargs['cols'])
            column = [x for x in a if x != '']
        else:
            col = 1
        start = 0
        end = sys.maxint - 1
        if 'start' in dictargs:
            start = int(dictargs['start'])
        if 'end' in dictargs:
            end = int(dictargs['end'])

        fullpath = str(
            os.path.abspath(
                os.path.expandvars(os.path.expanduser(
                    os.path.normcase(where)))))
        fileIterlist = []
        for x in xrange(start, end + 1):
            try:
                fileIterlist.append(open(fullpath + "." + str(x), "rb"))
            except:
                break

        if fileIterlist == []:
            try:
                fileIterlist = [open(where, "rb")]
            except:
                raise functions.OperatorError(
                    __name__.rsplit('.')[-1], "No such file")

        for filenum, fileIter in enumerate(fileIterlist):
            blocksize = struct.unpack('!i', fileIter.read(4))
            b = struct.unpack('!B', fileIter.read(1))
            schema = cPickle.load(fileIter)
            colnum = len(schema)
            minr = "1997-09-17"
            maxr = "1997-11-17"
            retcols = [7]
            if filenum == 0:
                yield ("c1", )
                #yield ("pkey",)
            input = cStringIO.StringIO()
            while True:
                input.truncate(0)
                try:
                    blocksize = struct.unpack('!i', fileIter.read(4))
                except:
                    break

                if blocksize[0]:
                    input.write(fileIter.read(blocksize[0]))
                    input.seek(0)
                    b = struct.unpack('!B', input.read(1))
                    if b[0]:
                        decompression = struct.unpack('!B', input.read(1))
                        if decompression[0]:
                            decompress = zlib.decompress
                        else:
                            decompress = bz2.decompress

                        type = '!' + 'i' * (colnum * 2 + 1)
                        ind = list(
                            struct.unpack(type,
                                          input.read(4 * (colnum * 2 + 1))))
                        cols = [[] for i in range(len(retcols))]
                        indexes = []
                        c = 10

                        input.seek(
                            sum(ind[0:c * 2]) + 1 + 1 + 4 * (colnum * 2 + 1))
                        s = serializer.loads(decompress(input.read(ind[c *
                                                                       2])))

                        def binarySearchmi(alist, item):
                            first = 0
                            last = len(alist) - 1
                            found = False
                            midpoint = (first + last) // 2
                            while first <= last and not found:
                                midpoint = (first + last) // 2
                                if alist[midpoint] == item:
                                    found = True
                                else:
                                    if item < alist[midpoint]:
                                        last = midpoint - 1
                                    else:
                                        first = midpoint + 1
                            if found == False:
                                if alist[midpoint] < item:
                                    midpoint += 1
                            return midpoint, found

                        def binarySearchma(alist, item):
                            first = 0
                            last = len(alist) - 1
                            found = False
                            midpoint = (first + last) // 2
                            while first <= last and not found:
                                midpoint = (first + last) // 2
                                if alist[midpoint] == item:
                                    found = True
                                else:
                                    if item < alist[midpoint]:
                                        last = midpoint - 1
                                    else:
                                        first = midpoint + 1
                            if found == False:
                                if alist[midpoint] > item:
                                    midpoint -= 1
                            return midpoint, found

                        if (maxr >= s[0] and minr <= s[len(s) - 1]):
                            t1 = binarySearchmi(s, minr)
                            t2 = binarySearchma(s, maxr)
                            if (len(s) > 1 and ind[c * 2 + 1] == 0
                                    and ind[colnum * 2] > 1):
                                cols[0] = s
                            else:

                                if len(s) == 1:
                                    cols[0] = repeat(s[0], ind[colnum * 2])

                                elif len(s) < 256:
                                    listptr = array('B')
                                    listptr.fromstring(
                                        decompress(input.read(ind[c * 2 + 1])))
                                    indices = [
                                        i for i, x in enumerate(listptr)
                                        if x >= t1[0] and x <= t2[0]
                                    ]
                                    for i in indices:
                                        indexes.append(i)
                                        cols[0].append(s[listptr[i]])
                                else:
                                    listptr = array('H')
                                    listptr.fromstring(
                                        decompress(input.read(ind[c * 2 + 1])))
                                    indices = [
                                        i for i, x in enumerate(listptr)
                                        if t2[0] >= x >= t1[0]
                                    ]
                                    j = 0
                                    for i in indices:
                                        indexes.append(i)
                                        j += 1
                                        cols[0].append(s[listptr[i]])

                                # elif len(s)<256:
                                #     cols = imap(s.__getitem__, array('B', decompress(input.read(ind[c*2+1]))))
                                # else:
                                #     cols = imap(s.__getitem__, array('H', decompress(input.read(ind[c*2+1]))))
                        else:
                            cols = [[]] * colnum
                        for i in cols[0]:
                            yield [i]
                        if len(indexes) < 0:  # never true as written; the branch below appears deliberately disabled
                            for c in retcols:
                                if c != 10:
                                    input.seek(
                                        sum(ind[0:c * 2]) + 1 + 1 + 4 *
                                        (colnum * 2 + 1))
                                    s = serializer.loads(
                                        decompress(input.read(ind[c * 2])))
                                    if (len(s) > 1 and ind[c * 2 + 1] == 0
                                            and ind[colnum * 2] > 1):
                                        cols = s
                                    else:
                                        if len(s) == 1:
                                            cols[c] = repeat(
                                                s[0], len(indexes))

                                        elif len(s) < 256:
                                            listptr = array('B')
                                            listptr.fromstring(
                                                decompress(
                                                    input.read(ind[c * 2 +
                                                                   1])))
                                            for i in indexes:
                                                cols[c].append(s[listptr[i]])
                                        else:
                                            listptr = array('H')
                                            listptr.fromstring(
                                                decompress(
                                                    input.read(ind[c * 2 +
                                                                   1])))
                                            for i in indexes:
                                                cols[c].append(s[listptr[i]])

                                    iterators = tuple(map(iter, cols))
                                    ilen = len(cols)
                                    res = [None] * ilen

                                    while True:
                                        ci = 0
                                        try:
                                            while ci < ilen:
                                                res[ci] = iterators[ci].next()
                                                ci += 1
                                            yield res
                                        except:
                                            break

                            cols = []
                    elif not b[0]:
                        schema = cPickle.load(fileIter)

        try:
            for fileObject in fileIterlist:
                fileObject.close()
        except NameError:
            pass
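
The read loop in VTiter implies a length-prefixed block format: a 4-byte big-endian size (!i), a one-byte data flag (!B), a one-byte compression selector (nonzero selects zlib, zero selects bz2), an index of colnum * 2 + 1 big-endian ints carrying per-column byte counts, and then the msgpack-serialized, compressed column payloads. Below is a hedged writer sketch of that framing, reconstructed from the reader; the real files also carry dictionary-encoded pointer arrays, omitted here:

import bz2
import struct
import zlib

import msgpack

def write_block(out, columns, use_zlib=True):
    # Assumed layout, reverse-engineered from the reader above:
    # [!i blocksize][!B data flag][!B compression][(ncols*2+1) x !i index][payloads]
    compress = zlib.compress if use_zlib else bz2.compress
    payloads = [compress(msgpack.dumps(col)) for col in columns]
    ncols = len(columns)
    index = [0] * (ncols * 2 + 1)
    for i, payload in enumerate(payloads):
        index[2 * i] = len(payload)  # compressed column bytes
        index[2 * i + 1] = 0         # no pointer array in this sketch
    index[2 * ncols] = len(columns[0]) if columns else 0  # row count
    body = (struct.pack('!B', 1) +
            struct.pack('!B', 1 if use_zlib else 0) +
            struct.pack('!' + 'i' * len(index), *index) +
            ''.join(payloads))
    out.write(struct.pack('!i', len(body)))
    out.write(body)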
Example 3
 def __init__(self, text):
     self._file = cStringIO.StringIO(text)
     self.log = []
Example 4
#!/usr/bin/env python
'''
sweety.logstatus

@author: Chris Chou <m2chrischou AT gmail.com>
@description: 
'''

import cStringIO

_content = cStringIO.StringIO()


def get_log_content():
    _content.seek(0, 0)
    return _content.read()
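
get_log_content only reads the shared buffer; the writing side lives elsewhere in the package. A hedged sketch of how such a buffer is typically wired up with the standard logging module (the handler and format here are assumptions, not sweety's actual API):

import logging

def capture_logs():
    # Point a StreamHandler at the module-level buffer so everything
    # logged afterwards is returned by get_log_content().
    handler = logging.StreamHandler(_content)
    handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s %(message)s'))
    logging.getLogger().addHandler(handler)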
Example 5
    def transform(self, src_fp, target_fp, image_request):

        # kdu writes to this:
        fifo_fp = self._make_tmp_fp()

        # kdu command
        q = '-quiet'
        t = '-num_threads %s' % (self.num_threads, )
        i = '-i "%s"' % (src_fp, )
        o = '-o %s' % (fifo_fp, )
        reduce_arg = self._scales_to_reduce_arg(image_request)
        red = '-reduce %s' % (reduce_arg, ) if reduce_arg else ''
        region_arg = self._region_to_kdu_arg(image_request.region_param)
        reg = '-region %s' % (region_arg, ) if region_arg else ''

        kdu_cmd = ' '.join((self.kdu_expand, q, i, t, reg, red, o))

        # make the named pipe
        mkfifo_call = '%s %s' % (self.mkfifo, fifo_fp)
        logger.debug('Calling %s' % (mkfifo_call, ))
        resp = subprocess.check_call(mkfifo_call, shell=True)

        try:
            # Start the kdu shellout. Blocks until the pipe is empty
            logger.debug('Calling: %s' % (kdu_cmd, ))
            kdu_expand_proc = subprocess.Popen(kdu_cmd,
                                               shell=True,
                                               bufsize=-1,
                                               stderr=subprocess.PIPE,
                                               env=self.env)

            f = open(fifo_fp, 'rb')
            logger.debug('Opened %s' % fifo_fp)

            # read from the named pipe
            p = Parser()
            while True:
                s = f.read(1024)
                if not s:
                    break
                p.feed(s)
            im = p.close()  # a PIL.Image

            # finish kdu
            kdu_exit = kdu_expand_proc.wait()
            if kdu_exit != 0:
                map(logger.error, kdu_expand_proc.stderr)

            if self.map_profile_to_srgb and image_request.info.color_profile_bytes:  # i.e. is not None
                emb_profile = cStringIO.StringIO(
                    image_request.info.color_profile_bytes)
                im = profileToProfile(im, emb_profile, self.srgb_profile_fp)

            self._derive_with_pil(im, target_fp, image_request, crop=False)
        except:
            raise
        finally:
            kdu_exit = kdu_expand_proc.wait()
            if kdu_exit != 0:
                map(logger.error, map(string.strip, kdu_expand_proc.stderr))
            unlink(fifo_fp)
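
Stripped of the kdu specifics, transform is a named-pipe pattern: create a FIFO, start a producer writing into it, and read the result back in-process. A hedged sketch of just that skeleton, using os.mkfifo instead of the shelled-out mkfifo above (cmd_template is a placeholder with one '%s' for the FIFO path):

import os
import subprocess
import tempfile

def run_through_fifo(cmd_template):
    fifo = os.path.join(tempfile.mkdtemp(), 'out.fifo')
    os.mkfifo(fifo)
    # Start the producer first; opening the FIFO for reading blocks
    # until a writer has it open.
    proc = subprocess.Popen(cmd_template % fifo, shell=True)
    try:
        with open(fifo, 'rb') as f:
            data = f.read()
    finally:
        proc.wait()
        os.unlink(fifo)
    return data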
Example 6
    def _image(self,
               cr,
               uid,
               model,
               id,
               field,
               response,
               max_width=maxint,
               max_height=maxint,
               context=None):
        """ Fetches the requested field and ensures it does not go above
        (max_width, max_height), resizing it if necessary.

        Resizing is bypassed if the object provides a $field_big, which will
        be interpreted as a pre-resized version of the base field.

        If the record is not found or does not have the requested field,
        returns a placeholder image via :meth:`~._image_placeholder`.

        Sets and checks conditional response parameters:
        * :mailheader:`ETag` is always set (and checked)
        * :mailheader:`Last-Modified` is set iff the record has a concurrency
          field (``__last_update``)

        The requested field is assumed to be base64-encoded image data in
        all cases.
        """
        Model = self.pool[model]
        id = int(id)

        ids = Model.search(cr, uid, [('id', '=', id)], context=context)
        if not ids and 'website_published' in Model._all_columns:
            ids = Model.search(cr,
                               openerp.SUPERUSER_ID,
                               [('id', '=', id),
                                ('website_published', '=', True)],
                               context=context)
        if not ids:
            return self._image_placeholder(response)

        concurrency = '__last_update'
        [record] = Model.read(cr,
                              openerp.SUPERUSER_ID, [id], [concurrency, field],
                              context=context)

        if concurrency in record:
            server_format = openerp.tools.misc.DEFAULT_SERVER_DATETIME_FORMAT
            try:
                response.last_modified = datetime.datetime.strptime(
                    record[concurrency], server_format + '.%f')
            except ValueError:
                # just in case we have a timestamp without microseconds
                response.last_modified = datetime.datetime.strptime(
                    record[concurrency], server_format)

        # Field does not exist on model or field set to False
        if not record.get(field):
            # FIXME: maybe a field which does not exist should be a 404?
            return self._image_placeholder(response)

        response.set_etag(hashlib.sha1(record[field]).hexdigest())
        response.make_conditional(request.httprequest)

        # conditional request match
        if response.status_code == 304:
            return response

        data = record[field].decode('base64')

        if (not max_width) and (not max_height):
            response.data = data
            return response

        image = Image.open(cStringIO.StringIO(data))
        response.mimetype = Image.MIME[image.format]

        w, h = image.size
        max_w = int(max_width) if max_width else maxint
        max_h = int(max_height) if max_height else maxint

        if w < max_w and h < max_h:
            response.data = data
        else:
            image.thumbnail((max_w, max_h), Image.ANTIALIAS)
            image.save(response.stream, image.format)
            # invalidate content-length computed by make_conditional as
            # writing to response.stream does not do it (as of werkzeug 0.9.3)
            del response.headers['Content-Length']

        return response
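
The resize branch is the reusable core of this handler: decode the bytes through cStringIO, thumbnail only when the image exceeds the bounds, and re-encode in the original format. A standalone hedged sketch of that logic outside the HTTP plumbing:

import cStringIO

from PIL import Image

def resize_to_fit(data, max_w, max_h):
    # Returns (mimetype, image bytes), resizing only when necessary.
    image = Image.open(cStringIO.StringIO(data))
    mimetype = Image.MIME[image.format]
    if image.size[0] < max_w and image.size[1] < max_h:
        return mimetype, data
    fmt = image.format  # save() needs the format spelled out for a buffer
    image.thumbnail((max_w, max_h), Image.ANTIALIAS)
    out = cStringIO.StringIO()
    image.save(out, fmt)
    return mimetype, out.getvalue()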
Example 7
def depens_get():
    repo = redis_client.spop("repo_depen_set")
    repo_id = repo.split(" ")[0]
    url = repo.split(" ")[1]

    owner = url.split('/')[4]
    repo_name = url.split("/")[5]

    # page_num=1
    depens_tag = 0

    # cur.execute(select_time, (repo_id, url))
    while (True):
        access_token = redis_client.rpop("access_token_zyr")
        it_url = depend_url % (owner, repo_name, access_token)
        it_c = pycurl.Curl()
        it_c.setopt(it_c.URL, it_url)
        it_b = cStringIO.StringIO()
        it_c.setopt(it_c.WRITEFUNCTION, it_b.write)
        it_c.setopt(it_c.CONNECTTIMEOUT, 60)
        it_c.setopt(it_c.TIMEOUT, 80)
        it_c.setopt(it_c.SSL_VERIFYPEER, 0)
        it_c.setopt(it_c.SSL_VERIFYHOST, 0)
        it_c.setopt(it_c.FOLLOWLOCATION, 5)
        try:
            it_c.perform()
        except Exception as ee:
            print ee
        else:
            it_html = it_b.getvalue().decode("utf-8", "ignore")
            result_json = json.loads(it_html)
            it_b.close()

            if result_json.has_key("error"):
                if "403" in result_json["error"]:  # each key is limited to 60 requests per minute
                    continue
            elif result_json.has_key("message"):
                if "RecordNotFound" in result_json["message"]:
                    print("repo %s:%s not exist" % (repo_id, url))
                    depens_tag = -1
                    break
            else:
                # project_id, fullname, depen_prj_name, depen_name, depen_platform, depen_kind
                fullname = result_json["full_name"]
                dependencies = result_json["dependencies"]
                if len(dependencies) == 0:
                    print("repo %s : %s has no dependency!" %
                          (repo_id, fullname))
                else:
                    print("repo %s has %s dependency" %
                          (repo_id, len(dependencies)))
                    for depen in dependencies:
                        project_name = depen["project_name"]
                        name = depen['name']
                        platform = depen["platform"]
                        kind = depen["kind"]
                        cur.execute(
                            insert_sql %
                            (repo_id, MySQLdb.escape_string(fullname),
                             MySQLdb.escape_string(project_name),
                             MySQLdb.escape_string(name), platform, kind))
                    conn.commit()
                depens_tag = 1
                break

    if depens_tag == 1:
        cur.execute(update_depend_sql % repo_id)
    if depens_tag == -1:
        cur.execute(update_depend_error_sql % repo_id)
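
The pycurl boilerplate in the loop reduces to a small reusable fetch in which cStringIO collects the response body via WRITEFUNCTION. A hedged minimal sketch of just that part:

import cStringIO

import pycurl

def fetch(url, timeout=80):
    buf = cStringIO.StringIO()
    c = pycurl.Curl()
    c.setopt(c.URL, url)
    c.setopt(c.WRITEFUNCTION, buf.write)
    c.setopt(c.CONNECTTIMEOUT, 60)
    c.setopt(c.TIMEOUT, timeout)
    try:
        c.perform()
    finally:
        c.close()
    return buf.getvalue()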
Example 8
    def generate_report_content(cls, start_time, end_time):

        redpackrecord = RedPackRecord.objects.filter(apply_at__gte=start_time, apply_at__lt=end_time).\
            prefetch_related('redpack').prefetch_related('redpack__event').\
            prefetch_related('user').prefetch_related('user__wanglibaouserprofile')

        output = cStringIO.StringIO()

        writer = UnicodeWriter(output, delimiter='\t')
        writer.writerow([
            u'序号', u'红包活动ID', u'红包活动名称', u'用户名称', u'用户手机号', u'兑换平台', u'使用平台',
            u'红包创建时间', u'红包使用时间', u'使用金额', u'关联订单'
        ])
        name = ''
        phone = ''
        for index, record in enumerate(redpackrecord):
            if record.user:
                name = record.user.wanglibaouserprofile.name
                phone = record.user.wanglibaouserprofile.phone
            writer.writerow([
                str(index + 1),
                unicode(record.redpack.event.id),
                unicode(record.redpack.event.name),
                unicode(name),
                unicode(phone),
                unicode(record.change_platform),
                unicode(record.apply_platform),
                timezone.localtime(
                    record.created_at).strftime("%Y-%m-%d %H:%M:%S"),
                timezone.localtime(
                    record.apply_at).strftime("%Y-%m-%d %H:%M:%S")
                if record.apply_at else '',
                str(record.apply_amount),
                unicode(record.order_id) if record.order_id else '',
            ])
        return output.getvalue()


#class IntroducedRewardGenerator(ReportGeneratorBase):
#    prefix = 'yqsytj'
#    reportname_format = u'邀请收益统计 %s--%s'
#
#    @classmethod
#    def generate_report_content(cls, start_time, end_time):
#        from marketing.models import IntroducedByReward
#
#        introduced_records = IntroducedByReward.objects.filter(
#            checked_status=0,
#            activity_start_at=start_time,
#            activity_end_at=end_time
#        ).prefetch_related('user').prefetch_related('user__wanglibaouserprofile').select_related('introduced_by_person__wanglibaouserprofile')
#
#        output = cStringIO.StringIO()
#
#        writer = UnicodeWriter(output, delimiter='\t')
#        writer.writerow([u'序号', u'被邀请人名称', u'被邀请人手机号', u'邀请人名称', u'邀请人手机号', u'产品名称',
#                         u'首笔购买时间', u'投资金额', u'奖励金额', u'发放状态'])
#        user, phone, user_parent, phone_parent = '', '', '', ''
#        for index, record in enumerate(introduced_records):
#            if record.user:
#                user = record.user.wanglibaouserprofile.name
#                phone = record.user.wanglibaouserprofile.phone,
#
#            if record.introduced_by_person:
#                try:
#                    user_parent = record.introduced_by_person.wanglibaouserprofile.name
#                    phone_parent = record.introduced_by_person.wanglibaouserprofile.phone
#                except:
#                    user_parent, phone_parent = '', ''
#
#            writer.writerow([
#                str(index + 1),
#                user,
#                unicode(phone),
#                user_parent,
#                unicode(phone_parent),
#                record.product.name,
#                timezone.localtime(record.first_bought_at,).strftime("%Y-%m-%d %H:%M:%S"),
#                str(record.first_amount),
#                str(record.introduced_reward),
#                str(record.checked_status)
#            ])
#
#        return output.getvalue()
Example 9
    def post(self):
        # The project file is uploaded once to extract the metadata.
        # It is then deleted and the metadata is used to populate another
        # import dialog, giving the user an opportunity to edit the
        # info before importing or cancel the import.
        if not 'projectname' in self.request.arguments:
            # First upload
            sourcefile = self.request.files['projectfile'][0]
            if sourcefile:
                filename = sourcefile['filename']
                if len(filename) > 0:
                    unique = _get_unique_name(self.get_project_dir(),
                                              parse_archive_name(filename))
                    tdir = mkdtemp(prefix=unique)
                    buff = StringIO.StringIO(sourcefile['body'])
                    archive = tarfile.open(fileobj=buff, mode='r:gz')
                    archive.extractall(path=tdir)
                    proj = Project(tdir)
                    project_info = proj.get_info()

                    try:
                        shutil.rmtree(tdir, onerror=onerror)
                    except:
                        pass

                    self.render('projdb/import-metadata-fields.html',
                                projectname=parse_archive_name(unique),
                                description=project_info['description'],
                                version=project_info['version'])
        else:
            # second upload
            forms = {}
            for field in ['projectname', 'description', 'version']:
                if field in self.request.arguments.keys():
                    forms[field] = self.request.arguments[field][0]

            sourcefile = self.request.files['projectfile'][0]
            if sourcefile:
                filename = sourcefile['filename']
                if len(filename) > 0:

                    unique = _get_unique_name(self.get_project_dir(),
                                              parse_archive_name(filename))

                    pdb = Projects()

                    project = {}
                    project['id'] = pdb.predict_next_rowid()
                    project['active'] = 1
                    project['projectname'] = forms['projectname'].strip()
                    project['description'] = forms['description'].strip()
                    project['version'] = forms['version'].strip()
                    project['projpath'] = unique

                    os.mkdir(unique)

                    buff = StringIO.StringIO(sourcefile['body'])

                    archive = tarfile.open(fileobj=buff, mode='r:gz')
                    archive.extractall(path=unique)

                    vcslist = find_vcs()
                    if vcslist:
                        vcs = vcslist[0](unique)
                    else:
                        vcs = DumbRepo(unique)
                    vcs.init_repo()

                    # Update project settings.
                    proj = Project(project['projpath'])
                    dummy = proj.get_info()  # Just to get required keys.
                    info = {}
                    for key in dummy:
                        info[key] = project[key]
                    proj.set_info(info)

                    pdb.new(project)

                    self.redirect("/workspace/project?projpath=" +
                                  quote_plus(project['projpath']))

        self.redirect("/")
Example 10
def getIconImage():
    stream = cStringIO.StringIO(getIconData())
    return ImageFromStream(stream)
Example 11
def find_tls (params,
              pdb_inp,
              pdb_hierarchy,
              xray_structure,
              return_as_list=False,
              ignore_pdb_header_groups=False,
              out=None) :
  """
  !!! WARNING! incoming xray_structure here gets converted to
  isotropic B-factors IN PLACE.
  """
  if (out is None) :
    out = sys.stdout
  print_statistics.make_header("Analyzing inputs", out=out)
  if (params.random_seed is None) :
    params.random_seed = flex.get_random_seed()
  random.seed(params.random_seed)
  flex.set_random_seed(params.random_seed)
  xray_structure.convert_to_isotropic()
  sites_cart = xray_structure.sites_cart()
  u_cart = None
  u_iso  = xray_structure.extract_u_iso_or_u_equiv()#*adptbx.u_as_b(1.) # ?
  bad_i_seqs = check_adp(u_iso=u_iso, out=out)
  if (bad_i_seqs is not None) :
    atoms = pdb_hierarchy.atoms()
    bad_atom_strings = []
    for i_seq in bad_i_seqs[:10] :
      atom_str = atoms[i_seq].format_atom_record()
      bad_atom_strings.append(atom_str)
    if (len(bad_i_seqs) > 10) :
      bad_atom_strings.append("... (remaining %d not shown)" %
        (len(bad_i_seqs)-10))
    raise Sorry(("%d atoms in the model contain isotropic B-factors <= 0:\n"+
      "\n".join(bad_atom_strings)) % (len(bad_i_seqs)))
  #
  ssm = mmtbx.secondary_structure.manager(
    pdb_hierarchy                = pdb_hierarchy,
    sec_str_from_pdb_file        = None,
    params                       = None,
    log                          = out)
  alpha_h_selection = ssm.alpha_selection()
  secondary_structure_selection = ssm.alpha_selection() | \
      ssm.beta_selection() | ssm.base_pair_selection()
  if(u_cart is not None):
    assert secondary_structure_selection.size() == u_cart.size()
  else:
    assert secondary_structure_selection.size() == u_iso.size()
  ssm.show_summary(log=out)
  chains_and_residue_selections, secondary_structure_selection = chains_and_atoms(
    pdb_hierarchy                 = pdb_hierarchy,
    secondary_structure_selection = secondary_structure_selection,
    out                           = out)
  chains_and_permutations = []
  chains_and_atom_selection_strings = []
  print_statistics.make_header("Processing chains", out=out)
  if (params.nproc is None) :
    params.nproc = 1
  for crs in chains_and_residue_selections:
    print_statistics.make_sub_header("Processing chain '%s'"%crs[0],
      out=out)
    chain_selection = chain_selection_from_residues(crs[1])
    groups, perms = get_model_partitioning(residues = crs[1],
      secondary_structure_selection = secondary_structure_selection,
      out = out)
    #
    #print
    #selection_arrays = sels_as_selection_arrays(sels = groups)
    #merge_groups_by_connectivity(
    #  pdb_hierarchy     = pdb_hierarchy,
    #  xray_structure    = xray_structure,
    #  selection_arrays  = selection_arrays)
    #assert 0
    #
    if(len(perms)==1):
      print >> out, "  Whole chain is considered as one TLS group."
      chains_and_atom_selection_strings.append([crs[0],
        permutations_as_atom_selection_string(groups, perms[0])])
    else:
      print >> out, "  Fitting TLS matrices..."
      dic = {}
      target_best = 1.e+9
      if (params.nproc is Auto) or (params.nproc > 1) :
        process_perms = analyze_permutations(
          groups=groups,
          sites_cart=sites_cart,
          u_cart=u_cart,
          u_iso=u_iso)
        from libtbx import easy_mp
        stdout_and_targets = easy_mp.pool_map(
          processes=params.nproc,
          fixed_func=process_perms,
          args=perms,
          chunksize=100,
          func_wrapper="buffer_stdout_stderr")
        targets = [ t for so, t in stdout_and_targets ]
        for (perm, target) in zip(perms, targets) :
          dic.setdefault(len(perm), []).append([target,perm])
      else :
        for i_perm, perm in enumerate(perms):
          if i_perm%500==0:
            print >> out, "    ...perm %d of %d"%(i_perm, len(perms))
          selections = tls_group_selections(groups, perm)
          target = 0
          for selection in selections:
            mo = tls_refinery(
              u_cart     = u_cart,
              u_iso      = u_iso,
              sites_cart = sites_cart,
              selection  = selection)
            target += mo.f
          dic.setdefault(len(perm), []).append([target,perm])
        #print "    perm %d of %d: target=%8.3f (TLS groups: %s), permutation:"%(
        #  i_perm, len(perms),target,len(perm)),perm
      print >> out, "    Best fits:"
      print >> out, "      No. of         Targets"
      print >> out, "      groups   best   rand.pick  diff.  score permutation"
      score_best = -1.e+9
      perm_choice = None
      for k, v in zip(dic.keys(),dic.values()):
        t_best = v[0][0]
        perm_best = v[0][1]
        for v_ in v:
          if(v_[0]<t_best):
            t_best = v_[0]
            perm_best = v_[1]
        if(u_cart is not None):
          u_cart_ = u_cart.select(chain_selection)
        else: u_cart_ = None
        if(u_iso is not None):
          u_iso_ = u_iso.select(chain_selection)
        else: u_iso_ = None
        r = tls_refinery_random_groups(
          u_cart     = u_cart_,
          u_iso      = u_iso_,
          sites_cart = sites_cart.select(chain_selection),
          n_groups   = k)
        score = (r-t_best)/(r+t_best)*100.
        print >> out, "         %3d %6.3f      %6.3f %6.2f %6.3f"%(
          k,t_best, r, r-t_best, score), perm_best
        if(score > score_best):
          score_best = score
          perm_choice = perm_best[:]
      #
      chains_and_permutations.append([crs[0],perm_choice])
      chains_and_atom_selection_strings.append([crs[0],
        permutations_as_atom_selection_string(groups, perm_choice)])
      #
  if (pdb_inp is not None) and (not ignore_pdb_header_groups) :
    external_tls_selections = external_tls(
      pdb_inp       = pdb_inp,
      pdb_hierarchy = pdb_hierarchy,
      sites_cart    = sites_cart,
      u_iso         = u_iso,
      out           = out)
  print_statistics.make_header("SUMMARY", out=out)
  #print "Optimal TLS groups:"
  #for chain_and_permutation in chains_and_permutations:
  #  print chain_and_permutation
  #print
  print >> out, "TLS atom selections for phenix.refine:"
  groups_out = cStringIO.StringIO()
  selection_strings = []
  print >> groups_out, "refinement.refine.adp {"
  for r in chains_and_atom_selection_strings:
    prefix = "chain '%s'"%r[0]
    if(len(r[1])>0 and len(r[1:])>0):
      prefix += " and "
      for r_ in r[1:]:
        for r__ in r_:
          if(len(r__)>0):
            group_selection = prefix+"(%s)"%r__
            print >> groups_out, "  tls = \"%s\"" % group_selection
            selection_strings.append("%s" % group_selection)
    else:
      print >> groups_out, "  tls = \"%s\"" % prefix
      selection_strings.append("%s" % prefix)
  print >> groups_out, "}"
  print >> out, groups_out.getvalue()
  print >> out
  #XXX
  if 0:
    merge_groups_by_connectivity(
      pdb_hierarchy     = pdb_hierarchy,
      xray_structure    = xray_structure,
      selection_strings = selection_strings)
  #XXX
  if(len(selection_strings)>0):
    total_target = total_score(
      pdb_hierarchy     = pdb_hierarchy,
      sites_cart        = sites_cart,
      u_iso             = u_iso,
      selection_strings = selection_strings)
    print >> out, "Overall best total target for automatically found groups: %10.1f"%total_target
    print >> out
  if (return_as_list) :
    return selection_strings
  else :
    return groups_out.getvalue()
Example 12
def getShowFillingImage():
    stream = cStringIO.StringIO(getShowFillingData())
    return ImageFromStream(stream)
Example 13
def getShowSizersImage():
    stream = cStringIO.StringIO(getShowSizersData())
    return ImageFromStream(stream)
Example 14
def getFindImage():
    stream = cStringIO.StringIO(getFindData())
    return ImageFromStream(stream)
Example 15
    def excel_action(self):
        wb = pycel.Workbook(encoding='utf-8')

        style_cabecera = pycel.easyxf('font: colour black, bold True;'
                                      'align: vertical center, horizontal center;'
                                      )

        style_header = pycel.easyxf('font: bold True;'
                                    'align: vertical center, horizontal center, wrap on;'
                                    'borders: left 1, right 1, top 1, bottom 1;')

        view_style = pycel.easyxf('font: colour green, bold true, height 200;'
                                  'align: vertical center, horizontal center, wrap on;'
                                  'borders: left 1, right 1, top 1, bottom 1;'
                                  )

        linea_izq = pycel.easyxf('font: colour black, height 150;'
                                 'align: vertical center, horizontal left, wrap on;'
                                 'borders: left 1, right 1, top 1, bottom 1;'
                                 )
        linea_izq_n = pycel.easyxf('font: colour black, height 140;'
                                   'align: vertical center, horizontal center, wrap on;'
                                   'borders: left 1, right 1, top 1, bottom 1;'
                                   )
        linea_izq_neg = pycel.easyxf('font: colour black, bold true, height 200;'
                                     'align: vertical center, horizontal left, wrap on;'
                                     'borders: left 1, right 1, top 1, bottom 1;'
                                     )
        linea_der = pycel.easyxf('font: colour black, height 150;'
                                 'align: vertical center, horizontal right;'
                                 'borders: left 1, right 1, top 1, bottom 1;'
                                 )
        linea_der_bold = pycel.easyxf('font: colour black, bold true, height 200;'
                                      'align: vertical center, horizontal right, wrap on;'
                                      'borders: left 1, right 1, top 1, bottom 1;'
                                      )
        view_style_out = pycel.easyxf('font: colour red, bold true, height 200;'
                                      'align: vertical center, horizontal center, wrap on;'
                                      'borders: left 1, right 1, top 1, bottom 1;'
                                      )
        view_style_red = pycel.easyxf('font: colour red, height 200;'
                                      'align: vertical center, horizontal left, wrap on;'
                                      'borders: left 1, right 1, top 1, bottom 1;'
                                      )
        view_style_green = pycel.easyxf('font: colour green, height 200;'
                                        'align: vertical center, horizontal left, wrap on;'
                                        'borders: left 1, right 1, top 1, bottom 1;'
                                        )

        ws = wb.add_sheet('Reporte Ventas')

        ws.show_grid = False
        ws.header_str = u"&LFecha de Impresion: &D Hora: &T&RPagina &P de &N"
        ws.footer_str = u""
        company = self.env['res.users'].browse(self._uid).company_id
        ws.write_merge(1, 1, 1, 5, company.name, style_cabecera)
        ws.write_merge(2, 2, 1, 5, 'Fecha desde: ' + self.date_from + ' - ' + 'Fecha hasta: ' + self.date_to + ' ', style_cabecera)
        ws.write_merge(3, 3, 1, 5, 'REPORTE VENTAS', style_cabecera)

        ws.fit_num_pages = 1
        ws.fit_height_to_pages = 0
        ws.fit_width_to_pages = 1
        ws.portrait = 1

        align = pycel.Alignment()
        align.horz = pycel.Alignment.HORZ_RIGHT
        align.vert = pycel.Alignment.VERT_CENTER

        font1 = pycel.Font()
        font1.colour_index = 0x0
        font1.height = 140

        linea_izq_n.width = 150

        # Number format
        style = pycel.XFStyle()
        style.num_format_str = '#,##0.00'
        style.alignment = align
        style.font = font1

        # Number format for the balance column
        font = pycel.Font()
        font.bold = True
        font.colour_index = 0x27

        style1 = pycel.XFStyle()
        style1.num_format_str = '#,##0.00'
        style1.alignment = align
        style1.font = font

        font2 = pycel.Font()
        font2.bold = True
        font2.colour_index = 0x0

        style2 = pycel.XFStyle()
        style2.num_format_str = '#,##0.00'
        style2.alignment = align
        style2.font = font2

        style3 = pycel.XFStyle()
        style3.num_format_str = '0'
        style3.alignment = align
        style3.font = font1

        xi = 9  # customer header row
        self.set_header(ws, xi, style_header)
        xi += 1
        data_file_name = "reporte_ventas.xls"
        seq = 0
        orders = self.get_orders()
        invoice = False
        for order in orders:

            # self.set_body(order, invoice, ws, xi, linea_der, linea_izq, seq, linea_izq_n, linea_izq_neg, view_style,
            #           linea_der_bold, view_style_out)
            # xi += 1

            if order:
                for invoice in order.invoice_ids:
                    if invoice.state in ('open', 'paid'):
                        self.set_body(order, invoice, ws, xi, linea_der, linea_izq, seq, linea_izq_n, linea_izq_neg,
                                      view_style,
                                      linea_der_bold, view_style_out)
                        xi += 1

        ws.col(0).width = 100
        ws.col(1).width = 100
        ws.col(2).width = 3500
        ws.col(3).width = 4000
        ws.col(4).width = 2000
        ws.col(5).width = 3500
        ws.col(6).width = 10000
        ws.col(7).width = 5000
        ws.col(8).width = 4000
        ws.col(9).width = 4000
        ws.col(10).width = 2000
        ws.col(11).width = 2000
        ws.col(12).width = 3000
        ws.col(13).width = 3000
        ws.col(14).width = 3000
        ws.col(15).width = 3000

        try:
            buf = cStringIO.StringIO()

            wb.save(buf)
            out = base64.encodestring(buf.getvalue())
            buf.close()
            res_model = 'sale.order.detail.report'
            self.load_doc(out, data_file_name, res_model)

            return self.write({'data': out, 'txt_filename': data_file_name, 'name': 'control_facturas.xls'})

        except ValueError:
            raise Warning('Error while saving the file')
Example 16
def test_determinism():
    """
    Tests that apply nodes are all passed inputs
    with the same md5sums, that apply nodes are run in the same order, etc.
    Uses disturb_mem to try to cause dictionaries to iterate in different
    orders, etc.
    """
    def run_bgd(mode):
        # Must be seeded the same both times run_bgd is called
        disturb_mem.disturb_mem()
        rng = np.random.RandomState([2012, 11, 27, 8])

        batch_size = 5
        train_batches = 3
        valid_batches = 4
        num_features = 2

        # Synthesize dataset with a linear decision boundary
        w = rng.randn(num_features)

        def make_dataset(num_batches):
            disturb_mem.disturb_mem()
            m = num_batches * batch_size
            X = rng.randn(m, num_features)
            y = np.zeros((m, 1))
            y[:, 0] = np.dot(X, w) > 0.

            rval = DenseDesignMatrix(X=X, y=y)

            rval.yaml_src = ""  # suppress no yaml_src warning

            X = rval.get_batch_design(batch_size)
            assert X.shape == (batch_size, num_features)

            return rval

        train = make_dataset(train_batches)
        valid = make_dataset(valid_batches)

        num_chunks = 10
        chunk_width = 2

        class ManyParamsModel(Model):
            """
            Make a model with lots of parameters, so that there are many
            opportunities for their updates to get accidentally re-ordered
            non-deterministically. This makes non-determinism bugs manifest
            more frequently.
            """
            def __init__(self):
                super(ManyParamsModel, self).__init__()
                self.W1 = [
                    sharedX(rng.randn(num_features, chunk_width))
                    for i in xrange(num_chunks)
                ]
                disturb_mem.disturb_mem()
                self.W2 = [
                    sharedX(rng.randn(chunk_width)) for i in xrange(num_chunks)
                ]
                self._params = safe_union(self.W1, self.W2)
                self.input_space = VectorSpace(num_features)
                self.output_space = VectorSpace(1)

        disturb_mem.disturb_mem()
        model = ManyParamsModel()
        disturb_mem.disturb_mem()

        class LotsOfSummingCost(Cost):
            """
            Make a cost whose gradient on the parameters involves summing many
            terms together,
            so that T.grad is more likely to sum things in a random order.
            """

            supervised = True

            def expr(self, model, data, **kwargs):
                self.get_data_specs(model)[0].validate(data)
                X, Y = data
                disturb_mem.disturb_mem()

                def mlp_pred(non_linearity):
                    Z = [T.dot(X, W) for W in model.W1]
                    H = map(non_linearity, Z)
                    Z = [T.dot(h, W) for h, W in safe_izip(H, model.W2)]
                    pred = sum(Z)
                    return pred

                nonlinearity_predictions = map(
                    mlp_pred, [T.nnet.sigmoid, T.nnet.softplus, T.sqr, T.sin])
                pred = sum(nonlinearity_predictions)
                disturb_mem.disturb_mem()

                return abs(pred - Y[:, 0]).sum()

            def get_data_specs(self, model):
                data = CompositeSpace(
                    (model.get_input_space(), model.get_output_space()))
                source = (model.get_input_source(), model.get_target_source())
                return (data, source)

        cost = LotsOfSummingCost()

        disturb_mem.disturb_mem()

        algorithm = BGD(cost=cost,
                        batch_size=batch_size,
                        updates_per_batch=5,
                        scale_step=.5,
                        conjugate=1,
                        reset_conjugate=0,
                        monitoring_dataset={
                            'train': train,
                            'valid': valid
                        },
                        termination_criterion=EpochCounter(max_epochs=5))

        disturb_mem.disturb_mem()

        train_object = Train(dataset=train,
                             model=model,
                             algorithm=algorithm,
                             save_freq=0)

        disturb_mem.disturb_mem()

        train_object.main_loop()

    output = cStringIO.StringIO()
    record = Record(file_object=output, replay=False)
    record_mode = RecordMode(record)

    run_bgd(record_mode)

    output = cStringIO.StringIO(output.getvalue())
    playback = Record(file_object=output, replay=True)
    playback_mode = RecordMode(playback)

    run_bgd(playback_mode)
Example 17
import email
import os
import sys
import tempfile

import cStringIO
from PIL import Image

from logger import *

IMAGE_MAX_SIZE = 1024

BASE_DIR = os.path.dirname(__file__)

data = sys.stdin.read()

mail = email.message_from_string(data)

if mail.is_multipart():
    for part in mail.get_payload():
        if part.get_content_type().startswith('image'):
            payload = part.get_payload(decode=True)
            try:
                im = Image.open(cStringIO.StringIO(payload))
                # Ensure it is no bigger than this
                im.thumbnail((IMAGE_MAX_SIZE, IMAGE_MAX_SIZE), Image.ANTIALIAS)
                data = cStringIO.StringIO()
                im.save(data, 'JPEG')
                #message = 'data:image/jpg;base64,' + base64.b64encode(data.getvalue())

                (fd, filename) = tempfile.mkstemp(dir=os.path.join(
                    BASE_DIR, 'static', 'uploads'),
                                                  prefix='uploads-',
                                                  suffix='.jpg')
                with os.fdopen(fd, 'w') as f:
                    f.write(data.getvalue())
                os.chmod(filename, 0444)
            except Exception as e:
                pass  # handler body truncated in the original excerpt
Example 18
def flatten(x):
    f = cStringIO.StringIO()
    _flatten(x, f)
    return f.getvalue()
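
flatten assumes a recursive _flatten helper that writes into the buffer. A hypothetical implementation consistent with the wrapper (the real helper may differ):

def _flatten(x, f):
    # Write strings directly; recurse into anything iterable.
    if isinstance(x, str):
        f.write(x)
    elif isinstance(x, unicode):
        f.write(x.encode('utf-8'))
    else:
        for item in x:
            _flatten(item, f)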
Example 19
def calculate_oommf_fields(name, s0, Ms, spec=None, alpha=0., gamma_G=0.,
                           fields=[]):
    assert type(Ms) is float
    assert type(s0) is MeshField and s0.dims == (3,)

    # Calculate the checksum corresponding to the parameters
    m = hashlib.new('md5')
    delim = "\n---\n"
    m.update(SOURCE + delim)
    m.update(name + delim)
    m.update("%25.19e%s" % (Ms, delim))
    m.update("%25.19e%s" % (alpha, delim))
    m.update("%25.19e%s" % (gamma_G, delim))
    m.update("%s%s" % (",".join(fields), delim))
    m.update(spec + delim)
    s = cStringIO.StringIO()
    np.save(s, s0.flat)
    m.update(s.getvalue())
    checksum = m.hexdigest()

    # Format the simulation script
    basename = "%s_%s" % (name, checksum)
    tag = basename.lower()
    params = {
        'spec': spec,
        'basename': basename,
        'Ms': "%25.19e" % Ms,
        'gamma_G': "%25.19e" % gamma_G,
        'alpha': "%25.19e" % alpha,
        'tag': tag,
        'fields': "\n".join("Schedule %s archive Step 1" % f for f in fields)
    }

    mif = MIF_TEMPLATE % params

    # print mif

    # Check if the result is already known
    cachedir = os.path.join(CACHE_DIR, basename)
    with ignored(OSError):
        os.makedirs(CACHE_DIR)

    if not os.path.exists(cachedir):
        # Run the simulation
        print "Running OOMMF simulation %s..." % basename,
        sys.stdout.flush()
        dir = os.path.join(RUN_DIR, basename)
        with ignored(OSError):
            os.makedirs(dir)
        # Write the MIF file
        mif_file_name = basename + ".mif"
        mif_file = open(os.path.join(dir, mif_file_name), "w")
        mif_file.write(mif)
        mif_file.close()
        # Write the starting OMF file
        fl = lattice.FieldLattice(s0.mesh.get_lattice_spec())
        fl.field_data = s0.flat

        # Save it to file
        m0_file = ovf.OVFFile()
        m0_file.new(fl, version=ovf.OVF10, data_type="binary8")
        m0_file.write(os.path.join(dir, basename + "-start.omf"))
        # Run the OOMMF simulation
        run_oommf(dir, ["boxsi", "-threads", "4", mif_file_name])
        # Move the results to the cache directory
        shutil.move(dir, cachedir)
        print "success"

    # Read the results
    fields = {}
    for fn in os.listdir(cachedir):
        m = re.match("^(.*)_%s-(.*)-00-0000000.o[hvm]f$" % checksum, fn)
        if m and m.group(1) == name:
            fl = ovf.OVFFile(os.path.join(cachedir, fn)).get_field()
            fields[m.group(2)] = s0.mesh.field_from_xyz_array(fl.field_data)

    return fields
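
The checksum section relies on cStringIO plus np.save to turn an array into hashable bytes; the trick stands on its own. A minimal sketch:

import cStringIO
import hashlib

import numpy as np

def array_md5(arr):
    # Serialize to an in-memory .npy stream and hash the bytes.
    s = cStringIO.StringIO()
    np.save(s, arr)
    return hashlib.md5(s.getvalue()).hexdigest()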
Example 20
 def resolve(self, uri, baseUri=None):
     if uri in self._uriMapping:
         return cStringIO.StringIO(self._uriMapping[uri])
     return Uri.FtUriResolver.resolve(self, uri, baseUri)
Example 21
 def loads(self, xml_str):
     fh = cStringIO.StringIO(xml_str)
     obj = self.load(fh)
     fh.close()
     return obj
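
A symmetric dumps would run the same buffer in the writing direction, assuming the class also has a dump(obj, fh) method (an assumption; only load is shown):

def dumps(self, obj):
    fh = cStringIO.StringIO()
    self.dump(obj, fh)  # assumed counterpart of load()
    return fh.getvalue()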
Example 22
def report_exception(e, editor=True):
    """
    Reports an exception by writing it to standard error and
    traceback.txt. If `editor` is True, opens the traceback
    up in a text editor.

    Returns a three-item tuple: a simple unicode message, a full
    unicode traceback, and the path of the traceback file.
    """

    import codecs

    type, _value, tb = sys.exc_info()  #@ReservedAssignment

    def safe_utf8(e):
        try:
            m = unicode(e)
        except:
            m = str(e)

        if isinstance(m, unicode):
            return m.encode("utf-8", "replace")
        else:
            return m

    # Return values - which can be displayed to the user.
    simple = cStringIO.StringIO()
    full = cStringIO.StringIO()

    print >> simple, renpy.game.exception_info
    script_level_traceback(simple, tb)
    print >> simple, type.__name__ + ":",
    print >> simple, safe_utf8(e)

    print >> full, "Full traceback:"
    tbl = traceback.extract_tb(tb)
    write_utf8_traceback_list(full, tbl)
    print >> full, type.__name__ + ":",
    print >> full, safe_utf8(e)

    # Write to stdout/stderr.
    sys.stdout.write("\n")
    sys.stdout.write(full.getvalue())
    sys.stdout.write("\n")
    sys.stdout.write(simple.getvalue())

    print >> full
    try:
        print >> full, platform.platform()
        print >> full, renpy.version
        print >> full, renpy.config.name + " " + renpy.config.version
    except:
        pass

    simple = simple.getvalue()
    full = full.getvalue()

    # Inside of the file, which may not be openable.
    try:

        f, traceback_fn = open_error_file("traceback.txt", "w")

        f.write(codecs.BOM_UTF8)

        print >> f, "I'm sorry, but an uncaught exception occurred."
        print >> f

        f.write(simple)

        print >> f
        print >> f, "-- Full Traceback ------------------------------------------------------------"
        print >> f

        f.write(full)
        f.close()

        try:
            if editor and renpy.game.args.command == "run":  #@UndefinedVariable
                renpy.exports.launch_editor([traceback_fn], 1, transient=1)
        except:
            pass

    except:
        pass

    try:
        renpy.display.log.exception()  #@UndefinedVariable
    except:
        pass

    return (simple.decode("utf-8", "replace"),
            full.decode("utf-8", "replace"),
            traceback_fn)
Example 23
 def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
     # Redirect output to a queue
     self.queue = cStringIO.StringIO()
     self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
     self.stream = f
     self.encoder = codecs.getincrementalencoder(encoding)()
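
This is the opening of the UnicodeWriter recipe from the Python 2 csv documentation; the companion writerow from the same recipe round-trips each row through the queue:

def writerow(self, row):
    self.writer.writerow([s.encode("utf-8") for s in row])
    # Fetch the UTF-8 output from the queue and decode it back to unicode ...
    data = self.queue.getvalue()
    data = data.decode("utf-8")
    # ... then re-encode it into the target encoding and write it out.
    data = self.encoder.encode(data)
    self.stream.write(data)
    # Empty the queue for the next row.
    self.queue.truncate(0)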
Example 24
 def get_p_content_as_file(self):
     return cStringIO.StringIO(self.get_p_content())
Example 25
 def getMessage(self, i):
     """
     Return an in-memory file-like object for the message content at the
     given offset.
     """
     return StringIO.StringIO(self.msgs[i])
Example 26
def run_code(code, code_path, ns=None, function_name=None):
    """
    Import a Python module from a path, and run the function given by
    name, if function_name is not None.
    """

    # Change the working directory to the directory of the example, so
    # it can get at its data files, if any.  Add its path to sys.path
    # so it can import any helper modules sitting beside it.
    if six.PY2:
        pwd = os.getcwdu()
    else:
        pwd = os.getcwd()
    old_sys_path = list(sys.path)
    if setup.config.plot_working_directory is not None:
        try:
            os.chdir(setup.config.plot_working_directory)
        except OSError as err:
            raise OSError(
                str(err) + '\n`plot_working_directory` option in '
                'Sphinx configuration file must be a valid '
                'directory path')
        except TypeError as err:
            raise TypeError(
                str(err) + '\n`plot_working_directory` option in '
                'Sphinx configuration file must be a string or '
                'None')
        sys.path.insert(0, setup.config.plot_working_directory)
    elif code_path is not None:
        dirname = os.path.abspath(os.path.dirname(code_path))
        os.chdir(dirname)
        sys.path.insert(0, dirname)

    # Reset sys.argv
    old_sys_argv = sys.argv
    sys.argv = [code_path]

    # Redirect stdout
    stdout = sys.stdout
    if six.PY3:
        sys.stdout = io.StringIO()
    else:
        sys.stdout = cStringIO.StringIO()

    # Assign a do-nothing print function to the namespace.  There
    # doesn't seem to be any other way to provide a way to (not) print
    # that works correctly across Python 2 and 3.
    def _dummy_print(*arg, **kwarg):
        pass

    try:
        try:
            code = unescape_doctest(code)
            if ns is None:
                ns = {}
            if not ns:
                if setup.config.plot_pre_code is None:
                    six.exec_(
                        six.text_type(
                            "import numpy as np\n" +
                            "from matplotlib import pyplot as plt\n"), ns)
                else:
                    six.exec_(six.text_type(setup.config.plot_pre_code), ns)
            ns['print'] = _dummy_print
            if "__main__" in code:
                six.exec_("__name__ = '__main__'", ns)
            code = remove_coding(code)
            six.exec_(code, ns)
            if function_name is not None:
                six.exec_(function_name + "()", ns)
        except (Exception, SystemExit) as err:
            raise PlotError(traceback.format_exc())
    finally:
        os.chdir(pwd)
        sys.argv = old_sys_argv
        sys.path[:] = old_sys_path
        sys.stdout = stdout
    return ns
Example 27
    def import_csv(self, cr, uid, ids, context=None):
        bom_obj = self.pool.get('mrp.bom')
        prod_obj = self.pool.get('product.product')
        if context is None:
            context = {}
        for wiz_rec in self.browse(cr, uid, ids, context=context):
            bom_vals = {}
            str_data = base64.decodestring(wiz_rec.browse_path)
            if not str_data:
                raise osv.except_osv('Warning', 'The file contains no data')
            try:
                bom_data = list(csv.reader(cStringIO.StringIO(str_data)))
            except:
                raise osv.except_osv(
                    'Warning',
                    'Make sure you saved the file as .csv extension and import!'
                )

            headers_list = []
            for header in bom_data[0]:
                headers_list.append(header.strip())
            headers_dict = {
                'parent': headers_list.index('Parent'),
                'child': headers_list.index("Child"),
                'qty': headers_list.index('Qty'),
            }

            for data in bom_data[1:]:
                parent = data[headers_dict['parent']]
                child = data[headers_dict['child']]
                qty = data[headers_dict['qty']]
                if parent in bom_vals:
                    bom_vals[parent].append((child, qty))
                else:
                    bom_vals.update({parent: [(child, qty)]})
            for key, vals in bom_vals.iteritems():
                parent_prod_ids = prod_obj.search(cr,
                                                  uid,
                                                  [('default_code', '=', key)],
                                                  context=context)
                if not parent_prod_ids:
                    _logger.info('Error',
                                 key + ' Default Code is not available')
                    raise osv.except_osv(
                        'Error', key + ' Default Code is not available')
                parent_product = prod_obj.browse(cr,
                                                 uid,
                                                 parent_prod_ids,
                                                 context=context)
                child_list = []
                product_onchange_dict = {}
                for val in vals:
                    child_prod_ids = prod_obj.search(
                        cr,
                        uid, [('default_code', '=', val[0])],
                        context=context)
                    child_product = prod_obj.browse(cr,
                                                    uid,
                                                    child_prod_ids,
                                                    context=context)
                    product_onchange_dict = bom_obj.onchange_product_id(
                        cr,
                        uid, [],
                        child_prod_ids[0],
                        parent_product[0].name,
                        context=context)['value']
                    child_list.append((0, 0, {
                        'product_id': child_prod_ids[0],
                        'product_uom': child_product[0].uom_id.id,
                        'product_qty': val[1]
                    }))
                if product_onchange_dict:
                    bom_id = bom_obj.create(
                        cr,
                        uid, {
                            'product_id': parent_prod_ids[0],
                            'name': parent_product[0].name,
                            'product_uom': product_onchange_dict.values()[1],
                            'bom_lines': child_list
                        },
                        context=context)
                    _logger.info('BOM  # %s created for Product %s', bom_id,
                                 parent_product[0].name)
        return True


# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
Example 28
 def encode(self):
     buf = StringIO.StringIO()
     buf.write(lcmt_viewer_command._get_packed_fingerprint())
     self._encode_one(buf)
     return buf.getvalue()
Example 29
def LZWDecode(data):
    return ''.join(LZWDecoder(cStringIO.StringIO(data)).run())
Example 30
 def __str__(self):
     s = cStringIO.StringIO()
     self.writeTo(s)
     return s.getvalue()
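
A hedged sketch of the inverse direction, assuming the class also exposes a readFrom(fh) parser (only writeTo is shown above):

@classmethod
def fromString(cls, text):
    obj = cls()
    obj.readFrom(cStringIO.StringIO(text))  # hypothetical counterpart of writeTo()
    return obj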