def auth(request):
    """OAuth callback view for Taobao sign-in.

    Exchanges the ``code`` query parameter for the user's identity and
    access token, creates/updates the local Token record, logs the user
    in (creating a local User on first sign-in) and redirects to /.
    """
    if request.method == "GET":

        _code = request.GET.get('code', None)
        try:
            (oauth_id, screen_name, access_token, expires_in) = api.show_me(_code)
        except Exception:
            # The grant code was missing/invalid or the provider call failed.
            return render_to_response('weibo/invalid_grant.html',
                                      context_instance=RequestContext(request))
        logger.info("taobao token %s" % access_token)

        token = Token.objects.create_or_update(oauth_type=Token.Taobao, oauth_id=oauth_id, screen_name=screen_name,
                                               access_token=access_token, expires_in=expires_in)
        if token.user_id:
            # Token already bound to a local account: just log the user in.
            login_without_password(request, token.user)
        else:
            # First sign-in: create a local account with a synthetic
            # username derived from the screen name plus a random password.
            # BUG FIX: the original literal had no %s placeholder, so the
            # % operator raised TypeError at runtime.
            user_name = '%s@*****.**' % screen_name
            password = gen_random_str()
            user = User.objects.create_user(username=user_name, password=password)
            token.user = user
            token.save()
            taobao_login(request, user_name, password)

        # Redirect to the site root; change this URI to customise the
        # post-authorization landing page.
        return HttpResponseRedirect('/')
예제 #2
0
파일: views.py 프로젝트: 54chen/ityao
def parseDailyMovie(dl):
    """Fetch the daily page behind ``dl`` and store one MovieLink per entry.

    Downloads the linked page (GB18030 encoded), slices out the inner
    HTML of ``<div id="content">``, splits it into per-movie chunks and
    stores a MovieLink for every chunk not seen before (deduplicated by
    the SHA-224 digest of the raw chunk).  Finally flags ``dl`` parsed.

    Raises:
        URLError: re-raised when the page cannot be fetched.
    """
    try:
        page = urllib2.urlopen(dl.link)
        content = page.read()
        # Pages are GB18030; normalise to UTF-8 byte strings (Python 2 str).
        content = content.decode('gb18030').encode('utf8')
        page.close() 
    except URLError:        
        raise
    
    # 18 == len('<div id="content">'); slice out the div's inner HTML.
    # NOTE(review): if the div is absent, find() yields -1 and the slice
    # silently starts at offset 17 — presumably the div always exists.
    index = content.find("<div id=\"content\">")+18
    index2 = content.find("</div>",index)
    content = content[index:index2]    
    # Entries are separated by a blank line: two <br /> tags with CRLFs.
    movies = content.split("<br />\r\n<br />\r\n")
    mi = 1
    # Patterns for image tags and the "click to download" anchors.
    # NOTE(review): greedy .* — assumes at most one tag per line; confirm.
    p = re.compile('<IMG class="postimg" src=".*" />',re.IGNORECASE);
    pl = re.compile('<A href=".*" target=_blank >\*\*\*\*\*點此下載\*\*\*\*\*</A>',re.IGNORECASE);
    for movie in movies:
        #logger.info("movie "+str(mi)+" :\n" + movie)
        #logger.info("movie "+str(mi)+"***************************************************************")
        mi = mi + 1          
        movie = movie.strip()
        # Skip separator noise / empty fragments.
        if len(movie) < 20:
            continue
        #create the movielink object
        digestkey = hashlib.sha224(movie).hexdigest()
        # The first line (up to the first <br />) is the title.
        tIndex = movie.find("<br />")
        mTitle = movie[0:tIndex] 
        #find all images        
        
        images = []
        for match in p.finditer(movie):
            image = str(match.group())
            # Pull the value of the src="..." attribute (5 == len('src="')).
            iIndex = image.find('src="')+5
            iIndex2 = image.find('"',iIndex)            
            image = image[iIndex:iIndex2]
            #logger.info("movie: "+mTitle+" image:"+image)
            images.append(image)            
        imagesLink = ";".join(images)
        
        dls = []
        for match in pl.finditer(movie):
            dlink = str(match.group())
            # Pull the value of the href="..." attribute (6 == len('href="')).
            iIndex = dlink.find('href="')+6
            iIndex2 = dlink.find('"',iIndex)            
            dlink = dlink[iIndex:iIndex2]
            dls.append(dlink)            
        dlLinks = ";".join(dls)
        
        
        # Only create the record when this digest has not been seen before.
        result = MovieLink.objects.filter(digestkey=digestkey)               
        if len(result) == 0:              
            ml = MovieLink(title = mTitle,raw_desc = movie,digestkey = digestkey,daily_link=dl,images=imagesLink,downloadlink=dlLinks)            
            ml.save()            
        else:
            logger.info("movie already existed:...." + mTitle)             
    dl.parsed = True
    dl.save()
예제 #3
0
파일: admin.py 프로젝트: v-francoise/th
 def clean_code(self):
     """Validate and normalise the ``code`` form field.

     Accepts the value only when ``check_link`` recognises it as a valid
     URL, then rewrites it to the extracted YouTube identifier.

     Raises:
         forms.ValidationError: when the URL is not recognised.
     """
     logger.error('Code field value = %s' % self.cleaned_data["code"])
     if self.check_link(self.cleaned_data["code"]):
         # BUG FIX: removed the stray Python-2 debug ``print`` statement
         # that duplicated this log line (and breaks on Python 3).
         logger.info("if link ok")
         # Replace the raw URL with the extracted YouTube id.
         self.cleaned_data["code"] = self.youtube_extract(self.cleaned_data["code"])
         return self.cleaned_data["code"]
     else:
         raise forms.ValidationError("Error ! Url not valid !")
예제 #4
0
def parseDailyMovie(dl):
    """Fetch the daily page behind ``dl`` and store one MovieLink per entry.

    Downloads the linked page (GB18030 encoded), slices out the inner
    HTML of ``<div id="content">``, splits it into per-movie chunks and
    stores a MovieLink for every chunk not seen before (deduplicated by
    the SHA-224 digest of the raw chunk).  Finally flags ``dl`` parsed.

    Raises:
        URLError: re-raised when the page cannot be fetched.
    """
    try:
        page = urllib2.urlopen(dl.link)
        content = page.read()
        # Pages are GB18030; normalise to UTF-8 byte strings (Python 2 str).
        content = content.decode('gb18030').encode('utf8')
        page.close() 
    except URLError:        
        raise
    
    # 18 == len('<div id="content">'); slice out the div's inner HTML.
    # NOTE(review): if the div is absent, find() yields -1 and the slice
    # silently starts at offset 17 — presumably the div always exists.
    index = content.find("<div id=\"content\">")+18
    index2 = content.find("</div>",index)
    content = content[index:index2]    
    # Entries are separated by a blank line: two <br /> tags with CRLFs.
    movies = content.split("<br />\r\n<br />\r\n")
    mi = 1
    # Patterns for image tags and the "click to download" anchors.
    # NOTE(review): greedy .* — assumes at most one tag per line; confirm.
    p = re.compile('<IMG class="postimg" src=".*" />',re.IGNORECASE);
    pl = re.compile('<A href=".*" target=_blank >\*\*\*\*\*點此下載\*\*\*\*\*</A>',re.IGNORECASE);
    for movie in movies:
        #logger.info("movie "+str(mi)+" :\n" + movie)
        #logger.info("movie "+str(mi)+"***************************************************************")
        mi = mi + 1          
        movie = movie.strip()
        # Skip separator noise / empty fragments.
        if len(movie) < 20:
            continue
        #create the movielink object
        digestkey = hashlib.sha224(movie).hexdigest()
        # The first line (up to the first <br />) is the title.
        tIndex = movie.find("<br />")
        mTitle = movie[0:tIndex] 
        #find all images        
        
        images = []
        for match in p.finditer(movie):
            image = str(match.group())
            # Pull the value of the src="..." attribute (5 == len('src="')).
            iIndex = image.find('src="')+5
            iIndex2 = image.find('"',iIndex)            
            image = image[iIndex:iIndex2]
            #logger.info("movie: "+mTitle+" image:"+image)
            images.append(image)            
        imagesLink = ";".join(images)
        
        dls = []
        for match in pl.finditer(movie):
            dlink = str(match.group())
            # Pull the value of the href="..." attribute (6 == len('href="')).
            iIndex = dlink.find('href="')+6
            iIndex2 = dlink.find('"',iIndex)            
            dlink = dlink[iIndex:iIndex2]
            dls.append(dlink)            
        dlLinks = ";".join(dls)
        
        
        # Only create the record when this digest has not been seen before.
        result = MovieLink.objects.filter(digestkey=digestkey)               
        if len(result) == 0:              
            ml = MovieLink(title = mTitle,raw_desc = movie,digestkey = digestkey,daily_link=dl,images=imagesLink,downloadlink=dlLinks)            
            ml.save()            
        else:
            logger.info("movie already existed:...." + mTitle)             
    dl.parsed = True
    dl.save()
예제 #5
0
def ajax_excessive_qa_record_pass(request):
    '''
    Self-check passed.
    After the user's self-check the record is submitted to the QA
    department, and the QA department confirms the check as passed.
    Responds with a JSONP payload wrapped in the fixed callback
    ``yuankong``.
    '''
    result = 0
    msg = {0: 'success'}
    if request.method == 'POST':
        qa_record_id = int(request.POST['qa_record_id'])
        qa_record = QARecord.objects.get(id=qa_record_id)
        note = request.POST['pass_note']

        #create_new_manufacture_item(qa_record.manufacture_item,'pass')
        logger.info(qa_record)
        #set reject_product_record attributes
        reject_product_record = qa_record.self_reject_product_record
        if reject_product_record:
            reject_product_record.quality_problems = note
            reject_product_record.reason_analysis = ''
            reject_product_record.processing_result = 'pass'
            reject_product_record.save()

        #set qa_record.qa_excessive_status
        # Status 3 presumably means "passed" — TODO confirm on the model.
        qa_record.qa_excessive_status = 3
        qa_record.note = note
        qa_record.decider = request.user
        qa_record.save()
        logger.info(qa_record)

        #do_transition
        # Wrap-up stage.
        manufacture_item = qa_record.manufacture_item
        # If every QA record of the item passed, mark the item itself passed.
        number_of_qa_records = len(manufacture_item.excessive_qa_records)
        number_of_qa_records_passed = 0
        for qa in manufacture_item.excessive_qa_records:
            if qa.item_pass:
                number_of_qa_records_passed += 1
        if number_of_qa_records == number_of_qa_records_passed:
            manufacture_item.qa_excessive_status = 3
            manufacture_item.save()

        if manufacture_item.status == 6:
            result = productionline_do_transition(
                request.user, manufacture_item.productionline, 6)

    # NOTE(review): JSONP body served with an application/json content
    # type even though the payload is a JavaScript call — confirm clients.
    data = {'result': result, 'msg': msg[result]}
    return HttpResponse('yuankong(' + json.dumps(data) + ');',
                        content_type="application/json")
예제 #6
0
    def __get__(self, instance, instance_type=None):
        """
        Return the translated value of this field for ``instance``,
        or None when no translation exists.

        Accessing the descriptor on the class itself (instance is None)
        returns the descriptor, per the descriptor protocol.
        """
        if instance is None:
            return self

        try:
            # Look up the translation object for the configured language
            # (with optional fallback) and read this field from it.
            return getattr(
                instance._get_translation(self.language_code, fallback=self.fallback),
                self._field_name
            )
        except instance._meta.translation_model.DoesNotExist:
            # NOTE(review): logs self.name while the lookup uses
            # self._field_name — confirm both attributes exist and agree.
            logger.info("Translation '%s' for '%s' (pk='%s') does not exist.", self.name, type(instance), instance.pk)
            return None
예제 #7
0
def login_auth(request):
    """Authenticate a user from POSTed credentials and redirect.

    On success the user is logged in and redirected to the module-level
    ``next_next`` URL when one was stashed by a previous view, otherwise
    to /main/.  On failure the login page is re-rendered with an error.
    """
    user_auth = request.POST.get('username')
    passwd_auth = request.POST.get('password')
    authed = auth.authenticate(username=user_auth, password=passwd_auth)
    if authed and authed.is_active:
        auth.login(request, authed)
        logger.info('<%s> login in sucess.' % user_auth)
        # ``next_next`` is an optional module global set elsewhere.
        # Replaced py2-only dict.has_key() and ``not x == None`` with the
        # portable ``globals().get(...) is not None`` check.
        if globals().get('next_next') is not None:
            return HttpResponseRedirect(next_next)
        return HttpResponseRedirect('/main/')
    else:
        logger.warn('<%s> login in fail.' % user_auth)
        return render_to_response('login/login.html', {'msg': u'账号或密码错误'})
예제 #8
0
파일: views.py 프로젝트: jbetsinger/mes
def ajax_excessive_qa_record_pass(request):
    '''
    Self-check passed.
    After the user's self-check the record is submitted to the QA
    department, and the QA department confirms the check as passed.
    Responds with a JSONP payload wrapped in the fixed callback
    ``yuankong``.
    ''' 
    result = 0
    msg = {0:'success'}
    if request.method == 'POST':
        qa_record_id = int(request.POST['qa_record_id'])
        qa_record    = QARecord.objects.get(id=qa_record_id)
        note         = request.POST['pass_note']

        #create_new_manufacture_item(qa_record.manufacture_item,'pass')
        logger.info(qa_record)
        #set reject_product_record attributes
        reject_product_record = qa_record.self_reject_product_record
        if reject_product_record:
            reject_product_record.quality_problems = note
            reject_product_record.reason_analysis = ''
            reject_product_record.processing_result = 'pass'
            reject_product_record.save()

        #set qa_record.qa_excessive_status
        # Status 3 presumably means "passed" — TODO confirm on the model.
        qa_record.qa_excessive_status = 3
        qa_record.note = note
        qa_record.decider = request.user
        qa_record.save()
        logger.info(qa_record)

        #do_transition
        # Wrap-up stage.
        manufacture_item = qa_record.manufacture_item
        # If every QA record of the item passed, mark the item itself passed.
        number_of_qa_records = len(manufacture_item.excessive_qa_records)
        number_of_qa_records_passed = 0
        for qa in manufacture_item.excessive_qa_records:
            if qa.item_pass:
                number_of_qa_records_passed += 1
        if number_of_qa_records == number_of_qa_records_passed:
            manufacture_item.qa_excessive_status = 3
            manufacture_item.save()

        if manufacture_item.status == 6:
            result = productionline_do_transition(request.user,manufacture_item.productionline,6)

    # NOTE(review): JSONP body served with an application/json content
    # type even though the payload is a JavaScript call — confirm clients.
    data = {'result':result,'msg':msg[result]}
    return HttpResponse('yuankong(' + json.dumps(data) + ');', content_type="application/json")
예제 #9
0
파일: views.py 프로젝트: 54chen/ityao
def moviethumbcron(request):
    """Cron-triggered view: localise thumbnails for pending MovieLinks.

    Processes up to five MovieLink rows whose images are not yet
    downloaded: each remote image is fetched and resized, the ``images``
    field is rewritten to the local links and the row is flagged done.

    NOTE(review): the template is rendered with ``locals()``, so the
    local variable names here are part of the template contract.
    """
    #get 1 link of not saved
    movielinks = MovieLink.objects.filter(images_loaded=False)[:5]
    #save image to local
    for ml in movielinks:
        logger.info("[movie thumb cron]movieid: %s" % (ml.id, ))
        images = ml.images.split(";")
        logger.info("[movie thumb cron]%s" % (images, ))
        count = 0   
        imageLinks = [] 
        for image in images:
            count = count + 1
            # Local file name: <digest>_<ordinal>.
            imgname = str(ml.digestkey)+"_"+str(count)
            try: 
                logger.info('[movie image]%s | %s' % (imgname, image))
                link = create_resized_image(imgname, image)
                if link:
                    imageLinks.append(link) 
            except IOError:        
                # Skip images that fail to download/resize.
                continue
        ml.images = ";".join(imageLinks)
        ml.images_loaded = True
        ml.save()
    
    return render_to_response('btfactory/movie.html', locals())
예제 #10
0
파일: views.py 프로젝트: 54chen/ityao
def moviethumbcron(request):
    """Cron-triggered view: localise thumbnails for pending MovieLinks.

    Processes up to five MovieLink rows whose images are not yet
    downloaded: each remote image is fetched and resized, the ``images``
    field is rewritten to the local links and the row is flagged done.

    NOTE(review): the template is rendered with ``locals()``, so the
    local variable names here are part of the template contract.
    """
    #get 1 link of not saved
    movielinks = MovieLink.objects.filter(images_loaded=False)[:5]
    #save image to local
    for ml in movielinks:
        logger.info("[movie thumb cron]movieid: %s" % (ml.id, ))
        images = ml.images.split(";")
        logger.info("[movie thumb cron]%s" % (images, ))
        count = 0
        imageLinks = []
        for image in images:
            count = count + 1
            # Local file name: <digest>_<ordinal>.
            imgname = str(ml.digestkey) + "_" + str(count)
            try:
                logger.info('[movie image]%s | %s' % (imgname, image))
                link = create_resized_image(imgname, image)
                if link:
                    imageLinks.append(link)
            except IOError:
                # Skip images that fail to download/resize.
                continue
        ml.images = ";".join(imageLinks)
        ml.images_loaded = True
        ml.save()

    return render_to_response('btfactory/movie.html', locals())
예제 #11
0
파일: views.py 프로젝트: 54chen/ityao
def parseActress(url):
    """Scrape an actress profile page and create/update an Actress row.

    Extracts the real name, aliases, header photo and profile fields
    from the page at ``url``.  Returns True when a profile was stored,
    None (after logging) when the expected content block is missing.

    Raises:
        URLError: re-raised when the page cannot be fetched.
    """
    try:
        page = urllib2.urlopen(url)
        soup = BeautifulSoup(page)
        page.close()
    except URLError:
        raise

    data_block = soup.find(attrs={"class": "cssboxwhite_body2", "id": "data"})
    if not data_block:
        logger.info("url:" + url + " is NULL!!!!!!!!!!!!")
        return

    # The <h4> heading holds "RealName(alias、alias…)" with a
    # fullwidth parenthesis.
    heading = data_block.contents[1].find("h4").getString().strip()
    paren_at = heading.find("(")
    real_name = heading[0:paren_at]
    alias_list = heading[paren_at + 1:-1].split(u"、")
    alias_list.append(real_name)
    co_name = u",".join(alias_list)

    portrait = data_block.contents[1].find("img")
    actress = Actress.objects.get_or_create(name=real_name)[0]
    logger.info("realname:" + str(actress.id) + ">" + real_name +
                " coname:" + co_name)
    # Refresh the aliases and (when present) the header photo.
    actress.co_names = co_name
    if portrait is not None:
        actress.photo = save_acttress_header(actress.id, portrait.get('src', ''))
    else:
        logger.info("realname:" + str(actress.id) + ">" + real_name +
                    " no header photo!")

    # Collect the profile fields, skipping the ":::" placeholders.
    dashrow = data_block.contents[1].find("ul", attrs={"class": "dashrow"})
    fields = []
    for item in dashrow.findAll('li'):
        value = item.getText().strip()
        if value != ":::":
            fields.append(value)

    actress.profile = ";".join(fields)
    logger.info("profile:" + actress.profile)
    actress.save()
    return True
예제 #12
0
파일: views.py 프로젝트: 54chen/ityao
def parseActress(url):
    """Scrape an actress profile page and create/update an Actress row.

    Extracts the real name, aliases, header photo and profile fields
    from the page at ``url``.  Returns True when a profile was stored,
    None (after logging) when the expected content block is missing.

    Raises:
        URLError: re-raised when the page cannot be fetched.
    """
    try:
        page = urllib2.urlopen(url)
        soup = BeautifulSoup(page)
        #content = content.decode('gb18030').encode('utf8')
        page.close() 
    except URLError:        
        raise
    
    body = soup.find(attrs={"class":"cssboxwhite_body2","id":"data"})
    if not body:
        logger.info("url:"+url+" is NULL!!!!!!!!!!!!")
    else:
        # The <h4> heading holds "RealName(alias、alias…)" with a
        # fullwidth parenthesis.
        names = body.contents[1].find("h4").getString().strip()
        co_index = names.find("(")
        real_name = names[0:co_index]
        co_name = names[co_index+1:-1]        
        co_name_array = co_name.split(u"、")        
        co_name_array.append(real_name)
        co_name = u",".join(co_name_array)
        
        header = body.contents[1].find("img")        
        result = Actress.objects.get_or_create(name=real_name)        
        actress = result[0]
        logger.info("realname:"+str(actress.id)+">"+real_name+" coname:"+co_name)
        #create or update the actress
        actress.co_names = co_name
        if header is not None:
            link = header.get('src','')
            actress.photo = save_acttress_header(actress.id, link)
        else:
            logger.info("realname:"+str(actress.id)+">"+real_name+" no header photo!")
        # Collect the profile fields, skipping the ":::" placeholders.
        dashrow = body.contents[1].find("ul",attrs={"class":"dashrow"})
        lis = dashrow.findAll('li')
        #logger.info("dashrow:"+dashrow.prettify())
        pf = []        
        for content in lis:
            text = content.getText().strip()
            if text != ":::":
                pf.append(text)    
            
        actress.profile = ";".join(pf)
        logger.info("profile:"+actress.profile)
        actress.save()
        return True
예제 #13
0
파일: views.py 프로젝트: 54chen/ityao
def parseMonth(month_url):
    """Scan a monthly index page and store newly found daily links.

    Fetches the page behind ``month_url``, finds anchors whose text
    matches the daily-post title pattern and saves each as a DailyLink
    (already-known URLs are skipped).  At most four matching anchors
    are handled per call.  Returns the number of links actually saved.

    Raises:
        URLError: re-raised when the page cannot be fetched.
    """
    parts = urlparse.urlsplit(month_url.link)
    # Scheme + host, used to absolutise relative hrefs.
    servername = parts[0] + "://" + parts[1]
    try:
        page = urllib2.urlopen(month_url.link)
    except URLError:
        raise
    soup = BeautifulSoup(page, fromEncoding='gbk')
    anchors = soup.findAll('a', {'href': True, 'target': True}, True)

    title_pattern = re.compile(u"^★㊣最新の[(日本)(亚洲)](.)*♂(.)*♀$")
    matched = 0
    saved = 0
    for anchor in anchors:
        label = anchor.getText()
        if not title_pattern.search(label):
            logger.info(label + " not match!")
            continue
        matched += 1
        logger.info(label)
        # Store the daily link; IntegrityError means we already have it.
        absolute = servername + anchor.get('href', '')
        try:
            DailyLink(link=absolute,
                      monthly_link=month_url,
                      label=label).save()
            saved += 1
        except IntegrityError:
            logger.info("URL already existed:...." + absolute)
        if matched > 3:
            # Handle only a handful of matches per invocation.
            break
    return saved
예제 #14
0
파일: views.py 프로젝트: 54chen/ityao
def parseMonth(month_url):
    """Scan a monthly index page and store newly found daily links.

    Anchors whose text matches the daily-post title pattern are saved
    as DailyLink rows (already-known URLs are skipped); at most four
    matches are handled per call.  Returns the number of links saved.

    Raises:
        URLError: re-raised when the page cannot be fetched.
    """
    #logger.info("Parse Month:" + str(month_url.link))
    url = urlparse.urlsplit(month_url.link)
    # Scheme + host, used to absolutise relative hrefs.
    servername = url[0]+"://"+url[1]    
    try:
        page = urllib2.urlopen(month_url.link)
    except URLError:        
        raise
    soup = BeautifulSoup(page,fromEncoding='gbk')
    #content = soup.prettify()
    links = soup.findAll('a', {'href':True,'target':True},True)
    
    count = 0
    parsed_count = 0
    reobj = re.compile(u"^★㊣最新の[(日本)(亚洲)](.)*♂(.)*♀$")
        
    for link in links:            
        content = link.getText()                
        match = reobj.search(content)
        if match:
            count = count+1
            logger.info(content)
            # Store the daily link; IntegrityError means we already have it.
            linkstr = servername+link.get('href','')
            dailyLink = DailyLink(link=linkstr,monthly_link=month_url,label=content)
            try:
                dailyLink.save()
                parsed_count = parsed_count + 1
            except IntegrityError:
                logger.info("URL already existed:...." + linkstr)
                pass    
            if count > 3:
                # Handle only a handful of matches per invocation.
                break 
        else:
            logger.info(content+" not match!")
            continue
    return parsed_count   
예제 #15
0
파일: views.py 프로젝트: 54chen/ityao
def dailymovie(request, daily_id):
    """Render the most recent movies (up to 100) of one DailyLink.

    NOTE(review): the template context is ``locals()`` — the local
    names ``dl`` and ``movielinks`` are part of the template contract.
    """
    logger.info('request for daily moviews.')
    dl = get_object_or_404(DailyLink, pk=daily_id)
    movielinks = MovieLink.objects.filter(daily_link=dl).order_by('-id')[:100]

    return render_to_response('btfactory/dailymovies.html', locals())
def login(request):
    """Kick off the OAuth flow.

    Redirects the browser to the provider's authorisation URL,
    logging the HTTP referer when one is present.
    """
    authorize_url = api.get_authorize_url()
    referer = request.META.get('HTTP_REFERER', None)
    if referer:
        logger.info(referer)
    return HttpResponseRedirect(authorize_url)
예제 #17
0
파일: views.py 프로젝트: 54chen/ityao
def dailymovie(request, daily_id):        
    """Render the most recent movies (up to 100) of one DailyLink.

    NOTE(review): the template context is ``locals()`` — the local
    names ``dl`` and ``movielinks`` are part of the template contract.
    """
    logger.info('request for daily moviews.')
    dl = get_object_or_404( DailyLink, pk=daily_id)
    movielinks = MovieLink.objects.filter(daily_link=dl).order_by('-id')[:100]    

    return render_to_response('btfactory/dailymovies.html', locals())