Example #1
def letsget_edit_note(request, id, val):
    if request.user.is_superuser or request.is_admin:
        current_site = request.current_site
        subdomain = request.subdomain

        pk = id.split(';')
        client = LetsGetClients.objects.select_related('organization').get(
            pk=pk[1], site=current_site, subdomain=subdomain)
        extra = ['', '', '', '']
        if client.organization.extra:
            extra = client.organization.extra.replace(' 00:00:00',
                                                      '').split(';')

        if pk[0] == 'note0':
            # note0 holds a date; try the ISO 'YYYY-MM-DD' layout first.
            year, month, day = val.split('-')
            try:
                val = datetime.datetime(int(year), int(month), int(day))
            except ValueError:
                # Fall back to 'DD-Mon-YYYY': the first token is actually the
                # day and the last is the year, and the month name is mapped
                # to its number.
                month = get_month_en(month)
                val = datetime.datetime(int(day), int(month), int(year))

        extra[int(pk[0].replace('note', ''))] = str(val)
        client.organization.extra = ';'.join(extra)
        client.organization.save()

        if pk[0] == 'note0':
            val = val.strftime('%d-%b-%Y').lower()

        return simplejson.dumps({
            'status': True,
            'id': id.replace(';', '__'),
            'val': val
        })
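
The function returns a JSON string rather than an HttpResponse, so it is presumably wrapped by another view or decorator elsewhere in the project. A minimal sketch of such a wrapper, assuming the hypothetical name letsget_edit_note_view and standard Django imports (not part of the original snippet):

from django.http import HttpResponse

def letsget_edit_note_view(request, id, val):
    # Delegate to the function above and wrap its JSON string in a response.
    payload = letsget_edit_note(request, id, val)
    return HttpResponse(payload, content_type='application/json')
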
Example #2
def get_imdb_film_list():

    source = ImportSources.objects.get(url='http://www.imdb.com/')

    url = '%scalendar/?region=us' % source.url
    
    opener = give_me_cookie()
    req = opener.open(urllib2.Request(url))
    
    xml = ''
    ids = []
    if req.getcode() == 200:
        data = BeautifulSoup(req.read(), from_encoding="utf-8")
        div = data.find('div', id="main")
        old_date = ''
        for h4 in div.findAll('h4'):
            release = h4.string.encode('utf-8')
            day, month, year = release.split()
            
            month = get_month_en(low(month))
            
            rel_date = '%s-%s-%s' % (year, month, day)

            xml += '<date v="%s">' % rel_date
                
            ul = h4.find_next('ul')
            
            for li in ul.findAll('li'):
                year = li.find('span', {'class': "year_type"}).string.encode('utf-8')
                if 'documentary' not in low(year):
                    year = re.findall(r'\d+', year)
                    if year:
                        details = li.find('i')
                        if details:
                            details = str(details).encode('utf-8').replace('<i>','').replace('</i>','')
                            details = details.replace('(','').replace(')','')
                        else:
                            details = ''
                            
                        # Skip limited, festival and TV-premiere releases.
                        if 'limited' not in low(details) and 'fest' not in low(details) and 'tv premiere' not in low(details):
                            # Escape '&' before '"' so '&quot;' is not double-escaped.
                            film_name = li.a.string.encode('utf-8').replace('&', '&amp;').replace('"', '&quot;')
                            film_slug = low(del_separator(film_name))
                            full_url = li.a.get('href').encode('utf-8')
                            imdb_id = full_url.replace('/title/tt', '').replace('/', '')
                        
                            xml += '<film n="%s" s="%s" y="%s" id="%s" d="%s" r="%s"></film>' % (film_name, film_slug, year[0], imdb_id, details, rel_date)
                            ids.append(imdb_id)
                    
            xml += '</date>'
    ids = ';'.join(set(ids))
    xml = '<data><ids value="%s">%s</ids></data>' % (ids, xml)

    create_dump_file('%s_film_list' % source.dump, settings.API_DUMP_PATH, xml)
    cron_success('html', source.dump, 'films_list', 'Список релизов')
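
The parser relies on low() and get_month_en() helpers that are not shown here. A plausible stand-in for each, assuming low() is a plain lower-casing wrapper and get_month_en() maps an English month name (or abbreviation) to a zero-padded month number, which would also satisfy the int(month) calls in the other examples:

def low(value):
    # Lower-case a string before substring matching.
    return value.lower()

def get_month_en(name):
    # Map 'january'/'jan' ... 'december'/'dec' to '01' ... '12'.
    months = ['january', 'february', 'march', 'april', 'may', 'june',
              'july', 'august', 'september', 'october', 'november', 'december']
    prefix = name.strip().lower()[:3]
    for i, m in enumerate(months, 1):
        if m.startswith(prefix):
            return '%02d' % i
    return None
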
Example #3
def imdb_person_data(id):

    url = 'http://www.imdb.com/name/nm%s/bio' % id
    
    resp, content = httplib2.Http(disable_ssl_certificate_validation=True).request(url)
    
    result = {'bio': '', 'birth': '', 'place': '', 'country': '', 'poster': ''}

    if resp['status'] == '200':
        data = BeautifulSoup(content, "html5lib", from_encoding="utf-8")

        table = data.find('table', id="overviewTable")

        birth_day = 0
        birth_month = 0
        birth_year = 0

        if table:
            trs = table.findAll('tr')

            for a in trs[0].findAll('a'):
                href = a.get('href').encode('utf-8')

                if '?birth_monthday' in href:
                    birth_day, birth_month = a.text.strip().split()
                    birth_month = get_month_en(low(birth_month))
                elif '?birth_year' in href:
                    birth_year = a.text.encode('utf-8')
                elif '?birth_place' in href:
                    result['place'] = a.text.encode('utf-8')
                    result['country'] = a.text.split(',')[-1].split('[')[0].strip()


        if birth_day and birth_month and birth_year:
            result['birth'] = datetime.date(int(birth_year), int(birth_month), int(birth_day))

        # Guard against profiles without a mini-bio anchor.
        bio_anchor = data.find('a', {'name': 'mini_bio'})
        bio_block = bio_anchor.find_next('p') if bio_anchor else None
        if bio_block:
            result['bio'] = bio_block.text.strip().encode('utf-8')
    

        poster = data.find('img', {'class': 'poster'})
        if poster:
            poster = poster.get('src').split('._V1_')[0]
            poster += '._V1_SX640_SY720_.jpg'
            
            result['poster'] = poster

    return result
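
A short usage sketch (the id value is only illustrative): the argument is the numeric part of an IMDb name code, e.g. nm0000206 -> '0000206', and the returned dict always carries the keys initialised above.

person = imdb_person_data('0000206')
print person['birth'], person['place'], person['country'], person['poster']
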
Example #4
def get_top250():
    source = ImportSources.objects.get(url='http://top250.info/')
    
    films_exist = SourceFilms.objects.filter(source_obj=source)
    films_exist_dict = {}
    for i in films_exist:
        films_exist_dict[int(i.source_id)] = i
    
    data_nof_films = ''
    films = {}
    keys = []
    
    url = '%scharts/' % source.url
    req = urllib.urlopen(url)
    if req.getcode() == 200:
        data = BeautifulSoup(req.read(), from_encoding="utf-8")
        main = data.find('div', {'class': "layout"})
        tables = main.findAll('table', limit=2)
        trr = tables[0].findAll('tr', limit=1)[0]
        tdd = trr.findAll('td', limit=2)[1]
        
        date_tmp = tdd.text.encode('utf-8').replace('Date: ','').replace(',','').strip()
        month, day, year, times = date_tmp.split()
        month = get_month_en(low(month))
        date_upd = datetime.date(int(year), int(month), int(day))
        
        # '-'  unchanged
        # '↑'  moved up
        # '↓'  moved down
        # '*'  new in the chart
        
        for tr in tables[1].findAll('tr', {'class': ['row_same', 'row_up', 'row_down', 'row_new']}):
            td = tr.findAll('td')
            
            position = int(td[0].text)
            change = td[1].text.encode('utf-8')
            rating = float(td[3].text)
            votes = int(td[4].text)
            
            title = td[2].a.text.encode('utf-8')
            
            year = re.findall(r'\(.*?\)', title)[0].replace('(','').replace(')','')
            title = re.sub(r'\(.*?\)', '', title).strip()
            
            imdb_id = int(td[2].a.get('href').encode('utf-8').replace('/movie/?',''))
            
            if '-' in change:
                change = 1
                change_val = None
            elif '↑' in change:
                change_val = int(change.replace('↑','').strip())
                change = 2
            elif '↓' in change:
                change_val = int(change.replace('↓','').strip())
                change = 3
            elif '*' in change:
                change_val = None
                change = 4
        
            # get the film object for this source from the DB, if it exists
            obj = films_exist_dict.get(imdb_id)
            
            unique = '%s%s' % (imdb_id, date_upd)
            keys.append(unique)
            
            # store all parsed data and the object in the dict
            films[imdb_id] = {
                'imdb_id': imdb_id,
                'position': position,
                'change': change,
                'change_val': change_val,
                'rating': rating,
                'votes': votes,
                'title': title,
                'year': year,
                'obj': obj,
                'key': unique,
            }
    
    top = Top250.objects.filter(key__in=keys)
    tops = [i.key.encode('utf-8') for i in top]
    
    # fetch all films matching by IMDb id from the DB for identification
    films_afisha = Film.objects.using('afisha').only('id', 'idalldvd').filter(idalldvd__in=films.keys())
    films_afisha_dict = {}
    for i in films_afisha:
        films_afisha_dict[int(i.idalldvd)] = i.id
    
    # iterate over all parsed films
    for i in films.values():
        # identify the film
        kid = films_afisha_dict.get(i['imdb_id'])
        # if we already have this film from the source
        if i['obj']:
            # and it is unidentified but has now been matched, store the kid
            if kid and not i['obj'].kid:
                i['obj'].kid = kid
                i['obj'].save()
        # if we do not have this film from the source yet, create it
        else:
            sobj = SourceFilms.objects.create(
                source_id = i['imdb_id'],
                source_obj = source,
                name = i['title'],
                kid = kid,
                imdb = i['imdb_id'],
                year = i['year'],
            )
            i['obj'] = sobj
    
        
        if i['key'] not in tops:
            Top250.objects.create(
                key = i['key'],
                date_upd = date_upd,
                film = i['obj'],
                position = i['position'],
                change = i['change'],
                change_val = i['change_val'],
                rating = i['rating'],
                votes = i['votes'],
            )
    
    create_dump_file('%s_nof_film' % source.dump, settings.NOF_DUMP_PATH, '<data>%s</data>' % data_nof_films)
    cron_success('html', source.dump, 'films', 'Фильмы')
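
The change column is encoded as 1 = unchanged, 2 = moved up, 3 = moved down, 4 = new entry, with change_val carrying the number of positions for moves. A standalone sketch of that mapping (the helper name parse_chart_change is an assumption; like the code above, the source file needs a UTF-8 coding declaration):

def parse_chart_change(raw):
    # Return (change, change_val) for a raw cell such as '-', '↑3', '↓12' or '*'.
    raw = raw.strip()
    if '-' in raw:
        return 1, None
    if '↑' in raw:
        return 2, int(raw.replace('↑', '').strip())
    if '↓' in raw:
        return 3, int(raw.replace('↓', '').strip())
    return 4, None
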
Example #5
def parse_imdb(main_data, count, source, imdb, is_dump, images, country_data, genres_data, persons_data, productions, distr_objs, film_object, films, language, distr_nof_data, data_nof_persons, nof_distr, nof_persons, release_format, country_id, release):

    limits = {
        'G': 0,
        'PG': 6,
        'PG-13': 12,
        'R': 16,
        'NC-17': 18,
    }
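    # MPAA certificate -> minimum viewer age; the value is later stored in the
    # film's `rated` field (interpretation assumed from usage below).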
    
    imdb = get_imdb_id(imdb)
    opener = give_me_cookie()
    url = '%stitle/tt%s/' % (source.url, imdb)
    try:
        req = opener.open(urllib2.Request(url))
    except urllib2.HTTPError:
        req = None

    film_obj = None
    if req:
        data = BeautifulSoup(req.read(), from_encoding="utf-8")


        imdb = long(imdb)
        
        fname = main_data.get('fname')
        fslug = main_data.get('fslug')
        fyear = main_data.get('fyear')
        details = main_data.get('details','')
        
        new_interface = data.find('div', {'class': "title_block"})

        if not is_dump:
            # title
            if new_interface:
                fname = data.find('h1', itemprop="name")
                try:
                    fname.find('span').extract()
                except AttributeError: pass
                fname = fname.text.strip().encode('utf-8')
            else:
                h1 = data.find('h1', {'class': 'header'})
                fname = h1.find('span', itemprop="name").text.strip().encode('utf-8')

            fslug = low(del_separator(fname))
            
            # year
            if new_interface:
                year_tmp = data.find('title').text.replace(u' - IMDb','')
                # for titles of the form 'The Expanse (2015)'
                year = re.findall(r'\(\d{4}\)$', year_tmp)
                if year:
                    fyear = year[0].replace('(','').replace(')','').strip()
                else:
                    # for titles of the form 'The Expanse (TV Series 2015– )'
                    year = re.findall(r'\(.*\d{4}.*\)$', year_tmp)
                    if year:
                        year = re.findall(r'\d{4}', year[0].strip())
                        fyear = year[0] if year else fyear
            else:
                year = h1.find('span', {'class': 'nobr'})
                if year:
                    if year.find('a'):
                        year = year.find('a').text.encode('utf-8').strip()
                    else:
                        year = year.text.encode('utf-8').replace('(','').replace(')','').split('–')[0].strip()

                    try:
                        fyear = int(year)
                    except ValueError:
                        fyear = int(year.split()[-1])
            
            # release date
            if not release:
                url_release = '%sreleaseinfo' % url
                time.sleep(1.5)
                req_release = opener.open(urllib2.Request(url_release))
                if req_release.getcode() == 200:
                    data_release = BeautifulSoup(req_release.read(), from_encoding="utf-8")

                    table = data_release.find('table', id='release_dates')
                    if table:
                        for ttr in table.findAll('tr'):
                            tds = ttr.findAll('td')
                            td_country = tds[0].find('a').text.encode('utf-8').strip()
                            td_release = tds[1].text.encode('utf-8').strip()
                            td_details = tds[2].text.encode('utf-8').strip()
                            if td_country == 'USA' and '(' not in td_details:
                                try:
                                    td_day, td_month, td_year = td_release.split()
                                    td_month = get_month_en(low(td_month.encode('utf-8')))
                                    release = datetime.date(int(td_year), int(td_month), int(td_day))
                                except ValueError: pass

        
        # poster
        if new_interface:
            poster = data.find('div', {'class': 'poster'})
        else:
            poster = data.find('td', id="img_primary")
            if poster:
                poster = poster.find('div', {'class': 'image'})

        if poster:
            if new_interface:
                poster = poster.find('img', itemprop="image").get('src').split('@._')[0]
            else:
                poster = poster.find('img').get('src').split('@._')[0]

            poster += '@._V1_SX640_SY720_.jpg'
            
            poster_name = 'poster__%s' % md5_string_generate('%s%s' % (poster, datetime.datetime.now()))
            
            while poster_name.decode('utf-8') in images:
                poster_name = 'poster__%s' % md5_string_generate('%s%s' % (poster, datetime.datetime.now()))
            
            images.append(poster_name.decode('utf-8'))
        else:
            poster = None
        
        # age restrictions (content rating)
        if new_interface:
            title_block = data.find('div', {'class': "title_block"})

            limit = title_block.find('meta', itemprop="contentRating")
            if limit:
                limit = limit.get('content').encode('utf-8')
                limit = limits.get(limit)

            genres_tmp = [gen.text.encode('utf-8') for gen in title_block.findAll('span', itemprop="genre")]

            div_details = data.find('div', id="titleDetails")

            runtime = div_details.find('time', itemprop="duration")
        else:
            div = data.find('div', {'class': "infobar"})

            limit = div.find('span', itemprop="contentRating")
            if limit:
                limit = limit.get('content').encode('utf-8')
                limit = limits.get(limit)

            genres_tmp = [gen.string.encode('utf-8') for gen in div.findAll('span', itemprop="genre")]
        
            runtime = div.find('time', itemprop="duration")


        if runtime:
            runtime = runtime.text.strip().encode('utf-8')
            runtime = re.findall(r'\d+', runtime)[0]

        # rating
        imdb_rate = data.find('span', itemprop="ratingValue")
        imdb_votes = None
        if imdb_rate:
            imdb_rate = float(imdb_rate.text.encode('utf-8'))
            imdb_votes = data.find('span', itemprop="ratingCount")
            imdb_votes = int(imdb_votes.text.encode('utf-8').replace(u' ', '').replace(u',', ''))
        

        # genres
        genres = []
        if len(genres_tmp) == 1 and genres_tmp[0] == 'Crime':
            # a lone 'Crime' maps to the 'детектив' (detective) genre
            gen_obj = Genre.objects.get(name='детектив')
            genres.append(gen_obj)
        elif 'Action' in genres_tmp and 'Drama' in genres_tmp:
            # do not import Drama
            for genr in genres_tmp:
                if genr != 'Drama':
                    gen_obj = genres_data.get(genr)
                    genres.append(gen_obj)
        elif 'Romance' in genres_tmp:
            if 'Comedy' in genres_tmp:
                # do not import Drama
                for genr in genres_tmp:
                    if genr != 'Drama':
                        gen_obj = genres_data.get(genr)
                        genres.append(gen_obj)
            elif 'Drama' in genres_tmp:
                # melodrama
                gen_obj = Genre.objects.get(name='мелодрама')
                genres.append(gen_obj)
                for genr in genres_tmp:
                    if genr != 'Drama' and genr != 'Romance':
                        gen_obj = genres_data.get(genr)
                        genres.append(gen_obj)
            else:
                for genr in genres_tmp:
                    gen_obj = genres_data.get(genr)
                    genres.append(gen_obj)
        
        else:
            for genr in genres_tmp:
                gen_obj = genres_data.get(genr)
                genres.append(gen_obj)
            
        if 'Horror' in genres_tmp:
            if not limit or limit < 16:
                limit = 16

        note = None

        if new_interface:
            persons = []
            persons_block = data.find('div', {'class': "plot_summary_wrapper"})
            for pb in persons_block.findAll('span', itemprop="director"):
                pb_a = pb.find('a')
                pb_name = pb_a.text.encode('utf-8').strip()
                if pb_name:
                    pb_id = pb_a.get('href').split('?')[0]
                    pb_id = long(pb_id.replace('/name/nm', '').replace('/', ''))
                    persons.append({'name': pb_name, 'action': 3, 'status': 1, 'id': pb_id})
            
            for pb in persons_block.findAll('span', itemprop="creator"):
                pb_a = pb.find('a')
                pb_name = pb_a.text.encode('utf-8').strip()
                if pb_name:
                    pb_type = pb_a.next_sibling
                    if u'screenplay' in pb_type:
                        pb_id = pb_a.get('href').split('?')[0]
                        pb_id = long(pb_id.replace('/name/nm', '').replace('/', ''))
                        persons.append({'name': pb_name, 'action': 4, 'status': 1, 'id': pb_id})

            for pb in persons_block.findAll('span', itemprop="actors"):
                pb_a = pb.find('a')
                pb_name = pb_a.text.encode('utf-8').strip()
                if pb_name:
                    pb_id = pb_a.get('href').split('?')[0]
                    pb_id = long(pb_id.replace('/name/nm', '').replace('/', ''))
                    persons.append({'name': pb_name, 'action': 1, 'status': 1, 'id': pb_id})

            budget_obj = None
            countries = []
            production = []
            for div in div_details.findAll('div', {'class': "txt-block"}):
                h4 = div.find('h4')
                if h4:
                    if h4.text == u'Country:':
                        for a in div.findAll('a'):
                            country_obj = country_data.get(a.text)
                            countries.append(country_obj)
                    elif h4.text == u'Budget:':
                        budget = div
                        budget.find('h4').extract()
                        budget.find('span').extract()
                        budget = budget.text.encode('utf-8').strip()
                        if '$' in budget or '€' in budget:
                            budget = budget.replace(' ', '').replace(',', '').replace('.', '')
                            
                            budget_sum = re.findall(r'\d+\s?', budget)[0]
                            if '$' in budget:
                                budget_cur = '$'
                            elif '€' in budget:
                                budget_cur = '€'
                            
                            if film_object and film_object['obj'].budget:
                                film_object['obj'].budget.budget = int(budget_sum)
                                film_object['obj'].budget.currency = budget_cur
                                film_object['obj'].budget.save()
                            else:
                                budget_obj = Budget.objects.create(
                                    budget = int(budget_sum),
                                    currency = budget_cur,
                                )
        else:
            budget_obj = None
            countries = []
            production = []
            persons = []
            for div in data.findAll('div', {'class': "txt-block"}):
                h4 = div.find('h4')
                if h4:
                    if h4.string == u'Country:':
                        for a in div.findAll('a'):
                            country_obj = country_data.get(a.string)
                            countries.append(country_obj)
                    elif h4.string == u'Budget:':
                        budget = div
                        budget.find('h4').extract()
                        budget.find('span').extract()
                        budget = budget.text.encode('utf-8').strip()
                        if '$' in budget or '€' in budget:
                            budget = budget.replace(' ', '').replace(',', '').replace('.', '')
                            
                            budget_sum = re.findall(r'\d+\s?', budget)[0]
                            if '$' in budget:
                                budget_cur = '$'
                            elif '€' in budget:
                                budget_cur = '€'
                            
                            if film_object and film_object['obj'].budget:
                                film_object['obj'].budget.budget = int(budget_sum)
                                film_object['obj'].budget.currency = budget_cur
                                film_object['obj'].budget.save()
                            else:
                                budget_obj = Budget.objects.create(
                                    budget = int(budget_sum),
                                    currency = budget_cur,
                                )
                    elif h4.string == u'Director:':
                        for d in div.findAll('a'):
                            d_name = d.find('span', itemprop="name")
                            if d_name:
                                d_name = d_name.string
                                d_id = d.get('href').split('?')[0]
                                d_id = long(d_id.replace('/name/nm', '').replace('/', ''))
                                persons.append({'name': d_name, 'action': 3, 'status': 1, 'id': d_id})
                    elif h4.string == u'Writers:':
                        for w in div.findAll('a'):
                            p_name = w.find('span', itemprop="name")
                            if p_name:
                                p_name = p_name.string
                                p_type = w.next_sibling
                                w_id = w.get('href').split('?')[0]
                                w_id = long(w_id.replace('/name/nm', '').replace('/', ''))
                                if u'screenplay' in p_type:
                                    persons.append({'name': p_name, 'action': 4, 'status': 1, 'id': w_id})
                    elif h4.string == u'Stars:':
                        for s in div.findAll('a'):
                            s_name = s.find('span', itemprop="name")
                            if s_name:
                                s_name = s_name.string
                                s_id = s.get('href').split('?')[0]
                                s_id = long(s_id.replace('/name/nm', '').replace('/',''))
                                persons.append({'name': s_name, 'action': 1, 'status': 1, 'id': s_id})


        distributors = []
        url2 = '%scompanycredits' % url
        time.sleep(1.5)
        req2 = opener.open(urllib2.Request(url2))
        if req2.getcode() == 200:
            data2 = BeautifulSoup(req2.read(), from_encoding="utf-8")
            distr_h4 = data2.find('h4', {'name': "distributors"})
            if distr_h4:
                ul = distr_h4.find_next("ul")
                for link in ul.findAll('a'):
                    distr_name = link.text.encode('utf-8')
                    if distr_name not in nof_distr:
                        distr_details = link.next_sibling.encode('utf-8').strip()
                        
                        if country_id == 1:
                            cntry = 'USA'
                        else:
                            cntry = 'France'

                        if cntry in distr_details and 'theatrical' in distr_details:
                            distr_year = re.findall(r'\d{4}', distr_details)
                            distr_year = distr_year[0] if distr_year else None
                            distributors.append({'year': distr_year, 'name': distr_name})

        distr_data = []
        
        if distributors:
            distributors = sorted(distributors, key=operator.itemgetter('year'))
            cur_year = distributors[0]['year']
            for distrib in distributors:
                if distrib['year'] == cur_year:
                    distr_slug = low(del_separator(distrib['name']))
                    distr_obj = distr_objs.get(distr_slug)
                    if distr_obj:
                        distr_data.append(distr_obj)
                    else:
                        distr_nof_data += '<distributor value="%s" slug="%s" alt="%s"></distributor>' % (distrib['name'].replace('&', '&amp;'), distr_slug, None)
                        nof_distr.append(distrib['name'])
        
        poster_obj = None
        if poster:
            time.sleep(1.5)
            poster_obj = get_imdb_poster(poster, poster_name)

        person_list = []
        for pe in persons:
            person_id = pe['id']
            person_obj = persons_data.get(person_id)
            if person_obj:
                person_list.append({'person': person_obj, 'st': pe['status'], 'act': pe['action']})
            else:
                if person_id not in nof_persons:
                    try:
                        person_name = pe['name'].decode('utf8').encode('utf-8')
                    except UnicodeEncodeError:
                        person_name = pe['name'].encode('utf-8')
                    person_slug = low(del_separator(person_name))
                    data_nof_persons += '<person name="%s" slug="%s" code="%s" name_alt="" slug_alt=""></person>' % (person_name, person_slug, person_id)
                    nof_persons.append(pe['id'])
        
        new = False
        if film_object:
            if not film_object['obj'].imdb_id:
                film_object['obj'].imdb_id = imdb
            if not film_object['obj'].budget and budget_obj:
                film_object['obj'].budget = budget_obj
            if film_object['obj'].runtime != runtime:
                film_object['obj'].runtime = runtime
            if film_object['obj'].imdb_votes != imdb_votes:
                film_object['obj'].imdb_votes = imdb_votes
                film_object['obj'].imdb_rate = imdb_rate
            if film_object['obj'].year != fyear:
                film_object['obj'].year = fyear
            film_object['obj'].save()
        else:
            film_obj = Films.objects.create(
                year = fyear,
                note = note,
                runtime = runtime,
                rated = limit,
                budget = budget_obj,
                imdb_id = imdb,
                imdb_rate = imdb_rate,
                imdb_votes = imdb_votes,
            )
            film_object = {'releases': [], 'obj': film_obj}
            new = True
        
        
            if is_dump:
                films[int(imdb)] = {'obj': film_obj, 'releases': []}
                
        
        if release and release not in film_object['releases']:
            rel_obj = FilmsReleaseDate.objects.create(
                release = release,
                note = details,
                format = release_format,
                country_id = country_id,
            )
            film_object['obj'].release.add(rel_obj)
        
            if is_dump:
                films[int(imdb)]['releases'].append(rel_obj.release)
        
        if not new:
            for img in film_object['obj'].images.filter(status=0):
                img_p = '%s%s' % (settings.MEDIA_ROOT, img.file)
                try:
                    os.remove(img_p)
                except OSError: pass
                film_object['obj'].images.remove(img)
                img.delete()
                
        if poster_obj:
            film_object['obj'].images.add(poster_obj)
        
        
        film_names = [
            {'name': fname, 'status': 1},
            {'name': fslug, 'status': 2},
        ]
        for f in film_names:
            name_obj, name_created = NameFilms.objects.get_or_create(
                name = f['name'].strip(),
                status = f['status'],
                language = language,
                defaults = {
                    'name': f['name'].strip(),
                    'status': f['status'],
                    'language': language,
                })
            
            for fn in film_object['obj'].name.all():
                if fn.status == f['status'] and fn.language == language:
                    film_object['obj'].name.remove(fn)
                    
            film_object['obj'].name.add(name_obj)
            
        
        for c in countries:
            if c:
                if new:
                    film_object['obj'].country.add(c)
                else:
                    if c not in film_object['obj'].country.all():
                        film_object['obj'].country.add(c)
            
        for g in genres:
            if g:
                if new:
                    film_object['obj'].genre.add(g)
                else:
                    if g not in film_object['obj'].genre.all():
                        film_object['obj'].genre.add(g)
            
        for pr in production:
            if pr:
                if new:
                    film_object['obj'].production.add(pr)
                else:
                    if pr not in film_object['obj'].production.all():
                        film_object['obj'].production.add(pr)
        
        for pers in person_list:
            rel_fp, rel_fp_created = RelationFP.objects.get_or_create(
                person = pers['person'],
                status_act_id = pers['st'],
                action_id = pers['act'],
                films = film_object['obj'],
                defaults = {
                    'person': pers['person'],
                    'status_act_id': pers['st'],
                    'action_id': pers['act'],
                    'films': film_object['obj'],
                })
        
        for dis_data in distr_data:
            if new:
                film_object['obj'].distributor.add(dis_data)
            else:
                if dis_data not in film_object['obj'].distributor.all():
                    film_object['obj'].distributor.add(dis_data)

        film_obj = film_object['obj']
        count += 1

    return count, film_obj, distr_nof_data, data_nof_persons, nof_distr, nof_persons
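
The poster de-duplication above depends on an md5_string_generate() helper that is not included in the snippet. A minimal stand-in, assuming it simply returns the md5 hex digest of its input string:

import hashlib

def md5_string_generate(value):
    # Hex digest used to build unique poster file names such as 'poster__<md5>'.
    return hashlib.md5(value).hexdigest()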