def handle(self, *args, **options):
    if len(args) != 2:
        raise CommandError(
            'You must specify a file to dump and the pk of the type')
    with open(args[0], 'r') as f:
        output = f.read()
    issues = json.loads(output)
    issue_type = Type.objects.get(pk=args[1])
    for issue in issues:
        try:
            i = Issue(lat=issue['lat'],
                      lon=issue['lon'],
                      description=issue['description'],
                      type=issue_type)
            i.save()
        except (IntegrityError, pygeolib.GeocoderError):
            # skip rows that collide with existing issues or fail geocoding
            pass
    self.stdout.write('Successfully dumped the file to the database')

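# A minimal usage sketch for the command above, assuming it lives in a
# Django management command registered as "load_issues" (hypothetical name):
#
#     python manage.py load_issues issues.json 3
#
# where issues.json holds a JSON list of {"lat", "lon", "description"}
# objects and 3 is the pk of an existing Type row.
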
def _buildIssueFromDictionary(dict, user):
    check_noProject = 'noProject' in dict
    issue_trackerURL = dict['trackerURL']
    issue_projectId = dict['project_id']
    issue_projectName = dict.get('project_name', '')
    check_createProject = 'createProject' in dict
    newProject_name = dict.get('newProjectName', '')
    newProject_homeURL = dict.get('newProjectHomeURL', '')
    newProject_trackerURL = dict.get('newProjectTrackerURL', '')
    issue_key = dict.get('key', '')
    issue_title = dict.get('title', '')
    issue_description = dict.get('description', '')
    _throwIfIssueExists(issue_trackerURL, user)
    issue = None
    if check_noProject:
        if not issue_title or not issue_description:
            raise BaseException('title and description are required')
        issue = Issue.newIssueOrphan(issue_title, issue_description, user)
    else:
        project = None
        if check_createProject:
            if (not newProject_name or not newProject_homeURL
                    or not newProject_trackerURL):
                raise BaseException(
                    'all parameters for new project are required')
            projectHomeURLValidationError = validateURL(newProject_homeURL)
            if projectHomeURLValidationError:
                raise BaseException('invalid project URL (' +
                                    newProject_homeURL + ') - ' +
                                    projectHomeURLValidationError)
            projectTrackerURLValidationError = validateURL(
                newProject_trackerURL)
            if projectTrackerURLValidationError:
                raise BaseException('invalid project tracker URL (' +
                                    newProject_trackerURL + ') - ' +
                                    projectTrackerURLValidationError)
            project = Project.newProject(newProject_name, user,
                                         newProject_homeURL,
                                         newProject_trackerURL)
        else:
            project = Project.objects.get(pk=int(issue_projectId))
            if newProject_homeURL != project.homeURL:
                project.homeURL = newProject_homeURL
        if not issue_key or not issue_title:
            raise BaseException('key and title are required')
        issueURLValidationError = validateIssueURL(issue_trackerURL)
        if issueURLValidationError:
            raise BaseException('invalid issue URL (' + issue_trackerURL +
                                ') - ' + issueURLValidationError)
        issue = Issue.newIssue(project, issue_key, issue_title, user,
                               issue_trackerURL)
    return issue

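# A hedged sketch of driving _buildIssueFromDictionary from a view; the view
# name and redirect target are hypothetical, while the dictionary keys
# ('trackerURL', 'project_id', 'key', 'title', ...) come from the function
# above:
#
#     def add_issue(request):
#         issue = _buildIssueFromDictionary(request.POST, request.user)
#         issue.save()
#         return redirect('issue', issue.id)
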
def test_we_dont_make_multiple_links(self):
    self.assertEqual(Copyright.objects.count(), 0)
    i1 = Issue(date_issued=datetime.date(1800, 1, 1))
    c1 = i1.copyright_link
    self.assertEqual(Copyright.objects.count(), 1)
    dbc1 = Copyright.objects.all()[0]
    i2 = Issue(date_issued=datetime.date(1801, 1, 1))
    c2 = i2.copyright_link
    self.assertEqual(Copyright.objects.count(), 1)
    self.assertEqual(c1.uri, c2.uri)
    self.assertEqual(c1.label, c2.label)

def _buildOfferFromDictionary(dict, user):
    check_noProject = 'noProject' in dict
    issue_trackerURL = dict['trackerURL']
    issue_projectId = dict['project_id']
    issue_projectName = dictOrEmpty(dict, 'project_name')
    check_createProject = 'createProject' in dict
    newProject_name = dictOrEmpty(dict, 'newProjectName')
    newProject_homeURL = dictOrEmpty(dict, 'newProjectHomeURL')
    newProject_trackerURL = dictOrEmpty(dict, 'newProjectTrackerURL')
    issue_key = dictOrEmpty(dict, 'key')
    issue_title = dictOrEmpty(dict, 'title')
    issue_description = dictOrEmpty(dict, 'description')
    _throwIfIssueExists(issue_trackerURL, user)
    issue = None
    if check_noProject:
        if not issue_title or not issue_description:
            raise BaseException('title and description are required')
        issue = Issue.newIssueOrphan(issue_title, issue_description, user)
    else:
        project = None
        if check_createProject:
            if (not newProject_name or not newProject_homeURL
                    or not newProject_trackerURL):
                raise BaseException(
                    'all parameters for new project are required')
            projectHomeURLValidationError = validateURL(newProject_homeURL)
            if projectHomeURLValidationError:
                raise BaseException('invalid project URL (' +
                                    newProject_homeURL + ') - ' +
                                    projectHomeURLValidationError)
            projectTrackerURLValidationError = validateURL(
                newProject_trackerURL)
            if projectTrackerURLValidationError:
                raise BaseException('invalid project tracker URL (' +
                                    newProject_trackerURL + ') - ' +
                                    projectTrackerURLValidationError)
            project = Project.newProject(newProject_name, user,
                                         newProject_homeURL,
                                         newProject_trackerURL)
        else:
            project = Project.objects.get(pk=int(issue_projectId))
            if newProject_homeURL != project.homeURL:
                project.homeURL = newProject_homeURL
        if not issue_key or not issue_title:
            raise BaseException('key and title are required')
        issueURLValidationError = validateIssueURL(issue_trackerURL)
        if issueURLValidationError:
            raise BaseException('invalid issue URL (' + issue_trackerURL +
                                ') - ' + issueURLValidationError)
        issue = Issue.newIssue(project, issue_key, issue_title, user,
                               issue_trackerURL)
    return _buildOfferFromDictionary_and_issue(dict, user, issue)

def setUp(self):
    try:
        self.user = User.objects.get_by_natural_key(USER_NAME)
    except User.DoesNotExist:
        self.user = User.objects.create_user(USER_NAME, USER_MAIL, USER_PASS)
    self.project = Project.newProject(PROJECT_NAME, self.user, PROJECT_URL,
                                      PROJECT_BUGTRACKER)
    self.issue = Issue.newIssue(self.project, ISSUE_KEY, ISSUE_TITLE,
                                ISSUE_DESCRIPTION,
                                self.project.createdByUser,
                                self.project.trackerURL)

def test_non_pd_issue_has_no_copyright_link(self):
    # This is the absolute earliest an issue can be non-PD
    i = Issue(date_issued=datetime.date(datetime.date.today().year - 95,
                                        1, 1))
    c = i.copyright_link
    # No copyright
    self.assertTrue(c is None)
    # No link is created in the db
    self.assertEqual(Copyright.objects.count(), 0)

def addFeedback(request):
    dict = request.POST
    issue_title = dict['title']
    issue_description = dict['description']
    if not issue_title or not issue_description:
        raise BaseException(_('title and description are required'))
    issue = Issue.newIssueFeedback(issue_title, issue_description,
                                   request.user)
    issue.save()
    watch_services.watch_issue(request.user, issue.id, Watch.CREATED)
    notify_admin(_('new Feedback: %s') % issue_title, issue_description)
    return redirect('core.views.feedback_views.feedback')

def addFeedback(request):
    dict = request.POST
    issue_title = dict['title']
    issue_description = dict['description']
    if not issue_title or not issue_description:
        raise BaseException(_('title and description are required'))
    issue = Issue.newIssueFeedback(issue_title, issue_description,
                                   request.user)
    issue.save()
    watch_services.watch_issue(request.user, issue.id, Watch.CREATED)
    notify_admin(_('new Feedback: %s') % issue_title, issue_description)
    return redirect('feedback')

def update_latest(self, item, spider):
    """Update the latest issues in a collection."""
    spider_name = spider.name.lower()
    source = Source.objects.get(spider=spider_name)
    try:
        manga = Manga.objects.get(url=item['url'], source=source)
    except Manga.DoesNotExist:
        # The manga is not a current one. We simply ignore it because it
        # will be created in the next full sync.
        return
    for item_issue in item['issues']:
        if not manga.issue_set.filter(url=item_issue['url']).exists():
            issue = Issue()
            self._update_issue(issue, item_issue)
            manga.issue_set.add(issue, bulk=False)

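# Note on bulk=False above: Django's RelatedManager.add() normally attaches
# objects with a bulk UPDATE, which requires them to already have primary
# keys. Passing bulk=False makes Django set issue.manga and call
# issue.save() instead, which is what this unsaved Issue needs.
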
def test_empty_db_pd_issue_has_copyright_link(self):
    self.assertEqual(Copyright.objects.count(), 0)
    # This is the absolute latest an issue can be considered public domain
    i = Issue(date_issued=datetime.date(datetime.date.today().year - 96,
                                        12, 31))
    c = i.copyright_link
    self.assertEqual(Copyright.objects.count(), 1)
    dbc = Copyright.objects.all()[0]
    # Make sure what got stuffed in the DB is the same as what got returned
    # since the return is created on the fly.
    self.assertEqual(c.label, dbc.label)
    self.assertEqual(c.uri, dbc.uri)
    # Next we make sure label and URI aren't empty. We don't hard-code the
    # expected label/uri, we just want to be sure they exist.
    self.assertTrue(len(c.label) > 10)
    self.assertTrue(len(c.uri) > 10)

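# Taken together, the two copyright tests pin down the boundary that
# copyright_link presumably implements: issues dated 96 or more calendar
# years before today are public domain, anything newer is not. A minimal
# sketch of just that predicate (hypothetical helper, reusing the module's
# existing datetime import; the real property also creates and returns a
# Copyright row):

def _is_public_domain(date_issued):
    # PD iff the issue year is at least 96 years before the current year
    return date_issued.year <= datetime.date.today().year - 96
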
def _load_issue(self, mets_file):
    _logger.debug("parsing issue mets file: %s" % mets_file)
    doc = etree.parse(mets_file)
    # get the mods for the issue
    div = doc.xpath('.//mets:div[@TYPE="np:issue"]', namespaces=ns)[0]
    dmdid = div.attrib['DMDID']
    mods = dmd_mods(doc, dmdid)
    # set up a new Issue
    issue = Issue()
    issue.volume = mods.xpath(
        'string(.//mods:detail[@type="volume"]/mods:number[1])',
        namespaces=ns).strip()
    issue.number = mods.xpath(
        'string(.//mods:detail[@type="issue"]/mods:number[1])',
        namespaces=ns).strip()
    issue.edition = int(mods.xpath(
        'string(.//mods:detail[@type="edition"]/mods:number[1])',
        namespaces=ns))
    issue.edition_label = mods.xpath(
        'string(.//mods:detail[@type="edition"]/mods:caption[1])',
        namespaces=ns).strip()
    # parse issue date
    date_issued = mods.xpath('string(.//mods:dateIssued)', namespaces=ns)
    issue.date_issued = datetime.strptime(date_issued, '%Y-%m-%d')
    # attach the Issue to the appropriate Title
    lccn = mods.xpath('string(.//mods:identifier[@type="lccn"])',
                      namespaces=ns).strip()
    try:
        title = Title.objects.get(lccn=lccn)
    except Exception as e:
        url = settings.MARC_RETRIEVAL_URLFORMAT % lccn
        logging.info("attempting to load marc record from %s", url)
        management.call_command('load_titles', url)
        title = Title.objects.get(lccn=lccn)

def on_data(self, data):
    print('Inside tweet issue')
    logger.info('Inside tweet issue')
    try:
        tweet = json.loads(data)
        title = tweet['text']
        geo = tweet['geo']
        coord = tweet['coordinates']
        user = tweet['user']['screen_name']
        sentiment = requests.post(
            'http://text-processing.com/api/sentiment/',
            data={'text': title}).text
        if not sentiment:
            logger.info('Sentiment could not be determined [Text: ' +
                        title + ']')
            return True
        sentiJson = json.loads(sentiment)
        if sentiJson['label'] != 'neg':
            logger.info('Sentiment is not negative')
            print('Sentiment is not negative')
            return True
        logger.info('Sentiment is negative')
        print('Sentiment is negative')
        print('User handle: @' + user)
        grievance = Issue(title=title, content=title)
        grievance.twitter_handle = '@' + user
        grievance.status = IssueStatus.objects.filter(id=1).get()
        grievance.tweet = tweet
        try:
            grievance.save()
            logger.info('Saved')
            reply = ('@%s Your issue has been registered with id %s'
                     % (user, grievance.issue_id))
            s = api.update_status(reply, tweet['id'])
        except Exception as e:
            logger.exception('Saving to database failed: %s', e)
            traceback.print_exc()
        return True
    except Exception as e:
        logger.exception('Unknown exception occurred: %s', e)

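# A hedged sketch of wiring the on_data handler above into tweepy's pre-4.0
# streaming API; the class name, auth object, and track keywords are all
# hypothetical:
#
#     listener = GrievanceStreamListener()   # subclass defining on_data
#     stream = tweepy.Stream(auth, listener)
#     stream.filter(track=['pothole', 'streetlight'])
#
# Returning True from on_data keeps the stream connected; returning False
# would disconnect it.
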
def run(self, queue):
    """
    Fetch the issue with the given number for the current repository
    """
    super(FetchIssueByNumber, self).run(queue)

    gh = self.gh
    if not gh:
        return  # it's delayed!

    repository_id, issue_number = self.identifier.hget().split('#')
    repository = self.repository

    try:
        issue = repository.issues.get(number=issue_number)
    except Issue.DoesNotExist:
        issue = Issue(repository=repository, number=issue_number)

    force_fetch = self.force_fetch.hget() == '1'

    try:
        # prefetch full data if wanted
        if force_fetch:
            if repository.has_issues:
                issue.fetch(gh, force_fetch=True)
            if issue.is_pull_request:
                issue.fetch_pr(gh, force_fetch=True)
        # now the normal fetch; if we previously force-fetched, these calls
        # will result in 304s
        issue.fetch_all(gh)
    except ApiNotFoundError as e:
        # we have a 404, but... check if it's the issue itself
        try:
            issue.fetch(gh)
        except ApiNotFoundError:
            # ok, the issue doesn't exist anymore, delete it
            issue.delete()
            self.deleted.hset(1)
            return False
        else:
            raise e

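# A hedged sketch of how a job like FetchIssueByNumber might be enqueued;
# the add_job call is an assumption about the job-queue API in use, but the
# "<repository_id>#<issue_number>" identifier format comes from the
# split('#') above:
#
#     FetchIssueByNumber.add_job('%s#%s' % (repository.pk, 1234),
#                                force_fetch='1')
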
def _load_issue(self, mets_file):
    _logger.debug("parsing issue mets file: %s" % mets_file)
    doc = etree.parse(mets_file)

    # get the mods for the issue
    div = doc.xpath('.//mets:div[@TYPE="np:issue"]', namespaces=ns)[0]
    dmdid = div.attrib['DMDID']
    mods = dmd_mods(doc, dmdid)

    # set up a new Issue
    issue = Issue()
    issue.volume = mods.xpath(
        'string(.//mods:detail[@type="volume"]/mods:number[1])',
        namespaces=ns).strip()
    issue.number = mods.xpath(
        'string(.//mods:detail[@type="issue"]/mods:number[1])',
        namespaces=ns).strip()
    issue.edition = int(mods.xpath(
        'string(.//mods:detail[@type="edition"]/mods:number[1])',
        namespaces=ns))
    issue.edition_label = mods.xpath(
        'string(.//mods:detail[@type="edition"]/mods:caption[1])',
        namespaces=ns).strip()

    # parse issue date
    date_issued = mods.xpath('string(.//mods:dateIssued)', namespaces=ns)
    issue.date_issued = datetime.strptime(date_issued, '%Y-%m-%d')

    # attach the Issue to the appropriate Title
    lccn = mods.xpath('string(.//mods:identifier[@type="lccn"])',
                      namespaces=ns).strip()
    try:
        title = Title.objects.get(lccn=lccn)
    except Exception as e:
        url = settings.MARC_RETRIEVAL_URLFORMAT % lccn
        _logger.info("attempting to load marc record from %s", url)
        management.call_command('load_titles', url)
        title = Title.objects.get(lccn=lccn)

    issue.title = title
    issue.batch = self.current_batch
    issue.save()
    _logger.debug("saved issue: %s" % issue.url)

    notes = []
    for mods_note in mods.xpath('.//mods:note', namespaces=ns):
        note_type = mods_note.xpath('string(./@type)')
        label = mods_note.xpath('string(./@displayLabel)')
        text = mods_note.xpath('string(.)')
        note = models.IssueNote(type=note_type, label=label, text=text)
        notes.append(note)
    issue.notes.set(notes, bulk=False)
    issue.save()

    # attach pages: lots of logging because it's expensive
    for page_div in div.xpath('.//mets:div[@TYPE="np:page"]',
                              namespaces=ns):
        try:
            page = self._load_page(doc, page_div, issue)
            self.pages_processed += 1
        except BatchLoaderException as e:
            _logger.exception(e)

    return issue

def save_city(self, city):
    city_objs = self.retrieve_city(city)
    for obj in city_objs:
        if 'id' in obj and obj['latitud'] and obj['longitud']:
            issue = Issue()
            issue.lat = float(obj['latitud'])
            issue.lon = float(obj['longitud'])
            issue.address = obj['direccion'] or ' '
            issue.description = obj['desperfectoTexto'].replace(
                "Tipo de incidencia: ", '') or ' '
            if obj['desperfecto'] in self.crash:
                issue.type = models.Type.objects.get(pk=5)
            elif obj['desperfecto'] in self.disturb:
                issue.type = models.Type.objects.get(pk=4)
            elif obj['desperfecto'] in self.noise_polution:
                issue.type = models.Type.objects.get(pk=2)
            elif obj['desperfecto'] in self.vandalism:
                issue.type = models.Type.objects.get(pk=1)
            else:
                issue.type = models.Type.objects.get(pk=9)
            issue.save()

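# Design note on the elif chain above: the category-to-Type mapping
# (crash -> 5, disturb -> 4, noise_polution -> 2, vandalism -> 1, with 9 as
# the fallback) could be expressed as data, so adding a category does not
# require a new branch. A sketch, assuming the category containers are
# plain collections of strings:
#
#     TYPE_PK_BY_CATEGORY = [
#         (self.crash, 5),
#         (self.disturb, 4),
#         (self.noise_polution, 2),
#         (self.vandalism, 1),
#     ]
#     pk = next((pk for cats, pk in TYPE_PK_BY_CATEGORY
#                if obj['desperfecto'] in cats), 9)
#     issue.type = models.Type.objects.get(pk=pk)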