def test_bulk_update(self):
    '''It should bulk-update resources: existing entries (with an `id`)
    are updated in place, an entry without an `id` is created.'''
    resources = ResourceFactory.build_batch(2)
    self.dataset.resources.extend(resources)
    self.dataset.save()
    now = datetime.now()
    # Use `rid` rather than `id` to avoid shadowing the builtin.
    rids = [r.id for r in self.dataset.resources]
    data = [{
        'id': str(rid),
        'title': faker.sentence(),
        'description': faker.text(),
    } for rid in rids]
    # This entry carries no `id`, so the API must create a new resource.
    data.append({
        'title': faker.sentence(),
        'description': faker.text(),
        'url': faker.url(),
    })
    with self.api_user():
        response = self.put(url_for('api.resources', dataset=self.dataset),
                            data)
    self.assert200(response)
    self.dataset.reload()
    self.assertEqual(len(self.dataset.resources), 3)
    for idx, rid in enumerate(rids):
        resource = self.dataset.resources[idx]
        rdata = data[idx]
        self.assertEqual(str(resource.id), rdata['id'])
        self.assertEqual(resource.title, rdata['title'])
        self.assertEqual(resource.description, rdata['description'])
        # The payload omitted `url`; the existing value must be kept.
        self.assertIsNotNone(resource.url)
    # The extra payload entry must have been appended as a new resource.
    new_resource = self.dataset.resources[-1]
    self.assertEqualDates(new_resource.published, now)
def process(self, item):
    '''Generate a random dataset from a fake identifier'''
    # Get or create a harvested dataset with this identifier.
    # Harvest metadata are already filled on creation.
    dataset = self.get_dataset(item.remote_id)
    # A real implementation would:
    # - fetch the remote dataset (if necessary)
    # - validate the fetched payload
    # - map its content to the dataset fields
    # - store extra significant data in the `extra` attribute
    # - map resources data
    dataset.title = faker.sentence()
    dataset.description = faker.text()
    dataset.tags = list(set(faker.words(nb=faker.pyint())))
    # Attach a random number of fake remote resources.
    for _ in range(faker.pyint()):
        resource = Resource(
            title=faker.sentence(),
            description=faker.text(),
            url=faker.url(),
            filetype='remote',
            mime=faker.mime_type(category='text'),
            format=faker.file_extension(category='text'),
            filesize=faker.pyint(),
        )
        dataset.resources.append(resource)
    return dataset
def youckan_api_response(**kwargs):
    '''A YouCKAN ME API response factory'''
    # Default randomized payload; any top-level key can be overridden
    # through keyword arguments.
    data = {
        'profile': {
            'website': faker.url(),
            'city': faker.city(),
            'about': faker.text(),
            'avatar': faker.url() + 'avatar.png',
        },
        'first_name': faker.first_name(),
        'last_name': faker.last_name(),
        'email': faker.email(),
        'is_active': True,
        'is_superuser': False,
        'date_joined': datetime.now().isoformat(),
        'slug': None,
    }
    # Apply overrides, ignoring keys absent from the default payload.
    for key, value in kwargs.items():
        if key in data:
            data[key] = value
    data['fullname'] = ' '.join((data['first_name'], data['last_name']))
    # Derive the slug from the full name unless one was supplied.
    if not data['slug']:
        data['slug'] = slugify.slugify(data['fullname'].lower())
    return data
def test_update(self):
    '''It should update a single existing resource through the API'''
    resource = ResourceFactory()
    self.dataset.resources.append(resource)
    self.dataset.save()
    now = datetime.now()
    payload = {
        'title': faker.sentence(),
        'description': faker.text(),
        'url': faker.url(),
        'published': now.isoformat(),
        'extras': {
            'extra:id': 'id',
        },
    }
    with self.api_user():
        response = self.put(
            url_for('api.resource',
                    dataset=self.dataset,
                    rid=str(resource.id)),
            payload)
    self.assert200(response)
    self.dataset.reload()
    # No new resource should have been created.
    self.assertEqual(len(self.dataset.resources), 1)
    updated = self.dataset.resources[0]
    self.assertEqual(updated.title, payload['title'])
    self.assertEqual(updated.description, payload['description'])
    self.assertEqual(updated.url, payload['url'])
    self.assertEqual(updated.extras, {'extra:id': 'id'})
    self.assertEqualDates(updated.published, now)
def test_update_404(self):
    '''It should 404 when updating a resource not attached to the dataset'''
    payload = {
        'title': faker.sentence(),
        'description': faker.text(),
        'url': faker.url(),
    }
    with self.api_user():
        # A resource built on the fly is unknown to `self.dataset`.
        response = self.put(
            url_for('api.resource',
                    dataset=self.dataset,
                    rid=str(ResourceFactory().id)),
            payload)
    self.assert404(response)