def create_subject_set(docref, name):
    """Create and save a new subject set named "<docref> - <name>" on the global project."""
    print("Attempting to create a subject set via the Zooniverse API")
    new_set = SubjectSet()
    new_set.links.project = project
    new_set.display_name = "{} - {}".format(docref, name)
    new_set.save()
    return new_set
def ls(subject_set_id, project_id, workflow_id, quiet):
    """Lists subject set IDs and names"""
    # Fast path: direct lookup when only a subject set ID was supplied.
    if subject_set_id and not (project_id or workflow_id):
        found = SubjectSet.find(subject_set_id)
        if quiet:
            click.echo(found.id)
        else:
            echo_subject_set(found)
        return
    # Otherwise build a filtered query from whichever IDs were supplied.
    query = {}
    if project_id:
        query['project_id'] = project_id
    if workflow_id:
        query['workflow_id'] = workflow_id
    if subject_set_id:
        query['subject_set_id'] = subject_set_id
    matches = SubjectSet.where(**query)
    if quiet:
        click.echo(" ".join(s.id for s in matches))
    else:
        for match in matches:
            echo_subject_set(match)
def create_subject_set(project, subject_set_name):
    """Create a subject set on *project*, register it with the project, and return it."""
    subject_set = SubjectSet()
    subject_set.display_name = subject_set_name
    subject_set.links.project = project
    subject_set.save()
    project.add_subject_sets(subject_set)
    return subject_set
def _create_subject_set(self, project_id, subject_set_name):
    """Create, save, and return a new subject set linked to the given project."""
    new_set = SubjectSet()
    new_set.display_name = subject_set_name
    new_set.links.project = Project.find(project_id)
    new_set.save()
    return new_set
def main(production=False):
    # NOTE(review): the line below is garbled — '******' looks like redacted
    # credentials, and the original presumably called Panoptes.connect(...)
    # with the staging endpoint.  This function cannot run until that call
    # is restored.
    uname = input('Enter your username: '******'https://panoptes-staging.zooniverse.org', admin=True )
    pId = 5733 # if production else 1820
    project = Project.find(pId)
    # Create a throwaway, timestamp-named subject set for testing.
    subject_set = SubjectSet()
    subject_set.links.project = project
    subject_set.display_name = 'Test_subject_set_' + str(int(time.time()))
    subject_set.save()
    loc = os.path.abspath(os.path.dirname(__file__))
    subjects = os.listdir(loc + '/subjects')
    # NOTE(review): the unpacking order below does not match the prefix tuple:
    # `images` receives the IDs of 'difference_*' files and `differences` the
    # IDs of 'image_*' files.  Looks like a latent bug — confirm intent.
    images, differences, model, metadata = [
        sorted((
            int(re.match(r'{}_([0-9]+)\.(?:json|png)$'.format(s), i).group(1))
            for i in subjects
            if re.match(r'{}_([0-9]+)\.(?:json|png)$'.format(s), i)
        ))
        for s in ('difference', 'image', 'model', 'metadata')
    ]
    # Warn (but do not abort) when the four file families disagree.
    if not images == differences == model == metadata:
        print(
            'Images, differences, model and metadata ' +
            'must all have same length'
        )
    # TODO: change subject directory structure to be more efficient
    # (not having 12,000+ files in a folder...)
    for i in images:
        # NOTE(review): `metadata` is rebound here per-subject, clobbering the
        # ID list unpacked above.
        try:
            with open('{}/subjects/metadata_{}.json'.format(loc, i)) as f:
                metadata = json.load(f)
        except IOError:
            metadata = {}
        subject_set = uploadSubjectToSet(
            project, subject_set,
            [[j.format(loc, i) for j in (
                '{}/subjects/image_{}.png',
                '{}/subjects/difference_{}.json',
                '{}/subjects/model_{}.json'
            )]],  # locations
            [metadata],
        )
def createSubjectSet(subjName, project):
    """Create a subject set named *subjName* under *project*, save it, and return it."""
    new_set = SubjectSet()
    new_set.display_name = subjName
    new_set.links.project = project
    new_set.save()
    return new_set
def create_subject_set(folder_name, set_name='test_subject_set'):
    """Build a subject set from image/difference/model/metadata files found
    in *folder_name* and upload the subjects to project 5590.

    Prompts for Zooniverse credentials interactively.
    """
    # Collect the subject identifiers embedded in the image file names.
    subject_names = []
    for entry in os.listdir(folder_name):
        match = re.match(r'image_(.*?).png', entry)
        if match is not None:
            subject_names.append(match.group(1))
    # Each subject has four companion files; the metadata JSON is last.
    files = []
    for subject_name in subject_names:
        files.append([
            join(folder_name, 'image_{}.png'.format(subject_name)),
            join(folder_name, 'difference_{}.json'.format(subject_name)),
            join(folder_name, 'model_{}.json'.format(subject_name)),
            join(folder_name, 'metadata_{}.json'.format(subject_name)),
        ])
    assert all(os.path.exists(j) for i in files for j in i), 'Missing files!'
    uname = input('Enter your username: ')
    pwd = getpass.getpass()
    Panoptes.connect(
        username=uname,
        password=pwd,
        admin=True
    )
    project = Project.find(5590)
    subject_set = SubjectSet()
    subject_set.links.project = project
    subject_set.display_name = set_name
    subject_set.save()
    # Load each subject's metadata, defaulting to {} when the file is absent.
    metadata_list = []
    for group in files:
        try:
            with open(group[3]) as meta_file:
                metadata_list.append(json.load(meta_file))
        except IOError:
            metadata_list.append({})
    subject_set = uploadSubjectToSet(
        project, subject_set,
        [group[:3] for group in files],  # locations (metadata file excluded)
        metadata_list,
    )
def create(quiet, project_id, display_name):
    """
    Creates a new subject set.

    Prints the subject set ID and name of the new subject set.
    """
    new_set = SubjectSet()
    new_set.links.project = project_id
    new_set.display_name = display_name
    new_set.save()
    if quiet:
        click.echo(new_set.id)
        return
    echo_subject_set(new_set)
def add_to_subject_set(subject_set_id, subject_set_file,
                       username=None, password=None):
    """
    Import a 1 column file of subject_ids to a subject_set.

    Parameters
    ----------
    subject_set_id : str
        subject set ID linked to the web interface
    subject_set_file : str
        one-column file of subject IDs (output of cull_subject_ids)
    username, password : str, str
        if passed, will add subject set ids to the subject set on the web.
        When username is None the file is read but nothing is uploaded.
    """
    lines = []
    with open(subject_set_file) as subject_ids:
        # extend (not append) so `lines` is a flat list of ID strings rather
        # than a single-element list-of-lists.
        lines.extend(subject_ids.read().splitlines())
    if username is not None:
        try:
            from panoptes_client import Panoptes, SubjectSet
        except ImportError:
            print(
                'Install https://github.com/zooniverse/panoptes-python-client')
            sys.exit(1)
        Panoptes.connect(username=username, password=password)
        subject_set = SubjectSet.find(subject_set_id)
        # De-duplicate before uploading.
        subject_set.add(np.unique(lines))
    return
def subject_set_export(
    self,
    export_id,
    access_token,
):
    """Run a subject set export.

    Marks the export RUNNING, creates a MediaMetadata row and schedules a
    fetch_media_metadata task for every location of every subject in the
    set, then schedules a status-update task.  Any failure marks the export
    FAILED and re-raises so the task framework sees the error.
    """
    export = SubjectSetExport.objects.get(pk=export_id)
    try:
        export.status = SubjectSetExport.RUNNING
        export.save()
        with SocialPanoptes(bearer_token=access_token) as p:
            subject_set = SubjectSet.find(export.subject_set_id)
            for subject in subject_set.subjects:
                for location in subject.locations:
                    media_metadata = MediaMetadata.objects.create(
                        export=export,
                        subject_id=subject.id,
                        # each location is a single {mime_type: url} mapping
                        url=list(location.values())[0],
                    )
                    task_result = fetch_media_metadata.delay(media_metadata.id)
                    media_metadata.celery_task = task_result.id
                    media_metadata.save()
        task_result = update_subject_set_export_status.delay(export_id)
        export.celery_task = task_result.id
        export.save()
    except Exception:
        # Was a bare `except:` — narrowed so SystemExit/KeyboardInterrupt
        # are not caught by the FAILED bookkeeping; still re-raises.
        export.status = SubjectSetExport.FAILED
        export.save()
        raise
def ml_subject_assistant_export_to_microsoft_pt1_get_subjects_data(
    export_id,
    access_token,
):
    """Collect one [image_url, subject_info_json] pair per subject location,
    in the Microsoft-friendly list format expected by the later export steps."""
    print('[Subject Assistant] Exporting to Microsoft 1/4: get Subjects')
    export = MLSubjectAssistantExport.objects.get(pk=export_id)
    data = []
    with SocialPanoptes(bearer_token=access_token) as p:
        subject_set = SubjectSet.find(export.subject_set_id)
        for subject in subject_set.subjects:
            # One data item per frame (image URL) of the subject.
            for frame_id, location in enumerate(subject.locations):
                image_url = list(location.values())[0]
                subject_information = {
                    'project_id': str(subject_set.links.project.id),
                    'subject_set_id': str(export.subject_set_id),
                    'subject_id': str(subject.id),
                    'frame_id': str(frame_id)
                }
                # The subject's JSON information is stored as a string. Yes, really.
                data.append([image_url, json.dumps(subject_information)])
    return data
def make_tutorial_images(imagePaths, ellipseData, projectData):
    """Upload one tutorial subject per image path to the project's subject set.

    projectData supplies credentials and the subject set ID; ellipseData is
    grouped by image index to build each subject's metadata.
    """
    # Connect to Panoptes
    Panoptes.connect(
        username=projectData["user_name"],
        password=projectData["password"]
    )
    # The subject set is loop-invariant: look it up once instead of issuing
    # one API call per image (the original called find() inside the loop).
    try:
        subjectSet = SubjectSet.find(projectData["subject_set"])
    except PanoptesAPIException as e:
        print(e)
        return
    newSubjects = []
    for imageId, imagePath in enumerate(imagePaths):
        print(f"Adding {imagePath}...")
        newSubject = Subject()
        newSubject.add_location(imagePath)
        newSubject.links.project = subjectSet.links.project
        newSubject.metadata.update(
            make_metadata(
                ellipseData.get_group(imageId).reset_index(drop=True),
                imagePath
            )
        )
        newSubject.save()
        newSubjects.append(newSubject)
    subjectSet.add(newSubjects)
def remove_subject_sets(self, workflows_summary):
    '''
    Removes completed subject sets from workflow by analyzing
    the workflows summary returned by get_workflows_summary()
    '''
    self.log.info(
        f"Trying to disable completed Subject Sets from workflows")
    # Honour the configuration switch before touching anything.
    if not self._auto_disable_subject_sets:
        self.log.info(
            f"Configuration prevents from disabling Subject Sets automatically"
        )
        return
    panoptes_client, project = self.get_conn()
    for summary in workflows_summary:
        workflow = Workflow.find(summary['id'])
        completed = []
        for entry in summary['subject_sets']:
            # A set is "completed" when it is non-empty and fully retired.
            fully_retired = (
                entry['subjects_count'] != 0
                and entry['retired_count'] == entry['subjects_count']
            )
            if fully_retired:
                completed.append(SubjectSet.find(entry['id']))
        workflow.remove_subject_sets(completed)
        self.log.info(
            f"Disabled: {len(completed)} Subject Sets from '{workflow.display_name}'"
        )
def push_new_row_subjects(self, source_subject, target_subject_set_id, row_paths_by_column):
    """
    Given image paths for the new column-indexed rows (row_paths_by_column),
    push new unclassified row subjects to the appropriate subject set, with
    metadata references to the source subject and column.
    """
    project = Project.find(settings.PROJECT_ID)
    target_set = SubjectSet.find(target_subject_set_id)
    created = []
    for column_index, row_paths in row_paths_by_column.items():
        self._logger.info('Creating %d new row subjects for column index %d for subject %s',
                          len(row_paths), column_index, source_subject.id)
        for row_path in row_paths:
            subject = Subject()
            subject.links.project = project
            # Carry identifying fields over from the source document subject.
            for copy_field in ('book', 'page'):
                subject.metadata[copy_field] = source_subject.metadata[copy_field]
            subject.metadata['source_document_subject_id'] = source_subject.id
            subject.metadata['source_document_column_index'] = column_index
            subject.add_location(row_path)
            subject.save()
            created.append(subject)
    # Link all new subjects to the target set in one call.
    target_set.add(created)
def download_classifications(subject_set_id, output_file, generate, generate_timeout):
    """
    Downloads a subject-set specific classifications export for the given subject set.

    OUTPUT_FILE will be overwritten if it already exists. Set OUTPUT_FILE to -
    to output to stdout.
    """
    subject_set = SubjectSet.find(subject_set_id)
    if generate:
        click.echo("Generating new export...", err=True)
    export = subject_set.get_export(
        'classifications', generate=generate, wait_timeout=generate_timeout)
    # Progress length is the download size in 1 KiB chunks.
    chunk_count = int(export.headers.get('content-length')) / 1024 + 1
    with click.progressbar(
        export.iter_content(chunk_size=1024),
        label='Downloading',
        length=chunk_count,
        file=click.get_text_stream('stderr'),
    ) as chunks:
        for chunk in chunks:
            output_file.write(chunk)
def add_to_subject_set(subject_set_id, subject_set_file):
    """Import a 1 column file of subject_ids to a subject_set.

    Reads one subject ID per line, de-duplicates them, and links them to the
    subject set.  Returns whatever SubjectSet.add returns.
    """
    subject_set = SubjectSet.find(subject_set_id)
    with open(subject_set_file) as subject_ids:
        # Flat list of ID strings (the original appended the whole list,
        # producing a needlessly nested list-of-lists).
        lines = subject_ids.read().splitlines()
    return subject_set.add(np.unique(lines))
def modify(subject_set_id, project_id, display_name):
    """Apply any provided project-link / display-name changes to a subject set."""
    target = SubjectSet.find(subject_set_id)
    if project_id:
        target.links.project = project_id
    if display_name:
        target.display_name = display_name
    target.save()
    echo_subject_set(target)
def add_subject_set(self, display_name, subjects_metadata):
    '''
    Create and Add a new subject set to a workflow
    returned by get_workflows_summary()
    '''
    project = self._project
    new_set = SubjectSet()
    new_set.display_name = display_name
    new_set.links.project = project
    new_set.save()
    # Only Epicollect5-sourced metadata is supported so far.
    source = subjects_metadata[0]['source']
    if source != self.EPICOLLECT5_SOURCE:
        raise NotImplementedError()
    self.log.info(
        f"Creating {len(subjects_metadata)} subjects to Subject Set {display_name}"
    )
    subjects = self._create_subjects_from_epicollect5(
        project, subjects_metadata)
    new_set.add(subjects)
    # Attach the new set to every workflow of the project.
    for workflow in project.links.workflows:
        workflow.add_subject_sets(new_set)
        self.log.info(
            f"Added new Subject Set '{display_name}' to workflow '{workflow.display_name}'"
        )
def add_new_subject(self, image_list, metadata_list, subject_set_name):
    """
    Add a subject and the metadata. image_list and metadata_list must be of
    equal length.

    :param image_list: list of images to be added
    :param metadata_list: list of metadata to be added
    :param subject_set_name: display name for the new subject set
    :raises ValueError: if image_list and metadata_list differ in length
    :return: None
    """
    # The original only printed a warning and carried on, which then crashed
    # with IndexError part-way through the upload; fail fast instead.
    if len(image_list) != len(metadata_list):
        raise ValueError("Image list and metadata list do not match")

    # Create the subject set we will link everything to.
    subject_set = SubjectSet()
    subject_set.links.project = self.project
    subject_set.display_name = subject_set_name
    subject_set.save()

    # Build and save one subject per (image, metadata) pair.
    new_subjects = []
    for image, metadata in zip(image_list, metadata_list):
        subject = Subject()
        subject.links.project = self.project
        subject.add_location(image)
        subject.metadata.update(metadata)
        subject.save()
        new_subjects.append(subject)
    subject_set.add(new_subjects)
def _get_subject_set(self, scope, project_id, set_name):
    """Return the scope's subject set, creating and recording a new one when
    the scope has none yet."""
    if scope.subject_set_id:
        return SubjectSet.find(scope.subject_set_id)
    # No set recorded on the scope yet: create one and remember its ID.
    subject_set = self._create_subject_set(project_id, set_name)
    scope.subject_set_id = subject_set.id
    scope.save()
    return subject_set
def upload_manifest_to_galaxy_zoo(subject_set_name, manifest, galaxy_zoo_id='5733', n_processes=10):
    """
    Save manifest (set of galaxies with metadata prepared) to Galaxy Zoo

    Args:
        subject_set_name (str): name for subject set
        manifest (list): containing dicts of form {png_loc: img.png, key_data: {metadata_col: metadata_value}}
        galaxy_zoo_id (str): panoptes project id e.g. '5733' for Galaxy Zoo, '6490' for mobile
        n_processes (int): number of processes with which to upload galaxies in parallel

    Returns:
        None
    """
    if 'TEST' in subject_set_name:
        logging.warning('Testing mode detected - not uploading!')
        return manifest

    # Announce which project we are about to touch.
    known_project_messages = {
        '5733': 'Uploading to Galaxy Zoo project 5733',
        '6490': 'Uploading to mobile app project 6490',
    }
    logging.info(known_project_messages.get(
        galaxy_zoo_id,
        'Uploading to unknown project {}'.format(galaxy_zoo_id)))

    # Important - don't commit the password!
    zooniverse_login = read_data_from_txt(zooniverse_login_loc)
    Panoptes.connect(**zooniverse_login)

    galaxy_zoo = Project.find(galaxy_zoo_id)
    subject_set = SubjectSet()
    subject_set.links.project = galaxy_zoo
    subject_set.display_name = subject_set_name
    subject_set.save()

    # Upload in parallel, with a shared progress bar.
    pbar = tqdm(total=len(manifest), unit=' subjects uploaded')
    save_subject_partial = functools.partial(
        save_subject, project=galaxy_zoo, pbar=pbar)
    pool = ThreadPool(n_processes)
    new_subjects = pool.map(save_subject_partial, manifest)
    pbar.close()
    pool.close()
    pool.join()

    subject_set.add(new_subjects)

    return manifest  # for debugging only
def remove_subject(self, subject_set_id, subject_list):
    """
    Unlink the given subjects from a subject set and save the change.

    :param subject_set_id: ID of the subject set to edit
    :param subject_list: subjects (or subject IDs) to remove
    :return: tuple of the values returned by remove() and save()
    """
    target = SubjectSet.find(subject_set_id)
    removed = target.remove(subject_list)
    saved = target.save()
    return removed, saved
def modify(subject_set_id, display_name):
    """
    Changes the attributes of an existing subject set.

    Any attributes which are not specified are left unchanged.
    """
    target = SubjectSet.find(subject_set_id)
    if display_name:
        target.display_name = display_name
    target.save()
    echo_subject_set(target)
def delete(force, subject_set_ids):
    """Delete each listed subject set, asking for confirmation unless *force*."""
    for sset_id in subject_set_ids:
        sset = SubjectSet.find(sset_id)
        if not force:
            prompt = 'Delete subject set {} ({})?'.format(
                sset_id, sset.display_name)
            click.confirm(prompt, abort=True)
        sset.delete()
def main(production=False):
    # NOTE(review): the line below is garbled — '******' looks like redacted
    # credentials; the original presumably called Panoptes.connect(...) with a
    # username/password and the staging endpoint.  Must be restored before
    # this function can run.
    uname = input('Enter your username: '******'https://panoptes-staging.zooniverse.org', admin=True)
    pId = 5590 if production else 1820
    project = Project.find(pId)
    # Throwaway, timestamp-named subject set for testing uploads.
    subject_set = SubjectSet()
    subject_set.links.project = project
    subject_set.display_name = 'Test_subject_set_' + str(int(time.time()))
    subject_set.save()
    loc = os.path.abspath(os.path.dirname(__file__))
    subjects = os.listdir(loc + '/subjects')
    # TODO: change subject directory structure to be more efficient
    # (not having 12,000+ files in a folder...)
    # Upload at most 20 consecutive subjects, stopping at the first gap.
    for i in range(20):
        if 'image_{}.png'.format(i) in subjects:
            try:
                with open('{}/subjects/metadata_{}.json'.format(loc, i)) as f:
                    metadata = json.load(f)
            except IOError:
                # Missing metadata file is tolerated; upload with empty metadata.
                metadata = {}
            subject_set = uploadSubjectToSet(
                project, subject_set,
                [[
                    j.format(loc, i)
                    for j in ('{}/subjects/image_{}.png',
                              '{}/subjects/difference_{}.json',
                              '{}/subjects/model_{}.json')
                ]],  # locations
                [metadata],
            )
        else:
            break
def remove_subjects(subject_set_id, subject_ids, id_file):
    """
    Unlinks subjects from this subject set.

    The subjects themselves are not deleted or modified in any way and will
    still be present in any other sets they're linked to.
    """
    subject_set = SubjectSet.find(subject_set_id)
    if id_file:
        ids_from_file = [line.strip() for line in id_file.readlines()]
        subject_set.remove(ids_from_file)
    if subject_ids:
        subject_set.remove(subject_ids)
def add_subjects(subject_set_id, subject_ids, id_file):
    """
    Links existing subjects to this subject set.

    This command is useful mainly for adding previously uploaded subjects to
    additional subject sets.

    See the upload-subjects command to create new subjects in a subject set.
    """
    subject_set = SubjectSet.find(subject_set_id)
    if id_file:
        ids_from_file = [line.strip() for line in id_file.readlines()]
        subject_set.add(ids_from_file)
    if subject_ids:
        subject_set.add(subject_ids)
def get_subject_set(subject_set_id, subject_set_name):
    """Get an existing subject set and verify its display name.

    Raises ValueError when the set's display name does not match
    subject_set_name — a guard against resuming upload from the wrong
    manifest.
    """
    # find() is a class-level lookup; no need to build a throwaway
    # SubjectSet instance first (was: SubjectSet().find(...)).
    my_set = SubjectSet.find(subject_set_id)
    logger.info("Subject set {} found, will upload into this set".format(
        subject_set_id))
    # check subject_set name is identical to what is expected from the
    # manifest name, if not abort
    if not my_set.display_name == subject_set_name:
        msg = "Found subject-set with id {} and name {} on Zooniverse -- \
            but tried to continue uploading from manifest with \
            id {} -- this discrepancy is unexpected, therefore, \
            upload is aborted - did you choose the wrong manifest?".format(
            my_set.id, my_set.display_name, subject_set_name)
        logger.error(textwrap.shorten(msg, width=150))
        raise ValueError("Subject-set name {} not identical to {}".format(
            my_set.display_name, subject_set_name))
    return my_set
def get_level_structure(self, workflow=None, IDfilter=''):
    """Parameters
    ----------
    workflow : `int`, optional, default None
    IDfilter : `str`, optional, default ''

    Returns
    -------
    A dict with keys of workflow IDs and values list of golden sets
    associated with that workflow
    """
    # Memoized: the structure is computed once and cached on the instance.
    if hasattr(self, 'level_structure'):
        return self.level_structure
    level_structure = {}
    workflowDictSubjectSets = \
        self.get_subject_sets_per_workflow(workflow=workflow)
    for iworkflow in workflowDictSubjectSets.keys():
        # If it is final workflow level 4 subject sets are also linked
        # so need to filter for level 5 subject sets
        # NOTE(review): workflow IDs 7767/7766 are hard-coded project
        # specifics — confirm they are still current.
        if int(iworkflow) == 7767:
            subjectset_id = [iid for iid in \
                             workflowDictSubjectSets[iworkflow] \
                             if iid not in workflowDictSubjectSets['7766']]
        else:
            subjectset_id = workflowDictSubjectSets[iworkflow]
        # Determine Display names of subject set
        subjectset_displayname_id = {}
        for iSubjectSet in subjectset_id:
            tmp2 = SubjectSet.find(iSubjectSet)
            displayname = str(tmp2.raw['display_name'])
            if IDfilter in displayname:
                # Key: display name up to the first " '", spaces replaced
                # by underscores.
                str_tmp = displayname.split(" '")[0].replace(' ', '_')
                # Value: (workflow id, subject set id, float thresholds
                # parsed out of the display name).
                # NOTE(review): "\d+\.\d+" should be a raw string
                # r"\d+\.\d+" to avoid invalid-escape warnings.
                subjectset_displayname_id[str_tmp] = \
                    (iworkflow, iSubjectSet,
                     [float(iThres) for iThres in
                      re.findall("\d+\.\d+", displayname)
                      ]
                     )
        level_structure[iworkflow] = subjectset_displayname_id
    # Cache for subsequent calls.
    self.level_structure = level_structure
    return level_structure
def create_subjects_and_link_to_project(self, proto_subjects, project_id, workflow_id, subject_set_id):
    """Create subjects from proto_subjects and link them to a workflow.

    When subject_set_id is None a new, timestamp-named subject set is
    created.  Credentials come from the PANOPTES_USERNAME / PANOPTES_PASSWORD
    environment variables.  This is a task boundary: any exception is logged
    and swallowed (behavior kept from the original).
    """
    try:
        USERNAME = os.getenv('PANOPTES_USERNAME')
        PASSWORD = os.getenv('PANOPTES_PASSWORD')
        Panoptes.connect(username=USERNAME, password=PASSWORD,
                         endpoint=self.ENDPOINT)
        project = Project.find(project_id)
        # find() is a class-level lookup; no throwaway instances needed
        # (was: Workflow().find(...) / SubjectSet().find(...)).
        workflow = Workflow.find(workflow_id)
        if subject_set_id is None:  # was `== None`
            subject_set = SubjectSet()
            ts = time.gmtime()
            subject_set.display_name = time.strftime(
                "%m-%d-%Y %H:%M:%S", ts)
            subject_set.links.project = project
            subject_set.save()
        else:
            subject_set = SubjectSet.find(subject_set_id)
        subjects = []
        for proto_subject in proto_subjects:
            subject = Subject()
            subject.links.project = project
            # Each proto subject carries two media locations plus metadata.
            subject.add_location(proto_subject['location_lc'])
            subject.add_location(proto_subject['location_ps'])
            subject.metadata.update(proto_subject['metadata'])
            subject.save()
            subjects.append(subject)
        subject_set.add(subjects)
        workflow.add_subject_sets(subject_set)
    except Exception:
        self.log.exception("Error in create_subjects_and_link_to_project ")
def upload_subjects(subject_set_id, manifest_file):
    """Upload new subjects described by a CSV manifest to a subject set.

    Each manifest row provides metadata; any cell that matches IMAGE_REGEX
    and exists on disk (relative to the manifest) becomes a subject location.
    Subjects are linked to the set in batches of LINK_BATCH_SIZE.

    Returns -1 when a row yields no files; otherwise None.
    """
    subject_set = SubjectSet.find(subject_set_id)
    subject_rows = []
    with open(manifest_file) as manifest_f:
        file_root = os.path.dirname(manifest_file)
        r = csv.reader(manifest_f)
        headers = next(r)  # was r.next() — Python 2 only
        for row in r:
            metadata = dict(zip(headers, row))
            files = []
            for col in row:
                file_match = re.match(IMAGE_REGEX, col)
                file_path = os.path.join(file_root, col)
                if file_match and os.path.exists(file_path):
                    files.append(file_path)
            if len(files) == 0:
                click.echo('Could not find any files in row:', err=True)
                click.echo(','.join(row), err=True)
                return -1
            subject_rows.append((files, metadata))
    created_subjects = []
    with click.progressbar(
        enumerate(subject_rows),
        length=len(subject_rows),
        label='Uploading subjects',
    ) as _subject_rows:
        for count, (files, metadata) in _subject_rows:
            subject = Subject()
            subject.links.project = subject_set.links.project
            # was map(subject.add_location, files): on Python 3 map() is a
            # lazy iterator that was never consumed, so locations were
            # silently never attached.
            for media_file in files:
                subject.add_location(media_file)
            subject.metadata.update(metadata)
            subject.save()
            created_subjects.append(subject)
            # Link in batches to keep request sizes bounded.
            if (count + 1) % LINK_BATCH_SIZE == 0:
                subject_set.add(created_subjects)
                created_subjects = []
        if len(created_subjects) > 0:
            subject_set.add(created_subjects)
def upload_images(id, use_database=True):
    """Create a subject set named *id* on the radio-galaxy-zoo-lofar project
    and upload its images, one subject per *-manifest.txt file."""
    print('Create subject set and upload images for', id)
    if use_database:
        update_status(id, gz_status='Uploading')
    wd = os.getcwd()
    Panoptes.connect(username='******',
                     password=os.environ['PANOPTES_PASSWORD'])
    os.chdir(target + id)
    project = Project.find(slug='chrismrp/radio-galaxy-zoo-lofar')
    subject_set = SubjectSet()
    subject_set.display_name = id
    subject_set.links.project = project
    subject_set.save()
    print('Made subject set')
    manifests = glob.glob('*-manifest.txt')
    new_subjects = []
    for index, manifest in enumerate(manifests):
        # Single-line manifest: id, 3 image locations, name, ra, dec, size.
        fields = open(manifest).readlines()[0].split(',')
        metadata = {
            'subject_id': int(fields[0]),
            'ra': float(fields[5]),
            'dec': float(fields[6]),
            '#size': float(fields[7]),
            'source_name': fields[4]
        }
        print('Upload doing', fields[4], '%i/%i' % (index, len(manifests)))
        subject = Subject()
        subject.links.project = project
        subject.metadata.update(metadata)
        for location in fields[1:4]:
            subject.add_location(location)
        subject.save()
        new_subjects.append(subject)
    subject_set.add(new_subjects)
    workflow = Workflow(11973)
    workflow.links.subject_sets.add(subject_set)
    if use_database:
        update_status(id, gz_status='In progress')
    print('Done!')
4) Remove the second set of RA and DEC columns, and name the first ones 'RA' and 'DEC'
5) Save this file as a csv
6) This only works in python 2 because it uses panoptes_client
'''
# NOTE(review): the lines above are the tail of a module docstring whose
# opening quotes are outside this view; the fragment below also continues
# past this view (counter variables are initialised but used later).
import numpy as np
from panoptes_client import SubjectSet, Subject, Project, Panoptes
import os
import progressbar as pb

# Zooniverse credentials come from the environment.
myusername = os.environ['PANOPTES_USERNAME']
mypassword = os.environ['PANOPTES_PASSWORD']
Panoptes.connect(username= myusername, password=mypassword)
project = Project.find(id='73')
# The three target subject sets: full sample plus spiral and bar subsets.
fullsample = SubjectSet.find(5326)
spirals = SubjectSet.find(5324)
bars = SubjectSet.find(5325)
progress = pb.ProgressBar(widgets=[pb.Bar(), pb.ETA()])
# Matched catalogue with GZ2 vote fractions; dtypes are given explicitly.
data = np.genfromtxt('../GZ3D/MatchedData.csv', delimiter = ',', names=True,
                     dtype=[('DEC', float), ('IAUNAME', '|S30'),('IFUTARGETSIZE',int),
                            ('MANGAID', '|S10'),('MANGA_TILEID',int),('NSAID', int),
                            ('PETROTH50',float),('RA',float),('SERSIC_TH50',float),
                            ('Z',float),('specobjid', int),('dr8objid', int),
                            ('dr7objid', int),('t01_smooth_or_features_a02_features_or_disk_weighted_fraction', float),
                            ('t02_edgeon_a05_no_weighted_fraction', float),
                            ('t03_bar_a06_bar_weighted_fraction', float),
                            ('t04_spiral_a08_spiral_weighted_fraction', float)])
# Running totals, presumably incremented in the loop that follows this
# view — confirm against the rest of the file.
counter = 0
nancounter = 0
spiralcount = 0
def ls(subject_set_id):
    """Display the subject set with the given ID."""
    subject_set = SubjectSet.find(subject_set_id)
    echo_subject_set(subject_set)
def create(project_id, display_name):
    """Create a new subject set on the given project and display it."""
    new_set = SubjectSet()
    new_set.links.project = project_id
    new_set.display_name = display_name
    new_set.save()
    echo_subject_set(new_set)
# NOTE(review): this chunk begins inside a per-file loop defined above this
# view — `subject`, `project`, `file`, `fft`, `overlap`, `color_min`,
# `color_max`, `dest`, `subjects`, `subject_set_display_name` and
# `start_time` are all bound earlier in the file; indentation of the first
# statements relative to that loop cannot be recovered here — confirm.
subject.links.project = project
subject.add_location(file)
# You can set whatever metadata you want, or none at all
subject.metadata['filename'] = os.path.basename(file)
#TODO subject.metadata['file_start'] =
#TODO subject.metadata['sample_rate'] = 5512
subject.metadata['fft'] = fft
subject.metadata['overlap'] = overlap
subject.metadata['color_min'] = color_min
subject.metadata['color_max'] = color_max
#TODO subject.metadata['width'] =
#TODO subject.metadata['height'] =
subject.save()
subjects.append(subject)
os.rename(file,dest+os.path.basename(file)) #move file to uploaded directory

#Create a new subject set or append the subjects to an existing one
# Look for an existing set with the requested display name; the for-else
# raises when none is found (creation is deliberately commented out).
for subject_set in project.links.subject_sets:
    if str(subject_set.display_name) == subject_set_display_name:
        subject_set_id = subject_set.id
        subject_set = SubjectSet.find(subject_set_id)
        break
else:
    #subject_set = SubjectSet()
    #subject_set.links.project = project
    #subject_set.display_name = subject_set_display_name
    #subject_set.save()
    raise Exception('Subject set does not exist')

subject_set.add(subjects)  # SubjectSet.add_subjects() can take a list of Subjects, or just one.
print("--- %s seconds ---" % (time.time() - start_time))
images = [a['src'] for a in soup.find_all("img", {"src": re.compile("gstatic.com")})] #print images for img in images: raw_img = urllib2.urlopen(img).read() #add the directory for your image here DIR="images/" cntr = len([i for i in os.listdir(DIR) if image_type in i]) + 1 f = open(DIR + image_type + "_"+ str(cntr)+".jpg", 'wb') f.write(raw_img) f.close() print 'Creating image set...' # create the subject set. subject_set = SubjectSet() subject_set.links.project = p subject_set.display_name = "Images of " + thing + '\'s' subject_set.save() print 'Uploading images to Zooniverse...' # add all images to subject set for i in range(1,21): subject = Subject() subject.links.project = p subject.add_location('images/' + str(thing) + '_' + str(i)+'.jpg') subject.save() subject_set.add(subject) print 'Complete.'