def clean_database(simulation):
    """Drop from the database the tables created for the run.

    Collects the ids of all matrices of the simulation, plus the id of the
    public-transit travel-times matrix, and drops every corresponding
    ``Matrix_<id>`` table if it exists.
    """
    matrices = functions.get_query('matrices', simulation)
    matrices_id = list(matrices.values_list('id', flat=True))
    matrices_id.append(simulation.scenario.supply.pttimes.id)
    with connection.cursor() as cursor:
        for matrice_id in matrices_id:
            # Bug fix: .format() must be applied to the SQL string itself,
            # not to the return value of cursor.execute() (which is None).
            # The original executed a literal "Matrix_{id}" statement and
            # then raised AttributeError.
            # Table names cannot be bound as SQL parameters; matrice_id is
            # an integer primary key coming from the database, so the
            # interpolation is safe.
            cursor.execute(
                "DROP TABLE IF EXISTS Matrix_{id};".format(id=matrice_id)
            )
def clean(self):
    """Check that no two nodes have the same user_id.

    Raises a ``forms.ValidationError`` if a non-deleted form reuses a
    user_id already taken by an existing centroid or by another form in
    the formset.
    """
    if any(self.errors):
        # Don't bother validating the formset unless each form is valid on
        # its own.
        return
    centroids = functions.get_query('centroid', self.simulation)
    # Use a set for O(1) membership tests (the original list made the loop
    # quadratic in the number of forms).
    user_ids = set(centroids.values_list('user_id', flat=True))
    for form in self.forms:
        user_id = form.cleaned_data['user_id']
        delete = form.cleaned_data['DELETE']
        if not delete:
            if user_id in user_ids:
                # Bug fix: format the message string before building the
                # exception. The original called .format() on the
                # ValidationError instance, which raised AttributeError
                # instead of the intended validation error.
                raise forms.ValidationError(
                    'Two nodes (zones and intersections) cannot have the '
                    'same id (id: {}).'.format(user_id))
            user_ids.add(user_id)
def export_link_results(output, export_file):
    """Write link-level simulation results to a tab-separated file.

    For large networks (>= LINK_THRESHOLD links) only the in-flow and
    travel-time results of the H series are exported; small networks get
    the full H/S series.
    """
    # Map database link ids to the user-facing link ids.
    links = functions.get_query('link', SIMULATION.id)
    link_mapping = {link.id: link.user_id for link in links}
    # Pick the column labels and the matching output arrays in one place.
    large_network = len(output['link_ids']) >= LINK_THRESHOLD
    if large_network:
        # Large network, only store one type of results (phi_in_H).
        labels = ['in-flow_H', 'ttime_H']
        output_types = ['phi_in_H', 'ttime_H']
    else:
        # Small network, we can store everything.
        labels = ['in-flow_H', 'in-flow_S', 'out-flow_H', 'out-flow_S',
                  'ttime_H', 'ttime_S']
        output_types = ['phi_in_H', 'phi_in_S', 'phi_out_H', 'phi_out_S',
                        'ttime_H', 'ttime_S']
    nb_periods = len(output['phi_in_H'][0])
    with codecs.open(export_file, 'w', encoding='utf8') as f:
        writer = csv.writer(f, delimiter='\t')
        # Custom header: one column per (label, period) pair.
        header = ['link'] + [
            '{}_{}'.format(label, period + 1)
            for label in labels
            for period in range(nb_periods)
        ]
        writer.writerow(header)
        # One row per link: user id followed by all selected result series.
        for idx, raw_link_id in enumerate(output['link_ids']):
            row = [link_mapping[int(raw_link_id)]]
            for output_type in output_types:
                row.extend(output[output_type][idx])
            writer.writerow(row)
# Positional script arguments; the fixed offsets are set by the caller.
# NOTE(review): `args` and `logfile` are defined earlier in this script —
# not visible in this chunk.
stopfile = args[13]
simulation_id = int(args[15])
run_id = int(args[17])
# Get the SimulationRun object of the argument.
try:
    run = SimulationRun.objects.get(pk=run_id)
except SimulationRun.DoesNotExist:
    raise SystemExit('MetroDoesNotExist: No SimulationRun object ' +
                     'corresponding to the given id.')
# Retrieve Simulation and Link objects.
simulation = run.simulation
links = get_query('link', simulation.id)
# Write dummy MOEs.
moes = SimulationMOEs(
    simulation=simulation.id,
    runid=run.id,
)
moes.save()
# Wait 2 minutes.
# NOTE(review): the loop runs 30 iterations with no sleep visible in this
# chunk, and the log message reports '{} seconds remaining' using 30 - i
# (an iteration count, not seconds). Either the delay happens elsewhere or
# the comment/message are stale — confirm against the full script.
for i in range(30):
    # Stop early if the stop file has appeared.
    if os.path.isfile(stopfile):
        break
    if i % 10 == 0:
        with open(logfile, 'a') as g:
            g.write('\n{} seconds remaining.'.format(30 - i))
# No valid run id was given on the command line.
# Fixed grammar of the user-facing message ("has an argument" -> "as an
# argument").
raise SystemExit('MetroArgError: This script must be executed with the id ' +
                 'of the SimulationRun as an argument.')
# Get the SimulationRun object of the argument.
try:
    run = SimulationRun.objects.get(pk=run_id)
except SimulationRun.DoesNotExist:
    raise SystemExit('MetroDoesNotExist: No SimulationRun object corresponding'
                     + ' to the given id.')
simulation = run.simulation
# Output user-specific results only if the population is small.
# I believe that Metropolis does not output the file correctly if the
# population is large.
matrices = get_query('matrices', simulation)
# Bug fix: aggregate() returns None for an empty queryset, which made the
# comparison below raise TypeError; treat a missing sum as 0 travelers.
nb_travelers = matrices.aggregate(Sum('total'))['total__sum'] or 0
if nb_travelers > TRAVELERS_THRESHOLD:
    simulation.outputUsersTimes = 'false'
else:
    simulation.outputUsersTimes = 'true'
simulation.save()
# Use the existing network output file if it exists.
simulation_network = (
    '{0}/website_files/network_output/network_{1}.json'
    .format(settings.BASE_DIR, simulation.id)
)
if simulation.has_changed or not os.path.isfile(simulation_network):
    # Generate a new output file.
    print('Network file does not exist, generating a new one...')
'of the SimulationRun has an argument.') # Get the SimulationRun object of the argument. try: run = models.SimulationRun.objects.get(pk=run_id) except models.SimulationRun.DoesNotExist: raise SystemExit( 'MetroDoesNotExist: No SimulationRun object corresponding' + ' to the given id.') simulation = run.simulation # Output user-specific results only if the population is small. # I believe that Metropolis does not output the file correctly if the # population is large. matrices = functions.get_query('matrices', simulation) nb_travelers = matrices.aggregate(Sum('total'))['total__sum'] if nb_travelers > TRAVELERS_THRESHOLD: simulation.outputUsersTimes = 'false' simulation.outputUsersPaths = 'false' else: simulation.outputUsersTimes = 'true' simulation.outputUsersPaths = 'true' simulation.save() # Use the existing network output file if it exists. simulation_network = ( '{0}/website_files/network_output/network_{1}.json'.format( settings.BASE_DIR, simulation.id)) if simulation.has_changed or not os.path.isfile(simulation_network): # Generate a new output file.
DB_NAME = settings.DATABASES['default']['NAME'] # Create a tsv file with a readable user-specific cost output. print('Writing traveler-specific cost output...') FILE = ( '{0}/metrosim_files/output/metrosim_users_{1}_{2}.txt' ).format(settings.BASE_DIR, DB_NAME, SIMULATION.id) if os.path.isfile(FILE): try: EXPORT_FILE = ( '{0}/website_files/network_output/user_results_{1}_{2}.txt' ).format(settings.BASE_DIR, SIMULATION.id, RUN.id) # Create a dictionary to map the centroid ids with the centroid user # ids. centroid_mapping = dict() centroids = functions.get_query('centroid', SIMULATION.id) for centroid in centroids: centroid_mapping[centroid.id] = centroid.user_id # Create a dictionary to map the demandsegment ids with the name of the # usertype. usertype_mapping = dict() demandsegments = models.DemandSegment.objects.filter( demand__scenario__simulation=SIMULATION ) for demandsegment in demandsegments: name = demandsegment.usertype.name if name: usertype_mapping[demandsegment.id] = name else: usertype_mapping[demandsegment.id] = demandsegment.usertype.id with codecs.open(FILE, 'r', encoding='utf8') as f: