def insert( self ):
		
		# get the key
		current_index = self._total_insert
		key = self._keys[ current_index ]
		
		# generate the value by applying MD5 to the key
		value = hashlib.md5( key.encode( 'utf-8' ) ).hexdigest()
			
		
		start = tic()
		try:
			# insert the key/value pair into the hash table
			self._hash_table.put( key, value )
			# incremental count of all successful inserts
			self._successfull_insert += 1

		except Exception:
			# it does not matter if the insertion of a key fails
			pass
		stop  = toc()
		
		# increment the total insert time
		self._total_insert_time += ( stop - start )

		# incremental count of all operations
		self._total_insert += 1
	def delete( self ):
		
		low  = 0
		# randint is inclusive on both ends, so the last valid index
		# is self._successfull_insert - 1
		high = self._successfull_insert - 1
		random_index = random.randint( low, high )
		random_key = self._keys[ random_index ]
		
		start = tic()
		try:
			self._hash_table.delete( random_key )
			self._successfull_delete += 1
		except Exception:
			# it does not matter if a delete goes wrong
			pass
		stop  = toc()

		# increment the total delete time
		self._total_delete_time += ( stop - start )
		
		# incremental count of all operations
		self._total_delete += 1
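
Both methods above time each operation with tic()/toc() helpers that are not shown in the snippet. A minimal sketch of what they might look like, assuming both simply read a monotonic high-resolution clock (the choice of time.perf_counter is an assumption):

import time

def tic():
	# assumed helper: read the clock when the measured region starts
	return time.perf_counter()

def toc():
	# assumed helper: read the clock when the measured region ends,
	# so an elapsed interval is toc() - tic()
	return time.perf_counter()
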
def simulation_wrapper(
    simulation_mode: str,
    settings: dict,
    model_name: str = None,
    submodel_path: str = None,
):
    """
    This script wraps around the simulation with Copasi,
    applies the settings for the ODE solver,
    and returns a list of result, which depend on simulation_mode:

    """
    # import the model_summary table to add information about the models to load
    model_info = pd.read_csv(os.path.join(DIR_MODELS, 'model_summary.tsv'),
                             sep='\t')

    # get the list of sbml models which belong to this benchmark model
    if submodel_path is None:
        copasi_file_list, sbml_model_list = \
            get_submodel_list_copasi(model_name, model_info)
    else:
        copasi_file, sbml_model = get_submodel_copasi(submodel_path,
                                                      model_info)
        copasi_file_list = [copasi_file]
        sbml_model_list = [sbml_model]

    # get the path with the Copasi binaries
    CopasiSE = os.path.join(DIR_COPASI_BIN, 'CopasiSE')

    # collect cpu times
    average_cpu_times_intern = []
    average_cpu_times_extern = []
    # collect trajectories
    trajectories = []
    # collect failures
    failures = []

    if simulation_mode == SIMCONFIG.CPUTIME:
        # we want to repeatedly simulate the model
        if os.getenv('SOLVERSTUDY_DIR_BASE', None) == 'TEST':
            n_repetitions = 3
        else:
            n_repetitions = 25
    else:
        n_repetitions = 1

    # loop over models (=modules) to be simulated:
    for i_model, model in enumerate(copasi_file_list):
        # get the corresponding SBML model
        sbml_model = sbml_model_list[i_model]

        # collect cpu times
        cpu_times_intern = []
        cpu_times_extern = []

        # adapt the cps-file for Copasi simulation according to the solver
        # settings (create a temporary file for this).
        # Also generate a name for the temporary Copasi report file
        tmp_cps_file, tmp_report_base = \
            _apply_solver_settings(model, settings)

        for i_rep in range(n_repetitions):
            # adapt the name of the temporary report file
            tmp_report_file = f'{tmp_report_base}_{i_rep}.tsv'

            # simulate and measure the external (wall-clock) simulation time
            start = toc()
            os.system(
                f'{CopasiSE} --report-file {tmp_report_file} {tmp_cps_file}')
            time_extern = (toc() - start)

            # Copasi writes its results to a report file.
            # We need to post process it first and then remove it
            rdata = _post_process_report_file(tmp_report_file, simulation_mode,
                                              sbml_model)

            # if simulation was not successful, mark all repetitions as failed
            if rdata['status'] != 0:
                cpu_times_extern = [float('nan')] * n_repetitions
                cpu_times_intern = [float('nan')] * n_repetitions
                break

            # report in seconds
            cpu_times_extern.append(time_extern)
            cpu_times_intern.append(rdata['cpu_time'])

        # We need to remove the temporary cps-file
        os.remove(tmp_cps_file)

        # let's just always collect the stuff which is cheap anyway
        average_cpu_times_intern.append(np.array(cpu_times_intern))
        average_cpu_times_extern.append(np.array(cpu_times_extern))
        failures.append(rdata['status'] != 0)

        # Trajectories are more memory intensive. Only collect if needed
        if simulation_mode == SIMCONFIG.TRAJECTORY:
            trajectories.append(rdata['x'])

    # postprocess depending on purpose and return a dict
    if simulation_mode == SIMCONFIG.TRAJECTORY:
        return trajectories
    else:
        return _post_process_cputime(np.array(average_cpu_times_intern),
                                     np.array(average_cpu_times_extern),
                                     np.array(failures), n_repetitions)
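
For context, here is a hypothetical call to the Copasi wrapper above; the keys inside settings are assumptions, since _apply_solver_settings is not shown:

# Hypothetical usage of the Copasi wrapper; 'abs_tol'/'rel_tol' are assumed
# settings keys, and 'model_xyz' is a placeholder benchmark model id.
settings = {'abs_tol': 1e-8, 'rel_tol': 1e-6}
cpu_times = simulation_wrapper(
    simulation_mode=SIMCONFIG.CPUTIME,
    settings=settings,
    model_name='model_xyz',
)

As a side note, replacing os.system with subprocess.run would let the wrapper check the CopasiSE exit code instead of silently ignoring failed calls.
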
def simulation_wrapper(
    simulation_mode: str,
    settings: dict,
    model_name: str = None,
    submodel_path: str = None,
):
    """
    This script wraps around the simulation with AMICI,
    applies the settings for the ODE solver,
    and returns a list of result, which depend on simulation_mode:

    :param:
        - simulation_mode:
            whether we want to get trajectories out or cpu times

        - settings:
            a dict with settigs for the ODE solver

        - model_name:
            string with the id of the benchmark model (e.g. parthak2013a),
            which may contain different submodels

        - submodel_path:
            string with path to the amici model module (submodel) which belongs
            to *one* SBML file, path being relative to DIR_MODELS
    """

    # import the model_summary table to add information about the models to load
    model_info = pd.read_csv(os.path.join(DIR_MODELS, 'model_summary.tsv'),
                             sep='\t')

    # get the list of sbml models which belong to this benchmark model
    if submodel_path is None:
        amici_model_list, sbml_model_list = get_submodel_list(
            model_name, model_info)
    else:
        amici_model, sbml_model = get_submodel(submodel_path, model_info)
        amici_model_list = [amici_model]
        sbml_model_list = [sbml_model]

    # collect cpu times
    average_cpu_times_intern = []
    average_cpu_times_extern = []
    # collect trajectories
    trajectories = []
    # collect failures
    failures = []

    # loop over models (=modules) to be simulated:
    for i_model, model in enumerate(amici_model_list):
        if simulation_mode == SIMCONFIG.CPUTIME:
            # we want to repeatedly simulate the model
            n_repetitions = repetitions_for_cpu_time_study
        else:
            n_repetitions = 1

        # collect cpu times
        cpu_times_intern = []
        cpu_times_extern = []

        for _ in range(n_repetitions):
            # get the adapted solver object
            solver = _apply_solver_settings(model, settings)

            # simulate and get simulation time
            start = toc()
            rdata = amici.runAmiciSimulation(model, solver)
            time_extern = (toc() - start)  # time in seconds

            # if simulation was not successful, mark all repetitions as failed
            if rdata['status'] != 0:
                cpu_times_extern = [float('nan')] * n_repetitions
                cpu_times_intern = [float('nan')] * n_repetitions
                break

            # report in seconds
            cpu_times_extern.append(time_extern)
            # convert time in milliseconds to time in seconds
            cpu_times_intern.append(rdata['cpu_time'] / 1000.)

        # let's just always collect the stuff which is cheap anyway
        average_cpu_times_intern.append(np.array(cpu_times_intern))
        average_cpu_times_extern.append(np.array(cpu_times_extern))
        failures.append(rdata['status'] != 0)

        # Trajectories are more memory intensive. Only collect if needed
        if simulation_mode == SIMCONFIG.TRAJECTORY:
            trajectories.append(rdata['x'])

    # postprocess depending on purpose and return a dict
    if simulation_mode == SIMCONFIG.TRAJECTORY:
        return _post_process_trajectories(trajectories, failures,
                                          amici_model_list, sbml_model_list)
    else:
        return _post_process_cputime(np.array(average_cpu_times_intern),
                                     np.array(average_cpu_times_extern),
                                     np.array(failures), n_repetitions)
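
In the AMICI variant, _apply_solver_settings is not shown either. A minimal sketch under the assumption that settings carries tolerances and a step limit (the keys 'atol', 'rtol' and 'maxsteps' are hypothetical; getSolver, setAbsoluteTolerance, setRelativeTolerance and setMaxSteps are standard AMICI solver methods):

def _apply_solver_settings(model, settings: dict):
    # create a fresh solver object from the AMICI model
    solver = model.getSolver()
    # the settings keys below are assumptions, not confirmed by the snippet
    if 'atol' in settings:
        solver.setAbsoluteTolerance(settings['atol'])
    if 'rtol' in settings:
        solver.setRelativeTolerance(settings['rtol'])
    if 'maxsteps' in settings:
        solver.setMaxSteps(settings['maxsteps'])
    return solver
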
	insertion_times = []
	merge_times     = []

	# size goes from MIN_ARRAY_SIZE to MAX_ARRAY_SIZE
	for size in size_range:

		# generate size random integers between -100 and 100
		input_array = generate_random_integers( size )

		# hand the input to the sorter objects
		insertion_sorter.get_input( input_array )
		merge_sorter.get_input( input_array )

		# sort with insertion_sorter and benchmark
		start_time_insertion = tic()
		insertion_sorter.asc_sort()
		end_time_insertion   = toc()
		insertion_times.append( end_time_insertion - start_time_insertion )

		# sort with merge_sorter and benchmark
		start_time_merge = tic()
		merge_sorter.asc_sort()
		end_time_merge   = toc()
		merge_times.append( end_time_merge - start_time_merge )
		
		'''
		# assert that all is correct
		insertion_output = insertion_sorter.get_output()
		merge_output     = merge_sorter.get_output()

		assert len( insertion_output )            == size
		assert len( merge_output )                == size
		assert is_asc_ordered( insertion_output )
		assert is_asc_ordered( merge_output )
		'''
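
The benchmark above relies on two helpers that are not shown, generate_random_integers and is_asc_ordered. Minimal sketches, assuming the -100 to 100 range stated in the comments:

import random

def generate_random_integers( size ):
	# 'size' random integers between -100 and 100, per the comments above
	return [ random.randint( -100, 100 ) for _ in range( size ) ]

def is_asc_ordered( values ):
	# True when every element is <= its successor
	return all( a <= b for a, b in zip( values, values[ 1: ] ) )
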
for attempt in range(0, M_ATTEMPTS):
	
	# initialize
	hybrid_times = []
	merge_times = []

	# generate size random integers between -100 and 100
	input_array = generate_random_integers( ARRAY_SIZE )

	merge_sorter.get_input( input_array )

	# sort with merge_sorter and benchmark
	start_time_merge = tic()
	merge_sorter.asc_sort()
	end_time_merge   = toc()
	merge_times.append( end_time_merge - start_time_merge )

	for K in K_VALUES:

		# set the threshold K and hand the input to the hybrid sorter
		hybrid_sorter.set_K( K )
		hybrid_sorter.get_input( input_array )
		
		# sort with hybrid_sorter and benchmark
		start_time_hybrid = tic()
		hybrid_sorter.asc_sort()
		end_time_hybrid   = toc()
		hybrid_times.append( end_time_hybrid - start_time_hybrid )
		
		'''
		# assert that all is correct
		hybrid_output = hybrid_sorter.get_output()
		'''
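
The hybrid sorter being benchmarked switches from merge sort to insertion sort once a subarray is shorter than the threshold K, since insertion sort is faster on small inputs. A standalone sketch of that idea (the actual hybrid_sorter implementation is not shown):

def hybrid_merge_sort( values, K ):
	# below the threshold K, fall back to insertion sort
	if len( values ) <= K:
		result = list( values )
		for i in range( 1, len( result ) ):
			key, j = result[ i ], i - 1
			while j >= 0 and result[ j ] > key:
				result[ j + 1 ] = result[ j ]
				j -= 1
			result[ j + 1 ] = key
		return result
	# otherwise split, recurse and merge as in plain merge sort
	mid   = len( values ) // 2
	left  = hybrid_merge_sort( values[ :mid ], K )
	right = hybrid_merge_sort( values[ mid: ], K )
	merged, i, j = [], 0, 0
	while i < len( left ) and j < len( right ):
		if left[ i ] <= right[ j ]:
			merged.append( left[ i ] )
			i += 1
		else:
			merged.append( right[ j ] )
			j += 1
	merged.extend( left[ i: ] )
	merged.extend( right[ j: ] )
	return merged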