def solve(self, parallel=True, cores=32):
        '''
        Solve all problems of this example type

        The results are stored as

            ./results/{self.output_folder}/{solver}/results.csv

        using a pandas table with fields
            - 'name': Maros problem name
            - 'solver': solver name
            - 'status': solver status
            - 'run_time': execution time
            - 'iter': number of iterations
            - 'obj_val': objective value from solver
            - 'obj_opt': optimal objective value
            - 'n': leading dimension
            - 'N': nnz dimension (nnz(P) + nnz(A))
        '''

        print("Solving Maros Meszaros problems")
        print("-------------------------------")

        if parallel:
            pool = Pool(processes=min(cores, cpu_count()))

        # Iterate over all solvers
        for solver in self.solvers:
            settings = self.settings[solver]

            #  # Initialize solver results
            #  results_solver = []

            # Solution directory
            path = os.path.join('.', 'results', self.output_folder, solver)

            # Create directory for the results
            make_sure_path_exists(path)

            # Get solver file name
            results_file_name = os.path.join(path, 'results.csv')

            # Check if the results file already exists
            if not os.path.isfile(results_file_name):
                # Solve Maros Meszaros problems
                if parallel:
                    results = pool.starmap(
                        self.solve_single_example,
                        zip(self.problems, repeat(solver), repeat(settings)))
                else:
                    results = []
                    for problem in self.problems:
                        results.append(
                            self.solve_single_example(problem, solver,
                                                      settings))
                # Create dataframe
                df = pd.concat(results)

                # Store results
                df.to_csv(results_file_name, index=False)

            #  else:
            #      # Load from file
            #      df = pd.read_csv(results_file_name)
            #
            #      # Combine list of dataframes
            #      results_solver.append(df)

        if parallel:
            pool.close()  # Not accepting any more jobs on this pool
            pool.join()  # Wait for all processes to finish
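
A minimal sketch (not part of the original code) of consuming the results.csv written above: the path layout and column names come from the docstring, while the output_folder value, the solver list, and the 'solved' status string are assumptions.

import os
import pandas as pd

# Hypothetical values; adjust to the actual benchmark configuration
output_folder = 'maros_meszaros'
solvers = ['OSQP', 'GUROBI']

for solver in solvers:
    df = pd.read_csv(os.path.join('.', 'results', output_folder,
                                  solver, 'results.csv'))
    solved = df['status'] == 'solved'   # status strings may be solver-dependent
    print('%s: %d/%d solved, mean run_time %.3fs'
          % (solver, solved.sum(), len(df), df.loc[solved, 'run_time'].mean()))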
Example #2
    def solve(self, parallel=True):
        '''
        Solve all problems of this example type

        The results are stored as

            ./results/benchmark_problems/{solver}/{class}/n{dimension}.csv

        using a pandas table with fields
            - 'class': example class
            - 'solver': solver name
            - 'status': solver status
            - 'run_time': execution time
            - 'iter': number of iterations
            - 'obj_val': objective value
            - 'n': leading dimension
            - 'N': nnz dimension (nnz(P) + nnz(A))
        '''

        print("Solving %s" % self.name)
        print("-----------------")

        if parallel:
            pool = Pool(processes=min(self.n_instances, cpu_count()))

        # Iterate over all solvers
        for solver in self.solvers:
            settings = self.settings[solver]

            # Initialize solver results
            results_solver = []

            # Solution directory
            path = os.path.join('.', 'results', 'benchmark_problems', solver,
                                self.name)

            # Create directory for the results
            make_sure_path_exists(path)

            # Get solver file name
            solver_file_name = os.path.join(path, 'full.csv')

            for n in self.dims:

                # Check if solution already exists
                n_file_name = os.path.join(path, 'n%i.csv' % n)

                if not os.path.isfile(n_file_name):

                    if parallel:
                        instances_list = list(range(self.n_instances))
                        n_results = pool.starmap(
                            self.solve_single_example,
                            zip(repeat(n), instances_list, repeat(solver),
                                repeat(settings)))
                    else:
                        n_results = []
                        for instance in range(self.n_instances):
                            n_results.append(
                                self.solve_single_example(
                                    n, instance, solver, settings))

                    # Combine n_results
                    df = pd.concat(n_results)

                    # Store n_results
                    df.to_csv(n_file_name, index=False)

                else:
                    # Load from file
                    df = pd.read_csv(n_file_name)

                # Combine list of dataframes
                results_solver.append(df)

            # Create total dataframe for the solver from list
            df_solver = pd.concat(results_solver)

            # Store dataframe
            df_solver.to_csv(solver_file_name, index=False)

        if parallel:
            pool.close()  # Not accepting any more jobs on this pool
            pool.join()  # Wait for all processes to finish
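
A hedged sketch of reading the full.csv written above to see how a solver scales with the problem dimension; the 'OSQP' and 'Random QP' directory names are assumptions, while the column names come from the docstring.

import os
import pandas as pd

# Hypothetical solver/class names; the layout matches the code above
df = pd.read_csv(os.path.join('.', 'results', 'benchmark_problems',
                              'OSQP', 'Random QP', 'full.csv'))

# Mean iterations and run time per leading dimension n
print(df.groupby('n')[['iter', 'run_time']].mean())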
Example #3
    def solve(self):
        """
        Solve Portfolio problem
        """

        print("Solve Portfolio problem for dimension %i" % self.n_factors)

        # Create example instance
        instance = PortfolioExample(self.n_factors, n=self.n_assets)

        # Store number of nonzeros in F for updates
        nnzF = instance.F.nnz
        
        # Store alpha
        alpha = self.alpha

        '''
        Solve problem without warm start
        '''
        #  print("Solving without warm start")

        # Solution directory
        no_ws_path = os.path.join('.', 'results', 'parametric_problems',
                                  'OSQP no warmstart',
                                  'Portfolio',
                                  )

        # Create directory for the results
        make_sure_path_exists(no_ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(no_ws_path, 'n%i.csv' % self.n_factors)

        if not os.path.isfile(n_file_name):

            res_list_no_ws = []  # Initialize results
            for i in range(self.n_problems):
                qp = instance.qp_problem

                # Solve problem
                m = osqp.OSQP()
                m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                        **self.osqp_settings)
                r = m.solve()

                # DEBUG
                #  print("niter = %d" % r.info.iter)

                solution_dict = {'status': [r.info.status],
                                 'run_time': [r.info.run_time],
                                 'iter': [r.info.iter],
                                 'obj_val': [r.info.obj_val]}

                if r.info.status != "solved":
                    print("OSQP no warmstart did not solve the problem")
                    import ipdb; ipdb.set_trace()

                res_list_no_ws.append(pd.DataFrame(solution_dict))

                # Update model
                current_mu = instance.mu
                current_F_data = instance.F.data
                current_D_data = instance.D.data

                if i % self.n_qp_per_update == 0:
                    #  print("Update everything: mu, F, D")
                    # Update everything
                    new_mu = alpha * np.random.randn(instance.n) + (1 - alpha) * current_mu
                    new_F = instance.F.copy()
                    new_F.data = alpha * np.random.randn(nnzF) + (1 - alpha) * current_F_data
                    new_D = instance.D.copy()
                    new_D.data = alpha * np.random.rand(instance.n) * \
                        np.sqrt(instance.k) + (1 - alpha) * current_D_data
                    instance.update_parameters(new_mu, new_F, new_D)
                else:
                    #  print("Update only mu")
                    # Update only mu
                    new_mu = alpha * np.random.randn(instance.n) + (1 - alpha) * current_mu
                    instance.update_parameters(new_mu)

            # Combine the no-warm-start results into a single dataframe
            res_no_ws = pd.concat(res_list_no_ws)

            # Store file
            res_no_ws.to_csv(n_file_name, index=False)


        '''
        Solve problem with warm start
        '''
        #  print("Solving with warm start")

        # Solution directory
        ws_path = os.path.join('.', 'results', 'parametric_problems',
                               'OSQP warmstart',
                               'Portfolio',
                               )

        # Create directory for the results
        make_sure_path_exists(ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(ws_path, 'n%i.csv' % self.n_factors)

        if not os.path.isfile(n_file_name):
            # Setup solver (re-fetch the problem data: qp was only assigned
            # inside the no-warm-start loop above, so it is undefined when
            # those results were loaded from file)
            qp = instance.qp_problem
            m = osqp.OSQP()
            m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                    **self.osqp_settings)

            res_list_ws = []  # Initialize results
            for i in range(self.n_problems):

                # Solve problem
                r = m.solve()

                # DEBUG
                #  print("niter = %d" % r.info.iter)

                if r.info.status != "solved":
                    print("OSQP warmstart did not solve the problem")

                # Get results
                solution_dict = {'status': [r.info.status],
                                 'run_time': [r.info.run_time],
                                 'iter': [r.info.iter],
                                 'obj_val': [r.info.obj_val]}

                res_list_ws.append(pd.DataFrame(solution_dict))

                # Update model
                current_mu = instance.mu
                current_F_data = instance.F.data
                current_D_data = instance.D.data

                if i % self.n_qp_per_update == 0:
                    #  print("Update everything: mu, F, D")
                    # Update everything
                    new_mu = alpha * np.random.randn(instance.n) + (1 - alpha) * current_mu
                    new_F = instance.F.copy()
                    new_F.data = alpha * np.random.randn(nnzF) + (1 - alpha) * current_F_data
                    new_D = instance.D.copy()
                    new_D.data = alpha * np.random.rand(instance.n) * \
                        np.sqrt(instance.k) + (1 - alpha) * current_D_data
                    instance.update_parameters(new_mu, new_F, new_D)
                    # Update solver
                    m.update(q=instance.qp_problem['q'],
                             Px=instance.qp_problem['P'].data,
                             Ax=instance.qp_problem['A'].data)
                else:
                    #  print("Update only mu")
                    # Update only mu
                    new_mu = alpha * np.random.randn(instance.n) + (1 - alpha) * current_mu
                    instance.update_parameters(new_mu)

                    # Update solver
                    m.update(q=instance.qp_problem['q'])

            # Combine the warm-start results into a single dataframe
            res_ws = pd.concat(res_list_ws)

            # Store file
            res_ws.to_csv(n_file_name, index=False)
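
Both branches above write one row per solved QP in the same order, so the warm-start benefit can be read off directly; a hedged sketch, with n_factors = 100 as an assumed dimension.

import os
import pandas as pd

base = os.path.join('.', 'results', 'parametric_problems')
# Hypothetical dimension; use the n_factors value actually benchmarked
cold = pd.read_csv(os.path.join(base, 'OSQP no warmstart', 'Portfolio', 'n100.csv'))
warm = pd.read_csv(os.path.join(base, 'OSQP warmstart', 'Portfolio', 'n100.csv'))

print('mean iterations cold/warm: %.1f / %.1f'
      % (cold['iter'].mean(), warm['iter'].mean()))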
Example #4
    def solve(self):
        """
        Solve MPC problem
        """

        print("Solve MPC problem for dimension %i" % self.dimension)

        # Create example instance
        instance = ControlExample(self.dimension)
        qp = instance.qp_problem
        x0 = np.copy(instance.x0)
        '''
        Solve problem without warm start
        '''
        # Solution directory
        no_ws_path = os.path.join(
            '.',
            'results',
            'parametric_problems',
            'OSQP no warmstart',
            'MPC',
        )

        # Create directory for the results
        make_sure_path_exists(no_ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(no_ws_path, 'n%i.csv' % self.dimension)

        if not os.path.isfile(n_file_name):
            # Initialize states and inputs for the whole simulation
            X_no_ws = np.zeros((instance.nx, self.n_simulation + 1))
            U_no_ws = np.zeros((instance.nu, self.n_simulation))
            X_no_ws[:, 0] = x0

            res_list_no_ws = []  # Initialize results
            for i in range(self.n_simulation):

                # Solve problem
                m = osqp.OSQP()
                m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                        **self.osqp_settings)
                r = m.solve()

                solution_dict = {
                    'status': [r.info.status],
                    'run_time': [r.info.run_time],
                    'iter': [r.info.iter]
                }

                if r.info.status != "solved":
                    print("OSQP no warmstart did not solve the problem")

                res_list_no_ws.append(pd.DataFrame(solution_dict))

                # Extract the first input of the planned sequence
                idx = instance.nx * (instance.T + 1)
                U_no_ws[:, i] = r.x[idx:idx + instance.nu]

                # Propagate state
                X_no_ws[:, i + 1] = instance.A.dot(X_no_ws[:, i]) + \
                    instance.B.dot(U_no_ws[:, i])

                # Update initial state
                instance.update_x0(X_no_ws[:, i + 1])

            # Combine the no-warm-start results into a single dataframe
            res_no_ws = pd.concat(res_list_no_ws)

            # Store file
            res_no_ws.to_csv(n_file_name, index=False)

            # Plot results
            # import matplotlib.pylab as plt
            # plt.figure(1)
            # plt.plot(X_no_ws.T)
            # plt.title("No Warm Start")
            # plt.show(block=False)
        '''
        Solve problem with warm start
        '''
        # Solution directory
        ws_path = os.path.join(
            '.',
            'results',
            'parametric_problems',
            'OSQP warmstart',
            'MPC',
        )

        # Create directory for the results
        make_sure_path_exists(ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(ws_path, 'n%i.csv' % self.dimension)

        # Reset the initial state to x0 before the warm-start run
        instance.update_x0(x0)

        if not os.path.isfile(n_file_name):
            # Setup solver
            m = osqp.OSQP()
            m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                    **self.osqp_settings)

            # Initialize states and inputs for the whole simulation
            X_ws = np.zeros((instance.nx, self.n_simulation + 1))
            U_ws = np.zeros((instance.nu, self.n_simulation))
            X_ws[:, 0] = x0

            res_list_ws = []  # Initialize results
            for i in range(self.n_simulation):

                # Solve problem
                r = m.solve()

                if r.info.status != "solved":
                    print("OSQP no warmstart did not solve the problem")

                # Get results
                solution_dict = {
                    'status': [r.info.status],
                    'run_time': [r.info.run_time],
                    'iter': [r.info.iter]
                }

                res_list_ws.append(pd.DataFrame(solution_dict))

                # Extract the first input of the planned sequence
                idx = instance.nx * (instance.T + 1)
                U_ws[:, i] = r.x[idx:idx + instance.nu]

                # Propagate state
                X_ws[:, i + 1] = instance.A.dot(X_ws[:, i]) + \
                    instance.B.dot(U_ws[:, i])

                # Update initial state
                instance.update_x0(X_ws[:, i + 1])

                # Update solver
                m.update(l=instance.qp_problem['l'],
                         u=instance.qp_problem['u'])

            # Combine the warm-start results into a single dataframe
            res_ws = pd.concat(res_list_ws)

            # Store file
            res_ws.to_csv(n_file_name, index=False)
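
The loop above reuses a single OSQP object across MPC steps: setup once, then update only l and u and re-solve. A self-contained toy sketch of that pattern, modeled on the OSQP documentation example (the problem data here are arbitrary):

import numpy as np
import osqp
from scipy import sparse

# Toy QP: minimize 0.5 x'Px + q'x  subject to  l <= Ax <= u
P = sparse.csc_matrix([[4.0, 1.0], [1.0, 2.0]])
q = np.array([1.0, 1.0])
A = sparse.csc_matrix([[1.0, 1.0], [1.0, 0.0], [0.0, 1.0]])
l = np.array([1.0, 0.0, 0.0])
u = np.array([1.0, 0.7, 0.7])

m = osqp.OSQP()
m.setup(P, q, A, l, u, verbose=False)
r = m.solve()

# Shift the bounds (as a new initial state does in the MPC loop above) and
# re-solve: OSQP keeps its factorization and warm-starts from the previous
# solution, which is what makes the warm-start branch cheaper.
m.update(l=np.array([1.0, 0.1, 0.1]), u=np.array([1.0, 0.8, 0.8]))
r = m.solve()
print(r.info.status, r.info.iter)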
Example #5
    def solve(self):
        """
        Solve Lasso problem
        """

        print("Solve Lasso problem for dimension %i" % self.dimension)

        # Create example instance
        instance = LassoExample(self.dimension)
        qp = instance.qp_problem

        # Create lambda array
        lambda_array = np.logspace(np.log10(self.minimum_lambda_over_max *
                                            instance.lambda_max),
                                   np.log10(instance.lambda_max),
                                   self.n_problems)[::-1]   # From max to min

        '''
        Solve problem without warm start
        '''
        #  print("Solving without warm start")
        # Solution directory
        no_ws_path = os.path.join('.', 'results', 'parametric_problems',
                                  'OSQP no warmstart',
                                  'Lasso',
                                  )

        # Create directory for the results
        make_sure_path_exists(no_ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(no_ws_path, 'n%i.csv' % self.dimension)

        if not os.path.isfile(n_file_name):

            res_list_no_ws = []  # Initialize results
            for lambda_val in lambda_array:
                # Update lambda
                instance.update_lambda(lambda_val)

                # Solve problem
                m = osqp.OSQP()
                m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                        **self.osqp_settings)
                r = m.solve()

                # DEBUG
                #  print("Lambda = %.4e,\t niter = %d" % (lambda_val, r.info.iter))

                if r.info.status != "solved":
                    print("OSQP no warmstart did not solve the problem")

                solution_dict = {'status': [r.info.status],
                                 'run_time': [r.info.run_time],
                                 'iter': [r.info.iter]}

                res_list_no_ws.append(pd.DataFrame(solution_dict))

            # Combine the no-warm-start results into a single dataframe
            res_no_ws = pd.concat(res_list_no_ws)

            # Store file
            res_no_ws.to_csv(n_file_name, index=False)

        '''
        Solve problem with warm start
        '''

        #  print("Solving with warm start")
        # Solution directory
        ws_path = os.path.join('.', 'results', 'parametric_problems',
                               'OSQP warmstart',
                               'Lasso',
                               )

        # Create directory for the results
        make_sure_path_exists(ws_path)

        # Check if solution already exists
        n_file_name = os.path.join(ws_path, 'n%i.csv' % self.dimension)

        # Reset problem to first instance
        instance.update_lambda(lambda_array[0])

        # Setup solver
        qp = instance.qp_problem
        m = osqp.OSQP()
        m.setup(qp['P'], qp['q'], qp['A'], qp['l'], qp['u'],
                **self.osqp_settings)

        if not os.path.isfile(n_file_name):

            res_list_ws = []  # Initialize results
            for lambda_val in lambda_array:

                # Update lambda
                instance.update_lambda(lambda_val)
                m.update(q=qp['q'])

                # Solve problem
                r = m.solve()
                
                # DEBUG
                #  print("Lambda = %.4e,\t niter = %d" % (lambda_val, r.info.iter))

                if r.info.status != "solved":
                    print("OSQP warmstart did not solve the problem")

                # Get results
                solution_dict = {'status': [r.info.status],
                                 'run_time': [r.info.run_time],
                                 'iter': [r.info.iter]}

                res_list_ws.append(pd.DataFrame(solution_dict))

            # Combine the warm-start results into a single dataframe
            res_ws = pd.concat(res_list_ws)

            # Store file
            res_ws.to_csv(n_file_name, index=False)

        else:
            res_ws = pd.read_csv(n_file_name)
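
The sweep above starts at instance.lambda_max. Assuming the standard lasso objective 0.5*||Ax - b||^2 + lambda*||x||_1, that threshold is ||A'b||_inf: for any larger lambda the all-zero vector is already optimal. A standalone sketch with made-up data:

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((30, 10))
b = rng.standard_normal(30)

# Smallest lambda for which x = 0 is optimal (standard lasso fact)
lambda_max = np.linalg.norm(A.T @ b, np.inf)

# Log-spaced sweep from lambda_max down to 1% of it, mirroring the loop above
lambda_array = np.logspace(np.log10(0.01 * lambda_max),
                           np.log10(lambda_max), 20)[::-1]
print(lambda_array[:3])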
Example #6
    def __init__(self, configparser, configurationfile):
        """Initialize a particle filter based on a configurationfile"""
        self.pool = None
        # Parse the configuration file
        self.configfile = configurationfile
        self.config = collections.OrderedDict()  # preserve order of entries
        for section in configparser.sections():
            self.config[section] = collections.OrderedDict()
            for key, value in configparser.items(section):
                self.config[section][key] = value

        if not self.config['COMPUTING']['poolsize']:
            self.poolsize = multiprocessing.cpu_count()
        else:
            self.poolsize = configparser.getint('COMPUTING', 'poolsize')

        self.ID = self.config['FILTER']['runid']
        logging.info('Initializing particle filter: {}'.format(self.ID))

        ncvarinfo = {}
        varlist = filterPick(self.config['VICASC2NC'].keys(),
                             re.compile('^ncvar.*_name$'))
        varlist = [x.split('_name')[0] for x in varlist]
        for var in varlist:
            ncvarinfo[var] = {}
            ncvarinfo[var]['name'] = self.config['VICASC2NC'][var + '_name']
            ncvarinfo[var]['longname'] = self.config['VICASC2NC'][var +
                                                                  '_longname']
            ncvarinfo[var]['column'] = self.config['VICASC2NC'][var +
                                                                '_column']
            ncvarinfo[var]['units'] = self.config['VICASC2NC'][var + '_units']
            ncvarinfo[var]['divideby'] = float(
                self.config['VICASC2NC'][var + '_divideby'])
        self.config['VICASC2NC']['ncvarinfo'] = ncvarinfo

        # initialize simulation time
        self.start = parse_date(self.config['SIMULATION']['start'])
        self.now = self.start
        self.end = parse_date(self.config['SIMULATION']['end'])
        self.runlength = dt.timedelta(
            configparser.getint('SIMULATION', 'runlength'))
        self.backstep = dt.timedelta(
            configparser.getint('SIMULATION', 'backstep'))

        # number of timesteps to evaluate
        self.neval = configparser.getint('FILTER', 'neval')

        # Initialize particles
        self.np = configparser.getint('FILTER', 'np')
        self.particles = [
            VicParticle(i + 1, 1. / self.np) for i in range(self.np)
        ]
        for i in range(self.np):
            p = self.particles[i]
            p.vic['exe'] = self.config['MODELS']['vicexe']
            p.route['exe'] = self.config['MODELS']['routeexe']
            p.refvar = self.config['FILTER']['referencevar']
            p.initstatefile = {}
            p.initstatefile['vic'] = self.config['VICCONFIG'][
                'INIT_STATE'].format(p.ID)
            run_type = self.config['ROUTECONFIG']['SECTION|OPTIONS|RUN_TYPE']
            if run_type.lower() == 'startup':
                p.initstatefile['route'] = \
                    self.config['ROUTECONFIG']['SECTION|INITIAL_STATE|FILE_NAME'].\
                    format(p.ID)
            p.neval = self.neval
        self.normalize_weights()
        logging.info('Initialized {} particles'.format(self.np))

        # set up run and archive paths for particles
        runpath = self.config['FILTER']['rundir']
        archivepath = self.config['FILTER']['archivedir']
        subdirs = ['log', 'history', 'settings']
        self.paths = {}
        self.paths['run'] = {}
        self.paths['archive'] = {}
        for subdir in subdirs:
            self.paths['run'][subdir] = '{}/{}'.format(runpath, subdir)
            self.paths['archive'][subdir] = '{}/{}'.format(archivepath, subdir)
        logging.debug('Creating paths for filter {}:'.format(self.ID))
        for topdir, dirdict in self.paths.items():
            logging.debug('\t{:8s}:'.format(topdir))
            for key, val in dirdict.items():
                logging.debug('\t\t{:6s}: {}'.format(key, val))
                make_sure_path_exists(val)

        subdirs = ['config', 'data', 'log', 'state', 'history']
        for p in self.particles:
            p.paths = {}
            p.paths['run'] = {}
            p.paths['archive'] = {}
            runpath = '{}/pid_{:03d}'.format(self.config['FILTER']['rundir'],
                                             p.ID)
            archivepath = '{}/pid_{:03d}'.format(
                self.config['FILTER']['archivedir'], p.ID)
            for subdir in subdirs:
                p.paths['run'][subdir] = '{}/{}'.format(runpath, subdir)
                p.paths['archive'][subdir] = '{}/{}'.format(
                    archivepath, subdir)
            logging.debug('Creating paths for particle {:03d}:'.format(p.ID))
            for topdir, dirdict in p.paths.items():
                logging.debug('\t{:8s}:'.format(topdir))
                for key, val in dirdict.items():
                    logging.debug('\t\t{:6s}: {}'.format(key, val))
                    make_sure_path_exists(val)

        # set history file for filter
        filename = '{}.history'.format(self.ID)
        self.hist = open(os.path.join(self.paths['run']['history'], filename),
                         'w')

        # set history file for each particle
        for p in self.particles:
            filename = '{}.{:03d}.history'.format(self.ID, p.ID)
            p.hist = open(os.path.join(p.paths['run']['history'], filename),
                          'w')

        logging.info('Initialization complete')
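
To make the ncvarinfo loop above concrete, a hedged sketch of the [VICASC2NC] entries it expects; every key and value below is invented for illustration, and filterPick is replaced by a minimal stand-in.

import configparser
import re

ini = """
[VICASC2NC]
ncvar_prec_name = prec
ncvar_prec_longname = precipitation
ncvar_prec_column = 4
ncvar_prec_units = mm
ncvar_prec_divideby = 1.0
"""

cp = configparser.ConfigParser()
cp.read_string(ini)


def filterPick(keys, pattern):
    """Minimal stand-in for the helper used above."""
    return [k for k in keys if pattern.search(k)]


varlist = filterPick(cp['VICASC2NC'].keys(), re.compile('^ncvar.*_name$'))
print([v.split('_name')[0] for v in varlist])   # -> ['ncvar_prec']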