Example #1
def run(connections_file, coordinates_file, best_schedule_file, N, max_time,
        improve, depth, exclusion, method):
    """ Runs algorithm with specified heuristic and returns best schedule. """

    best = {"schedule": None, "K": 0, "All": []}

    print(f"{N}x {method.name}")

    for i in range(N):

        # Display progress
        if N >= 100 and i % (N // 100) == 0:
            print(f"{(i // (N // 100))}%", end="\r")

        method.schedule = Schedule(
            csvdata(connections_file, coordinates_file, exclusion), max_time)
        method.run()

        if improve:
            optimize = Optimize(method.schedule, depth)
            optimize.run()

        quality = method.schedule.quality()
        best["All"].append(quality)

        # Keep track of best schedule
        if quality["K"] > best["K"]:
            best["schedule"] = method.schedule
            best["K"] = quality["K"]

    dump(best["schedule"], best_schedule_file)
    best["schedule"].create_csv()
    visualisation(best["schedule"])

    return best["schedule"]
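A minimal usage sketch, assuming hypothetical file names and a heuristic object that exposes `name`, a writable `schedule` attribute and a `run()` method, as the loop above requires; none of these concrete values appear in the snippet itself:

# Hypothetical invocation; every argument value below is a placeholder.
best_schedule = run("connections.csv", "coordinates.csv", "best.pickle",
                    N=100, max_time=120, improve=True, depth=2,
                    exclusion=[], method=my_heuristic)  # my_heuristic is assumed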
Example #2
def define_params(args):
    opt = Optimize(space=weights_space,
                   model_path=args.model_path,
                   path_to_alg=args.path_to_alg,
                   path_to_overall_data=args.path_to_overall,
                   path_movies=args.path_to_movies,
                   path_ratings=args.path_to_ratings,
                   min_ratings=args.min_rating,
                   min_watched=args.min_watched,
                   fields=fields)
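    # ncalls and random_seed are assumed to be module-level settings defined
    # outside this snippet.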
    opt.minimize(ncalls, seed=random_seed)
Example #3
def portfolio():
    stocks = str(request.form['stock'])
    stocks = [x.strip() for x in stocks.split(',')]
    output = Optimize(stocks, 'full')
    return render_template('portfolio.html',
                           data={'stockNames': stocks,
                                 'output': output['allocation'],
                                 'maxSharpe': output['maxSharpeRatio'],
                                 'stockData': output['stockData']})
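The view reads `request.form`, so it is presumably bound to a POST route; the decorator and application object are not shown in the snippet. A hedged sketch of how such a view is typically registered in Flask (route path and `app` name are assumptions):

from flask import Flask, render_template, request

app = Flask(__name__)                        # assumed application object

@app.route('/portfolio', methods=['POST'])   # hypothetical route path
def portfolio():
    ...                                      # body as in the snippet above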
Example #4
 def opt(self):
     erosions = {}
     for i in range(2, 9):
         erosions[i] = {}
         erosions[i]['orig'] = self.walker_binary(erosions=i)
         erosions[i]['mask'] = np.where(erosions[i]['orig'] > 0, 255, 0)
     opt = Optimize(self.segmentations, erosions).optimize()
     return erosions[opt]['orig']
Example #5
 def methodSelector(self, choice, terms):
     selector = {
         2 : Roots().runRootFind,
         3 : Optimize().runMinMaxFind,
         4 : Plotter().runPlotter,
         5 : Integrals().runFindIntegral,
     }
     
     selector[choice](terms)
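A hedged usage sketch; `calculator` (an instance of the enclosing class) and `terms` are placeholder names not present in the snippet:

# Hypothetical call: choice 3 dispatches terms to Optimize().runMinMaxFind.
# An unrecognized choice would raise KeyError; selector.get(choice) could guard that.
calculator.methodSelector(3, terms)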
Example #6
    def test_known_solution(self):
        f = lambda z: -1
        df = lambda z: 0
        alpha = 0
        beta = 1
        tolerance = 1e-12
        N = 50
        fixed = Optimize()
        x, z = fixed.fixedpointODE(f, alpha, beta, N, tolerance)
        newt = Optimize()
        xN, zN = newt.newtonODE(f, df, alpha, beta, N, tolerance)

        # Analytical
        ana = lambda x: 0.5*x*(3-x);
        assert pytest.approx(ana(x), abs = tolerance) == z
        assert pytest.approx(ana(xN), abs = tolerance) == zN
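For reference, the analytical solution asserted above follows from the boundary-value problem the two solvers appear to discretize: z'' = f = -1 with z(0) = alpha = 0 and z(1) = beta = 1. Integrating twice gives z = -x^2/2 + c1*x + c2, and the boundary conditions yield c2 = 0 and c1 = 3/2, i.e. z(x) = 0.5*x*(3 - x). A small self-contained check of that closed form, independent of the Optimize class:

import numpy as np

# Verify that z(x) = 0.5*x*(3 - x) satisfies z'' = -1, z(0) = 0, z(1) = 1.
x = np.linspace(0.0, 1.0, 51)
z = 0.5 * x * (3 - x)
assert abs(z[0]) < 1e-12 and abs(z[-1] - 1.0) < 1e-12
d2z = np.gradient(np.gradient(z, x), x)   # numerical second derivative
assert np.allclose(d2z[2:-2], -1.0)       # interior points match z'' = -1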
Example #7
    parser.add_argument('--perturb',
                        type=str,
                        default='blur',
                        help='Choose a perturbation method (blur, noise)')
    parser.add_argument('--tv_coeff',
                        type=float,
                        default=1e-1,
                        help='Coefficient of the total variation (TV) term')
    parser.add_argument('--tv_beta',
                        type=float,
                        default=3.0,
                        help='TV beta value')
    parser.add_argument('--l1_coeff',
                        type=float,
                        default=1e-2,
                        help='Coefficient of the L1 regularization term')
    parser.add_argument('--factor',
                        type=int,
                        default=8,
                        help='Upsampling factor')
    parser.add_argument('--lr', type=float, default=0.1, help='Learning rate')
    parser.add_argument('--iter',
                        type=int,
                        default=300,
                        help='Number of iterations')

    args = parser.parse_args()
    mask_opt = Optimize(args.model_path, args.factor, args.iter, args.lr,
                        args.tv_coeff, args.tv_beta, args.l1_coeff,
                        args.img_path, args.perturb)
    mask_opt.build()
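The call above uses `args.model_path` and `args.img_path`, whose `add_argument` definitions are not part of the excerpt; they presumably appear earlier in the same parser. A hedged sketch of what those missing lines might look like (types and help texts are assumptions; only the option names are implied by the attribute accesses):

parser.add_argument('--model_path', type=str, help='Path of the model to explain')
parser.add_argument('--img_path', type=str, help='Path of the input image')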
Example #8
    def find_homolytic_scissions(self):
        """
        Enumerate all unique homolytic scission reactions
        """
        # set of unique bonds to break
        bonds = []
        for i in range(len(self.species.atom) - 1):
            for j in range(i + 1, len(self.species.atom)):
                # only consider a bond of which both
                # atoms are not in the same cycle
                cycle = []
                for cyc in self.species.cycle_chain:
                    if i in cyc:
                        cycle.extend(cyc)
                if j not in cycle:
                    # only consider single bonds for homolytic scissions
                    if self.species.bond[i][j] == 1:
                        # check if a bond with identical atomids
                        # has been added to the bonds list yet
                        new = 1
                        for bi in bonds:
                            if sorted([self.species.atomid[at]
                                       for at in bi]) == sorted([
                                           self.species.atomid[i],
                                           self.species.atomid[j]
                                       ]):
                                new = 0
                        if new:
                            bonds.append([i, j])
                            hs = HomolyticScission(self.species, self.par,
                                                   self.qc, [i, j])
                            hs.create_geometries()
                            self.hss.append(hs)

        # optimize the products of the hss
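        # Status codes used in this loop (inferred from the checks below):
        #    0: products still need their initial geometry optimization
        #    1: waiting for those optimizations to finish
        #    2: ready for the conformer search, high-level optimization and HIR
        #    3: following up on the product optimizations
        #   -1: finished, barrier below the threshold
        # -999: a calculation failed or the barrier is above the threshold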
        while 1:
            for index, hs in enumerate(self.hss):
                if hs.status == 0:
                    # do the initial optimization
                    for prod in hs.products:
                        hs.qc.qc_opt(prod, prod.geom)
                    hs.status = 1
                if hs.status == 1:
                    # wait for the optimization to finish
                    err = 0
                    for prod in hs.products:
                        e, prod.geom = hs.qc.get_qc_geom(
                            str(prod.chemid) + '_well', prod.natom)
                        if e < 0:
                            # optimization failed
                            hs.status = -999
                            err = -1
                        elif e != 0:
                            err = -1
                        else:
                            e2, prod.energy = hs.qc.get_qc_energy(
                                str(prod.chemid) + '_well', prod.natom)
                            e2, prod.zpe = hs.qc.get_qc_zpe(
                                str(prod.chemid) + '_well', prod.natom)
                    if err == 0:
                        hs.status = 2
                if hs.status == 2:
                    # Do the product conf search, high level opt and HIR
                    for prod in hs.products:
                        prod_opt = Optimize(prod, self.par, self.qc)
                        prod_opt.do_optimization()
                        hs.prod_opt.append(prod_opt)
                    hs.status = 3
                if hs.status == 3:
                    # check up on the optimization
                    opts_done = 1
                    fails = 0
                    for pr_opt in hs.prod_opt:
                        if not pr_opt.shir == 1:
                            opts_done = 0
                            pr_opt.do_optimization()
                        if pr_opt.shigh == -999:
                            fails = 1
                    if fails:
                        hs.status = -999
                    elif opts_done:
                        # check if the energy is higher
                        # than the barrier threshold
                        species_energy = self.species.energy
                        prod_energy = 0.
                        for pr_opt in hs.prod_opt:
                            prod_energy += pr_opt.species.energy
                        barrier = (prod_energy -
                                   species_energy) * constants.AUtoKCAL
                        if barrier > self.par.par['barrier_threshold']:
                            hs.status = -999
                        else:
                            hs.status = -1
            if all([hs.status < 0 for hs in self.hss]):
                break
Example #9
    def generate(self):
        """ 
        Creates the input for each reaction, runs them, and tests for success.
        If successful, it creates the barrier and product objects.
        It also then does the conformational search, and finally, the hindered rotor scans.
        To make the code the most efficient, all of these happen in parallel, in a sense that
        the jobs are not waiting for each other. E.g., one reaction can still be in the stage
        of TS search, while the other can be already at the hindered rotor scan. This way, 
        all cores are occupied efficiently.

        The switching between the various stages are done via the reac_ts_done variable.
        0: initiates the TS search
        1: checks barrier height and errors in TS, and initiates normal mode displacement test, start the irc calculations 
        2: submits product optimization
        3: submit the frequency calculation 
        4: do the optimization of the ts and the products
        5: follow up on the optimizations
        6: finalize calculations, check for wrong number of negative frequencies
        """
        if len(self.species.reac_inst) > 0:
            alldone = 1
        else:
            alldone = 0

        while alldone:
            for index, instance in enumerate(self.species.reac_inst):
                obj = self.species.reac_obj[index]
                instance_name = obj.instance_name

                # START REACTION SEARCH
                if self.species.reac_ts_done[
                        index] == 0 and self.species.reac_step[index] == 0:
                    # verify after a restart whether the search failed in a previous KinBot run
                    status = self.qc.check_qc(instance_name)
                    if status == 'error' or status == 'killed':
                        logging.info(
                            '\tRxn search failed (error or killed) for {}'.
                            format(instance_name))
                        self.species.reac_ts_done[index] = -999

                if self.species.reac_ts_done[
                        index] == 0:  # ts search is ongoing

                    if obj.scan == 0:  #don't do a scan of a bond
                        if self.species.reac_step[index] == obj.max_step + 1:
                            status = self.qc.get_qc_freq(
                                instance_name, self.species.natom)[0]
                            if status == 0:
                                self.species.reac_ts_done[index] = 1
                            elif status == -1:
                                logging.info(
                                    '\tRxn search failed for {}'.format(
                                        instance_name))
                                self.species.reac_ts_done[index] = -999
                        else:
                            self.species.reac_step[
                                index] = reac_family.carry_out_reaction(
                                    obj, self.species.reac_step[index])

                    else:  # do a bond scan
                        if self.species.reac_step[
                                index] == self.par.par['scan_step'] + 1:
                            status = self.qc.get_qc_freq(
                                instance_name, self.species.natom)[0]
                            if status == 0:
                                self.species.reac_ts_done[index] = 1
                            elif status == -1:
                                logging.info(
                                    '\tRxn search failed for {}'.format(
                                        instance_name))
                                self.species.reac_ts_done[index] = -999
                        else:
                            if self.species.reac_step[index] == 0:
                                self.species.reac_step[
                                    index] = reac_family.carry_out_reaction(
                                        obj, self.species.reac_step[index])
                            elif self.species.reac_step[index] > 0:
                                status = self.qc.check_qc(instance_name)
                                if status == 'error' or status == 'killed':
                                    logging.info(
                                        '\tRxn search failed for {}'.format(
                                            instance_name))
                                    self.species.reac_ts_done[index] = -999
                                else:
                                    err, energy = self.qc.get_qc_energy(
                                        instance_name)
                                    if err == 0:
                                        self.species.reac_scan_energy[
                                            index].append(energy)
                                        if len(self.species.
                                               reac_scan_energy[index]) > 1:
                                            if self.species.reac_scan_energy[
                                                    index][
                                                        -1] < self.species.reac_scan_energy[
                                                            index][-2]:
                                                self.species.reac_step[
                                                    index] = self.par.par[
                                                        'scan_step']
                                        self.species.reac_step[
                                            index] = reac_family.carry_out_reaction(
                                                obj,
                                                self.species.reac_step[index])

                elif self.species.reac_ts_done[index] == 1:
                    status = self.qc.check_qc(instance_name)
                    if status == 'running': continue
                    elif status == 'error':
                        logging.info(
                            '\tRxn search failed (gaussian error) for {}'.
                            format(instance_name))
                        self.species.reac_ts_done[index] = -999
                    else:
                        #check the barrier height:
                        if self.species.reac_type[
                                index] == 'R_Addition_MultipleBond':
                            sp_energy = self.qc.get_qc_energy(
                                str(self.species.chemid) + '_well_mp2')[1]
                            barrier = (self.qc.get_qc_energy(instance_name)[1]
                                       - sp_energy) * constants.AUtoKCAL
                        else:
                            sp_energy = self.qc.get_qc_energy(
                                str(self.species.chemid) + '_well')[1]
                            barrier = (self.qc.get_qc_energy(instance_name)[1]
                                       - sp_energy) * constants.AUtoKCAL
                        if barrier > self.par.par['barrier_threshold']:
                            logging.info(
                                '\tRxn barrier too high ({val}) for {name}'.
                                format(val=barrier, name=instance_name))
                            self.species.reac_ts_done[index] = -999
                        else:
                            obj.irc = IRC(
                                obj
                            )  #TODO: this doesn't seem like a good design
                            irc_status = obj.irc.check_irc()
                            if 0 in irc_status:
                                # No IRC started yet, start the IRC now
                                logging.info(
                                    '\tStarting IRC calculations for {}'.
                                    format(instance_name))
                                obj.irc.do_irc_calculations()
                            elif irc_status[0] == 'running' or irc_status[
                                    1] == 'running':
                                continue
                            else:
                                # IRCs have successfully finished, have an error or were killed; in any case,
                                # read the geometries and try to make products out of them
                                # verify which of the IRCs leads back to the reactant, if any
                                prod = obj.irc.irc2stationary_pt()
                                if prod == 0:
                                    logging.info(
                                        '\t\tNo product found for {}'.format(
                                            instance_name))
                                    self.species.reac_ts_done[index] = -999
                                else:
                                    #IRC's are done
                                    obj.products = prod
                                    obj.product_bonds = prod.bond
                                    self.species.reac_ts_done[index] = 2
                elif self.species.reac_ts_done[index] == 2:
                    #identify bimolecular products and wells
                    fragments = obj.products.start_multi_molecular()
                    obj.products = []
                    for i, frag in enumerate(fragments):
                        obj.products.append(frag)
                        self.qc.qc_opt(frag, frag.geom)

                    self.species.reac_ts_done[index] = 3
                elif self.species.reac_ts_done[index] == 3:
                    #wait for the optimization to finish
                    err = 0
                    for st_pt in obj.products:
                        chemid = st_pt.chemid
                        orig_geom = copy.deepcopy(st_pt.geom)
                        e, st_pt.geom = self.qc.get_qc_geom(
                            str(st_pt.chemid) + '_well', st_pt.natom)
                        if e < 0:
                            logging.info(
                                '\tProduct optimization failed for {}, product {}'
                                .format(instance_name, st_pt.chemid))
                            self.species.reac_ts_done[index] = -999
                            err = -1
                        elif e != 0:
                            err = -1
                        else:
                            e2, st_pt.energy = self.qc.get_qc_energy(
                                str(st_pt.chemid) + '_well')
                            e2, st_pt.zpe = self.qc.get_qc_zpe(
                                str(st_pt.chemid) + '_well')
                            st_pt.bond_mx()
                            st_pt.characterize()
                            st_pt.calc_chemid()
                            if chemid != st_pt.chemid:
                                #product was optimized to another structure, give warning and remove this reaction
                                logging.info(
                                    '\tProduct optimized to a different structure for {}, product {} to {}'
                                    .format(instance_name, chemid,
                                            st_pt.chemid))
                                self.species.reac_ts_done[index] = -999
                                err = -1
                    if err == 0:
                        self.species.reac_ts_done[index] = 4
                elif self.species.reac_ts_done[index] == 4:
                    # Do the TS and product optimization

                    #make a stationary point object of the ts
                    bond_mx = np.zeros(
                        (self.species.natom, self.species.natom), dtype=int)
                    for i in range(self.species.natom):
                        for j in range(self.species.natom):
                            bond_mx[i][j] = max(self.species.bond[i][j],
                                                obj.product_bonds[i][j])
                    err, geom = self.qc.get_qc_geom(instance_name,
                                                    self.species.natom)
                    ts = StationaryPoint(instance_name,
                                         self.species.charge,
                                         self.species.mult,
                                         atom=self.species.atom,
                                         geom=geom,
                                         wellorts=1)
                    err, ts.energy = self.qc.get_qc_energy(instance_name)
                    err, ts.zpe = self.qc.get_qc_zpe(instance_name)
                    ts.bond = bond_mx
                    ts.find_cycle()
                    ts.find_conf_dihedral()
                    obj.ts = ts
                    #do the ts optimization
                    obj.ts_opt = Optimize(obj.ts, self.par, self.qc)
                    obj.ts_opt.do_optimization()
                    #do the products optimizations
                    for st_pt in obj.products:
                        #check for products of other reactions that are the same as this product
                        #in the case such products are found, use the same Optimize object for both
                        new = 1
                        for i, inst_i in enumerate(self.species.reac_inst):
                            if not i == index:
                                obj_i = self.species.reac_obj[i]
                                if self.species.reac_ts_done[i] > 3:
                                    for j, st_pt_i in enumerate(
                                            obj_i.products):
                                        if st_pt_i.chemid == st_pt.chemid:
                                            if len(obj_i.prod_opt) > j:
                                                prod_opt = obj_i.prod_opt[j]
                                                new = 0
                                                break
                        if new:
                            prod_opt = Optimize(st_pt, self.par, self.qc)
                            prod_opt.do_optimization()
                        obj.prod_opt.append(prod_opt)
                    self.species.reac_ts_done[index] = 5
                elif self.species.reac_ts_done[index] == 5:
                    #check up on the TS and product optimizations
                    opts_done = 1
                    fails = 0
                    #check if ts is done
                    if not obj.ts_opt.shir == 1:
                        opts_done = 0
                        obj.ts_opt.do_optimization()
                    if obj.ts_opt.shigh == -999:
                        fails = 1
                    for pr_opt in obj.prod_opt:
                        if not pr_opt.shir == 1:
                            opts_done = 0
                            pr_opt.do_optimization()
                        if pr_opt.shigh == -999:
                            fails = 1
                    if fails:
                        self.species.reac_ts_done[index] = -999
                    elif opts_done:
                        self.species.reac_ts_done[index] = 6
                elif self.species.reac_ts_done[index] == 6:
                    # Finalize the calculations

                    #continue to PES search in case a new well was found
                    if self.par.par['pes']:
                        #verify if product is monomolecular, and if it is new
                        if len(obj.products) == 1:
                            st_pt = obj.prod_opt[0].species
                            chemid = st_pt.chemid
                            energy = st_pt.energy
                            well_energy = self.species.energy
                            new_barrier_threshold = self.par.par[
                                'barrier_threshold'] - (
                                    energy - well_energy) * constants.AUtoKCAL
                            dir = os.path.dirname(os.getcwd())
                            jobs = open(dir + '/chemids',
                                        'r').read().split('\n')
                            jobs = [ji for ji in jobs]
                            if not str(chemid) in jobs:
                                #this well is new, add it to the jobs
                                while 1:
                                    try:
                                        #try to open the file and write to it
                                        pes.write_input(
                                            self.par, obj.products[0],
                                            new_barrier_threshold, dir)
                                        f = open(dir + '/chemids', 'a')
                                        f.write('{}\n'.format(chemid))
                                        f.close()
                                        break
                                    except IOError:
                                        #wait a second and try again
                                        time.sleep(1)
                                        pass

                    #check for wrong number of negative frequencies
                    neg_freq = 0
                    for st_pt in obj.products:
                        if any([fi < 0. for fi in st_pt.reduced_freqs]):
                            neg_freq = 1
                    if any([fi < 0. for fi in obj.ts.reduced_freqs[1:]]):
                        neg_freq = 1

                    if neg_freq:
                        logging.info('\tFound negative frequency for ' +
                                     instance_name)
                        self.species.reac_ts_done[index] = -999
                    else:
                        #the reaction search is finished
                        self.species.reac_ts_done[
                            index] = -1  # this is the success code

                        # write a temporary pes input file
                        # remove old xval and im_extent files
                        if os.path.exists('{}_xval.txt'.format(
                                self.species.chemid)):
                            os.remove('{}_xval.txt'.format(
                                self.species.chemid))
                        if os.path.exists('{}_im_extent.txt'.format(
                                self.species.chemid)):
                            os.remove('{}_im_extent.txt'.format(
                                self.species.chemid))
                        postprocess.createPESViewerInput(
                            self.species, self.qc, self.par)

            # keep looping while any reaction is still in progress (status >= 0)
            if any(self.species.reac_ts_done[i] >= 0
                   for i in range(len(self.species.reac_inst))):
                alldone = 1
            else:
                alldone = 0

            # write a small summary while running
            wr = 1
            if wr:
                f_out = open('kinbot_monitor.out', 'w')
                for index, instance in enumerate(self.species.reac_inst):
                    f_out.write('{}\t{}\t{}\n'.format(
                        self.species.reac_ts_done[index],
                        self.species.reac_step[index],
                        self.species.reac_obj[index].instance_name))
                f_out.close()
            time.sleep(1)

        s = []
        for index, instance in enumerate(self.species.reac_inst):
            obj = self.species.reac_obj[index]
            instance_name = obj.instance_name
            # Write a summary on the combinatorial exploration
            if 'combinatorial' in instance_name:
                s.append('NAME\t' + instance_name)

                # Write the bonds that were broken and formed
                s.append('BROKEN_BONDS\t' +
                         '\t'.join('[{}, {}]'.format(re[0], re[1])
                                   for re in obj.reac))
                s.append('FORMED_BONDS\t' +
                         '\t'.join('[{}, {}]'.format(pr[0], pr[1])
                                   for pr in obj.prod))

                # Populate the ts_bond_lengths dict with the values
                # of this reaction
                if self.species.reac_ts_done[index] == -1:
                    for i in range(self.species.natom - 1):
                        for j in range(i + 1, self.species.natom):
                            if self.species.bond[i][j] != obj.product_bonds[i][
                                    j]:
                                if (self.species.bond[i][j] == 0
                                        or obj.product_bonds[i][j] == 0):
                                    syms = []
                                    syms.append(self.species.atom[i])
                                    syms.append(self.species.atom[j])
                                    syms = ''.join(sorted(syms))
                                    dist = np.linalg.norm(obj.ts.geom[i] -
                                                          obj.ts.geom[j])
                                    s.append('TS_BOND_LENGTHS\t{}\t{}'.format(
                                        syms, dist))
                # write the expected inchis
                s.append('EXPECTED_INCHIS\t' +
                         '\t'.join(inchi for inchi in obj.prod_inchi))
                # get the inchis the reaction found
                if self.species.reac_ts_done[index] == -1:
                    inchis = obj.get_final_inchis()
                    s.append('FOUND_INCHIS\t' + '\t'.join(inchis))
                s.append('\n')
            with open('combinatorial.txt', 'w') as f:
                f.write('\n'.join(s) + '\n')

        logging.info("Reaction generation done!")
Example #10
alpha = (0, 1)
dfuncs = (df, df2)
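# funcs (the right-hand-side functions paired with the derivatives in dfuncs)
# is assumed to be defined above this excerpt; it is used in the loop below.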
beta = (1, 1)
tolerance = 1e-12
N = 50

ana = lambda x: 0.5 * x * (3 - x)

print("Press 1 to run fixed point optimization")
print("Press 2 to run Newtons method")
arg = input("Input number:")

if int(arg) == 1:
    # test fixed point
    for i in range(len(alpha)):
        fixed = Optimize()
        x, z = fixed.fixedpointODE(funcs[i], alpha[i], beta[i], N, tolerance)
        plt.plot(x, z, '--.', label="numerical")
        if i == 0:
            plt.plot(x, ana(x), '-', label="analytical")
        plt.title("Fixed point method")
        plt.xlabel("x", fontsize=13)
        plt.ylabel("z", fontsize=13)
        plt.legend(loc="upper right", fontsize=15)
        plt.xticks(fontsize=14)
        plt.yticks(fontsize=14)
        plt.show()

# test Newtons method
elif int(arg) == 2:
    for i in range(len(alpha)):
Example #11
def first_model(options, train_scaled, test_scaled, valid_scaled, column, columns, original, path):
    num_units = []
    for layer in range(options['num_layers']):
        num_units.append(options['num_hidden'])

    Y_train, X_batch_train, Y_batch_train = batch_whole_vt(
        np.array(train_scaled[columns]), options['steps_enc'], options['steps_dec'])
    Y_valid, X_batch_valid, Y_batch_valid = batch_whole_vt(
        np.array(valid_scaled[columns]), options['steps_enc'], options['steps_dec'])
    Y_test, X_batch_test, Y_batch_test = batch_whole_test(
        np.array(test_scaled[column]), options['steps_enc'], options['steps_dec'])

    X_batch_valid, X_batch_test, X_batch_train, columns = side_channel(
        valid_scaled, train_scaled, test_scaled, Y_batch_valid, Y_batch_train, Y_batch_test, X_batch_valid, X_batch_train, X_batch_test, column, columns, options)

    num_inputs = len(columns)  # 1 s index columns
    num_outputs = 1

    with tf.Graph().as_default():
        ################### Model #####################
        with tf.name_scope('input'):
            encoder_inputs = tf.placeholder(tf.float32, shape=(
                None, options['steps_enc'], num_inputs), name='encoder_inputs')
            # this changes to tensor of steps_enc array - like (step_num,batch_num,elem_num) but these are separate arrays (to be able to iterate over Tensors)
            encoder_inputs_ta = [tf.squeeze(t, [1]) for t in tf.split(
                encoder_inputs, options['steps_enc'], 1)]

            current_batch_size = tf.shape(encoder_inputs_ta)[1]

            decoder_targets = tf.placeholder(
                tf.float32, shape=(None, options['steps_dec'], num_outputs), name='decoder_targets')
            decoder_targets_ta = [tf.squeeze(t, [1]) for t in tf.split(
                decoder_targets, options['steps_dec'], 1)]

        model = init_model(options['model_type'], current_batch_size,
                           encoder_inputs_ta, decoder_targets_ta, num_units, options['cell_type'])

        decoder_outputs = model.prediction
        targets = model.target

        #########################     Objective function     ##############################
        with tf.name_scope('loss'):
            # Objective function = MSE
            loss = tf.reduce_mean(
                tf.square(decoder_outputs - targets), name='loss')
            # for TensorBoard
            loss_train = tf.compat.v1.summary.scalar('loss_train', loss)
            loss_test = tf.compat.v1.summary.scalar('loss_test', loss)
            loss_valid = tf.compat.v1.summary.scalar('loss_valid', loss)
            loss_batch_train = tf.compat.v1.summary.scalar(
                'loss_batch_train', loss)

        #############################      Training      ##################################
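        # Optimize here is assumed to wrap the optimizer construction and to
        # expose a `training_op` attribute, which is executed in the loop below.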
        training = Optimize(loss, gradient_clipping=False)

        #############################   TF preparation   ##################################
        # merge all summaries into a single "operation" which we can execute in a session
        summary_op = tf.compat.v1.summary.merge_all()

        # TB from folder "done": tensorboard --logdir=run1:./tmp/seq2seq/ --port 6006
        with tf.compat.v1.Session() as sess:
            saver = tf.compat.v1.train.Saver(save_relative_paths=True)
            # variables need to be initialized before we can use them
            sess.run(tf.compat.v1.global_variables_initializer())

            # create log writer object
            writer = tf.compat.v1.summary.FileWriter(
                path, graph=tf.compat.v1.get_default_graph())
            save_hyperparameters(options, path)

            # https://github.com/mmuratarat/handson-ml/blob/master/11_deep_learning.ipynb
            max_checks_without_progress = options['max_patience']
            checks_without_progress = 0
            best_loss = np.infty

            try:
                # perform training cycles
                for iteration in range(0, options['max_epochs']+1):

                    x_batch, y_batch = next_batch(
                        np.array(valid_scaled[column]), np.array(train_scaled[column]), options)

                    # Scalars for TensorBoard
                    mse_train, summary_train = sess.run([loss, loss_train], feed_dict={
                        encoder_inputs: X_batch_train, decoder_targets: Y_batch_train})
                    writer.add_summary(summary_train, iteration)
                    mse_test, summary_test = sess.run([loss, loss_test], feed_dict={
                        encoder_inputs: X_batch_test, decoder_targets: Y_batch_test})
                    writer.add_summary(summary_test, iteration)
                    mse_valid, summary_valid = sess.run([loss, loss_valid], feed_dict={
                        encoder_inputs: X_batch_valid, decoder_targets: Y_batch_valid})
                    writer.add_summary(summary_valid, iteration)
                    if iteration == 0:
                        mse_batch_train, summary_batch_train = sess.run([loss, loss_batch_train], feed_dict={
                            encoder_inputs: x_batch, decoder_targets: y_batch})
                        writer.add_summary(summary_batch_train, iteration)
                    print(iteration, '.iteration - MSE_train:',
                          mse_train, ', MSE_test', mse_test)

                    if iteration % 100 == 0:
                        # Compute the predictions
                        train_prediction = sess.run(model.prediction, feed_dict={
                                                    encoder_inputs: X_batch_train})
                        test_prediction = sess.run(model.prediction, feed_dict={
                            encoder_inputs: X_batch_test})
                        Y_train_pred, Y_test_pred = train_test_form(
                            train_prediction, test_prediction, Y_train, Y_test)

                        # Visualize
                        subplot(Y_train, Y_train_pred, Y_test,
                                Y_test_pred, mse_train, mse_test, path, iteration)
                        # Save
                        save_data(path, 'train_pred', Y_train_pred)
                        save_data(path, 'test_pred', Y_test_pred)

                    model_path = path + '/my_model'
                    if mse_valid < best_loss:
                        best_loss = mse_valid
                        checks_without_progress = 0
                        saver.save(sess, model_path)
                    else:
                        checks_without_progress += 1
                        if checks_without_progress > max_checks_without_progress:
                            print('Early stopping!')
                            saver.restore(sess, model_path)
                            iteration = str(
                                iteration - 1 - max_checks_without_progress)
                            print('Model restored.')
                            break

                    _, summary_batch_train, _ = sess.run([loss, loss_batch_train, training.training_op], feed_dict={
                        encoder_inputs: x_batch, decoder_targets: y_batch})
                    writer.add_summary(summary_batch_train, iteration)
                print('done')

            except KeyboardInterrupt:
                print('training interrupted')

            train_prediction = sess.run(model.prediction, feed_dict={
                encoder_inputs: X_batch_train})
            test_prediction = sess.run(model.prediction, feed_dict={
                encoder_inputs: X_batch_test})
            # rescale
            Y_train_pred, Y_test_pred = train_test_form(
                train_prediction, test_prediction, Y_train, Y_test)
            Y_train_pred = inverse_normalization(
                scaler, Y_train_pred.reshape(-1, 1))
            Y_test_pred = inverse_normalization(
                scaler, Y_test_pred.reshape(-1, 1))
            Y_train_pred = inverse_zscore(Y_train_pred, mean, deviation)
            Y_test_pred = inverse_zscore(Y_test_pred, mean, deviation)

            # to have back the original data before scaling and normalizing
            if options['steps_enc'] == options['steps_dec']:
                Y_train = np.array(
                    train[original].iloc[options['steps_enc']:]).reshape(-1, 1)
                Y_test = np.array(
                    test[original].iloc[options['steps_enc']:]).reshape(-1, 1)
            else:
                Y_train = np.array(
                    train[original].iloc[-len(Y_train_pred):]).reshape(-1, 1)
                Y_test = np.array(
                    test[original].iloc[options['steps_enc']:options['steps_enc'] + len(Y_test_pred)]).reshape(-1, 1)

            mean_test = np.mean(Y_test)
            std_test = np.std(Y_test, ddof=1)
            print('data:', options['input_data'], 'mean:',
                  mean_test, 'standard deviation', std_test, 'n', len(Y_test))

            mse_train = np.mean((Y_train-Y_train_pred)**2)
            rmse_train = np.sqrt(mse_train)
            mse_test = np.mean((Y_test-Y_test_pred)**2)
            rmse_test = np.sqrt(mse_test)
            save_data(path, 'finalTrain', Y_train_pred)
            save_data(path, 'finalTest', Y_test_pred)

            final_subplot(Y_train, Y_train_pred, Y_test, Y_test_pred,
                          mse_train, mse_test, rmse_train, rmse_test, path, iteration)
            rmse_train_parts = plot_parts(
                options['steps_dec'], Y_train, Y_train_pred, 'Train', path)
            rmse_test_parts = plot_parts(
                options['steps_dec'], Y_test, Y_test_pred, 'Test', path)

    return model_path
Example #12
def main():
    try:
        input_file = sys.argv[1]
    except IndexError:
        print('To use KinBot, supply one argument being the input file!')
        sys.exit(-1)

    # print the license message to the console
    print(license_message.message)

    # initialize the parameters for this run
    par = Parameters(input_file)

    # set up the logging environment
    if par.par['verbose']:
        logging.basicConfig(filename='kinbot.log', level=logging.DEBUG)
    else:
        logging.basicConfig(filename='kinbot.log', level=logging.INFO)

    # write the license message to the log file
    logging.info(license_message.message)
    # time stamp of the KinBot start
    logging.info('Starting KinBot at {}'.format(datetime.datetime.now()))

    # Make the necessary directories
    if not os.path.exists('perm'):
        os.makedirs('perm')
    if not os.path.exists('scratch'):
        os.makedirs('scratch')
    if not os.path.exists('molpro'):
        os.mkdir('molpro')
    if par.par['rotor_scan'] == 1:
        if not os.path.exists('hir'):
            os.mkdir('hir')
        if not os.path.exists('hir_profiles'):
            os.mkdir('hir_profiles')
        if not os.path.exists('perm/hir/'):
            os.makedirs('perm/hir/')
    if par.par['conformer_search'] == 1:
        if not os.path.exists('conf'):
            os.mkdir('conf')
        if not os.path.exists('perm/conf'):
            os.makedirs('perm/conf')
    if not os.path.exists('me'):
        os.mkdir('me')

    # initialize the reactant
    well0 = StationaryPoint('well0',
                            par.par['charge'],
                            par.par['mult'],
                            smiles=par.par['smiles'],
                            structure=par.par['structure'])
    well0.short_name = 'w1'

    # write the initial reactant geometry to a file for visualization
    geom_out = open('geometry.xyz', 'w')
    geom_out.write('{}\n\n'.format(well0.natom))
    for i, at in enumerate(well0.atom):
        x, y, z = well0.geom[i]
        geom_out.write('{} {:.6f} {:.6f} {:.6f}\n'.format(at, x, y, z))
    geom_out.write('\n\n')
    geom_out.close()

    # characterize the initial reactant
    well0.characterize()
    well0.name = str(well0.chemid)
    start_name = well0.name

    # initialize the qc instance
    qc = QuantumChemistry(par)

    # start the initial optimization of the reactant
    logging.info('Starting optimization of initial well')
    qc.qc_opt(well0, well0.geom)
    err, well0.geom = qc.get_qc_geom(str(well0.chemid) + '_well',
                                     well0.natom,
                                     wait=1)
    err, well0.freq = qc.get_qc_freq(str(well0.chemid) + '_well',
                                     well0.natom,
                                     wait=1)
    if err < 0:
        logging.error('Error with initial structure optimization.')
        return
    if any(well0.freq[i] <= 0 for i in range(len(well0.freq))):
        logging.error('Found imaginary frequency for initial structure.')
        return

    # characterize again and look for differences
    well0.characterize()
    well0.name = str(well0.chemid)
    if well0.name != start_name:
        logging.error(
            'The first well optimized to a structure different from the input.'
        )
        return

    # do an MP2 optimization of the reactant,
    # to compare beta scission barrier heights to
    logging.info('Starting MP2 optimization of initial well')
    qc.qc_opt(well0, well0.geom, mp2=1)
    err, geom = qc.get_qc_geom(str(well0.chemid) + '_well_mp2', well0.natom, 1)

    # characterize again and look for differences
    well0.characterize()
    well0.name = str(well0.chemid)

    # read the energy and the zpe corrected energy
    err, well0.energy = qc.get_qc_energy(str(well0.chemid) + '_well', 1)
    err, well0.zpe = qc.get_qc_zpe(str(well0.chemid) + '_well', 1)

    well_opt = Optimize(well0, par, qc, wait=1)
    well_opt.do_optimization()
    if well_opt.shigh == -999:
        logging.error(
            'Error with high level optimization of initial structure.')
        return

    # do the reaction search using heuristics
    if par.par['reaction_search'] == 1:
        logging.info('Starting reaction searches of initial well')
        rf = ReactionFinder(well0, par, qc)
        rf.find_reactions()
        rg = ReactionGenerator(well0, par, qc)
        rg.generate()
    # do the homolytic scission products search
    if par.par['homolytic_scissions'] == 1:
        logging.info('Starting the search for homolytic scission products')
        well0.homolytic_scissions = HomolyticScissions(well0, par, qc)
        well0.homolytic_scissions.find_homolytic_scissions()
    # initialize the master equation instance
    mess = MESS(par, well0)
    mess.write_input()
    mesmer = MESMER(par, well0)
    mesmer.write_input()
    if par.par['me'] == 1:
        logging.info('Starting Master Equation calculations')
        if par.par['me_code'] == 'mess':
            mess.run()
        elif par.par['me_code'] == 'mesmer':
            mesmer.run()
        else:
            logging.error('Cannot recognize me code {}'.format(
                par.par['me_code']))

    # postprocess the calculations
    postprocess.createSummaryFile(well0, qc, par)
    postprocess.createPESViewerInput(well0, qc, par)

    logging.info('Finished KinBot at {}'.format(datetime.datetime.now()))
    print("Done!")
Example #13
import tensorflow as tf
from optimize import Optimize
import matplotlib.pyplot as plt

if __name__ == '__main__':
    optimizer = Optimize()  # avoid shadowing the class with its instance
    mnist = tf.keras.datasets.mnist
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    #x_train = x_train[:100,:,:]
    #y_train = y_train[:100]
    #print(y_train.shape)
    #print(y_train[:10])
    #print(x_train.shape)
    #print(Adam.one_hot_encoding(y_train))
    x_train, x_test = x_train / 255.0, x_test / 255.0

    train_costs, val_costs, test_costs = optimizer.model(x_train,
                                                          y_train,
                                                          x_test,
                                                          y_test,
                                                          method='adam')
    r_tr, r_v, r_t = optimizer.model(x_train,
                                     y_train,
                                     x_test,
                                     y_test,
                                     method='rmsprop')
    g_tr, g_v, g_t = optimizer.model(x_train,
                                     y_train,
                                     x_test,
                                     y_test,