Code Example #1
File: Port.py Project: Harnesser/wxDebuggy
def __init__(self,
             name='',
             direction='input',
             nettype='wire',
             msb=0,
             lsb=0):

    Net.__init__(self, name, nettype, msb, lsb)
    self.direction = direction
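This snippet subclasses a Net base class that is not shown. A minimal sketch of what that base class must provide, inferred only from the Net.__init__(self, name, nettype, msb, lsb) call above (hypothetical, not the actual wxDebuggy code):

class Net:
    def __init__(self, name='', nettype='wire', msb=0, lsb=0):
        # Store the signal's name, net type, and bit range.
        self.name = name
        self.nettype = nettype
        self.msb = msb
        self.lsb = lsb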
Code Example #2
File: __init__.py Project: AeyS/pymod
def test(sourcekey, keyword, nextid):
    keyword = keyword.decode("gb2312")
    sourcekey = sourcekey.decode("gb2312")
    from Net import Net
    net = Net()
    #net.getkeyword("cm79", keyword)
    net.getsource("cm79", keyword)
    print(net.result)
    exit()
    # NOTE: everything below is unreachable after exit(); kept from the
    # original snippet.
    print("ok")
    net.writeDB(u'../Config/databaselKeyWord.db',
                'august',
                'id INTEGER PRIMARY KEY,keyword text unicode,price text unicode',
                nextid, sourcekey)
Code Example #3
File: topcryst.py Project: peteboyd/tobascco
    def _check_barycentric_embedding(self, graph, voltage):
        net = Net(graph)
        net.voltage = voltage
        net.simple_cycle_basis()
        net.get_lattice_basis()
        #net.get_cycle_basis()
        net.get_cocycle_basis()
        #for i,j in itertools.combinations(range(len(net.kernel)), 2):
        #    print np.any(np.in1d(np.array(net.kernel)[i].nonzero(), np.array(net.kernel)[j].nonzero()))

        #for i, j in itertools.combinations(range(len(net.cycle)), 2):
        #    if not np.any(np.in1d(np.array(net.cycle)[i].nonzero(), np.array(net.kernel)[j].nonzero())):
        #        print 'i', ', '.join(['e%i'%(k+1) for k in np.nonzero(np.array(net.kernel)[i])[0]])
        #        print 'j', ', '.join(['e%i'%(k+1) for k in np.nonzero(np.array(net.kernel)[j])[0]])
        #print np.array(net.cycle)[0].nonzero(), np.array(net.cycle)[1].nonzero()
        net.barycentric_embedding()
        #verts = net.graph.vertices()
        #for id in range(len(verts)):
        #    Pi = [verts[id], verts[:id] + verts[id+1:]]
        #    print net.graph.to_undirected().coarsest_equitable_refinement(Pi)
        #G = net.graph.to_undirected().dominating_set(independent=True)
        #for i in np.array(net.cycle):
        #    print ', '.join(['e%i'%(k+1) for k in np.nonzero(i)[0]])

        #q = np.concatenate((net.cycle, net.kernel[:8]))
        #for id, volt in enumerate(np.array(net.voltage)):
        #    print 'e%i'%(id+1), "(%i, %i, %i)"%(tuple(volt))

        #A = matrix(q)
        #for i in A.echelon_form():
        #    print ', '.join(['e%i'%(k+1) for k in np.nonzero(i)[0]])
        #for j in np.array(net.kernel):
        #    print ', '.join(['e%i'%(k+1) for k in np.nonzero(j)[0]])

        #print G.order()
        #print G.gens()
        g = GraphPlot(net)
        g.view_graph()
        g.view_placement(init=(0.5, 0.5, 0.5), edge_labels=False)
Code Example #4
File: Main.py Project: OmarHoyosE/NeuralNetworks
    def __init__(self):
        self.training_patterns = []
        self.training_targets = []
        self.desired_error = 0.1

        # Reading the inputs from a file
        for line in open('train.txt', 'r'):
            self.training_patterns.append([int(i) for i in line.replace('\n', '').split(']')[0].replace('[', '').split(',')])
            self.training_targets.append(int(line.replace('\n', '').split(']')[1]))
        print(self.training_patterns, self.training_targets)

        # Creating the Network
        net = Net(len(self.training_patterns[0]), max(self.training_targets) - min(self.training_targets), self.desired_error)
        net.learn()

        # Once the network is trained, classify some patterns
        self.patterns = []
        self.targets = []
        for line in open('inputs.txt','r'):
            self.patterns.append([int(i) for i in line.replace('[','').replace('\n','').split(']')[0].split(',')])
        self.targets = net.classify(self.patterns)

        # Plotting the inputs and targets
        Plotter().plot3d(self.training_patterns, self.training_targets,'Training patterns')
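For reference, the parsing above implies each train.txt line holds a bracketed pattern followed by its integer target. A standalone check of that assumed format (the sample line is illustrative, not from the project):

# Assumed line format, e.g. '[1,0,1]0': pattern in brackets, then target.
line = '[1,0,1]0\n'
pattern = [int(i) for i in line.replace('\n', '').split(']')[0].replace('[', '').split(',')]
target = int(line.replace('\n', '').split(']')[1])
assert pattern == [1, 0, 1] and target == 0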
Code Example #5
import numpy as np
import pylab as pl
from sys import argv
from Net import Net
import cfunctions as cfn
from functions import simpleaxis, init_fig

savefig = len(argv) > 1

net = Net()
step = .1

init_fig()
# colorblind-friendly palette from http://www.cookbook-r.com/Graphs/Colors_(ggplot2)/
col = [
    '#56B4E9', '#0072B2', '#F0E442', '#E69F00', '#CC79A7', '#D55E00',
    '#40e0d0', '#009E73'
]

# Voltage
spikes, u = cfn.run(net.W, step, 400, 50, 20, 2, 20, 2)
fig1, ax = pl.subplots(nrows=2, ncols=1)
for i, j in enumerate([7, 1]):
    ax[i].plot(u[:, j] + 2 * spikes[:, j], color=col[j], zorder=5)
    ax[i].set_ylim(-1, 3.5)
    ax[i].axis('off')
ax[-1].plot([0, 10 / step], [-1, -1], c='k', lw=7, clip_on=False, zorder=5)
ax[-1].plot([0, 0], [1, 2], c='k', lw=7, clip_on=False, zorder=5)
fig1.subplots_adjust(hspace=-.1)
fig1.subplots_adjust(.04, .1, 1, 1)
if savefig:
Code Example #6
File: kechuanet.py Project: zaaabik/SkolBrainet
crops_per_image = 500
lr = 1e-5
epochs_per_save = 1

crop_size = 65
mini_crop_size = 7

device = torch.device('cpu')
if torch.cuda.is_available():
    print('GPU !!!')
    device = torch.device('cuda:0')

if not os.path.exists('models'):
    os.mkdir('models')

net = Net().to(device)
criterion = nn.BCELoss()
optimizer = optim.Adam(net.parameters(),
                       lr=lr)  # lr = 1e-5 in the original paper

augmentation_imgs, augmentation_gts = augmentation(imgs, gts)
mri_dataset = MriDataset(augmentation_imgs,
                         augmentation_gts,
                         crop_size,
                         mini_crop_size,
                         crops_per_image,
                         crop_function=crop)
mri_dataloader = data.DataLoader(mri_dataset, batch_size)
assert len(mri_dataset) == len(augmentation_imgs) * crops_per_image
print(len(mri_dataset))
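The assert above pins down the dataset's sizing contract: each augmented image contributes exactly crops_per_image crops. A hedged sketch of a __len__ consistent with that contract (an assumed layout, not the project's actual MriDataset):

from torch.utils import data

class MriDatasetSketch(data.Dataset):
    def __init__(self, imgs, crops_per_image):
        self.imgs = imgs
        self.crops_per_image = crops_per_image

    def __len__(self):
        # One entry per (image, crop-index) pair.
        return len(self.imgs) * self.crops_per_image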
Code Example #7
File: fcnn_test.py Project: hmxv2/angles_and_demons
fi_test_set=sys.argv[1]#./simple_process2
fi=sys.argv[2]#./models/train_set_loss-0.3448 valid_set_auc-0.8686.model
fo=sys.argv[3]#./test_predict_result/

f=open(fi_test_set+'/test_set.js', 'rb')#./simple_process2
test_set=pickle.load(f)
f.close()

# hyperparameters
in_dim= 1109
hidden1_dim=600
hidden2_dim=300

model_ = Net(use_cuda=use_cuda, 
            in_dim= in_dim, 
            hidden1_dim=hidden1_dim, 
            hidden2_dim=hidden2_dim
           )
model_trained = torch.load(fi)
model_.load_state_dict(model_trained)  # load the trained weights
if use_cuda:
    model_.cuda()
    
#test set
test_set_inputs = Variable(torch.Tensor(test_set))

predicts = model_(test_set_inputs)
if use_cuda:
    pred = predicts.data.cpu().numpy()  # move from GPU to CPU first
    pred = pred[:,1]
else:
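    # Presumed completion of the truncated branch (an assumption: it should
    # mirror the CUDA path above, minus the device transfer):
    pred = predicts.data.numpy()
    pred = pred[:, 1]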
Code Example #8
def search_algo(args):
    # initialize random seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.set_num_threads(1)

    # initialize/load
    task_class1 = getattr(tasks, args.task1)
    task_class2 = getattr(tasks, args.task2)
    if args.no_noise:
        task1 = task_class1(force_std=0.0, torque_std=0.0)
        task2 = task_class2(force_std=0.0, torque_std=0.0)
    else:
        task1 = task_class1()
        task2 = task_class2()
    graphs = rd.load_graphs(args.grammar_file)
    rules = [rd.create_rule_from_graph(g) for g in graphs]

    # initialize preprocessor
    # Find all possible link labels, so they can be one-hot encoded
    all_labels = set()
    for rule in rules:
        for node in rule.lhs.nodes:
            all_labels.add(node.attrs.require_label)
    all_labels = sorted(list(all_labels))

    max_nodes = args.max_nodes

    global preprocessor
    # preprocessor = Preprocessor(max_nodes = max_nodes, all_labels = all_labels)
    preprocessor = Preprocessor(all_labels=all_labels)

    # initialize the env
    env1 = RobotGrammarEnv(task1,
                           rules,
                           seed=args.seed,
                           mpc_num_processes=args.mpc_num_processes)
    env2 = RobotGrammarEnv(task2,
                           rules,
                           seed=args.seed,
                           mpc_num_processes=args.mpc_num_processes)

    # initialize Value function
    device = 'cpu'
    state = env1.reset()
    sample_adj_matrix, sample_features, sample_masks = preprocessor.preprocess(
        state)
    num_features = sample_features.shape[1]
    V = Net(max_nodes=max_nodes, num_channels=num_features,
            num_outputs=1).to(device)

    # load pretrained V function
    if args.load_V_path is not None:
        V.load_state_dict(torch.load(args.load_V_path))
        print_info('Loaded pretrained V function from {}'.format(
            args.load_V_path))

    # initialize target V_hat look up table
    V_hat = dict()

    # load pretrained V_hat
    if args.load_Vhat_path is not None:
        V_hat_fp = open(args.load_Vhat_path, 'rb')
        V_hat = pickle.load(V_hat_fp)
        V_hat_fp.close()
        print_info('Loaded pretrained Vhat from {}'.format(
            args.load_Vhat_path))

    if not args.test:
        # initialize save folders and files
        fp_log = open(os.path.join(args.save_dir, 'log.txt'), 'w')
        fp_log.close()
        fp_eval = open(os.path.join(args.save_dir, 'eval.txt'), 'w')
        fp_eval.close()
        design_csv_path = os.path.join(args.save_dir, 'designs.csv')
        fp_csv = open(design_csv_path, 'w')
        fieldnames = [
            'rule_seq', 'reward1', 'reward2', 'reward', 'opt_seed1',
            'opt_seed2'
        ]
        writer = csv.DictWriter(fp_csv, fieldnames=fieldnames)
        writer.writeheader()
        fp_csv.close()

        # initialize the optimizer
        global optimizer
        optimizer = torch.optim.Adam(V.parameters(), lr=args.lr)

        # initialize the seen states pool
        states_pool = StatesPool(capacity=args.states_pool_capacity)
        states_set = set()

        # explored designs
        designs = []
        design_rewards = []
        design_rewards_1 = []
        design_rewards_2 = []
        design_opt_seeds_1 = []
        design_opt_seeds_2 = []

        # initialize best design rule sequence
        best_design, best_reward = None, -np.inf

        # reward history
        epoch_rew_his = []
        last_checkpoint = -1

        # recording time
        t_sample_sum = 0.
        num_samples_interval = 0
        max_seen_nodes = 0

        # record the count for invalid samples
        no_action_samples, step_exceeded_samples, self_collision_samples = 0, 0, 0

        # initialize stats variables
        num_invalid_samples, num_valid_samples = 0, 0
        repeated_cnt = 0

        # record prediction error
        prediction_error = []

        for epoch in range(args.num_iterations):
            t_start = time.time()

            V.eval()

            # update eps and eps_sample
            if args.eps_schedule == 'linear-decay':
                eps = args.eps_start + epoch / args.num_iterations * (
                    args.eps_end - args.eps_start)
            elif args.eps_schedule == 'exp-decay':
                eps = args.eps_end + (args.eps_start - args.eps_end) * np.exp(
                    -1.0 * epoch / args.num_iterations / args.eps_decay)

            if args.eps_sample_schedule == 'linear-decay':
                eps_sample = args.eps_sample_start + epoch / args.num_iterations * (
                    args.eps_sample_end - args.eps_sample_start)
            elif args.eps_sample_schedule == 'exp-decay':
                eps_sample = args.eps_sample_end + (
                    args.eps_sample_start - args.eps_sample_end) * np.exp(
                        -1.0 * epoch / args.num_iterations /
                        args.eps_sample_decay)
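            # In closed form (with T = args.num_iterations), linear-decay gives
            # eps(t) = eps_start + (t / T) * (eps_end - eps_start), while
            # exp-decay gives
            # eps(t) = eps_end + (eps_start - eps_end) * exp(-t / (T * eps_decay));
            # the eps_sample schedule mirrors this with its own endpoints.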

            t_sample, t_update, t_mpc, t_opt = 0, 0, 0, 0

            selected_design, selected_reward = None, -np.inf
            selected_state_seq, selected_rule_seq = None, None

            p = random.random()
            if p < eps_sample:
                num_samples = 1
            else:
                num_samples = args.num_samples

            # use epsilon-greedy to sample a design within the maximum number of steps.
            for _ in range(num_samples):
                valid = False
                while not valid:
                    t0 = time.time()

                    state = env1.reset()
                    rule_seq = []
                    state_seq = [state]
                    no_action_flag = False
                    for _ in range(args.depth):
                        action, step_type = select_action(env1, V, state, eps)
                        if action is None:
                            no_action_flag = True
                            break
                        rule_seq.append(action)
                        next_state = env1.transite(state, action)
                        state_seq.append(next_state)
                        state = next_state
                        if not has_nonterminals(state):
                            break

                    valid = env1.is_valid(state)

                    t_sample += time.time() - t0

                    t0 = time.time()

                    if not valid:
                        # update the invalid sample's count
                        if no_action_flag:
                            no_action_samples += 1
                        elif has_nonterminals(state):
                            step_exceeded_samples += 1
                        else:
                            self_collision_samples += 1
                        num_invalid_samples += 1
                    else:
                        num_valid_samples += 1

                    num_samples_interval += 1

                    t_update += time.time() - t0

                predicted_value = predict(V, state)
                if predicted_value > selected_reward:
                    selected_design, selected_reward = state, predicted_value
                    selected_rule_seq, selected_state_seq = rule_seq, state_seq

            t0 = time.time()

            repeated = False
            if hash(selected_design) in V_hat:
                repeated = True
                repeated_cnt += 1

            reward1, best_seed1 = -np.inf, None

            for _ in range(args.num_eval):
                _, rew = env1.get_reward(selected_design)
                if rew > reward1:
                    reward1, best_seed1 = rew, env1.last_opt_seed

            reward2, best_seed2 = -np.inf, None

            for _ in range(args.num_eval):
                _, rew = env2.get_reward(selected_design)
                if rew > reward2:
                    reward2, best_seed2 = rew, env2.last_opt_seed

            reward = args.weight1 * reward1 + args.weight2 * reward2

            t_mpc += time.time() - t0

            # save the design and the reward in the list
            designs.append(selected_rule_seq)
            design_rewards.append(reward)
            design_rewards_1.append(reward1)
            design_rewards_2.append(reward2)
            design_opt_seeds_1.append(best_seed1)
            design_opt_seeds_2.append(best_seed2)

            # update best design
            if reward > best_reward:
                best_design, best_reward = selected_rule_seq, reward
                print_info(
                    'new best: reward = {:.4f}, predicted reward = {:.4f}, reward1 = {:.4f}, reward2 = {:.4f}, num_samples = {}'
                    .format(reward, selected_reward, reward1, reward2,
                            num_samples))

            t0 = time.time()

            # update V_hat for the valid design
            update_Vhat(args, V_hat, selected_state_seq, reward)

            # update states pool for the valid design
            update_states_pool(states_pool, selected_state_seq, states_set)

            t_update += time.time() - t0

            t0 = time.time()

            # optimize
            V.train()
            total_loss = 0.0
            for _ in range(args.opt_iter):
                minibatch = states_pool.sample(
                    min(len(states_pool), args.batch_size))

                train_adj_matrix, train_features, train_masks, train_reward = [], [], [], []
                max_nodes = 0
                for robot_graph in minibatch:
                    hash_key = hash(robot_graph)
                    target_reward = V_hat[hash_key]
                    adj_matrix, features, _ = preprocessor.preprocess(
                        robot_graph)
                    max_nodes = max(max_nodes, len(features))
                    train_adj_matrix.append(adj_matrix)
                    train_features.append(features)
                    train_reward.append(target_reward)

                max_seen_nodes = max(max_seen_nodes, max_nodes)

                for i in range(len(minibatch)):
                    train_adj_matrix[i], train_features[i], masks = \
                        preprocessor.pad_graph(train_adj_matrix[i], train_features[i], max_nodes)
                    train_masks.append(masks)

                train_adj_matrix_torch = torch.tensor(train_adj_matrix)
                train_features_torch = torch.tensor(train_features)
                train_masks_torch = torch.tensor(train_masks)
                train_reward_torch = torch.tensor(train_reward)

                optimizer.zero_grad()
                output, loss_link, loss_entropy = V(train_features_torch,
                                                    train_adj_matrix_torch,
                                                    train_masks_torch)
                loss = F.mse_loss(output[:, 0], train_reward_torch)
                loss.backward()
                total_loss += loss.item()
                optimizer.step()

            t_opt += time.time() - t0

            t_end = time.time()

            t_sample_sum += t_sample

            # logging
            if (epoch + 1
                ) % args.log_interval == 0 or epoch + 1 == args.num_iterations:
                iter_save_dir = os.path.join(args.save_dir,
                                             '{}'.format(epoch + 1))
                os.makedirs(os.path.join(iter_save_dir), exist_ok=True)
                # save model
                save_path = os.path.join(iter_save_dir, 'V_model.pt')
                torch.save(V.state_dict(), save_path)
                # save V_hat
                save_path = os.path.join(iter_save_dir, 'V_hat')
                fp = open(save_path, 'wb')
                pickle.dump(V_hat, fp)
                fp.close()

            # save explored design and its reward
            fp_csv = open(design_csv_path, 'a')
            fieldnames = [
                'rule_seq', 'reward1', 'reward2', 'reward', 'opt_seed1',
                'opt_seed2'
            ]
            writer = csv.DictWriter(fp_csv, fieldnames=fieldnames)
            for i in range(last_checkpoint + 1, len(designs)):
                writer.writerow({
                    'rule_seq': str(designs[i]),
                    'reward1': design_rewards_1[i],
                    'reward2': design_rewards_2[i],
                    'reward': design_rewards[i],
                    'opt_seed1': design_opt_seeds_1[i],
                    'opt_seed2': design_opt_seeds_2[i]
                })
            last_checkpoint = len(designs) - 1
            fp_csv.close()

            epoch_rew_his.append(reward)

            avg_loss = total_loss / args.opt_iter
            len_his = min(len(epoch_rew_his), 30)
            avg_reward = np.sum(epoch_rew_his[-len_his:]) / len_his
            prediction_error.append(np.abs(selected_reward - reward))
            avg_prediction_error = np.sum(
                prediction_error[-len_his:]) / len_his

            if repeated:
                print_white('Epoch {:4}: T_sample = {:5.2f}, T_mpc = {:5.2f}, T_opt = {:5.2f}, eps = {:5.3f}, #samples = {:2}, training loss = {:7.4f}, avg_pred_error = {:6.4f}, predicted_reward = {:6.4f}, reward = {:6.4f}, reward1 = {:6.4f}, reward2 = {:6.4f}, last 30 epoch reward = {:6.4f}, best reward = {:6.4f}'.format(\
                    epoch, t_sample, t_mpc, t_opt, eps, num_samples, \
                    avg_loss, avg_prediction_error, selected_reward, reward, reward1, reward2, avg_reward, best_reward))
            else:
                print_warning('Epoch {:4}: T_sample = {:5.2f}, T_mpc = {:5.2f}, T_opt = {:5.2f}, eps = {:5.3f}, #samples = {:2}, training loss = {:7.4f}, avg_pred_error = {:6.4f}, predicted_reward = {:6.4f}, reward = {:6.4f}, reward1 = {:6.4f}, reward2 = {:6.4f}, last 30 epoch reward = {:6.4f}, best reward = {:6.4f}'.format(\
                    epoch, t_sample, t_mpc, t_opt, eps, num_samples, \
                    avg_loss, avg_prediction_error, selected_reward, reward, reward1, reward2, avg_reward, best_reward))

            fp_log = open(os.path.join(args.save_dir, 'log.txt'), 'a')
            fp_log.write('eps = {:.4f}, eps_sample = {:.4f}, num_samples = {}, T_sample = {:4f}, T_update = {:4f}, T_mpc = {:.4f}, T_opt = {:.4f}, loss = {:.4f}, predicted_reward = {:.4f}, reward = {:.4f}, reward1 = {:.4f}, reward2 = {:.4f}, avg_reward = {:.4f}\n'.format(\
                eps, eps_sample, num_samples, t_sample, t_update, t_mpc, t_opt, avg_loss, selected_reward, reward, reward1, reward2, avg_reward))
            fp_log.close()

            if (epoch + 1) % args.log_interval == 0:
                print_info(
                    'Avg sampling time for last {} epoch: {:.4f} second / sample'
                    .format(args.log_interval,
                            t_sample_sum / num_samples_interval))
                t_sample_sum = 0.
                num_samples_interval = 0
                print_info('max seen nodes = {}'.format(max_seen_nodes))
                print_info('size of states_pool = {}'.format(len(states_pool)))
                print_info(
                    '#valid samples = {}, #invalid samples = {}, #valid / #invalid = {}'
                    .format(
                        num_valid_samples, num_invalid_samples,
                        num_valid_samples / num_invalid_samples
                        if num_invalid_samples > 0 else 10000.0))
                print_info(
                    'Invalid samples: #no_action_samples = {}, #step_exceeded_samples = {}, #self_collision_samples = {}'
                    .format(no_action_samples, step_exceeded_samples,
                            self_collision_samples))
                print_info('repeated rate = {}'.format(repeated_cnt /
                                                       (epoch + 1)))

        save_path = os.path.join(args.save_dir, 'model_state_dict_final.pt')
        torch.save(V.state_dict(), save_path)
    else:
        import IPython
        IPython.embed()

        # test
        V.eval()
        print('Start testing')
        test_epoch = 10
        y0 = []
        y1 = []
        x = []
        for ii in range(0, 6):
            eps = 1.0 - 0.2 * ii

            print('------------------------------------------')
            print('eps = ', eps)

            reward_sum = 0.
            best_reward = -np.inf
            for epoch in range(test_epoch):
                t_sample = 0.
                valid = False
                trials = 0
                while not valid:
                    trials += 1

                    t0 = time.time()

                    state = env1.reset()
                    rule_seq = []
                    state_seq = [state]
                    no_action_flag = False
                    for _ in range(args.depth):
                        action, step_type = select_action(env1, V, state, eps)
                        if action is None:
                            no_action_flag = True
                            break
                        rule_seq.append(action)
                        next_state = env1.transite(state, action)
                        state_seq.append(next_state)
                        state = next_state
                        if not has_nonterminals(state):
                            break

                    valid = env1.is_valid(state)

                    t_sample += time.time() - t0

                sys.stdout.write('\rrunning mpc')
                sys.stdout.flush()

                t0 = time.time()
                _, reward = env1.get_reward(state)
                t_mpc = time.time() - t0

                reward_sum += reward
                best_reward = max(best_reward, reward)
                sys.stdout.write(
                    '\rdesign {}: reward = {}, trials = {}, t_mpc = {:.2f}, t_sample = {:.2f}'
                    .format(epoch, reward, trials, t_mpc, t_sample))
                sys.stdout.write('\n')
                sys.stdout.flush()

            print('test avg reward = ', reward_sum / test_epoch)
            print('best reward found = ', best_reward)
            x.append(eps)
            y0.append(reward_sum / test_epoch)
            y1.append(best_reward)

        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].plot(x, y0)
        ax[0].set_title('Avg Reward')
        ax[0].set_xlabel('eps')
        ax[0].set_ylabel('reward')

        ax[1].plot(x, y1)
        ax[1].set_title('Best Reward')
        ax[1].set_xlabel('eps')
        ax[1].set_ylabel('reward')

        plt.show()
Code Example #9
        
        with torch.no_grad():
            batch_y_hat = model(batch_x, mode="prediction")
            loss = criterion(batch_y_hat, batch_y)
         
        current_accuracy = torch.eq(torch.max(batch_y_hat, 1)[1], torch.max(batch_y, 1)[1]).sum().item() / batch_y.shape[0]
        current_CE = loss.detach().cpu().item()

        testing_CE += current_CE
        testing_accuracy += current_accuracy

    #print("Testing Results - Avg accuracy: {:.2f} Avg loss: {:.2f}".format(testing_accuracy / len(test_loader), testing_CE / len(test_loader)))
    
    return testing_accuracy / len(test_loader), testing_CE / len(test_loader)

model_adv = Net()
model_PGD = Net()
model_natural = Net()
model_adv.load_state_dict(torch.load('models/adversarial_interpolation_0_epoch_0'))
model_PGD.load_state_dict(torch.load('models/PGD_0_epoch_0'))
model_natural.load_state_dict(torch.load('models/natural_0_epoch_0'))

data_natural = []
data_adv = []
data_PGD = []

for i in range(100):
    pgd = PGD(Cross_Entropy, iter=i)
    data_natural.append(test_model(model_natural, test_loader, criterion, attacker=pgd, device=device)[1])
    data_PGD.append(test_model(model_PGD, test_loader, criterion, attacker=pgd, device=device)[1])
    data_adv.append(test_model(model_adv, test_loader, criterion, attacker=pgd, device=device)[1])
Code Example #10
from Net import Net
from MCTS import MCTS
from Board import Board

net = Net('./best_policy_1900.model')  #'./best_policy_4.model'
count = 5000  # number of games to play
count_ai = 1
board_move_data = []
board_round = []
board_result = []
# first and second players: AI = 1, human = 0
for n in range(1, count + 1):
    board = Board(1, 1)
    m = 1
    mcts = MCTS(net, board)
    #print("第%d局开始"% (n))
    while 1:
        board.not_end()
        if not board.not_end_number:
            break
        if board.current_player:
            board.next_move = mcts.get_move()  # format: xyab
        else:
            board.next_move = int(input('Enter the next move (format xy ab): '))  # input format: xyab
        board.all_move.append(board.next_move)
        board.move()
        mcts.board = board
        mcts.update_with_move()
        m += 1
        #print("移动了%d步,%d" % (m,board.next_move))
Code Example #11
from dashing import *
from time import sleep, time
import math, PIL.Image
from PIL import Image
from blessed import Terminal

from Net import Net

if __name__ == "__main__":
    net = Net()

    t0 = 'Layer 0    Layer 1     Layer 2                     |  Update type: {0}\n'.format(
        net.update_type)
    t1 = '------     -----                                   |  Learning Rate: {0}\n'.format(
        net.learning_rate)
    t2 = '|_0_|------|_0_|- -                                |  Momentum: {0}\n'.format(
        net.momentum)
    t4 = '----- - -  -----    -  -----     Expected Output   ------------------------------------------------\n'
    t5 = '|_1_|------|_1_|-------|_0_|-------> 0 or 1        \n'
    t7 = '-----  - - -----  -  - -----                       \n'
    t8 = '|_B_|------|_2_|-   -  |_B_|                       \n'
    t10 = '       - - ----- - -                              \n'
    t11 = '       -  -|_3_|- -                               \n'
    t12 = '        -  ----- -                                \n'
    t13 = '          -|_B_|-                                 \n'
    display = t0 + t1 + t2 + t4 + t5 + t7 + t8 + t10 + t11 + t12 + t13

    term = Terminal()
    ui = HSplit(
        VSplit(
            Text(text=f"{display}",
Code Example #12
def search_algo(args):
    # initialize random seed
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.set_num_threads(1)

    # initialize/load
    # TODO: use 80 to fit the input of trained MPC GNN, use args.depth * 3 later for real mpc
    max_nodes = 80
    task_class = getattr(tasks, args.task)
    if not args.no_noise:
        task = task_class()
    else:
        task = task_class(force_std=0.0, torque_std=0.0)

    graphs = rd.load_graphs(args.grammar_file)
    rules = [rd.create_rule_from_graph(g) for g in graphs]

    # initialize preprocessor
    # Find all possible link labels, so they can be one-hot encoded
    all_labels = set()
    for rule in rules:
        for node in rule.lhs.nodes:
            all_labels.add(node.attrs.require_label)
    all_labels = sorted(list(all_labels))

    global preprocessor
    preprocessor = Preprocessor(max_nodes=max_nodes, all_labels=all_labels)

    # initialize the env
    env = RobotGrammarEnv(task,
                          rules,
                          seed=args.seed,
                          mpc_num_processes=args.mpc_num_processes)

    # initialize Value function
    device = 'cpu'
    state = env.reset()
    sample_adj_matrix, sample_features, sample_masks = preprocessor.preprocess(
        state)
    num_features = sample_features.shape[1]
    V = Net(max_nodes=max_nodes, num_channels=num_features,
            num_outputs=1).to(device)
    # V.share_memory()

    # load pretrained V function
    if args.load_V_path is not None:
        V.load_state_dict(torch.load(args.load_V_path))
        print_info('Loaded pretrained V function from {}'.format(
            args.load_V_path))

    # initialize target V_hat look up table
    V_hat = dict()

    # load pretrained V_hat
    if args.load_Vhat_path is not None:
        V_hat_fp = open(args.load_Vhat_path, 'rb')
        V_hat = pickle.load(V_hat_fp)
        V_hat_fp.close()
        print_info('Loaded pretrained Vhat from {}'.format(
            args.load_Vhat_path))

    # initialize invalid_cnt
    invalid_his = dict()
    num_invalid_samples, num_valid_samples = 0, 0
    repeated_cnt = 0

    # initialize the seen states pool
    states_pool = StatesPool(capacity=args.states_pool_capacity)

    # explored designs
    designs = []
    design_rewards = []
    design_opt_seeds = []

    # record prediction error
    prediction_error_sum = 0.0

    if not args.test:
        # initialize save folders and files
        fp_log = open(os.path.join(args.save_dir, 'log.txt'), 'w')
        fp_log.close()
        fp_eval = open(os.path.join(args.save_dir, 'eval.txt'), 'w')
        fp_eval.close()
        design_csv_path = os.path.join(args.save_dir, 'designs.csv')
        fp_csv = open(design_csv_path, 'w')
        fieldnames = ['rule_seq', 'reward', 'opt_seed']
        writer = csv.DictWriter(fp_csv, fieldnames=fieldnames)
        writer.writeheader()
        fp_csv.close()

        # initialize the optimizer
        global optimizer
        optimizer = torch.optim.Adam(V.parameters(), lr=args.lr)

        # initialize best design rule sequence
        best_design, best_reward = None, -np.inf

        # reward history
        epoch_rew_his = []
        last_checkpoint = -1

        # recording time
        t_sample_sum = 0.

        # record the count for invalid samples
        no_action_samples, step_exceeded_samples, self_collision_samples = 0, 0, 0

        # define state0
        state0 = make_initial_graph()
        for epoch in range(args.num_iterations):
            t_start = time.time()

            V.eval()

            # update eps and eps_sample
            if args.eps_schedule == 'linear-decay':
                eps = args.eps_start + epoch / args.num_iterations * (
                    args.eps_end - args.eps_start)
            elif args.eps_schedule == 'exp-decay':
                eps = args.eps_end + (args.eps_start - args.eps_end) * np.exp(
                    -1.0 * epoch / args.num_iterations / args.eps_decay)

            if args.eps_sample_schedule == 'linear-decay':
                eps_sample = args.eps_sample_start + epoch / args.num_iterations * (
                    args.eps_sample_end - args.eps_sample_start)
            elif args.eps_sample_schedule == 'exp-decay':
                eps_sample = args.eps_sample_end + (
                    args.eps_sample_start - args.eps_sample_end) * np.exp(
                        -1.0 * epoch / args.num_iterations /
                        args.eps_sample_decay)

            t_sample, t_mpc, t_opt = 0, 0, 0

            t0 = time.time()

            selected_design, selected_reward = None, -np.inf
            selected_state_seq, selected_rule_seq = None, None

            p = random.random()
            if p < eps_sample:
                num_samples = 1
            else:
                num_samples = args.num_samples

            # use epsilon-greedy to sample a design within the maximum number of steps.
            results_queue = Queue()
            done_event = Event()
            time_queue = Queue()
            tt0 = time.time()
            processes = []
            for task_id in range(num_samples):
                seed = random.getrandbits(32)
                p = Process(target=sample_design,
                            args=(args, task_id, seed, env, V, eps,
                                  results_queue, time_queue, done_event))
                p.start()
                processes.append(p)
            t_start = time.time() - tt0

            sampled_rewards = [0.0 for _ in range(num_samples)]
            thread_times = []
            t_update = 0
            for _ in range(num_samples):
                samples = results_queue.get()
                thread_time = time_queue.get()
                thread_times.append(thread_time)
                tt0 = time.time()
                for i in range(len(samples) - 1):
                    assert samples[i].info != 'valid'
                    if samples[i].info == 'no_action':
                        no_action_samples += 1
                    elif samples[i].info == 'step_exceeded':
                        step_exceeded_samples += 1
                    else:
                        self_collision_samples += 1

                    state, state_seq = apply_rules(state0, samples[i].rule_seq,
                                                   env)
                    # update the Vhat for invalid designs
                    update_Vhat(args,
                                V_hat,
                                state_seq,
                                -2.0,
                                invalid=True,
                                invalid_cnt=invalid_his)
                    # update states pool
                    update_states_pool(states_pool, state_seq, V_hat)
                    num_invalid_samples += 1

                assert samples[-1].info == 'valid'
                state, state_seq = apply_rules(state0, samples[-1].rule_seq,
                                               env)
                num_valid_samples += 1
                if samples[-1].predicted_reward > selected_reward:
                    selected_design, selected_reward = state, samples[
                        -1].predicted_reward
                    selected_rule_seq, selected_state_seq = samples[
                        -1].rule_seq, state_seq

                sampled_rewards[
                    samples[-1].task_id] = samples[-1].predicted_reward
                t_update += time.time() - tt0

            done_event.set()

            for p in processes:
                p.join()

            print('thread time = {}'.format(thread_times))
            print('t_update = {}, t_start = {}'.format(t_update, t_start))

            # print('all sampled designs:')
            # print(sampled_rewards)

            t_sample += time.time() - t0

            t0 = time.time()

            repeated = False
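            # Invalid designs are stored in V_hat with the sentinel value -2.0
            # (see the update_Vhat(..., -2.0, invalid=True) call above), so only
            # entries strictly above the sentinel count as re-visited designs.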
            if (hash(selected_design)
                    in V_hat) and (V_hat[hash(selected_design)] > -2.0 + 1e-3):
                repeated = True
                repeated_cnt += 1

            ctrl_seq, reward = env.get_reward(selected_design)

            t_mpc += time.time() - t0

            # save the design and the reward in the list
            designs.append(selected_rule_seq)
            design_rewards.append(reward)
            design_opt_seeds.append(env.last_opt_seed)

            # update best design
            if reward > best_reward:
                best_design, best_reward = selected_rule_seq, reward
                print_info(
                    'new best: reward = {:.4f}, predicted reward = {:.4f}, num_samples = {}'
                    .format(reward, selected_reward, num_samples))

            # update V_hat for the valid design
            update_Vhat(args, V_hat, selected_state_seq, reward)

            # update states pool for the valid design
            update_states_pool(states_pool, selected_state_seq, V_hat)

            t0 = time.time()

            # optimize
            V.train()
            total_loss = 0.0
            for _ in range(args.opt_iter):
                minibatch = states_pool.sample(
                    min(len(states_pool), args.batch_size))

                train_adj_matrix, train_features, train_masks, train_reward = [], [], [], []
                for robot_graph in minibatch:
                    hash_key = hash(robot_graph)
                    target_reward = V_hat[hash_key]
                    adj_matrix, features, masks = preprocessor.preprocess(
                        robot_graph)
                    train_adj_matrix.append(adj_matrix)
                    train_features.append(features)
                    train_masks.append(masks)
                    train_reward.append(target_reward)

                train_adj_matrix_torch = torch.tensor(train_adj_matrix)
                train_features_torch = torch.tensor(train_features)
                train_masks_torch = torch.tensor(train_masks)
                train_reward_torch = torch.tensor(train_reward)

                optimizer.zero_grad()
                output, loss_link, loss_entropy = V(train_features_torch,
                                                    train_adj_matrix_torch,
                                                    train_masks_torch)
                loss = F.mse_loss(output[:, 0], train_reward_torch)
                loss.backward()
                total_loss += loss.item()
                optimizer.step()

            t_opt += time.time() - t0

            t_end = time.time()

            t_sample_sum += t_sample

            # logging
            if (epoch + 1
                ) % args.log_interval == 0 or epoch + 1 == args.num_iterations:
                iter_save_dir = os.path.join(args.save_dir,
                                             '{}'.format(epoch + 1))
                os.makedirs(os.path.join(iter_save_dir), exist_ok=True)
                # save model
                save_path = os.path.join(iter_save_dir, 'V_model.pt')
                torch.save(V.state_dict(), save_path)
                # save V_hat
                save_path = os.path.join(iter_save_dir, 'V_hat')
                fp = open(save_path, 'wb')
                pickle.dump(V_hat, fp)
                fp.close()

            # save explored design and its reward
            fp_csv = open(design_csv_path, 'a')
            fieldnames = ['rule_seq', 'reward', 'opt_seed']
            writer = csv.DictWriter(fp_csv, fieldnames=fieldnames)
            for i in range(last_checkpoint + 1, len(designs)):
                writer.writerow({
                    'rule_seq': str(designs[i]),
                    'reward': design_rewards[i],
                    'opt_seed': design_opt_seeds[i]
                })
            last_checkpoint = len(designs) - 1
            fp_csv.close()

            epoch_rew_his.append(reward)

            avg_loss = total_loss / args.opt_iter
            len_his = min(len(epoch_rew_his), 30)
            avg_reward = np.sum(epoch_rew_his[-len_his:]) / len_his
            prediction_error_sum += (selected_reward - reward)**2
            avg_prediction_error = prediction_error_sum / (epoch + 1)

            if repeated:
                print_white('Epoch {:4}: T_sample = {:5.2f}, T_mpc = {:5.2f}, T_opt = {:5.2f}, eps = {:5.3f}, eps_sample = {:5.3f}, #samples = {:2}, training loss = {:7.4f}, pred_error = {:6.4f}, predicted_reward = {:6.4f}, reward = {:6.4f}, last 30 epoch reward = {:6.4f}, best reward = {:6.4f}'.format(\
                    epoch, t_sample, t_mpc, t_opt, eps, eps_sample, num_samples, \
                    avg_loss, avg_prediction_error, selected_reward, reward, avg_reward, best_reward))
            else:
                print_warning('Epoch {:4}: T_sample = {:5.2f}, T_mpc = {:5.2f}, T_opt = {:5.2f}, eps = {:5.3f}, eps_sample = {:5.3f}, #samples = {:2}, training loss = {:7.4f}, pred_error = {:6.4f}, predicted_reward = {:6.4f}, reward = {:6.4f}, last 30 epoch reward = {:6.4f}, best reward = {:6.4f}'.format(\
                    epoch, t_sample, t_mpc, t_opt, eps, eps_sample, num_samples, \
                    avg_loss, avg_prediction_error, selected_reward, reward, avg_reward, best_reward))

            fp_log = open(os.path.join(args.save_dir, 'log.txt'), 'a')
            fp_log.write('eps = {:.4f}, eps_sample = {:.4f}, num_samples = {}, T_sample = {:4f}, T_mpc = {:.4f}, T_opt = {:.4f}, loss = {:.4f}, predicted_reward = {:.4f}, reward = {:.4f}, avg_reward = {:.4f}\n'.format(\
                eps, eps_sample, num_samples, t_sample, t_mpc, t_opt, avg_loss, selected_reward, reward, avg_reward))
            fp_log.close()

            if (epoch + 1) % args.log_interval == 0:
                print_info(
                    'Avg sampling time for last {} epoch: {:.4f} second'.
                    format(args.log_interval,
                           t_sample_sum / args.log_interval))
                t_sample_sum = 0
                print_info('size of states_pool = {}'.format(len(states_pool)))
                print_info(
                    '#valid samples = {}, #invalid samples = {}, #valid / #invalid = {}'
                    .format(
                        num_valid_samples, num_invalid_samples,
                        num_valid_samples / num_invalid_samples
                        if num_invalid_samples > 0 else 10000.0))
                print_info(
                    'Invalid samples: #no_action_samples = {}, #step_exceeded_samples = {}, #self_collision_samples = {}'
                    .format(no_action_samples, step_exceeded_samples,
                            self_collision_samples))
                max_trials, cnt = 0, 0
                for key in invalid_his.keys():
                    if invalid_his[key] > max_trials:
                        max_trials = invalid_his[key]
                    if invalid_his[key] > args.max_trials:
                        cnt += 1

                print_info(
                    'max invalid_trials = {}, #failed nodes = {}'.format(
                        max_trials, cnt))
                print_info('repeated rate = {}'.format(repeated_cnt /
                                                       (epoch + 1)))

        save_path = os.path.join(args.save_dir, 'model_state_dict_final.pt')
        torch.save(V.state_dict(), save_path)
    else:
        import IPython
        IPython.embed()

        # test
        V.eval()
        print('Start testing')
        test_epoch = 30
        y0 = []
        y1 = []
        x = []
        for ii in range(0, 11):
            eps = 1.0 - 0.1 * ii

            print('------------------------------------------')
            print('eps = ', eps)

            reward_sum = 0.
            best_reward = -np.inf
            for epoch in range(test_epoch):
                t0 = time.time()

                # use epsilon-greedy to sample a design within the maximum number of steps.
                valid = False
                while not valid:
                    state = env.reset()
                    rule_seq = []
                    state_seq = [state]
                    for _ in range(args.depth):
                        action, step_type = select_action(env, V, state, eps)
                        if action is None:
                            break
                        rule_seq.append(action)
                        next_state = env.transite(state, action)
                        state_seq.append(next_state)
                        if not has_nonterminals(next_state):
                            valid = True
                            break
                        state = next_state

                _, reward = env.get_reward(state)
                reward_sum += reward
                best_reward = max(best_reward, reward)
                print(
                    f'design {epoch}: reward = {reward}, time = {time.time() - t0}'
                )

            print('test avg reward = ', reward_sum / test_epoch)
            print('best reward found = ', best_reward)
            x.append(eps)
            y0.append(reward_sum / test_epoch)
            y1.append(best_reward)

        import matplotlib.pyplot as plt
        fig, ax = plt.subplots(1, 2, figsize=(10, 5))
        ax[0].plot(x, y0)
        ax[0].set_title('Avg Reward')
        ax[0].set_xlabel('eps')
        ax[0].set_ylabel('reward')

        ax[1].plot(x, y1)
        ax[1].set_title('Best Reward')
        ax[1].set_xlabel('eps')
        ax[1].set_ylabel('reward')

        plt.show()
Code Example #13
File: Main.py Project: vsteinb/clock
from enums import LogLevel, Signal
from gates.Eradicate import Eradicate
from Net import Net
from temporalFissures.Looper import Looper

if __name__ == "__main__":

    net = Net()
    net.logLevel = LogLevel.DEBUG

    e0 = Eradicate()
    e1 = Eradicate()
    e2 = Eradicate()
    L0 = Looper()

    e33 = Eradicate()

    branches = [
        [e0, e1, e2, L0],
        [e1, Eradicate()],
        [e0, Eradicate(), L0],
        [e0, e2, Eradicate(), e33, Eradicate()],
        [e2, Eradicate()],
        [e33, Eradicate()],
    ]
    # branches = [
    # 	[e0, e1],
    # 	[e1, Eradicate()],
    # 	[e1, Eradicate(), Eradicate()],
    # ]
Code Example #14
write_log('Loading data ...')
train_gen = batch_generator(batch_size=args.batch_size,
                            random_seed=args.random_seed)
gas_valid_x, gas_valid_y, _ = bulk_load('GAS_valid')
gas_eval_x, gas_eval_y, _ = bulk_load('GAS_eval')
dcase_valid_x, dcase_valid_y, _ = bulk_load('DCASE_valid')
dcase_test_x, dcase_test_y, _ = bulk_load('DCASE_test')
dcase_test_frame_truth = load_dcase_test_frame_truth()
DCASE_CLASS_IDS = [
    318, 324, 341, 321, 307, 310, 314, 397, 325, 326, 323, 319, 14, 342, 329,
    331, 316
]

# Build model
args.kernel_size = tuple(int(x) for x in args.kernel_size.split('x'))
model = Net(args).cuda()
if args.optimizer == 'sgd':
    optimizer = SGD(model.parameters(),
                    lr=args.init_lr,
                    momentum=0.9,
                    nesterov=True)
elif args.optimizer == 'adam':
    optimizer = Adam(model.parameters(), lr=args.init_lr)
scheduler = ReduceLROnPlateau(
    optimizer, mode='max', factor=args.lr_factor,
    patience=args.lr_patience) if args.lr_factor < 1.0 else None
criterion = nn.BCELoss()

# Train model
write_log('Training model ...')
write_log(
Code Example #15
File: PoseEstimation.py Project: cmacw/indiv_project
class PoseEstimation:
    def __init__(self,
                 trainset_info,
                 testset_info=None,
                 lr=0.001,
                 wd=0,
                 radial=False):
        self.trainset_info = trainset_info
        self.radial = radial

        # Tensor using CPU or GPU
        self.device = self._use_cuda()

        # model setup
        self.net = Net()
        self.net.to(self.device)
        if radial:
            self.criterion = nn.L1Loss()
        else:
            self.criterion = nn.MSELoss()
        self.optimizer = optim.Adam(self.net.parameters(),
                                    lr=lr,
                                    weight_decay=wd)

        # Input data setup
        normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225])
        self.trsfm = transforms.Compose(
            [transforms.Resize((128, 128)),
             transforms.ToTensor(), normalize])
        # self.trsfm = transforms.Compose([transforms.ToTensor()])
        self.trainset = PosEstimationDataset(self.trainset_info,
                                             transform=self.trsfm,
                                             radial=radial)
        self.norm_range = self.trainset.get_norm_range()
        self.trainloader = DataLoader(
            self.trainset,
            batch_size=self.trainset_info["batch_size"],
            shuffle=True)

        # Set up testset
        if testset_info is not None:
            self.load_test_set(testset_info, radial=radial)

        # initialise directory for saving training results
        self.save_dir = os.path.join(
            trainset_info["path"], trainset_info["dataset_name"] + "_results",
            "eph{}_bs{}_lr{}_wd{}".format(trainset_info["epochs"],
                                          trainset_info["batch_size"], lr, wd))

    def load_test_set(self, testset_info, radial=False, webcam_test=False):
        self.testset_info = testset_info
        self.testset = PosEstimationDataset(self.testset_info, self.trsfm,
                                            self.norm_range, radial,
                                            webcam_test)
        self.testloader = DataLoader(self.testset, shuffle=True)

    def train(self, show_fig=True, save_output=True, eval_eph=False):
        # Create directory for saving results
        os.makedirs(self.save_dir, exist_ok=False)

        loss_sample_size = len(self.trainloader) // 4

        # Initialise loss array
        train_losses = np.zeros(self.trainset_info["epochs"] *
                                len(self.trainloader))

        # Initialise distance and angle diff array
        eph_losses = np.zeros([self.trainset_info["epochs"], 2])
        eph_diff = np.zeros([self.trainset_info["epochs"], 4])

        # Begin training
        t0 = time.time()
        try:
            for epoch in range(self.trainset_info["epochs"]
                               ):  # loop over the dataset multiple times
                print('\n[Epoch', epoch + 1, ']')
                running_loss = 0.0
                for i, data in enumerate(self.trainloader):
                    # Set network to training mode
                    self.net.train()

                    # get the inputs; data is a dictionary of {image, pos}
                    image, pos = data['image'].to(self.device), data['pos'].to(
                        self.device)

                    # zero the parameter gradients
                    self.optimizer.zero_grad()

                    # forward + backward + optimize
                    outputs = self.net(image)
                    loss = self.criterion(outputs, pos)
                    loss.backward()
                    self.optimizer.step()

                    # Calculate the difference in euclidean distance and angles
                    train_losses[epoch * len(self.trainloader) +
                                 i] = loss.item()

                    # print statistics
                    # running_loss += loss.item()
                    # if i % loss_sample_size == loss_sample_size - 1:
                    #     print('[{}, {}] loss: {:.5f}'.
                    #           format(epoch + 1, i + 1, running_loss / loss_sample_size))
                    #     running_loss = 0.0

                # Run evaluation and show results
                if eval_eph:
                    eph_losses[epoch], eph_diff[epoch, :] = self.evaluation()

        except KeyboardInterrupt:
            pass

        t1 = time.time()
        print('Time taken: {}'.format(t1 - t0))

        # Save output
        if save_output:
            self.save_model_output(train_losses, eph_losses, eph_diff)

        if show_fig:
            self.display_training_fig(train_losses, eph_losses, eph_diff)

        print('\n--- Finished Training ---\n')

    # Evaluation below runs the model held by this class on the test set.

    def evaluation(self):
        assert self.testset is not None, \
            "No testset is supplied. Make sure PoseEstimation.load_test_set(set_info) is called"
        # Initialise loss array
        losses = np.zeros(len(self.testloader))

        # Initialise distance and angle diff array
        diff = np.zeros([len(self.testloader), 2])

        # turn on evaluation mode
        self.net.eval()

        # start evaluation
        for i, data in enumerate(self.testloader):
            # get the inputs; data is a dictionary of {image, pos}
            image, pos = data['image'].to(self.device), data['pos'].to(
                self.device)

            # forward
            outputs = self.net(image)
            loss = self.criterion(outputs, pos)

            # Calculate the error
            losses[i] = loss.item()
            diff[i] = self.cal_error(outputs, pos)

        print("true   : {}".format(pos[-1]))
        print("predict: {}".format(outputs[-1]))
        return self.print_avg_stat(losses, diff)

    def _use_cuda(self):
        device = torch.device("cpu")
        if torch.cuda.is_available():
            device = torch.device("cuda:0")
            with warnings.catch_warnings(record=True) as w:
                warnings.filterwarnings("error")
                try:
                    torch.cuda.get_device_capability(device)
                except Exception:
                    device = torch.device("cpu")

        print(device)
        return device

    def show_batch_image(self):
        for i_batch, sample_batched in enumerate(self.trainloader):
            print(i_batch, sample_batched['image'].size(),
                  sample_batched['pos'].size())

            images_batch = sample_batched["image"]

            if i_batch == 0:
                plt.figure()
                grid = torchvision.utils.make_grid(images_batch)
                plt.imshow(grid.numpy().transpose((1, 2, 0)))
                plt.axis('off')
                plt.ioff()
                plt.show()
                break

    # Save model and losses
    def save_model_output(self, train_losses, test_losses, test_diff):
        self.net.save_model_parameter(self.trainset_info, self.save_dir)
        self.save_array2csv(self.trainset_info, train_losses, "train_loss")
        self.save_array2csv(self.trainset_info, test_losses, "eph_loss")
        self.save_array2csv(self.trainset_info, test_diff, "diff")

    # Visualise the losses and deviation
    def display_training_fig(self, train_losses, test_losses, test_diff):
        self.plot_array(train_losses, "Loss", self.trainset_info, scatter=True)
        if self.radial:
            self.plot_array(test_diff[:, 0],
                            "Difference_in_distance(m)",
                            self.trainset_info,
                            std=test_diff[:, 1])
        else:
            self.plot_array(test_diff[:, 0],
                            "Difference_in_distance(m)",
                            self.trainset_info,
                            std=test_diff[:, 2])
            self.plot_array(test_diff[:, 1],
                            "Difference_in_angle(deg)",
                            self.trainset_info,
                            std=test_diff[:, 3])

        avg_train_losses = np.average(train_losses.reshape(
            -1, len(self.trainloader)),
                                      axis=1)
        plt.figure()
        plt.plot(range(1,
                       len(avg_train_losses) + 1),
                 avg_train_losses,
                 label="train")
        plt.plot(range(1,
                       len(test_losses) + 1),
                 test_losses[:, 1],
                 label="test")
        plt.ylabel("Loss")
        plt.xlabel("epoch")
        plt.legend()
        fig_name = "fig_{}_eph{}_bs{}_{}.png".format(
            self.trainset_info["dataset_name"], self.trainset_info["epochs"],
            self.trainset_info["batch_size"], "Loss_comp")
        file_path = os.path.join(self.save_dir, fig_name)
        plt.savefig(file_path)

    def plot_array(self, data, ylabel, trainset_info, scatter=False, std=None):
        plt.figure()
        if scatter:
            x = np.arange(len(data))
            plt.plot(x, data, marker='o', markersize=0.6, linewidth=0)
            plt.yscale("log")
            plt.xlabel("batch")
        else:
            plt.errorbar(range(1,
                               len(data) + 1),
                         data,
                         yerr=std,
                         ecolor="k",
                         capsize=3)
            plt.xlabel("epoch")

        plt.ylabel(ylabel)
        fig_name = "fig_{}_eph{}_bs{}_{}.png".format(
            trainset_info["dataset_name"], trainset_info["epochs"],
            trainset_info["batch_size"], ylabel)
        file_path = os.path.join(self.save_dir, fig_name)
        plt.savefig(file_path)
        plt.close('all')

    def save_array2csv(self, trainset_info, data, name):
        file_name = "{}_{}_eph{}_bs{}.csv".format(
            name, trainset_info["dataset_name"], trainset_info["epochs"],
            trainset_info["batch_size"])
        file_path = os.path.join(self.save_dir, file_name)
        np.savetxt(file_path, data, delimiter=",")

    def cal_error(self, predict, true):

        # predict and true have size [batch_size, 6]
        # [:, :3] is the translational position
        # [:, 3:] is the rotation in euler angle
        # De-normalise
        predict_np = self._denormalise(predict.cpu().detach().numpy())
        true_np = self._denormalise(true.cpu().detach().numpy())

        if self.radial:
            return predict_np - true_np
        else:
            # Get the euclidean distance
            error_distances = np.linalg.norm(
                (predict_np[:, :3] - true_np[:, :3]), axis=1)

            # Calculate the rotation from predicted (output) to true (input):
            #   diff * output = pos  =>  diff = pos * inv(output)
            # The rotation vector is the rotation axis multiplied by the angle,
            # so the angle is the magnitude of that vector.
            predict_rot = Rotation.from_quat(predict_np[:, 3:])
            true_rot = Rotation.from_quat(true_np[:, 3:])
            rot = true_rot * predict_rot.inv()
            diff_angle = rot.as_rotvec()
            error_rot = np.linalg.norm(diff_angle, axis=1)
            error_rot = np.rad2deg(error_rot)

            return [error_distances, error_rot]

    def _denormalise(self, pos):
        return pos * (self.norm_range["max"] -
                      self.norm_range["min"]) + self.norm_range["min"]

    def load_model_parameter(self, path):
        self.net.load_state_dict(torch.load(path))

    def print_avg_stat(self, losses, diff):
        avg_loss = np.average(losses)
        avg_diff = np.average(diff, axis=0)
        std_loss = np.std(losses)
        std_diff = np.std(diff, axis=0)
        print(self.trainset.dataset_name)
        print("Test avg loss: {:.5f} | avg[distance, angle] {}".format(
            avg_loss, avg_diff))
        print("Test std loss: {:.5f} | std[distance, angle] {}".format(
            std_loss, std_diff))

        return [avg_loss, std_loss], np.concatenate((avg_diff, std_diff),
                                                    axis=0)
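
# A standalone sketch of the angular-error computation used in cal_error() above,
# assuming unit quaternions in scipy's (x, y, z, w) order (the example values
# here are hypothetical):
import numpy as np
from scipy.spatial.transform import Rotation

predict_q = np.array([[0., 0., 0., 1.]])                             # identity
true_q = np.array([[0., 0., np.sin(np.pi / 8), np.cos(np.pi / 8)]])  # 45 deg about z

# diff * predict = true  =>  diff = true * inv(predict)
diff = Rotation.from_quat(true_q) * Rotation.from_quat(predict_q).inv()

# the norm of the rotation vector is the rotation angle in radians
print(np.rad2deg(np.linalg.norm(diff.as_rotvec(), axis=1)))          # -> [45.]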
コード例 #16
0
    one_class_data = []

    for iter in range(shots_N):
        for t in range(iter * emg_chunk_size, (iter + 1) * emg_chunk_size):
            hist.step(emg[t, :] + [myRand() for x in range(channels_N)])

        one_class_data.append(
            hist.vals.reshape((hist.N * hist.N * hist.N)).copy())

    data_learn.append(one_class_data)
data_learn = torch.tensor(data_learn, dtype=torch.float, requires_grad=False)
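# data_learn stacks the flattened N x N x N histograms (one per shot) into a
# float tensor for training.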

# train the Net

net = Net(hist.N * hist.N * hist.N)
learning(net=net,
         lr=.6,
         epoches_N=1400,
         data_learn=data_learn,
         targs_learn=targs_learn)
print(net(data_learn[0]))
print(net(data_learn[1]))
print(net(data_learn[2]))
print(net(data_learn[3]))
print(net(data_learn[4]))

# print("test time!")
#print(net(torch.tensor(data_test[0],dtype=torch.float)))
# print('\n\n\n')
#print(net(torch.tensor(data_test[1],dtype=torch.float)))
コード例 #17
0
                               transform=transforms.ToTensor(),
                               download=True)
test_dataset = datasets.MNIST(root='./data',
                              train=False,
                              transform=transforms.ToTensor())

train_loader = DataLoader(dataset=train_dataset,
                          batch_size=batch_size,
                          shuffle=True)
test_loader = DataLoader(dataset=test_dataset,
                         batch_size=batch_size,
                         shuffle=False)

print(train_loader)

net = Net(input_size, hidden_size, out_size)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

correct_train = 0
total_train = 0

for i, (images, labels) in enumerate(train_loader):
    # print("{} - {} - {}".format(i, images.size(), labels.size()))
    images = images.view(-1, 1 * 28 * 28)
    # print(images.size())

print("TRAINING ...")
for epoch in range(epochs):
    print(".")
    for i, (images, labels) in enumerate(train_loader):
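        # --- the listing is truncated here; a typical continuation could look
        # --- like the following (a sketch, not the original author's code):
        images = images.view(-1, 1 * 28 * 28)
        outputs = net(images)
        loss = criterion(outputs, labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()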
コード例 #18
0
ファイル: Builder.py プロジェクト: peteboyd/tobascco
    def setnet(self, tupl):
        (name, graph, volt) = tupl
        dim = volt.shape[1]
        self._net = Net(graph, dim=dim, options=self.options)
        self._net.name = name
        self._net.voltage = volt
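
    # Elsewhere in this collection (コード例 #30 and #32) this setter is reached
    # through the `net` property: build.net = (top, graph, voltage).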
コード例 #19
0
ファイル: xor-test.py プロジェクト: gmpetrov/nn-py
from Net import Net
import random


if __name__ == "__main__":

    # Net configuration: 2 inputs, two hidden layers of 3 neurons, 1 output
    config = [2, 3, 3, 1]

    nn = Net(config)

    for i in range(0, 2000):
        # Generate dataset
        a = 0.0 if random.uniform(0.0, 1.0) <= 0.5 else 1.0
        b = 0.0 if random.uniform(0.0, 1.0) > 0.5 else 1.0

        # Target value
        res = int(a) ^ int(b)

        inputVals = []
        inputVals.append(a)
        inputVals.append(b)

        targetVals = []
        targetVals.append(res)

        nn.feedForward(inputVals)
        print "Inputs : " + str(inputVals)

        nn.backPropagation(targetVals)
        print "Target : " + str(targetVals[0])
コード例 #20
0
ファイル: train7.py プロジェクト: ZhuJiaqi9905/ai_poem
# Load the training data: seven-character (qiyan) classical poems
train_data_path = root_path + '\\Data\\qtrain'
poem_line_lst7, poem_vec_lst7 = get_train_data(train_data_path, wd2Idx, 7)


# Hyperparameters
LR = 0.001
BATCH_SIZE = 128
vocab_size = 6773
embed_size = 200
hidden_size = 200

start_epoch = 0   # training resumes from epoch start_epoch
sentence_len = 7  # seven-character lines
net7 = Net(sentence_len=sentence_len, batch_size=BATCH_SIZE, vocab_size=vocab_size,
           embed_size=embed_size, hidden_size=hidden_size)
loss_function = torch.nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net7.parameters(), lr=LR)

# Load the pretrained word vectors
word_vec_net = nn.Sequential(nn.Embedding(vocab_size, embed_size), nn.Embedding(vocab_size, embed_size))
wordVec_path = root_path + '\\Data\\word_vector_for_rnn.pth'
word_vec_net.load_state_dict(torch.load(wordVec_path, map_location=torch.device('cpu')))
net7.embedding.weight.data.copy_(word_vec_net[0].weight.data)

# Resume from a saved checkpoint, then train
net7.load_state_dict(torch.load(root_path + '\\Models\\rnn\\model7_epoch_1.pth')['model'])
model_path = root_path + '\\Models\\rnn'
train(net7, loss_function, optimizer, poem_vec_lst7, 5, model_path)

コード例 #21
0
def load_model():
    segnet_save_path = os.path.join(model_dir,'model.pth')  # model path to load
    model = Net().to(device)   # replace Net with your own model class
    model.load_state_dict(torch.load(segnet_save_path))
    return model
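
# A hypothetical usage sketch (assumes `model_dir`, `device`, and the Net class
# are defined as above):
model = load_model()
model.eval()  # switch to inference mode before predicting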
コード例 #22
0
                if iter % (maxiter // 100) == 0:
                    print("iteration {}: loss = {}".format(
                        iter, self.total_loss))

    return Poisson1dModel()


fig, ax = plt.subplots(4, 3, figsize=(16, 16))

basis1 = FourierBasis1D(9, l=-10., u=10.)
net1 = Basis_Net([1, 50, 50, 50, 9], basis1)

basis2 = FourierBasis1D(17, l=-10., u=10.)
net2 = Basis_Net([1, 50, 50, 50, 17], basis2)

net3 = Net([1, 50, 50, 50, 1])

net4 = Net([1, 50, 50, 50, 17, 1])

net_table = [net3, net4, net1, net2]
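# (Presumed mapping, judging from the row labels set below: net3/net4 are plain
# PINNs with 3 and 4 hidden layers, while net1/net2 use Fourier bases of size
# 2k+1 = 9 and 17, i.e. k = 4 and k = 8.)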
for i, net in enumerate(net_table):
    print('{}-th net'.format(i))
    model = construct_model(net)
    model.plot(model.net, ax[i][0])
    model.train()
    model.plot(model.net, ax[i][1])
    model.post_process(ax[i][2])
ax[0][0].set_ylabel('PINN(hidden-layer=3)')
ax[1][0].set_ylabel('PINN(hidden-layer=4)')
ax[2][0].set_ylabel('PINN-PD(k=4)')
ax[3][0].set_ylabel('PINN-PD(k=8)')
コード例 #23
0
class PolicyValue:
    def __init__(self, model_file=None):
        self.board_width = board_width
        self.board_height = board_height
        # L2 regularization strength
        self.l2_const = 1e-4
        # policy-value network
        self.net = Net().cuda()
        # optimizer (the L2 term is applied via weight_decay)
        self.optimizer = optim.Adam(self.net.parameters(),
                                    weight_decay=self.l2_const)

        # load an initial model if one was provided
        if model_file:
            net_params = torch.load(model_file)
            self.net.load_state_dict(net_params)
            print("model load success")

    def policy_value(self, state_batch):
        # current board states
        state_batch = torch.FloatTensor(state_batch).cuda()
        # policy and value
        log_act_probs, value = self.net(state_batch)
        act_probs = np.exp(log_act_probs.data.cpu().numpy())

        return act_probs, value.data.cpu().numpy()

    def policy_value_fn(self, board):
        # legal moves
        legal_positions = board.availables
        # current position
        current_state = np.ascontiguousarray(board.current_state().reshape(
            -1, 4, self.board_width, self.board_height))
        # per-position move probabilities and a scalar evaluation of the position
        log_act_probs, value = self.net(
            torch.from_numpy(current_state).cuda().float())
        act_probs = np.exp(log_act_probs.data.cpu().numpy().flatten())
        act_probs = zip(legal_positions, act_probs[legal_positions])
        value = value.data[0][0]
        return act_probs, value

    # parameter update
    def train(self, state_batch, mcts_probs, winner_batch, lr):
        # self-play data
        state_batch = torch.FloatTensor(state_batch).cuda()
        # MCTS move probabilities
        mcts_probs = torch.FloatTensor(mcts_probs).cuda()
        # game winners
        winner_batch = torch.FloatTensor(winner_batch).cuda()

        # zero the gradients
        self.optimizer.zero_grad()

        # set the learning rate
        for param_group in self.optimizer.param_groups:
            param_group['lr'] = lr

        # policy and value
        log_act_probs, value = self.net(state_batch)
        # loss = (z - v)^2 - pi^T * log(p) + c||theta||^2
        # (the c||theta||^2 term comes from the optimizer's weight_decay)
        value_loss = F.mse_loss(value.view(-1), winner_batch)
        policy_loss = -torch.mean(torch.sum(mcts_probs * log_act_probs, 1))
        loss = value_loss + policy_loss
        # backpropagate
        loss.backward()
        # update the parameters
        self.optimizer.step()
        # entropy of the policy distribution
        entropy = -torch.mean(
            torch.sum(torch.exp(log_act_probs) * log_act_probs, 1))

        return loss.item(), entropy.item()

    # save the model
    def save_model(self, model_file):
        net_params = self.net.state_dict()
        torch.save(net_params, model_file)
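
# A minimal usage sketch (hypothetical; assumes the board_width/board_height
# globals and a CUDA device are available):
pv = PolicyValue()
states = np.random.rand(8, 4, pv.board_width, pv.board_height).astype(np.float32)
act_probs, values = pv.policy_value(states)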
コード例 #24
0
ファイル: Test.py プロジェクト: basti-shi031/billeslook
    t = time.time()
    t = int(t)
    cookie = "Hm_lvt_d472e16483828f86781cd857ad2ba196 = 1572580903, 1572580916,1572587444,1572607220;PHPSESSID = 836sjjgrk9d372bhe0ie4lpi93;billeslook_token = user_login_f30f390695110756fb873eeb3d0ef338;Hm_lpvt_d472e16483828f86781cd857ad2ba196 = "
    headers['Cookie'] = cookie + str(t)
    # set up the timing trigger

    current_time = int(round(time.time() * 1000))
    dest_time = dest_times[time_index]
    delta_time = dest_time - current_time
    delta_time_second = delta_time / 1000
    print(delta_time_second)
    time.sleep(delta_time_second)

    print(time.time())

    response = Net.create_order(product_ids[current_index],
                                sku_ids[current_index], 1, '0', headers)

    if response.status_code == 200:
        order_sn = json.loads(response.text)['data']['order_sn']

        t = time.time()
        t = int(t)
        cookie = "Hm_lvt_d472e16483828f86781cd857ad2ba196 = 1572580903, 1572580916,1572587444,1572607220;PHPSESSID = 836sjjgrk9d372bhe0ie4lpi93;billeslook_token = user_login_f30f390695110756fb873eeb3d0ef338;Hm_lpvt_d472e16483828f86781cd857ad2ba196 = "
        headers['Cookie'] = cookie + str(t)
        print(time.time())
        confirm_response = Net.confirm_order(order_sn, 44210, "", "", 0, "1",
                                             "", headers)
        print(time.time())
        print(confirm_response.status_code)
        print(confirm_response.text)
コード例 #25
0
import sys, os, os.path
sys.path.append(os.path.expanduser("G/coconut"))
from fileutils.htk import *
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from Net import Net
import cPickle
import numpy

# Load model
net = Net(50, 200, 4) #.cuda()
net.load_state_dict(torch.load('model.pt', map_location = lambda storage, loc: storage))

INPUT_FILE = sys.argv[1]        # Feature file containing 6,669-dim HTK-format features
OUTPUT_FILE = sys.argv[2]       # RTTM file to write the results to

# Load PCA matrices
with open('pca-self.pkl', 'rb') as f:
    data = cPickle.load(f)
mask, mu, sigma, V, w, b = data['mask'], data['mu'], data['sigma'], data['V'], data['w'], data['b']
pca = lambda feat: ((feat[:, mask] - mu) / sigma).dot(V) * w + b
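# The transform above: select columns via `mask`, standardise with (mu, sigma),
# project onto the PCA basis V, then apply a per-dimension scale (w) and offset (b).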

# Load input feature and predict
feat = pca(readHtk(INPUT_FILE))
input = Variable(torch.from_numpy(numpy.expand_dims(feat, 0).astype('float32'))) #.cuda()
input = pack_padded_sequence(input, [len(feat)], batch_first = True)
output = net(input).data.data.cpu().numpy()

# Print the predictions in RTTM format
コード例 #26
0
    num_features = test_dataset[0].x.shape[1]  # supposed to be 39

    print_info(
        f'Dataset: train: size {len(train_dataset)}, test: size {len(test_dataset)}, val: size {len(val_dataset)}'
    )
    print_info(f'max_nodes = {max_nodes}, num_features = {num_features}')

    # construct batch loaders
    test_loader = DenseDataLoader(test_dataset, batch_size=20)
    val_loader = DenseDataLoader(val_dataset, batch_size=20)
    train_loader = DenseDataLoader(train_dataset, batch_size=20)

    global device
    device = torch.device(
        'cuda' if torch.cuda.is_available() and args.use_cuda else 'cpu')
    model = Net(max_nodes=max_nodes, num_channels=num_features,
                num_outputs=1).to(device)
    if args.load_path is not None and os.path.isfile(args.load_path):
        model.load_state_dict(torch.load(args.load_path))
        print_info('Successfully loaded the GNN model from {}'.format(
            args.load_path))
    else:
        print_info('Train with random initial GNN model')

    if not args.test:
        save_dir = os.path.join(current_dir, 'trained_models',
                                'universal_value_function', dataset_name,
                                get_time_stamp())

        os.makedirs(save_dir, exist_ok=True)

        global optimizer
コード例 #27
0
## Normalize all feature values into the range 0 to 1.
#
#raw_data = pd.read_excel('music-features.xlsx', header=None)
#raw_data.drop(raw_data.columns[0], axis=1, inplace=True)
#raw_data.drop(0, axis=0, inplace=True)
#
#normalized = min_max_norm(raw_data, 14)
#normalized.to_excel('music-features-processed.xlsx')

X_train, Y_train, X_test, Y_test = load_data('music-features-processed.xlsx',
                                             features_num,
                                             label_loc,
                                             features_selector=selector,
                                             spliting_ratio=0.8)

net = Net(features_num, hidden_num, classes_num)
train_model(net, X_train, Y_train, lr=learning_rate, epochs=epochs_num)

accuracy, Y_pred = test_model(net, X_test, Y_test)
## Save relevant parameters for analysis.
#if accuracy > 40:
#    saveParas(net, X_test, hidden_num+1)
#    torch.save(net.state_dict(), 'net_model.pt')
#    saveDataset(X_test, Y_test)

mat = confusion(X_test.size(0), classes_num, Y_pred, Y_test)
print("Confusion Matrix:")
print(mat)
F1_score(mat)

print("\n========================== END ==================================")
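
# A rough sketch of what the confusion() helper used above might compute
# (hypothetical, not the project's implementation): rows index the true class,
# columns the predicted class.
import numpy as np

def confusion_sketch(n_samples, n_classes, y_pred, y_true):
    mat = np.zeros((n_classes, n_classes), dtype=int)
    for i in range(n_samples):
        mat[int(y_true[i]), int(y_pred[i])] += 1
    return mat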
コード例 #29
0
ファイル: Pong.py プロジェクト: person0709/barebone_pong
from Screen import Screen
from Paddle import Paddle
from Net import Net
import constants

import time


## Graphics setup
screen = Screen(constants.WIDTH, constants.HEIGHT)

player1 = Paddle(screen, 3, constants.HEIGHT/2-1, constants.BLACK)
player2 = Paddle(screen, constants.WIDTH-4, constants.HEIGHT/2-1, constants.BLACK)

net = Net(screen, constants.BLACK)

screen.clear()

player1.draw()
player2.draw()
net.draw()
screen.drawScore(5, 9)


time.sleep(2)

player1.move(1500)
player1.draw()
コード例 #30
0
ファイル: topcryst.py プロジェクト: sauradeep93/tobascco
    def _build_structures_from_top(self):
        if not self._topologies:
            warning("No topologies found!")
            Terminate()

        csvinfo = CSV(name='%s_info' % (self.options.jobname))
        csvinfo.set_headings('topology', 'sbus', 'edge_count', 'time',
                             'space_group')
        csvinfo.set_headings('edge_length_err', 'edge_length_std',
                             'edge_angle_err', 'edge_angle_std')
        self.options.csv = csvinfo
        run = Generate(self.options, self.sbu_pool)
        inittime = time()
        if self.options.count_edges_along_lattice_dirs:
            lattfile = open("edge_counts.csv", "w")
            lattfile.writelines("topology,Na,Nb,Nc\n")
        for top, graph in self._topologies.items():
            if self.options.count_edges_along_lattice_dirs:
                info(
                    "Computing Edge lengths along each lattice direction for %s"
                    % (top))
                n = Net(graph)
                n.voltage = self._topologies.voltages[top]
                n.simple_cycle_basis()
                n.get_lattice_basis()
                n.get_cocycle_basis()
                edge_str = n.print_edge_count()
                lattfile.writelines("%s,%s\n" % (top, edge_str))
            elif self.options.show_barycentric_net_only:
                info("Preparing barycentric embedding of %s" % (top))
                self._check_barycentric_embedding(
                    graph, self._topologies.voltages[top])
            else:

                build = Build(self.options)
                build.net = (top, graph, self._topologies.voltages[top])
                if self.options.sbu_combinations:
                    combinations = run.combinations_from_options()
                else:
                    combinations = run.generate_sbu_combinations(
                        incidence=build.net_degrees())

                # materialise the generator so the emptiness check doesn't consume it
                combinations = list(combinations)
                if not combinations:
                    debug("Net %s does not support the same" % (top) +
                          " connectivity offered by the SBUs")
                for combo in combinations:
                    build.sbus = list(set(combo))
                    # check node incidence
                    if build.met_met_bonds and run.linear_sbus_exist:
                        # add linear organics
                        debug(
                            "Metal-type nodes attached to metal-type nodes. " +
                            "Attempting to insert 2-c organic SBUs between these nodes."
                        )
                        for comb in run.yield_linear_org_sbu(combo):
                            build.sbus = list(set(comb))
                            self.embed_sbu_combo(top, comb, build)
                    elif build.met_met_bonds and not run.linear_sbus_exist:
                        debug(
                            "Metal-type nodes are attached to metal-type nodes. "
                            +
                            "No linear SBUs exist in database, so the structure "
                            + "will have metal - metal SBUs joined")
                        self.embed_sbu_combo(top, combo, build)
                    else:
                        self.embed_sbu_combo(top, combo, build)

        if self.options.count_edges_along_lattice_dirs:
            lattfile.close()
        finaltime = time() - inittime
        info("Topcryst completed after %f seconds" % finaltime)
        Terminate()
コード例 #31
0
ファイル: ec.py プロジェクト: wormtql/AI_homework
import torch
import random
from deap import base, creator, tools
from torch.utils.data import DataLoader, TensorDataset
import torch.nn.functional as F
import numpy as np

from Utils import Utils
from Net import Net

print("start")
net_depth = 2
net_filters = 8
in_channels = 9

net = Net(net_depth, net_filters, in_channels)
net.load_state_dict(torch.load("model/model_2_8.pt"))
net.eval()

psize = net.param_count()
print(psize)

train_loader = DataLoader(TensorDataset(*Utils.load_npy_to_tensor("train")),
                          batch_size=128, shuffle=True)
train_iter = iter(train_loader)

test_loader = DataLoader(TensorDataset(*Utils.load_npy_to_tensor("test")),
                         batch_size=100000, shuffle=True)

print("load data finished", len(train_loader))
コード例 #32
0
ファイル: topcryst.py プロジェクト: sauradeep93/tobascco
    def _build_structures(self):
        """Pass the sbu combinations to a MOF building algorithm."""
        run = Generate(self.options, self.sbu_pool)
        # generate the combinations of SBUs to build
        if self.options.sbu_combinations:
            combinations = run.combinations_from_options()
        else:
            # remove SBUs if not listed in options.organic_sbus or options.metal_sbus
            combinations = run.generate_sbu_combinations()
        csvinfo = CSV(name='%s_info' % (self.options.jobname))
        csvinfo.set_headings('topology', 'sbus', 'edge_count', 'time',
                             'space_group', 'net_charge')
        csvinfo.set_headings('edge_length_err', 'edge_length_std',
                             'edge_angle_err', 'edge_angle_std')
        self.options.csv = csvinfo
        # generate the MOFs.
        if self.options.count_edges_along_lattice_dirs:
            lattfile = open("edge_counts.csv", "w")
            lattfile.writelines("topology,Na,Nb,Nc\n")
        inittime = time()
        for combo in combinations:
            node_degree = [i.degree for i in set(combo)]
            node_lin = [i.linear for i in set(combo)]
            degree = sorted(
                [j for i, j in zip(node_lin, node_degree) if not i])
            # find degrees of the sbus in the combo
            if not self._topologies:
                warning("No topologies found! Exiting.")
                Terminate()
            debug("Trying " + self.combo_str(combo))
            for top, graph in self._topologies.items():
                if self.options.use_builds:
                    try:
                        build = self._stored_builds[top]
                    except KeyError:
                        build = Build(self.options)
                        build.net = (top, graph,
                                     self._topologies.voltages[top])
                else:
                    build = Build(self.options)
                    build.net = (top, graph, self._topologies.voltages[top])
                build.sbus = list(set(combo))
                #build.get_automorphisms()
                if self.options.count_edges_along_lattice_dirs:
                    info(
                        "Computing Edge lengths along each lattice direction for %s"
                        % (top))
                    n = Net(graph)
                    n.voltage = self._topologies.voltages[top]
                    n.simple_cycle_basis()
                    n.get_lattice_basis()
                    n.get_cocycle_basis()
                    edge_str = n.print_edge_count()
                    lattfile.writelines("%s,%s\n" % (top, edge_str))
                elif self.options.show_barycentric_net_only:
                    info("Preparing barycentric embedding of %s" % (top))
                    #print("CHECK", top, build.net.graph.number_of_selfloops())
                    self._check_barycentric_embedding(
                        graph, self._topologies.voltages[top])
                else:
                    if build.check_net:
                        # check node incidence
                        if build.met_met_bonds and run.linear_sbus_exist and not run.linear_in_combo(
                                combo):
                            # add linear organics
                            debug(
                                "Metal-type nodes attached to metal-type nodes. "
                                +
                                "Attempting to insert 2-c organic SBUs between these nodes."
                            )
                            for comb in run.yield_linear_org_sbu(combo):
                                if self.options.use_builds:
                                    try:
                                        build = self._stored_builds[top]
                                    except KeyError:
                                        build = Build(self.options)
                                else:
                                    build = Build(self.options)
                                    build.sbus = list(set(comb))
                                    build.net = (
                                        top, graph,
                                        self._topologies.voltages[top])
                                self.embed_sbu_combo(top, comb, build)
                        elif build.met_met_bonds and run.linear_in_combo(
                                combo):
                            self.embed_sbu_combo(top, combo, build)

                        elif build.met_met_bonds and not run.linear_sbus_exist:
                            debug(
                                "Metal-type nodes are attached to metal-type nodes. "
                                +
                                "No linear SBUs exist in database, so the structure "
                                + "will have metal - metal SBUs joined")
                            self.embed_sbu_combo(top, combo, build)
                        elif not build.met_met_bonds:
                            self.embed_sbu_combo(top, combo, build)

                    else:
                        debug("Net %s does not support the same" % (top) +
                              " connectivity offered by the SBUs")

        if self.options.count_edges_along_lattice_dirs:
            lattfile.close()
        finaltime = time() - inittime
        info("Topcryst completed after %f seconds" % finaltime)
        if self.options.get_run_info:
            info("Writing run information to %s" % self.options.csv.filename)
            self.options.csv.write()
        if self.options.store_net and self._stored_nets:
            info("Writing all nets to nets_%s.pkl" % self.options.jobname)
            f = open("nets_%s.pkl" % self.options.jobname, 'wb')
            pickle.dump(self._stored_nets, f)
            f.close()
        Terminate()
コード例 #33
0
ファイル: Builder.py プロジェクト: peteboyd/tobascco
class Build(object):
    """Build a MOF from SBUs and a Net."""
    def __init__(self, options=None):
        self.name = ""
        self._net = None
        self.options = options
        self._sbus = []
        self.scale = 1.
        self._vertex_sbu = {}
        self._edge_assign = {}
        self._sbu_degrees = None
        self._inner_product_matrix = None
        self.success = False
        self.embedded_net = None

    def _obtain_cycle_bases(self):
        self._net.simple_cycle_basis()
        i = self._net.get_lattice_basis()
        if i < 0:
            return i
        #self._net.get_cycle_basis()
        self._net.get_cocycle_basis()
        return i

    def fit_function(self, params, data):
        val = np.zeros(len(params))
        for p in params:
            ind = int(p.split("_")[1])
            val[ind] = data[ind][int(p.value)]
        return val
   
    def assign_vertices(self):
        """Assign SBUs to particular vertices in the graph"""
        # TODO(pboyd): assign sbus intelligently, based on edge lengths
        # and independent cycles in the net... ugh
        # set up SBU possibilities on each node,
        # score them based on their orientation
        # score based on alternate organic/metal SBU
        # If > 2 SBUs: automorphism of vertices?
       
        # get number of unique SBUs
        # get number of nodes which support each SBU

        # get geometric match for each node with each SBU
        # metal - organic bond priority (only if metal SBU and organic SBU have same incidence)
        # assign any obvious ones: ie. if there is only one match between sbu and vertex valency
        temp_vertices = self.sbu_vertices[:]
        for vert in self.sbu_vertices:
            vert_deg = self._net.graph.degree(vert)
            
            sbu_match = [i for i in self._sbus if i.degree == vert_deg]
            if len(sbu_match) == 1:
                bu = deepcopy(sbu_match[0])
                temp_vertices.pop(temp_vertices.index(vert))
                self._vertex_sbu[vert] = bu
                bu.vertex_id = vert
                [cp.set_sbu_vertex(bu.vertex_id) for cp in bu.connect_points]
        # the remaining we will need to choose (automorphisms?)
        #orbits = self._net.graph.automorphism_group(orbits=True)[1]
        init_verts = []
        for vert in temp_vertices[:]:
            neighbours = self._net.original_graph.neighbors(vert)
            neighbour_sbus = [i for i in neighbours if i in self._vertex_sbu.keys()]
            if neighbour_sbus:
                init_verts.append(vert)
                temp_vertices.pop(temp_vertices.index(vert))
        # re-order so the vertices with neighbours already assigned will be assigned
        # new SBUs first
        temp_vertices = init_verts + temp_vertices
        for vert in temp_vertices[:]:
            # For DiGraphs in networkx, neighbors function only returns the
            # successors of this vertex. Add predecessors to get full function
            neighbours = self._net.original_graph.neighbors(vert) + \
                         self._net.original_graph.predecessors(vert)
            
            vert_deg = self._net.graph.degree(vert)

            sbu_match = [i for i in self._sbus if i.degree == vert_deg]
            neighbour_sbus = [i for i in neighbours if i in self._vertex_sbu.keys()]
            temp_assign = []
            for sbu in sbu_match:
                # decide to assign if neighbours are of opposite 'type'
                good_by_type = True
                for neighbour in neighbour_sbus:
                    if sbu.is_metal == self._vertex_sbu[neighbour].is_metal:
                        good_by_type = False
                if good_by_type:
                    temp_assign.append(sbu)

            if len(temp_assign) == 1:
                bu = deepcopy(temp_assign[0])
                temp_vertices.pop(temp_vertices.index(vert))
                self._vertex_sbu[vert] = bu
                bu.vertex_id = vert
                [cp.set_sbu_vertex(bu.vertex_id) for cp in bu.connect_points]
            elif len(temp_assign) > 1:
                self._vertex_sbu[vert] = self.select_sbu(vert, temp_assign) 
                temp_vertices.pop(temp_vertices.index(vert))
                bu = self._vertex_sbu[vert]
                bu.vertex_id = vert
                [cp.set_sbu_vertex(bu.vertex_id) for cp in bu.connect_points]
            else:
                self._vertex_sbu[vert] = self.select_sbu(vert, sbu_match) 
                temp_vertices.pop(temp_vertices.index(vert))
                bu = self._vertex_sbu[vert]
                bu.vertex_id = vert
                [cp.set_sbu_vertex(bu.vertex_id) for cp in bu.connect_points]
        #for vert in self.sbu_vertices:
        #    # is there a way to determine the symmetry operations applicable
        #    # to a vertex?
        #    # if so, we could compare with SBUs...
        #    vert_deg = self._net.graph.degree(vert)
        #    sbu_match = [i for i in self._sbus if i.degree == vert_deg]
        #    # match tensor product matrices
        #    if len(sbu_match) > 1:
        #        self._vertex_sbu[vert] = self.select_sbu(vert, sbu_match) 
        #        bu = self._vertex_sbu[vert]
        #    elif len(sbu_match) == 0:
        #        error("Didn't assign an SBU to vertex %s"%vert)
        #        Terminate(errcode=1) 
        #    else:
        #        self._vertex_sbu[vert] = deepcopy(sbu_match[0])
        #        bu = self._vertex_sbu[vert]
        #    bu.vertex_id = vert
        #    [cp.set_sbu_vertex(bu.vertex_id) for cp in bu.connect_points]

        # check to ensure all sbus were assigned to vertices.
        collect = [(sbu.name, sbu.identifier) for vert, sbu in self._vertex_sbu.items()]
        if len(set(collect)) < len(self._sbus):
            remain = [s for s in self._sbus if (s.name, s.identifier) not in 
                        set(collect)]
            closest_matches = [self.closest_match_vertices(sbu) 
                                        for sbu in remain]
            taken_verts = []
            for id, bu in enumerate(remain):
                cm = closest_matches[id]
                inds = np.where([np.allclose(x[0], cm[0][0], atol=0.1) for x in cm])
                replace_verts = [i[1] for i in np.array(cm)[inds] if 
                                    i[1] not in taken_verts]
                taken_verts += replace_verts
                for v in replace_verts:
                    bb = deepcopy(bu)
                    bb.vertex_id = v
                    [cp.set_sbu_vertex(v) for cp in bb.connect_points]
                    self._vertex_sbu[v] = bb

    def closest_match_vertices(self, sbu):
        g = self._net.graph

        if sbu.two_connected and not sbu.linear:
            cp_v = normalized_vectors([
                self.vector_from_cp_intersecting_pt(cp, sbu) for
                                   cp in sbu.connect_points])
        else:
            cp_v = normalized_vectors([self.vector_from_cp_SBU(cp, sbu) for
                                   cp in sbu.connect_points])
        ipv = self.scaled_ipmatrix(np.inner(cp_v, cp_v))

        inds = np.triu_indices(ipv.shape[0], k=1)
        max, min = np.absolute(ipv[inds]).max(), np.absolute(ipv[inds]).min()
        cmatch = []
        for v in self.sbu_vertices:
            ee = self._net.neighbours(v) 
            l_arcs = self._net.lattice_arcs[self._net.return_indices(ee)]
            lai = np.dot(np.dot(l_arcs, self._net.metric_tensor), l_arcs.T)
            ipc = self.scaled_ipmatrix(lai)
            imax, imin = np.absolute(ipc[inds]).max(), np.absolute(ipc[inds]).min()
            mm = np.sum(np.absolute([max-imax, min-imin]))
            cmatch.append((mm, v))
        return sorted(cmatch)

    def select_sbu(self, v, sbus):
        """This is a hackneyed way of selecting the right SBU,
        will use until it breaks something.

        """
        edges = self._net.neighbours(v) 
        indices = self._net.return_indices(edges)
        lattice_arcs = self._net.lattice_arcs[indices]
        ipv = np.dot(np.dot(lattice_arcs, self._net.metric_tensor), lattice_arcs.T)
        ipv = self.scaled_ipmatrix(ipv)
        # just take the max and min angles... 
        inds = np.triu_indices(ipv.shape[0], k=1) 
        max, min = np.absolute(ipv[inds]).max(), np.absolute(ipv[inds]).min()
        minmag = 15000.
        # There are likely many problems with this method. Need to ensure that
        # there are enough nodes to assign the breadth of SBUs used to build the MOF.
        sbus_assigned = [i.name for i in self._vertex_sbu.values()]
        neighbours_assigned = {}
        for nn in self.sbu_joins[v]:
            try:
                nnsbu = self._vertex_sbu[nn]
                #neighbours_assigned[nn] = (nnsbu.name, nnsbu.is_metal)
                neighbours_assigned[nn] = nnsbu.is_metal
            except KeyError:
                neighbours_assigned[nn] = None

        not_added = [i.name for i in sbus if i.name not in sbus_assigned]

        for sbu in sbus:
            if sbu.two_connected and not sbu.linear:
                vects = np.array([
                    self.vector_from_cp_intersecting_pt(cp, sbu) for cp in 
                              sbu.connect_points])
            else:
                vects = np.array([self.vector_from_cp_SBU(cp, sbu) for cp in 
                              sbu.connect_points])
            ipc = self.scaled_ipmatrix(np.inner(vects, vects))
            imax, imin = np.absolute(ipc[inds]).max(), np.absolute(ipc[inds]).min()
            mm = np.sum(np.absolute([max-imax, min-imin]))
            if any([sbu.is_metal == nn for nn in neighbours_assigned.values()]):
                mm *= 2.
            elif sbu.name in not_added:
                mm = 0.

            if mm < minmag:
                minmag = mm
                assign = sbu
        return deepcopy(assign)

    def obtain_edge_vector_from_cp(self, cp1):
        """Create an edge vector from an sbu's connect point"""
        e1 = self.vector_from_cp(cp1)
        len1 = np.linalg.norm(e1[:3])
        dir = e1[:3]/len1
        return dir * self.options.sbu_bond_length

    def scaled_ipmatrix(self, ipmat):
        """Like normalized inner product matrix, however the 
        diagonal is scaled to the longest vector."""
        ret = np.empty_like(ipmat)
        max = np.diag(ipmat).max()
        for (i,j), val in np.ndenumerate(ipmat):
            if i==j:
                ret[i,j] = val / max 
            if i != j:
                v = val/np.sqrt(ipmat[i,i])/np.sqrt(ipmat[j,j])
                ret[i,j] = v
                ret[j,i] = v
        return ret 

    def normalized_ipmatrix(self, vectors):
        v = normalized_vectors(vectors)
        return np.inner(v,v)

    def assign_edge_labels(self, vertex):
        """Edge assignment is geometry dependent. This will try to 
        find the best assignment based on inner product comparison
        with the non-placed lattice arcs."""
        sbu = self._vertex_sbu[vertex]
        local_arcs = sbu.connect_points
        edges = self._net.neighbours(vertex) 
        indices = self._net.return_indices(edges)
        lattice_arcs = self._net.lattice_arcs
        e_assign = {}
        if sbu.two_connected and not sbu.linear:
            vects = [self.vector_from_cp_intersecting_pt(cp, sbu) for cp in local_arcs]
        else:
            vects = [self.vector_from_cp_SBU(cp, sbu) for cp in local_arcs]
        norm_cp = normalized_vectors(vects)
        li = self.normalized_ipmatrix(vects)
        min, chi_diff=15000., 15000.
        cc, assign = None, None
        debug("%s assigned to %s"%(sbu.name, vertex))
        cell = Cell()
        cell.mkcell(self._net.get_3d_params())
        if self._net.ndim == 2:
            lattice_arcs = np.hstack((lattice_arcs, np.zeros((lattice_arcs.shape[0],1))))
        lattice_vects = np.dot(lattice_arcs, cell.lattice)
        count = 0
        for e in itertools.permutations(edges):
            count += 1
            indices = self._net.return_indices(e)
            #node_arcs = lattice_arcs[indices]*\
            #        self._net.metric_tensor*lattice_arcs[indices].T
            #max = node_arcs.max()
            #la = np.empty((len(indices),len(indices)))
            #for (i,j), val in np.ndenumerate(node_arcs):
            #    if i==j:
            #        la[i,j] = val/max
            #    else:
            #        v = val/np.sqrt(node_arcs[i,i])/np.sqrt(node_arcs[j,j])
            #        la[i,j] = v
            #        la[j,i] = v
            # using tensor product of the incidences
            coeff = np.array([-1. if j in self._net.in_edges(vertex)
                               else 1. for j in e])
            #td = np.tensordot(coeff, coeff, axes=0)
            #diff = np.multiply(li, td) - la
            #inds = np.triu_indices(diff.shape[0], k=1) 
            #xmax, xmin = np.absolute(diff[inds]).max(), np.absolute(diff[inds]).min()
            #mm = np.sum(diff)
            #mm = np.sum(np.absolute(np.multiply(li,td) - la))
            # NB Chirality matters!!!
            # get the cell
            lv_arc = (np.array(lattice_vects[indices]) 
                                        * coeff[:, None])
            # get the lattice arcs

            mm = self.get_chiral_diff(e, lv_arc, vects)

            #norm_arc = normalized_vectors(lv_arc)
            # orient the lattice arcs to the first sbu vector...
            #print count , self.chiral_match(e, oriented_arc, norm_cp)
            #print count, np.allclose(norm_cp, oriented_arc, atol=0.01)
            #or1 = np.zeros(3)
            #or2 = np.array([3., 3., 0.])
            #xyz_str1 = "C %9.5f %9.5f %9.5f\n"%(or1[0], or1[1], or1[2])
            #xyz_str2 = "C %9.5f %9.5f %9.5f\n"%(or2[0], or2[1], or2[2])
            #for ind, (i, j) in enumerate(zip(norm_cp,oriented_arc)):
            #    at = atoms[ind]
            #    pos = i[:3] + or1
            #    xyz_str1 += "%s %9.5f %9.5f %9.5f\n"%(at, pos[0], pos[1], pos[2])
            #    pos = j + or2
            #    xyz_str2 += "%s %9.5f %9.5f %9.5f\n"%(at, pos[0], pos[1], pos[2]) 

            #xyz_file = open("debugging.xyz", 'a')
            #xyz_file.writelines("%i\ndebug\n"%(len(norm_cp)*2+2))
            #xyz_file.writelines(xyz_str1)
            #xyz_file.writelines(xyz_str2)
            #xyz_file.close()

            #print "arc CI", CI_ar, "cp  CI", CI_cp
            #if (mm < min) and (diff < chi_diff):
            if (mm <= min):# and self.chiral_match(e, norm_arc, norm_cp):#, tol=xmax): 
                min = mm
                assign = e
        #CI = self.chiral_invariant(assign, norm_arc)
        #axis = np.array([1., 3., 1.])
        #angle = np.pi/3.
        #R = rotation_matrix(axis, angle)
        #new_norm = np.dot(R[:3,:3], norm_arc.T)
        #nCI = self.chiral_invariant(assign, new_norm.T)
        #print "Rotation invariant?", CI, nCI
        # NB special MULT function for connect points
        cp_vert = [i[0] if i[0] != vertex else i[1] for i in assign]
        #print 'CI diff', chi_diff
        #print 'tensor diff', mm
        sbu.edge_assignments = assign
        for cp, v in zip(local_arcs, cp_vert):
            cp.vertex_assign = v

    def chiral_invariant(self, edges, vectors):
        edge_weights = [float(e[2][1:]) for e in edges]
        # just rank in terms of weights.......
        edge_weights = [float(sorted(edge_weights).index(e)+1) for e in edge_weights]
        vrm = raw_moment(edge_weights, vectors)
        com = vrm(0,0,0)
        (mx, my, mz) = (vrm(1,0,0)/com, 
                        vrm(0,1,0)/com, 
                        vrm(0,0,1)/com)
        vcm = central_moment(edge_weights, vectors, (mx, my, mz))
        return get_CI(vcm)

    def get_chiral_diff(self, edges, arc1, arc2, count=[]):
        narcs1 = normalized_vectors(arc1)
        narcs2 = normalized_vectors(arc2)
        ### DEBUG
        #atoms = ["H", "F", "He", "Cl", "N", "O"]
        R = rotation_from_vectors(narcs2, narcs1)
        #FIXME(pboyd): ensure that this is the right rotation!!! I think it's supposed to rotate narcs2
        narcs1 = (np.dot(R[:3,:3], narcs1.T)).T
        #narcs2 = (np.dot(R[:3,:3], narcs2.T)).T
        #or1 = np.zeros(3)
        #or2 = np.array([3., 3., 0.])
        #xyz_str1 = "C %9.5f %9.5f %9.5f\n"%(or1[0], or1[1], or1[2])
        #xyz_str2 = "C %9.5f %9.5f %9.5f\n"%(or2[0], or2[1], or2[2])
        #for ind, (i, j) in enumerate(zip(narcs1,narcs2)):
        #    at = atoms[ind]
        #    pos = i[:3] + or1
        #    xyz_str1 += "%s %9.5f %9.5f %9.5f\n"%(at, pos[0], pos[1], pos[2])
        #    pos = j + or2
        #    xyz_str2 += "%s %9.5f %9.5f %9.5f\n"%(at, pos[0], pos[1], pos[2]) 

        #xyz_file = open("debugging.xyz", 'a')
        #xyz_file.writelines("%i\ndebug\n"%(len(narcs1)*2+2))
        #xyz_file.writelines(xyz_str1)
        #xyz_file.writelines(xyz_str2)
        #xyz_file.close()

        ### DEBUG
        #CI_1 = self.chiral_invariant(edges, narcs1)

        #CI_2 = self.chiral_invariant(edges, narcs2)
        #count.append(1) 
        #ff = open("CI1", 'a')
        #ff.writelines('%i %e\n'%(len(count), CI_1))
        #ff.close()
        #ff = open("CI2", 'a')
        #ff.writelines('%i %e\n'%(len(count), CI_2))
        #ff.close()
        #print 'edge assignment ', ','.join([p[2] for p in edges])
        #print 'lattice arcs  CI ', CI_1
        #print 'connect point CI ', CI_2

        #if all(item >= 0 for item in (CI_1, CI_2)) or all(item < 0 for item in (CI_1, CI_2)):
        #    return np.absolute(CI_1 - CI_2)
        #else:
        #    return 150000.
        return np.sum(np.absolute((narcs1 - narcs2).flatten()))

    def chiral_match(self, edges, arcs, cp_vects, tol=0.01):
        """Determines if two geometries match in terms of edge
        orientation.

        DOI:10.1098/rsif.2010.0297
        """
        edge_weights = [float(e[2][1:]) for e in edges]
        # just rank in terms of weights.......
        edge_weights = [float(sorted(edge_weights).index(e)+1) for e in edge_weights]
        vrm = raw_moment(edge_weights, cp_vects)
        com = vrm(0,0,0)
        (mx, my, mz) = (vrm(1,0,0)/com, 
                        vrm(0,1,0)/com, 
                        vrm(0,0,1)/com)
        vcm = central_moment(edge_weights, cp_vects, (mx, my, mz))
        if np.allclose(elipsoid_vol(vcm), 0., atol=0.004):
            return True
        # This is a real hack way to match vectors...
        R = rotation_from_vectors(arcs[:], cp_vects[:])
        oriented_arc = (np.dot(R[:3,:3], arcs.T)).T
        return np.allclose(cp_vects, oriented_arc, atol=tol)
        
    def assign_edges(self):
        """Select edges from the graph to assign bonds between SBUs.
        This can become combinatorial...
        
        NB: if the SBUs have low symmetry, just selecting from a pool
        of SBU connection points may result in a node with the wrong
        orientation of edges.  There should be a better way of doing 
        this where the SBU geometry is respected.

        In this algorithm obtain the inner products of all the edges
        These will be used to later optimize the net to match the 
        SBUs.

        """
        # In cases where there is asymmetry in the SBU or the vertex, assignment can 
        # get tricky.
        g = self._net.graph
        self._inner_product_matrix = np.zeros((self.net.shape, self.net.shape))
        self.colattice_inds = ([], [])
        for v in self.sbu_vertices:
            allvects = {}
            self.assign_edge_labels(v)
            sbu = self._vertex_sbu[v]
            sbu_edges = sbu.edge_assignments
            cps = sbu.connect_points
            if sbu.two_connected and not sbu.linear:
                vectors = [self.vector_from_cp_intersecting_pt(cp, sbu) for
                        cp in cps]
            else:
                vectors = [self.vector_from_cp_SBU(cp, sbu) for cp in cps]
            for i, ed in enumerate(sbu_edges):
                if ed in self._net.in_edges(v):
                    vectors[i]*=-1.

            allvects = {e:vec for e, vec in zip(sbu_edges, vectors)}
            for cp in cps:
                cpv = cp.vertex_assign
                cpe = self._net.neighbours(cpv) 
                assert len(cpe) == 2
                edge = cpe[0] if cpe[0] not in sbu_edges else cpe[1]
                # temporarily set to the vertex of the other connect point
                cp.bonded_cp_vertex = edge[0] if edge[0] != cpv else edge[1]
                vectr = self.obtain_edge_vector_from_cp(cp)
                vectr = -1.*vectr if edge in self._net.in_edges(cpv) else vectr
                allvects.update({edge:vectr})

            for (e1, e2) in itertools.combinations_with_replacement(allvects.keys(), 2):
                (i1, i2) = self._net.return_indices([e1, e2])
                dp = np.dot(allvects[e1], allvects[e2])
                self.colattice_inds[0].append(i1)
                self.colattice_inds[1].append(i2)
                self._inner_product_matrix[i1, i2] = dp
                self._inner_product_matrix[i2, i1] = dp
        self._inner_product_matrix = np.asmatrix(self._inner_product_matrix)
        
    def net_degrees(self):
        #n = self._net.original_graph.to_undirected().degree_histogram() # SAGE compatible
        n = degree_histogram(self._net.original_graph) # networkx compatible
        return sorted([i for i, j in enumerate(n) if j])

    def obtain_embedding(self):
        """Optimize the edges and cell parameters to obtain the crystal
        structure embedding.

        """
        # We first need to normalize the edge lengths of the net. This will be
        # done initially by setting the longest vector equal to the longest
        # vector of the barycentric embedding.
        self._net.assign_ip_matrix(self._inner_product_matrix, self.colattice_inds)

        # this calls the optimization routine to match the tensor product matrix
        # of the SBUs and the net.
        optimized = self._net.net_embedding()

        init = np.array([0.5, 0.5, 0.5])
        if self.bad_embedding() or not optimized:
            warning("net %s didn't embed properly with the "%(self._net.name) +
            "geometries dictated by the SBUs")
        else:
            self.build_structure_from_net(init)

    def test_angle(self, index1, index2, mat):
        return np.arccos(mat[index1, index2]/np.sqrt(mat[index1, index1])/np.sqrt(mat[index2, index2]))*180./np.pi

    def custom_embedding(self, rep, mt):
        self._net.metric_tensor = np.matrix(mt)
        self._net.periodic_rep = np.matrix(rep)
        la = np.dot(self._net.cycle_cocycle.I,rep)
        ip = np.dot(np.dot(la, mt), la.T)
        ipsbu = self._inner_product_matrix
        nz = np.nonzero(np.triu(ipsbu))
        self.build_structure_from_net(np.zeros(self._net.ndim))
        #self.show()

    def bad_embedding(self):
        mt = self._net.metric_tensor
        lengths = []
        angles = []
        for (i,j) in zip(*np.triu_indices_from(mt)):
            if i==j:
                if mt[i,j] <= 0.:
                    warning("The cell lengths reported were less than zero!")
                    return True
                lengths.append(np.sqrt(mt[i,j]))
            else:
                # cos(angle) between cell vectors i and j from the metric tensor
                dp_mag = mt[i,j]/np.sqrt(mt[i,i])/np.sqrt(mt[j,j])
                try:
                    angles.append(math.acos(dp_mag))
                except ValueError:
                    warning("The cell angles computed were outside the domain of acos!")
                    return True

        vol = np.sqrt(np.linalg.det(mt))
        if vol < self.options.cell_vol_tolerance*reduce(lambda x, y: x*y, lengths):
            warning("The unit cell obtained was below the specified volume tolerance")
            return True
        # v x w = ||v|| ||w|| sin(t)
        return False

    def build_structure_from_net(self, init_placement):
        """Orient SBUs to the nodes on the net, create bonds where needed, etc.."""
        metals = "_".join(["m%i"%(sbu.identifier) for sbu in 
                                self._sbus if sbu.is_metal])
        organics = "_".join(["o%i"%(sbu.identifier) for sbu in 
                                self._sbus if not sbu.is_metal])
        name = "str_%s_%s_%s"%(metals, organics, self._net.name)
        self.name = name
        #name += "_ftol_%11.5e"%self.options.ftol
        #name += "_xtol_%11.5e"%self.options.xtol
        #name += "_eps_%11.5e"%self.options.epsfcn
        #name += "_fac_%6.1f"%self.options.factor
        struct = Structure(self.options, 
                           name=name,
                           params=self._net.get_3d_params())

        cell = struct.cell.lattice
        V = self.net.vertices(0)
        edges = self.net.neighbours(V)
        sbu_pos = self._net.vertex_positions(edges, [], pos={V:init_placement})
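        # Place each SBU: rotate it to match the net's arcs, then translate it
        # to the cartesian position of its vertex (fractional coordinates
        # dotted with the cell matrix).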
        for v in self.sbu_vertices:
            self.sbu_orient(v, cell)
            fc = sbu_pos[v]
            tv = np.dot(fc, cell)
            self.sbu_translate(v, tv)
            # compute dihedral angle, if one exists...
            struct.add_sbu(self._vertex_sbu[v])
        struct.connect_sbus(self._vertex_sbu)
        if self.options.overlap_tolerance != 0.0 and struct.compute_overlap():
            warning("Overlap found in final structure, not creating MOF.")
        else:
            struct.write_cif()
            self.struct = struct
            self.success=True
            if self.options.store_net:
                self.embedded_net = self.store_placement(cell, init_placement) 
            info("Structure Generated!")

    def rotation_function(self, params, vect1, vect2):
        #axis = np.array((params['a1'].value, params['a2'].value, params['a3'].value))
        #angle = params['angle'].value
        #R = rotation_matrix(axis, angle)
        omega = np.array([params['w1'].value, params['w2'].value, params['w3'].value])
        R = rotation_from_omega(omega)
        res = np.dot(R[:3,:3], vect1.T).T
        
        #v = normalized_vectors(res.T)
        ### DEBUGGGGGG
        #or1 = np.zeros(3)
        #or2 = np.array([3., 3., 0.])
        #xyz_str1 = "C %9.5f %9.5f %9.5f\n"%(or1[0], or1[1], or1[2])
        #xyz_str2 = "C %9.5f %9.5f %9.5f\n"%(or2[0], or2[1], or2[2])
        #atms = ["H", "F", "O", "He", "N", "Cl"]
        #for ind, (i, j) in enumerate(zip(res, vect2)):
        #    at = atms[ind]
        #    pos = i + or1
        #    xyz_str1 += "%s %9.5f %9.5f %9.5f\n"%(at, pos[0], pos[1], pos[2])
        #    pos = j + or2
        #    xyz_str2 += "%s %9.5f %9.5f %9.5f\n"%(at, pos[0], pos[1], pos[2]) 

        #xyz_file = open("debug_rotation_function.xyz", 'a')
        #xyz_file.writelines("%i\ndebug\n"%(len(res)*2+2))
        #xyz_file.writelines(xyz_str1)
        #xyz_file.writelines(xyz_str2)
        #xyz_file.close()
        ### DEBUGGGGGG
        #angles = np.array([calc_angle(v1, v2) for v1, v2 in zip(v, vect2)])
        #return angles
        return (res - vect2).flatten()

    def get_rotation_matrix(self, vect1, vect2):
        """Optimize a rotation matrix that carries vect1 onto vect2."""
        params = Parameters()
        params.add('w1', value=1.000)
        params.add('w2', value=1.000)
        params.add('w3', value=1.000)
        minimizer = Minimizer(self.rotation_function, params, fcn_args=(vect1, vect2))
        # L-BFGS-B has proven more stable than leastsq for this fit
        minimizer.lbfgsb(factr=100., epsilon=0.001, pgtol=0.001)
        R = rotation_from_omega(np.array([params['w1'].value, params['w2'].value, params['w3'].value]))
        return R
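
    # `rotation_from_omega` is defined elsewhere in this package; judging from
    # the R[:3,:3] indexing above it returns a homogeneous (4x4) matrix. A
    # minimal sketch of its 3x3 core under the usual axis-angle (Rodrigues)
    # convention would be:
    #
    #     def rotation_from_omega(omega):
    #         theta = np.linalg.norm(omega)
    #         if theta < 1.0e-12:
    #             return np.identity(3)
    #         k = omega / theta
    #         K = np.array([[  0.0, -k[2],  k[1]],
    #                       [ k[2],   0.0, -k[0]],
    #                       [-k[1],  k[0],   0.0]])
    #         return np.identity(3) + np.sin(theta)*K \
    #                + (1.0 - np.cos(theta))*np.dot(K, K)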
    

    def sbu_orient(self, v, cell):
        """Least squares optimization of orientation matrix.
        Obtained from:
        Soderkvist & Wedin
        'Determining the movements of the skeleton using well configured markers'
        J. Biomech. 26, 12, 1993, 1473-1477.
        DOI: 10.1016/0021-9290(93)90098-Y"""
        g = self._net.graph
        sbu = self._vertex_sbu[v]
        edges = self._net.neighbours(v) 
        debug("Orienting SBU: %i, %s on vertex %s"%(sbu.identifier, sbu.name, v))
        # re-index the edges to match the order of the connect points in the sbu list
        indexed_edges = sbu.edge_assignments
        coefficients = np.array([1. if e in self._net.out_edges(v) else -1. for e in indexed_edges])
        if len(indexed_edges) != sbu.degree:
            error("There was an error assigning edges "+
                        "to the sbu %s"%(sbu.name))
            Terminate(errcode=1)

        inds = self._net.return_indices(indexed_edges)
        la = self._net.lattice_arcs[inds]
        if self._net.ndim == 2:
            la = np.hstack((la, np.zeros((la.shape[0], 1))))

        arcs = np.dot(la, cell)
        arcs = normalized_vectors(arcs) * coefficients[:, None]

        sbu_vects = normalized_vectors(np.array([self.vector_from_cp_SBU(cp, sbu) 
                                for cp in sbu.connect_points]))

        # issue for ditopic SBUs where the inner product matrices could invert the
        # angles (particularly for ZIFs)
        if sbu.degree == 2 and not sbu.linear:
            sbu_vects = normalized_vectors(np.array([
                self.vector_from_cp_intersecting_pt(cp, sbu) 
                                for cp in sbu.connect_points]))
            # define the plane generated by the edges
            #print "arc angle %9.5f"%(180.*calc_angle(*arcs)/np.pi)
            #print "sbu angle %9.5f"%(180.*calc_angle(*sbu_vects)/np.pi)
            # For some reason the least squares rotation matrix
            # does not work well with just two vectors, so a third
            # orthonormal vector is included to create the proper
            # rotation matrix
            arc3 = np.cross(arcs[0], arcs[1])
            arc3 /= np.linalg.norm(arc3)
            cp3 = np.cross(sbu_vects[0], sbu_vects[1])
            cp3 /= np.linalg.norm(cp3)
            sbu_vects = np.vstack((sbu_vects, cp3))
            arcs = np.vstack((arcs, arc3))
        R = rotation_from_vectors(sbu_vects, arcs)
        mean, std = self.report_errors(sbu_vects, arcs, rot_mat=R)
        debug("Average orientation error: %12.6f +/- %9.6f degrees"%(mean/DEG2RAD, std/DEG2RAD))
        sbu.rotate(R)
        
    def report_errors(self, sbu_vects, arcs, rot_mat):
        rotation = np.dot(rot_mat[:3,:3], sbu_vects.T)
        v = normalized_vectors(rotation.T)
        angles = np.array([calc_angle(v1, v2) for v1, v2 in zip(v, arcs)])
        mean, std = np.mean(angles), np.std(angles)
        return mean, std
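
    # `rotation_from_vectors` is imported from elsewhere in this package; a
    # minimal sketch of the Soderkvist & Wedin SVD construction cited in the
    # sbu_orient docstring (illustrative only -- the package version appears
    # to embed the result in a 4x4 matrix, given the R[:3,:3] indexing):
    #
    #     def rotation_from_vectors(src, dst):
    #         # 3x3 correlation matrix between the paired unit vectors
    #         H = np.dot(src.T, dst)
    #         U, S, Vt = np.linalg.svd(H)
    #         # force a proper rotation (det R = +1), never a reflection
    #         d = np.sign(np.linalg.det(np.dot(Vt.T, U.T)))
    #         R3 = np.dot(Vt.T, np.dot(np.diag([1.0, 1.0, d]), U.T))
    #         return R3  # dst_i ~= R3 . src_i for column vectors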

    def sbu_translate(self, v, trans):
        sbu = self._vertex_sbu[v]
        sbu.translate(trans)

    def show(self):
        g = GraphPlot(self._net)
        #g.view_graph()
        sbu_verts = self.sbu_vertices
        g.view_placement(init=(0.51, 0.51, 0.51), edge_labels=False, sbu_only=sbu_verts)

    def vector_from_cp_intersecting_pt(self, cp, sbu):
        for atom in sbu.atoms:
            # NB: THIS BREAKS BARIUM MOFS!!
            coords=cp.origin[:3]
            for b in atom.sbu_bridge:
                if b == cp.identifier:
                    coords = atom.coordinates[:3]
                    break
        v = coords - sbu.closest_midpoint
        return v 

    def vector_from_cp_SBU(self, cp, sbu):
        coords = cp.origin[:3]  # fall back to the connect point origin
        for atom in sbu.atoms:
            # NB: THIS BREAKS BARIUM MOFS!!
            for b in atom.sbu_bridge:
                if b == cp.identifier:
                    coords = atom.coordinates[:3]
                    break
        # fix for tetrahedral metal ions
        if np.allclose(coords - sbu.COM[:3], np.zeros(3)):
            return (cp.origin[:3] - coords)
        return coords - sbu.COM[:3]

    def vector_from_cp(self,cp):
        return cp.z[:3]/np.linalg.norm(cp.z[:3])

    @property
    def check_net(self):
        min_edges = self.options.min_edge_count
        max_edges = self.options.max_edge_count
        if self.sbu_degrees == self.net_degrees() and \
                min_edges <= self._net.original_graph.size() <= max_edges:
            return True
        return False

    @property
    def sbu_degrees(self):
        if self._sbu_degrees is not None:
            return self._sbu_degrees
        else:
            deg = [i.degree for i in self._sbus]
            lin = [i.linear for i in self._sbus]
            # use a set in case two different SBUs share the same
            # coordination number; nonlinear ditopic SBUs (degree 2)
            # are excluded here.
            self._sbu_degrees = sorted(set([j for i,j in zip(lin,deg) if not i and j!=2]))
            return self._sbu_degrees

    @property
    def linear_sbus(self):
        """Return true if one or more of the SBUs are linear"""
        # Not sure why this is limited to only linear SBUs.
        # should apply to any ditopic SBUs, hopefully the program will
        # sort out the net shape during the optimization.
        for s in self._sbus:
            if s.degree == 2:# and s.linear:
                return True
        return False

    
    @property
    def met_met_bonds(self):
        met_incidence = [sbu.degree for sbu in self._sbus if sbu.is_metal]
        org_incidence = [sbu.degree for sbu in self._sbus if not sbu.is_metal]
        
        # if organic and metal SBUs share an incidence, skip this check...
        # NB: met-met bonds may still be added to the net!
        if set(met_incidence).intersection(set(org_incidence)):
            return False
        #for (v1, v2, e) in self._net.graph.edges(): # SAGE compliant
        for (v1, v2, e) in self._net.neighbours(None):
            nn1 = len(self._net.neighbours(v1))
            nn2 = len(self._net.neighbours(v2))

            if (nn1 in met_incidence) or (nn2 in met_incidence):
                if ((nn1 == nn2) or ((v1,v2,e) in self._net.loop_edges())):
                    return True

        return False

    def init_embed(self):
        # keep track of the sbu vertices
        edges_split = []
        self.sbu_vertices = list(self._net.vertices())
        met_incidence = [sbu.degree for sbu in self._sbus if sbu.is_metal]
        org_incidence = [sbu.degree for sbu in self._sbus if not sbu.is_metal]
        # Some special cases: linear sbus and no loops. 
        # Insert between metal-type vertices
        self.sbu_joins = {}
        for (v1, v2, e) in self._net.all_edges():
            if (v1, v2, e) not in edges_split:
                nn1 = len(self._net.neighbours(v1))
                nn2 = len(self._net.neighbours(v2))
                # several conditions are folded together here:
                if self.linear_sbus:
                    if ((v1, v2, e) in self._net.loop_edges()) or \
                        ((nn1==nn2) and (nn1 in met_incidence)):
                        vertices, edges = self._net.add_edges_between((v1, v2, e), 5)
                        # add the middle vertex to the SBU vertices..
                        # this is probably not a universal thing.
                        self.sbu_joins.setdefault(vertices[2],[]).append(v1)
                        self.sbu_joins.setdefault(v1,[]).append(vertices[2])
                        self.sbu_joins.setdefault(vertices[2],[]).append(v2)
                        self.sbu_joins.setdefault(v2,[]).append(vertices[2])
                        self.sbu_vertices.append(vertices[2])
                        
                        edges_split += edges
                    else:
                        self.sbu_joins.setdefault(v1,[]).append(v2)
                        self.sbu_joins.setdefault(v2,[]).append(v1)
                        vertices, edges = self._net.add_edges_between((v1, v2, e), 2)
                        edges_split += edges
                else:
                    self.sbu_joins.setdefault(v1,[]).append(v2)
                    self.sbu_joins.setdefault(v2,[]).append(v1)
                    vertices, edges = self._net.add_edges_between((v1, v2, e), 2)
                    edges_split += edges

        i = self._obtain_cycle_bases()
        if i<0:
            return i
        # start off with the barycentric embedding
        self._net.barycentric_embedding()
        return i

    def setnet(self, tupl):
        (name, graph, volt) = tupl
        dim=volt.shape[1]
        self._net = Net(graph, dim=dim, options=self.options)
        self._net.name = name
        self._net.voltage = volt
    
    def getnet(self):
        return self._net

    net=property(getnet, setnet)
    
    
    def getsbus(self):
        return self._sbus

    def setsbus(self, sbus):
        self._sbus = sbus

    sbus=property(getsbus,setsbus)

    def get_automorphisms(self):
        """Compute the automorphisms of the graph.
        An automorphism is a permutation s of the vertex set such that a
        pair of vertices (u, v) forms an edge if and only if (s(u), s(v))
        also forms an edge. All edge swaps must be identified from the
        permutation groups reported by SAGE; the symmetry operations
        associated with those permutations can then be identified."""

        G = self.net.original_graph.to_undirected().automorphism_group()
        count = 0
        for i in G:
            count += 1
            # find equivalent edges after vertex automorphism

            # construct linear representation

            # determine symmetry element (rotation, reflection, screw, glide, inversion..)

            # chose to discard or keep. Does it support the site symmetries of the SBUs?

            if count==2:
                break

        # final set of accepted symmetry operations

        # determine space group

        # determine co-lattice vectors which keep symmetry elements intact

        # determine lattice parameters.

        #self.net.original_graph.order()
        #self.net.original_graph.edges()


    def store_placement(self, cell, init=(0., 0., 0.)):
        init = np.array(init)
        data = {"cell":cell, "nodes":{}, "edges":{}}
        # set the first node down at the init position
        V = self._net.vertices(0)
        edges = self._net.neighbours(V)
        unit_cell_vertices = self._net.vertex_positions(edges, [], pos={V:init})
        for key, value in unit_cell_vertices.items():
            if key in self._vertex_sbu.keys():
                label = self._vertex_sbu[key].name
            else:
                label = key
                for bu in self._vertex_sbu.values():
                    for cp in bu.connect_points:
                        if cp.vertex_assign == key:
                            label = str(cp.identifier)
            data["nodes"][label] = np.array(value)
            for edge in self._net.out_edges(key):
                ind = self._net.get_index(edge)
                arc = np.array(self._net.lattice_arcs)[ind]
                data["edges"][edge[2]]=(np.array(value), arc)
            for edge in self._net.in_edges(key):
                ind = self._net.get_index(edge)
                arc = -np.array(self._net.lattice_arcs)[ind]
                data["edges"][edge[2]]=(np.array(value), arc)
        return data
コード例 #34
0
    return Poisson1dModel()


fig2 = plt.figure(constrained_layout=False, figsize=(16, 10))
grid = fig2.add_gridspec(2, 3)
ax = [[None, None], [None, None]]

ax[0][0] = fig2.add_subplot(grid[0, :2])
ax[0][1] = fig2.add_subplot(grid[0, 2])
ax[1][0] = fig2.add_subplot(grid[1, :2])
ax[1][1] = fig2.add_subplot(grid[1, 2])

basis = FourierBasis1D(21, -10, 10)
net1 = Basis_Net_Time([2, 50, 50, 50, 21], basis=basis)
net0 = Net([2, 50, 50, 50, 50, 1])

net_table = [net0, net1]
for i, net in enumerate(net_table):
    print('{}-th net'.format(i))
    model = construct_model(net)
    model.train()
    model.plot(model.net, ax[i][0])
    model.post_process(ax[i][1])

ax[0][0].set_ylabel('PINN')
ax[1][0].set_ylabel('PINN-PD(k=10)')
ax[1][0].set_xlabel('Iteration 1000')
ax[1][1].set_xlabel('Loss')
plt.show()
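
# `FourierBasis1D` and `Basis_Net_Time` are project-specific. Assuming the
# natural reading of FourierBasis1D(n, a, b) -- the first n Fourier modes on
# [a, b] -- a minimal sketch might look like this (name and convention are
# assumptions, not the project's actual code):
#
#     import numpy as np
#
#     class FourierBasis1D:
#         def __init__(self, n, a, b):
#             self.n, self.a, self.b = n, a, b
#
#         def eval(self, x):
#             # constant mode followed by cos/sin pairs, truncated to n columns
#             t = 2.0 * np.pi * (np.asarray(x) - self.a) / (self.b - self.a)
#             cols = [np.ones_like(t)]
#             for k in range(1, self.n // 2 + 1):
#                 cols += [np.cos(k * t), np.sin(k * t)]
#             return np.stack(cols, axis=-1)[..., :self.n]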
コード例 #35
0
ファイル: tasks.py プロジェクト: WebValley2014/WebDev
        if not os.path.isfile(path):
            e = "File not found: {0}".format(path)
            return IOError(e)
    for d in dir_keys:
        if not os.path.isdir(d):
            try:
                os.makedirs(d)
            except Exception, e:
                msg = "Error while crating `{0}'. Details: {1}".format(d, e)
                return Exception(msg)
    

    # build args list and get instance
    args = [kwargs[arg] for arg in path_keys]
    
    # start task
    print "Starting celery network task ..."
    print self.request.id
    self.update_state(state='RUNNING')
    start_time = unicode(datetime.datetime.now())
    netAnalysis = Net(*args)
    result = netAnalysis.run()
    finish_time = unicode(datetime.datetime.now())
    return {'result' : result, 'st': start_time, 'ft': finish_time}
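
# The fragment above runs inside a Celery *bound* task (note `self.request.id`
# and `self.update_state`). A minimal self-contained sketch of that pattern
# (the broker URL and task body are placeholders):
#
#     from celery import Celery
#
#     app = Celery('tasks', broker='redis://localhost:6379/0')
#
#     @app.task(bind=True)
#     def network_task(self, *args):
#         self.update_state(state='RUNNING')  # custom intermediate state
#         result = Net(*args).run()
#         return {'result': result, 'id': self.request.id}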
コード例 #36
0
ファイル: topcryst.py プロジェクト: peteboyd/tobascco
    def _build_structures_from_top(self):
        if not self._topologies:
            warning("No topologies found!")
            Terminate()

        csvinfo = CSV(name='%s_info'%(self.options.jobname))
        csvinfo.set_headings('topology', 'sbus', 'edge_count', 'time', 'space_group')
        csvinfo.set_headings('edge_length_err', 'edge_length_std', 'edge_angle_err', 'edge_angle_std')
        self.options.csv = csvinfo
        run = Generate(self.options, self.sbu_pool)
        inittime = time()
        if self.options.count_edges_along_lattice_dirs:
            lattfile = open("edge_counts.csv","w")
            lattfile.writelines("topology,Na,Nb,Nc\n")
        for top, graph in self._topologies.items():
            if self.options.count_edges_along_lattice_dirs:
                info("Computing Edge lengths along each lattice direction for %s"%(top))
                n = Net(graph)
                n.voltage = self._topologies.voltages[top]
                n.simple_cycle_basis()
                n.get_lattice_basis()
                n.get_cocycle_basis()
                edge_str = n.print_edge_count()
                lattfile.writelines("%s,%s"%(top, edge_str))
            elif self.options.show_barycentric_net_only:
                info("Preparing barycentric embedding of %s"%(top))
                self._check_barycentric_embedding(graph, self._topologies.voltages[top])
            else:

                build = Build(self.options)
                build.net = (top, graph, self._topologies.voltages[top])
                if self.options.sbu_combinations:
                    combinations = run.combinations_from_options()
                else:
                    combinations = run.generate_sbu_combinations(incidence=build.net_degrees())

                combinations = list(combinations)
                if not combinations:
                    debug("Net %s does not support the same"%(top)+
                            " connectivity offered by the SBUs")
                for combo in combinations:
                    build.sbus = list(set(combo))
                    # check node incidence
                    if build.met_met_bonds and run.linear_sbus_exist:
                        # add linear organics
                        debug("Metal-type nodes attached to metal-type nodes. "+
                                "Attempting to insert 2-c organic SBUs between these nodes.")
                        for comb in run.yield_linear_org_sbu(combo):
                            build.sbus = list(set(comb))
                            self.embed_sbu_combo(top, comb, build)
                    elif build.met_met_bonds and not run.linear_sbus_exist:
                        debug("Metal-type nodes are attached to metal-type nodes. "+
                                "No linear SBUs exist in database, so the structure "+
                                "will have metal - metal SBUs joined")
                        self.embed_sbu_combo(top, combo, build)
                    else:
                        self.embed_sbu_combo(top, combo, build)

        if self.options.count_edges_along_lattice_dirs:
            lattfile.close() 
        finaltime = time() - inittime
        info("Topcryst completed after %f seconds"%finaltime)
        Terminate()
コード例 #37
0
@author: Ulrich
"""

import torch
import pandas as pd
from reducing_net import reduced_ann_net
from Net import Net, test_model
from utils import confusion, F1_score, loadDataset, saveNNParas
import time

# Loading the previous network status.
feature_num = 11
hidden_num = 30
output_num = 3

load_net = Net(feature_num, hidden_num, output_num)
load_net.load_state_dict(torch.load('net_model_genre.pt'))
#load_net.load_state_dict(torch.load('net_model_subjective_rating.pt'))
load_net.eval()

# Loading testing dataset to evaluate new network.
x_train, y_train = loadDataset('testing')
x_test, y_test = loadDataset('testing')

# Loading the information of vector.
vectors = pd.read_excel('vector_angle_sample.xls', header=None)
raw_df = pd.DataFrame({
    'row': vectors.iloc[:, 0],
    'col': vectors.iloc[:, 1],
    'vector': vectors.iloc[:, 2]
})
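
# A short evaluation pass for the reloaded network (assuming `load_net` maps
# feature vectors to class logits and x_test/y_test are array-like; the
# imported `test_model`, `confusion`, and `F1_score` presumably wrap
# something similar):
#
#     with torch.no_grad():
#         inputs = torch.tensor(x_test, dtype=torch.float32)
#         preds = load_net(inputs).argmax(dim=1)
#         labels = torch.tensor(y_test, dtype=torch.long)
#         accuracy = (preds == labels).float().mean().item()
#         print('test accuracy: {:.3f}'.format(accuracy))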
コード例 #38
0
def create_model(num_filters, fc1_size, fc2_size):
    return Net(num_filters, fc1_size, fc2_size)
コード例 #39
0
ファイル: topcryst.py プロジェクト: peteboyd/tobascco
    def _build_structures(self):
        """Pass the sbu combinations to a MOF building algorithm."""
        run = Generate(self.options, self.sbu_pool)
        # generate the combinations of SBUs to build
        if self.options.sbu_combinations:
            combinations = run.combinations_from_options()
        else:
            # remove SBUs if not listed in options.organic_sbus or options.metal_sbus
            combinations = run.generate_sbu_combinations()
        csvinfo = CSV(name='%s_info'%(self.options.jobname))
        csvinfo.set_headings('topology', 'sbus', 'edge_count', 'time', 'space_group', 'net_charge')
        csvinfo.set_headings('edge_length_err', 'edge_length_std', 'edge_angle_err', 'edge_angle_std')
        self.options.csv = csvinfo
        # generate the MOFs.
        if self.options.count_edges_along_lattice_dirs:
            lattfile = open("edge_counts.csv","w")
            lattfile.writelines("topology,Na,Nb,Nc\n")
        inittime = time()
        for combo in combinations:
            node_degree = [i.degree for i in set(combo)]
            node_lin = [i.linear for i in set(combo)]
            degree = sorted([j for i, j in zip(node_lin, node_degree) if not i])
            # find degrees of the sbus in the combo
            if not self._topologies:
                warning("No topologies found! Exiting.")
                Terminate()
            debug("Trying "+self.combo_str(combo))
            for top, graph in self._topologies.items():
                if self.options.use_builds:
                    try:
                        build = self._stored_builds[top]
                    except KeyError:
                        build = Build(self.options)
                        build.net = (top, graph, self._topologies.voltages[top])
                else:
                    build = Build(self.options)
                    build.net = (top, graph, self._topologies.voltages[top])
                build.sbus = list(set(combo))
                #build.get_automorphisms()
                if self.options.count_edges_along_lattice_dirs:
                    info("Computing Edge lengths along each lattice direction for %s"%(top))
                    n = Net(graph)
                    n.voltage = self._topologies.voltages[top]
                    n.simple_cycle_basis()
                    n.get_lattice_basis()
                    n.get_cocycle_basis()
                    edge_str = n.print_edge_count()
                    lattfile.writelines("%s,%s\n"%(top, edge_str))
                elif self.options.show_barycentric_net_only:
                    info("Preparing barycentric embedding of %s"%(top))
                    #print("CHECK", top, build.net.graph.number_of_selfloops())
                    self._check_barycentric_embedding(graph, self._topologies.voltages[top])
                else:
                    if build.check_net:
                        # check node incidence
                        if build.met_met_bonds and run.linear_sbus_exist and not run.linear_in_combo(combo):
                            # add linear organics
                            debug("Metal-type nodes attached to metal-type nodes. "+
                                    "Attempting to insert 2-c organic SBUs between these nodes.")
                            for comb in run.yield_linear_org_sbu(combo):
                                if self.options.use_builds:
                                    try:
                                        build = self._stored_builds[top]
                                    except KeyError:
                                        build = Build(self.options)
                                else:
                                    build = Build(self.options)
                                    build.sbus = list(set(comb))
                                    build.net = (top, graph, self._topologies.voltages[top])
                                self.embed_sbu_combo(top, comb, build)
                        elif build.met_met_bonds and run.linear_in_combo(combo):
                            self.embed_sbu_combo(top, combo, build)

                        elif build.met_met_bonds and not run.linear_sbus_exist:
                            debug("Metal-type nodes are attached to metal-type nodes. "+
                                   "No linear SBUs exist in database, so the structure "+
                                    "will have metal - metal SBUs joined")
                            self.embed_sbu_combo(top, combo, build)
                        elif not build.met_met_bonds:
                            self.embed_sbu_combo(top, combo, build)

                    else:
                        debug("Net %s does not support the same"%(top)+
                                " connectivity offered by the SBUs")

        if self.options.count_edges_along_lattice_dirs:
            lattfile.close() 
        finaltime = time() - inittime
        info("Topcryst completed after %f seconds"%finaltime)
        if self.options.get_run_info:
            info("Writing run information to %s"%self.options.csv.filename)
            self.options.csv.write()
        if self.options.store_net and self._stored_nets:
            info("Writing all nets to nets_%s.pkl"%self.options.jobname)
            f = open("nets_%s.pkl"%self.options.jobname, 'wb')
            pickle.dump(self._stored_nets, f)
            f.close()
        Terminate()
コード例 #40
0
class DQN(object):
    def __init__(self):
        # Two networks with the same architecture: eval_net is updated every
        # step, while target_net only tracks it slowly (soft update in learn())
        self.eval_net, self.target_net = Net(N_STATES, N_ACTIONS,
                                             Hidden_num), Net(
                                                 N_STATES, N_ACTIONS,
                                                 Hidden_num)

        self.learn_step_counter = 0  # counts learning steps for target updates
        self.memory_counter = 0  # for storing memory
        self.memory = np.zeros((MEMORY_CAPACITY, N_STATES * 2 + 2))  # replay memory
        self.memory_state = np.zeros((MEMORY_CAPACITY, N_STATES))
        self.memory_next_state = np.zeros((MEMORY_CAPACITY, N_STATES))
        self.memory_action = np.zeros((MEMORY_CAPACITY, 1))
        self.memory_reward = np.zeros((MEMORY_CAPACITY, 1))
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()

    # choose an action (epsilon-greedy)
    def choose_action(self, x):
        x = torch.unsqueeze(torch.FloatTensor(x), 0)
        if np.random.uniform() < EPSILON:  # greedy: pick the argmax action
            actions_value = self.eval_net.forward(x, False).detach()
            action = torch.max(actions_value, 1)[1].data.numpy()
            action = action[0]
        else:  # random exploration
            action = np.random.randint(0, N_ACTIONS)
        return action

    # store one transition in replay memory
    def store_transition(self, s, a, r, s_):
        transition = np.hstack((s, [a, r], s_))  # pack the fields together
        # replace the old memory with new memory
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory_state[index, :] = s
        self.memory_next_state[index, :] = s_
        self.memory_action[index, :] = a
        self.memory_reward[index, :] = r
        # self.memory[index, :] = transition
        self.memory_counter += 1

    def learn(self):
        # target parameter update
        for target_param, param in zip(self.target_net.parameters(),
                                       self.eval_net.parameters()):
            target_param.data.copy_(target_param.data * (1.0 - TAU) +
                                    param.data * TAU)
        self.learn_step_counter += 1

        # learning step: sample a minibatch of multi-step segments
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        bound = min(T - sample_index.max(), N) - 1
        s_memory = list()
        next_s_memory = list()
        a_memory = list()
        r_memory = list()
        for index in sample_index:
            r_memory.append(self.memory_reward[index:index + bound + 1, ])
        s_memory.append(self.memory_state[sample_index])
        next_s_memory.append(self.memory_next_state[sample_index + bound])
        a_memory.append(self.memory_action[sample_index])

        # b_memory = self.memory[sample_index, :]
        b_s = torch.tensor(s_memory, dtype=torch.float32).squeeze()
        b_s_ = torch.tensor(next_s_memory, dtype=torch.float32).squeeze()
        b_r = torch.tensor(r_memory, dtype=torch.float32)
        b_r = torch.sum(b_r, dim=1)
        b_a = torch.tensor(a_memory, dtype=torch.long).squeeze(0)

        # q_eval w.r.t the action in experience
        q_eval = self.eval_net(b_s)
        q_eval = q_eval.gather(1, b_a)  # shape (batch, 1)
        q_next = self.target_net(
            b_s_).detach()  # detach: no gradient flows into target_net; it is updated via the soft update above
        q_target = b_r + GAMMA * q_next.max(1)[0].view(BATCH_SIZE,
                                                       1)  # shape (batch, 1)
        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.data.item()

    def learn2(self):
        sample_index = np.random.choice(self.memory_counter, BATCH_SIZE)
        s_memory = list()
        next_s_memory = list()
        a_memory = list()
        r_memory = list()
        r_memory.append(self.memory_reward[sample_index])
        s_memory.append(self.memory_state[sample_index])
        next_s_memory.append(self.memory_next_state[sample_index])
        a_memory.append(self.memory_action[sample_index])

        b_s = torch.tensor(s_memory, dtype=torch.float32).squeeze()
        b_s_ = torch.tensor(next_s_memory, dtype=torch.float32).squeeze()
        b_r = torch.tensor(r_memory, dtype=torch.float32)
        b_r = torch.sum(b_r, dim=1)
        b_a = torch.tensor(a_memory, dtype=torch.long).squeeze(0)

        q_eval = self.eval_net(b_s)
        q_eval = q_eval.gather(1, b_a)  # shape (batch, 1)
        action = torch.argmax(self.eval_net(b_s_), dim=1).view(BATCH_SIZE, 1)
        q_next = self.target_net(b_s_).gather(
            dim=1,
            index=action).detach()  # detach: target_net receives no gradient here (double-DQN evaluation)
        q_target = b_r + GAMMA * q_next.view(BATCH_SIZE, 1)
        loss = self.loss_func(q_eval, q_target)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.data.item()
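
# A hedged sketch of how this DQN class is typically driven (the environment
# `env` and the EPSILON/MEMORY_CAPACITY/... constants are defined elsewhere;
# the classic gym 4-tuple step API is assumed):
#
#     dqn = DQN()
#     for episode in range(400):
#         s = env.reset()
#         done = False
#         while not done:
#             a = dqn.choose_action(s)
#             s_, r, done, info = env.step(a)
#             dqn.store_transition(s, a, r, s_)
#             if dqn.memory_counter > MEMORY_CAPACITY:
#                 dqn.learn()  # n-step variant; learn2() is the double-DQN one
#             s = s_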