Code example #2
File: mlp.py Project: kunglab/idp
def run(args):
    train, test = util.get_dataset(args.dataset)
    names = ['all-one (standard)', 'linear']
    colors = [vz.colors.all_one_lg, vz.colors.linear_lg]
    models = [
        MLP.MLP(10, cg.uniform, n_units=100),
        MLP.MLP(10, cg.linear, n_units=100),
    ]
    comp_ratios = np.linspace(0.1, 1.0, 20)
    acc_dict = {}
    ratios_dict = {}
    for name, model in zip(names, models):
        util.load_or_train_model(model, train, test, args)
        acc_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
        ratios_dict[name] = [100. * cr for cr in comp_ratios]

    filename = "MLP_{}".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            title='MLP (MNIST)',
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            ylim=(85, 100))
Code example #3
File: mlp_coef_comparison.py Project: kunglab/idp
def run(args):
    train, test = util.get_dataset(args.dataset)
    names = ['all-one', 'harmonic', 'linear', 'half_one']
    colors = [
        vz.colors.all_one_lg, vz.colors.harmonic_lg, vz.colors.linear_lg,
        vz.colors.half_one_lg
    ]
    models = [
        MLP.MLP(10, cg.uniform),
        MLP.MLP(10, cg.harmonic),
        MLP.MLP(10, cg.linear),
        MLP.MLP(10, cg.uniform_exp),
    ]
    comp_ratios = np.linspace(0.1, 1.0, 20)
    acc_dict = {}
    ratios_dict = {}
    for name, model in zip(names, models):
        util.load_or_train_model(model, train, test, args)
        acc_dict[name] = util.sweep_idp(model, test, comp_ratios, args)
        ratios_dict[name] = [100. * cr for cr in comp_ratios]

    filename = "MLP_coef_comparison_{}".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            title='MLP (MNIST)',
            legend_loc='lower right',
            ylim=(85, 100))
Code example #4
def main():

    for seed in range(42,45):
        torch.manual_seed(seed)

        model = MLP().cuda()
        optimizer = optim.SGD(model.parameters(), lr=1e-2, momentum=0)

        train_ds = datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ]))
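        # overwrite the training labels with pre-generated random ones (label-memorization experiment)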
        train_ds.train_labels = torch.load('./random_labels_mnist.pth').long()
        train_loader = torch.utils.data.DataLoader(train_ds, batch_size=64, shuffle=True)
        test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False, transform=transforms.Compose([
                            transforms.ToTensor(),
                            transforms.Normalize((0.1307,), (0.3081,))
                        ])),
        batch_size=1000, shuffle=True)

        # train for 150 epochs
        for epoch in range(1, 150 + 1):
            train(model, train_loader, optimizer, epoch)
            test(model, test_loader)
        
        torch.save(model.state_dict(), './model_weights/mlp_random_weights_{}.pth'.format(seed))
        model.load_state_dict(torch.load('./model_weights/mlp_random_weights_{}.pth'.format(seed)))
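
The script expects a ./random_labels_mnist.pth file of pre-generated random labels. Its provenance is not shown; a plausible way to generate one, assuming it is simply a tensor of 60000 random class indices (an assumption, not the original code):

import torch

# Hypothetical generator for the label file loaded above; the format (one
# random class index per MNIST training image) is an assumption.
torch.manual_seed(0)
random_labels = torch.randint(0, 10, (60000,))
torch.save(random_labels, './random_labels_mnist.pth')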
Code example #5
def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--image', '-i', type=str, default="",
                        help='path to the input image')
    parser.add_argument('--model', '-m', default='my_mnist.model',
                        help='path to the training model')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()
    model = MLP(args.unit, 10)
    if args.gpu >= 0:
        model.to_gpu(chainer.cuda.get_device_from_id(args.gpu).use())
    serializers.load_npz(args.model, model)
    try:
        img = Image.open(args.image).convert("L").resize((28, 28))
    except Exception:
        print("invalid input")
        return
    img_array = model.xp.asarray(img, dtype=model.xp.float32).reshape(1, 784)
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        result = model.predict(img_array)
    print("predict:", model.xp.argmax(result.data))
Code example #6
File: agent.py Project: Soupure/2021-QA-----------
class AgentVPG:
    """
    Agent

    functions:
    1) choose_action
        input - state of the current environment
        output - an action
    2) update
        input - experience obtained by interacting with the environment
        output - losses
    """
    def __init__(self, action_space, obs_dim, gamma=1):
        self.logits_net = MLP(input_dim=obs_dim, output_dim=len(action_space))
        self.act_space = list(action_space)
        self.optim = Adam(self.logits_net.parameters(), lr=5e-3)

    # make action selection function (outputs int actions, sampled from policy)
    def choose_action(self, obs):
        with torch.no_grad():
            return self._get_policy(obs).sample().item()

    def update(self, batch):
        obs = torch.as_tensor(batch['obs'], dtype=torch.float32)
        act = torch.as_tensor(batch['acts'], dtype=torch.int32)
        weights = torch.as_tensor(batch['weights'], dtype=torch.float32)
        batch_loss = self._compute_loss(obs, act, weights)
        self.optim.zero_grad()
        batch_loss.backward()
        self.optim.step()
        return batch_loss.item()

    # make loss function whose gradient, for the right data, is policy gradient
    def _compute_loss(self, obs, act, weights):
        logp = self._get_policy(obs).log_prob(act)
        # print(logp[:10], act[:10])
        return -(logp * weights).mean() + (0.2 * logp).mean()

    # make function to compute action distribution
    def _get_policy(self, obs):
        logits = self.logits_net(obs)
        return Categorical(logits=logits)
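
A minimal usage sketch of AgentVPG on fabricated data, assuming the repository's MLP is a plain feed-forward net (the MLP stand-in and the batch contents below are illustrative, not the project's code):

import torch
import torch.nn as nn

# Illustrative stand-in for the repository's MLP.
class MLP(nn.Module):
    def __init__(self, input_dim, output_dim, hidden=32):
        super().__init__()
        self.net = nn.Sequential(nn.Linear(input_dim, hidden), nn.Tanh(),
                                 nn.Linear(hidden, output_dim))

    def forward(self, x):
        return self.net(x)

agent = AgentVPG(action_space=[0, 1], obs_dim=4)
action = agent.choose_action(torch.randn(4))   # sample one action from the policy
loss = agent.update({                          # one policy-gradient step
    'obs': torch.randn(8, 4),
    'acts': torch.randint(0, 2, (8,)),
    'weights': torch.ones(8),                  # e.g. rewards-to-go
})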
Code example #7
File: mlp_multi.py Project: kunglab/idp
def run(args):
    train, test = util.get_dataset(args.dataset)
    # names = ['all-ones,exp', 'all-ones,all', 'linear,exp', 'linear,all']
    names = ['linear']
    colors = [vz.colors.linear_sm, vz.colors.linear_lg]
    models = [
        MLP.MLP(10, cg.linear, [(0, 3), (3, 10)], n_units=100),
    ]
    comp_ratios = np.linspace(0.1, 1, 20)
    acc_dict = {}
    ratios_dict = {}
    key_names = []
    for name, model in zip(names, models):
        util.train_model_profiles(model, train, test, args)
        for profile in range(len(model.profiles)):
            key = name + '-' + str(profile + 1)
            key_names.append(key)
            acc_dict[key] = util.sweep_idp(model,
                                           test,
                                           comp_ratios,
                                           args,
                                           profile=profile)
            ratios_dict[key] = [100. * cr for cr in comp_ratios]

    filename = "MLP_{}_multi".format(args.dataset)
    vz.plot(ratios_dict,
            acc_dict,
            key_names,
            filename,
            colors=colors,
            folder=args.figure_path,
            ext=args.ext,
            xlabel='IDP (%)',
            ylabel='Classification Accuracy (%)',
            title='MLP (MNIST)',
            ylim=(85, 100))
Code example #9
        print('what dataset ?', args.dataset)
        sys.exit(1)

    key = folder
    key += args.net \
          + '_hdim' + str(args.hdim) \
          + '_Batchsize' + str(args.Batchsize) \
          + '_lr' + str(args.lr) \
          + '_Nsteps' + str(args.Nsteps) \
          + '_epsilon' + str(args.epsilon)

    cmd = ['mkdir', '-p', key]
    subprocess.check_call(cmd)

    if args.net == 'MLP':
        net = MLP(dim=dim, hidden_size = args.hdim, use_z2=False)
    elif args.net == 'CNN':
        net = CNN(L=length, channel=channel, hidden_size = args.hdim, use_z2=False)
    elif args.net == 'Simple_MLP':
        net = Simple_MLP(dim=dim, hidden_size = args.hdim, use_z2=False)
    else:
        print('what network ?', args.net)
        sys.exit(1)

    model = MongeAmpereFlow(net, args.epsilon, args.Nsteps, device=device, name = key)
    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.decay)
    if args.checkpoint is not None:
        try:
            load_checkpoint(args.checkpoint, model, optimizer)
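
A side note on the directory creation earlier in this snippet: shelling out to mkdir -p works on POSIX systems, but a portable in-process equivalent (a suggestion, not the project's code) is:

import os
os.makedirs(key, exist_ok=True)  # portable equivalent of `mkdir -p <key>`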
Code example #10
class GNN(nn.Module):
    def __init__(self, config, state_net=None, out_net=None):
        super(GNN, self).__init__()

        self.config = config
        # hyperparameters and general properties
        self.convergence_threshold = config.convergence_threshold
        self.max_iterations = config.max_iterations
        self.n_nodes = config.n_nodes
        self.state_dim = config.state_dim
        self.label_dim = config.label_dim
        self.output_dim = config.output_dim
        self.state_transition_hidden_dims = config.state_transition_hidden_dims
        self.output_function_hidden_dims = config.output_function_hidden_dims

        # node state initialization
        self.node_state = torch.zeros(*[self.n_nodes, self.state_dim]).to(
            self.config.device)  # (n,d_n)
        self.converged_states = torch.zeros(
            *[self.n_nodes, self.state_dim]).to(self.config.device)
        # state and output transition functions
        if state_net is None:
            self.state_transition_function = StateTransition(
                self.state_dim,
                self.label_dim,
                mlp_hidden_dim=self.state_transition_hidden_dims,
                activation_function=config.activation)
        else:
            self.state_transition_function = state_net
        if out_net is None:
            self.output_function = MLP(self.state_dim,
                                       self.output_function_hidden_dims,
                                       self.output_dim)
        else:
            self.output_function = out_net

        self.graph_based = self.config.graph_based

    def reset_parameters(self):

        self.state_transition_function.mlp.init()
        self.output_function.init()

    def forward(self,
                edges,
                agg_matrix,
                node_labels,
                node_states=None,
                graph_agg=None):
        n_iterations = 0
        # convergence loop
        # state initialization
        node_states = self.node_state if node_states is None else node_states

        while n_iterations < self.max_iterations:
            new_state = self.state_transition_function(node_states,
                                                       node_labels, edges,
                                                       agg_matrix)
            n_iterations += 1
            # convergence condition
            with torch.no_grad():
                distance = torch.norm(
                    input=new_state - node_states,
                    dim=1)  # checked, they are the same (in cuda, some bug)

                check_min = distance < self.convergence_threshold
            node_states = new_state

            if check_min.all():
                break

        states = node_states
        self.converged_states = states
        if self.graph_based:
            states = torch.matmul(graph_agg, node_states)

        output = self.output_function(states)

        return output, n_iterations
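
The forward pass above is the classic fixed-point recipe: apply the state-transition function until every node's state moves by less than convergence_threshold (or max_iterations is hit), keeping the distance check outside the autograd graph. A stripped-down, self-contained sketch of that loop, with a toy contraction standing in for the learned transition:

import torch

def iterate_to_convergence(step_fn, state, threshold=1e-4, max_iterations=50):
    """Iterate state = step_fn(state) until the per-row L2 change is small."""
    n_iterations = 0
    while n_iterations < max_iterations:
        new_state = step_fn(state)
        n_iterations += 1
        with torch.no_grad():  # the convergence check carries no gradient
            distance = torch.norm(new_state - state, dim=1)
        state = new_state
        if (distance < threshold).all():
            break
    return state, n_iterations

# Toy contraction x -> 0.5 * x converges to the zero state.
final_state, iters = iterate_to_convergence(lambda s: 0.5 * s, torch.ones(4, 3))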
Code example #11
File: pylpgnn.py Project: mtiezzi/lpgnn
class LPGNN(nn.Module):
    def __init__(
        self,
        config,
        state_net=None,
        out_net=None,
        nodewise_lagrangian_flag=True,
        dimwise_lagrangian_flag=False,
    ):
        super(LPGNN, self).__init__()

        self.config = config

        # hyperparameters and general properties
        self.n_nodes = config.n_nodes
        self.state_dim = config.state_dim
        self.label_dim = config.label_dim
        self.output_dim = config.output_dim
        self.state_transition_hidden_dims = config.state_transition_hidden_dims
        self.output_function_hidden_dims = config.output_function_hidden_dims

        # TODO add dims of layered LPGNN
        self.λ_n = self.n_nodes if nodewise_lagrangian_flag else 1
        self.λ_d = self.state_dim if dimwise_lagrangian_flag else 1

        self.λ_list = nn.ParameterList()
        self.state_variable_list = nn.ParameterList()
        self.diffusion_constraint_list = []
        self.node_state_list = []
        # a ModuleList so the per-layer parameters are registered and moved to the GPU
        self.state_transition_function_list = nn.ModuleList()
        # state constraints

        self.state_constraint_function = utils.get_G_function(
            descr=self.config.state_constraint_function, eps=self.config.eps)

        for l in range(self.config.layers):  # loop over layers
            # defining lagrangian multipliers
            self.λ_list.append(
                nn.Parameter(torch.zeros(*[self.λ_n, self.λ_d]),
                             requires_grad=True))  # (n,1) by default ,

            self.state_variable_list.append(
                nn.Parameter(torch.zeros(*[self.n_nodes, self.state_dim[l]]),
                             requires_grad=True))  # (n,d_state)

            # adding to lists

            # node state initialization
            # self.node_state_list.append(
            #     torch.zeros(*[self.n_nodes, self.state_dim[l]]).to(self.config.device))  # (n,d_n)
            # state and output transition functions
            if state_net is None:
                # torch.manual_seed(self.config.seed)
                if l == 0:
                    # arc state computation f(l_v, l_n, x_n)
                    input_dim = self.state_dim[0] + 2 * self.label_dim
                else:
                    # f(x_v_l, x_v_l-1, x_n_l-1)
                    input_dim = self.state_dim[l] + 2 * self.state_dim[l - 1]  # + 2 * self.label_dim

                output_dim = self.state_dim[l]
                self.state_transition_function_list.append(
                    StateTransition(
                        input_dim=input_dim,
                        output_dim=output_dim,
                        mlp_hidden_dim=self.state_transition_hidden_dims,
                        activation_function=config.activation))

        if state_net is not None:  # if provided, this must be the full per-layer list of transition nets (TODO)
            self.state_transition_function_list = state_net

        if out_net is None:

            self.output_function = MLP(self.state_dim[-1],
                                       self.output_function_hidden_dims,
                                       self.output_dim)
        else:
            self.output_function = out_net

        self.graph_based = self.config.graph_based

    def reset_parameters(self):
        with torch.no_grad():
            for l in range(self.config.layers):
                self.state_transition_function_list[l].mlp.init()
                nn.init.constant_(self.state_variable_list[l], 0)
                nn.init.constant_(self.λ_list[l], 0)
            self.output_function.init()

    def lagrangian_composition(self, new_state_list):

        # constraint-loss definition (TODO: add tensorboard logging)
        convergence_loss_list = []
        for l in range(self.config.layers):
            constraint = self.state_constraint_function(
                self.state_variable_list[l] - new_state_list[l])
            convergence_loss_list.append(
                torch.mean(torch.mean(self.λ_list[l] * constraint, -1)))
        # self.diffusion_constraint_list.append(torch.mean(torch.mean(self.λ * constraint, -1))) TODO why not working
        # convergence_loss = torch.mean(torch.mean(self.λ * constraint, -1))

        return torch.sum(torch.stack(convergence_loss_list), dim=0)
        # if use_energy_constraint:
        #     total_loss += lpgnn.energy_loss(lpgnn.state_variable, new_state)

    def forward(self,
                edges,
                agg_matrix,
                node_labels,
                node_states=None,
                graph_agg=None):
        # convergence loop
        # state initialization
        node_states = self.state_variable_list[0] if node_states is None else node_states
        new_state_list = []

        for l in range(self.config.layers):
            if l == 0:
                new_layer_state = self.state_transition_function_list[l](
                    node_states, node_labels, edges, agg_matrix, l)
                new_state_list.append(new_layer_state)
            else:
                new_layer_state = self.state_transition_function_list[l](
                    self.state_variable_list[l], new_layer_state, edges,
                    agg_matrix, l)
                new_state_list.append(new_layer_state)
        new_state = new_layer_state
        output = self.output_function(new_state)

        return new_state_list, output
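
For reference, lagrangian_composition computes, per layer, mean(λ_l * G(x_l - f_l(...))) and sums over layers; the node-wise multipliers of shape (n, 1) broadcast over the state dimension. A toy illustration of that broadcasting with G taken as the identity (the real G comes from utils.get_G_function):

import torch

lam = torch.randn(5, 1)        # node-wise multipliers, shape (n, 1)
state_var = torch.randn(5, 3)  # free state variables, shape (n, d)
new_state = torch.randn(5, 3)  # output of the transition function
constraint = state_var - new_state                          # G = identity, illustrative
layer_loss = torch.mean(torch.mean(lam * constraint, -1))   # (5,1)*(5,3) broadcasts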
Code example #12
File: pygnn.py Project: FaezehAmou2020/torch_gnn
class GNN(nn.Module):
    def __init__(self, config, state_net=None, out_net=None):
        super(GNN, self).__init__()

        self.config = config
        # hyperparameters and general properties
        self.convergence_threshold = config.convergence_threshold
        self.max_iterations = config.max_iterations
        self.n_nodes = config.n_nodes
        self.state_dim = config.state_dim
        self.label_dim = config.label_dim
        self.output_dim = config.output_dim
        self.state_transition_hidden_dims = config.state_transition_hidden_dims
        self.output_function_hidden_dims = config.output_function_hidden_dims

        # node state initialization
        self.node_state = torch.zeros(*[self.n_nodes, self.state_dim]).to(
            self.config.device)  # (n,d_n)
        self.converged_states = torch.zeros(
            *[self.n_nodes, self.state_dim]).to(self.config.device)
        # state and output transition functions
        if state_net is None:
            self.state_transition_function = StateTransition(
                self.state_dim,
                self.label_dim,
                mlp_hidden_dim=self.state_transition_hidden_dims,
                activation_function=config.activation)

            # self.state_transition_function = GINTransition(self.state_dim, self.label_dim,
            #                                                 mlp_hidden_dim=self.state_transition_hidden_dims,
            #                                                 activation_function=config.activation)

            # self.state_transition_function = GINPreTransition(self.state_dim, self.label_dim,
            #                                               mlp_hidden_dim=self.state_transition_hidden_dims,
            #                                               activation_function=config.activation)

        else:
            self.state_transition_function = state_net
        if out_net is None:
            self.output_function = MLP(self.state_dim,
                                       self.output_function_hidden_dims,
                                       self.output_dim)
        else:
            self.output_function = out_net

        self.graph_based = self.config.graph_based

    def reset_parameters(self):

        self.state_transition_function.mlp.init()
        self.output_function.init()

    def forward(self,
                edges,
                agg_matrix,
                node_labels,
                node_states=None,
                graph_agg=None):
        n_iterations = 0
        # convergence loop
        # state initialization
        node_states = self.node_state if node_states is None else node_states

        # while n_iterations < self.max_iterations:
        #     with torch.no_grad():  # without memory consumption
        #         new_state = self.state_transition_function(node_states, node_labels, edges, agg_matrix)
        #     n_iterations += 1
        #     # convergence condition
        #
        #     # if torch.dist(node_states, new_state) < self.convergence_threshold:  # maybe uses broadcst?
        #     #     break
        #     # with torch.no_grad():
        #         # distance = torch.sqrt(torch.sum((new_state - node_states) ** 2, 1) + 1e-20)
        #     distance = torch.norm(input=new_state - node_states,
        #                           dim=1)  # checked, they are the same (in cuda, some bug)
        #     #
        #     # diff =torch.norm(input=new_state - node_states, dim=1) -  torch.sqrt(torch.sum((new_state - node_states) ** 2, 1) )
        #
        #     check_min = distance < self.convergence_threshold
        #     node_states = new_state
        #
        #     if check_min.all():
        #         break
        # node_states = self.state_transition_function(node_states, node_labels, edges, agg_matrix) # one more to propagate gradient only on last

        while n_iterations < self.max_iterations:
            new_state = self.state_transition_function(node_states,
                                                       node_labels, edges,
                                                       agg_matrix)
            n_iterations += 1
            # convergence condition
            with torch.no_grad():
                # distance = torch.sqrt(torch.sum((new_state - node_states) ** 2, 1) + 1e-20)
                distance = torch.norm(
                    input=new_state - node_states,
                    dim=1)  # checked, they are the same (in cuda, some bug)

                check_min = distance < self.convergence_threshold
            node_states = new_state

            if check_min.all():
                break

        states = node_states
        self.converged_states = states
        if self.graph_based:
            states = torch.matmul(graph_agg, node_states)

        output = self.output_function(states)

        return output, n_iterations
Code example #13
File: Main.py Project: lukastencer/RNNpy
iny = data[1]
 
data = make_blobs(n_features=2,centers=2,n_samples=10)
inX = data[0]
iny = data[1]

for _ in xrange(50):

    X, tX, y, ty = train_test_split(inX,iny,test_size=0.33)
    
    clf1 = LogisticRegression()
    
    clf2 = LogisticRegressionNet(optimize='fmin_bfgs')
    #clf2 = LogisticRegressionBin(optimize='fmin_bfgs')
    
    clf3 = MLP(optimize='fmin_bfgs')
    
#     X = [[0],[2]]
#     y = [0,1]
#     params = [0,1]
#      
#     X = np.asarray(X)
#     y = np.asarray(y)
#     params = np.asarray(params)
    
    
    clf1.fit(X, y)
    
#     X = np.asarray([[3, 5],[3,5],[3,5]])
#     params = np.asarray([2, 1])
#     y = np.asarray([1,1,1])
Code example #14
import torch
from net import MLP, CNN
from torchvision import datasets, transforms
from sklearn.metrics import multilabel_confusion_matrix

test_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST('./fashionmnist_data/', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=1, shuffle=True)
model = MLP()
device = torch.device('cpu')
model = model.to(device)
model.load_state_dict(torch.load('output/MLP.pt'))
model.eval()
pres = []
labels = []
for data, target in test_loader:
    data, target = data.to(device), target.to(device)
    output = model(data)
    pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
    pres.append(pred[0][0].item())
    labels.append(target[0].item())
mcm = multilabel_confusion_matrix(labels, pres)
print(mcm)
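
multilabel_confusion_matrix returns an array of shape (n_classes, 2, 2), one [[TN, FP], [FN, TP]] matrix per class; a short sketch of reading per-class precision and recall out of it:

# mcm[k] is [[TN, FP], [FN, TP]] for class k
for k, ((tn, fp), (fn, tp)) in enumerate(mcm):
    precision = tp / (tp + fp) if (tp + fp) else 0.0
    recall = tp / (tp + fn) if (tp + fn) else 0.0
    print(k, precision, recall)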
Code example #15
File: infer.py Project: nywgithub/Fashion-MNIST-1
import torch
import cv2
from PIL import Image
from net import MLP, CNN
from torchvision import datasets, transforms

model = MLP()  # CNN() is the other option; use whichever you trained earlier
device = torch.device('cpu')  # run inference on the CPU
model = model.to(device)
model.load_state_dict(torch.load('output/MLP.pt'))
model.eval()  # important: tells the model we are evaluating, not training
# -------- model loading ends here; inference starts below --------
print("------- model loaded successfully ----------")
class_dic = {0: "T-shirt", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat",
             5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle boot"}
data_transforms = transforms.Compose([
    # transforms.ToTensor() converts a PIL image (HWC, range [0, 255]) to a
    # torch.Tensor (CHW, range [0.0, 1.0])
    transforms.ToTensor(),
    transforms.Normalize((0.1307,), (0.3081,))
    ])
img = Image.open('test_image.jpg')  # the image to run inference on
image = data_transforms(img)  # preprocess: convert to tensor and normalize
image = image.unsqueeze(0)  # [1, 28, 28] -> [1, 1, 28, 28]
output = model(image.to(device))
pred = output.argmax(dim=1, keepdim=True)
cls = pred.item()  # an index in 0..9, one of the 10 classes
print("prediction:", class_dic[cls])
Code example #16
    correct = 0
    with torch.no_grad():
        for data, target in test_loader:
            data, target = data.cuda(), target.cuda().long()
            output = model(data)
            test_loss += F.nll_loss(output, target, reduction='sum').item()
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    return test_loss


Z_ = []
for i in trange(X.shape[0]):
    Z_ += [[]]
    for j in trange(Y.shape[0]):
        convex_hull_weights = sum_weights([
            multiply_weights(weight_dict_1, X[i, j]),
            multiply_weights(weight_dict_2, Y[i, j]),
            multiply_weights(weight_dict_3, Z[i, j])
        ])

        net = MLP().cuda()
        net.load_state_dict(convex_hull_weights)
        Z_[i].append(test(net, test_loader))

np.save('./plots/X_mnist_random', X)
np.save('./plots/Y_mnist_random', Y)
np.save('./plots/Z_mnist_random', Z_)
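
The helpers multiply_weights and sum_weights are not shown in this snippet; a plausible minimal version operating on PyTorch state dicts (an assumption about their behavior, not the repository's code):

def multiply_weights(state_dict, scalar):
    # scale every tensor in a state dict by a scalar
    return {k: v * scalar for k, v in state_dict.items()}

def sum_weights(state_dicts):
    # element-wise sum of several state dicts with identical keys
    return {k: sum(sd[k] for sd in state_dicts) for k in state_dicts[0]}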
Code example #17
            pred = output.max(1, keepdim=True)[1]
            correct += pred.eq(target.view_as(pred)).sum().item()

    test_loss /= len(test_loader.dataset)
    return test_loss


global_vals = []
for i in trange(3):
    # natural
    weight_dict = torch.load('model_weights/mlp_weights_{}.pth'.format(i),
                             map_location='cpu')
    # random
    # weight_dict = torch.load('model_weights/mlp_random_weights_{}.pth'.format(i+42),
    #                          map_location='cpu')
    net = MLP().cuda()
    net.load_state_dict(weight_dict)
    I_w = test(net, test_loader)

    vals = []
    for tick in trange(20):
        weight_dict_delta, delta = deepcopy(weight_dict), deepcopy(weight_dict)

        norm = 0
        for key in list(weight_dict_delta.keys())[-2:]:
            delta[key] = torch.randn(delta[key].size())
            norm += delta[key].norm().pow(2)
        norm = norm.pow(0.5)

        I_w_delta, r = I_w, 0.
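
The source file is truncated after this line. For context, a hedged sketch of how such a flatness probe commonly continues (illustrative only, not the repository's code): grow the radius r of the perturbation along the random direction until the test loss moves noticeably away from I_w.

# Illustrative continuation only: the original code is cut off above.
while abs(I_w_delta - I_w) < 0.1:  # grow the radius until the loss changes
    r += 0.5
    for key in list(weight_dict_delta.keys())[-2:]:
        weight_dict_delta[key] = weight_dict[key] + (r / norm) * delta[key]
    net.load_state_dict(weight_dict_delta)
    I_w_delta = test(net, test_loader)
vals.append(r)  # record the radius at which the loss degraded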
Code example #18
File: train_mnist_mlp.py Project: eekzmic/dl_exp_cv
def main():
    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--batchsize', '-b', type=int, default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch', '-e', type=int, default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency', '-f', type=int, default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu', '-g', type=int, default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out', '-o', default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume', '-r', default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit', '-u', type=int, default=1000,
                        help='Number of units')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = MLP(args.unit, 10)
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam()
    optimizer.setup(model)

    # Load the MNIST dataset
    train, test = chainer.datasets.get_mnist()

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test, args.batchsize,
                                                 repeat=False, shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    trainer.extend(extensions.dump_graph('main/loss'))

    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
    trainer.extend(extensions.snapshot_object(model, 'model_{.updater.epoch}'),
                   trigger=(frequency, 'epoch'))


    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch', file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch', file_name='accuracy.png'))

    trainer.extend(extensions.PrintReport(
        ['epoch', 'main/loss', 'validation/main/loss',
         'main/accuracy', 'validation/main/accuracy', 'elapsed_time']))

    trainer.extend(extensions.ProgressBar())

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
Code example #20
def main():
    db = sqlite3.connect('race.db')
    c = db.cursor()
    for i in range(10, 18):
        sql = "select * from inputdata where headCount = " + str(
            i) + " and race_id <= 27605 order by race_id,order_of_finish;"
        c.execute(sql)
        inputline = []  # one row of input features
        inputdata = []  # the completed input data for one race
        inputdataall = []
        count = 0
        label = []
        labels = []
        printflag = 1
        for row in c:
            row = list(row)
            if (isinstance(row[47], int) == False):
                row[47] = 18
            if (isinstance(row[48], int) == False):
                row[48] = 18
            if (count % i == 0):
                noneflag = 0
            for j in range(53):
                if (row[j] == None):
                    noneflag = 1
            inputline.append(row[3])
            inputline.append(row[35])
            try:
                inputline.append(row[46] / row[38])
            except:
                inputline.append(0)
            inputline.append(row[39])
            inputline.append(row[41])
            inputline.append(row[45])
            inputline.append(row[47])
            inputline.append(row[48])
            inputline.append(row[49])
            inputdata.append(inputline)
            inputline = []
            label.append(row[2])
            ##            if (count % i == 0):
            ##                label.append(0)
            ##                wintime = row[53]
            ##            else:
            ##                label.append(row[53] - wintime)
            if (count % i == i - 1):
                #            inputline.insert(0, label)
                if (noneflag == 0):
                    #                    dmean = np.array(inputdata).mean(axis=0, keepdims=True)
                    #                    dstd = np.std(inputdata, axis=0, keepdims=True)
                    #                    inputdata = (inputdata - dmean) / dstd
                    inputdata = scipy.stats.zscore(inputdata)
                    # handle zero-variance columns (zscore yields NaN there)
                    inputdata[np.isnan(inputdata)] = 0
                    inputdataall.extend(inputdata)
                    #                    lmean = np.mean(np.array(label),keepdims=True)
                    #                    lstd = np.std(label,keepdims=True)
                    horcenum = np.array([row[1]] * len(label))
                    labelnp = np.array(label) / horcenum
                    ##                    labelnp = np.array(label)
                    labels.extend(labelnp)
                inputdata = []
                label = []
            count = count + 1
        inputdataall2 = np.empty((len(inputdataall), len(inputdataall[0])))
        inputdataall2[:] = inputdataall
        inputdataall = inputdataall2
        #    print(inputdata2)
        #    print(inputdata)
        #    X = inputdata[:, 1:].astype(np.float32)
        if (i == 10):
            allX = np.array(inputdataall, dtype='float32')
            Y = np.array(labels, dtype='float32')
            #    le = LabelEncoder()
            #    allY = le.fit_transform(Y).astype(np.float32)
            allY = Y.astype(np.float32)
        else:
            X = np.array(inputdataall, dtype='float32')
            Y = np.array(labels, dtype='float32')
            #        le = LabelEncoder()
            #        Y = le.fit_transform(Y).astype(np.float32)
            Y = Y.astype(np.float32)
            allX = np.vstack((allX, X))
            allY = np.hstack((allY, Y))


#    print(X)
#    print(X[0])
#    print("-------")
#    print(Y[0].dtype)
#    print(Y[0])
#    print(Y[0])
#    Y=Y[:, None]

#    threshold = np.int32(len(inputdata) / 10 * 9)
#    train = np.array(inputdata[0:threshold],dtype=np.float32)
#    test = np.array(inputdata[threshold:],dtype=np.float32)
#    train = np.array(inputdata[0:threshold])
#    train = train.astype(np.float32)
#    test = np.array(inputdata[threshold:])
#    test = test.astype(np.float32)
    train, test = datasets.split_dataset_random(
        datasets.TupleDataset(allX, allY), int(inputdataall.shape[0] * .7))

    # dump (nearly) everything into the training split
    train, test2 = datasets.split_dataset_random(
        datasets.TupleDataset(allX, allY),
        int(inputdataall.shape[0] * .999999))

    parser = argparse.ArgumentParser(description='Chainer example: RACE')
    parser.add_argument('--batchsize',
                        '-b',
                        type=int,
                        default=100,
                        help='Number of images in each mini-batch')
    parser.add_argument('--epoch',
                        '-e',
                        type=int,
                        default=20,
                        help='Number of sweeps over the dataset to train')
    parser.add_argument('--frequency',
                        '-f',
                        type=int,
                        default=-1,
                        help='Frequency of taking a snapshot')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--out',
                        '-o',
                        default='result',
                        help='Directory to output the result')
    parser.add_argument('--resume',
                        '-r',
                        default='',
                        help='Resume the training from snapshot')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()

    print('GPU: {}'.format(args.gpu))
    print('# unit: {}'.format(args.unit))
    print('# Minibatch-size: {}'.format(args.batchsize))
    print('# epoch: {}'.format(args.epoch))
    print('')

    # Set up a neural network to train
    # Classifier reports softmax cross entropy loss and accuracy at every
    # iteration, which will be used by the PrintReport extension below.
    model = MLP(args.unit, 1)
    if args.gpu >= 0:
        # Make a specified GPU current
        chainer.cuda.get_device_from_id(args.gpu).use()
        model.to_gpu()  # Copy the model to the GPU

    # Setup an optimizer
    optimizer = chainer.optimizers.Adam(weight_decay_rate=0.01)
    optimizer.setup(model)

    # The MNIST loading from the original example is unused here
    # train, test = chainer.datasets.get_mnist()

    train_iter = chainer.iterators.SerialIterator(train, args.batchsize)
    test_iter = chainer.iterators.SerialIterator(test,
                                                 args.batchsize,
                                                 repeat=False,
                                                 shuffle=False)

    # Set up a trainer
    updater = training.StandardUpdater(train_iter, optimizer, device=args.gpu)
    trainer = training.Trainer(updater, (args.epoch, 'epoch'), out=args.out)

    # Evaluate the model with the test dataset for each epoch
    trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu))

    trainer.extend(extensions.dump_graph('main/loss'))

    frequency = args.epoch if args.frequency == -1 else max(1, args.frequency)
    trainer.extend(extensions.snapshot(), trigger=(frequency, 'epoch'))
    trainer.extend(extensions.snapshot_object(model, 'model_{.updater.epoch}'),
                   trigger=(frequency, 'epoch'))

    trainer.extend(extensions.LogReport())
    if extensions.PlotReport.available():
        trainer.extend(
            extensions.PlotReport(['main/loss', 'validation/main/loss'],
                                  'epoch',
                                  file_name='loss.png'))
        trainer.extend(
            extensions.PlotReport(
                ['main/accuracy', 'validation/main/accuracy'],
                'epoch',
                file_name='accuracy.png'))

    trainer.extend(
        extensions.PrintReport([
            'epoch', 'main/loss', 'validation/main/loss', 'main/accuracy',
            'validation/main/accuracy', 'elapsed_time'
        ]))

    trainer.extend(extensions.ProgressBar())

    if args.resume:
        chainer.serializers.load_npz(args.resume, trainer)

    trainer.run()
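
The per-race normalization above leans on a scipy detail: scipy.stats.zscore returns NaN for zero-variance columns (division by a zero standard deviation), and the script then zeroes those entries. A compact illustration:

import numpy as np
import scipy.stats

block = np.array([[1.0, 5.0],
                  [2.0, 5.0],
                  [3.0, 5.0]])     # the second column has zero variance
z = scipy.stats.zscore(block)     # the second column comes back as NaN
z[np.isnan(z)] = 0                # zero-variance features are zeroed out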
Code example #21
def main():
    # the parser defines the training/testing options; if an argument has a
    # default it is used as-is, and you can change it here or override it on
    # the command line
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--model', default='CNN',  # choose the model to train
                        help='CNN or MLP')
    parser.add_argument('--batch-size', type=int, default=128, metavar='N',
                        help='input batch size for training (default: 128)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=1, metavar='S',
                        help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--save-model', action='store_true', default=True,
                        help='For Saving the current Model')
    parser.add_argument('--save_dir', default='output/',  # model save path
                        help='directory for saved models')
    args = parser.parse_args()
    # torch.cuda.is_available() checks whether a usable GPU exists; otherwise train on the CPU
    use_cuda = not args.no_cuda and torch.cuda.is_available()
    print(use_cuda)
    torch.manual_seed(args.seed)

    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST('./fashionmnist_data/', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=args.batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.FashionMNIST('./fashionmnist_data/', train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ])),
        batch_size=args.test_batch_size, shuffle=True, **kwargs)

    writer = SummaryWriter()  # records training/testing info: loss, accuracy, etc.
    if args.model=='CNN':
        model = CNN().to(device)#CNN() or MLP
    if args.model=='MLP':
        model = MLP().to(device)#CNN() or MLP
    optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)  # the optimizer holds references to all parameters, each of which carries its gradient
    scheduler = optim.lr_scheduler.MultiStepLR(optimizer, milestones=[12, 24], gamma=0.1)  # decay the learning rate at the milestone epochs
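    # note: scheduler.step() is never called in the loop below, so the milestone decay never takes effect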
    model.train()
    log_loss=0
    log_acc=0
    for epoch in range(1, args.epochs + 1):
        for batch_idx, (data, target) in enumerate(train_loader):
            data, target = data.to(device), target.to(device)
            optimizer.zero_grad()
            output = model(data)
            loss = F.nll_loss(output, target)  # negative log likelihood loss(nll_loss), sum up batch cross entropy
            loss.backward()
            optimizer.step()  # 根据parameter的梯度更新parameter的值
            # every args.log_interval batches: print training progress, run a validation pass, and log the validation accuracy to the writer
            if batch_idx % args.log_interval == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(data), len(train_loader.dataset),
                           100. * batch_idx / len(train_loader), loss.item()))
                # validation pass below
                model.eval()
                test_loss = 0
                correct = 0
                with torch.no_grad():  # no gradient computation needed
                    for data, target in test_loader:
                        data, target = data.to(device), target.to(device)
                        output = model(data)
                        test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
                        pred = output.argmax(dim=1, keepdim=True)  # get the index of the max log-probability
                        correct += pred.eq(target.view_as(pred)).sum().item()
                test_loss /= len(test_loader.dataset)
                writer.add_scalars('loss', {'train_loss':loss,'val_loss':test_loss},global_step=log_acc)
                writer.add_scalar('val_accuracy', correct / len(test_loader.dataset), global_step=log_acc)
                log_acc += 1
                print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
                    test_loss, correct, len(test_loader.dataset),
                    100. * correct / len(test_loader.dataset)))
                model.train()
    if (args.save_model):  # save the trained model
        if not os.path.exists(args.save_dir):
            os.makedirs(args.save_dir)
        torch.save(model.state_dict(), os.path.join(args.save_dir,args.model+".pt"))
    writer.add_graph(model, (data,))  # save the model structure as a graph to trace data flow
    writer.close()
Code example #22
File: DAgger.py Project: littlelid/CS294-homwork
                if steps % 100 == 0:
                    print("%i/%i" % (steps, max_test_steps))
            if steps >= max_test_steps:
                break
        print('total_reward:', totalr)

    def predict(self, X):
        #print("\t", X.shape)
        y_pred = self.sess.run(self.net.pred, feed_dict={self.net.X: X})
        return y_pred

    def init_all_variables(self):
        if not self.init_variables:
            self.init_variables = True
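            # note: tf.initialize_all_variables() is deprecated; newer TF uses tf.global_variables_initializer()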
            self.sess.run(tf.initialize_all_variables())

    def save(self):
        pass


if __name__ == '__main__':
    #dagger = DAgger()
    # empirically, the best total number of epochs seems to be around 50
    env = gym.make('Hopper-v1')
    X_dim = env.observation_space.shape[0]
    y_dim = env.action_space.shape[0]
    net = MLP(X_dim, y_dim, [50, 100])
    dagger = DAgger(env, net)
    dagger.train_dagger(num_dagger=2)
    dagger.test()
Code example #23
def main():

    parser = argparse.ArgumentParser(description='Chainer example: MNIST')
    parser.add_argument('--gpu',
                        '-g',
                        type=int,
                        default=-1,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--image',
                        '-i',
                        type=str,
                        default="",
                        help='pass to input image')
    parser.add_argument('--model',
                        '-m',
                        default='my_mnist.model',
                        help='path to the training model')
    parser.add_argument('--unit',
                        '-u',
                        type=int,
                        default=1000,
                        help='Number of units')
    args = parser.parse_args()
    model = MLP(args.unit, 1)

    if args.gpu >= 0:
        model.to_gpu(chainer.cuda.get_device_from_id(args.gpu).use())
    serializers.load_npz(args.model, model)
    #    try:
    #        img = Image.open(args.image).convert("L").resize((28,28))
    #    except :
    #        print("invalid input")
    #        return
    #    img_array = model.xp.asarray(img,dtype=model.xp.float32).reshape(1,784)

    ##    df = pd.read_csv('test1.csv')
    db = sqlite3.connect('race.db')
    c = db.cursor()

    win = []
    lose = []
    quinella = []
    place = []
    none_flag = 0
    for race_id in range(27606, 34440):
        #    for race_id, sdf in df.groupby('race_id'):
        if (race_id % 100 == 1):
            print("finished ", race_id - 1)
        df = pd.read_sql("select horse_number,age,winrate,eps,odds,weight,preOOF,pre2OOF,preLastPhase, "\
                     "payoff_quinella,payoff_place, race_id "\
                     "from (select "\
                     "inputdata.race_id, "\
                     "inputdata.order_of_finish, "\
               "inputdata.horse_number horse_number, "\
               "age, "\
               "case when enterTimes != 0 then winRun/enterTimes else 0 end as winrate, "\
               "eps, "\
               "odds, "\
               "weight, "\
               "preOOF, "\
               "pre2OOF, "\
               "preLastPhase, "\
               "pay1.payoff payoff_quinella, "\
                     "pay2.payoff payoff_place "\
                  "from inputdata "\
                  "inner join payoff pay1 "\
                  "	on pay1.ticket_type = 3 and pay1.race_id = inputdata.race_id "\
                  "left join payoff pay2"\
                     "  on pay2.ticket_type = 1"\
                     "  and pay2.horse_number = inputdata.horse_number"\
                     "  and pay2.race_id = inputdata.race_id"\
                  ") as a "\
                     "where a.race_id = "+str(race_id)+" "\
                     "order by a.race_id,order_of_finish;", db)
        #    img_array=df.values.reshape(1,-1)[~np.isnan(df.values.reshape(1,-1))]
        #    img_array = model.xp.asarray(img_array, dtype=model.xp.float32).reshape(1,-1)
        arr = df.values

        # for vacant race ids (check disabled)
        #        if(len(arr)==0):
        #            continue

        for i in range(len(arr)):
            if ((isinstance(arr[i][6], int)
                 or isinstance(arr[i][6], float)) == False):
                arr[i][6] = 18
            if ((isinstance(arr[i][7], int)
                 or isinstance(arr[i][7], float)) == False):
                arr[i][7] = 18
        arr = np.array(arr, dtype=float)
        # handle None values
        for i in range(len(arr)):
            if (np.isnan(arr[i][10])):
                arr[i][10] = 0
        copy_arr = arr
        winner = arr[0][0]
        second = arr[1][0]
        winner_odds = arr[0][4]
        quinella_odds = arr[0][9]

        # skip the race if any None/NaN remains
        for i in range(len(arr)):
            for j in range(len(arr[0])):
                if arr[i][j] is None:
                    none_flag = 1
                elif (math.isnan(float(arr[i][j]))):
                    none_flag = 1
        if (none_flag):
            none_flag = 0
            continue
        arr = arr.astype(np.float32)
        arr = scipy.stats.zscore(arr)
        # handle zero-variance columns (zscore yields NaN there)
        arr[np.isnan(arr)] = 0
        res = []
        for i in range(len(arr)):
            img_array = arr[i][0:9]
            img_array = model.xp.asarray(img_array,
                                         dtype=model.xp.float32).reshape(
                                             1, -1)
            with chainer.using_config('train',
                                      False), chainer.no_backprop_mode():
                result = model.predict(img_array)
            res.append(result.data[0])
    #        print("predict:", model.xp.argmax(result.data))
    #        arg_sorted = model.xp.argsort(result.data)
    #        arg_sorted = arg_sorted [:, ::-1]
    #        print(arg_sorted[:, :3])
        x = np.array(res).reshape((1, -1))[0]
        # bet only when the predicted winner clearly stands out from the rest
        if ((x[np.argsort(x)[1]] - x[np.argsort(x)[0]]) < 0.001):
            continue
#        for i in range(len(x)):
#            print (np.argsort(x)[i]+1,"-", x[np.argsort(x)[i]])

# bet only when the first and second picks clearly stand out
#        if ((x[np.argsort(x)[2]] - x[np.argsort(x)[1]]) < 0.001):
#            continue

# skip when the odds on the pick are unattractive
        continue_flag = 0
        for j in range(len(copy_arr)):
            if (copy_arr[j][0] == np.argsort(x)[0] + 1):
                if (copy_arr[j][4] >= 50 or copy_arr[j][4] < 2):
                    continue_flag = 1
        if (continue_flag == 1):
            continue

        if (np.argsort(x)[0] + 1 == winner):
            win.append(winner_odds)


#            print(race_id,np.argsort(x)[0]+1,winner_odds)
        else:
            win.append(0)
            for j in range(len(copy_arr)):
                if (copy_arr[j][0] == np.argsort(x)[0] + 1):
                    lose.append(copy_arr[j][4])

        if (((np.argsort(x)[0] + 1 == winner) and
             (np.argsort(x)[1] + 1 == second))
                or ((np.argsort(x)[0] + 1 == second) and
                    (np.argsort(x)[1] + 1 == winner))):
            quinella.append(quinella_odds)
        else:
            quinella.append(0)
        for i in range(len(arr)):
            if (np.argsort(x)[0] + 1 == copy_arr[i][0]):
                place.append(copy_arr[i][10])
    print(win)
    print(lose)
    print(place)
    print(quinella)
    print("単勝")
    print("回収率 = ",
          sum(win) / len(win) * 100, " 的中率 = ",
          (1 - win.count(0) / len(win)) * 100)
    print("\n複勝")
    print("回収率 = ",
          sum(place) / len(place), " 的中率 = ",
          (1 - place.count(0) / len(place)) * 100)
    print("\n馬連")
    print("回収率 = ",
          sum(quinella) / len(quinella), " 的中率 = ",
          (1 - quinella.count(0) / len(quinella)) * 100)
Code example #24
File: train.py Project: luyiyun/ResBottleNet
def main():

    # ----- load different data and pick the loss/metrics according to the data argument -----
    if config.args.data == 'brca':
        rna = RnaData.predicted_data(config.brca_cli, config.brca_rna,
                                     {'PAM50Call_RNAseq': 'pam50'})
        rna.transform(tf.LabelMapper(config.brca_label_mapper))
        out_shape = len(config.brca_label_mapper)
        criterion = nn.CrossEntropyLoss()
        scorings = (mm.Loss(), mm.Accuracy(), mm.BalancedAccuracy(),
                    mm.F1Score(average='macro'), mm.Precision(average='macro'),
                    mm.Recall(average='macro'), mm.ROCAUC(average='macro'))
    elif config.args.data == 'survival':
        if os.path.exists('./DATA/temp_pan.pth'):
            rna = RnaData.load('./DATA/temp_pan.pth')
        else:
            rna = RnaData.survival_data(config.pan_cli, config.pan_rna,
                                        '_OS_IND', '_OS')
        out_shape = 1
        if config.args.loss_type == 'cox':
            criterion = NegativeLogLikelihood()
        elif config.args.loss_type == 'svm':
            criterion = SvmLoss(rank_ratio=config.args.svm_rankratio)
        scorings = (mm.Loss(), mm.CIndex())
    rna.transform(tf.ZeroFilterCol(0.8))
    rna.transform(tf.MeanFilterCol(1))
    rna.transform(tf.StdFilterCol(0.5))
    norm = tf.Normalization()
    rna.transform(norm)

    # ----- build the network and the optimizer -----
    inpt_shape = rna.X.shape[1]
    if config.args.net_type == 'mlp':
        net = MLP(inpt_shape, out_shape, config.args.hidden_num,
                  config.args.block_num).cuda()
    elif config.args.net_type == 'atten':
        net = SelfAttentionNet(inpt_shape, out_shape, config.args.hidden_num,
                               config.args.bottle_num, config.args.block_num,
                               config.args.no_res, config.act,
                               config.args.no_head, config.args.no_bottle,
                               config.args.no_atten,
                               config.args.dropout_rate).cuda()
    elif config.args.net_type == 'resnet':
        net = ResidualNet(inpt_shape, out_shape, config.args.hidden_num,
                          config.args.bottle_num,
                          config.args.block_num).cuda()

    # ----- train the network with cross validation -----
    split_iterator = rna.split_cv(config.args.test_size,
                                  config.args.cross_valid)
    train_hists = []
    test_hists = []
    for split_index, (train_rna, test_rna) in enumerate(split_iterator):
        print('##### save: %s, split: %d #####' %
              (config.args.save, split_index))
        # split a validation set off the training data to decide when to stop
        train_rna, valid_rna = train_rna.split(0.1)
        dats = {
            'train': train_rna.to_torchdat(),
            'valid': valid_rna.to_torchdat(),
        }
        dataloaders = {
            k: data.DataLoader(v, batch_size=config.args.batch_size)
            for k, v in dats.items()
        }
        test_dataloader = data.DataLoader(test_rna.to_torchdat(),
                                          batch_size=config.args.batch_size)
        # reset the parameters before every run to avoid carry-over from earlier training
        net.reset_parameters()
        # train
        optimizer = optim.Adamax(net.parameters(),
                                 lr=config.args.learning_rate)
        lrs = config.lrs(optimizer)
        net, hist = train(
            net,
            criterion,
            optimizer,
            dataloaders,
            epoch=config.args.epoch,
            metrics=scorings,
            l2=config.args.l2,
            standard_metric_index=config.args.standard_metric_index,
            scheduler=lrs)
        # test
        test_res = evaluate(net, criterion, test_dataloader, metrics=scorings)
        # collect the results of the repeated training runs into one DataFrame
        hist = pd.DataFrame(hist)
        hist['split_index'] = split_index
        train_hists.append(hist)
        # keep the test results of each run
        test_res['split_index'] = split_index
        test_hists.append(test_res)
        # save the model trained on each split to its own file
        torch.save(net.state_dict(),
                   os.path.join(config.save_dir, 'model%d.pth' % split_index))
    # save the training results
    train_hists = pd.concat(train_hists)
    train_hists.to_csv(os.path.join(config.save_dir, 'train.csv'))
    # save the test results
    test_hists = pd.DataFrame(test_hists)
    test_hists.to_csv(os.path.join(config.save_dir, 'test.csv'))