def main(args):
    # Create model directory for saving trained models
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    test_dataset = a2d_dataset.A2DDataset(train_cfg, args.dataset_path)
    data_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=1)

    # define and load your model here
    model_1 = net('efficientnet_b7')
    model_1.cuda()
    model_1.load_state_dict(
        torch.load(os.path.join(args.model_path, 'efficientnetb7_F53.8.ckpt')))

    model_2 = net('efficientnet_b7')
    model_2.cuda()
    model_2.load_state_dict(
        torch.load(
            os.path.join(args.model_path, 'efficientnetb7_val_53.7.ckpt')))

    X = np.zeros((len(data_loader), args.num_cls))
    Y = np.zeros((len(data_loader), args.num_cls))
    print(len(data_loader))
    model_1.eval()
    model_2.eval()
    with torch.no_grad():
        for batch_idx, data in enumerate(data_loader):
            # mini-batch
            images = data[0].to(device)
            labels = data[1].type(torch.FloatTensor).to(device)
            # output = model(images).cpu().detach().numpy()
            output_1 = model_1(images)
            # output_1 = (torch.nn.functional.sigmoid(output_1)).cpu().detach().numpy()
            output_2 = model_2(images)
            # output_2 = (torch.nn.functional.sigmoid(output_2)).cpu().detach().numpy()
            output = (output_1 + output_2) / 2
            output = torch.sigmoid(output).cpu().detach().numpy()
            target = labels.cpu().detach().numpy()
            output[output >= 0.4] = 1
            output[output < 0.4] = 0
            X[batch_idx, :] = output
            Y[batch_idx, :] = target

    P = Precision(X, Y)
    R = Recall(X, Y)
    F = F1(X, Y)
    print('Precision: {:.1f} Recall: {:.1f} F1: {:.1f}'.format(
        100 * P, 100 * R, 100 * F))
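# The Precision/Recall/F1 helpers used above are not defined in these
# snippets. A minimal sketch, assuming micro-averaged multi-label metrics
# over the binarized prediction matrix X and target matrix Y:
import numpy as np

def Precision(X, Y):
    tp = np.sum((X == 1) & (Y == 1))
    predicted = np.sum(X == 1)
    return tp / predicted if predicted > 0 else 0.0

def Recall(X, Y):
    tp = np.sum((X == 1) & (Y == 1))
    actual = np.sum(Y == 1)
    return tp / actual if actual > 0 else 0.0

def F1(X, Y):
    p, r = Precision(X, Y), Recall(X, Y)
    return 2 * p * r / (p + r) if (p + r) > 0 else 0.0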
Beispiel #2
def cnfolCollectorProcess(startDateCursor='2018-03-01'):
    zjNet = network.net()

    if zjNet.loadCookie(__ZJ_MAINPAGE__) is None:
        raise network.networkError("__ZJ_MAINPAGE__")

    rf = record.recordfile(__RECORDFILE_ZJ__, __FIELDNAMES__)

    cursor = startDateCursor
    insertCusumError = 0
    lockfile = open('./lockPage_zj', 'w+')
    while cursor:
        lockfile.seek(0)
        lockfile.write(str(cursor))
        lockfile.flush()
        interval = random.uniform(2, 90)
        print(unixtTime2StringTime(time.time()), cursor, interval)
        result = cnfolMessageRequest(zjNet, rf, cursor)
        print(result)
        for status in result:
            insertCusumError += 1
            if status == record.insertStatus.Success:
                insertCusumError = 0
        if insertCusumError == 20:
            raise someError("insertCusumError")
        if cursor == '2018-03-01':
            raise someError("reached the earliest date that has data; all messages fetched")
        cursor = getPreviousDate(cursor)

        time.sleep(interval)
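# Hedged sketches of two helpers the collector relies on but which are not
# shown here: someError (assumed to be a plain Exception subclass) and
# getPreviousDate (assumed to step an ISO 'YYYY-MM-DD' cursor back one day).
import datetime

class someError(Exception):
    pass

def getPreviousDate(dateStr):
    d = datetime.datetime.strptime(dateStr, '%Y-%m-%d').date()
    return (d - datetime.timedelta(days=1)).strftime('%Y-%m-%d')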
Beispiel #3
def cnfolMessageRequest(net, rf, dateCursor='2018-03-01'):
    # reuse the caller's session; it already holds the login cookie
    if net.loadCookie(__ZJ_MAINPAGE__) is None:
        raise network.networkError("__ZJ_MAINPAGE__")

    for i in range(__ERROR_RETRY__):
        print(i, __ZJ_LIVE_ADD__)
        resStr = net.getResponseData(__ZJ_LIVE_ADD__, {'livedate': dateCursor, 'order': 1})
        resList = rf.getHTMLObject(resStr)
        if len(resList) > 0:  # request succeeded; break out of the retry loop
            break
    else:  # retries exhausted without any results
        raise someError("__ERROR_RETRY__:")

    rows = []
    for item in resList:
        content = item[1]
        createdTime = dateCursor + ' ' + item[0] + ':00'
        row = {'contentHASH': rf.getMD5(content), 'id': '',
               'source': __ZJ_TAG__, 'content': content,
               'created_at': createdTime}
        rows.append(row)

    return rf.addARows(rows)
Beispiel #4
def wallstreetCollectorProcess(startTime=1552490575):
    wsNet = network.net()

    if wsNet.loadCookie(__WS_MAINPAGE__) is None:
        raise network.networkError("__WS_MAINPAGE__")

    rf = record.recordfile(__RECORDFILE_WS__, __FIELDNAMES__)

    cursor = startTime
    insertCusumError = 0
    lockfile = open('./lockPage_ws', 'w+')
    while cursor:
        lockfile.seek(0)
        lockfile.write(str(cursor))
        lockfile.flush()
        interval = random.uniform(2, 90)
        print(unixtTime2StringTime(time.time()), cursor, interval)
        result, nextCursor = wallstreetMessageRequest(wsNet, rf, cursor)
        print(result)
        for status in result:
            insertCusumError += 1
            if status == record.insertStatus.Success:
                insertCusumError = 0
        if insertCusumError == 20:
            raise someError("insertCusumError")
        cursor = nextCursor

        time.sleep(interval)
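# unixtTime2StringTime is used for logging throughout these collectors but
# is not defined here; a plausible sketch, assuming epoch seconds rendered
# in local time:
import time

def unixtTime2StringTime(ts):
    return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts))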
Beispiel #5
def __test__():
    dataset = mnist_rotation.DataSet("mnist/train-images.idx3-ubyte",
                                     "mnist/train-labels.idx1-ubyte",
                                     "mnist/t10k-images.idx3-ubyte",
                                     "mnist/t10k-labels.idx1-ubyte")
    X = tf.placeholder(tf.uint8, [None, 42, 42, 1])
    Y = tf.placeholder(tf.uint8, [None, 1])

    images = (tf.cast(X, tf.float32) - 128.0) / 128.0
    labels = tf.cast(Y, tf.int32)

    stn, stn_params = network.net("stn", images, 6)
    #stn = layers.tanh("stn_tanh",fcn0)

    #stn
    images_ = transformer.batch_transformer(images, stn, [42, 42])

    sess = tf.Session()
    stn_saver = tf.train.Saver(stn_params)
    stn_saver.restore(sess, "./transformer_model_stn")

    for i in range(1):
        # sess.run returns ndarrays; bind them to new names so the
        # placeholder-derived tensors `images`/`images_` are not shadowed
        images_np, image_np = sess.run([images, images_],
                                       feed_dict={
                                           (X, Y): dataset.getTrainBatch(BATCH_SIZE)
                                       })
        images_np = images_np * 128 + 128
        image_np = image_np * 128 + 128
        for j in range(BATCH_SIZE):
            cv2.imwrite(str(j) + "_src.jpg", images_np[j])
            cv2.imwrite(str(j) + "_dst.jpg", image_np[j])
Beispiel #6
def sinaCollectorProcess(startPage=1):
    sinaNet = network.net()

    if sinaNet.loadCookie(__SINA_MAINPAGE__) is None:
        raise network.networkError("__SINA_MAINPAGE__")

    rf = record.recordfile(__RECORDFILE_SINA__, __FIELDNAMES__)

    page = startPage
    insertCusumError = 0
    lockfile = open('./lockPage_sina', 'w+')
    while page:
        lockfile.seek(0)
        lockfile.write(str(page))
        lockfile.flush()
        interval = random.uniform(2, 90)
        print(unixtTime2StringTime(time.time()), page, interval)
        result = sinaMessageRequest(sinaNet, rf, page)
        print(result)
        for status in result:
            insertCusumError += 1
            if status == record.insertStatus.Success:
                insertCusumError = 0

        if insertCusumError == 20 and len(sys.argv) <= 2:
            raise someError("insertCusumError")
        if len(sys.argv) == 3 and sys.argv[2] == 'test':  # length check first avoids IndexError
            if insertCusumError >= 10:
                raise someError("test error: insertCusumError:", insertCusumError, "page:", page)
        page += 1

        time.sleep(interval)
Beispiel #7
def main(args):
    # Create model directory for saving trained models
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    test_dataset = a2d_dataset.A2DDataset_test(test_cfg, args.dataset_path)
    data_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=1)

    # define and load pre-trained model
    model = net('se_resnext101')
    model.cuda()
    model.load_state_dict(
        torch.load(
            os.path.join(args.model_path,
                         'seresnext_mode_train_56.2_57.8.ckpt')))

    results = np.zeros((len(data_loader), args.num_cls))
    model.eval()

    # prediction and saving
    with torch.no_grad():
        for batch_idx, data in enumerate(data_loader):
            # mini-batch
            images = data.to(device)
            output = model(images)
            output = torch.sigmoid(output).cpu().detach().numpy()
            output[output >= 0.4] = 1
            output[output < 0.4] = 0
            results[batch_idx, :] = output
    with open('results_netid.pkl', 'wb') as f:
        pickle.dump(results, f)
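# Usage note: the predictions saved above can be restored for later
# scoring (file name taken from the snippet; adjust as needed):
import pickle

with open('results_netid.pkl', 'rb') as f:
    results = pickle.load(f)
print(results.shape)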
Beispiel #8
    def Choose_Model(self, Model_index):
        # choose model structure
        if self.model_name == 'Net':
            from network import Net as net
        elif self.model_name == 'Net_new3':
            from network import Net_new3 as net
        elif self.model_name == 'Net_new4':
            from network import Net_new4 as net
        else:
            raise Exception('error: undefined Model_name')
        self.model = net(num_channels=self.num_channels, scale_factor=self.scale_factor, d=32, s=5, m=1)

        
        # choose pretrained model
        if Model_index == 0:
            print('no pretrained model needed')
            # self.model.weight_init_fromMAT_s()  # init from trained model
            self.model.weights_init_kaiming()
        elif Model_index == 1:
            self.model.load_state_dict(torch.load(self.pretrained_model))
            print('\nTrained generator model is loaded: ' + self.pretrained_model)
        elif Model_index == 2:
            self.model.weight_init()
            pretrained_dict = torch.load(self.pretrained_model, map_location=torch.device('cpu'))
            model_dict = self.model.state_dict()
            # 1. filter out unnecessary keys
            pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
            # 2. overwrite entries in the existing state dict
            model_dict.update(pretrained_dict)
            self.model.load_state_dict(model_dict)
            print('model partially initialized')
        else:
            raise Exception('error: undefined Model_index')
Beispiel #9
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    if args.gpu_mode and not torch.cuda.is_available():
        raise Exception("No GPU found, please run without --gpu_mode=False")

    # print 'scale factor = ', scale_factor, \
    #     '\ntest_dir =', args.test_dataset,\

    from network import Net_new4 as net
    #from network import Net as net
    model = net(num_channels=1, scale_factor=4, d=32, s=5, m=1)
    #model = net(num_channels=1, scale_factor=4, d=32, s=5, m=5)
    #model.load_state_dict(torch.load(args.pretrained_model, map_location = torch.device('cpu')))
    #In GPU type
    model.load_state_dict(torch.load(args.pretrained_model))

    # for param_tensor in model.state_dict():
    #     #print(param_tensor)
    #     #print(model.state_dict()[param_tensor].size())
    #     if 'act' in param_tensor:
    #         print(param_tensor)
    #         print(model.state_dict()[param_tensor])

    # print(model)

    model.eval()  # freeze dropout/BN behaviour before tracing for export
    model_name_save = './1104_augument_based_1430_epoch110_3pm.onnx'
    x = torch.randn(1, 1, 426, 570, requires_grad=False).type(torch.float)
    torch_out = model(x)
    torch.onnx.export(model, x, model_name_save, export_params=True)
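# Optional sanity check, not part of the original script: run the exported
# graph with ONNX Runtime and compare against torch_out from above.
import numpy as np
import onnxruntime

sess = onnxruntime.InferenceSession(model_name_save)
input_name = sess.get_inputs()[0].name
onnx_out = sess.run(None, {input_name: x.numpy()})[0]
np.testing.assert_allclose(torch_out.detach().numpy(), onnx_out,
                           rtol=1e-3, atol=1e-5)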
Beispiel #10
def main():
    # parse arguments
    args = parse_args()
    if args is None:
        exit()

    # if args.gpu_mode and not torch.cuda.is_available():
    #     raise Exception("No GPU found, please run without --gpu_mode=False")

    # print 'scale factor = ', scale_factor, \
    #     '\ntest_dir =', args.test_dataset,\

    from network import Net as net
    model = net(num_channels=1, scale_factor=4, d=32, s=5, m=1)
    #model = net(num_channels=1, scale_factor=4, d=32, s=5, m=5)
    #model.load_state_dict(torch.load(args.pretrained_model, map_location = torch.device('cpu')))
    #In GPU type
    model.load_state_dict(torch.load(args.pretrained_model,
                                     map_location='cpu'))

    # for param_tensor in model.state_dict():
    #     #print(param_tensor)
    #     #print(model.state_dict()[param_tensor].size())
    #     if 'act' in param_tensor:
    #         print(param_tensor)
    #         print(model.state_dict()[param_tensor])

    # print(model)
    x = torch.randn(1, 1, 1024, 768, requires_grad=False).type(torch.float)
    stat(model, (1, 1024, 768))  # torchstat summary of the model's params/FLOPs
Beispiel #11
    def Choose_Model(self, Model_index):
        # choose model structure
        if self.model_name == 'Net':
            from network import Net as net
        elif self.model_name == 'Net_new3':
            from network import Net_new3 as net
        elif self.model_name == 'Net_new4':
            from network import Net_new4 as net
        elif self.model_name == 'FSRCNN':
            from network import FSRCNN as net
        elif self.model_name == 'FSRCNN_d1':
            from network import FSRCNN_d1 as net
        elif self.model_name == 'FSRCNN_d2':
            from network import FSRCNN_d2 as net
        elif self.model_name == 'FSRCNN_ps1':
            net = importlib.import_module("model.FSRCNN_ps1").FSRCNN_ps1
        elif self.model_name == 'FSRCNN_ps2_HN':
            net = importlib.import_module("model.FSRCNN_ps2_HN").FSRCNN_ps2_HN
        else:
            raise Exception('error: undefined Model_name')

        self.model = net(scale_factor=self.scale_factor,
                         num_channels=1,
                         d=56,
                         s=12,
                         m=4)

        # choose pretrained model
        if Model_index == 0:
            print('no need pretrained model')
            # self.model.weight_init_fromMAT_s() #init from trained model
            #self.model.weights_init_kaiming()
        elif Model_index == 1:
            state_dict = torch.load(self.pretrained_model)
            from collections import OrderedDict
            new_state_dict = OrderedDict()
            for k, v in state_dict.items():
                #print(k)
                name = k[7:]  # remove `module.`
                new_state_dict[name] = v
            # load params
            self.model.load_state_dict(new_state_dict)
            #self.model.load_state_dict(torch.load(self.pretrained_model))
            print('Trained generator model is loaded:', self.pretrained_model)
        elif Model_index == 2:
            self.model.weight_init()
            pretrained_dict = torch.load(self.pretrained_model)
            model_dict = self.model.state_dict()
            # 1. filter out unnecessary keys
            pretrained_dict = {
                k: v
                for k, v in pretrained_dict.items() if k in model_dict
            }
            # 2. overwrite entries in the existing state dict
            model_dict.update(pretrained_dict)
            self.model.load_state_dict(model_dict)
            print('model partially initialized')
        else:
            raise Exception('error: undefined Model_index')
Beispiel #12
    def __init__(self, data, alpha=0.000001):
        if not isinstance(data, pd.DataFrame):
            raise TypeError('data must be a pandas DataFrame')
        self.alpha = alpha
        self.data = data.copy()
        self.net = net(data=self.data)
        self.net.calc_cpt(self.data)
        self.scores = {}
Beispiel #13
    def __init__(self, data, alpha=0.000001):
        if not isinstance(data, pd.DataFrame):
            raise TypeError('data must be a pandas DataFrame')
        self.data = data.copy()
        self.net = net(data=self.data)
        self.net.calc_cpt(self.data)
        self.scores = {}
        self.mi_weights = {}
        self.alpha = alpha
        print(self.net.nds.keys())
        self.calc_norm_mi_Weights()
Beispiel #14
def generate_frames(first_frame_path_name, res_npy_ori_test_path,
                    res_npy_trs_test_path, model_path_name, gen_frames_path):
    batch_shape = (1, img_height, img_width, 3)
    with tf.Session() as sess:
        X_content = tf.placeholder(tf.float32,
                                   shape=batch_shape,
                                   name="X_content")
        Y_content = tf.placeholder(tf.float32,
                                   shape=batch_shape,
                                   name="Y_content")
        preds = network.net((X_content + 255.0) / 255.0 / 2)

        generated_frames_array = []
        last_frame = rec.img_to_tensor(first_frame_path_name)

        Y_content_flat = tf.reshape(Y_content,
                                    [-1, img_width * img_height * 3])
        preds_flat = tf.reshape(preds, [-1, img_width * img_height * 3])
        loss = tf.reduce_mean(
            tf.reduce_sum(tf.square(preds_flat - Y_content_flat),
                          reduction_indices=[1]))

        tf.train.Saver().restore(sess, model_path_name)

        res_ori_test = np.load(res_npy_ori_test_path)
        res_trs_test = np.load(res_npy_trs_test_path)
        # res_ori_test = rec.get_frames_tensors(frame_ori_path, 15001, 17982)
        # res_trs_test = rec.get_frames_tensors(frame_trs_path, 15001, 17982)

        X_batch = np.zeros(batch_shape, dtype=np.float32)
        Y_batch = np.zeros(batch_shape, dtype=np.float32)

        for i in range(len(res_ori_test)):
            X_batch_array, Y_batch_array = our_utils.next_batch(
                res_ori_test, res_trs_test, i)
            X_batch[0] = X_batch_array[0]
            Y_batch[0] = Y_batch_array[0]

            to_get = [loss, preds]
            test_feed_dict = {X_content: X_batch, Y_content: Y_batch}
            tup = sess.run(to_get, feed_dict=test_feed_dict)
            log.info("test " + str(i) + ", loss: " +
                     str(np.sqrt(tup[0] / img_width / img_height / 3)))
            last_frame = last_frame + tup[1]
            generated_frames_array.append(last_frame)

        if not os.path.exists(gen_frames_path):
            os.makedirs(gen_frames_path)
        for i in range(len(generated_frames_array)):
            scipy.misc.imsave(gen_frames_path + 'frame_' + str(i) + '.jpg',
                              generated_frames_array[i][0])
Beispiel #15
    def __init__(self, user_queue, out_queue, id, p_trigger, d_trigger, c_trigger):
        QtCore.QThread.__init__(self)
        self.queue = user_queue
        self.code_queue = out_queue
        self.info = None
        self.cToken = ''
        self.captcha = ''
        self.code = ''
        self.timeStart = ''
        self.timeSlotID = ''
        self.hasStop = False
        self.network = net()
        self.id = id
        self.p_trigger = p_trigger
        self.d_trigger = d_trigger
        self.c_trigger = c_trigger
Beispiel #16
    def train(self, iterations=10, maxmiss=10):
        """
        Train using Recommended Greedy Algorithm
        """
        scores = {'Iteration': [], 'Network': [], 'Score': []}
        score_check = maxmiss
        niter = iterations
        nodes = list(self.data.columns)
        best = score_pom(export_pom(self.net, by='label'), self.data)

        #print("START LOOP")
        while score_check > 0 and niter > 0:
            n = net(data=self.data)
            n.import_dag(self.net.export_dag())

            ops = [n.add_edge, n.del_edge, n.rev_edge]

            for f in ops:
                # Choose the first node in a uniform, random way
                v1 = np.random.choice(nodes)

                # Choose the second with probabilities weighted by mi
                v2 = np.random.choice(self.mi_weights[v1].index,
                                      p=self.mi_weights[v1])
                f(v1, v2)

            if n.acyclic():
                n.calc_cpt(self.data, alpha=self.alpha)
                score = score_pom(export_pom(n, by='label'), self.data)
                scores['Iteration'].append(iterations - niter)
                scores['Network'].append(n)
                scores['Score'].append(score)
                #print(best, score, niter, score_check)
                if score > best:
                    self.net = n
                    best = score
                    score_check = maxmiss
                else:
                    score_check -= 1
            niter -= 1
        self.scores = scores
Beispiel #17
def main(args):
    # Create model directory for saving trained models
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    test_dataset = a2d_dataset.A2DDataset(train_cfg, args.dataset_path)
    data_loader = DataLoader(test_dataset, batch_size=4, shuffle=True, num_workers=4) # you can make changes

    # Define model, Loss, and optimizer
    # model.to(device)
    # model = net(num_classes=args.num_cls)
    model = net(args.num_cls)
    model.cuda()
    criterion = nn.BCELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    # optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=args.momentum, weight_decay=args.weight_decay)

    # Train the models
    total_step = len(data_loader)
    for epoch in range(args.num_epochs):
        t1 = time.time()
        for i, data in enumerate(data_loader):

            # mini-batch
            images = data[0].to(device)
            labels = data[1].type(torch.FloatTensor).to(device)
            #labels.squeeze(1)

            # Forward, backward and optimize
            outputs = model(images)
            loss = criterion(outputs, labels)
            model.zero_grad()
            loss.backward()
            optimizer.step()

            # Log info
            if i % args.log_step == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                      .format(epoch, args.num_epochs, i, total_step, loss.item()))

            # Save the model checkpoints
            if (i + 1) % args.save_step == 0:
                torch.save(model.state_dict(), os.path.join(
                    args.model_path, 'net.ckpt'))
        t2 = time.time()
        print(t2 - t1)
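# Caveat about the training example above (an assumption about `net`):
# nn.BCELoss expects probabilities in [0, 1]. If the model returns raw
# logits instead, the numerically stabler drop-in is:
import torch.nn as nn

criterion = nn.BCEWithLogitsLoss()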
Beispiel #18
    def __init__(self, parent = None):
        self.healthFactor = random.randrange(15, 25)
        self.speedFactor = random.randrange(80, 120)
        self.eyes = random.randrange(1, 1000)
        self.color = (0, 0, 0)
        self.eatingHabit = "Veg"
        self.nn = network.net(2, 1)

        if parent is not None:
            self.healthFactor = parent.dna.healthFactor
            self.speedFactor = parent.dna.speedFactor
            self.eyes = parent.dna.eyes
            self.color = parent.dna.color
            self.eatingHabit = parent.dna.eatingHabit
            self.nn = parent.dna.nn
            m = random.random()
            if m < 0.2:
                self._mutate(parent)
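# The _mutate hook called above is not shown; a hypothetical sketch of
# such a method, assuming mutation re-rolls one randomly chosen trait:
import random

def _mutate(self, parent):
    trait = random.choice(['healthFactor', 'speedFactor', 'eyes'])
    if trait == 'healthFactor':
        self.healthFactor = random.randrange(15, 25)
    elif trait == 'speedFactor':
        self.speedFactor = random.randrange(80, 120)
    else:
        self.eyes = random.randrange(1, 1000)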
Beispiel #19
def wallstreetMessageRequest(net, rf, startTime):
    # reuse the caller's session; it already holds the login cookie
    if net.loadCookie(__WS_MAINPAGE__) is None:
        raise network.networkError("__WS_MAINPAGE__")

    reqAddr = __WS_LIVE_ADD__ % startTime

    for i in range(__ERROR_RETRY__):
        print(i, reqAddr)
        resStr = net.getResponseData(reqAddr)
        resDic = rf.getJsonObject(resStr, None)
        resCode = resDic["code"]
        if resCode == 20000:  # request succeeded; break out of the retry loop
            break
    else:  # retries exhausted without a successful response
        raise someError("__ERROR_RETRY__:", resCode, resDic["message"])

    items = resDic['data']['items']

    if items is None:
        print(resStr)
        raise someError('NULL content, maybe a wrong cursor')

    rows = []
    for item in items:
        if item['is_calendar']:  # skip calendar entries
            continue
        content = item['content_text'].replace('\n', '')
        row = {'contentHASH': rf.getMD5(content), 'id': item['id'],
               'source': __WS_TAG__, 'content': content,
               'created_at': unixtTime2StringTime(item['display_time'])}

        row['tag_name'] = item['score']

        channels = item['channels']
        if 'global-channel' in channels:
            channels.remove('global-channel')
        row['tag_name2'] = '|'.join(item['channels'])

        rows.append(row)
    nextCursor = resDic['data']['next_cursor']

    return rf.addARows(rows), int(nextCursor)
Beispiel #20
def main(args):
    # Create model directory for saving trained models
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    test_dataset = a2d_dataset.A2DDataset(
        val_cfg, args.dataset_path)  # changed to val_cfg from test_cfg
    data_loader = DataLoader(test_dataset,
                             batch_size=1,
                             shuffle=False,
                             num_workers=1)

    # define and load your model here
    # model = net(args.num_cls).cuda()
    model = net(args.num_cls).to(device)
    model.load_state_dict(torch.load(os.path.join(args.model_path,
                                                  'net.ckpt')))

    X = np.zeros((len(data_loader), args.num_cls))
    Y = np.zeros((len(data_loader), args.num_cls))
    print(len(data_loader))
    model.eval()
    with torch.no_grad():
        for batch_idx, data in enumerate(data_loader):
            # mini-batch
            images = data[0].to(device)
            labels = data[1].type(torch.FloatTensor).to(device)
            output = model(images).cpu().detach().numpy()
            target = labels.cpu().detach().numpy()
            output[output >= 0.5] = 1
            output[output < 0.5] = 0
            X[batch_idx, :] = output
            Y[batch_idx, :] = target

    P = Precision(X, Y)
    R = Recall(X, Y)
    F = F1(X, Y)
    print('Precision: {:.1f} Recall: {:.1f} F1: {:.1f}'.format(
        100 * P, 100 * R, 100 * F))
Beispiel #21
def evaluate_model(res_npy_ori_test_path, res_npy_trs_test_path,
                   model_path_name):
    batch_shape = (1, img_height, img_width, 3)
    with tf.Session() as sess:
        X_content = tf.placeholder(tf.float32,
                                   shape=batch_shape,
                                   name="X_content")
        Y_content = tf.placeholder(tf.float32,
                                   shape=batch_shape,
                                   name="Y_content")
        preds = network.net(X_content / 255.0)

        Y_content_flat = tf.reshape(Y_content,
                                    [-1, img_width * img_height * 3])
        preds_flat = tf.reshape(preds, [-1, img_width * img_height * 3])
        loss = tf.reduce_mean(
            tf.reduce_sum(tf.square(preds_flat - Y_content_flat),
                          reduction_indices=[1]))

        tf.train.Saver().restore(sess, model_path_name)

        res_ori_test = np.load(res_npy_ori_test_path)
        res_trs_test = np.load(res_npy_trs_test_path)

        X_batch = np.zeros(batch_shape, dtype=np.float32)
        Y_batch = np.zeros(batch_shape, dtype=np.float32)
        for i in range(len(res_ori_test)):
            X_batch_array, Y_batch_array = our_utils.next_batch(
                res_ori_test, res_trs_test, i)
            X_batch[0] = X_batch_array[0]
            Y_batch[0] = Y_batch_array[0]

            to_get = [loss, preds]
            test_feed_dict = {X_content: X_batch, Y_content: Y_batch}
            tup = sess.run(to_get, feed_dict=test_feed_dict)
            log.info("test " + str(i) + "average loss: " +
                     str(np.sqrt(tup[0] / img_width / img_height / 3)))
Beispiel #22
fs = int(1 / dt)
target[t_stim:] = lowpass_filter(np.random.randn(n_step_stim) * sigma, lp,
                                 fs)  #Low-pass filtered white noise

#training params
alpha = dt * 0.1
step = 50
train_start = int(np.round(start_stim / dt))

#Initialize network
net = network.net(N,
                  pNI,
                  mean_delays,
                  tref,
                  G=G,
                  p=p,
                  mean_GE=mean_GE,
                  mean_GI=mean_GI,
                  ITonic=ITonic,
                  tau=tau,
                  tr=tr,
                  td=td)
input_res = np.zeros((N_in, nt))  #Input to the reservoir
net.w_res = np.multiply(
    np.random.normal(0, 1, (N, N_in)),
    np.random.rand(N, N_in) < p_in)  #Input weights (M in methods)
phase = np.random.uniform(
    0, 1, N_in) * np.pi * 2  #Initial phase configuration of the oscillators

for inp_cell in range(N_in):
    input_res[inp_cell, t_stim:t_stim + n_step_stim] = A * (np.sin(
        2 * np.pi * osc_frequencies[inp_cell] *
Beispiel #23
images = dataset.test_data[:300].float()
label = dataset.test_labels[:300]
features = images.view(300, 784)
writer.add_embedding(features, metadata=label, label_img=images.unsqueeze(1))

running_loss = 0.0
for epoch in range(10):  # loop over the dataset multiple times
    for i, data in enumerate(trainloader, 0):
        # get the inputs
        inputs, labels = data

        # zero the parameter gradients
        optimizer.zero_grad()

        # forward + backward + optimize
        outputs = net(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()

        running_loss += loss.item()
        if i % PRINT_EACH == 0:
            ### Example of the network
            dummy_input = torch.autograd.Variable(torch.rand(1, 3, 224, 224))
            resnet18 = models.resnet18(False)
            writer.add_graph(resnet18, (dummy_input, ))

            print('[%d, %5d]' % (epoch + 1, i + 1))
            print(' Train loss: %.3f' % (running_loss / PRINT_EACH))

            writer.add_scalar('Train loss', (running_loss / PRINT_EACH),
Beispiel #24
p_inj = 1
p_in = 0.3
p_fb = 1
p_res = 0.3
gain_inj = 23
gain_fb = 0
gain_in = 0.1
input_Inets = np.zeros((nb_Inets * N_Inets, nt))
input_Inets[:, t_stim:t_stim + n_step_stim] = 1
Inets = [
    network.net(N_Inets,
                p_NI,
                mean_delays,
                Refractory,
                G=G_Inets,
                p=p_Inets,
                mean_GE=GE_Inets,
                mean_GI=GI_Inets,
                ITonic=9,
                mean_TauFall_I=TauI) for GI_Inets in GI_Inets_list
]

osc_range = [5, 7]
N_osc = 3

#training params
alpha = dt * 0.1
step = 50
train_start = int(np.round(start_stim / dt))

Beispiel #25
#Plot settings
data = pd.DataFrame({
    'G1': G1,
    'G2': G2,
    'G3': G3,
    'G4': G4,
    'G5': G5,
    'G6': G6,
    'G7': G7,
    'G8': G8,
    'G9': G9,
    'G10': G10
})

bnet = BN.from_samples(data)
bnet.plot()
plt.show()

n = net(data=data)
n.add_edge(0, 3)
n.add_edge(0, 4)
n.add_edge(1, 4)
n.add_edge(1, 3)
n.add_edge(1, 2)
n.add_edge(5, 6)
n.add_edge(5, 7)
n.calc_cpt(data)
samp = sample_net(n, 1000)

bnet = BN.from_samples(samp)
bnet.plot()
plt.show()
Beispiel #26
import sys
import logging
import json

import paddle.fluid as fluid

from paddle_fl.split_learning import HostExecutor
from paddle_fl.split_learning.core.table import LocalTable
from paddle_fl.split_learning.core.reader import TmpReader
import network

logging.basicConfig(
        format='%(asctime)s %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
        datefmt='%Y-%m-%d %H:%M',
        level=logging.DEBUG)

_LOGGER = logging.getLogger(__name__)


if __name__ == "__main__":
    data_path = sys.argv[1]
    host_input, consumer_input, label, prediction, cost = network.net()
    main_program = fluid.default_main_program()
    startup_program = fluid.default_startup_program()

    place = fluid.CPUPlace()
    table = LocalTable(data_path)
    reader = TmpReader(place)
    exe = HostExecutor(place, table, reader)
    exe.load_program_from_full_network(
            startup_program=startup_program,
            main_program=main_program)
    exe.start(7858)
Beispiel #27
def __train__():
    #data
    dataset = mnist_rotation.DataSet("mnist/train-images.idx3-ubyte",
                                     "mnist/train-labels.idx1-ubyte",
                                     "mnist/t10k-images.idx3-ubyte",
                                     "mnist/t10k-labels.idx1-ubyte")
    X = tf.placeholder(tf.uint8, [None, 42, 42, 1])
    Y = tf.placeholder(tf.uint8, [None, 1])

    images = (tf.cast(X, tf.float32) - 128.0) / 128.0
    labels = tf.cast(Y, tf.int32)

    stn, stn_params = network.net("stn", images, 6)
    # NOTE: optionally use tanh as the activation
    #stn_ = layers.tanh("stn_tanh",stn)

    #stn
    images_ = transformer.batch_transformer(images, stn, [42, 42])
    """
    R = tf.placeholder(tf.uint8,[None,42,42,1])
    RL = tf.placeholder(tf.uint8,[None,1])

    R_ = (tf.cast(R,tf.float32) - 128.0)/128.0
    RL_ = tf.cast(RL,tf.int32)

    IMAGE = tf.concat([images_,R_],0)
    LABEL = tf.concat([labels,RL_],0)
    """
    #cnn
    cnn, cnn_params = network.net("cnn", images_, CLASS_SIZE, trainable=True)
    loss, cross_loss = layers.crossentropy("stn_loss", cnn, labels, BATCH_SIZE,
                                           CLASS_SIZE)
    global_step = tf.Variable(0, trainable=False)
    opt = optimize(loss, global_step, 60000, BATCH_SIZE,
                   stn_params + cnn_params)
    #cnn
    """_
    cnn_,cnn_params_ = network.net("cnn",images,CLASS_SIZE,trainable = True,reuse = True)
    loss_,cross_loss_ = layers.crossentropy("cnn_loss",cnn_,labels,BATCH_SIZE,CLASS_SIZE)
    global_step_ = tf.Variable(0, trainable=False)
    opt_ = optimize(loss_,global_step_,30000,BATCH_SIZE,cnn_params_)
    #softmax = layers.softmax("cnn_softmax",cnn)
    """
    """
    CNN, CNN_PARAMS = network.net("cnn",IMAGE,CLASS_SIZE,trainable = True,reuse = True)
    LOSS,CROSS_LOSS = layers.crossentropy("LOSS",CNN,LABEL,BATCH_SIZE*2,CLASS_SIZE)
    GLOBAL_STEP    = tf.Variable(0, trainable=False)
    OPT             = optimize(LOSS,GLOBAL_STEP,60000,BATCH_SIZE*2)
    """

    sess = tf.Session()
    init = tf.global_variables_initializer()

    sess.run(init)
    stn_saver = tf.train.Saver(stn_params)
    cnn_saver = tf.train.Saver(cnn_params)

    save_file_name = "./transformer_model"

    for step in range(100):
        #print "step = ",step
        value = 0
        global_step = step  # rebinds the Python name only; the tf.Variable is untouched

        start_time = time.time()
        for epoch in range(int(60000 / BATCH_SIZE)):
            #print epoch
            #_cnn,_y = sess.run([cnn,Y],feed_dict = {(X,Y):dataset.getTrainBatch(BATCH_SIZE)})
            #print _cnn,_y
            opt_, loss_ = sess.run([opt, cross_loss],
                                   feed_dict={
                                       (X, Y):
                                       dataset.getTrainBatch(BATCH_SIZE)
                                   })
            value += loss_  # accumulate for the per-step average printed below
            print("current loss : ", loss_)

            #_opt_,_loss_value_ = sess.run([opt_, cross_loss_],feed_dict = {(X,Y):dataset.getTrainBatch(BATCH_SIZE,"TEST")})
            #value += _loss_value
            #print "current loss : ",_loss_value,"  ",_loss_value_
            #_opt,_loss_value = sess.run([OPT , CROSS_LOSS],feed_dict = {(X,Y,R,RL):(dataset.getTrainBatch(BATCH_SIZE),dataset.getTrainBatch(BATCH_SIZE))})
            #value += _loss_value

            #print _loss_value_

        print("step = ", step)
        print("loss = ", value / (60000 / BATCH_SIZE))
        print("duration = ", time.time() - start_time)

        #save model (every step; raise the modulus to save less often)
        if step % 1 == 0:
            save_path = stn_saver.save(sess, save_file_name + "_stn")
            print(save_path)

            save_path = cnn_saver.save(sess, save_file_name + "_cnn")
            print(save_path)
Beispiel #28
                       shorten_factor=shorten_factor)
len_mel = mel_spec.shape[1]
#Resample spectrogram to match the number of time-steps of the simulation
rs_audio = len_output / len_mel
target = np.full((N_out, nt), np.min(mel_spec))
target[:, t_stim:-len_buffer] = mel_spec[
    :, np.round(np.linspace(0, len_mel - 1, len_output)).astype(int)]

#Initialize network
net = network.net(N,
                  pNI,
                  mean_delays,
                  tref,
                  G=G,
                  p=p,
                  mean_GE=mean_GE,
                  mean_GI=mean_GI,
                  ITonic=ITonic)
net.tr = tr
net.td = td
input_res = np.zeros((N_in, nt))
N_in_net = int(np.round(p_in * N))
net.w_res = np.abs(
    np.multiply(np.random.normal(0, 1, (N, N_in)),
                np.random.rand(N, N_in) < p_in))

phase = np.random.uniform(0, 1, N_in) * np.pi
for inp_cell in range(N_in):
    input_res[inp_cell, t_stim:t_stim + n_step_stim] = A * (
Beispiel #29
def main(args):
    # Create model directory for saving trained models
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)

    test_dataset = a2d_dataset.A2DDataset(train_cfg, args.dataset_path)
    data_loader = DataLoader(test_dataset,
                             batch_size=args.batch_size,
                             shuffle=True,
                             num_workers=12)  # you can make changes

    # Define model, Loss, and optimizer
    model = net('se_resnext101')
    model.cuda()
    model.load_state_dict(
        torch.load(os.path.join(args.model_path, 'net_F.ckpt')))
    # for i, param in model.backbone.features.named_parameters():
    #     if 'stage4' or 'stage3' or 'stage2' or 'stage1'in i:
    #         print(i)
    #         param.requires_grad = True
    #     else:
    #         param.requires_grad = False

    criterion = nn.BCEWithLogitsLoss()

    # print(list(filter(lambda p: p.requires_grad, model.parameters())))
    base_optimizer = optim.SGD(model.parameters(), lr=0.005, momentum=0.9)
    # base_optimizer = optim.SGD(list(filter(lambda p: p.requires_grad, model.parameters())), lr=0.005, momentum=0.9)
    optimizer = SWA(base_optimizer, swa_start=1, swa_freq=5, swa_lr=0.005)
    # optimizer = optim.SGD(model.parameters(), lr=0.05, momentum=0.9)
    # optimizer = optim.Adam(model.parameters(), lr=0.005, weight_decay=0.00005)
    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, args.num_epochs, eta_min=5e-5, last_epoch=-1)
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     [5, 10, 20, 40, 75],
                                                     gamma=0.25)

    model, optimizer = amp.initialize(model,
                                      optimizer,
                                      opt_level="O1",
                                      verbosity=0)
    # Train the models
    total_step = len(data_loader)

    best_P, best_R, best_F = 0, 0, 0
    for epoch in range(args.num_epochs):
        # scheduler.step()
        print('epoch:{}, lr:{}'.format(epoch, scheduler.get_lr()[0]))

        t1 = time.time()
        for i, data in enumerate(data_loader):
            optimizer.zero_grad()
            # mini-batch
            images = data[0].to(device)
            labels = data[1].type(torch.FloatTensor).to(device)

            # Forward, backward and optimize
            outputs = model(images)
            loss = criterion(outputs, labels)

            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()

            # update every batch (modulus 1); raise it for gradient accumulation
            if (i + 1) % 1 == 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(),
                                               max_norm=5.0,
                                               norm_type=2)

                optimizer.step()
                optimizer.zero_grad()

            # optimizer.swap_swa_sgd()
            # scheduler.step()

            # Log info
            if i % args.log_step == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(
                    epoch, args.num_epochs, i, total_step, loss.item()))
        optimizer.swap_swa_sgd()
        # Save the model checkpoints
        # if (i + 1) % args.save_step == 0:
        #     torch.save(model.state_dict(), os.path.join(
        #         args.model_path, 'net.ckpt'))
        scheduler.step()
        t2 = time.time()
        print('Time Spend per epoch: ', t2 - t1)

        if epoch > -1:
            val_dataset = a2d_dataset.A2DDataset(val_cfg, args.dataset_path)
            val_data_loader = DataLoader(val_dataset,
                                         batch_size=1,
                                         shuffle=False,
                                         num_workers=1)

            X = np.zeros((len(val_data_loader), 43))
            Y = np.zeros((len(val_data_loader), 43))
            model.eval()

            with torch.no_grad():
                for batch_idx, data in enumerate(val_data_loader):
                    # mini-batch
                    images = data[0].to(device)
                    labels = data[1].type(torch.FloatTensor).to(device)
                    # output = model(images).cpu().detach().numpy()
                    output = model(images)
                    output = torch.sigmoid(output).cpu().detach().numpy()
                    target = labels.cpu().detach().numpy()
                    output[output >= 0.5] = 1
                    output[output < 0.5] = 0
                    X[batch_idx, :] = output
                    Y[batch_idx, :] = target

            P = Precision(X, Y)
            R = Recall(X, Y)
            F = F1(X, Y)
            print('Precision: {:.1f} Recall: {:.1f} F1: {:.1f}'.format(
                100 * P, 100 * R, 100 * F))

            if (P > best_P):
                torch.save(model.state_dict(),
                           os.path.join(args.model_path, 'net_P.ckpt'))
                best_P = P
            if (R > best_R):
                torch.save(model.state_dict(),
                           os.path.join(args.model_path, 'net_R.ckpt'))
                best_R = R
            if (F > best_F):
                torch.save(model.state_dict(),
                           os.path.join(args.model_path, 'net_F.ckpt'))
                best_F = F
            model.train()  # restore training mode after validation
Beispiel #30
data = {}   #Output dictionary used by figure2b.py
data['nets'] = []
cutoff = 0.95       #Cut-off correlation between the activity on each trial to consider a network as stable
print('Starting simulation.')
for net_i in range(nb_networks):
    input_network = np.zeros(nt)
    N_in = 1
    unstable = True
    mean_TauFall_I = mean_TauFall_I_list[net_i]
    while unstable:
        total_iEx = np.zeros((nb_epochs, nt))
        total_iIn = np.zeros((nb_epochs, nt))
        total_iInp = np.zeros((nb_epochs, nt))
        spikes = np.zeros((N, nt, nb_epochs))
        #Exc cells
        net = network.net(N, pNI, mean_delays, tref, G=G, p=p,
                          mean_GE=mean_GE, mean_GI=mean_GI, ITonic=ITonic,
                          mean_TauFall_I=mean_TauFall_I, GaussSD=GaussSD)
        N_in_net = int(round(p_in * net.NE))  # must be an int to use as an array dimension
        temp_w_in = np.abs(np.multiply(np.random.normal(0, 1, (N_in_net, N_in)),
                                       np.random.rand(N_in_net, N_in) < p_in))
        w_res = np.zeros((N, N_in))
        w_res[net.E_idx, :] = temp_w_in  #Only the excitatory units receive the step input
        net.w_res = w_res
        input_network[t_stim:e_stim] = A

        for ep_i in range(nb_epochs):
            #Simulation variables
            gEx = np.zeros(N)  #Conductance of excitatory neurons
            gIn = np.zeros(N)  #Conductance of inhibitory neurons
            F = np.full(N, np.nan)  #Last spike times of each inhibitory cell
            V = np.random.normal(net.mean_VRest, abs(0.02 * net.mean_VRest), N)  #Set initial voltage Exc

            for t in range(nt):
Beispiel #31
import paddle.fluid as fluid
import numpy as np
import time
import grpc
import json
import yaml
import paddle

import network
import utils

if __name__ == "__main__":
    paddle.enable_static()
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    slot1, slot2, label, prediction, cost = network.net()
    main_program = fluid.default_main_program()
    startup_program = fluid.default_startup_program()

    #startup_program, main_program = split_nn.util.load_whole_program("whole_program")
    exe.run(startup_program)
    """
    fluid.io.load_persistables(
            executor=exe, 
            dirname="whole_program/persistables_vars",
            main_program=main_program)
    """

    for i, item in enumerate(utils.data_iter("../data/input.json")):
        uid, x1, x2, label = item
        data = {