Example #1
def createNxN( n ):
	# Create an n x n grid network with nodes spaced 200 units apart
	network = networks.Network( (n*200, n*200) )
	
	for x in range( 0, n*200, 200 ):
		for y in range( 0, n*200, 200 ):
			network.addNode( x, y )
	
	return network
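
A minimal usage sketch for createNxN, assuming the networks module used above is importable; the node layout in the comment is inferred from the loops:

import networks

grid = createNxN(3)   # 3x3 grid: one node at every (x, y) with x, y in {0, 200, 400}
print(grid)
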
    def test_updateWeights(self):
        loss = C.LossMSE()
        m = N.Network(loss)

        try:
            m.updateWeights(0.01)
        except:
            return 0

        return 1
    def test_resetGradients(self):
        loss = C.LossMSE()
        m = N.Network(loss)

        try:
            m.resetGradients()
        except:
            return 0

        return 1
    def test_backwardCall(self):
        loss = C.LossMSE()
        m = N.Network(loss)

        try:
            m.backwardCall()
        except:
            return 0

        return 1
    def test_iter(self):
        loss = C.LossMSE()
        m = N.Network(loss)

        try:
            m.__iter__()
        except:
            return 0

        return 1
    def test_forward(self):
        loss = C.LossMSE()
        m = N.Network(loss)

        try:
            m.forward()
        except:
            return 0

        return 1
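
The methods above follow a simple smoke-test convention: 1 on success, 0 when an exception is raised. A minimal runner sketch, assuming they live on some test-suite object that is not shown in this example:

def run_smoke_tests(suite):
    # Call every test_* method on the suite and count how many returned 1.
    names = [n for n in dir(suite) if n.startswith("test_")]
    passed = sum(getattr(suite, n)() for n in names)
    print("{}/{} smoke tests passed".format(passed, len(names)))
    return passed == len(names)
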
def main(args):
    # load model
    model = networks.Network()
    model.init_model()
    model.loading_model(args['model_path'])
    # load dataset
    dset = dataset.Dataset(args['dataset_path'])
    # predict
    scores = model.model.predict_classes(dset.features,
                                         batch_size=args['batch_size'])[:, 0]
    fig = plt.figure(figsize=(8, 8))
    t0 = time()
    for slideName in dset.slides:
        # check if slide name matches slide path
        if slideName == args['slide_path'].split('/')[-1].split('.')[0]:
            # get dataset information for each slide
            slide_idx = dset.getSlideIdx(slideName)
            object_num = dset.getObjNum(slide_idx)
            data_idx = dset.getDataIdx(slide_idx)
            x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
            y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
            # get score for each slide
            score_set = scores[data_idx:data_idx + object_num]
            # get slide properties for each slide
            slide = openslide.open_slide(args['slide_path'])
            # compute downsample factor
            base_mag = find_mag(slide.level_count)
            downsample_factor = base_mag / args['target_mag']
            level = find_level(downsample_factor)
            # read downsampled image
            downsampled_img = slide.read_region((0, 0), level,
                                                slide.level_dimensions[level])
            # get centroids for the downsampled image
            [width, height] = slide.level_dimensions[level]
            patch_size_log_mag = int(args['patch_size'] / downsample_factor)
            half_patch_size_log_mag = int(patch_size_log_mag / 2)
            x_centroids = x_centroid_set / downsample_factor
            y_centroids = y_centroid_set / downsample_factor
            mask = np.zeros((height, width))
            for i in range(len(x_centroids)):
                top, left = get_patch_bounds(x_centroids[i, 0], y_centroids[i,
                                                                            0],
                                             half_patch_size_log_mag)
                if score_set[i] > 0:
                    mask[top:top + patch_size_log_mag,
                         left:left + patch_size_log_mag] = 255
                elif score_set[i] == 0:
                    mask[top:top + patch_size_log_mag,
                         left:left + patch_size_log_mag] = 128
            im_output = args['out_dir'] + slideName + '.png'
            plt.imsave(im_output, mask)
    t1 = time()
    print("Processing done ", t1 - t0)
    def test_backward(self):
        x = FloatTensor([[1, 2, 3], [-4, 2, 3]])
        y = FloatTensor([[3, 2, 0], [-1, 2, 3]])

        loss = C.LossMSE()
        m = N.Network(loss)

        try:
            m.backward(x, y)
        except:
            return 0

        return 1
Example #9
    def __init__(self, nspins, network=None, shape=None, seed=None):
        """Init method.

        Parameters
        ----------
            nspins : int
                Number of spins.

            network : Network object
                Network object with the connection between spins.

            shape : int tuple
                Shape of the lattice (e.g. (2, 3) or 2). It must satisfy
                np.prod(shape) == nspins. If None, a 1D lattice
                (shape = (nspins, )) is assumed.

            seed : int
                Random number generator seed.

        """
        # Store parameters
        self.nspins = nspins

        # Create a flat array to store the spin configuration
        # (a structured array will be accessible through the
        # latt() method).
        self.spins = np.zeros(self.nspins, dtype="intc")

        # Set values randomly
        self.reset_random()

        # If a network is not given create an empty one
        if network is None:
            self.network = networks.Network(self.nspins)
        else:
            self.network = network

        # If the lattice has no shape attribute, create it
        if shape is None:
            self._shape = tuple((self.nspins, ))
        else:
            if np.prod(shape) == self.nspins:
                self._shape = tuple(shape)
            else:
                raise ValueError(
                    "The given shape does not match the number of spins.")

        # Create neighbour lists
        self.update_neighbours()

        # Initialize the random number generators
        if seed is None:
            seed = random.SystemRandom().randint(0, 32767)
        self.seed = seed  # Store seed
        cevolve.seed(seed)  # dranxor number generator
        np.random.seed(seed)  # NumPy random number generator
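
A minimal construction sketch, assuming the __init__ above belongs to isingmodel.IsingCoupling (as the call in Example #10 suggests) and that the module is importable:

import isingmodel

# 16 spins arranged as a 4x4 lattice with a fixed seed; the network argument is
# omitted, so the constructor creates an empty networks.Network(16) itself.
lattice = isingmodel.IsingCoupling(16, shape=(4, 4), seed=1234)
print(lattice.spins)
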
Example #10
    def __init__(self, shape, seed=None):
        """Init method.

        Parameters
        ----------
            shape : int tuple
                Shape of the network (e.g. (2,3) or 2). It must satisfy
                np.prod(shape) == nnodes. This parameter is needed to plot
                the network. If None, a 1D network (shape = (nnodes, ))
                is assumed.

            seed : int
                Seed for the pseudo-random number generator used in the
                network evolution.

        """
        # Cast shape to tuple
        try:
            shape = tuple(shape)
        except TypeError:
            shape = tuple((shape, ))

        nnodes = np.prod(shape)

    # Initialize list of learnt patterns
        self.patterns = list()

        # Create empty network
        network = networks.Network(nnodes, weighted=True)

        # Initialize Ising lattice
        isingmodel.IsingCoupling.__init__(self,
                                          nnodes,
                                          network=network,
                                          shape=shape,
                                          seed=seed)
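
A construction sketch for the class above; HopfieldNetwork is only a placeholder name, since the snippet shows the __init__ but not the class statement it belongs to:

# 25 weighted nodes arranged as a 5x5 grid, starting with no learnt patterns.
net = HopfieldNetwork(shape=(5, 5), seed=42)
print(len(net.patterns))
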
def main(args):
    # load model
    model = networks.Network()
    model.init_model()
    model.loading_model(args['model_path'])
    # load dataset
    dset = dataset.Dataset(args['dataset_path'])
    # predict
    scores = model.model.predict_classes(dset.features,
                                         batch_size=args['batch_size'])[:, 0]
    fig = plt.figure(figsize=(8, 8))
    t0 = time()
    for slideName in dset.slides:
        # check if slide name matches slide path
        if slideName == args['slide_path'].split('/')[-1].split('.')[0]:
            # get dataset information for each slide
            slide_idx = dset.getSlideIdx(slideName)
            object_num = dset.getObjNum(slide_idx)
            data_idx = dset.getDataIdx(slide_idx)
            x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
            y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
            # get score for each slide
            score_set = scores[data_idx:data_idx + object_num]
            # get slide properties for each slide
            slide = openslide.open_slide(args['slide_path'])
            # compute downsample factor
            base_mag = find_mag(slide.level_count)
            downsample_factor = base_mag / args['target_mag']
            level = find_level(downsample_factor)
            # read downsampled image
            downsampled_img = slide.read_region((0, 0), level,
                                                slide.level_dimensions[level])
            # get centroids for the downsampled image
            [width, height] = slide.level_dimensions[level]
            patch_size_log_mag = int(args['patch_size'] / downsample_factor)
            half_patch_size_log_mag = int(patch_size_log_mag / 2)
            x_centroids = x_centroid_set / downsample_factor
            y_centroids = y_centroid_set / downsample_factor
            mask = np.zeros((height, width))
            for i in range(len(x_centroids)):
                top, left = get_patch_bounds(x_centroids[i, 0], y_centroids[i,
                                                                            0],
                                             half_patch_size_log_mag)
                if score_set[i] > 0:
                    mask[top:top + patch_size_log_mag,
                         left:left + patch_size_log_mag] = 255
                elif score_set[i] == 0:
                    mask[top:top + patch_size_log_mag,
                         left:left + patch_size_log_mag] = 128
            im_output = args['out_dir'] + slideName + '.png'
            fig.add_subplot(1, 2, 1)
            plt.title('Predicted')
            plt.imshow(mask, cmap='jet')
            fig.add_subplot(1, 2, 2)
            points = np.hstack((x_centroids, y_centroids))
            # get positive and negative points
            positive_index = np.where(score_set > 0)
            positive_points = points[positive_index].astype(int)
            # random choice if the number of superpixels is over 1000
            if len(positive_points) > args['random_choice']:
                indices = np.random.choice(positive_points.shape[0],
                                           args['random_choice'],
                                           replace=False)
                positive_points = positive_points[indices]
            # perform Affinity Propagation Clustering
            af = AffinityPropagation().fit(positive_points)
            cluster_centers_indices = af.cluster_centers_indices_
            labels = af.labels_
            n_clusters_ = len(cluster_centers_indices)
            im1 = plt.imshow(downsampled_img)
            colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')
            for k, col in zip(range(n_clusters_), colors):
                class_members = labels == k
                cluster_center = positive_points[cluster_centers_indices[k]]
                plt.plot(positive_points[class_members, 0],
                         positive_points[class_members, 1], col + '.')
                plt.plot(cluster_center[0],
                         cluster_center[1],
                         'o',
                         markerfacecolor=col,
                         markeredgecolor='k',
                         markersize=5)
                for x in positive_points[class_members]:
                    plt.plot([cluster_center[0], x[0]],
                             [cluster_center[1], x[1]], col)
            plt.title('AffinityAnalyzed')
            plt.savefig(im_output)
    t1 = time()
    print("Training took ", t1 - t0)
Example #12
# load dataset
f = h5py.File(dataSet, 'r')
features = f['features'][:]
# load trainingset
c = h5py.File(trainSet, 'r')
object_num = len(c['slideIdx'][:])
sample_features = c['features'][:]
train_features = np.vstack((sample_features, c['augments_features'][:]))

sample_labels = c['labels'][:]
train_labels = np.vstack((sample_labels, c['augments_labels'][:]))
train_labels[train_labels < 0] = 0
train_labels = to_categorical(train_labels, num_classes=2)

# initialize neural network model
model = networks.Network()
model.init_model()

print('Training ... ', len(train_labels))
t0 = time()
model.train_model(train_features, train_labels)
t1 = time()
print('Training took ', t1 - t0)

print('Predict ... ', len(features))
t0 = time()
predicts = model.predict(features)
t1 = time()
print('Predict took ', t1 - t0)
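
For experimenting without the real data, a hedged sketch of a tiny synthetic training file that matches the keys read above; the feature dimension and sample count are arbitrary choices:

import h5py
import numpy as np

def write_toy_trainset(path, n=10, dim=64):
    # Minimal HDF5 layout with the datasets the snippet above expects.
    with h5py.File(path, 'w') as c:
        c.create_dataset('slideIdx', data=np.zeros((n, 1), dtype=np.int64))
        c.create_dataset('features', data=np.random.rand(n, dim).astype(np.float32))
        c.create_dataset('labels', data=np.random.randint(0, 2, size=(n, 1)))
        c.create_dataset('augments_features', data=np.random.rand(n, dim).astype(np.float32))
        c.create_dataset('augments_labels', data=np.random.randint(0, 2, size=(n, 1)))
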
Example #13
def run_mlp_experiment(args, device):
    """
    Runs the experiment with a 3-layer fully connected network.
    :param args:    Namespace from utils.parse_arguments
    :param device:  torch.device
    :return: np.array of float, np.array of float, np.array of float; train_acc, val_acc, test_acc (in percentage)
    """
    validation_ratio, record_train_acc, record_val_acc, record_test_acc = utils.configure_training_mode(
        args)

    train_loader, validation_loader, test_loader = datasets.build_loaders_by_dataset(
        args.dataset,
        args.batch_size,
        validation_ratio=validation_ratio,
        train_validation_split_seed=0)
    local_loss_list = utils.get_loss(args)
    nonlinearity = utils.get_nonlinearity(args)

    optimizer_local, local_opt_arguments_dict, local_scheduler_arguments_dict, \
        optimizer_final, final_opt_arguments_dict, final_scheduler_arguments_dict = \
        utils.choose_optimizers_and_parameters(args)

    conv_sizes = []
    do_pooling = []
    kernel_sizes = []

    fc_layers = [args.mlp_layer_size, args.mlp_layer_size, args.mlp_layer_size]

    if args.divisive_norm_fc:
        divisive_norm_list = [
            networks.DivisiveNorm(args.divnorm_power, args.grouping_dim,
                                  args.grouped_var_delta)
            for i in range(len(fc_layers))
        ]
    else:
        divisive_norm_list = None

    alt_feedback_type = None
    if args.feedback_alignment:
        alt_feedback_type = 'feedback_alignment'
    elif args.sign_symmetry:
        alt_feedback_type = 'sign_symmetry'

    net = networks.Network(
        nonlinearity,
        local_loss_list,
        optimizer_local,
        torch.optim.lr_scheduler.MultiStepLR,
        conv_sizes,
        kernel_sizes,
        do_pooling,
        fc_layers,
        'max',
        args.dataset,
        bias=False,
        local_opt_arguments_dict=local_opt_arguments_dict,
        local_scheduler_arguments_dict=local_scheduler_arguments_dict,
        dropout_p=args.dropout_p,
        batch_norm=args.batch_norm,
        divisive_norm_list_conv=None,
        divisive_norm_list_fc=divisive_norm_list,
        spatial_dropout=args.spatial_dropout,
        alt_feedback_type=alt_feedback_type)

    net = net.to(device)
    print(net)

    final_loss = nn.CrossEntropyLoss()

    if args.backprop:
        final_opt = optimizer_final(net.parameters(),
                                    **final_opt_arguments_dict)
        compute_local_loss = False
        update_local_loss = False
    else:
        final_opt = optimizer_final(net.softmax_layer.parameters(),
                                    **final_opt_arguments_dict)
        compute_local_loss = True
        update_local_loss = True

    final_scheduler = torch.optim.lr_scheduler.MultiStepLR(
        final_opt, **final_scheduler_arguments_dict)

    train_acc, val_acc, test_acc = utils.train_network(
        net,
        device,
        final_loss,
        final_opt,
        final_scheduler,
        args.n_epochs,
        train_loader,
        validation_loader,
        test_loader,
        compute_local_loss=compute_local_loss,
        update_local_loss=update_local_loss,
        record_train_acc=record_train_acc,
        record_val_acc=record_val_acc,
        record_test_acc=record_test_acc,
        print_results=True,
        backprop_batch_manhattan=args.backprop_batch_manhattan)

    return train_acc, val_acc, test_acc
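
A sketch of how run_mlp_experiment might be invoked; utils.parse_arguments is assumed from the docstring above, everything else is standard PyTorch:

import torch
import utils

if __name__ == '__main__':
    args = utils.parse_arguments()   # assumed per the docstring
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    train_acc, val_acc, test_acc = run_mlp_experiment(args, device)
    print('final test accuracy: {:.2f}%'.format(test_acc[-1]))
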
def run():
    # initialize VGG Model and PCA
    iset = init.Init()
    # initialize neural network model
    model = networks.Network()
    model.init_model()
    # initialize global instance
    uset = users.Users()

    # store special features in memory
    # dset_special = dataset.Dataset(set.PATH_TO_SPECIAL)
    dset_special = None
    print "Dataset Loaded."
    # set normal features in memory to false
    is_normal_loaded = True
    tset_name = None
    is_reloaded = False
    m_checkpoints = 0

    while True:

        queue = db.lrange(set.REQUEST_QUEUE, set.REQUEST_START, set.REQUEST_END)
        q_uid = None
        # initialize local instance
        select = selectonly.Select()
        finalize = save.Save()
        viewer = view.View()
        retrain_v = retrainView.retrainView()
        retrain_h = retrainHeatmap.retrainHeatmap()
        heat = heatmap.Heatmap()
        t_train = train.Train()
        report_label = label.label()
        report_count = count.count()
        report_map = mapping.map()

        for q in queue:

            q = json.loads(q.decode("utf-8"))
            q_uid = q["uid"]
            target = q["target"]
            session_uid = q["uid"]
            dataSetPath = set.DATASET_DIR + q["dataset"]
            pcaPath = set.DATASET_DIR + q["pca"]
            # if the special feature set is requested, don't use the normal in-memory features
            is_normal_loaded = False if dataSetPath == set.PATH_TO_SPECIAL else True

            if target == "label":
                report_label.setData(q)

            if target == "count":
                report_count.setData(q)

            if target == "map":
                report_map.setData(q)

            if target == 'selectonly':
                select.setData(q)

            if target == 'save':
                finalize.setData(q)

            if target == 'view':
                viewer.setData(q)

            if target == 'retrainView':
                retrain_v.setData(q)

            if target == 'retrainHeatmap':
                retrain_h.setData(q)

            if target == 'heatmapAll':
                heatmaps = q["viewJSONs"]

            if target == 'heatmap':
                heat.setData(q)

            if target == 'train':
                t_train.setData(q)

            if target == 'reload':
                t_path = set.TRAININGSET_DIR + q["trainingSetName"]
                is_reloaded = True

            if target == 'reviewSave':
                q_samples = json.loads(q["samples"])

        if q_uid is not None:

            print target, " Session Start ....."

            no_uid = True
            uidx = 0

            # find current user Index
            for i in range(len(uset.users)):
                if uset.users[i]['uid'] == session_uid:
                    uidx = i
                    no_uid = False

            if no_uid:
                # set users data
                uset.addUser(session_uid)

            if is_normal_loaded:
                dset = dataset.Dataset(dataSetPath)
            else:
                dset = dset_special

            PCA = joblib.load(pcaPath)

            if target == 'selectonly':
                uset.setIter(uidx, select.iter)
                print "Predict Start ... "
                t0 = time()
                scores = model.predict_prob(dset.features)
                t1 = time()
                print "Predict took ", t1 - t0
                # Find uncertain samples
                data = select.getData(scores, dset.slideIdx, dset.slides, dset.x_centroid, dset.y_centroid)
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'view':
                slide_idx = dset.getSlideIdx(viewer.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Predict Start ... "
                t0 = time()
                predictions = model.predict(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0
                object_idx = load(
                    viewer.left, viewer.right, viewer.top, viewer.bottom, x_centroid_set.astype(np.float), y_centroid_set.astype(np.float)
                )
                data = {}

                for i in object_idx:
                    data[str(x_centroid_set[i][0])+'_'+str(y_centroid_set[i][0])] = str(predictions[i])

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'heatmap':
                slide_idx = dset.getSlideIdx(heat.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Predict Start ... "
                t0 = time()
                if set.IS_HEATMAP == False:
                    scores = model.predict_prob(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0
                # set x and y maps
                heat.setXandYmap()
                # write heatmaps
                heat.setHeatMap(x_centroid_set, y_centroid_set, scores)
                # get heatmap data
                data = heat.getData(0)

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'heatmapAll':
                data = []
                index = 0

                t0 = time()
                scores = model.predict_prob(dset.features)
                t1 = time()
                print "Predict took ", t1 - t0

                for h in heatmaps:

                    h['uid'] = session_uid
                    heat.setData(h)

                    slide_idx = dset.getSlideIdx(heat.slide)
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    # feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    score_set = scores[data_idx: data_idx+object_num]
                    # set x and y maps
                    heat.setXandYmap()
                    # write heatmaps
                    heat.setHeatMap(x_centroid_set, y_centroid_set, score_set)
                    # get heatmap data
                    data_k = heat.getData(index)
                    data.append(data_k)
                    index += 1

                # print data
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'reload':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                # uset.setReloadedData(uidx, t_path, dset.slides)
                uset.setReloadedData(uidx, t_path)

                sample_size = len(uset.users[uidx]['samples'])

                m_checkpoints = uset.users[uidx]['samples'][sample_size-1]['checkpoints']

                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                tset_path = t_path.split('/')[-1]
                tset_name = tset_path.split('.')[0]

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'label':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                uset.setReloadedData(uidx, report_label.trainSet)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, report_label.classifier)
                t1 = time()
                print "Training took ", t1 - t0

                slide_idx = dset.getSlideIdx(report_label.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                test_features = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                print "Testing Start ... "
                t0 = time()
                predicts = model.predict(test_features)
                t1 = time()
                print "Predict took ", t1 - t0

                inputImageFile = '/datasets/tif/'+ report_label.slide + '.svs.dzi.tif'

                bold = 512
                bold_left = report_label.left - bold
                bold_top = report_label.top - bold
                bold_bottom = report_label.bottom + bold
                bold_right = report_label.right + bold
                bold_width = report_label.width + 2*bold
                bold_height = report_label.height + 2*bold

                ts = large_image.getTileSource(inputImageFile)

                region = dict(
                    left=report_label.left, top=report_label.top,
                    width=report_label.width, height=report_label.height,
                )

                im_region = ts.getRegion(
                    region=region, format=large_image.tilesource.TILE_FORMAT_NUMPY
                )[0]

                mydb = mysql.connector.connect(
                  host=set.MYSQL_HOST,
                  user="******",
                  passwd="guest",
                  database="nuclei",
                  charset='utf8',
                  use_unicode=True
                )

                boundaryTablename = 'sregionboundaries'

                runcursor = mydb.cursor()

                query = 'SELECT centroid_x, centroid_y, boundary from ' + boundaryTablename + ' where slide="' +  report_label.slide + \
                '" AND centroid_x BETWEEN ' + str(report_label.left) + ' AND ' + str(report_label.right) + \
                ' AND centroid_y BETWEEN ' + str(report_label.top) + ' AND ' + str(report_label.bottom)

                runcursor.execute(query)

                boundarySet = runcursor.fetchall()

                # find region index from hdf5
                object_idx = load(
                    report_label.left, report_label.right, report_label.top, report_label.bottom, x_centroid_set.astype(np.float), y_centroid_set.astype(np.float)
                )

                # set an array for boundary points in a region to zero
                im_bold = np.zeros((bold_height, bold_width), dtype=np.uint8)

                for i in object_idx:
                    for j in range(len(boundarySet)):
                      x = int(boundarySet[j][0])
                      y = int(boundarySet[j][1])
                      boundaryPoints = []
                      if x == int(x_centroid_set[i, 0]) and y == int(y_centroid_set[i, 0]):
                          object = boundarySet[j][2].encode('utf-8').split(' ')
                          object_points = []
                          for p in range(len(object)-1):
                              intP = map(int, object[p].split(','))
                              intP[0] = intP[0] - report_label.left + bold
                              intP[1] = intP[1] - report_label.top + bold
                              object_points.append(intP)
                          boundaryPoints.append(np.asarray(object_points))
                          cv2.fillPoly(im_bold, boundaryPoints, 255 if predicts[i] > 0 else 128)

                im_out = im_bold[bold:bold+report_label.height, bold:bold+report_label.width]

                imsave(report_label.inFile, im_out)

                runcursor.close()
                mydb.close()

                print ("label success ", report_label.inFile)
                data = {"success": report_label.outFile}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

                uset.users = []
                uset.u_size = 0

                model = networks.Network()
                model.init_model()
                print ("label done")

            if target == 'count':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                uset.setReloadedData(uidx, report_count.trainSet)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, report_count.classifier)
                t1 = time()
                print "Training took ", t1 - t0

                print "Testing Start ... "
                t0 = time()
                predicts = model.predict(dset.features)
                t1 = time()
                print "Predict took ", t1 - t0

                # count positive and negative predictions for each slide
                pos_num = []
                neg_num = []

                for i in range(dset.n_slides):
                    if i == len(dset.dataIdx) - 1:
                        predict = predicts[dset.dataIdx[i, 0]:]
                    else:
                        predict = predicts[dset.dataIdx[i, 0]: dset.dataIdx[i+1, 0]]
                    pos = len(predict[predict>0])
                    neg = len(predict) - pos
                    pos_num.append(pos)
                    neg_num.append(neg)

                print('>> Writing count file')
                out_file = open(report_count.inFile, 'w')

                out_file.write("Slide\t")
                out_file.write("Predicted positive (superpixels)\t")
                out_file.write("Predicted negative (superpixels)\t")                
                out_file.write("\n")

                for i in range(len(dset.slides)):
                    out_file.write("%s\t" % dset.slides[i])
                    out_file.write("%d\t" % pos_num[i])
                    out_file.write("%d\t" % neg_num[i])
                    out_file.write("\n")

                out_file.close()
                print ("count success ", report_count.inFile)
                data = {"success": report_count.outFile}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

                uset.users = []
                uset.u_size = 0

                model = networks.Network()
                model.init_model()
                print ("count done")

            if target == 'map':
                # initialize augment
                agen = augments.Augments()
                # set user train samples
                uset.setReloadedData(uidx, report_map.trainSet)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, report_map.classifier)
                t1 = time()
                print "Training took ", t1 - t0

                slide_idx = dset.getSlideIdx(report_map.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                test_features = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Testing Start ... "
                t0 = time()
                predicts = model.predict(test_features)
                t1 = time()
                print "Predict took ", t1 - t0

                output = h5py.File(report_map.inFile, 'w')
                output.create_dataset('features', data=test_features)
                output.create_dataset('predicts', data=predicts)
                output.create_dataset('x_centroid', data=x_centroid_set)
                output.create_dataset('y_centroid', data=y_centroid_set)
                output.create_dataset('slides', data=[report_map.slide])
                output.close()

                print ("map success ", report_map.inFile)
                data = {"success": report_map.outFile}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

                uset.users = []
                uset.u_size = 0

                model = networks.Network()
                model.init_model()
                print ("map done")

            if target == 'save':
                data = finalize.getData(uset.users[uidx])
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'review':
                data = {}
                data['review'] = []

                for sample in uset.users[uidx]['samples']:
                    sample_data = {}
                    sample_data['id'] = str(sample['id'])
                    sample_data['label'] = 1 if sample['label'] == 1 else -1
                    sample_data['iteration'] = int(sample['iteration'])
                    sample_data['slide'] = str(sample['slide'])
                    sample_data['centX'] = str(sample['centX'])
                    sample_data['centY'] = str(sample['centY'])
                    sample_data['boundary'] = ""
                    sample_data['maxX'] = 0
                    sample_data['maxY'] = 0

                    data['review'].append(sample_data)

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'train':
                # increase checkpoint by 1
                m_checkpoints += 1
                # initialize augment
                agen = augments.Augments()
                uset.setIter(uidx, t_train.iter)

                for sample in t_train.samples:
                    # init sample and augment
                    init_sample = dict(
                        id=0, f_idx=0, checkpoints=0,
                        aurl=None, feature=None, label=0,
                        iteration=0, centX=0, centY=0,
                        slideIdx=0, slide=None
                    )
                    init_augment = dict(
                        id=[], checkpoints=[], feature=[], label=[]
                    )

                    # check whether this sample id already exists in the user's samples
                    remove_idx = []
                    for u in range(len(uset.users[uidx]['samples'])):
                        if uset.users[uidx]['samples'][u]['id'] == sample['id']:
                            remove_idx.append(u)

                    for r in remove_idx:
                        uset.users[uidx]['samples'].pop(r)
                        uset.users[uidx]['augments'].pop(r)

                    # add feature
                    init_sample['id'] = sample['id']
                    init_sample['aurl'] = str(sample['aurl'])
                    init_sample['slide'] = str(sample['slide'])

                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    slideIdx_set = dset.getSlideIdxSet(data_idx, object_num)

                    c_idx = getIdx(
                        x_centroid_set.astype(np.float), y_centroid_set.astype(np.float), slideIdx_set.astype(np.int), np.float32(sample['centX']), np.float32(sample['centY']), slide_idx
                    )

                    f_idx = data_idx + c_idx

                    init_sample['f_idx'] =  f_idx
                    init_sample['feature'] = feature_set[c_idx]
                    init_sample['label'] = 1 if sample['label'] == 1 else 0
                    init_sample['iteration'] = t_train.iter
                    init_sample['centX'] = sample['centX']
                    init_sample['centY'] = sample['centY']
                    init_sample['checkpoints'] = m_checkpoints

                    # add augment features
                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    slide_mean = dset.getWSI_Mean(slide_idx)
                    slide_std = dset.getWSI_Std(slide_idx)

                    a_imgs = agen.prepare_image(init_sample['aurl'], slide_mean, slide_std)
                    a_featureSet = iset.FC1_MODEL.predict(a_imgs)
                    a_featureSet = PCA.transform(a_featureSet)
                    a_labelSet = np.zeros((agen.AUG_BATCH_SIZE, )).astype(np.uint8)
                    a_idSet = []
                    a_checkpointSet = []
                    for i in range(agen.AUG_BATCH_SIZE):
                        a_idSet.append(init_sample['id'])
                        a_checkpointSet.append(init_sample['checkpoints'])
                    if init_sample['label'] > 0:
                        a_labelSet.fill(1)

                    init_augment['id'] = a_idSet
                    init_augment['feature'] = a_featureSet
                    init_augment['label'] = a_labelSet
                    init_augment['checkpoints'] = a_checkpointSet

                    uset.setAugmentData(uidx, init_augment)
                    uset.setTrainSampleData(uidx, init_sample)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                # train_labels = to_categorical(train_labels, num_classes=2)
                if tset_name is None:
                    tset_name = t_train.classifier

                print "Training ... ", len(train_labels)
                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'retrainView':

                m_checkpoints += 1
                # initialize augment
                agen = augments.Augments()

                uset.setIter(uidx, retrain_v.iter)

                print "Augment ... ", len(retrain_v.samples)
                t0 = time()
                for sample in retrain_v.samples:
                    # init sample and augment
                    init_sample = dict(
                        id=0, f_idx=0, checkpoints=0,
                        aurl=None, feature=None, label=0,
                        iteration=0, centX=0, centY=0,
                        slideIdx=0, slide=None
                    )
                    init_augment = dict(
                        id=[], checkpoints=[], feature=[], label=[]
                    )

                    # remove stored samples if they already exist
                    remove_idx = []
                    for u in range(len(uset.users[uidx]['samples'])):
                        if uset.users[uidx]['samples'][u]['id'] == sample['id']:
                            remove_idx.append(u)

                    for r in remove_idx:
                        uset.users[uidx]['samples'].pop(r)
                        uset.users[uidx]['augments'].pop(r)

                    # add feature
                    init_sample['id'] = sample['id']
                    init_sample['aurl'] = str(sample['aurl'])
                    init_sample['slide'] = str(sample['slide'])

                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    slideIdx_set = dset.getSlideIdxSet(data_idx, object_num)

                    c_idx = getIdx(
                        x_centroid_set.astype(np.float), y_centroid_set.astype(np.float), slideIdx_set.astype(np.int), np.float32(sample['centX']), np.float32(sample['centY']), slide_idx
                    )

                    f_idx = data_idx + c_idx

                    init_sample['f_idx'] =  f_idx
                    init_sample['feature'] = feature_set[c_idx]
                    init_sample['label'] = 1 if sample['label'] == 1 else 0
                    init_sample['iteration'] = retrain_v.iter
                    init_sample['centX'] = sample['centX']
                    init_sample['centY'] = sample['centY']
                    init_sample['checkpoints'] = m_checkpoints

                    # add augment features
                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    slide_mean = dset.getWSI_Mean(slide_idx)
                    slide_std = dset.getWSI_Std(slide_idx)

                    a_imgs = agen.prepare_image(init_sample['aurl'], slide_mean, slide_std)
                    a_featureSet = iset.FC1_MODEL.predict(a_imgs)
                    a_featureSet = PCA.transform(a_featureSet)
                    a_labelSet = np.zeros((agen.AUG_BATCH_SIZE, )).astype(np.uint8)
                    a_idSet = []
                    a_checkpointSet = []
                    for i in range(agen.AUG_BATCH_SIZE):
                        a_idSet.append(init_sample['id'])
                        a_checkpointSet.append(init_sample['checkpoints'])
                    if init_sample['label'] > 0:
                        a_labelSet.fill(1)

                    init_augment['id'] = a_idSet
                    init_augment['feature'] = a_featureSet
                    init_augment['label'] = a_labelSet
                    init_augment['checkpoints'] = a_checkpointSet

                    uset.setAugmentData(uidx, init_augment)
                    uset.setTrainSampleData(uidx, init_sample)

                t1 = time()
                print "Augmentation took ", t1 - t0
                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                # train_labels = to_categorical(train_labels, num_classes=2)
                if tset_name is None:
                    tset_name = retrain_v.classifier

                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0, " ", len(train_labels), "Samples"

                slide_idx = dset.getSlideIdx(retrain_v.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Testing Start ... "
                t0 = time()
                predictions = model.predict(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0

                object_idx = load(
                    retrain_v.left, retrain_v.right, retrain_v.top, retrain_v.bottom, x_centroid_set.astype(np.float), y_centroid_set.astype(np.float)
                )
                data = {}
                for i in object_idx:
                    data[str(x_centroid_set[i][0])+'_'+str(y_centroid_set[i][0])] = str(predictions[i])

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'retrainHeatmap':
                m_checkpoints += 1
                # initialize augment
                agen = augments.Augments()

                uset.setIter(uidx, retrain_h.iter)

                for sample in retrain_h.samples:
                    # init sample and augment
                    init_sample = dict(
                        id=0, f_idx=0, checkpoints=0,
                        aurl=None, feature=None, label=0,
                        iteration=0, centX=0, centY=0,
                        slideIdx=0, slide=None
                    )
                    init_augment = dict(
                        id=[], checkpoints=[], feature=[], label=[]
                    )

                    # remove stored samples if they already exist
                    remove_idx = []
                    for u in range(len(uset.users[uidx]['samples'])):
                        if uset.users[uidx]['samples'][u]['id'] == sample['id']:
                            remove_idx.append(u)

                    for r in remove_idx:
                        uset.users[uidx]['samples'].pop(r)
                        uset.users[uidx]['augments'].pop(r)

                    # add feature
                    init_sample['id'] = sample['id']
                    init_sample['aurl'] = str(sample['aurl'])
                    init_sample['slide'] = str(sample['slide'])

                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    object_num = dset.getObjNum(slide_idx)
                    data_idx = dset.getDataIdx(slide_idx)
                    feature_set = dset.getFeatureSet(data_idx, object_num)
                    x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                    y_centroid_set = dset.getYcentroidSet(data_idx, object_num)
                    slideIdx_set = dset.getSlideIdxSet(data_idx, object_num)

                    c_idx = getIdx(
                        x_centroid_set.astype(np.float), y_centroid_set.astype(np.float), slideIdx_set.astype(np.int), np.float32(sample['centX']), np.float32(sample['centY']), slide_idx
                    )

                    f_idx = data_idx + c_idx

                    init_sample['f_idx'] =  f_idx
                    init_sample['feature'] = feature_set[c_idx]
                    init_sample['label'] = 1 if sample['label'] == 1 else 0
                    init_sample['iteration'] = retrain_h.iter
                    init_sample['centX'] = sample['centX']
                    init_sample['centY'] = sample['centY']
                    init_sample['checkpoints'] = m_checkpoints

                    # add augment features
                    slide_idx = dset.getSlideIdx(init_sample['slide'])
                    slide_mean = dset.getWSI_Mean(slide_idx)
                    slide_std = dset.getWSI_Std(slide_idx)

                    a_imgs = agen.prepare_image(init_sample['aurl'], slide_mean, slide_std)
                    a_featureSet = iset.FC1_MODEL.predict(a_imgs)
                    a_featureSet = PCA.transform(a_featureSet)
                    a_labelSet = np.zeros((agen.AUG_BATCH_SIZE, )).astype(np.uint8)
                    a_idSet = []
                    a_checkpointSet = []
                    for i in range(agen.AUG_BATCH_SIZE):
                        a_idSet.append(init_sample['id'])
                        a_checkpointSet.append(init_sample['checkpoints'])
                    if init_sample['label'] > 0:
                        a_labelSet.fill(1)

                    init_augment['id'] = a_idSet
                    init_augment['feature'] = a_featureSet
                    init_augment['label'] = a_labelSet
                    init_augment['checkpoints'] = a_checkpointSet

                    uset.setAugmentData(uidx, init_augment)
                    uset.setTrainSampleData(uidx, init_sample)

                sample_size = len(uset.users[uidx]['samples'])
                sample_batch_size = agen.AUG_BATCH_SIZE * sample_size
                train_size = sample_size + sample_batch_size

                train_features = np.zeros((train_size, set.FEATURE_DIM))
                train_labels = np.zeros((train_size, ))

                for i in range(sample_size):
                    train_features[i] = uset.users[uidx]['samples'][i]['feature']
                    train_labels[i] = uset.users[uidx]['samples'][i]['label']
                    train_features[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['feature']
                    train_labels[i+sample_size:i+sample_size+agen.AUG_BATCH_SIZE] = uset.users[uidx]['augments'][i]['label']

                if tset_name is None:
                    tset_name = retrain_h.classifier

                t0 = time()
                model.train_model(train_features, train_labels, tset_name)
                t1 = time()
                print "Training took ", t1 - t0, " ", len(train_labels), "Samples"

                slide_idx = dset.getSlideIdx(retrain_h.slide)
                object_num = dset.getObjNum(slide_idx)
                data_idx = dset.getDataIdx(slide_idx)
                feature_set = dset.getFeatureSet(data_idx, object_num)
                x_centroid_set = dset.getXcentroidSet(data_idx, object_num)
                y_centroid_set = dset.getYcentroidSet(data_idx, object_num)

                print "Testing Start ... "
                t0 = time()
                if set.IS_HEATMAP == False:
                    scores = model.predict_prob(feature_set)
                t1 = time()
                print "Predict took ", t1 - t0
                # set x and y maps
                retrain_h.setXandYmap()
                # write heatmaps
                retrain_h.setHeatMap(x_centroid_set, y_centroid_set, scores)
                # get heatmap data
                data = retrain_h.getData(0)

                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'cancel':

                uset.users = []
                uset.u_size = 0
                is_normal_loaded = True
                tset_name = None
                is_reloaded = False
                m_checkpoints = 0

                del select
                del finalize
                del viewer
                del retrain_v
                del retrain_h
                del heat
                del t_train
                del report_label

                model = networks.Network()
                model.init_model()
                # dset = dataset.Dataset(set.PATH_TO_SPECIAL)

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)

            if target == 'reviewSave':
                # modify labels if they are changed on review tab
                for q_sample in q_samples:
                    for sample in uset.users[uidx]['samples']:
                        if sample['id'] == q_sample['id']:
                            sample['label'] = 1 if q_sample['label'] == 1 else 0

                    for sample in uset.users[uidx]['augments']:
                        if sample['id'][0] == q_sample['id']:
                            sample['label'][:] = 1 if q_sample['label'] == 1 else 0

                data = {"success": 'pass'}
                db.set(q_uid, json.dumps(data))
                db.ltrim(set.REQUEST_QUEUE, len(q_uid), -1)
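
The run() loop above consumes JSON requests from a Redis list and writes each reply under the request uid. A client-side sketch, assuming producers push with rpush (the connection settings, queue key handling, and polling interval are assumptions; the request fields match what run() reads):

import json
import time
import redis

db = redis.StrictRedis(host='localhost', port=6379)   # connection details assumed

def submit_request(queue_key, uid, target, dataset_name, pca_name, **extra):
    # Push a request shaped like the ones run() parses and poll for the reply it stores.
    request = dict(uid=uid, target=target, dataset=dataset_name, pca=pca_name, **extra)
    db.rpush(queue_key, json.dumps(request))
    while True:
        reply = db.get(uid)
        if reply is not None:
            return json.loads(reply)
        time.sleep(0.5)
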
Example #15
def main():

    network = networks.Network().to(device)
    print(next(network.parameters()).is_cuda)

    game_data = []
    value_data = []
    policy_data = []

    print(device)

    replaybuffer = data_generator.ReplayBuffer(parameters.replay_buffer_size,
                                               device)

    generator = data_generator.DataGenerator(network, parameters.rollouts,
                                             replaybuffer, device, displayer)

    policy_loss = nn.KLDivLoss(reduction='batchmean')
    value_loss = nn.MSELoss()
    optimizer = optim.Adam(network.parameters(), lr=parameters.learning_rate)

    policy_activation = nn.LogSoftmax(dim=1)  # dim=1 assumes (batch, moves) policy outputs

    while True:
        generator.generate_games(parameters.games_generated_at_a_time)
        if len(replaybuffer.games) >= parameters.batch_size:
            for epoch in range(batches_per_train_loop):
                input, policy_target, value_target = replaybuffer.sample(
                    parameters.batch_size)

                optimizer.zero_grad()

                policy_guess, value_guess = network(input)
                '''
                print("policy_guess")
                print(policy_guess)
                print("policy_target")
                print(policy_target)
                print("value_guess")
                print(value_guess)
                print("value_target")
                print(value_target)
                print((policy_guess, value_guess))
                '''
                loss1 = policy_loss(policy_activation(policy_guess),
                                    policy_target)
                loss2 = value_loss(value_guess, value_target)
                loss = loss1 + loss2
                '''
                print("policy_loss")
                print(loss1)
                print("value loss")
                print(loss2)
                print("total loss")
                '''
                print((loss1.item(), loss2.item()))
                loss.backward()
                optimizer.step()
    '''
    processes = []

    for i in range(4):
        print("hi")
        generator = data_generator.DataGenerator(network, rollouts, replaybuffer)
        process = mp.Process(target = generator.generate_games)
        process.start()
        processes.append(process)

    for process in processes:
        process.join()
    '''

    #pool = mp.Pool(mp.cpu_count())
    #queue = mp.Queue(4)
    #datagenerators = [data_generator.DataGenerator(network, rollouts, replaybuffer) for i in range(4)]

    #for datagenerator in datagenerators:
    #queue.put(datagenerator.generate_games())
    #pool.map(datagenerator.generate_games(),  range(6))