Example #1
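# Interactive inference: restore the last saved session and answer questions
# typed on stdin (the input sequence is passed to the model transposed, and
# the question is decoded back from the reversed sequence).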
from load import main
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)

dataset, model = main(train=False)

session = model.restore_last_session()

while True:
    line = input("Q: ")
    line = line.strip()
    if not line:
        continue
    inp = dataset.str2sequence(line)
    print("INPUT:")
    print(inp)
    output = model.predict(session, inp.T)
    print("OUTPUT:")
    print(output)
    q = dataset.sequence2str(inp[0][::-1])
    a = dataset.sequence2str(output[0])
    print("Q: {}\nA: {}".format(q, a))
Example #2
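# Training run: build the dataset and model, batch the training data, and train.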
from load import main
import tensorflow as tf
tf.logging.set_verbosity(tf.logging.INFO)

batch_size = 256
dataset, model = main(train=True)

training = dataset.getTrainingData()
training_batch = dataset.getBatches(training, batch_size)
model.train(training_batch)
Example #3
#!/usr/bin/python3
# requires Python > 3.6

import sys

import load

if __name__ == '__main__':
    # Assumption: load.main() takes the command-line arguments.
    load.main(sys.argv[1:])
Example #4
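# Thin launcher for the loadaverage tool: exit with whatever load.main() returns.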
#!/usr/bin/env python3
# loadaverage execution script
# for more info, see github.com/qguv/loadaverage

import sys
import load

sys.exit(load.main())
Example #5
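# This snippet relies on numpy, os, OrderedDict and the project's `load` and
# `util` modules; aggregateOverTime, splitRatio, normalize and saveHDF5 are
# assumed to be helpers defined alongside it in the original project.
import os
from collections import OrderedDict

import numpy as np

import load
import util
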
def main(binarize_qol, tensor_dir, min_date, cutoff):
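    """Aggregate the clinical, initial and treatment tensors, split them into
    train/valid/test sets, and write mm_separate.h5 and mm_joined.h5 to
    tensor_dir.

    Presumed arguments: `binarize_qol` collapses categorical quality-of-life
    features to binary; `tensor_dir` is the directory holding the raw tensors
    (used with a trailing separator); `min_date` is the (negative) offset of
    the treatment start; `cutoff` is the minimum number of patients with a
    nonzero value required to keep an initial feature.
    """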

    #Remove old tensors
    if os.path.exists(tensor_dir + "mm_joined.h5"):
        os.remove(tensor_dir + "mm_joined.h5")
    if os.path.exists(tensor_dir + "mm_separate.h5"):
        os.remove(tensor_dir + "mm_separate.h5")

    dataset = load.main("clinical", tensor_dir)
    clinical_tensor = dataset['tensor']
    clinical_obs_tensor = dataset['obs_tensor']
    clinical_feature_names = dataset['feature_names']
    clinical_feature_types = dataset['feature_types']
    people = dataset['people']

    if binarize_qol:
        # Binarize QOL variables
        for k in range(clinical_tensor.shape[2]):
            if ('qol' in clinical_feature_names[k]
                    and clinical_feature_types[k] == 'categorical'):
                clinical_tensor[:, :, k] = (clinical_tensor[:, :, k] >= 2.) * 1.
                clinical_feature_types[k] = 'binary'

    #Clinical data before the start of treatment in 2-d
    initial_clinical_tensor, initial_clinical_mask = aggregateOverTime(
        clinical_tensor[:, :abs(min_date), :],
        clinical_obs_tensor[:, :abs(min_date), :],
        clinical_feature_types,
        FREQ=abs(min_date))

    print "Initial clinical aggregated"

    #Clinical data after the start of treatment in bins of 90 days
    aggregated_clinical_tensor, binary_clinical_mask = aggregateOverTime(
        clinical_tensor[:, abs(min_date) + 1:, :],
        clinical_obs_tensor[:, abs(min_date) + 1:, :], clinical_feature_types)

    del clinical_tensor
    del clinical_obs_tensor

    print "Clinical aggregated"

    dataset = load.main("initial", tensor_dir)
    initial_tensor = dataset['tensor']
    initial_obs_tensor = dataset['obs_tensor']
    initial_feature_names = dataset['feature_names']
    initial_feature_types = dataset['feature_types']
    assert np.array_equal(dataset['people'], people)

    #Clinical data before the start of treatment in 2-d
    aggregated_initial_tensor, aggregated_initial_mask = aggregateOverTime(
        initial_tensor[:, :abs(min_date), :],
        initial_obs_tensor[:, :, :],
        initial_feature_types,
        FREQ=initial_tensor.shape[1])

    del initial_tensor
    del initial_obs_tensor

    print "Initial aggregated"

    dataset = load.main("treatment", tensor_dir)
    treatment_tensor = dataset['tensor']
    treatment_obs_tensor = dataset['obs_tensor']
    treatment_feature_names = dataset['feature_names']
    treatment_feature_types = dataset['feature_types']
    assert np.array_equal(dataset['people'], people)

    #Treatment data after the start of treatment in bins of 90 days
    aggregated_treatment_tensor, binary_treatment_mask = aggregateOverTime(
        treatment_tensor[:, abs(min_date) + 1:, :],
        treatment_obs_tensor[:, abs(min_date) + 1:, :],
        treatment_feature_types)

    del treatment_tensor
    del treatment_obs_tensor

    print "Treatment aggregated"

    INITIAL_MATRIX = np.concatenate(
        [initial_clinical_tensor, aggregated_initial_tensor], axis=2)
    INITIAL_MASK = np.concatenate(
        [initial_clinical_mask, aggregated_initial_mask], axis=2)
    INITIAL_FEATURE_NAMES = np.concatenate(
        [clinical_feature_names, initial_feature_names])
    INITIAL_FEATURE_TYPES = np.concatenate(
        [clinical_feature_types, initial_feature_types])

    indices = []
    for i in range(len(INITIAL_FEATURE_NAMES)):
        if len(np.nonzero(INITIAL_MATRIX[:, 0, i])[0]) >= cutoff:
            indices.append(i)

    INITIAL_MATRIX = INITIAL_MATRIX[:, :, indices]
    INITIAL_MASK = INITIAL_MASK[:, :, indices]
    INITIAL_FEATURE_NAMES = INITIAL_FEATURE_NAMES[indices]
    INITIAL_FEATURE_TYPES = INITIAL_FEATURE_TYPES[indices]

    #Treatment binary for now
    TREATMENT_TENSOR = binary_treatment_mask
    TREATMENT_MASK = binary_treatment_mask
    TREATMENT_FEATURE_NAMES = treatment_feature_names
    TREATMENT_FEATURE_TYPES = treatment_feature_types

    # Sort features by type
    reshufidx = np.argsort(clinical_feature_types)
    CLINICAL_TENSOR = aggregated_clinical_tensor[:, :, reshufidx]
    CLINICAL_MASK = binary_clinical_mask[:, :, reshufidx]
    CLINICAL_FEATURE_NAMES = clinical_feature_names[reshufidx]
    CLINICAL_FEATURE_TYPES = clinical_feature_types[reshufidx]

    RESPONSE_IDX = list(CLINICAL_FEATURE_NAMES).index(
        "at_treatmentresp-treatment response-numerical")
    DEATH_IDX = list(CLINICAL_FEATURE_NAMES).index(
        "se_primaryreason-primary reason for discontinuation-death")

    patient_response = CLINICAL_TENSOR[:, :, RESPONSE_IDX]
    NpatientsProgressed = np.sum((patient_response.max(1) == 6) * 1.)
    prog_idx = np.where(patient_response.max(1) == 6)[0]
    print(NpatientsProgressed, 'patients progressed')

    patient_death = CLINICAL_TENSOR[:, :, DEATH_IDX]
    NpatientsDied = np.sum((patient_death.max(1) == 1) * 1.)
    death_idx = np.where(patient_death.max(1) == 1)[0]
    print(NpatientsDied, 'patients died')

    both_idx = np.array(
        list(set(prog_idx.tolist()).intersection(set(death_idx.tolist()))))
    print(len(both_idx), ' progressed and died')

    only_prog = np.array([k for k in prog_idx if k not in both_idx.tolist()])
    only_death = np.array([k for k in death_idx if k not in both_idx.tolist()])
    print('Only death: ', len(only_death), ' Only progressed', len(only_prog))

    np.random.seed(0)
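    # Stratified split: patients who both progressed and died, those who only
    # progressed, those who only died, and everyone else are split separately
    # so each outcome group appears in train, valid and test.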
    train_both, valid_both, test_both = splitRatio(both_idx)
    train_prog, valid_prog, test_prog = splitRatio(only_prog)
    train_death, valid_death, test_death = splitRatio(only_death)

    assert len(only_prog) + len(only_death) + len(both_idx) == len(
        set(prog_idx.tolist() + death_idx.tolist())), 'Check failed'

    remaining_idx = np.array(
        list(
            set(range(CLINICAL_TENSOR.shape[0])) -
            set(prog_idx.tolist() + death_idx.tolist())))
    train_remain, valid_remain, test_remain = splitRatio(remaining_idx)

    train_total = np.array(train_both.tolist() + train_prog.tolist() +
                           train_death.tolist() + train_remain.tolist())
    valid_total = np.array(valid_both.tolist() + valid_prog.tolist() +
                           valid_death.tolist() + valid_remain.tolist())
    test_total = np.array(test_both.tolist() + test_prog.tolist() +
                          test_death.tolist() + test_remain.tolist())

    np.random.shuffle(train_total)
    np.random.shuffle(valid_total)
    np.random.shuffle(test_total)

    print(train_total.shape[0] + test_total.shape[0] + valid_total.shape[0],
          CLINICAL_TENSOR.shape[0])

    #Find index under current feature names
    LABEL_NAMES = np.array([
        'progression', 'progression_days', 'surrogates for overall survival',
        'os days', 'last obs days'
    ])

    CLINICAL_Y = np.concatenate([
        util.PDlabel(CLINICAL_TENSOR, RESPONSE_IDX).reshape(-1, 1),
        util.PDdays(CLINICAL_TENSOR, RESPONSE_IDX).reshape(-1, 1),
        util.deathLabel(CLINICAL_TENSOR, DEATH_IDX).reshape(-1, 1),
        util.deathDays(CLINICAL_TENSOR, DEATH_IDX).reshape(-1, 1),
        util.obsDays(CLINICAL_TENSOR).reshape(-1, 1)
    ],
                                axis=1)

    #Separate Tensors
    ## Clinical
    CLINICAL_TRAIN = CLINICAL_TENSOR[train_total]
    CLINICAL_TRAIN_Y = CLINICAL_Y[train_total]
    CLINICAL_TRAIN_MASK = CLINICAL_MASK[train_total]
    CLINICAL_VALID = CLINICAL_TENSOR[valid_total]
    CLINICAL_VALID_Y = CLINICAL_Y[valid_total]
    CLINICAL_VALID_MASK = CLINICAL_MASK[valid_total]
    CLINICAL_TEST_Y = CLINICAL_Y[test_total]
    CLINICAL_TEST = CLINICAL_TENSOR[test_total]
    CLINICAL_TEST_MASK = CLINICAL_MASK[test_total]

    ## Treatment
    TREATMENT_TRAIN = TREATMENT_TENSOR[train_total]
    TREATMENT_TRAIN_MASK = TREATMENT_MASK[train_total]
    TREATMENT_VALID = TREATMENT_TENSOR[valid_total]
    TREATMENT_VALID_MASK = TREATMENT_MASK[valid_total]
    TREATMENT_TEST = TREATMENT_TENSOR[test_total]
    TREATMENT_TEST_MASK = TREATMENT_MASK[test_total]

    ## Initial
    INITIAL_TRAIN = INITIAL_MATRIX[train_total]
    INITIAL_VALID = INITIAL_MATRIX[valid_total]
    INITIAL_TEST = INITIAL_MATRIX[test_total]

    #Normalize all according to train data distributions
    INITIAL_TRAIN, INITIAL_VALID, INITIAL_TEST = normalize(
        INITIAL_TRAIN, INITIAL_VALID, INITIAL_TEST, INITIAL_FEATURE_TYPES)
    CLINICAL_TRAIN, CLINICAL_VALID, CLINICAL_TEST = normalize(
        CLINICAL_TRAIN, CLINICAL_VALID, CLINICAL_TEST, CLINICAL_FEATURE_TYPES)
    TREATMENT_TRAIN, TREATMENT_VALID, TREATMENT_TEST = normalize(
        TREATMENT_TRAIN, TREATMENT_VALID, TREATMENT_TEST,
        TREATMENT_FEATURE_TYPES)

    DATASET_SEPARATE = OrderedDict()
    DATASET_SEPARATE['train_y'] = CLINICAL_TRAIN_Y
    DATASET_SEPARATE['valid_y'] = CLINICAL_VALID_Y
    DATASET_SEPARATE['test_y'] = CLINICAL_TEST_Y
    DATASET_SEPARATE['y_names'] = LABEL_NAMES

    DATASET_SEPARATE['train_x'] = CLINICAL_TRAIN
    DATASET_SEPARATE['train_x_mask'] = CLINICAL_TRAIN_MASK
    DATASET_SEPARATE['valid_x'] = CLINICAL_VALID
    DATASET_SEPARATE['valid_x_mask'] = CLINICAL_VALID_MASK
    DATASET_SEPARATE['test_x'] = CLINICAL_TEST
    DATASET_SEPARATE['test_x_mask'] = CLINICAL_TEST_MASK
    DATASET_SEPARATE['x_names'] = CLINICAL_FEATURE_NAMES
    DATASET_SEPARATE['x_types'] = CLINICAL_FEATURE_TYPES

    DATASET_SEPARATE['train_u'] = TREATMENT_TRAIN
    DATASET_SEPARATE['train_u_mask'] = TREATMENT_TRAIN_MASK
    DATASET_SEPARATE['valid_u'] = TREATMENT_VALID
    DATASET_SEPARATE['valid_u_mask'] = TREATMENT_VALID_MASK
    DATASET_SEPARATE['test_u'] = TREATMENT_TEST
    DATASET_SEPARATE['test_u_mask'] = TREATMENT_TEST_MASK
    DATASET_SEPARATE['u_names'] = TREATMENT_FEATURE_NAMES
    DATASET_SEPARATE['u_types'] = TREATMENT_FEATURE_TYPES

    DATASET_SEPARATE['train_init'] = INITIAL_TRAIN[:, 0, :]
    DATASET_SEPARATE['valid_init'] = INITIAL_VALID[:, 0, :]
    DATASET_SEPARATE['test_init'] = INITIAL_TEST[:, 0, :]
    DATASET_SEPARATE['init_names'] = INITIAL_FEATURE_NAMES
    DATASET_SEPARATE['init_types'] = INITIAL_FEATURE_TYPES
    saveHDF5(tensor_dir + 'mm_separate.h5', DATASET_SEPARATE)

    #Combined Tensors
    print('WARNING: ASSUMES BINARY TREATMENT!!!')
    BOTH_TRAIN = np.concatenate([TREATMENT_TRAIN, CLINICAL_TRAIN], axis=2)
    BOTH_TRAIN_MASK = np.concatenate(
        [TREATMENT_TRAIN_MASK, CLINICAL_TRAIN_MASK], axis=2)
    BOTH_VALID = np.concatenate([TREATMENT_VALID, CLINICAL_VALID], axis=2)
    BOTH_VALID_MASK = np.concatenate(
        [TREATMENT_VALID_MASK, CLINICAL_VALID_MASK], axis=2)
    BOTH_TEST = np.concatenate([TREATMENT_TEST, CLINICAL_TEST], axis=2)
    BOTH_TEST_MASK = np.concatenate([TREATMENT_TEST_MASK, CLINICAL_TEST_MASK],
                                    axis=2)

    BOTH_FEATURE_NAMES = np.array(TREATMENT_FEATURE_NAMES.tolist() +
                                  CLINICAL_FEATURE_NAMES.tolist())
    BOTH_FEATURE_TYPES = np.array(TREATMENT_FEATURE_TYPES.tolist() +
                                  CLINICAL_FEATURE_TYPES.tolist())

    DATASET_JOINED = OrderedDict()
    DATASET_JOINED['train_y'] = CLINICAL_TRAIN_Y
    DATASET_JOINED['valid_y'] = CLINICAL_VALID_Y
    DATASET_JOINED['test_y'] = CLINICAL_TEST_Y
    DATASET_JOINED['y_names'] = LABEL_NAMES
    DATASET_JOINED['train_x'] = BOTH_TRAIN
    DATASET_JOINED['train_mask'] = BOTH_TRAIN_MASK
    DATASET_JOINED['valid_x'] = BOTH_VALID
    DATASET_JOINED['valid_mask'] = BOTH_VALID_MASK
    DATASET_JOINED['test_x'] = BOTH_TEST
    DATASET_JOINED['test_mask'] = BOTH_TEST_MASK
    DATASET_JOINED['x_names'] = BOTH_FEATURE_NAMES
    DATASET_JOINED['x_types'] = BOTH_FEATURE_TYPES
    saveHDF5(tensor_dir + 'mm_joined.h5', DATASET_JOINED)
Example #6
def load_env(environment):
    """Load the given environment on this box, e.g. load_env("dev").

    :param environment: name of the environment to load
    """
    # `local_host` is assumed to be defined elsewhere in the module.
    print("Load {0} environment on box: {1}".format(environment, local_host))
    load.main(environment)
Example #7
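# Geometry helpers plus a script fragment: load the stored locations, query
# the current position, and build a +/-0.005 box around it.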
import math

import load


def dist(ax, ay, bx, by):
    x2 = (ax - bx) * (ax - bx)
    y2 = (ay - by) * (ay - by)
    res = math.sqrt(x2 + y2)
    return res


def calcQuad(x, y):
    x_int = int(str(x).split(".")[0])
    stry = str(y)

'''MAIN CODE'''
locations = load.main()
print(locations)

buff = list()

pos = load.getpos()

midx = pos[0]
midy = pos[1]

x0 = midx - 0.005
y0 = midy - 0.005

x1 = midx + 0.005
y1 = midy + 0.005
Example #8
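        # Fragment of a plotting method: build the calibration line from the
        # chosen intensities and display it alongside the data points with
        # biggles.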
        yy = [xx_min[xindex], xx_max[xindex]]
        iindex = self.element_indices.index(index)
        calibr = self.calibrations[iindex]
        print(yy)
        
        """
        xx=[
                self.equation(calibr[0], xx_min),
                self.equation(calibr[0], xx_max)
        ]
        """
        xx = [
            self.equation(calibr[0], self.choose(self.intensities[1])),
            self.equation(calibr[0], self.choose(self.intensities[0])),
        ]
    
        line = biggles.Curve(xx, yy)
        line.label = "Calibrating line"
        
        #legend = biggles.PlotKey( .7, .9, [points, line] )
        
        #p.add( line, points, legend)
    p.add(points, line)
        p.show()

if __name__ == '__main__':
    import load
    load.main()
Example #9
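	# Callback: reload the latitude/longitude/name arrays from the chosen
	# .asc file via load.main and print them.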
	def confirm(self, fichier):
		var.lat, var.lon, var.name = load.main(fichier + ".asc")
		print(var.lat, var.lon, var.name)
Example #10
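# Fragment of a GUI module (BoxLayout suggests Kivy): a helper that round-trips
# text through a temporary file, followed by the main layout class that reloads
# coordinates from .asc files selected in the interface.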
		tmpb = tmpb + tmpa
	flm.close()
	flm = open("tmp.txt", "w")
	flm.write(tmpb)
	flm.close()

	xf = read_file("tmp.txt")
	os.remove("tmp.txt")
	xf.remove("")
	return xf


"""
	START OF THE INTERFACE CLASSES
"""
var.lat, var.lon, var.name = load.main("Lidl_Fr" + ".asc")


class AffPrincipal(BoxLayout):
	def confirm(self, fichier):
		var.lat, var.lon, var.name = load.main(fichier + ".asc")
		print(var.lat, var.lon, var.name)

	def confirmload(self, text):
		R = var.name.index(text)
		print(var.lat[R], var.lon[R], text)

	def update(self, *args):
		self.gridmain.box.boxf.spinaero.text = AffPrincipal.get_spin_f(self, 1)
		self.gridmain.box.boxf.spinaero.values = AffPrincipal.get_spin_f(self, 0)

	def update2(self, *args):
		self.gridmain.box.boxf.spinaero2.text = AffPrincipal.get_spin_f2(self, 1)
		self.gridmain.box.boxf.spinaero2.values = AffPrincipal.get_spin_f2(self, 0)