Example #1
0
def prob6(input, output):
	f = open(input, 'r')
	choice = int(f.readline())
	if choice == 0:
		FA = SDK.readDFAtable(f)
		Project_06.drawDFA(FA, output)
	else:
		FA = SDK.readEpsNFAtable(f)
		Project_06.drawNFA(FA, output)
	f.close()
Example #2
0
def prob3(input, output):
	f = open(input, 'r')
	alphabet = set(f.readline().split())
	s = f.readline()
	f.close()

	DFA = Project_03.ReToDFA(s, alphabet)

	g = open(output, 'w')
	SDK.writeDFAtable(g, DFA)
	g.close()
    def findAppInfo(self, id=0):
        if is_offline():
            apiSDK.throw_error("Cannot fetch app info in offline mode.",
                               NetworkException)
            return 0
        try:
            resp = self.api_wrapper.request('/manager/app/findAppInfo?id=%s' % id)
            return resp['data']
        except Exception as ex:
            msg = "Failed to get app info. ({})".format(ex)
            print(msg)
            return 0
Example #4
0
def SetupUtils(env):
    env.SharedLibrarySafe = SharedLibrarySafe
    if (os.path.exists('sys/scons/SDK.py')):
        import SDK
        sdk = SDK.idSDK()
        env.BuildSDK = sdk.BuildSDK
    else:
        env.BuildSDK = NotImplementedStub

    if (os.path.exists('sys/scons/Setup.py')):
        import Setup
        setup = Setup.idSetup()
        env.Prepare = setup.Prepare
        env.BuildSetup = setup.BuildSetup
        env.BuildGamePak = setup.BuildGamePak
    else:
        env.Prepare = NotImplementedStub
        env.BuildSetup = NotImplementedStub
        env.BuildGamePak = NotImplementedStub

    if (os.path.exists('sys/scons/OSX.py')):
        import OSX
        OSX = OSX.idOSX()
        env.BuildBundle = OSX.BuildBundle
    else:
        env.BuildBundle = NotImplementedStub
def main():
    prefix = os.environ["prefix"]
    param_file = os.environ["param_file"]
    ref_uri = os.environ["ref_uri"]
    in_uri = os.environ["in_uri"]
    out_uri = os.environ["out_uri"]
    assets_uri = os.environ["assets_uri"]
    build = os.environ["build"]
    in_files = ["{}.sam".format(prefix)]

    start_time = datetime.now()
    print("SORT SAM for {} was started at {}.".format(prefix, str(start_time)))

    task = SDK.Task(step="sort_sam",
                    prefix=prefix,
                    in_files=in_files,
                    param_file=param_file,
                    ref_uri=ref_uri,
                    in_uri=in_uri,
                    out_uri=out_uri,
                    assets_uri=assets_uri)
    task.get_reference_files(build)
    task.download_files("INPUT")
    task.download_files("REF")
    task.download_files("PARAMS")
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print("SORT SAM for {} ended at {}.".format(prefix, str(end_time)))
    total_time = end_time - start_time
    print("Total time for SORT SAM was {}.".format(str(total_time)))
Example #6
0
def SetupUtils( env ):
	env.SharedLibrarySafe = SharedLibrarySafe
	if ( os.path.exists( 'sys/scons/SDK.py' ) ):
		import SDK
		sdk = SDK.idSDK()
		env.PreBuildSDK = sdk.PreBuildSDK
		env.BuildSDK = sdk.BuildSDK
	else:
		env.PreBuildSDK = NotImplementedStub
		env.BuildSDK = NotImplementedStub

	if ( os.path.exists( 'sys/scons/Setup.py' ) ):
		import Setup
		setup = Setup.idSetup()
		env.Prepare = setup.Prepare
		env.BuildSetup = setup.BuildSetup
		env.BuildGamePak = setup.BuildGamePak
	else:
		env.Prepare = NotImplementedStub
		env.BuildSetup = NotImplementedStub
		env.BuildGamePak = NotImplementedStub

	if ( os.path.exists( 'sys/scons/OSX.py' ) ):
		import OSX
		OSX = OSX.idOSX()
		env.BuildBundle = OSX.BuildBundle
	else:
		env.BuildBundle = NotImplementedStub
Example #7
0
def prob5(input, output):
	#input
	f = open(input, 'r')
	#allState, alphabet, startingState, dfaTable, acceptedState
	myDFA = SDK.readDFAtable(f)
	f.close()
	#--------------------------------------------------------------------
	#init

	#newAcceptedState = [u for u in newAcceptedState]
	DFA = Project_05.reduceDFA(myDFA)
	#print output
	g = open(output, 'w')
	SDK.writeDFAtable(g, DFA)
	#g.write('Description\n')
	#for u in newAllState:
		#g.write(str(u) + ': ' + ' '.join(map(str, [v for v in allState if id[v] == u])) + '\n')
	g.close()
Example #8
0
def save_classification_report(y_valid, extraction_type, y_pred, algorithm, **kwargs):
    report_file = sdk.get_or_create_output_file(extraction_type, algorithm, **kwargs)
    print("Start computing information")
    accuracy = metrics.accuracy_score(y_valid, y_pred)
    report_file.write("Correct classifications: {:.2f}%\n".format(accuracy * 100))
    report_file.write("Incorrect classifications: {:.2f}%\n".format((1 - accuracy) * 100))
    report_file.write(metrics.classification_report(y_valid, y_pred))
    report_file.close()
    print("Classification report has been saved!")
Example #9
0
def prob2(input, output):
	#input
	f = open(input, 'r')

	myNFA = SDK.readEpsNFAtable(f)
	f.close()


	g = open(output, 'w')

	#newDfaTable = dict([((id[frozenset(k[0])], k[1]), dfaTable[k]) for k in dfaTable.keys()])

	DFA = Project_02.NfaToDfa(myNFA)

	SDK.writeDFAtable(g, DFA)

	#for st in dfaState:
		#g.write(str(id[frozenset(st)]) + ': ' + str(st) + '\n')
	g.close()
Example #10
0
def main():
    prefix = os.environ["prefix"]
    param_file = os.environ["param_file"]
    ref_uri = os.environ["ref_uri"]
    in_uri = os.environ["in_uri"]
    out_uri = os.environ["out_uri"]
    assets_uri = os.environ["assets_uri"]
    sample_file = os.environ["sample_file"]
    sentieon_pkg = os.environ["sentieon_pkg"]
    license_file = os.environ["sentieon_license"]
    ome = os.environ["ome"]
    build = os.environ["build"]
    if ome == "wes":
        target_file = os.environ["target_file"]
    else:
        target_file = None
    start_time = datetime.now()
    print("Sentieon's GENOTYPER for {} was started at {}.".format(prefix, str(start_time)))
    in_files = []
    task = SDK.Task(
        step="genotyper",
        prefix=prefix,
        in_files=in_files,
        param_file=param_file,
        sentieon_pkg=sentieon_pkg,
        license_file=license_file,
        ref_uri=ref_uri,
        in_uri=in_uri,
        out_uri=out_uri,
        assets_uri=assets_uri,
        target_file=target_file,
        sample_file=sample_file)

    if ome == "wes" and target_file:
        task.download_files("TARGET")
    task.get_reference_files(build)
    task.get_genotyping_samples()
    task.download_files("INPUT")
    task.download_files("REF")
    task.download_files("SENTIEON")
    task.download_files("PARAMS")
    # Note: genotyper is the only step where in_files is set as an env variable;
    # in_files is built from sample_file (see the sketch after this example).
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print("Sentieon's GENOTYPER for {} ended at {}.".format(prefix, str(end_time)))
    total_time = end_time - start_time
    print("Total time for Sentieon's GENOTYPER was {}.".format(str(total_time)))
Example #11
0
def prob1(input, output):
	#input
	f = open(input, 'r')
	myDFA = SDK.readDFAtable(f)
	s = f.readline()
	f.close()

	g = open(output, 'w')
	if Project_01.isAcceptedString(myDFA, s):
		g.write('Accepted')
	else:
		g.write('Not accepted')
	g.close()
Example #12
0
def prob4(input, output):
	#def readDFAtable(f):
	#return (allState, alphabet, startingState, dfaTable, acceptedState)
	#input
	f = open(input, 'r')
	myDFA = SDK.readDFAtable(f)
	f.close()

	s = Project_04.DfaToRE(myDFA)

	g = open(output, 'w')
	g.write(s)
	g.close()
def main():
    prefix = os.environ['prefix']
    param_file = os.environ['param_file']
    ref_uri = os.environ['ref_uri']
    in_uri = os.environ['in_uri']
    out_uri = os.environ['out_uri']
    assets_uri = os.environ['assets_uri']
    build = os.environ['build']
    fam_id = os.environ['fam_id']
    vcf = '{}.vcf'.format(fam_id)
    idx = '{}.idx'.format(vcf)

    in_files = [vcf, idx]

    print(in_files)

    start_time = datetime.now()
    print('SCRUB VCF for {} was started at {}.'.format(prefix, str(start_time)))

    task = SDK.Task(
        step='scrub_vcf',
        prefix=prefix,
        in_files=in_files,
        param_file=param_file,
        ref_uri=ref_uri,
        in_uri=in_uri,
        out_uri=out_uri,
        assets_uri=assets_uri)
    dir_contents = os.listdir('.')

    print('Current dir contents: {}'.format(str(dir_contents)))
    task.get_reference_files(build)
    task.download_files('INPUT')
    task.download_files('REF')
    task.download_files('PARAMS')
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print('SCRUB VCF for {} ended at {}.'.format(prefix, str(end_time)))
    total_time = end_time - start_time
    print('Total time for SCRUB VCF was {}.'.format(str(total_time)))
def main():
    prefix = os.environ["prefix"]
    param_file = os.environ["param_file"]
    ref_uri = os.environ["ref_uri"]
    in_uri = os.environ["in_uri"]
    out_uri = os.environ["out_uri"]
    assets_uri = os.environ["assets_uri"]
    build = os.environ["build"]
    R1 = os.environ["R1"]
    R2 = os.environ["R2"]
    in_files = [R1, R2]
    threads = os.environ['threads']

    print(in_files)

    start_time = datetime.now()
    print("BWA MEM for {} was started at {}.".format(prefix, str(start_time)))

    task = SDK.Task(step="bwa_mem",
                    prefix=prefix,
                    threads=threads,
                    in_files=in_files,
                    param_file=param_file,
                    ref_uri=ref_uri,
                    in_uri=in_uri,
                    out_uri=out_uri,
                    assets_uri=assets_uri)
    dir_contents = os.listdir(".")

    print("Current dir contents: {}".format(str(dir_contents)))
    task.get_reference_files(build)
    task.download_files("INPUT")
    task.download_files("REF")
    task.download_files("PARAMS")
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print("BWA MEM for {} ended at {}.".format(prefix, str(end_time)))
    total_time = end_time - start_time
    print("Total time for BWA MEM was {}.".format(str(total_time)))
Example #15
0
def SetupUtils(env):
    gamepaks = idGamePaks()
    env.BuildGamePak = gamepaks.BuildGamePak
    env.SharedLibrarySafe = SharedLibrarySafe
    try:
        import SDK
        sdk = SDK.idSDK()
        env.PreBuildSDK = sdk.PreBuildSDK
        env.BuildSDK = sdk.BuildSDK
    except:
        print 'SDK.py hookup failed'
        env.PreBuildSDK = NotImplementedStub
        env.BuildSDK = NotImplementedStub
    try:
        import Setup
        setup = Setup.idSetup()
        env.BuildSetup = setup.BuildSetup
    except:
        print 'Setup.py hookup failed'
        env.BuildSetup = NotImplementedStub
Example #16
0
def SetupUtils( env ):
	gamepaks = idGamePaks()
	env.BuildGamePak = gamepaks.BuildGamePak
	env.SharedLibrarySafe = SharedLibrarySafe
	try:
		import SDK
		sdk = SDK.idSDK()
		env.PreBuildSDK = sdk.PreBuildSDK
		env.BuildSDK = sdk.BuildSDK
	except:
		print 'SDK.py hookup failed'
		env.PreBuildSDK = NotImplementedStub
		env.BuildSDK = NotImplementedStub
	try:
		import Setup
		setup = Setup.idSetup()
		env.BuildSetup = setup.BuildSetup
	except:
		print 'Setup.py hookup failed'
		env.BuildSetup = NotImplementedStub
Example #17
0
def main():
    prefix = os.environ["prefix"]
    param_file = os.environ["param_file"]
    ref_uri = os.environ["ref_uri"]
    in_uri = os.environ["in_uri"]
    out_uri = os.environ["out_uri"]
    assets_uri = os.environ["assets_uri"]
    vcf = "{}.gt.snp.recal.vcf".format(prefix)
    idx = "{}.gt.snp.recal.vcf.idx".format(prefix)
    recal_file = "{}.gt.snp.indel.recal.model".format(prefix)
    tranches_file = "{}.gt.snp.indel.tranches".format(prefix)
    in_files = [vcf, idx, recal_file, tranches_file]
    build = os.environ["build"]

    start_time = datetime.now()
    print("VQSR INDEL APPLY for {} was started at {}.".format(
        prefix, str(start_time)))

    task = SDK.Task(step="vqsr_indel_apply",
                    prefix=prefix,
                    in_files=in_files,
                    param_file=param_file,
                    ref_uri=ref_uri,
                    in_uri=in_uri,
                    out_uri=out_uri,
                    assets_uri=assets_uri)
    task.get_reference_files(build)
    task.download_files("INPUT")
    task.download_files("REF")
    task.download_files("PARAMS")
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print("VQSR INDEL APPLY for {} ended at {}.".format(prefix, str(end_time)))
    total_time = end_time - start_time
    print("Total time for VQSR INDEL APPLY was {}.".format(str(total_time)))
Example #18
0
def neural_on_all_datasets():
    scaler = StandardScaler()
    # for extraction_type in os.listdir(path):
    for extraction_type in datasets:
        print("Starting new classification. Extraction method : " +
              extraction_type)
        folder = path + extraction_type + "/"

        try:
            training, validation = sdk.load_dataset_from_folder(
                folder, extraction_type)
            # Normalize training & validation sets
            print("Start normalizing data")
            scaler.fit(training.data)
            training.data = scaler.transform(training.data)
            print("Training data has been normalized")
            validation.data = scaler.transform(validation.data)
            print("Validation data has been normalized")
            # Normalize data
            print("Start balancing training data")
            xs, ys = sdk.balanced_subsample(training.data, training.target)  # see the sketch after this example
            # Build MLP model
            print("Start training MLP Classifier")
            hidden_layer_size = 200
            clf = MLPClassifier(verbose=True,
                                hidden_layer_sizes=hidden_layer_size)
            model = clf.fit(xs, ys)
            print("Training has ended")
            # Save MLP model
            sdk.save_model(model, extraction_type, algorithm)
            # Evaluation model
            print("Start predicting validation set")
            y_pred = model.predict(validation.data)
            # Save Evaluation report
            sdk.save_classification_report(
                validation,
                extraction_type,
                y_pred,
                algorithm,
                suffixe="__real_no_early_stop_balanced_data_hidden_" +
                str(hidden_layer_size) + "_layers")

        except Exception as e:
            print(str(e))
            pass
        print("Ended extraction : " + extraction_type)
def main():
    prefix = os.environ["prefix"]
    param_file = os.environ["param_file"]
    ref_uri = os.environ["ref_uri"]
    in_uri = os.environ["in_uri"]
    out_uri = os.environ["out_uri"]
    assets_uri = os.environ["assets_uri"]
    build = os.environ["build"]
    bam = "{}.sorted.deduped.bam".format(prefix)
    bai = "{}.sorted.deduped.bam.bai".format(prefix)
    bqsr = "{}.base_recal_table.txt".format(prefix)
    in_files = [bam, bai, bqsr]

    start_time = datetime.now()
    print("BASE RECAL for {} was started at {}.".format(
        prefix, str(start_time)))

    task = SDK.Task(step="base_recal",
                    prefix=prefix,
                    in_files=in_files,
                    param_file=param_file,
                    ref_uri=ref_uri,
                    in_uri=in_uri,
                    out_uri=out_uri,
                    assets_uri=assets_uri)
    task.get_reference_files(build)
    task.download_files("INPUT")
    task.download_files("REF")
    task.download_files("PARAMS")
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print("BASE RECAL for {} ended at {}.".format(prefix, str(end_time)))
    total_time = end_time - start_time
    print("Total time for BASE RECAL was {}.".format(str(total_time)))
def main():
    prefix = os.environ["prefix"]
    param_file = os.environ["param_file"]
    ref_uri = os.environ["ref_uri"]
    in_uri = os.environ["in_uri"]
    out_uri = os.environ["out_uri"]
    assets_uri = os.environ["assets_uri"]
    vcf = "{}.gt.vcf.gz".format(prefix)
    tbi = "{}.gt.vcf.gz.tbi".format(prefix)
    in_files = [vcf, tbi]
    build = os.environ["build"]

    start_time = datetime.now()
    print("VQSR SNP MODEL for {} was started at {}.".format(
        prefix, str(start_time)))

    task = SDK.Task(step="vqsr_snp_model",
                    prefix=prefix,
                    in_files=in_files,
                    param_file=param_file,
                    ref_uri=ref_uri,
                    in_uri=in_uri,
                    out_uri=out_uri,
                    assets_uri=assets_uri)
    task.get_reference_files(build)
    task.download_files("INPUT")
    task.download_files("REF")
    task.download_files("PARAMS")
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print("VQSR SNP MODEL for {} ended at {}.".format(prefix, str(end_time)))
    total_time = end_time - start_time
    print("Total time for VQSR SNP MODEL was {}.".format(str(total_time)))
Example #21
0
	raise ValueError("Unrecognized build - environment variable build must be GRCh38 or GRCh37")
if ome == "wes":
	target_file = os.environ["target_file"]
	ref_files.append(target_file)
else:
	target_file = None
start_time = datetime.now()
print "Sentieon's GENOTYPER for {} was started at {}.".format(", ".join(gvcfs), str(start_time))

task = SDK.Task(
	step="genotyper",  
	prefix=prefix, 
	in_files=in_files, 
	ref_files=ref_files,
	param_file=param_file,
	sentieon_pkg=sentieon_pkg,
	license_file=license_file,
	license_uri=license_uri,
	param_uri=param_uri,
	ref_uri=ref_uri,
	in_uri=in_uri,
	out_uri=out_uri,
	target_file=target_file)
if not set(in_files).issubset(set(os.listdir("."))):
	task.download_files("INPUT")
if not set(ref_files).issubset(set(os.listdir("."))):
	task.download_files("REF")
if license_file not in os.listdir("."):
	task.download_files("SENTIEON")
if param_file not in os.listdir("."):
	task.download_files("PARAMS")
if ome == "wes" and target_file not in os.listdir("."):
Example #22
0
	def moduleExec(self,
		strPath,
		oHtmlPage,
		dictQuery,
		dictForm
		):
		print("%r::moduleExec(strPath=%s, oHtmlPage=%s, dictQuery=%r, dictForm=%r)" % (
			self, strPath, oHtmlPage, dictQuery, dictForm))

		if not dictQuery:
			return False

		bResult = False
		for (strCmd, lstArg) in dictQuery.items():
			if (strCmd == "cputemp"):
				for strArg in lstArg:
					# Module test for the CPU temperature monitoring
					if (strArg == "hysterese"):
						TaskSpeak(self.getWorker(), "Der Modultest für die Temperaturüberwachung der CPU wird vorbereitet").start()
						
						fCpuTempA = Globs.getSetting("System", "fCpuTempA", "\\d{2,}\\.\\d+", 60.0)
						fCpuTempB = Globs.getSetting("System", "fCpuTempB", "\\d{2,}\\.\\d+", 56.0)
						fCpuTempC = Globs.getSetting("System", "fCpuTempC", "\\d{2,}\\.\\d+", 53.0)
						fCpuTempH = Globs.getSetting("System", "fCpuTempH", "\\d{2,}\\.\\d+", 1.0)

						fCpuTempStep = fCpuTempH / 2.0
						fCpuTempDir = 1.0
						fCpuTempBase = SDK.getCpuTemp()
						fCpuTemp = fCpuTempBase
						fCpuTempHyst = fCpuTempH * 2.0
						nHysTest = 0

						Globs.s_oQueueTestCpuTempValues.put(fCpuTemp, block=False)

						# First rising, then falling temperatures
						while (fCpuTemp >= (fCpuTempC - (fCpuTempHyst * 2.0))):
							fCpuTemp += (fCpuTempStep * fCpuTempDir)
							Globs.s_oQueueTestCpuTempValues.put(fCpuTemp, block=False)
							
							# Test the first hysteresis band
							if (fCpuTemp > (fCpuTempC + fCpuTempHyst) and nHysTest == 0):
								while (fCpuTemp > (fCpuTempC - fCpuTempHyst)):
									fCpuTemp -= fCpuTempStep
									Globs.s_oQueueTestCpuTempValues.put(fCpuTemp, block=False)
								nHysTest += 1

							# Test the second hysteresis band
							if (fCpuTemp > (fCpuTempB + fCpuTempHyst) and nHysTest == 1):
								while (fCpuTemp > (fCpuTempB - fCpuTempHyst)):
									fCpuTemp -= fCpuTempStep
									Globs.s_oQueueTestCpuTempValues.put(fCpuTemp, block=False)
								nHysTest += 1

							# Test the third hysteresis band
							if (fCpuTemp > (fCpuTempA + fCpuTempHyst) and nHysTest == 2):
								while (fCpuTemp > (fCpuTempA - fCpuTempHyst)):
									fCpuTemp -= fCpuTempStep
									Globs.s_oQueueTestCpuTempValues.put(fCpuTemp, block=False)
								nHysTest += 1

							# Reverse the temperature direction
							if (fCpuTemp >= (fCpuTempA + (fCpuTempHyst * 0.75)) and nHysTest == 3):
								fCpuTempDir *= (-1.0)
								nHysTest += 1

						bResult = True
						TaskSpeak(self.getWorker(), "Der Modultest für die Temperaturüberwachung der CPU ist jetzt bereit").start()
						continue
		# Unknown command
		return bResult
Example #23
0
        "Unrecognized build - environment variable build must be GRCh38 or GRCh37"
    )

bam = "{}.sorted.deduped.bam".format(prefix)
bai = "{}.sorted.deduped.bam.bai".format(prefix)
bqsr = "{}.base_recal_table.txt".format(prefix)
in_files = [bam, bai, bqsr]

start_time = datetime.now()
print "BASE RECAL for {} was started at {}.".format(prefix, str(start_time))

task = SDK.Task(step="base_recal",
                prefix=prefix,
                in_files=in_files,
                ref_files=ref_files,
                param_file=param_file,
                param_uri=param_uri,
                ref_uri=ref_uri,
                in_uri=in_uri,
                out_uri=out_uri)
if not set(in_files).issubset(set(os.listdir("."))):
    task.download_files("INPUT")
if not set(ref_files).issubset(set(os.listdir("."))):
    task.download_files("REF")
if param_file not in os.listdir("."):
    task.download_files("PARAMS")
task.build_cmd()
task.run_cmd()
task.upload_results()
task.cleanup()
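This fragment, like Examples #21 above and #32, #36, #40 and #42 below, repeats the same guard before every download: fetch a file group only when something from it is missing from the working directory. A hypothetical helper (not part of SDK) that captures the pattern once:

def download_if_missing(task, files, label):
    # Fetch a file group only when it is not already present locally.
    if not set(files).issubset(set(os.listdir("."))):
        task.download_files(label)

With that helper, the checks above collapse to download_if_missing(task, in_files, "INPUT"), download_if_missing(task, ref_files, "REF") and download_if_missing(task, [param_file], "PARAMS").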
Example #24
0
import personal_settings
from sklearn import ensemble
import os
import SDK as sdk

path = personal_settings.PATH
algorithm = os.path.basename(__file__).split(".py")[0]
datasets = ["MSD-MARSYAS"]
# datasets = os.listdir(path)

for extraction_type in datasets:
    print("Starting new classification. Extraction method : " +
          extraction_type)
    folder = path + extraction_type + "/"

    try:
        for n_trees in [100]:
            clf = ensemble.RandomForestClassifier(n_jobs=-1,
                                                  verbose=20,
                                                  n_estimators=n_trees)
            sdk.evaluate_classifier(clf,
                                    folder,
                                    extraction_type,
                                    algorithm,
                                    suffixe="_" + str(n_trees) + "_trees")

    except Exception as e:
        print(str(e))
        pass
print("Ended extraction : " + extraction_type)
Example #25
0
	def moduleInit(self, dictModCfg=None, dictCfgUsr=None):
		print("%r::moduleInit(%s) Version 5" % (self, SDK.getCpuTemp()))
		return True
Example #26
0
	def run(self):
		
		strProgram = None
		strPID = None
		oSysCallCmd = False
		strGoodBye = "Tschüssikovski!"
		
		print("Attempt to start message queue ...")
		self.m_oWorker.startQueue()
		print("OK")
		
		TaskSpeak(self.m_oWorker, "Servus!").start()
		
		while True:
			
			print("Attempt to start HTTP Server ...")
			try:
				self.m_oHttpd.run()
				break
			except:
				Globs.exc("HTTP Server starten und laufen lassen")
				
				if not (strProgram and strPID):
					# Try once to free up the occupied port
					oLines = SDK.getShellCmdOutput("netstat -pant")
					for strLine in oLines:
						if re.match("tcp\\s+.*\\s+%s\\:%s\\s+%s\\s+LISTEN\\s+\\d+/dbus-daemon" % (
							re.escape(Globs.s_oHttpd.server_address[0]),
							Globs.s_oHttpd.server_address[1],
							re.escape("0.0.0.0:*"), strLine)):
							for strToken in re.split("\\s+", strLine):
								if (re.match("\\d+/dbus-daemon", strToken)):
									strPID, strProgram = re.split("/", strToken)
									break
							if (strProgram and strPID):
							break
					if (strProgram and strPID):
						TaskSpeak(self.m_oWorker,
							"Das Program %s mit der Prozesskennung %s belegt den Port %s" % (
							strProgram, strPID, Globs.s_oHttpd.server_address[1])).start()
						TaskSpeak(self.m_oWorker,
							"Ich versuche, das Program %s mit der Prozesskennung %s zu beenden" % (
							strProgram, strPID)).start()
							
						SDK.getShellCmdOutput("sudo kill %s" % strPID)
						continue
						
				TaskSpeak(self.m_oWorker,
					"Hoppla! Es gibt wohl Probleme mit dem Webb-Sörver.").start()
				break

		print("HTTP Server STOPPED")
		
		if Globs.s_strExitMode == "halt":
			oSysCallCmd = "sudo halt"
			strGoodBye += " Das System wird jetzt heruntergefahren."
		if Globs.s_strExitMode == "boot":
			oSysCallCmd = "sudo reboot"
			strGoodBye += " Das System wird jetzt neu gestartet."
		
		TaskSpeak(self.m_oWorker, strGoodBye).start()
		
		print("Attempt to stop message queue ...")
		if not self.m_oWorker.stopQueue():
			print("FAILED")
		else:
			print("OK")
		
		if oSysCallCmd:
			print("Executing final syscall: " + oSysCallCmd)
			subprocess.Popen(oSysCallCmd, shell=True)
		
		return
Example #27
0
algorithm = os.path.basename(__file__).split(".py")[0]
datasets = os.listdir(path)
datasets = personal_settings.LARGE_DATASETS

# execute for all datasets:
for extraction_type in datasets:
    print("Starting new classification. Extraction method : " +
          extraction_type)
    folder = path + extraction_type + "/"
    n_trees = 100

    try:
        in_clf_mlp = MLPClassifier(verbose=True,
                                   hidden_layer_sizes=300,
                                   early_stopping=True)
        in_clf_rd = ensemble.RandomForestClassifier(n_jobs=-1,
                                                    verbose=20,
                                                    n_estimators=n_trees)
        clf = OneVsRestClassifier(in_clf_mlp, n_jobs=-1)
        sdk.evaluate_classifier(clf,
                                folder,
                                extraction_type,
                                algorithm,
                                suffixe="_mlp")

    except Exception as e:
        print(str(e))
        pass

    print("Ended extraction : " + extraction_type)
                                             n_jobs=-1,
                                             weights='distance')
    clf_rd = RandomForestClassifier(n_estimators=n_trees, n_jobs=5, verbose=20)
    clf_gd = GradientBoostingClassifier(verbose=20, n_estimators=400)
    # model = clf_rd.fit(X_train, y_train)
    # print("Start predicting validation set")
    # y_pred = model.predict(X_valid)
    # # Save Evaluation report
    # save_classification_report(y_valid, "_".join(merge_d), y_pred, algorithm, suffixe="_gd_400")
    # y_pred = None
    # model = None

    #model = clf_rd.fit(X_train, y_train)
    #sdk.save_model(model,  "_".join(merge_d) + "_rd_550", algorithm)
    print("Start predicting validation set")
    model = sdk.load_model("_".join(merge_d) + "_rd_550", algorithm)
    print("Model loaded")
    y_pred = model.predict(X_valid)
    # Save Evaluation report
    save_classification_report(y_valid,
                               "_".join(merge_d),
                               y_pred,
                               algorithm,
                               suffixe="_rd_550")

except Exception as e:
    print(str(e))
    pass

# training, validation = load_dataset_from_folder(folder)
# X_train, y_train = training.data, training.target
    raise ValueError(
        "Unrecognized build - environment variable mode must be GRCh38 or GRCh37"
    )
vcf = "{}.gt.vcf.gz".format(prefix)
tbi = "{}.gt.vcf.gz.tbi".format(prefix)
in_files = [vcf, tbi]

start_time = datetime.now()
print "VQSR SNP MODEL for {} was started at {}.".format(
    prefix, str(start_time))

task = SDK.Task(step="vqsr_snp_model",
                prefix=prefix,
                in_files=in_files,
                ref_files=ref_files,
                param_file=param_file,
                param_uri=param_uri,
                ref_uri=ref_uri,
                in_uri=in_uri,
                out_uri=out_uri)
if not set(in_files).issubset(set(os.listdir("."))):
    task.download_files("INPUT")
if not set(ref_files).issubset(set(os.listdir("."))):
    task.download_files("REF")
if param_file not in os.listdir("."):
    task.download_files("PARAMS")
task.build_cmd()
task.run_cmd()
task.upload_results()
task.cleanup()
Example #30
0
from sklearn.naive_bayes import GaussianNB
import personal_settings
import SDK as sdk
import os

path = personal_settings.PATH
algorithm = os.path.basename(__file__).split(".py")[0]

# execute for all datasets:
for extraction_type in os.listdir(path):

    print("Starting new classification. Extraction method : " + extraction_type)
    folder = path + extraction_type + "/"

    try:
        clf = GaussianNB()
        sdk.evaluate_classifier(clf, folder, extraction_type, algorithm)
    except Exception as e:
        print(str(e))
        pass

    print("Ended extraction : " + extraction_type)
Example #31
0
def main():
    start_point = os.environ['start_point']
    prefix = os.environ['prefix']
    param_file = os.environ['param_file']
    ref_uri = os.environ['ref_uri']
    in_uri = os.environ['in_uri']
    out_uri = os.environ['out_uri']
    assets_uri = os.environ['assets_uri']
    sentieon_pkg = os.environ['sentieon_pkg']
    license_file = os.environ['sentieon_license']
    build = os.environ['build']
    ome = os.environ['ome']
    if ome == 'wes':
        target_file = os.environ['target_file']
    else:
        target_file = None
    if start_point == 'fastq':
        bam = '{}.sorted.deduped.recalibrated.bam'.format(prefix)
        bai = '{}.sorted.deduped.recalibrated.bai'.format(prefix)
    else:
        bam = os.environ['in_file']
        bai = '{}.bai'.format('.'.join(bam.split('.')[:-1]))
        #bai = '{}.bai'.format(bam)
        #bai = '{}.crai'.format(bam)

    in_files = [bam, bai]
    threads = os.environ['threads']

    start_time = datetime.now()
    print('Sentieons HAPLOTYPER for {} was started at {}.'.format(
        prefix, str(start_time)))

    task = SDK.Task(step='haplotyper',
                    prefix=prefix,
                    threads=threads,
                    in_files=in_files,
                    param_file=param_file,
                    sentieon_pkg=sentieon_pkg,
                    license_file=license_file,
                    ref_uri=ref_uri,
                    in_uri=in_uri,
                    out_uri=out_uri,
                    assets_uri=assets_uri,
                    target_file=target_file)

    if ome == 'wes' and target_file:
        task.download_files('TARGET')
    task.get_reference_files(build)
    task.download_files('INPUT')
    task.download_files('REF')
    task.download_files('SENTIEON')
    task.download_files('PARAMS')
    task.build_cmd()
    task.run_cmd()
    task.upload_results()
    task.cleanup()

    end_time = datetime.now()
    print('Sentieons HAPLOTYPER for {} ended at {}.'.format(
        prefix, str(end_time)))
    total_time = end_time - start_time
    print('Total time for Sentieons HAPLOTYPER was {}.'.format(
        str(total_time)))
Example #32
0
param_uri = os.environ["param_uri"]
param_file = param_uri.split("/")[-1]
ref_uri = os.environ["ref_uri"]
in_uri = os.environ["in_uri"]
out_uri = os.environ["out_uri"]
ref_files = []
in_files = ["{}.sorted.deduped.bam".format(prefix)]

start_time = datetime.now()
print "INDEX BAM for {} was started at {}.".format(prefix, str(start_time))

task = SDK.Task(
	step="index_bam",  
	prefix=prefix, 
	in_files=in_files, 
	ref_files=ref_files,
	param_file=param_file,
	param_uri=param_uri,
	ref_uri=ref_uri,
	in_uri=in_uri,
	out_uri=out_uri)
if not set(in_files).issubset(set(os.listdir("."))):
	task.download_files("INPUT")
if not set(ref_files).issubset(set(os.listdir("."))):
	task.download_files("REF")
if param_file not in os.listdir("."):
	task.download_files("PARAMS")
task.build_cmd()
task.run_cmd()
task.upload_results()
task.cleanup()
Example #33
0
import personal_settings

path = personal_settings.PATH
algorithm = "adaboost_nn"

datasets = personal_settings.ALL_DATASETS

for extraction_type in datasets:

    print("Starting new classification. Extraction method : " +
          extraction_type)
    folder = path + extraction_type + "/"

    try:
        scaler = StandardScaler()
        training, validation = sdk.load_dataset_from_folder(
            folder, extraction_type)
        # Normalize training & validation sets
        print("Start normalizing data")
        # scaler.fit(training.data)
        # training.data = scaler.transform(training.data)
        print("Training data has been normalized")
        # validation.data = scaler.transform(validation.data)
        print("Validation data has been normalized")
        # Build MLP model
        print("Start training MLP Classifier with adaboost")
        # clf = MLPClassifier(verbose=True, hidden_layer_sizes=300, early_stopping=True)
        # Create and fit an AdaBoosted MLP
        #        bdt = AdaBoostClassifier(clf, algorithm="SAMME", n_estimators=500)
        #       model = bdt.fit(training.data, training.target)
        print("Training has ended")
        # Save MLP model
Example #34
0
#execute for all datasets:
for extraction_type in datasets:
    print("Starting new classification. Extraction method : " +
          extraction_type)
    folder = path + extraction_type + "/"

    for weights in ['distance']:

        for n_neighbors in [3, 5, 6, 7]:

            print("Starting new classification. Extraction method : " +
                  extraction_type)
            print("weights = " + weights)

            try:
                clf = neighbors.KNeighborsClassifier(n_neighbors=n_neighbors,
                                                     n_jobs=-1,
                                                     weights=weights)
                sdk.evaluate_classifier(clf,
                                        folder,
                                        extraction_type,
                                        algorithm,
                                        suffixe="_" + str(n_neighbors) +
                                        "NN_" + weights)

            except Exception as e:
                print(str(e))
                pass

            print("Ended extraction : " + extraction_type)
Example #35
0
from sklearn.svm import SVC
from sklearn.model_selection import train_test_split

import SDK as sdk
import personal_settings

path = personal_settings.PATH
extraction_type = "MSD-JMIRMOMENTS"
folder = path + extraction_type + "/"
algorithm = "grid_search_svc"

for extraction_type in personal_settings.LARGE_DATASETS:

    print("Starting new classification. Extraction method : " +
          extraction_type)
    folder = path + extraction_type + "/"
    training, valid = sdk.load_dataset_from_folder(folder, extraction_type)

    X = training.data
    y = training.target

    # Split the dataset in two equal parts
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.5,
                                                        random_state=20)

    tuned_parameters = [{
        'kernel': ['rbf'],
        'gamma': [1e-3, 1e-4],
        'C': [1, 10, 100, 1000]
    }]
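Example #35 breaks off right after the parameter grid is defined. A minimal continuation sketch using the standard scikit-learn grid search; this is a completion under that assumption, not the original code:

from sklearn.model_selection import GridSearchCV

clf = GridSearchCV(SVC(), tuned_parameters, cv=5, scoring="accuracy")
clf.fit(X_train, y_train)
print("Best parameters found: " + str(clf.best_params_))
print("Held-out accuracy: " + str(clf.score(X_test, y_test)))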
Example #36
0
    raise ValueError(
        "Unrecognized build - environment variable mode must be GRCh38 or GRCh37"
    )
R1 = "{}_R1{}".format(prefix, suffix)
R2 = "{}_R2{}".format(prefix, suffix)
in_files = [R1, R2]
threads = os.environ['threads']

start_time = datetime.now()
print "BWA MEM for {} was started at {}.".format(prefix, str(start_time))

task = SDK.Task(step="bwa_mem",
                prefix=prefix,
                threads=threads,
                in_files=in_files,
                ref_files=ref_files,
                param_file=param_file,
                param_uri=param_uri,
                ref_uri=ref_uri,
                in_uri=in_uri,
                out_uri=out_uri)
dir_contents = os.listdir(".")

print "Current dir contents: {}".format(str(dir_contents))
if not set(in_files).issubset(set(dir_contents)):
    task.download_files("INPUT")
if not set(ref_files).issubset(set(dir_contents)):
    task.download_files("REF")
if param_file not in dir_contents:
    task.download_files("PARAMS")
task.build_cmd()
task.run_cmd()
Example #37
0
    Rr = {'alpha': 90, 'A': 210, 'D': chi * 1225}
    Br = {'alpha': 90, 'A': 0, 'D': 0}
    Tr = {'alpha': -90, 'A': 0, 'D': 0}
    Fr = {'alpha': 0, 'A': 0, 'D': 175}

    Sn = {'alpha': 0, 'A': 0, 'D': 540}  #(alpha, A, D, theta); see the DH transform sketch after this example
    Ln = {'alpha': 90, 'A': 140, 'D': 0}
    Un = {'alpha': 0, 'A': 1150, 'D': 0}
    Rn = {'alpha': 90, 'A': 210, 'D': 1225}
    Bn = {'alpha': 90, 'A': 0, 'D': 0}
    Tn = {'alpha': -90, 'A': 0, 'D': 0}
    Fn = {'alpha': 0, 'A': 0, 'D': 175}

    dhParamsR = {'S': Sr, 'L': Lr, 'U': Ur, 'R': Rr, 'B': Br, 'T': Tr, 'F': Fr}
    dhParamsN = {'S': Sn, 'L': Ln, 'U': Un, 'R': Rn, 'B': Bn, 'T': Tn, 'F': Fn}
    NrkSDK = SDK.SDKlib("SDKMFC2015.dll")
    NrkSDK.connToSA()
    homePoseSA = (0, 90, 0, 0, 90, 0, 0)
    rN = robot.robot('NominaLRobot', NrkSDK, 'SA', *homePoseSA, **dhParamsN)

    targs = ['T0', 'T1', 'T2']
    for t in targs:
        T = NrkSDK.GetWorkingTransformOfObjectFixedXYZ('A', t)
        p = getPoseForFrame(T, rN)
        input('Hit Enter to Move to Target')
        rN.setPose(*p)

    input('Hit Enter to Go Home')
    rN.setPose(*homePoseSA)
    input('Hit Enter to Kill Robot')
    NrkSDK.DeleteCollection(rN.col)
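The Sn/Ln/Un/Rn dictionaries above carry Denavit-Hartenberg link parameters in the (alpha, A, D, theta) convention noted in the comment, with theta supplied per joint. Below is a small sketch of the classic DH homogeneous transform those parameters describe; whether robot.robot composes its frames exactly this way is an assumption:

import numpy as np

def dh_transform(alpha, A, D, theta):
    # Classic DH link transform; angles in degrees to match the dictionaries above.
    al, th = np.radians(alpha), np.radians(theta)
    return np.array([
        [np.cos(th), -np.sin(th) * np.cos(al),  np.sin(th) * np.sin(al), A * np.cos(th)],
        [np.sin(th),  np.cos(th) * np.cos(al), -np.cos(th) * np.sin(al), A * np.sin(th)],
        [0.0,         np.sin(al),               np.cos(al),              D],
        [0.0,         0.0,                      0.0,                     1.0],
    ])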
Example #38
0
from SDK import Application
import SDK as apiSDK

if __name__ == '__main__':
    wrapper = apiSDK.create_api_wrapper()
    app = Application(wrapper)
    print(app.findAppInfo(1))
Example #39
0
datasets = ["MSD-SSD"]

pca_training_sets = [0] * 4
pca_validation_sets = [0] * 4
counter = -1

for extraction_type in personal_settings.SMALL_DATASETS[:4]:
    print("Starting new classification. Extraction method : " +
          extraction_type)
    folder = path + extraction_type + "/"
    clf = GaussianNB()
    counter += 1

    try:

        training, validation = sdk.load_dataset_from_folder(path, datasets)
        print("Start training MLP Classifier")
        pca = PCA(2)
        pca_t = pca.fit_transform(training.data)
        pca_v = pca.fit_transform(validation.data)
        pca_training_sets[counter] = pca_t
        pca_validation_sets[counter] = pca_v

        # print("Training has ended")
        # # Save MLP model
        # sdk.save_model(model, extraction_type, algorithm)
        # # Evaluation model
        # print("Start predicting validation set")
        # y_pred = model.predict(validation.data)
        # # Save Evaluation report
        # sdk.save_classification_report(validation, extraction_type, y_pred, algorithm, suffixe="_no_early_stop")
Example #40
0
    )
vcf = "{}.gt.snp.recal.vcf".format(prefix)
idx = "{}.gt.snp.recal.vcf.idx".format(prefix)
recal_file = "{}.gt.snp.indel.recal.model".format(prefix)
tranches_file = "{}.gt.snp.indel.tranches".format(prefix)
in_files = [vcf, idx, recal_file, tranches_file]

start_time = datetime.now()
print "VQSR INDEL APPLY for {} was started at {}.".format(
    prefix, str(start_time))

task = SDK.Task(step="vqsr_indel_apply",
                prefix=prefix,
                in_files=in_files,
                ref_files=ref_files,
                param_file=param_file,
                param_uri=param_uri,
                ref_uri=ref_uri,
                in_uri=in_uri,
                out_uri=out_uri)
if not set(in_files).issubset(set(os.listdir("."))):
    task.download_files("INPUT")
if not set(ref_files).issubset(set(os.listdir("."))):
    task.download_files("REF")
if param_file not in os.listdir("."):
    task.download_files("PARAMS")
task.build_cmd()
task.run_cmd()
task.upload_results()
task.cleanup()
Example #41
0
	def do(self):
		fCpuTemp = SDK.getCpuTemp()
		strCpuUse = SDK.getCpuUse().strip()
		lstRamInfo = SDK.getRamInfo()
		lstDiskSpace = SDK.getDiskSpace()

		fCpuTempA = Globs.getSetting("System", "fCpuTempA", "\\d{2,}\\.\\d+", 60.0)
		fCpuTempB = Globs.getSetting("System", "fCpuTempB", "\\d{2,}\\.\\d+", 56.0)
		fCpuTempC = Globs.getSetting("System", "fCpuTempC", "\\d{2,}\\.\\d+", 53.0)
		fCpuTempH = Globs.getSetting("System", "fCpuTempH", "\\d{2,}\\.\\d+", 1.0)

		try:
			fCpuUse = float(strCpuUse.replace(",", ".", 1))
		except:
			fCpuUse = 0.0
		# Determine the IP address
		if not TaskSystemWatchDog.s_strIpAddr:
			TaskSystemWatchDog.s_strIpAddr = SDK.getNetworkInfo(
				Globs.getSetting("System", "strNetInfoName"))
			if TaskSystemWatchDog.s_strIpAddr:
				TaskSpeak(self.m_oWorker,
				"Die aktuelle Netzwerkadresse ist: %s" % (
				TaskSystemWatchDog.s_strIpAddr.replace(".", " Punkt "))).start()
			elif TaskSystemWatchDog.s_nIpFailCnt < 4:
				TaskSystemWatchDog.s_nIpFailCnt += 1
				TaskSpeak(self.m_oWorker,
				"Die aktuelle Netzwerkadresse konnte nicht ermittelt werden.").start()
			else:
				TaskSystemWatchDog.s_strIpAddr = "127.0.0.1"
				TaskSpeak(self.m_oWorker,
				"Die Netzwerkadresse kann nicht ermittelt werden, daher wird %s angenommen." % (
				TaskSystemWatchDog.s_strIpAddr.replace(".", " Punkt "))).start()
		# Build the CPU statistics
		if not TaskSystemWatchDog.s_bHistory:
			# Initialize the statistics
			TaskSystemWatchDog.s_fCpuTempMin = fCpuTemp
			TaskSystemWatchDog.s_fCpuTempMax = fCpuTemp
			TaskSystemWatchDog.s_fCpuTempAvg = fCpuTemp
			TaskSystemWatchDog.s_fCpuUseMin = fCpuUse
			TaskSystemWatchDog.s_fCpuUseMax = fCpuUse
			TaskSystemWatchDog.s_fCpuUseAvg = fCpuUse
		else:
			# CPU temperatures
			TaskSystemWatchDog.s_fCpuTempMin = min(
				TaskSystemWatchDog.s_fCpuTempMin,
				fCpuTemp)
			TaskSystemWatchDog.s_fCpuTempMax = max(
				TaskSystemWatchDog.s_fCpuTempMax,
				fCpuTemp)
			TaskSystemWatchDog.s_fCpuTempAvg += fCpuTemp
			TaskSystemWatchDog.s_fCpuTempAvg /= 2.0
			# CPU load
			TaskSystemWatchDog.s_fCpuUseMin = min(
				TaskSystemWatchDog.s_fCpuUseMin,
				fCpuUse)
			TaskSystemWatchDog.s_fCpuUseMax = max(
				TaskSystemWatchDog.s_fCpuUseMax,
				fCpuUse)
			TaskSystemWatchDog.s_fCpuUseAvg += fCpuUse
			TaskSystemWatchDog.s_fCpuUseAvg /= 2.0
		# Prepare the system values
		if "CPU" not in Globs.s_dictSystemValues:
			Globs.s_dictSystemValues.update({"CPU" : {}})
		if "RAM" not in Globs.s_dictSystemValues:
			Globs.s_dictSystemValues.update({"Arbeitsspeicher" : {}})
		if "MEM" not in Globs.s_dictSystemValues:
			Globs.s_dictSystemValues.update({"Speicherkapazität" : {}})
		if "Netzwerk" not in Globs.s_dictSystemValues:
			Globs.s_dictSystemValues.update({"Netzwerk" : {}})
		# Record the system values
		Globs.s_dictSystemValues["CPU"].update({
			"Auslastung"		: "%s%%" % (strCpuUse),
			"Auslastung Min"	: "%0.2f%%" % (TaskSystemWatchDog.s_fCpuUseMin),
			"Auslastung Max"	: "%0.2f%%" % (TaskSystemWatchDog.s_fCpuUseMax),
			"Auslastung Avg"	: "%0.2f%%" % (TaskSystemWatchDog.s_fCpuUseAvg),
			"Temperatur"		: "%0.1f°C" % (fCpuTemp),
			"Temperatur Min"	: "%0.2f°C" % (TaskSystemWatchDog.s_fCpuTempMin),
			"Temperatur Max"	: "%0.2f°C" % (TaskSystemWatchDog.s_fCpuTempMax),
			"Temperatur Avg"	: "%0.2f°C" % (TaskSystemWatchDog.s_fCpuTempAvg),})
		Globs.s_dictSystemValues["Netzwerk"].update({
			"IP-Adresse"		: "%s" % (TaskSystemWatchDog.s_strIpAddr),})
		lstLabels = ["Gesamt", "Belegt", "Frei", "Geteilt", "Gepuffert", "Im Cache"]
		nIndex = 0
		for strData in lstRamInfo:
			Globs.s_dictSystemValues["RAM"].update({
			lstLabels[nIndex]	: strData + "K"})
			nIndex += 1
		lstLabels = ["Gesamt", "Belegt", "Verfügbar", "Belegung"]
		nIndex = 0
		for strData in lstDiskSpace:
			Globs.s_dictSystemValues["MEM"].update({
			lstLabels[nIndex]	: strData})
			nIndex += 1
		# Schedule the next run
		self.m_oWorker.runSystemWatchDog()
		# Evaluate the CPU temperature
		strCpuTemp = ("%0.1f Grad" % (TaskSystemWatchDog.s_fCpuTempAvg)
			).replace(".", " Komma ")
		if TaskSystemWatchDog.s_fCpuTempAvg > fCpuTempA:
			#
			# Warning level 3 - emergency shutdown
			#
			TaskSystemWatchDog.s_nCpuTooHot += 1
			if TaskSystemWatchDog.s_nCpuTempLvl != 3: 
				TaskSpeak(self.m_oWorker, "Achtung!").start()
				TaskSpeak(self.m_oWorker, "Temperaturüberschreitung mit %s!" % (
					strCpuTemp)).start()
			TaskSystemWatchDog.s_nCpuTempLvl = 3
			if (TaskSystemWatchDog.s_nCpuTooHot >= 10):
				TaskSpeak(self.m_oWorker, "Notabschaltung eingeleitet!").start()
				TaskExit(self.m_oWorker, "term").start()
				Globs.stop()
			else:
				TaskSpeak(self.m_oWorker,
					"Für Abkühlung sorgen! Notabschaltung %s Prozent!" % (
					TaskSystemWatchDog.s_nCpuTooHot * 10)).start()
		elif (TaskSystemWatchDog.s_fCpuTempAvg > fCpuTempB
			and TaskSystemWatchDog.s_fCpuTempAvg < (fCpuTempA - fCpuTempH)):
			#
			# Warning level 2
			#
			TaskSystemWatchDog.s_nCpuTooHot = 0
#			if TaskSystemWatchDog.s_nCpuTooHot > 0:
#				TaskSystemWatchDog.s_nCpuTooHot -= 1
			if TaskSystemWatchDog.s_nCpuTempLvl != 2:
				TaskSpeak(self.m_oWorker,
					"Die Temperatur ist mit %s zu hoch!" % (
					strCpuTemp)).start()
			TaskSystemWatchDog.s_nCpuTempLvl = 2
		elif (TaskSystemWatchDog.s_fCpuTempAvg > fCpuTempC
			and TaskSystemWatchDog.s_fCpuTempAvg < (fCpuTempB - fCpuTempH)):
			#
			# Warning level 1
			#
			TaskSystemWatchDog.s_nCpuTooHot = 0
			if TaskSystemWatchDog.s_nCpuTempLvl != 1:
				TaskSpeak(self.m_oWorker,
					"Die Temperatur ist mit %s erhöht!" % (
					strCpuTemp)).start()
			TaskSystemWatchDog.s_nCpuTempLvl = 1
		elif (TaskSystemWatchDog.s_nCpuTempLvl != 0
			and TaskSystemWatchDog.s_fCpuTempAvg < (fCpuTempC - fCpuTempH)):
			#
			# Warning level 0 - normal range
			#
			TaskSystemWatchDog.s_nCpuTooHot = 0
			TaskSpeak(self.m_oWorker,
				"Die Temperatur ist mit %s wieder im normalen Bereich" % (
				strCpuTemp)).start()
			TaskSystemWatchDog.s_nCpuTempLvl = 0
		elif not TaskSystemWatchDog.s_bHistory:
			TaskSpeak(self.m_oWorker, 
				"Die Temperatur liegt mit %s im normalen Bereich" % (
				strCpuTemp)).start()
		# Statistics from previous runs are now available
		if not TaskSystemWatchDog.s_bHistory:
			TaskSystemWatchDog.s_bHistory = True
		return
Example #42
0
param_uri = os.environ["param_uri"]
param_file = param_uri.split("/")[-1]
ref_uri = os.environ["ref_uri"]
in_uri = os.environ["in_uri"]
out_uri = os.environ["out_uri"]
ref_files = []
in_files = ["{}.sorted.bam".format(prefix)]

start_time = datetime.now()
print "MARK DUPS for {} was started at {}.".format(prefix, str(start_time))

task = SDK.Task(step="mark_dups",
                prefix=prefix,
                in_files=in_files,
                ref_files=ref_files,
                param_file=param_file,
                param_uri=param_uri,
                ref_uri=ref_uri,
                in_uri=in_uri,
                out_uri=out_uri)
if not set(in_files).issubset(set(os.listdir("."))):
    task.download_files("INPUT")
if not set(ref_files).issubset(set(os.listdir("."))):
    task.download_files("REF")
if param_file not in os.listdir("."):
    task.download_files("PARAMS")
task.build_cmd()
task.run_cmd()
task.upload_results()
task.cleanup()
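Taken together, the SDK.Task steps that appear throughout these examples trace a conventional short-read germline pipeline. The ordering below is inferred from the intermediate file names in the snippets (for example {prefix}.sorted.deduped.bam and {prefix}.gt.vcf.gz), is not stated anywhere in the SDK itself, and is not necessarily complete (the matching SNP-apply and INDEL-model steps, for instance, do not appear above):

PIPELINE_STEPS = [
    "bwa_mem",           # FASTQ pair -> {prefix}.sam
    "sort_sam",          # -> {prefix}.sorted.bam
    "mark_dups",         # -> {prefix}.sorted.deduped.bam
    "index_bam",         # -> {prefix}.sorted.deduped.bam.bai
    "base_recal",        # -> {prefix}.base_recal_table.txt, recalibrated BAM
    "haplotyper",        # per-sample GVCF
    "genotyper",         # joint genotyping -> {prefix}.gt.vcf.gz
    "vqsr_snp_model",    # SNP recalibration model + tranches
    "vqsr_indel_apply",  # apply INDEL recalibration to the recalibrated SNP VCF
    "scrub_vcf",         # final VCF cleanup (position in the ordering assumed)
]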