Beispiel #1
0
def energies(seq_list):
    """Compute the minimum free energy (MFE) of each RNA sequence.

    Folding work is fanned out over 12 worker processes via the
    module-level ``multiprocessing`` helper, with ``rna_folder`` doing
    the per-sequence folding.

    Parameters
    ----------
    seq_list : iterable of str
        RNA sequences to fold.

    Returns
    -------
    list
        One energy value per input sequence, in input order.
    """
    # list(seq_list) replaces the old identity comprehension; the
    # throwaway `energy_list = []` initialisation was dead code, since
    # the pool call rebound the name immediately.
    return multiprocessing(rna_folder, list(seq_list), 12)
Beispiel #2
0
  def __init__(
    self,
    constraint,
    caption = "concurrency",
    single = single(),
    multicore = multicore(),
    extras = [ batch() ],
    conf_threading = threading(),
    conf_multiprocessing = multiprocessing(),
    prefer_mp = False,
    ):
    """Assemble the concurrency options that are valid for *constraint*.

    A working single-CPU option is mandatory: if it cannot be
    configured, a ConfigurationError is raised. The multicore option
    and each platform in *extras* are optional and silently skipped
    when their configuration fails.

    NOTE(review): the defaults above are evaluated once at class
    definition time and shared between instances (mutable ``extras``
    included) — confirm this is intentional before changing them, as
    they are part of the public interface.
    """
    self.caption = caption
    self.single = single
    self.multicore = multicore

    # List order expresses preference: callers try technologies front to back.
    if prefer_mp:
      technologies = [ conf_multiprocessing, conf_threading ]

    else:
      technologies = [ conf_threading, conf_multiprocessing ]

    self.option_for = {}

    try:
      self.option_for[ single.caption ] = single(
        constraint = constraint,
        technologies = technologies,
        )

    except ConfigurationError as err:
      # Chain the original exception so the root cause is not lost.
      raise ConfigurationError("'%s' option not valid with this setup" % single.caption) from err

    try:
      self.option_for[ multicore.caption ] = multicore(
        constraint = constraint,
        technologies = technologies,
        )

    except ConfigurationError:
      # Multicore support is optional; fall back to single-CPU only.
      pass

    self.extras = []

    for platform in extras:
      try:
        self.option_for[ platform.caption ] = platform( constraint = constraint )

      except ConfigurationError:
        continue  # this extra platform is unavailable on this setup

      self.extras.append( platform )
Beispiel #3
0
  def __init__(
    self,
    constraint,
    caption = "concurrency",
    single = single(),
    multicore = multicore(),
    extras = [ batch() ],
    conf_threading = threading(),
    conf_multiprocessing = multiprocessing(),
    prefer_mp = False,
    ):
    """Assemble the concurrency options that are valid for *constraint*.

    A working single-CPU option is mandatory: if it cannot be
    configured, a ConfigurationError is raised. The multicore option
    and each platform in *extras* are optional and silently skipped
    when their configuration fails.

    NOTE(review): the defaults above are evaluated once at class
    definition time and shared between instances (mutable ``extras``
    included) — confirm this is intentional before changing them, as
    they are part of the public interface.
    """
    self.caption = caption
    self.single = single
    self.multicore = multicore

    # List order expresses preference: callers try technologies front to back.
    if prefer_mp:
      technologies = [ conf_multiprocessing, conf_threading ]

    else:
      technologies = [ conf_threading, conf_multiprocessing ]

    self.option_for = {}

    try:
      self.option_for[ single.caption ] = single(
        constraint = constraint,
        technologies = technologies,
        )

    except ConfigurationError as err:
      # The original `raise E, msg` form is Python 2 syntax (a SyntaxError
      # under Python 3); rewritten with parentheses plus exception chaining.
      raise ConfigurationError("'%s' option not valid with this setup" % single.caption) from err

    try:
      self.option_for[ multicore.caption ] = multicore(
        constraint = constraint,
        technologies = technologies,
        )

    except ConfigurationError:
      # Multicore support is optional; fall back to single-CPU only.
      pass

    self.extras = []

    for platform in extras:
      try:
        self.option_for[ platform.caption ] = platform( constraint = constraint )

      except ConfigurationError:
        continue  # this extra platform is unavailable on this setup

      self.extras.append( platform )
Beispiel #4
0
def scramble(text, randomizations, type):
    """Generate shuffled variants of *text*.

    Parameters
    ----------
    text : str
        Sequence fragment to shuffle.
    randomizations : int
        Number of shuffled copies to produce.
    type : str
        ``"di"`` for dinucleotide shuffling, ``"mono"`` for plain
        character shuffling. (The name shadows the ``type`` builtin but
        is kept for caller compatibility.)

    Returns
    -------
    list of str
        The shuffled sequences; empty when *type* is unrecognised.
    """
    frag = str(text)
    frag_seqs = []
    if type == "di":
        # Dinucleotide shuffle: one sequence per requested randomization.
        for _ in range(randomizations):
            frag_seqs.append(dinuclShuffle(frag))
    elif type == "mono":
        # [frag] * n replaces the old identity comprehension (unused loop
        # variable); 12 worker processes perform the shuffles.
        frag_seqs = multiprocessing(randomizer, [frag] * randomizations, 12)
    else:
        print("Shuffle type not properly designated; please input \"di\" or \"mono\"")

    return frag_seqs
Beispiel #5
0
            y_axis = np.power((y - float(grid_np_list_y[m])), 2)
            z_axis = np.power((z - float(grid_np_list_z[m])), 2)
            distance_to_atoms = (np.sqrt(x_axis + y_axis + z_axis))
            n = 0
            count = 0
            while n < len(x_axis):
                if distance_to_atoms[n] >= (surface_probe_diameter / 2):
                    count += 1
                else:
                    break
                n += 1
            if count == len(x_axis):
                return check_line


# --- Final step: write grid PDB files and report computed volumes ---------
# NOTE(review): `multiprocessing` is called here with no arguments — confirm
# which helper this refers to; it is not the three-argument pool helper.
multiprocessing()

# Convert each point list into a PDB grid file (vs.pdb defined elsewhere).
vs.pdb.divided_points("protein_points.txt", "protein_grid.pdb")
vs.pdb.divided_points("void_points.txt", "void_grid.pdb")
vs.pdb.divided_points("probe_points.txt", "probe_grid.pdb")

print("Done step 6/6")

# Whitespace-split point files: each token is one grid point coordinate entry.
protein_infile = open("protein_points.txt", "r").read().split()
void_infile = open("void_points.txt", "r").read().split()

# Volume = (probe edge length)^3 * number of occupied grid points.
# NOTE(review): assumes `probe_size` is in Angstroms — confirm with caller.
void_volume = np.power(probe_size, 3) * len(void_infile)
protein_volume = np.power(probe_size, 3) * len(protein_infile)  #cubic
print("\n\n\n" + str(protein_volume) + " A^3 protein volume")
print(str(void_volume) + " A^3 void volume")
total_volume = protein_volume + void_volume
# --- Interactive entry point: detect platform, pick a run mode, dispatch ---
__osplat = fn.extplatform()
executeable = fn.copasiexec()
fn.copasiexist()

print ("Found OS: %s\n" % __osplat)
print ("I'm going to use the following executeable: %s\n" % executeable)


print ("Please select the run mode:\n")
print ("Press 'c' for Multi config file mode:")
print ("Edits the filename of the data set in multiple cpsfiles (Models) \n\n\n")
print ("Press 'd' for Multi dataset file mode")
print ("Adds multiple Datasets to one cps file\n\n")

runmode = str ( input() )

runfileprocessing = []
# Default to failure so an unrecognised run mode reaches the sys.exit
# below instead of crashing with NameError on the `if returncode:` check.
returncode = False

if runmode in ("c", "C"):
    returncode, runfileprocessing = multicpsfiles() # List backref
elif runmode in ("d", "D"):
    returncode, runfileprocessing = multidataset() # Tupel backref

if returncode:
    multiprocessing(runfileprocessing)
else:
    sys.exit("Error: Method failed!")

Beispiel #7
0
        if '.minion' in i:
            try:
                test_names.append(subfolder + '/' + i.replace('.minion', ''))
                subprocess.check_call(['rm', test_names[-1] + '.xls'
                                       ])  # , shell=True)
            except:
                continue


if __name__ == '__main__':
    # Repeat the whole benchmark sweep 10 times. The old code used `i`
    # for this loop AND the nested subdir loop, silently clobbering it;
    # the outer index was unused, so it is now `_`.
    for _ in range(10):
        test_names = []
        benchmark = []
        try:
            subdirs = [x[0] for x in os.walk('./testcases/')]
            for subdir in subdirs:
                test_name_add(subdir)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still interrupts.
            subdirs = []
        if len(sys.argv) > 2:
            if sys.argv[2] == 'c':
                clear_results(test_names)
        else:
            # Placeholder list is overwritten by the parallel run below.
            benchmark = ['x'] * len(test_names)
            benchmark = multiprocessing(parproc, range(len(test_names)),
                                        min(len(test_names), 4))
            bm = get_benchmark()

    print('done')
Beispiel #8
0
 def hardest_read2(self):
     """Return the largest average-vowel score computed over url_list.

     Scores are produced in parallel by `multiprocessing`; the maximum
     is taken by sorting the results in descending order.
     """
     scores = multiprocessing(avg_vowels, url_list)
     return sorted(scores, reverse=True)[0]
#     # jobs = []
#     #
#     # start_time = time.time()
#     # for i in range(0, 100):
#     #     p = multiprocessing.Process(target=worker, args=(i,))
#     #     jobs.append(p)
#     #     p.start()
#     #
#     # print(time.time() - start_time)
#     #
#     # print('-' * 50)

from concurrent.futures import ProcessPoolExecutor, ThreadPoolExecutor


def multithreading(func, args, workers):
    """Map *func* over *args* on a pool of *workers* threads.

    Returns the results as a list, in input order.
    """
    with ThreadPoolExecutor(workers) as pool:
        return list(pool.map(func, args))


def multiprocessing(func, args, workers):
    """Map *func* over *args* across a pool of *workers* processes.

    Returns the results as a list, in input order. (Name intentionally
    matches existing callers, though it shadows the stdlib module.)
    """
    with ProcessPoolExecutor(workers) as pool:
        return list(pool.map(func, args))


if __name__ == '__main__':
    # Demo entry point: fan `worker` out over 1000 inputs on 6 processes.
    # NOTE(review): `worker` is not defined in this file view — confirm it
    # exists at import time before running.
    # multithreading(worker, range(1000), 6)
    multiprocessing(worker, range(1000), 6)