# Project-local modules used below (reader, generateKunquat, music and debug
# belong to this project and are assumed importable here).
import debug
import generateKunquat
import music
import reader


def composeSong(analysis_result_json_string, sleep_result_json_string):
	respiration_timestamps, respiration_cycle_lengths, ihr_timestamps, ihr_values, actigram_timestamps = reader.getSignalAnalysisResults(analysis_result_json_string)
	
	reader.processSleepData(sleep_result_json_string)
	
	actigram = reader.processActigram(actigram_timestamps)
	
	respiration = reader.processRespirationVariability(analysis_result_json_string) 
	IHR = reader.processIHR(ihr_timestamps, ihr_values)
	
	reader.syncStages(IHR) # take the tempos and stages and give faster stages more beats and slower ones fewer; the stages come from reader
	
	nightLength = reader.getNightLength()
	generateKunquat.makeDirectories()
	generateKunquat.makeFiles()
	voices = 2 # not counting the actigram track
	scaletype = 1 # 0 for minor, 1 for major
	music.generate([], voices, nightLength, reader.getStages(), respiration, scaletype, actigram) # second-to-last parameter = rhythm density, last parameter = volume variation
	
	#music.actigramNoise(actigram)
	
	music.setTempos(IHR) #works with both respiration and IHR
	
	voices = music.howManyVoices() # now including the actigram track
	for i in range(voices): #for each voice (if several)
	#	print str(i)+":"
	#	if (i==2):
	#		print music.getVoice(i)
		generateKunquat.columnWriter(i, music.getVoice(i))
	generateKunquat.patternWriter(music.getLength()+4)
	#debug.debugRythm(music)
	debug.debugprint(music)
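
A minimal usage sketch for composeSong; the JSON file names below are hypothetical, the function itself only needs the two result strings:

if __name__ == "__main__":
	# Hypothetical driver: read the two analysis results from disk and compose the song.
	with open("analysis_result.json") as f:
		analysis_json = f.read()
	with open("sleep_result.json") as f:
		sleep_json = f.read()
	composeSong(analysis_json, sleep_json)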
Example #2
def debugprint(music):
    print ""
    print "DEGUB PRINTS: "
    print ""
    print "Actigram (ticks per beat -version):"
    print reader.actigram
    print ""
    print "Respiration: " + str(reader.respiration)
    print "Data in respiration that is not < 0.5 long..:"
    for a in range(len(reader.respiration)):
        if (a != len(reader.respiration) - 1 and
                reader.respiration[a + 1][0] - reader.respiration[a][0] > 0.5):
            print reader.respiration[a]
    print ""
    print "Respiration length: " + str(len(reader.respiration))
    print ""
    print "IHR: " + str(reader.IHR)
    print "Data in IHR that is not < 0.5 long..:"
    for a in range(len(reader.IHR)):
        if (a != len(reader.IHR) - 1
                and reader.IHR[a + 1][0] - reader.IHR[a][0] > 0.5):
            print reader.IHR[a]
    print ""
    print "IHR length: " + str(len(reader.IHR))
    print ""
    print "Chords: " + str(music.chorddata)
    print "Number of chords: " + str(len(music.chorddata))
    print ""
    print "Scale type: " + music.scaletype
    print ""
    print "Start: " + str(reader.getStart())
    print "End: " + str(reader.getEnd())
    print "Difference: " + str(reader.getEnd() - reader.getStart())
    print ""
    length = 0
    stages = reader.getStages()
    for a in stages:
        length = length + a[1]
    print "Sleep stages (prosessed): " + str(stages)
    print "Combined length of stages: " + str(length)
    print ""
    print "Night Length: " + str(reader.getNightLength()) + " beats"
    print "Really its " + str(reader.getNightLength() -
                              reader.getNightLength() % music.bar)
    print ""
    print "A bar is " + str(music.bar) + " beats"
    print ""
    for i in range(music.howManyVoices()):
        voice = music.getVoice(i)
        print "Last events for voice " + str(i) + ":  " + str(voice[-3]) + str(
            voice[-2]) + str(voice[-1])
        print ""