Example #1
 def test_convolve_with_concat(self):
     fake_fs = {
         'd0_0': {
             '%': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 2
     for i in result:
         assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
         assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
         assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
         assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
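
A quick sanity check of the count asserted above, reading the magic entries the way the test names across these examples suggest ('%' convolves sibling directories into a cross product, '+' concatenates a directory's fragments into a single entry, '$' picks one entry at random). This is an illustrative aside, not part of build_matrix or the test:

    # d0_0 is convolved ('%'), so its children multiply; d1_2 carries '+', so its
    # four yaml fragments collapse into one combined entry.
    expected = 1 * 2 * 1   # d1_0 (1 yaml) x d1_1 (2 yamls) x d1_2 (concatenated)
    assert expected == 2   # matches `assert len(result) == 2` above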

 def test_emulate_teuthology_noceph(self):
     fake_fs = {
         'teuthology': {
             'no-ceph': {
                 '%': None,
                 'clusters': {
                     'single.yaml': None,
                 },
                 'distros': {
                     'baremetal.yaml': None,
                     'rhel7.0.yaml': None,
                     'ubuntu12.04.yaml': None,
                     'ubuntu14.04.yaml': None,
                     'vps.yaml': None,
                     'vps_centos6.5.yaml': None,
                     'vps_debian7.yaml': None,
                     'vps_rhel6.4.yaml': None,
                     'vps_rhel6.5.yaml': None,
                     'vps_rhel7.0.yaml': None,
                     'vps_ubuntu14.04.yaml': None,
                 },
                 'tasks': {
                     'teuthology.yaml': None,
                 },
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('teuthology/no-ceph')
     assert len(result) == 11
     assert self.fragment_occurences(result, 'vps.yaml') == 1 / 11.0
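
The same arithmetic for the distro matrix above, assuming fragment_occurences reports the fraction of generated jobs whose fragment list contains the named file (which is what the 1/11.0 comparison implies):

    # 1 cluster x 11 distros x 1 task under '%' convolution:
    jobs = 1 * 11 * 1
    assert jobs == 11
    # vps.yaml is one of the 11 distro fragments, so it appears in exactly one job:
    assert 1 / float(jobs) == 1 / 11.0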
Example #3
 def test_emulate_teuthology_noceph(self):
     fake_fs = {
         'teuthology': {
             'no-ceph': {
                 '%': None,
                 'clusters': {
                     'single.yaml': None,
                 },
                 'distros': {
                     'baremetal.yaml': None,
                     'rhel7.0.yaml': None,
                     'ubuntu12.04.yaml': None,
                     'ubuntu14.04.yaml': None,
                     'vps.yaml': None,
                     'vps_centos6.5.yaml': None,
                     'vps_debian7.yaml': None,
                     'vps_rhel6.4.yaml': None,
                     'vps_rhel6.5.yaml': None,
                     'vps_rhel7.0.yaml': None,
                     'vps_ubuntu14.04.yaml': None,
                 },
                 'tasks': {
                     'teuthology.yaml': None,
                 },
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('teuthology/no-ceph')
     assert len(result) == 11
     assert self.fragment_occurences(result, 'vps.yaml') == 1 / 11.0

 def test_convolve_with_concat(self):
     fake_fs = {
         'd0_0': {
             '%': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 2
     for i in result:
         assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
         assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
         assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
         assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
Example #5
 def test_random_dollar_sign_with_concat(self):
     fake_fs = {
         'd0_0': {
             '$': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 1
     if result[0][0][1:].startswith('d1_2'):
         for i in result:
             assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
             assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
             assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
             assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
Example #6
 def test_random_dollar_sign_with_convolve(self):
     fake_fs = {
         'd0_0': {
             '%': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
                 'd1_0_1.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '$': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 4
     fake_fs1 = {
         'd0_0': {
             '%': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
                 'd1_0_1.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2$': {
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
             },
         },
     }
     self.stop_patchers()
     self.start_patchers(fake_fs1)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 4
Example #8
 def test_random_dollar_sign_with_concat(self):
     fake_fs = {
         'd0_0': {
             '$': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     fake_fs1 = {
         'd0_0$': {
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     for info in [(fake_fs, 'd0_0'), (fake_fs1, 'd0_0$')]:
         fsv = info[0]
         dval = info[1]
         self.start_patchers(fsv)
         result = build_matrix.build_matrix(dval)
         assert len(result) == 1
         if result[0][0][1:].startswith('d1_2'):
             for i in result:
                 assert 'd0_0/d1_2/d1_2_0.yaml' in i[1]
                 assert 'd0_0/d1_2/d1_2_1.yaml' in i[1]
                 assert 'd0_0/d1_2/d1_2_2.yaml' in i[1]
                 assert 'd0_0/d1_2/d1_2_3.yaml' in i[1]
         if dval == 'd0_0':
             self.stop_patchers()
Example #10
 def test_random_dollar_sign_with_concat(self):
     fake_fs = {
         'd0_0': {
             '$': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     fake_fs1 = {
         'd0_0$': {
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     for fs, root in [(fake_fs,'d0_0'), (fake_fs1,'d0_0$')]:
         self.start_patchers(fs)
         result = build_matrix.build_matrix(root)
         assert len(result) == 1
         if result[0][0][1:].startswith('d1_2'):
             for i in result:
                 assert os.path.join(root, 'd1_2/d1_2_0.yaml') in i[1]
                 assert os.path.join(root, 'd1_2/d1_2_1.yaml') in i[1]
                 assert os.path.join(root, 'd1_2/d1_2_2.yaml') in i[1]
                 assert os.path.join(root, 'd1_2/d1_2_3.yaml') in i[1]
         if root == 'd0_0':
             self.stop_patchers()
Example #11
 def test_random_dollar_sign_with_concat(self):
     fake_fs = {
         'd0_0': {
             '$': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     fake_fs1 = {
         'd0_0$': {
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 '+': None,
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
                 'd1_2_3.yaml': None,
             },
         },
     }
     for fs, root in [(fake_fs,'d0_0'), (fake_fs1,'d0_0$')]:
         self.start_patchers(fs)
         result = build_matrix.build_matrix(root)
         assert len(result) == 1
         if result[0][0][1:].startswith('d1_2'):
             for i in result:
                 assert os.path.join(root, 'd1_2/d1_2_0.yaml') in i[1]
                 assert os.path.join(root, 'd1_2/d1_2_1.yaml') in i[1]
                 assert os.path.join(root, 'd1_2/d1_2_2.yaml') in i[1]
                 assert os.path.join(root, 'd1_2/d1_2_3.yaml') in i[1]
         if root == 'd0_0':
             self.stop_patchers()

 def test_convolve_2x2(self):
     fake_fs = {
         'd0_0': {
             '%': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
                 'd1_0_1.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 4
     assert self.fragment_occurences(result, 'd1_1_1.yaml') == 0.5
Example #13
 def test_convolve_2x2(self):
     fake_fs = {
         'd0_0': {
             '%': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
                 'd1_0_1.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 4
     assert self.fragment_occurences(result, 'd1_1_1.yaml') == 0.5

 def test_concatenate_1x2x3(self):
     fake_fs = {
         'd0_0': {
             '+': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 1
Example #15
 def test_concatenate_1x2x3(self):
     fake_fs = {
         'd0_0': {
             '+': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 1
Example #16
 def test_sort_order(self):
     # This test ensures that 'ceph' comes before 'ceph-thrash' when yaml
     # fragments are sorted.
     fake_fs = {
         'thrash': {
             '%': None,
             'ceph-thrash': {
                 'default.yaml': None
             },
             'ceph': {
                 'base.yaml': None
             },
             'clusters': {
                 'mds-1active-1standby.yaml': None
             },
             'debug': {
                 'mds_client.yaml': None
             },
             'fs': {
                 'btrfs.yaml': None
             },
             'msgr-failures': {
                 'none.yaml': None
             },
             'overrides': {
                 'whitelist_wrongly_marked_down.yaml': None
             },
             'tasks': {
                 'cfuse_workunit_suites_fsstress.yaml': None
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('thrash')
     assert len(result) == 1
     assert self.fragment_occurences(result, 'base.yaml') == 1
     fragments = result[0][1]
     assert fragments[0] == 'thrash/ceph/base.yaml'
     assert fragments[1] == 'thrash/ceph-thrash/default.yaml'
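
For context on why this ordering needs its own test: a plain lexicographic sort of the full fragment paths would put the ceph-thrash fragment first, because '-' (0x2d) sorts before '/' (0x2f). The assertions above show build_matrix yields the opposite order, i.e. it orders by directory entry rather than by whole-path string comparison. A minimal illustration:

    paths = ['thrash/ceph/base.yaml', 'thrash/ceph-thrash/default.yaml']
    print(sorted(paths))
    # ['thrash/ceph-thrash/default.yaml', 'thrash/ceph/base.yaml']
    # ...the reverse of the order the test expects from build_matrix.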
Example #17
 def test_random_dollar_sign_2x2x3(self):
     fake_fs = {
         'd0_0': {
             '$': None,
             'd1_0': {
                 'd1_0_0.yaml': None,
                 'd1_0_1.yaml': None,
             },
             'd1_1': {
                 'd1_1_0.yaml': None,
                 'd1_1_1.yaml': None,
             },
             'd1_2': {
                 'd1_2_0.yaml': None,
                 'd1_2_1.yaml': None,
                 'd1_2_2.yaml': None,
             },
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('d0_0')
     assert len(result) == 1

 def test_sort_order(self):
     # This test ensures that 'ceph' comes before 'ceph-thrash' when yaml
     # fragments are sorted.
     fake_fs = {
         'thrash': {
             '%': None,
             'ceph-thrash': {'default.yaml': None},
             'ceph': {'base.yaml': None},
             'clusters': {'mds-1active-1standby.yaml': None},
             'debug': {'mds_client.yaml': None},
             'fs': {'btrfs.yaml': None},
             'msgr-failures': {'none.yaml': None},
             'overrides': {'whitelist_wrongly_marked_down.yaml': None},
             'tasks': {'cfuse_workunit_suites_fsstress.yaml': None},
         },
     }
     self.start_patchers(fake_fs)
     result = build_matrix.build_matrix('thrash')
     assert len(result) == 1
     assert self.fragment_occurences(result, 'base.yaml') == 1
     fragments = result[0][1]
     assert fragments[0] == 'thrash/ceph/base.yaml'
     assert fragments[1] == 'thrash/ceph-thrash/default.yaml'
Example #19
    def test_disable_extension(self):
        fake_fs = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                },
            },
        }
        self.start_patchers(fake_fs)
        result = build_matrix.build_matrix('teuthology/no-ceph')
        self.stop_patchers()

        fake_fs2 = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                        'forcefilevps_ubuntu14.04.yaml.disable': None,
                        'forcefilevps_ubuntu14.04.yaml.anotherextension': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                        'forcefilevps_ubuntu14.04notyaml': None,
                    },
                    'forcefilevps_ubuntu14.04notyaml': None,
                    'tasks.disable': {
                        'teuthology2.yaml': None,
                        'forcefilevps_ubuntu14.04notyaml': None,
                    },
                },
            },
        }
        self.start_patchers(fake_fs2)
        result2 = build_matrix.build_matrix('teuthology/no-ceph')
        assert len(result) == 11
        assert len(result2) == len(result)
Example #20
    def test_empty_dirs(self):
        fake_fs = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                },
            },
        }
        self.start_patchers(fake_fs)
        result = build_matrix.build_matrix('teuthology/no-ceph')
        self.stop_patchers()

        fake_fs2 = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'empty': {},
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                    'empty': {},
                },
            },
        }
        self.start_patchers(fake_fs2)
        result2 = build_matrix.build_matrix('teuthology/no-ceph')
        assert len(result) == 11
        assert len(result2) == len(result)

    def test_disable_extension(self):
        fake_fs = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                    },
                },
            },
        }
        self.start_patchers(fake_fs)
        result = build_matrix.build_matrix('teuthology/no-ceph')
        self.stop_patchers()

        fake_fs2 = {
            'teuthology': {
                'no-ceph': {
                    '%': None,
                    'clusters': {
                        'single.yaml': None,
                    },
                    'distros': {
                        'baremetal.yaml': None,
                        'rhel7.0.yaml': None,
                        'ubuntu12.04.yaml': None,
                        'ubuntu14.04.yaml': None,
                        'vps.yaml': None,
                        'vps_centos6.5.yaml': None,
                        'vps_debian7.yaml': None,
                        'vps_rhel6.4.yaml': None,
                        'vps_rhel6.5.yaml': None,
                        'vps_rhel7.0.yaml': None,
                        'vps_ubuntu14.04.yaml': None,
                        'forcefilevps_ubuntu14.04.yaml.disable': None,
                        'forcefilevps_ubuntu14.04.yaml.anotherextension': None,
                    },
                    'tasks': {
                        'teuthology.yaml': None,
                        'forcefilevps_ubuntu14.04notyaml': None,
                    },
                    'forcefilevps_ubuntu14.04notyaml': None,
                    'tasks.disable': {
                        'teuthology2.yaml': None,
                        'forcefilevps_ubuntu14.04notyaml': None,
                    },
                },
            },
        }
        self.start_patchers(fake_fs2)
        result2 = build_matrix.build_matrix('teuthology/no-ceph')
        assert len(result) == 11
        assert len(result2) == len(result)
Example #23
def get_combinations(suite_dir, fields, subset, limit, filter_in, filter_out,
                     include_facet):
    """
    Describes the combinations of a suite, optionally limiting
    or filtering output based on the given parameters. Includes
    columns for the subsuite and facets when include_facet is True.

    Returns a tuple of (headers, rows), where headers is a list of strings
    and rows is a list of rows, each itself a list of strings.
    """
    configs = [(combine_path(suite_dir, item[0]), item[1])
               for item in build_matrix(suite_dir, subset)]

    num_listed = 0
    rows = []

    facet_headers = set()
    dirs = {}
    max_dir_depth = 0

    for _, fragment_paths in configs:
        if limit > 0 and num_listed >= limit:
            break
        if filter_in and not any(
            [f in path for f in filter_in for path in fragment_paths]):
            continue
        if filter_out and any(
            [f in path for f in filter_out for path in fragment_paths]):
            continue

        fragment_fields = [
            extract_info(path, fields) for path in fragment_paths
        ]

        # merge fields from multiple fragments by joining their values with \n
        metadata = {}
        for fragment_meta in fragment_fields:
            for field, value in fragment_meta.items():
                if value == '':
                    continue
                if field in metadata:
                    metadata[field] += '\n' + str(value)
                else:
                    metadata[field] = str(value)

        if include_facet:
            # map final dir (facet) -> filename without the .yaml suffix
            for path in fragment_paths:
                facet_dir = os.path.dirname(path)
                facet = os.path.basename(facet_dir)
                metadata[facet] = os.path.basename(path)[:-5]
                facet_headers.add(facet)
                facet_dirs = facet_dir.split('/')[:-1]
                for i, dir_ in enumerate(facet_dirs):
                    if i not in dirs:
                        dirs[i] = set()
                    dirs[i].add(dir_)
                    metadata['_dir_' + str(i)] = os.path.basename(dir_)
                    max_dir_depth = max(max_dir_depth, i)

        rows.append(metadata)
        num_listed += 1

    subsuite_headers = []
    if include_facet:
        first_subsuite_depth = max_dir_depth
        for i in range(max_dir_depth):
            if len(dirs[i]) > 1:
                first_subsuite_depth = i
                break

        subsuite_headers = [
            'subsuite depth ' + str(i)
            for i in range(0, max_dir_depth - first_subsuite_depth + 1)
        ]

        for row in rows:
            for i in range(first_subsuite_depth, max_dir_depth + 1):
                row[subsuite_headers[i - first_subsuite_depth]] = \
                    row.get('_dir_' + str(i), '')

    headers = subsuite_headers + sorted(facet_headers) + fields
    return headers, sorted([[row.get(field, '') for field in headers]
                            for row in rows])
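
A hypothetical invocation, mainly to make the positional parameters concrete. The suite path matches the fake filesystems used in the tests above, and the field name 'tasks' is only a guess at what extract_info might be asked for:

    headers, rows = get_combinations(
        'teuthology/no-ceph',  # suite_dir
        ['tasks'],             # fields pulled from each fragment via extract_info
        None,                  # subset: passed straight through to build_matrix
        0,                     # limit: 0 disables the `num_listed >= limit` check
        None,                  # filter_in: no inclusion filter
        None,                  # filter_out: no exclusion filter
        True,                  # include_facet: add subsuite-depth and facet columns
    )
    # headers == subsuite-depth columns + sorted facet names + the requested fields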
Example #24
    def schedule_suite(self):
        """
        Schedule the suite-run. Returns the number of jobs scheduled.
        """
        name = self.name
        if self.args.arch:
            arch = self.args.arch
            log.debug("Using '%s' as an arch" % arch)
        else:
            arch = util.get_arch(self.base_config.machine_type)
        suite_name = self.base_config.suite
        suite_path = os.path.normpath(
            os.path.join(
                self.suite_repo_path,
                self.args.suite_relpath,
                'suites',
                self.base_config.suite.replace(':', '/'),
            ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
        configs = build_matrix(suite_path,
                               subset=self.args.subset,
                               seed=self.args.seed)
        log.info('Suite %s in %s generated %d jobs (not yet filtered)' %
                 (suite_name, suite_path, len(configs)))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        # create, but do not write, the temp file here, so it can be
        # added to the args in collect_jobs, but not filled until
        # any backtracking is done
        base_yaml_path = NamedTemporaryFile(prefix='schedule_suite_',
                                            delete=False).name
        self.base_yaml_paths.insert(0, base_yaml_path)

        # compute the job limit, taking --sleep-before-teardown into account
        job_limit = self.args.limit or 0
        sleep_before_teardown = int(self.args.sleep_before_teardown or 0)
        if sleep_before_teardown:
            if job_limit == 0:
                log.warning('The --sleep-before-teardown option was provided: '
                            'only 1 job will be scheduled. '
                            'Use --limit to run more jobs')
                # give user a moment to read this warning
                time.sleep(5)
                job_limit = 1
            elif self.args.non_interactive:
                log.warning('The --sleep-before-teardown option is active. '
                            'At most {} jobs will run, each sleeping for '
                            '{} before teardown'.format(
                                job_limit,
                                format_timespan(sleep_before_teardown)))
            elif job_limit > 4:
                are_you_insane = (
                    'There are {total} configs and the job limit is {maximum}. '
                    'Do you really want to lock all machines needed for '
                    'this run for {that_long}? (y/N):'.format(
                        that_long=format_timespan(sleep_before_teardown),
                        total=len(configs),
                        maximum=job_limit))
                while True:
                    insane = (input(are_you_insane) or 'n').lower()
                    if insane == 'y':
                        break
                    elif insane == 'n':
                        exit(0)

        # with --newest, backtrack until no packages are missing; otherwise run once
        backtrack = 0
        limit = self.args.newest
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = self.collect_jobs(
                arch,
                util.filter_configs(
                    configs,
                    filter_in=self.args.filter_in,
                    filter_out=self.args.filter_out,
                    filter_all=self.args.filter_all,
                    filter_fragments=self.args.filter_fragments,
                    suite_name=suite_name,
                ),
                self.args.newest,
                job_limit)
            if jobs_missing_packages and self.args.newest:
                new_sha1 = \
                    util.find_git_parent('ceph', self.base_config.sha1)
                if new_sha1 is None:
                    util.schedule_fail('Backtrack for --newest failed', name)
                # rebuild the base config to resubstitute sha1
                self.config_input['ceph_hash'] = new_sha1
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
            if backtrack:
                log.info("--newest supplied, backtracked %d commits to %s" %
                         (backtrack, self.base_config.sha1))
            break
        else:
            if self.args.newest:
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
                )

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

        if jobs_to_schedule:
            self.write_rerun_memo()

        # Before scheduling jobs, check the priority
        if self.args.priority and jobs_to_schedule and not self.args.force_priority:
            self.check_priority(len(jobs_to_schedule))

        self.check_num_jobs(len(jobs_to_schedule))

        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
        log.info('Suite %s in %s scheduled %d jobs.' %
                 (suite_name, suite_path, count))
        log.info('%d/%d jobs were filtered out.', (len(configs) - count),
                 len(configs))
        if missing_count:
            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
        return count
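
One detail worth calling out in the backtracking loop above is the while/else: the else branch runs only when the loop condition becomes false without a break, i.e. when every allowed backtrack still left packages missing. A minimal, self-contained sketch of that control flow (the names are illustrative, not from teuthology):

    def attempt_succeeds(attempt):
        # stand-in for "collect jobs and find no missing packages"
        return attempt >= 2

    limit = 3
    attempt = 0
    while attempt <= limit:
        if attempt_succeeds(attempt):
            break              # success: the else clause below is skipped
        attempt += 1
    else:
        # reached only if the while condition went false before any break
        raise RuntimeError('Exceeded %d backtracks; raise --newest value' % limit)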
Example #25
    def schedule_suite(self):
        """
        Schedule the suite-run. Returns the number of jobs scheduled.
        """
        name = self.name
        arch = util.get_arch(self.base_config.machine_type)
        suite_name = self.base_config.suite
        suite_path = os.path.normpath(os.path.join(
            self.suite_repo_path,
            self.args.suite_relpath,
            'suites',
            self.base_config.suite.replace(':', '/'),
        ))
        log.debug('Suite %s in %s' % (suite_name, suite_path))
        configs = [
            (combine_path(suite_name, item[0]), item[1]) for item in
            build_matrix(suite_path, subset=self.args.subset, seed=self.args.seed)
        ]
        log.info('Suite %s in %s generated %d jobs (not yet filtered)' % (
            suite_name, suite_path, len(configs)))

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        # create, but do not write, the temp file here, so it can be
        # added to the args in collect_jobs, but not filled until
        # any backtracking is done
        base_yaml_path = NamedTemporaryFile(
            prefix='schedule_suite_', delete=False
        ).name
        self.base_yaml_paths.insert(0, base_yaml_path)

        # with --newest, backtrack until no packages are missing; otherwise run once
        backtrack = 0
        limit = self.args.newest
        while backtrack <= limit:
            jobs_missing_packages, jobs_to_schedule = \
                self.collect_jobs(arch, configs, self.args.newest)
            if jobs_missing_packages and self.args.newest:
                new_sha1 = \
                    util.find_git_parent('ceph', self.base_config.sha1)
                if new_sha1 is None:
                    util.schedule_fail('Backtrack for --newest failed', name)
                # rebuild the base config to resubstitute sha1
                self.config_input['ceph_hash'] = new_sha1
                self.base_config = self.build_base_config()
                backtrack += 1
                continue
            if backtrack:
                log.info("--newest supplied, backtracked %d commits to %s" %
                         (backtrack, self.base_config.sha1))
            break
        else:
            if self.args.newest:
                util.schedule_fail(
                    'Exceeded %d backtracks; raise --newest value' % limit,
                    name,
                )

        if self.args.dry_run:
            log.debug("Base job config:\n%s" % self.base_config)

        with open(base_yaml_path, 'w+b') as base_yaml:
            base_yaml.write(str(self.base_config).encode())

        if jobs_to_schedule:
            self.write_rerun_memo()

        self.schedule_jobs(jobs_missing_packages, jobs_to_schedule, name)

        os.remove(base_yaml_path)

        count = len(jobs_to_schedule)
        missing_count = len(jobs_missing_packages)
        log.info(
            'Suite %s in %s scheduled %d jobs.' %
            (suite_name, suite_path, count)
        )
        log.info('%d/%d jobs were filtered out.',
                 (len(configs) - count),
                 len(configs))
        if missing_count:
            log.warning('Scheduled %d/%d jobs that are missing packages!',
                        missing_count, count)
        return count
Example #26
def get_combinations(suite_dir, fields, subset,
                     limit, filter_in, filter_out,
                     include_facet):
    """
    Describes the combinations of a suite, optionally limiting
    or filtering output based on the given parameters. Includes
    columns for the subsuite and facets when include_facet is True.

    Returns a tuple of (headers, rows), where headers is a list of strings
    and rows is a list of rows, each itself a list of strings.
    """
    configs = [(combine_path(suite_dir, item[0]), item[1]) for item in
               build_matrix(suite_dir, subset)]

    num_listed = 0
    rows = []

    facet_headers = set()
    dirs = {}
    max_dir_depth = 0

    for _, fragment_paths in configs:
        if limit > 0 and num_listed >= limit:
            break
        if filter_in and not any([f in path for f in filter_in
                                  for path in fragment_paths]):
            continue
        if filter_out and any([f in path for f in filter_out
                               for path in fragment_paths]):
            continue

        fragment_fields = [extract_info(path, fields)
                           for path in fragment_paths]

        # merge fields from multiple fragments by joining their values with \n
        metadata = {}
        for fragment_meta in fragment_fields:
            for field, value in fragment_meta.items():
                if value == '':
                    continue
                if field in metadata:
                    metadata[field] += '\n' + str(value)
                else:
                    metadata[field] = str(value)

        if include_facet:
            # map final dir (facet) -> filename without the .yaml suffix
            for path in fragment_paths:
                facet_dir = os.path.dirname(path)
                facet = os.path.basename(facet_dir)
                metadata[facet] = os.path.basename(path)[:-5]
                facet_headers.add(facet)
                facet_dirs = facet_dir.split('/')[:-1]
                for i, dir_ in enumerate(facet_dirs):
                    if i not in dirs:
                        dirs[i] = set()
                    dirs[i].add(dir_)
                    metadata['_dir_' + str(i)] = os.path.basename(dir_)
                    max_dir_depth = max(max_dir_depth, i)

        rows.append(metadata)
        num_listed += 1

    subsuite_headers = []
    if include_facet:
        first_subsuite_depth = max_dir_depth
        for i in range(max_dir_depth):
            if len(dirs[i]) > 1:
                first_subsuite_depth = i
                break

        subsuite_headers = ['subsuite depth ' + str(i)
                            for i in
                            range(0, max_dir_depth - first_subsuite_depth + 1)]

        for row in rows:
            for i in range(first_subsuite_depth, max_dir_depth + 1):
                row[subsuite_headers[i - first_subsuite_depth]] = \
                    row.get('_dir_' + str(i), '')

    headers = subsuite_headers + sorted(facet_headers) + fields
    return headers, sorted([[row.get(field, '') for field in headers]
                            for row in rows])