
ci: run unittests and browsertests separately (#14826)

Alexey Kuzmin authored 6 years ago
commit 8963529238

6 changed files with 278 additions and 115 deletions
  1. .circleci/config.yml (+48, -24)
  2. BUILD.gn (+2, -10)
  3. script/browsertests.yml (+2, -0)
  4. script/native-tests.py (+100, -34)
  5. script/tests.yml (+0, -47)
  6. script/unittests.yml (+126, -0)

+ 48 - 24
.circleci/config.yml

@@ -27,6 +27,14 @@ env-release-build: &env-release-build
   NOTIFY_SLACK: true
   ELECTRON_RELEASE: 1
 
+env-browsertests: &env-browsertests
+  BUILD_TARGET: electron:chromium_browsertests
+  TESTS_CONFIG: src/electron/script/browsertests.yml
+
+env-unittests: &env-unittests
+  BUILD_TARGET: electron:chromium_unittests
+  TESTS_CONFIG: src/electron/script/unittests.yml
+
 # Build targets options.
 env-ia32: &env-ia32
   GN_EXTRA_ARGS: 'target_cpu = "x86"'
@@ -207,6 +215,7 @@ step-setup-linux-for-headless-testing: &step-setup-linux-for-headless-testing
     name: Setup for headless testing
     command: |
       if [ "`uname`" != "Darwin" ]; then
+        echo 'export DISPLAY=":99.0"' >> $BASH_ENV
         sh -e /etc/init.d/xvfb start
       fi
 
@@ -368,36 +377,39 @@ steps-native-tests: &steps-native-tests
         at: .
     - *step-depot-tools-add-to-path
     - *step-setup-env-for-build
-
     - *step-electron-gn-gen
+
+    # This list is used to split tests across executors.
     - run:
-        name: List native tests
+        name: List tests
         command: |
-          cd src
-          python electron/script/native-tests.py list \
-            --config electron/script/tests.yml > testslist.txt
+          python src/electron/script/native-tests.py list \
+            --config $TESTS_CONFIG > tests_list.txt
+
     # TODO(alexeykuzmin): Build only a subset of all tests.
     - run:
-        name: Build native tests
+        name: Build tests
         command: |
           cd src
-          ninja -C out/Default electron:electron_tests
-    # TODO(alexeykuzmin): Run only a subset of all tests.
-    # --binary $(circleci tests split testslist.txt)
+          ninja -C out/Default $BUILD_TARGET
+    - *step-show-sccache-stats
+
+    - *step-setup-linux-for-headless-testing
     - run:
-        name: Run native tests
+        name: Run tests
         command: |
-          cd src
-          mkdir tests_results
-          python electron/script/native-tests.py run \
-            --config electron/script/tests.yml \
-            --tests-dir out/Default \
-            --output-dir tests_results
+          mkdir test_results
+          python src/electron/script/native-tests.py run \
+            --config $TESTS_CONFIG \
+            --tests-dir src/out/Default \
+            --output-dir test_results \
+            --binary $(circleci tests split tests_list.txt)
+
     - store_artifacts:
-        path: src/tests_results
-        destination: results
+        path: test_results
+        destination: test_results  # Put it in the root folder.
     - store_test_results:
-        path: src/tests_results
+        path: test_results
 
 steps-verify-ffmpeg: &steps-verify-ffmpeg
   steps:
@@ -421,7 +433,6 @@ steps-tests: &steps-tests
     - run:
         name: Run Electron tests
         environment:
-          DISPLAY: ':99.0'
           MOCHA_REPORTER: mocha-multi-reporters
           MOCHA_FILE: junit/test-results.xml
           MOCHA_MULTI_REPORTERS: mocha-junit-reporter, tap
@@ -643,13 +654,21 @@ jobs:
     <<: *steps-build-mac
 
   # Layer 3: Tests.
-  linux-x64-native-tests:
+  linux-x64-unittests:
+    <<: *machine-linux-2xlarge
+    environment:
+      <<: *env-testing-build
+      <<: *env-unittests
+    <<: *steps-native-tests
+#    TODO(alexeykuzmin): Use parallelism.
+#    parallelism: 4  # https://xkcd.com/221/
+
+  linux-x64-browsertests:
     <<: *machine-linux-2xlarge
     environment:
       <<: *env-testing-build
+      <<: *env-browsertests
     <<: *steps-native-tests
-    # TODO(alexeykuzmin): Use parallelism.
-    # parallelism: 4  # https://xkcd.com/221/
 
   linux-x64-testing-tests:
     <<: *machine-linux-medium
@@ -787,7 +806,12 @@ workflows:
       - linux-arm-checkout
       - linux-arm64-checkout
 
-      - linux-x64-native-tests:
+# TODO(alexeykuzmin): The only suite there fails to start, so skip
+# the job for now. Fix the "content_browsertests" and enable the job back.
+#      - linux-x64-browsertests:
+#          requires:
+#            - linux-checkout
+      - linux-x64-unittests:
           requires:
             - linux-checkout
 

+ 2 - 10
BUILD.gn

@@ -760,21 +760,11 @@ group("electron_tests") {
   testonly = true
 
   deps = [
-    ":blink_unittests",
     ":chromium_browsertests",
     ":chromium_unittests",
   ]
 }
 
-group("blink_unittests") {
-  testonly = true
-
-  deps = [
-    "//third_party/blink/public:all_blink",
-    "//third_party/blink/public:test_support",
-  ]
-}
-
 group("chromium_unittests") {
   testonly = true
 
@@ -801,6 +791,8 @@ group("chromium_unittests") {
     "//sql:sql_unittests",
     "//storage:storage_unittests",
     "//third_party/angle/src/tests:angle_unittests",
+    "//third_party/blink/public:all_blink",
+    "//third_party/blink/public:test_support",
     "//third_party/leveldatabase:env_chromium_unittests",
     "//tools/gn:gn_unittests",
     "//ui/base:ui_base_unittests",

+ 2 - 0
script/browsertests.yml

@@ -0,0 +1,2 @@
+tests:
+  - content_browsertests

+ 100 - 34
script/native-tests.py

@@ -1,5 +1,7 @@
 #!/usr/bin/env python
 
+from __future__ import print_function
+
 import argparse
 import os
 import subprocess
@@ -17,10 +19,39 @@ class Command:
   RUN = 'run'
 
 class Verbosity:
-  ALL = 'all'  # stdout and stderr
+  CHATTY = 'chatty'  # stdout and stderr
   ERRORS = 'errors'  # stderr only
   SILENT = 'silent'  # no output
 
+  @staticmethod
+  def get_all():
+    return [Verbosity.CHATTY, Verbosity.ERRORS, Verbosity.SILENT]
+
+class Platform:
+  LINUX = 'linux'
+  MAC = 'mac'
+  WINDOWS = 'windows'
+
+  @staticmethod
+  def get_current():
+    platform = sys.platform
+    if platform in ('linux', 'linux2'):
+      return Platform.LINUX
+    if platform == 'darwin':
+      return Platform.MAC
+    if platform in ('cygwin', 'win32'):
+      return Platform.WINDOWS
+
+    assert False, "unexpected current platform '{}'".format(platform)
+
+  @staticmethod
+  def get_all():
+    return [Platform.LINUX, Platform.MAC, Platform.WINDOWS]
+
+  @staticmethod
+  def is_valid(platform):
+    return platform in Platform.get_all()
+
 def parse_args():
   parser = argparse.ArgumentParser(description='Run Google Test binaries')
 
@@ -39,17 +70,14 @@ def parse_args():
 
   verbosity = parser.add_mutually_exclusive_group()
   verbosity.add_argument('-v', '--verbosity', required=False,
-                         default=Verbosity.ALL,
-                         choices=[
-                            Verbosity.ALL,
-                            Verbosity.ERRORS,
-                            Verbosity.SILENT],
+                         default=Verbosity.CHATTY,
+                         choices=Verbosity.get_all(),
                          help='set verbosity level')
   verbosity.add_argument('-q', '--quiet', required=False, action='store_const',
                          const=Verbosity.ERRORS, dest='verbosity',
                          help='suppress stdout from test binaries')
   verbosity.add_argument('-qq', '--quiet-quiet',
-                         # https://youtu.be/o0u4M6vppCI?t=1m18s
+                         # https://youtu.be/bXd-zZLV2i0?t=41s
                          required=False, action='store_const',
                          const=Verbosity.SILENT, dest='verbosity',
                          help='suppress stdout and stderr from test binaries')
@@ -86,8 +114,8 @@ def main():
   tests_list = TestsList(args.config, args.tests_dir)
 
   if args.command == Command.LIST:
-    all_binaries_names = tests_list.get_names()
-    print '\n'.join(all_binaries_names)
+    all_binaries_names = tests_list.get_for_current_platform()
+    print('\n'.join(all_binaries_names))
     return 0
 
   if args.command == Command.RUN:
@@ -96,7 +124,7 @@ def main():
     else:
       return tests_list.run_all(args.output_dir, args.verbosity)
 
-  raise Exception("unexpected command '{}'".format(args.command))
+  assert False, "unexpected command '{}'".format(args.command)
 
 
 class TestsList():
@@ -111,29 +139,39 @@ class TestsList():
   def __len__(self):
     return len(self.tests)
 
-  def get_names(self):
-    return self.tests.keys()
+  def get_for_current_platform(self):
+    all_binaries = self.tests.keys()
 
-  def run(self, binaries, output_dir=None, verbosity=Verbosity.ALL):
+    supported_binaries = filter(self.__platform_supports, all_binaries)
+
+    return supported_binaries
+
+  def run(self, binaries, output_dir=None, verbosity=Verbosity.CHATTY):
     # Don't run anything twice.
     binaries = set(binaries)
 
     # First check that all names are present in the config.
-    if any([binary_name not in self.tests for binary_name in binaries]):
-      raise Exception("binary '{0}' not found in config '{1}'".format(
-          binary_name, self.config_path))
-
-    # TODO(alexeykuzmin): Respect the "platform" setting.
+    for binary_name in binaries:
+      if binary_name not in self.tests:
+        raise Exception("binary {0} not found in config '{1}'".format(
+            binary_name, self.config_path))
+
+    # Respect the "platform" setting.
+    for binary_name in binaries:
+      if not self.__platform_supports(binary_name):
+        raise Exception(
+            "binary {0} cannot be run on {1}, check the config".format(
+                binary_name, Platform.get_current()))
 
     suite_returncode = sum(
         [self.__run(binary, output_dir, verbosity) for binary in binaries])
     return suite_returncode
 
-  def run_only(self, binary_name, output_dir=None, verbosity=Verbosity.ALL):
+  def run_only(self, binary_name, output_dir=None, verbosity=Verbosity.CHATTY):
     return self.run([binary_name], output_dir, verbosity)
 
-  def run_all(self, output_dir=None, verbosity=Verbosity.ALL):
-    return self.run(self.get_names(), output_dir, verbosity)
+  def run_all(self, output_dir=None, verbosity=Verbosity.CHATTY):
+    return self.run(self.get_for_current_platform(), output_dir, verbosity)
 
   @staticmethod
   def __get_tests_list(config_path):
@@ -169,6 +207,21 @@ class TestsList():
         return value
     return [value]
 
+  @staticmethod
+  def __merge_nested_lists(value):
+    """Converts a dict of lists to a list."""
+    if isinstance(value, list):
+        return value
+
+    if isinstance(value, dict):
+      # It looks ugly as hell, but it does the job.
+      return [list_item for key in value for list_item in value[key]]
+
+    assert False, "unexpected type for list merging: {}".format(type(value))
+
+  def __platform_supports(self, binary_name):
+    return Platform.get_current() in self.tests[binary_name]['platforms']
+
   @staticmethod
   def __get_test_data(data_item):
     data_item = TestsList.__expand_shorthand(data_item)
@@ -176,21 +229,26 @@ class TestsList():
     binary_name = data_item.keys()[0]
     test_data = {
       'excluded_tests': None,
-      'platforms': None  # None means all? Weird.
+      'platforms': Platform.get_all()
     }
 
     configs = data_item[binary_name]
     if configs is not None:
       # List of excluded tests.
-      if 'to_fix' in configs:
-        test_data['excluded_tests'] = configs['to_fix']
-
-      # TODO(alexeykuzmin): Also add to 'excluded_tests'
-      # those that should be permanently disabled.
+      if 'disabled' in configs:
+        excluded_tests = TestsList.__merge_nested_lists(configs['disabled'])
+        test_data['excluded_tests'] = excluded_tests
 
       # List of platforms to run the tests on.
       if 'platform' in configs:
-          test_data['platforms'] = TestsList.__make_a_list(configs['platform'])
+          platforms = TestsList.__make_a_list(configs['platform'])
+
+          for platform in platforms:
+            assert Platform.is_valid(platform), \
+                "platform '{0}' is not supported, check {1} config" \
+                    .format(platform, binary_name)
+
+          test_data['platforms'] = platforms
 
     return (binary_name, test_data)
 
@@ -216,14 +274,14 @@ class TestsList():
 
 
 class TestBinary():
+  # Is only used when writing to a file.
+  output_format = 'xml'
+
   def __init__(self, binary_path):
     self.binary_path = binary_path
 
-    # Is only used when writing to a file.
-    self.output_format = 'xml'
-
   def run(self, excluded_tests=None, output_file_path=None,
-      verbosity=Verbosity.ALL):
+      verbosity=Verbosity.CHATTY):
     gtest_filter = ""
     if excluded_tests is not None and len(excluded_tests) > 0:
       excluded_tests_string = TestBinary.__format_excluded_tests(
@@ -232,13 +290,21 @@ class TestBinary():
 
     gtest_output = ""
     if output_file_path is not None:
-      gtest_output = "--gtest_output={0}:{1}".format(self.output_format,
+      gtest_output = "--gtest_output={0}:{1}".format(TestBinary.output_format,
                                                      output_file_path)
 
     args = [self.binary_path, gtest_filter, gtest_output]
     stdout, stderr = TestBinary.__get_stdout_and_stderr(verbosity)
 
-    returncode = subprocess.call(args, stdout=stdout, stderr=stderr)
+    returncode = 0
+    try:
+      returncode = subprocess.call(args, stdout=stdout, stderr=stderr)
+    except Exception as exception:
+      if verbosity in (Verbosity.CHATTY, Verbosity.ERRORS):
+        print("An error occurred while running '{}':".format(self.binary_path),
+            '\n', exception, file=sys.stderr)
+      returncode = 1
+
     return returncode
 
   @staticmethod
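
For context on the new "disabled" key handled by the code above: it accepts either a flat list of test name patterns or a dict of named lists, and TestsList.__merge_nested_lists flattens the dict form before the patterns reach the gtest filter. A standalone illustration of that flattening, using a shortened version of the base_unittests patterns from unittests.yml below (the list comprehension is the same one used in the script; dict ordering is not guaranteed in Python 2, so the output order may vary):

    # The "disabled" value in its dict-of-lists form, as written in unittests.yml.
    disabled = {
        'to_fix': ['WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding'],
        'undecided': ['FilePathTest*', 'RTLTest*'],
    }

    # Same flattening as TestsList.__merge_nested_lists: a dict of named
    # lists collapses into one flat list of excluded test patterns.
    excluded_tests = [item for key in disabled for item in disabled[key]]

    print(excluded_tests)
    # e.g. ['WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding',
    #       'FilePathTest*', 'RTLTest*']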

+ 0 - 47
script/tests.yml

@@ -1,47 +0,0 @@
-tests:
-  - base_unittests:
-      to_fix:
-        - WeakPtrDeathTest.NonOwnerThreadDeletesObjectAfterReference
-        - WeakPtrDeathTest.NonOwnerThreadDeletesWeakPtrAfterReference
-        - WeakPtrDeathTest.NonOwnerThreadDereferencesWeakPtrAfterReference
-        - WeakPtrDeathTest.NonOwnerThreadReferencesObjectAfterDeletion
-        - WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding
-  - cc_unittests
-  - cc_blink_unittests
-  - content_unittests
-  - crypto_unittests
-  - device_unittests
-  - gin_unittests
-  - gpu_unittests
-  - ipc_tests
-  - media_unittests
-  - capture_unittests
-  - midi_unittests
-  - media_mojo_unittests
-  - mojo_unittests
-  - mojo_common_unittests
-  - net_unittests
-  - ppapi_unittests
-  - printing_unittests
-  - skia_unittests
-  - sql_unittests
-  - storage_unittests
-  - angle_unittests
-  - env_chromium_unittests
-  - gn_unittests
-  - ui_base_unittests
-  - compositor_unittests
-  - display_unittests
-  - events_unittests
-  - gl_unittests
-  - url_unittests
-  - url_ipc_unittests
-  - unittests
-  - content_browsertests
-  # TODO(alexeykuzmin): Do not ignore `platform` setting.
-  - disk_cache_memory_test:
-      platform: linux
-  - sandbox_linux_unittests:
-      platform: linux
-  - dbus_unittests:
-      platform: linux

+ 126 - 0
script/unittests.yml

@@ -0,0 +1,126 @@
+tests:
+  - base_unittests:
+      # "disabled" dict can have arbitrary named properties
+      # with lists of disabled tests. Those properties' names
+      # are used only to explain why a group of tests is disabled.
+      disabled:
+        to_fix:
+          - WeakPtrDeathTest.NonOwnerThreadDeletesObjectAfterReference
+          - WeakPtrDeathTest.NonOwnerThreadDeletesWeakPtrAfterReference
+          - WeakPtrDeathTest.NonOwnerThreadDereferencesWeakPtrAfterReference
+          - WeakPtrDeathTest.NonOwnerThreadReferencesObjectAfterDeletion
+          - WeakPtrDeathTest.WeakPtrCopyDoesNotChangeThreadBinding
+        undecided:
+          - FilePathTest*
+          - RTLTest*
+          - SysStrings*
+          - UTFOffsetStringConversionsTest*
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - cc_unittests
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - cc_blink_unittests
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - content_unittests:
+#      disabled:
+        # TODO(alexeykuzmin): Should those be fixed?
+#        - _/DOMStorageMapParamTest.EnforcesQuota/0
+#        - _/DOMStorageMapParamTest.EnforcesQuota/1
+  - crypto_unittests
+  - device_unittests
+  - gin_unittests
+  - gpu_unittests
+  - ipc_tests
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - media_unittests
+  - capture_unittests
+  - midi_unittests
+  - media_mojo_unittests
+  - mojo_unittests
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - mojo_common_unittests
+  - net_unittests:
+      disabled:
+        # TODO(alexeykuzmin): Should those be fixed?
+        - EndToEndTestsWithTls*
+        - FilenameUtilTest*
+        - HTTPSAIATest.AIAFetching
+        - HTTPSCRLSetTest*
+        - HTTPSEVCRLSetTest*
+        - HTTPSFallbackTest*
+        - HTTPSHardFailTest.FailsOnOCSPInvalid
+        - HTTPSOCSPTest*
+        - HTTPSRequestTest*
+        - HTTPSSessionTest*
+        - OCSPVerify*
+        - ProxyConfigServiceLinuxTest*
+        - SSLClientSocketCertRequestInfoTest*
+        - SSLClientSocketChannelIDTest*
+        - SSLClientSocketFalseStartTest*
+        - SSLClientSocketReadTest*
+        - SSLClientSocketTest*
+        - TokenBindingURLRequestTest*
+        - UDPSocketTest*
+        - URLRequestTestFTP*
+        - URLRequestTestHTTP*
+        - WebSocketEndToEndTest*
+  - ppapi_unittests
+  - printing_unittests
+  - skia_unittests
+  - sql_unittests
+  - storage_unittests
+  - gn_unittests
+  - ui_base_unittests:
+      disabled:
+        # TODO(alexeykuzmin): Should it be fixed?
+        - AcceleratorTest.GetShortcutText
+  - compositor_unittests
+  - display_unittests
+  - events_unittests
+  - gl_unittests
+  - url_unittests
+  - url_ipc_unittests
+  # V8
+  - unittests:
+      disabled:
+        # TODO(alexeykuzmin): Should it be fixed?
+        - LiveRangeUnitTest*
+  # Blink
+# TODO(alexeykuzmin): Doesn't exist in Ch67.
+#  - blink_common_unittests
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - blink_heap_unittests
+  - blink_platform_unittests:
+      disabled:
+        # TODO(alexeykuzmin): Should those be fixed?
+        - AnimatedPNGTests*
+        - AnimatedWebPTests*
+        - AnimatedWEBPTests.clearCacheExceptFrameWithAncestors
+        - BitmapImageTest*
+        - BMPImageDecoderTest*
+        - DecodedImageOrientationHistogramTest*
+        - DecodedImageTypeHistogramTest*
+        - DeferredImageDecoderTestWoPlatform*
+        - GIFImageDecoderTest*
+        - HighContrastImageClassifierTest*
+        - ICOImageDecoderTests*
+        - JPEGImageDecoderTest*
+        - FilePathConversionTest*
+        - PNGTests*
+        - StaticPNGTests*
+        - StaticWebPTests*
+# TODO(alexeykuzmin): The binary can't be launched, fix it and enable it back.
+#  - webkit_unit_tests
+  - wtf_unittests
+  # Third party
+  - angle_unittests
+  - env_chromium_unittests
+# TODO(alexeykuzmin): Should be run somehow differently.
+#  - disk_cache_memory_test:
+#      platform: linux
+  - sandbox_linux_unittests:
+      platform: linux
+      disabled:
+        # TODO(alexeykuzmin): Should it be fixed?
+        - BrokerProcess*
+  - dbus_unittests:
+      platform: linux
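
Reading these entries through the updated __get_test_data above: every item resolves to a binary name plus excluded_tests and platforms, with platforms now defaulting to all supported platforms instead of None. A rough sketch (hand-written values, not the script's actual objects) of what the sandbox_linux_unittests entry resolves to:

    # YAML entry:
    #   - sandbox_linux_unittests:
    #       platform: linux
    #       disabled:
    #         - BrokerProcess*
    #
    # Approximate result of TestsList.__get_test_data for that entry:
    test_data = ('sandbox_linux_unittests', {
        'excluded_tests': ['BrokerProcess*'],  # a flat "disabled" list is used as-is
        'platforms': ['linux'],                # omitting "platform" would mean
    })                                         # ['linux', 'mac', 'windows']

On a non-Linux host, __platform_supports then drops such an entry from get_for_current_platform(), so both the "List tests" CI step and run_all only see binaries that can actually run on the current platform.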