Revert "Put name of in-progress test in crash metadata."

This reverts commit 20e1936de1ba7cbb7cb3811d2e277c188098c51c.

Reason for revert:
The change seems to break Android PFQ due to a lack of permissions.

The line causing the error is client/common_lib/test.py:399.

https://ci.chromium.org/p/chromeos/builders/general/PFQ/b8867149612828084176?
```
19:47:27 INFO | autoserv|   File "/build/betty-pi-arc/usr/local/build/autotest/client/common_lib/test.py", line 399, in _call_run_once
19:47:27 INFO | autoserv|     os.mkdir(crash_run_dir, 0755)
19:47:27 INFO | autoserv| OSError: [Errno 13] Permission denied: '/run/crash_reporter'
```

Original change's description:
> Put name of in-progress test in crash metadata.
>
> When we start uploading crashes from test results, it will be useful to
> know during which test a crash occurred (if any).
>
> Also, run git cl format.
>
> This is a reland of https://crrev.com/c/2393262, fixed to work for
> server tests.
>
> BUG=chromium:1119443
> TEST=test_that --board=eve $DUT logging_UserCrash\
>      logging_GenerateCrashFiles
>
> Change-Id: I4491d45da66554cfd0a55d40dcde0f56fab5e330
> Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/2429928
> Reviewed-by: Derek Beckett <dbeckett@chromium.org>
> Reviewed-by: Greg Edelston <gredelston@google.com>
> Reviewed-by: Joon Ahn <joonbug@chromium.org>
> Commit-Queue: Miriam Zimmerman <mutexlox@chromium.org>
> Tested-by: Miriam Zimmerman <mutexlox@chromium.org>

Bug: chromium:1119443
Change-Id: Iec2baee298557f454a8e09eb4f20396ab1a6e036
Reviewed-on: https://chromium-review.googlesource.com/c/chromiumos/third_party/autotest/+/2454963
Reviewed-by: Jiyoun Ha <jiyounha@chromium.org>
Reviewed-by: Yuta Hijikata <ythjkt@chromium.org>
Commit-Queue: Yuta Hijikata <ythjkt@chromium.org>
Tested-by: Yuta Hijikata <ythjkt@chromium.org>
diff --git a/client/common_lib/test.py b/client/common_lib/test.py
index 0ca78fa..b79f754 100644
--- a/client/common_lib/test.py
+++ b/client/common_lib/test.py
@@ -18,7 +18,6 @@
 
 #pylint: disable=C0111
 
-import errno
 import fcntl
 import json
 import logging
@@ -67,9 +66,6 @@
         self.srcdir = os.path.join(self.bindir, 'src')
         self.tmpdir = tempfile.mkdtemp("_" + self.tagged_testname,
                                        dir=job.tmpdir)
-        # The crash_reporter uses this file to determine which test is in
-        # progress.
-        self.test_in_prog_file = '/run/crash_reporter/test-in-prog'
         self._keyvals = []
         self._new_keyval = False
         self.failed_constraints = []
@@ -173,7 +169,7 @@
             with open(output_file, 'r') as fp:
                 contents = fp.read()
                 if contents:
-                    charts = json.loads(contents)
+                     charts = json.loads(contents)
 
         if graph:
             first_level = graph
@@ -188,9 +184,9 @@
         # representing numbers logged, attempt to convert them to numbers.
         # If a non number string is logged an exception will be thrown.
         if isinstance(value, list):
-            value = map(float, value)
+          value = map(float, value)
         else:
-            value = float(value)
+          value = float(value)
 
         result_type = 'scalar'
         value_key = 'value'
@@ -365,42 +361,6 @@
             logging.debug('before_iteration_hooks completed')
 
         finished = False
-
-        # Mark the current test in progress so that crash_reporter can report
-        # it in uploaded crashes.
-        # if the file already exists, truncate and overwrite.
-        # TODO(mutexlox): Determine what to do if the file already exists, which
-        # could happen for a few reasons:
-        #   * An earlier tast or autotest run crashed before removing the
-        #     test-in-prog file. In this case, we would ideally overwrite the
-        #     existing test name and _not_ restore it.
-        #   * An autotest ran another autotest (e.g. logging_GenerateCrashFiles
-        #     runs desktopui_SimpleLogin). In this case, arguably it makes sense
-        #     to attribute the crash to logging_GenerateCrashFiles and not to
-        #     desktopui_SimpleLogin (or to attribute it to both), since the
-        #     context in which it's running is different than if it were run on
-        #     its own.
-        #   * Every tast test is kicked off by the 'tast' autotest (see
-        #     server/site_tests/tast/tast.py), so the file will always be
-        #     populated when the tast suite starts running. In this case, we
-        #     want to attribute crashes that happen during a specific test to
-        #     that test, but if the tast infra causes a crash we should
-        #     attribute the crash to it (e.g. to the 'tast.critical-system'
-        #     "autotest").  For this case, we should save the contents of the
-        #     file before a test and restore it after.
-        if 'host' in dargs:
-            dargs['host'].run('echo %s > %s' %
-                              (self.tagged_testname, self.test_in_prog_file),
-                              ignore_status=True)
-        else:
-            crash_run_dir = os.path.dirname(self.test_in_prog_file)
-            try:
-                if not os.path.exists(crash_run_dir):
-                    os.mkdir(crash_run_dir, 0755)
-                with open(self.test_in_prog_file, 'w') as f:
-                    f.write(self.tagged_testname)
-            except IOError:
-                logging.warning('failed to write in progress test name')
         try:
             if profile_only:
                 if not self.job.profilers.present():
@@ -427,20 +387,6 @@
                           'after_iteration_hooks.', str(e))
             raise
         finally:
-            if 'host' in dargs:
-                dargs['host'].run('rm -f %s' % self.test_in_prog_file)
-            else:
-                try:
-                    # Unmark the test as running.
-                    os.remove(self.test_in_prog_file)
-                except OSError as e:
-                    # If something removed it, do nothing--we're in the desired
-                    # state (the file is gone). Otherwise, log.
-                    if e.errno != errno.ENOENT:
-                        logging.warning(
-                                "Couldn't remove test-in-prog file: %s",
-                                traceback.format_exc())
-
             if not finished or not self.job.fast:
                 logging.debug('Starting after_iteration_hooks for %s',
                               self.tagged_testname)
@@ -811,18 +757,10 @@
         raise error.UnhandledTestFail(e)
 
 
-def runtest(job,
-            url,
-            tag,
-            args,
-            dargs,
-            local_namespace={},
-            global_namespace={},
-            before_test_hook=None,
-            after_test_hook=None,
-            before_iteration_hook=None,
-            after_iteration_hook=None,
-            override_test_in_prog_file=None):
+def runtest(job, url, tag, args, dargs,
+            local_namespace={}, global_namespace={},
+            before_test_hook=None, after_test_hook=None,
+            before_iteration_hook=None, after_iteration_hook=None):
     local_namespace = local_namespace.copy()
     global_namespace = global_namespace.copy()
     # if this is not a plain test name then download and install the
@@ -884,8 +822,6 @@
 
     try:
         mytest = global_namespace['mytest']
-        if override_test_in_prog_file:
-            mytest.test_in_prog_file = override_test_in_prog_file
         mytest.success = False
         if not job.fast and before_test_hook:
             logging.info('Starting before_hook for %s', mytest.tagged_testname)
diff --git a/client/common_lib/test_unittest.py b/client/common_lib/test_unittest.py
index 3241347..22c3e24 100755
--- a/client/common_lib/test_unittest.py
+++ b/client/common_lib/test_unittest.py
@@ -36,10 +36,6 @@
             self.before_iteration_hooks = []
             self.after_iteration_hooks = []
 
-            self.crash_reporter_dir = tempfile.mkdtemp()
-            # Make a temp dir for the test-in-prog file to be created.
-            self.test_in_prog_file = os.path.join(self.crash_reporter_dir,
-                                                  "test-in-prog")
 
     def setUp(self):
         self.god = mock.mock_god()
@@ -48,7 +44,6 @@
 
     def tearDown(self):
         self.god.unstub_all()
-        shutil.rmtree(self.test.crash_reporter_dir)
 
 
 
@@ -427,11 +422,10 @@
 
 
         for (config_tag, ap_config_tag, bt_tag, drop) in test_data:
-            self.test.output_perf_value(config_tag + '_' + bt_tag + '_drop',
-                                        drop,
-                                        units='percent_drop',
-                                        higher_is_better=False,
-                                        graph=ap_config_tag + '_drop')
+          self.test.output_perf_value(config_tag + '_' + bt_tag + '_drop',
+                                      drop, units='percent_drop',
+                                      higher_is_better=False,
+                                      graph=ap_config_tag + '_drop')
         f = open(self.test.resultsdir + "/results-chart.json")
         expected_result = {
           "ch006_mode11B_none_drop": {
@@ -540,8 +534,6 @@
         resultdir = os.path.join(self.workdir, 'results')
         tmpdir = os.path.join(self.workdir, 'tmp')
 
-        self.test_in_prog_file = os.path.join(tmpdir, "test-in-prog")
-
         os.makedirs(os.path.join(testdir, self.testname))
         os.makedirs(os.path.join(resultdir, self.testname))
         os.makedirs(tmpdir)
@@ -558,11 +550,7 @@
 
     def test_runtest(self):
         all_args = {'host': 'hostvalue', 'arg1': 'value1', 'arg2': 'value2'}
-        test.runtest(self.job,
-                     self.testname,
-                     '', (),
-                     all_args,
-                     override_test_in_prog_file=self.test_in_prog_file)
+        test.runtest(self.job, self.testname, '', (), all_args)
         self.job.initialize_mock.assert_called_with('hostvalue', 'value1')
         self.job.warmup_mock.assert_called_with('hostvalue')
         self.job.run_once_mock.assert_called_with('value2')
diff --git a/client/cros/crash/user_crash_test.py b/client/cros/crash/user_crash_test.py
index 480e999..097ad3a 100644
--- a/client/cros/crash/user_crash_test.py
+++ b/client/cros/crash/user_crash_test.py
@@ -104,7 +104,8 @@
         first_line = symbols.split('\n')[0]
         tokens = first_line.split()
         if tokens[0] != 'MODULE' or tokens[1] != 'Linux':
-            raise error.TestError('Unexpected symbols format: %s', first_line)
+          raise error.TestError('Unexpected symbols format: %s',
+                                first_line)
         file_id = tokens[3]
         target_dir = os.path.join(self._symbol_dir, basename, file_id)
         os.makedirs(target_dir)
@@ -418,8 +419,8 @@
         crash_contents = os.listdir(crash_dir)
         basename = os.path.basename(crasher_path or self._crasher_path)
         if expect_crash_reporter_fail:
-            old_basename = basename
-            basename = "crash_reporter_failure"
+          old_basename = basename
+          basename = "crash_reporter_failure"
 
         # A dict tracking files for each crash report.
         crash_report_files = {}
@@ -495,15 +496,10 @@
             raise error.TestFail('crash_reporter did not catch crash')
 
 
-    def _check_crashing_process(self,
-                                username,
-                                consent=True,
-                                crasher_path=None,
-                                run_crasher=None,
-                                expected_uid=None,
-                                expected_gid=None,
-                                expected_exit_code=None,
-                                extra_meta_contents=None):
+    def _check_crashing_process(self, username, consent=True,
+                                crasher_path=None, run_crasher=None,
+                                expected_uid=None, expected_gid=None,
+                                expected_exit_code=None):
         result = self._run_crasher_process_and_analyze(
             username, consent=consent,
             crasher_path=crasher_path,
@@ -517,12 +513,6 @@
         if not consent:
             return
 
-        if extra_meta_contents:
-            with open(result['meta'], 'r') as f:
-                if extra_meta_contents not in f.read():
-                    raise error.TestFail('metadata did not contain "%s"' %
-                                         extra_meta_contents)
-
         if not result['minidump']:
             raise error.TestFail('crash reporter did not generate minidump')
 
diff --git a/client/site_tests/logging_UserCrash/logging_UserCrash.py b/client/site_tests/logging_UserCrash/logging_UserCrash.py
index 42396b4..6654620 100644
--- a/client/site_tests/logging_UserCrash/logging_UserCrash.py
+++ b/client/site_tests/logging_UserCrash/logging_UserCrash.py
@@ -65,8 +65,7 @@
 
     def _test_chronos_crasher(self):
         """Test a user space crash when running as chronos is handled."""
-        contents = 'upload_var_in_progress_integration_test=logging_UserCrash'
-        self._check_crashing_process('chronos', extra_meta_contents=contents)
+        self._check_crashing_process('chronos')
 
 
     def _test_chronos_crasher_no_consent(self):