summaryrefslogtreecommitdiff
path: root/testing/xpcshell
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /testing/xpcshell
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloaduxp-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
Add m-esr52 at 52.6.0
Diffstat (limited to 'testing/xpcshell')
-rw-r--r--testing/xpcshell/README7
-rw-r--r--testing/xpcshell/dbg-actors.js51
-rw-r--r--testing/xpcshell/example/moz.build9
-rw-r--r--testing/xpcshell/example/unit/check_profile.js52
-rw-r--r--testing/xpcshell/example/unit/file.txt1
-rw-r--r--testing/xpcshell/example/unit/import_module.jsm34
-rw-r--r--testing/xpcshell/example/unit/import_sub_module.jsm10
-rw-r--r--testing/xpcshell/example/unit/load_subscript.js5
-rw-r--r--testing/xpcshell/example/unit/location_load.js6
-rw-r--r--testing/xpcshell/example/unit/subdir/file.txt1
-rw-r--r--testing/xpcshell/example/unit/test_check_nsIException.js11
-rw-r--r--testing/xpcshell/example/unit/test_check_nsIException_failing.js9
-rw-r--r--testing/xpcshell/example/unit/test_do_check_matches.js14
-rw-r--r--testing/xpcshell/example/unit/test_do_check_matches_failing.js12
-rw-r--r--testing/xpcshell/example/unit/test_do_check_null.js6
-rw-r--r--testing/xpcshell/example/unit/test_do_check_null_failing.js6
-rw-r--r--testing/xpcshell/example/unit/test_do_get_tempdir.js16
-rw-r--r--testing/xpcshell/example/unit/test_execute_soon.js20
-rw-r--r--testing/xpcshell/example/unit/test_fail.js8
-rw-r--r--testing/xpcshell/example/unit/test_get_file.js33
-rw-r--r--testing/xpcshell/example/unit/test_get_idle.js23
-rw-r--r--testing/xpcshell/example/unit/test_import_module.js22
-rw-r--r--testing/xpcshell/example/unit/test_load.js21
-rw-r--r--testing/xpcshell/example/unit/test_load_httpd_js.js13
-rw-r--r--testing/xpcshell/example/unit/test_location.js11
-rw-r--r--testing/xpcshell/example/unit/test_profile.js11
-rw-r--r--testing/xpcshell/example/unit/test_profile_afterChange.js11
-rw-r--r--testing/xpcshell/example/unit/test_sample.js21
-rw-r--r--testing/xpcshell/example/unit/test_skip.js8
-rw-r--r--testing/xpcshell/example/unit/xpcshell.ini47
-rw-r--r--testing/xpcshell/head.js1663
-rw-r--r--testing/xpcshell/mach_commands.py274
-rw-r--r--testing/xpcshell/mach_test_package_commands.py64
-rw-r--r--testing/xpcshell/moz-http2/http2-cert.pem79
-rw-r--r--testing/xpcshell/moz-http2/http2-key.pem28
-rw-r--r--testing/xpcshell/moz-http2/moz-http2.js786
-rw-r--r--testing/xpcshell/moz.build18
-rw-r--r--testing/xpcshell/node-http2/.gitignore7
-rw-r--r--testing/xpcshell/node-http2/.travis.yml5
-rw-r--r--testing/xpcshell/node-http2/HISTORY.md258
-rw-r--r--testing/xpcshell/node-http2/LICENSE22
-rw-r--r--testing/xpcshell/node-http2/README.md171
-rw-r--r--testing/xpcshell/node-http2/example/client.js48
-rw-r--r--testing/xpcshell/node-http2/example/localhost.crt14
-rw-r--r--testing/xpcshell/node-http2/example/localhost.key15
-rw-r--r--testing/xpcshell/node-http2/example/server.js67
-rw-r--r--testing/xpcshell/node-http2/lib/http.js1262
-rw-r--r--testing/xpcshell/node-http2/lib/index.js52
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/compressor.js1366
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/connection.js619
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/endpoint.js262
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/flow.js353
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/framer.js1165
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/index.js91
-rw-r--r--testing/xpcshell/node-http2/lib/protocol/stream.js659
-rw-r--r--testing/xpcshell/node-http2/package.json46
-rw-r--r--testing/xpcshell/node-http2/test/compressor.js575
-rw-r--r--testing/xpcshell/node-http2/test/connection.js237
-rw-r--r--testing/xpcshell/node-http2/test/endpoint.js41
-rw-r--r--testing/xpcshell/node-http2/test/flow.js260
-rw-r--r--testing/xpcshell/node-http2/test/framer.js395
-rw-r--r--testing/xpcshell/node-http2/test/http.js793
-rw-r--r--testing/xpcshell/node-http2/test/stream.js413
-rw-r--r--testing/xpcshell/node-http2/test/util.js89
-rw-r--r--testing/xpcshell/remotexpcshelltests.py614
-rwxr-xr-xtesting/xpcshell/runxpcshelltests.py1501
-rwxr-xr-xtesting/xpcshell/selftest.py1344
-rw-r--r--testing/xpcshell/xpcshell.eslintrc.js57
-rw-r--r--testing/xpcshell/xpcshellcommandline.py166
69 files changed, 16378 insertions, 0 deletions
diff --git a/testing/xpcshell/README b/testing/xpcshell/README
new file mode 100644
index 0000000000..a359b3c8d5
--- /dev/null
+++ b/testing/xpcshell/README
@@ -0,0 +1,7 @@
+Simple xpcshell-based test harness
+
+converted from netwerk/test/unit
+
+Some documentation at http://developer.mozilla.org/en/docs/Writing_xpcshell-based_unit_tests
+See also http://wiki.mozilla.org/SoftwareTesting:Tools:Simple_xpcshell_test_harness
+
diff --git a/testing/xpcshell/dbg-actors.js b/testing/xpcshell/dbg-actors.js
new file mode 100644
index 0000000000..dd6b388a49
--- /dev/null
+++ b/testing/xpcshell/dbg-actors.js
@@ -0,0 +1,51 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this file,
+ * You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+'use strict';
+
+const { Promise } = Cu.import("resource://gre/modules/Promise.jsm", {});
+var { Services } = Cu.import("resource://gre/modules/Services.jsm", {});
+const { devtools } = Cu.import("resource://devtools/shared/Loader.jsm", {});
+const { RootActor } = devtools.require("devtools/server/actors/root");
+const { BrowserTabList } = devtools.require("devtools/server/actors/webbrowser");
+
+/**
+ * xpcshell-test (XPCST) specific actors.
+ *
+ */
+
+/**
+ * Construct a root actor appropriate for use in a server running xpcshell
+ * tests. <snip boilerplate> :)
+ */
+function createRootActor(connection)
+{
+ let parameters = {
+ tabList: new XPCSTTabList(connection),
+ globalActorFactories: DebuggerServer.globalActorFactories,
+ onShutdown() {
+ // If the user never switches to the "debugger" tab we might get a
+ // shutdown before we've attached.
+ Services.obs.notifyObservers(null, "xpcshell-test-devtools-shutdown", null);
+ }
+ };
+ return new RootActor(connection, parameters);
+}
+
+/**
+ * A "stub" TabList implementation that provides no tabs.
+ */
+
+function XPCSTTabList(connection)
+{
+ BrowserTabList.call(this, connection);
+}
+
+XPCSTTabList.prototype = Object.create(BrowserTabList.prototype);
+
+XPCSTTabList.prototype.constructor = XPCSTTabList;
+
+XPCSTTabList.prototype.getList = function() {
+ return Promise.resolve([]);
+};
diff --git a/testing/xpcshell/example/moz.build b/testing/xpcshell/example/moz.build
new file mode 100644
index 0000000000..078d8cd630
--- /dev/null
+++ b/testing/xpcshell/example/moz.build
@@ -0,0 +1,9 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# This is a list of directories containing tests to run, separated by spaces.
+# Most likely, tho, you won't use more than one directory here.
+XPCSHELL_TESTS_MANIFESTS += ['unit/xpcshell.ini']
diff --git a/testing/xpcshell/example/unit/check_profile.js b/testing/xpcshell/example/unit/check_profile.js
new file mode 100644
index 0000000000..b2451c5f98
--- /dev/null
+++ b/testing/xpcshell/example/unit/check_profile.js
@@ -0,0 +1,52 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var {classes: Cc, interfaces: Ci} = Components;
+
+function check_profile_dir(profd)
+{
+ Assert.ok(profd.exists());
+ Assert.ok(profd.isDirectory());
+ let dirSvc = Cc["@mozilla.org/file/directory_service;1"]
+ .getService(Ci.nsIProperties);
+ let profd2 = dirSvc.get("ProfD", Ci.nsILocalFile);
+ Assert.ok(profd2.exists());
+ Assert.ok(profd2.isDirectory());
+ // make sure we got the same thing back...
+ Assert.ok(profd.equals(profd2));
+}
+
+function check_do_get_profile(fireProfileAfterChange)
+{
+ const observedTopics = new Map([
+ ["profile-do-change", 0],
+ ["profile-after-change", 0],
+ ]);
+ const expectedTopics = new Map(observedTopics);
+
+ const obs = Cc["@mozilla.org/observer-service;1"]
+ .getService(Ci.nsIObserverService);
+ for (let [topic,] of observedTopics) {
+ obs.addObserver(() => {
+ let val = observedTopics.get(topic) + 1;
+ observedTopics.set(topic, val);
+ }, topic, false);
+ }
+
+ // Trigger profile creation.
+ let profd = do_get_profile();
+ check_profile_dir(profd);
+
+ // Check the observed topics
+ expectedTopics.set("profile-do-change", 1);
+ if (fireProfileAfterChange) {
+ expectedTopics.set("profile-after-change", 1);
+ }
+ Assert.deepEqual(observedTopics, expectedTopics);
+
+ // A second do_get_profile() should not trigger more notifications.
+ profd = do_get_profile();
+ check_profile_dir(profd);
+ Assert.deepEqual(observedTopics, expectedTopics);
+}
diff --git a/testing/xpcshell/example/unit/file.txt b/testing/xpcshell/example/unit/file.txt
new file mode 100644
index 0000000000..ce01362503
--- /dev/null
+++ b/testing/xpcshell/example/unit/file.txt
@@ -0,0 +1 @@
+hello
diff --git a/testing/xpcshell/example/unit/import_module.jsm b/testing/xpcshell/example/unit/import_module.jsm
new file mode 100644
index 0000000000..3b1ddaf9f3
--- /dev/null
+++ b/testing/xpcshell/example/unit/import_module.jsm
@@ -0,0 +1,34 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Module used by test_import_module.js
+
+const EXPORTED_SYMBOLS = [ "MODULE_IMPORTED", "MODULE_URI", "SUBMODULE_IMPORTED", "same_scope", "SUBMODULE_IMPORTED_TO_SCOPE" ];
+
+const MODULE_IMPORTED = true;
+const MODULE_URI = __URI__;
+
+// Will import SUBMODULE_IMPORTED into scope.
+Components.utils.import("resource://gre/modules/XPCOMUtils.jsm");
+XPCOMUtils.importRelative(this, "import_sub_module.jsm");
+
+// Prepare two scopes that we can import the submodule into.
+var scope1 = { __URI__: __URI__ };
+var scope2 = { __URI__: __URI__ };
+// First one is the regular path.
+XPCOMUtils.importRelative(scope1, "import_sub_module.jsm");
+scope1.test_obj.i++;
+// Second one is with a different path (leads to the same file).
+XPCOMUtils.importRelative(scope2, "duh/../import_sub_module.jsm");
+// test_obj belongs to import_sub_module.jsm and has a mutable field name i, if
+// the two modules are actually the same, then they'll share the same value.
+// We'll leave it up to test_import_module.js to check that this variable is
+// true.
+var same_scope = (scope1.test_obj.i == scope2.test_obj.i);
+
+// Check that importRelative can also import into a given scope
+var testScope = {};
+XPCOMUtils.importRelative(this, "import_sub_module.jsm", testScope);
+var SUBMODULE_IMPORTED_TO_SCOPE = testScope.SUBMODULE_IMPORTED;
+
diff --git a/testing/xpcshell/example/unit/import_sub_module.jsm b/testing/xpcshell/example/unit/import_sub_module.jsm
new file mode 100644
index 0000000000..1649517171
--- /dev/null
+++ b/testing/xpcshell/example/unit/import_sub_module.jsm
@@ -0,0 +1,10 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Module used by import_module.jsm
+
+var EXPORTED_SYMBOLS = [ "SUBMODULE_IMPORTED", "test_obj" ];
+
+const SUBMODULE_IMPORTED = true;
+var test_obj = { i: 0 };
diff --git a/testing/xpcshell/example/unit/load_subscript.js b/testing/xpcshell/example/unit/load_subscript.js
new file mode 100644
index 0000000000..d626038973
--- /dev/null
+++ b/testing/xpcshell/example/unit/load_subscript.js
@@ -0,0 +1,5 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+subscriptLoaded = true;
diff --git a/testing/xpcshell/example/unit/location_load.js b/testing/xpcshell/example/unit/location_load.js
new file mode 100644
index 0000000000..2357ed44f8
--- /dev/null
+++ b/testing/xpcshell/example/unit/location_load.js
@@ -0,0 +1,6 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Gets loaded via test_location.js
+do_check_eq(__LOCATION__.leafName, "location_load.js");
diff --git a/testing/xpcshell/example/unit/subdir/file.txt b/testing/xpcshell/example/unit/subdir/file.txt
new file mode 100644
index 0000000000..c4f6b5f708
--- /dev/null
+++ b/testing/xpcshell/example/unit/subdir/file.txt
@@ -0,0 +1 @@
+subdir hello
diff --git a/testing/xpcshell/example/unit/test_check_nsIException.js b/testing/xpcshell/example/unit/test_check_nsIException.js
new file mode 100644
index 0000000000..115542fca2
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_check_nsIException.js
@@ -0,0 +1,11 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ let env = Components.classes["@mozilla.org/process/environment;1"]
+ .getService(Components.interfaces.nsIEnvironment);
+ do_check_throws_nsIException(function () {
+ env.QueryInterface(Components.interfaces.nsIFile);
+ }, "NS_NOINTERFACE");
+}
+
diff --git a/testing/xpcshell/example/unit/test_check_nsIException_failing.js b/testing/xpcshell/example/unit/test_check_nsIException_failing.js
new file mode 100644
index 0000000000..4e562b6d8c
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_check_nsIException_failing.js
@@ -0,0 +1,9 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ do_check_throws_nsIException(function () {
+ throw Error("I find your relaxed dishabille unpalatable");
+ }, "NS_NOINTERFACE");
+}
+
diff --git a/testing/xpcshell/example/unit/test_do_check_matches.js b/testing/xpcshell/example/unit/test_do_check_matches.js
new file mode 100644
index 0000000000..51d720b0c5
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_matches.js
@@ -0,0 +1,14 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ do_check_matches({x:1}, {x:1});
+
+ // Property order is irrelevant.
+ do_check_matches({x:"foo", y:"bar"}, {y:"bar", x:"foo"});// pass
+
+ // Patterns nest.
+ do_check_matches({a:1, b:{c:2,d:3}}, {a:1, b:{c:2,d:3}});
+
+ do_check_matches([3,4,5], [3,4,5]);
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_matches_failing.js b/testing/xpcshell/example/unit/test_do_check_matches_failing.js
new file mode 100644
index 0000000000..21c70c968f
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_matches_failing.js
@@ -0,0 +1,12 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ do_check_matches({x:1}, {}); // fail: all pattern props required
+ do_check_matches({x:1}, {x:2}); // fail: values must match
+ do_check_matches({x:undefined}, {});
+
+ // 'length' property counts, even if non-enumerable.
+ do_check_matches([3,4,5], [3,5,5]); // fail; value doesn't match
+ do_check_matches([3,4,5], [3,4,5,6]);// fail; length doesn't match
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_null.js b/testing/xpcshell/example/unit/test_do_check_null.js
new file mode 100644
index 0000000000..67a2fd9677
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_null.js
@@ -0,0 +1,6 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ do_check_null(null);
+}
diff --git a/testing/xpcshell/example/unit/test_do_check_null_failing.js b/testing/xpcshell/example/unit/test_do_check_null_failing.js
new file mode 100644
index 0000000000..7b800f65fb
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_check_null_failing.js
@@ -0,0 +1,6 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+function run_test() {
+ do_check_null(0);
+}
diff --git a/testing/xpcshell/example/unit/test_do_get_tempdir.js b/testing/xpcshell/example/unit/test_do_get_tempdir.js
new file mode 100644
index 0000000000..44b780f849
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_do_get_tempdir.js
@@ -0,0 +1,16 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/ */
+
+/* This tests that do_get_tempdir returns a directory that we can write to. */
+
+var Ci = Components.interfaces;
+
+function run_test() {
+ let tmpd = do_get_tempdir();
+ do_check_true(tmpd.exists());
+ tmpd.append("testfile");
+ tmpd.create(Ci.nsIFile.NORMAL_FILE_TYPE, 600);
+ do_check_true(tmpd.exists());
+}
diff --git a/testing/xpcshell/example/unit/test_execute_soon.js b/testing/xpcshell/example/unit/test_execute_soon.js
new file mode 100644
index 0000000000..0520583f35
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_execute_soon.js
@@ -0,0 +1,20 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* ***** BEGIN LICENSE BLOCK *****
+ * Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/licenses/publicdomain/
+ * ***** END LICENSE BLOCK ***** */
+
+var complete = false;
+
+function run_test() {
+ dump("Starting test\n");
+ do_register_cleanup(function() {
+ dump("Checking test completed\n");
+ do_check_true(complete);
+ });
+
+ do_execute_soon(function execute_soon_callback() {
+ dump("do_execute_soon callback\n");
+ complete = true;
+ });
+}
diff --git a/testing/xpcshell/example/unit/test_fail.js b/testing/xpcshell/example/unit/test_fail.js
new file mode 100644
index 0000000000..8d9fea2ada
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_fail.js
@@ -0,0 +1,8 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ // This test expects to fail.
+ do_check_true(false);
+}
diff --git a/testing/xpcshell/example/unit/test_get_file.js b/testing/xpcshell/example/unit/test_get_file.js
new file mode 100644
index 0000000000..3fe80255a2
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_get_file.js
@@ -0,0 +1,33 @@
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var subscriptLoaded = false;
+
+function run_test() {
+ var lf = do_get_file("file.txt");
+ do_check_true(lf.exists());
+ do_check_true(lf.isFile());
+ // check that allowNonexistent works
+ lf = do_get_file("file.txt.notfound", true);
+ do_check_false(lf.exists());
+ // check that we can get a file from a subdirectory
+ lf = do_get_file("subdir/file.txt");
+ do_check_true(lf.exists());
+ do_check_true(lf.isFile());
+ // and that we can get a handle to a directory itself
+ lf = do_get_file("subdir/");
+ do_check_true(lf.exists());
+ do_check_true(lf.isDirectory());
+ // check that we can go up a level
+ lf = do_get_file("..");
+ do_check_true(lf.exists());
+ lf.append("unit");
+ lf.append("file.txt");
+ do_check_true(lf.exists());
+ // check that do_get_cwd works
+ lf = do_get_cwd();
+ do_check_true(lf.exists());
+ do_check_true(lf.isDirectory());
+}
diff --git a/testing/xpcshell/example/unit/test_get_idle.js b/testing/xpcshell/example/unit/test_get_idle.js
new file mode 100644
index 0000000000..fe0a42f06e
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_get_idle.js
@@ -0,0 +1,23 @@
+/* Any copyright is dedicated to the Public Domain.
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+function run_test() {
+ print("Init the fake idle service and check its identity.");
+ let fakeIdleService = Components.classes["@mozilla.org/widget/idleservice;1"].
+ getService(Components.interfaces.nsIIdleService);
+ try {
+ fakeIdleService.QueryInterface(Components.interfaces.nsIFactory);
+ } catch (ex) {
+ do_throw("The fake idle service implements nsIFactory.");
+ }
+ // We need at least one PASS, thus sanity check the idle time.
+ do_check_eq(fakeIdleService.idleTime, 0);
+
+ print("Init the real idle service and check its identity.");
+ let realIdleService = do_get_idle();
+ try {
+ realIdleService.QueryInterface(Components.interfaces.nsIFactory);
+ do_throw("The real idle service does not implement nsIFactory.");
+ } catch (ex) {}
+}
diff --git a/testing/xpcshell/example/unit/test_import_module.js b/testing/xpcshell/example/unit/test_import_module.js
new file mode 100644
index 0000000000..280d63ad2f
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_import_module.js
@@ -0,0 +1,22 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/**
+ * Ensures that tests can import a module in the same folder through:
+ * Components.utils.import("resource://test/module.jsm");
+ */
+
+function run_test() {
+ do_check_true(typeof(this['MODULE_IMPORTED']) == "undefined");
+ do_check_true(typeof(this['MODULE_URI']) == "undefined");
+ let uri = "resource://test/import_module.jsm";
+ Components.utils.import(uri);
+ do_check_true(MODULE_URI == uri);
+ do_check_true(MODULE_IMPORTED);
+ do_check_true(SUBMODULE_IMPORTED);
+ do_check_true(same_scope);
+ do_check_true(SUBMODULE_IMPORTED_TO_SCOPE);
+}
diff --git a/testing/xpcshell/example/unit/test_load.js b/testing/xpcshell/example/unit/test_load.js
new file mode 100644
index 0000000000..4f99626007
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_load.js
@@ -0,0 +1,21 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+var subscriptLoaded = false;
+
+function run_test() {
+ load("load_subscript.js");
+ do_check_true(subscriptLoaded);
+ subscriptLoaded = false;
+ try {
+ load("file_that_does_not_exist.js");
+ subscriptLoaded = true;
+ }
+ catch (ex) {
+ do_check_eq(ex.message.substring(0,16), "cannot open file");
+ }
+ do_check_false(subscriptLoaded, "load() should throw an error");
+}
diff --git a/testing/xpcshell/example/unit/test_load_httpd_js.js b/testing/xpcshell/example/unit/test_load_httpd_js.js
new file mode 100644
index 0000000000..fbba83300d
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_load_httpd_js.js
@@ -0,0 +1,13 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+Components.utils.import("resource://testing-common/httpd.js");
+
+function run_test() {
+ var httpserver = new HttpServer();
+ do_check_neq(httpserver, null);
+ do_check_neq(httpserver.QueryInterface(Components.interfaces.nsIHttpServer), null);
+}
diff --git a/testing/xpcshell/example/unit/test_location.js b/testing/xpcshell/example/unit/test_location.js
new file mode 100644
index 0000000000..8c17b1decd
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_location.js
@@ -0,0 +1,11 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ do_check_eq(__LOCATION__.leafName, "test_location.js");
+ // also check that __LOCATION__ works via load()
+ load("location_load.js");
+}
diff --git a/testing/xpcshell/example/unit/test_profile.js b/testing/xpcshell/example/unit/test_profile.js
new file mode 100644
index 0000000000..29a34c7c8c
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_profile.js
@@ -0,0 +1,11 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test()
+{
+ load("check_profile.js");
+ check_do_get_profile(false);
+}
diff --git a/testing/xpcshell/example/unit/test_profile_afterChange.js b/testing/xpcshell/example/unit/test_profile_afterChange.js
new file mode 100644
index 0000000000..d3c4ce031f
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_profile_afterChange.js
@@ -0,0 +1,11 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test()
+{
+ load("check_profile.js");
+ check_do_get_profile(true);
+}
diff --git a/testing/xpcshell/example/unit/test_sample.js b/testing/xpcshell/example/unit/test_sample.js
new file mode 100644
index 0000000000..2e7b7dbb63
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_sample.js
@@ -0,0 +1,21 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* This is the most basic testcase. It makes some trivial assertions,
+ * then sets a timeout, and exits the test harness when that timeout
+ * fires. This is meant to demonstrate that there is a complete event
+ * system available to test scripts.
+ * Available functions are described at:
+ * http://developer.mozilla.org/en/docs/Writing_xpcshell-based_unit_tests
+ */
+function run_test() {
+ do_check_eq(57, 57)
+ do_check_neq(1, 2)
+ do_check_true(true);
+
+ do_test_pending();
+ do_timeout(100, do_test_finished);
+}
diff --git a/testing/xpcshell/example/unit/test_skip.js b/testing/xpcshell/example/unit/test_skip.js
new file mode 100644
index 0000000000..8d9fea2ada
--- /dev/null
+++ b/testing/xpcshell/example/unit/test_skip.js
@@ -0,0 +1,8 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+function run_test() {
+ // This test expects to fail.
+ do_check_true(false);
+}
diff --git a/testing/xpcshell/example/unit/xpcshell.ini b/testing/xpcshell/example/unit/xpcshell.ini
new file mode 100644
index 0000000000..3af6770af3
--- /dev/null
+++ b/testing/xpcshell/example/unit/xpcshell.ini
@@ -0,0 +1,47 @@
+; This Source Code Form is subject to the terms of the Mozilla Public
+; License, v. 2.0. If a copy of the MPL was not distributed with this
+; file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+[DEFAULT]
+head =
+tail =
+skip-if = toolkit == 'gonk'
+support-files =
+ subdir/file.txt
+ file.txt
+ import_module.jsm
+ import_sub_module.jsm
+ load_subscript.js
+ location_load.js
+ check_profile.js
+
+[test_check_nsIException.js]
+[test_check_nsIException_failing.js]
+fail-if = true
+
+[test_do_get_tempdir.js]
+[test_execute_soon.js]
+[test_get_file.js]
+[test_get_idle.js]
+[test_import_module.js]
+[test_load.js]
+[test_load_httpd_js.js]
+[test_location.js]
+[test_profile.js]
+[test_profile_afterChange.js]
+[test_sample.js]
+
+[test_fail.js]
+fail-if = true
+
+[test_skip.js]
+skip-if = true
+
+[test_do_check_null.js]
+
+[test_do_check_null_failing.js]
+fail-if = true
+
+[test_do_check_matches.js]
+[test_do_check_matches_failing.js]
+fail-if = true
diff --git a/testing/xpcshell/head.js b/testing/xpcshell/head.js
new file mode 100644
index 0000000000..74fd482cf0
--- /dev/null
+++ b/testing/xpcshell/head.js
@@ -0,0 +1,1663 @@
+/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */
+/* vim:set ts=2 sw=2 sts=2 et: */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This file contains common code that is loaded before each test file.
+ * See http://developer.mozilla.org/en/docs/Writing_xpcshell-based_unit_tests
+ * for more information.
+ */
+
+var _quit = false;
+var _passed = true;
+var _tests_pending = 0;
+var _cleanupFunctions = [];
+var _pendingTimers = [];
+var _profileInitialized = false;
+
+// Register the testing-common resource protocol early, to have access to its
+// modules.
+_register_modules_protocol_handler();
+
+var _Promise = Components.utils.import("resource://gre/modules/Promise.jsm", {}).Promise;
+var _PromiseTestUtils = Components.utils.import("resource://testing-common/PromiseTestUtils.jsm", {}).PromiseTestUtils;
+Components.utils.importGlobalProperties(["XMLHttpRequest"]);
+
+// Support a common assertion library, Assert.jsm.
+var AssertCls = Components.utils.import("resource://testing-common/Assert.jsm", null).Assert;
+// Pass a custom report function for xpcshell-test style reporting.
+var Assert = new AssertCls(function(err, message, stack) {
+ if (err) {
+ do_report_result(false, err.message, err.stack);
+ } else {
+ do_report_result(true, message, stack);
+ }
+});
+
+
+var _add_params = function (params) {
+ if (typeof _XPCSHELL_PROCESS != "undefined") {
+ params.xpcshell_process = _XPCSHELL_PROCESS;
+ }
+};
+
+var _dumpLog = function (raw_msg) {
+ dump("\n" + JSON.stringify(raw_msg) + "\n");
+}
+
+var _LoggerClass = Components.utils.import("resource://testing-common/StructuredLog.jsm", null).StructuredLogger;
+var _testLogger = new _LoggerClass("xpcshell/head.js", _dumpLog, [_add_params]);
+
+// Disable automatic network detection, so tests work correctly when
+// not connected to a network.
+{
+ let ios = Components.classes["@mozilla.org/network/io-service;1"]
+ .getService(Components.interfaces.nsIIOService2);
+ ios.manageOfflineStatus = false;
+ ios.offline = false;
+}
+
+// Determine if we're running on parent or child
+var runningInParent = true;
+try {
+ runningInParent = Components.classes["@mozilla.org/xre/runtime;1"].
+ getService(Components.interfaces.nsIXULRuntime).processType
+ == Components.interfaces.nsIXULRuntime.PROCESS_TYPE_DEFAULT;
+}
+catch (e) { }
+
+// Only if building of places is enabled.
+if (runningInParent &&
+ "mozIAsyncHistory" in Components.interfaces) {
+ // Ensure places history is enabled for xpcshell-tests as some non-FF
+ // apps disable it.
+ let prefs = Components.classes["@mozilla.org/preferences-service;1"]
+ .getService(Components.interfaces.nsIPrefBranch);
+ prefs.setBoolPref("places.history.enabled", true);
+}
+
+try {
+ if (runningInParent) {
+ let prefs = Components.classes["@mozilla.org/preferences-service;1"]
+ .getService(Components.interfaces.nsIPrefBranch);
+
+ // disable necko IPC security checks for xpcshell, as they lack the
+ // docshells needed to pass them
+ prefs.setBoolPref("network.disable.ipc.security", true);
+
+ // Disable IPv6 lookups for 'localhost' on windows.
+ if ("@mozilla.org/windows-registry-key;1" in Components.classes) {
+ prefs.setCharPref("network.dns.ipv4OnlyDomains", "localhost");
+ }
+ }
+}
+catch (e) { }
+
+// Configure crash reporting, if possible
+// We rely on the Python harness to set MOZ_CRASHREPORTER,
+// MOZ_CRASHREPORTER_NO_REPORT, and handle checking for minidumps.
+// Note that if we're in a child process, we don't want to init the
+// crashreporter component.
+try {
+ if (runningInParent &&
+ "@mozilla.org/toolkit/crash-reporter;1" in Components.classes) {
+ let crashReporter =
+ Components.classes["@mozilla.org/toolkit/crash-reporter;1"]
+ .getService(Components.interfaces.nsICrashReporter);
+ crashReporter.UpdateCrashEventsDir();
+ crashReporter.minidumpPath = do_get_minidumpdir();
+ }
+}
+catch (e) { }
+
+// Configure a console listener so messages sent to it are logged as part
+// of the test.
+try {
+ let levelNames = {}
+ for (let level of ["debug", "info", "warn", "error"]) {
+ levelNames[Components.interfaces.nsIConsoleMessage[level]] = level;
+ }
+
+ let listener = {
+ QueryInterface : function(iid) {
+ if (!iid.equals(Components.interfaces.nsISupports) &&
+ !iid.equals(Components.interfaces.nsIConsoleListener)) {
+ throw Components.results.NS_NOINTERFACE;
+ }
+ return this;
+ },
+ observe : function (msg) {
+ if (typeof do_print === "function")
+ do_print("CONSOLE_MESSAGE: (" + levelNames[msg.logLevel] + ") " + msg.toString());
+ }
+ };
+ Components.classes["@mozilla.org/consoleservice;1"]
+ .getService(Components.interfaces.nsIConsoleService)
+ .registerListener(listener);
+} catch (e) {}
+/**
+ * Date.now() is not necessarily monotonically increasing (insert sob story
+ * about times not being the right tool to use for measuring intervals of time,
+ * robarnold can tell all), so guard against timing error by allowing at least
+ * _timerFuzz ms of slack.
+ */
+const _timerFuzz = 15;
+
+function _Timer(func, delay) {
+ delay = Number(delay);
+ if (delay < 0)
+ do_throw("do_timeout() delay must be nonnegative");
+
+ if (typeof func !== "function")
+ do_throw("string callbacks no longer accepted; use a function!");
+
+ this._func = func;
+ this._start = Date.now();
+ this._delay = delay;
+
+ var timer = Components.classes["@mozilla.org/timer;1"]
+ .createInstance(Components.interfaces.nsITimer);
+ timer.initWithCallback(this, delay + _timerFuzz, timer.TYPE_ONE_SHOT);
+
+ // Keep timer alive until it fires
+ _pendingTimers.push(timer);
+}
+_Timer.prototype = {
+ QueryInterface: function(iid) {
+ if (iid.equals(Components.interfaces.nsITimerCallback) ||
+ iid.equals(Components.interfaces.nsISupports))
+ return this;
+
+ throw Components.results.NS_ERROR_NO_INTERFACE;
+ },
+
+ notify: function(timer) {
+ _pendingTimers.splice(_pendingTimers.indexOf(timer), 1);
+
+ // The current nsITimer implementation can undershoot, but even if it
+ // couldn't, paranoia is probably a virtue here given the potential for
+ // random orange on tinderboxen.
+ var end = Date.now();
+ var elapsed = end - this._start;
+ if (elapsed >= this._delay) {
+ try {
+ this._func.call(null);
+ } catch (e) {
+ do_throw("exception thrown from do_timeout callback: " + e);
+ }
+ return;
+ }
+
+ // Timer undershot, retry with a little overshoot to try to avoid more
+ // undershoots.
+ var newDelay = this._delay - elapsed;
+ do_timeout(newDelay, this._func);
+ }
+};
+
+function _do_main() {
+ if (_quit)
+ return;
+
+ _testLogger.info("running event loop");
+
+ var thr = Components.classes["@mozilla.org/thread-manager;1"]
+ .getService().currentThread;
+
+ while (!_quit)
+ thr.processNextEvent(true);
+
+ while (thr.hasPendingEvents())
+ thr.processNextEvent(true);
+}
+
+function _do_quit() {
+ _testLogger.info("exiting test");
+ _quit = true;
+}
+
+/**
+ * Overrides idleService with a mock. Idle is commonly used for maintenance
+ * tasks, thus if a test uses a service that requires the idle service, it will
+ * start handling them.
+ * This behaviour would cause random failures and slowdown tests execution,
+ * for example by running database vacuum or cleanups for each test.
+ *
+ * @note Idle service is overridden by default. If a test requires it, it will
+ * have to call do_get_idle() function at least once before use.
+ */
+var _fakeIdleService = {
+ get registrar() {
+ delete this.registrar;
+ return this.registrar =
+ Components.manager.QueryInterface(Components.interfaces.nsIComponentRegistrar);
+ },
+ contractID: "@mozilla.org/widget/idleservice;1",
+ get CID() {
+ return this.registrar.contractIDToCID(this.contractID);
+ },
+
+ activate: function FIS_activate()
+ {
+ if (!this.originalFactory) {
+ // Save original factory.
+ this.originalFactory =
+ Components.manager.getClassObject(Components.classes[this.contractID],
+ Components.interfaces.nsIFactory);
+ // Unregister original factory.
+ this.registrar.unregisterFactory(this.CID, this.originalFactory);
+ // Replace with the mock.
+ this.registrar.registerFactory(this.CID, "Fake Idle Service",
+ this.contractID, this.factory
+ );
+ }
+ },
+
+ deactivate: function FIS_deactivate()
+ {
+ if (this.originalFactory) {
+ // Unregister the mock.
+ this.registrar.unregisterFactory(this.CID, this.factory);
+ // Restore original factory.
+ this.registrar.registerFactory(this.CID, "Idle Service",
+ this.contractID, this.originalFactory);
+ delete this.originalFactory;
+ }
+ },
+
+ factory: {
+ // nsIFactory
+ createInstance: function (aOuter, aIID)
+ {
+ if (aOuter) {
+ throw Components.results.NS_ERROR_NO_AGGREGATION;
+ }
+ return _fakeIdleService.QueryInterface(aIID);
+ },
+ lockFactory: function (aLock) {
+ throw Components.results.NS_ERROR_NOT_IMPLEMENTED;
+ },
+ QueryInterface: function(aIID) {
+ if (aIID.equals(Components.interfaces.nsIFactory) ||
+ aIID.equals(Components.interfaces.nsISupports)) {
+ return this;
+ }
+ throw Components.results.NS_ERROR_NO_INTERFACE;
+ }
+ },
+
+ // nsIIdleService
+ get idleTime() {
+ return 0;
+ },
+ addIdleObserver: function () {},
+ removeIdleObserver: function () {},
+
+ QueryInterface: function(aIID) {
+ // Useful for testing purposes, see test_get_idle.js.
+ if (aIID.equals(Components.interfaces.nsIFactory)) {
+ return this.factory;
+ }
+ if (aIID.equals(Components.interfaces.nsIIdleService) ||
+ aIID.equals(Components.interfaces.nsISupports)) {
+ return this;
+ }
+ throw Components.results.NS_ERROR_NO_INTERFACE;
+ }
+}
+
+/**
+ * Restores the idle service factory if needed and returns the service's handle.
+ * @return A handle to the idle service.
+ */
+function do_get_idle() {
+ _fakeIdleService.deactivate();
+ return Components.classes[_fakeIdleService.contractID]
+ .getService(Components.interfaces.nsIIdleService);
+}
+
+// Map resource://test/ to current working directory and
+// resource://testing-common/ to the shared test modules directory.
+function _register_protocol_handlers() {
+ let ios = Components.classes["@mozilla.org/network/io-service;1"]
+ .getService(Components.interfaces.nsIIOService);
+ let protocolHandler =
+ ios.getProtocolHandler("resource")
+ .QueryInterface(Components.interfaces.nsIResProtocolHandler);
+
+ let curDirURI = ios.newFileURI(do_get_cwd());
+ protocolHandler.setSubstitution("test", curDirURI);
+
+ _register_modules_protocol_handler();
+}
+
+function _register_modules_protocol_handler() {
+ if (!_TESTING_MODULES_DIR) {
+ throw new Error("Please define a path where the testing modules can be " +
+ "found in a variable called '_TESTING_MODULES_DIR' before " +
+ "head.js is included.");
+ }
+
+ let ios = Components.classes["@mozilla.org/network/io-service;1"]
+ .getService(Components.interfaces.nsIIOService);
+ let protocolHandler =
+ ios.getProtocolHandler("resource")
+ .QueryInterface(Components.interfaces.nsIResProtocolHandler);
+
+ let modulesFile = Components.classes["@mozilla.org/file/local;1"].
+ createInstance(Components.interfaces.nsILocalFile);
+ modulesFile.initWithPath(_TESTING_MODULES_DIR);
+
+ if (!modulesFile.exists()) {
+ throw new Error("Specified modules directory does not exist: " +
+ _TESTING_MODULES_DIR);
+ }
+
+ if (!modulesFile.isDirectory()) {
+ throw new Error("Specified modules directory is not a directory: " +
+ _TESTING_MODULES_DIR);
+ }
+
+ let modulesURI = ios.newFileURI(modulesFile);
+
+ protocolHandler.setSubstitution("testing-common", modulesURI);
+}
+
+/* Debugging support */
+// Used locally and by our self-tests.
+function _setupDebuggerServer(breakpointFiles, callback) {
+ let prefs = Components.classes["@mozilla.org/preferences-service;1"]
+ .getService(Components.interfaces.nsIPrefBranch);
+
+ // Always allow remote debugging.
+ prefs.setBoolPref("devtools.debugger.remote-enabled", true);
+
+ // for debugging-the-debugging, let an env var cause log spew.
+ let env = Components.classes["@mozilla.org/process/environment;1"]
+ .getService(Components.interfaces.nsIEnvironment);
+ if (env.get("DEVTOOLS_DEBUGGER_LOG")) {
+ prefs.setBoolPref("devtools.debugger.log", true);
+ }
+ if (env.get("DEVTOOLS_DEBUGGER_LOG_VERBOSE")) {
+ prefs.setBoolPref("devtools.debugger.log.verbose", true);
+ }
+
+ let require;
+ try {
+ ({ require } = Components.utils.import("resource://devtools/shared/Loader.jsm", {}));
+ } catch (e) {
+ throw new Error("resource://devtools appears to be inaccessible from the " +
+ "xpcshell environment.\n" +
+ "This can usually be resolved by adding:\n" +
+ " firefox-appdir = browser\n" +
+ "to the xpcshell.ini manifest.\n" +
+                    "It is possible for this to alter test behavior by " +
+ "triggering additional browser code to run, so check " +
+ "test behavior after making this change.\n" +
+ "See also https://bugzil.la/1215378.")
+ }
+ let { DebuggerServer } = require("devtools/server/main");
+ let { OriginalLocation } = require("devtools/server/actors/common");
+ DebuggerServer.init();
+ DebuggerServer.addBrowserActors();
+ DebuggerServer.addActors("resource://testing-common/dbg-actors.js");
+ DebuggerServer.allowChromeProcess = true;
+
+ // An observer notification that tells us when we can "resume" script
+ // execution.
+ let obsSvc = Components.classes["@mozilla.org/observer-service;1"].
+ getService(Components.interfaces.nsIObserverService);
+
+ const TOPICS = ["devtools-thread-resumed", "xpcshell-test-devtools-shutdown"];
+ let observe = function(subject, topic, data) {
+ switch (topic) {
+ case "devtools-thread-resumed":
+ // Exceptions in here aren't reported and block the debugger from
+ // resuming, so...
+ try {
+ // Add a breakpoint for the first line in our test files.
+ let threadActor = subject.wrappedJSObject;
+ for (let file of breakpointFiles) {
+ // Pass an empty `source` object to workaround `source` function assertion
+ let sourceActor = threadActor.sources.source({originalUrl: file, source: {}});
+ sourceActor._getOrCreateBreakpointActor(new OriginalLocation(sourceActor, 1));
+ }
+ } catch (ex) {
+ do_print("Failed to initialize breakpoints: " + ex + "\n" + ex.stack);
+ }
+ break;
+ case "xpcshell-test-devtools-shutdown":
+ // the debugger has shutdown before we got a resume event - nothing
+ // special to do here.
+ break;
+ }
+ for (let topicToRemove of TOPICS) {
+ obsSvc.removeObserver(observe, topicToRemove);
+ }
+ callback();
+ };
+
+ for (let topic of TOPICS) {
+ obsSvc.addObserver(observe, topic, false);
+ }
+ return DebuggerServer;
+}
+
+function _initDebugging(port) {
+ let initialized = false;
+ let DebuggerServer = _setupDebuggerServer(_TEST_FILE, () => {initialized = true;});
+
+ do_print("");
+ do_print("*******************************************************************");
+ do_print("Waiting for the debugger to connect on port " + port)
+ do_print("")
+ do_print("To connect the debugger, open a Firefox instance, select 'Connect'");
+ do_print("from the Developer menu and specify the port as " + port);
+ do_print("*******************************************************************");
+ do_print("")
+
+ let AuthenticatorType = DebuggerServer.Authenticators.get("PROMPT");
+ let authenticator = new AuthenticatorType.Server();
+ authenticator.allowConnection = () => {
+ return DebuggerServer.AuthenticationResult.ALLOW;
+ };
+
+ let listener = DebuggerServer.createListener();
+ listener.portOrPath = port;
+ listener.authenticator = authenticator;
+ listener.open();
+
+ // spin an event loop until the debugger connects.
+ let thr = Components.classes["@mozilla.org/thread-manager;1"]
+ .getService().currentThread;
+ while (!initialized) {
+ do_print("Still waiting for debugger to connect...");
+ thr.processNextEvent(true);
+ }
+ // NOTE: if you want to debug the harness itself, you can now add a 'debugger'
+ // statement anywhere and it will stop - but we've already added a breakpoint
+ // for the first line of the test scripts, so we just continue...
+ do_print("Debugger connected, starting test execution");
+}
+
+function _execute_test() {
+ // _JSDEBUGGER_PORT is dynamically defined by <runxpcshelltests.py>.
+ if (_JSDEBUGGER_PORT) {
+ try {
+ _initDebugging(_JSDEBUGGER_PORT);
+ } catch (ex) {
+ // Fail the test run immediately if debugging is requested but fails, so
+ // that the failure state is more obvious.
+ do_throw(`Failed to initialize debugging: ${ex}`, ex.stack);
+ }
+ }
+
+ _register_protocol_handlers();
+
+ // Override idle service by default.
+ // Call do_get_idle() to restore the factory and get the service.
+ _fakeIdleService.activate();
+
+ _PromiseTestUtils.init();
+ _PromiseTestUtils.Assert = Assert;
+
+ let coverageCollector = null;
+ if (typeof _JSCOV_DIR === 'string') {
+ let _CoverageCollector = Components.utils.import("resource://testing-common/CoverageUtils.jsm", {}).CoverageCollector;
+ coverageCollector = new _CoverageCollector(_JSCOV_DIR);
+ }
+
+ // _HEAD_FILES is dynamically defined by <runxpcshelltests.py>.
+ _load_files(_HEAD_FILES);
+ // _TEST_FILE is dynamically defined by <runxpcshelltests.py>.
+ _load_files(_TEST_FILE);
+
+ // Tack Assert.jsm methods to the current scope.
+ this.Assert = Assert;
+ for (let func in Assert) {
+ this[func] = Assert[func].bind(Assert);
+ }
+
+ if (_gTestHasOnly) {
+ _gTests = _gTests.filter(([props,]) => {
+ return ("_only" in props) && props._only;
+ });
+ }
+
+ try {
+ do_test_pending("MAIN run_test");
+ // Check if run_test() is defined. If defined, run it.
+ // Else, call run_next_test() directly to invoke tests
+ // added by add_test() and add_task().
+ if (typeof run_test === "function") {
+ run_test();
+ } else {
+ run_next_test();
+ }
+
+ if (coverageCollector != null) {
+ coverageCollector.recordTestCoverage(_TEST_FILE[0]);
+ }
+
+ do_test_finished("MAIN run_test");
+ _do_main();
+ _PromiseTestUtils.assertNoUncaughtRejections();
+ } catch (e) {
+ _passed = false;
+ // do_check failures are already logged and set _quit to true and throw
+ // NS_ERROR_ABORT. If both of those are true it is likely this exception
+ // has already been logged so there is no need to log it again. It's
+ // possible that this will mask an NS_ERROR_ABORT that happens after a
+ // do_check failure though.
+ if (coverageCollector != null) {
+ coverageCollector.recordTestCoverage(_TEST_FILE[0]);
+ }
+
+ if (!_quit || e != Components.results.NS_ERROR_ABORT) {
+ let extra = {};
+ if (e.fileName) {
+ extra.source_file = e.fileName;
+ if (e.lineNumber) {
+ extra.line_number = e.lineNumber;
+ }
+ } else {
+ extra.source_file = "xpcshell/head.js";
+ }
+ let message = _exception_message(e);
+ if (e.stack) {
+ extra.stack = _format_stack(e.stack);
+ }
+ _testLogger.error(message, extra);
+ }
+ }
+
+ if (coverageCollector != null) {
+ coverageCollector.finalize();
+ }
+
+ // _TAIL_FILES is dynamically defined by <runxpcshelltests.py>.
+ _load_files(_TAIL_FILES);
+
+ // Execute all of our cleanup functions.
+ let reportCleanupError = function(ex) {
+ let stack, filename;
+ if (ex && typeof ex == "object" && "stack" in ex) {
+ stack = ex.stack;
+ } else {
+ stack = Components.stack.caller;
+ }
+ if (stack instanceof Components.interfaces.nsIStackFrame) {
+ filename = stack.filename;
+ } else if (ex.fileName) {
+ filename = ex.fileName;
+ }
+ _testLogger.error(_exception_message(ex),
+ {
+ stack: _format_stack(stack),
+ source_file: filename
+ });
+ };
+
+ let func;
+ while ((func = _cleanupFunctions.pop())) {
+ let result;
+ try {
+ result = func();
+ } catch (ex) {
+ reportCleanupError(ex);
+ continue;
+ }
+ if (result && typeof result == "object"
+ && "then" in result && typeof result.then == "function") {
+ // This is a promise, wait until it is satisfied before proceeding
+ let complete = false;
+ let promise = result.then(null, reportCleanupError);
+ promise = promise.then(() => complete = true);
+ let thr = Components.classes["@mozilla.org/thread-manager;1"]
+ .getService().currentThread;
+ while (!complete) {
+ thr.processNextEvent(true);
+ }
+ }
+ }
+
+ // Restore idle service to avoid leaks.
+ _fakeIdleService.deactivate();
+
+ if (_profileInitialized) {
+ // Since we have a profile, we will notify profile shutdown topics at
+ // the end of the current test, to ensure correct cleanup on shutdown.
+ let obs = Components.classes["@mozilla.org/observer-service;1"]
+ .getService(Components.interfaces.nsIObserverService);
+ obs.notifyObservers(null, "profile-change-net-teardown", null);
+ obs.notifyObservers(null, "profile-change-teardown", null);
+ obs.notifyObservers(null, "profile-before-change", null);
+ obs.notifyObservers(null, "profile-before-change-qm", null);
+
+ _profileInitialized = false;
+ }
+
+ try {
+ _PromiseTestUtils.ensureDOMPromiseRejectionsProcessed();
+ _PromiseTestUtils.assertNoUncaughtRejections();
+ _PromiseTestUtils.assertNoMoreExpectedRejections();
+ } finally {
+ // It's important to terminate the module to avoid crashes on shutdown.
+ _PromiseTestUtils.uninit();
+ }
+}
+
+/**
+ * Loads files.
+ *
+ * @param aFiles Array of files to load.
+ */
+function _load_files(aFiles) {
+ function load_file(element, index, array) {
+ try {
+ load(element);
+ } catch (e) {
+ let extra = {
+ source_file: element
+ }
+ if (e.stack) {
+ extra.stack = _format_stack(e.stack);
+ }
+ _testLogger.error(_exception_message(e), extra);
+ }
+ }
+
+ aFiles.forEach(load_file);
+}
+
+function _wrap_with_quotes_if_necessary(val) {
+ return typeof val == "string" ? '"' + val + '"' : val;
+}
+
+/************** Functions to be used from the tests **************/
+
+/**
+ * Prints a message to the output log.
+ */
+function do_print(msg, data) {
+ msg = _wrap_with_quotes_if_necessary(msg);
+ data = data ? data : null;
+ _testLogger.info(msg, data);
+}
+
+/**
+ * Calls the given function at least the specified number of milliseconds later.
+ * The callback will not undershoot the given time, but it might overshoot --
+ * don't expect precision!
+ *
+ * @param delay : uint
+ * the number of milliseconds to delay
+ * @param callback : function() : void
+ * the function to call
+ */
+function do_timeout(delay, func) {
+ new _Timer(func, Number(delay));
+}
+
+function do_execute_soon(callback, aName) {
+ let funcName = (aName ? aName : callback.name);
+ do_test_pending(funcName);
+ var tm = Components.classes["@mozilla.org/thread-manager;1"]
+ .getService(Components.interfaces.nsIThreadManager);
+
+ tm.mainThread.dispatch({
+ run: function() {
+ try {
+ callback();
+ } catch (e) {
+ // do_check failures are already logged and set _quit to true and throw
+ // NS_ERROR_ABORT. If both of those are true it is likely this exception
+ // has already been logged so there is no need to log it again. It's
+ // possible that this will mask an NS_ERROR_ABORT that happens after a
+ // do_check failure though.
+ if (!_quit || e != Components.results.NS_ERROR_ABORT) {
+ let stack = e.stack ? _format_stack(e.stack) : null;
+ _testLogger.testStatus(_TEST_NAME,
+ funcName,
+ 'FAIL',
+ 'PASS',
+ _exception_message(e),
+ stack);
+ _do_quit();
+ }
+ }
+ finally {
+ do_test_finished(funcName);
+ }
+ }
+ }, Components.interfaces.nsIThread.DISPATCH_NORMAL);
+}
+
+/**
+ * Shows an error message and the current stack and aborts the test.
+ *
+ * @param error A message string or an Error object.
+ * @param stack null or nsIStackFrame object or a string containing
+ * \n separated stack lines (as in Error().stack).
+ */
+function do_throw(error, stack) {
+ let filename = "";
+ // If we didn't get passed a stack, maybe the error has one
+ // otherwise get it from our call context
+ stack = stack || error.stack || Components.stack.caller;
+
+ if (stack instanceof Components.interfaces.nsIStackFrame)
+ filename = stack.filename;
+ else if (error.fileName)
+ filename = error.fileName;
+
+ _testLogger.error(_exception_message(error),
+ {
+ source_file: filename,
+ stack: _format_stack(stack)
+ });
+ _abort_failed_test();
+}
+
+function _abort_failed_test() {
+ // Called to abort the test run after all failures are logged.
+ _passed = false;
+ _do_quit();
+ throw Components.results.NS_ERROR_ABORT;
+}
+
+function _format_stack(stack) {
+ let normalized;
+ if (stack instanceof Components.interfaces.nsIStackFrame) {
+ let frames = [];
+ for (let frame = stack; frame; frame = frame.caller) {
+ frames.push(frame.filename + ":" + frame.name + ":" + frame.lineNumber);
+ }
+ normalized = frames.join("\n");
+ } else {
+ normalized = "" + stack;
+ }
+ return _Task.Debugging.generateReadableStack(normalized, " ");
+}
+
+// Make a nice display string from an object that behaves
+// like Error
+function _exception_message(ex) {
+ let message = "";
+ if (ex.name) {
+ message = ex.name + ": ";
+ }
+ if (ex.message) {
+ message += ex.message;
+ }
+ if (ex.fileName) {
+ message += (" at " + ex.fileName);
+ if (ex.lineNumber) {
+ message += (":" + ex.lineNumber);
+ }
+ }
+ if (message !== "") {
+ return message;
+ }
+ // Force ex to be stringified
+ return "" + ex;
+}
+
+function do_report_unexpected_exception(ex, text) {
+ let filename = Components.stack.caller.filename;
+ text = text ? text + " - " : "";
+
+ _passed = false;
+ _testLogger.error(text + "Unexpected exception " + _exception_message(ex),
+ {
+ source_file: filename,
+ stack: _format_stack(ex.stack)
+ });
+ _do_quit();
+ throw Components.results.NS_ERROR_ABORT;
+}
+
+function do_note_exception(ex, text) {
+ let filename = Components.stack.caller.filename;
+ _testLogger.info(text + "Swallowed exception " + _exception_message(ex),
+ {
+ source_file: filename,
+ stack: _format_stack(ex.stack)
+ });
+}
+
+function _do_check_neq(left, right, stack, todo) {
+ Assert.notEqual(left, right);
+}
+
+function do_check_neq(left, right, stack) {
+ if (!stack)
+ stack = Components.stack.caller;
+
+ _do_check_neq(left, right, stack, false);
+}
+
+function todo_check_neq(left, right, stack) {
+ if (!stack)
+ stack = Components.stack.caller;
+
+ _do_check_neq(left, right, stack, true);
+}
+
+function do_report_result(passed, text, stack, todo) {
+ while (stack.filename.includes("head.js") && stack.caller) {
+ stack = stack.caller;
+ }
+
+ let name = _gRunningTest ? _gRunningTest.name : stack.name;
+ let message;
+ if (name) {
+ message = "[" + name + " : " + stack.lineNumber + "] " + text;
+ } else {
+ message = text;
+ }
+
+ if (passed) {
+ if (todo) {
+ _testLogger.testStatus(_TEST_NAME,
+ name,
+ "PASS",
+ "FAIL",
+ message,
+ _format_stack(stack));
+ _abort_failed_test();
+ } else {
+ _testLogger.testStatus(_TEST_NAME,
+ name,
+ "PASS",
+ "PASS",
+ message);
+ }
+ } else {
+ if (todo) {
+ _testLogger.testStatus(_TEST_NAME,
+ name,
+ "FAIL",
+ "FAIL",
+ message);
+ } else {
+ _testLogger.testStatus(_TEST_NAME,
+ name,
+ "FAIL",
+ "PASS",
+ message,
+ _format_stack(stack));
+ _abort_failed_test();
+ }
+ }
+}
+
+function _do_check_eq(left, right, stack, todo) {
+ if (!stack)
+ stack = Components.stack.caller;
+
+ var text = _wrap_with_quotes_if_necessary(left) + " == " +
+ _wrap_with_quotes_if_necessary(right);
+ do_report_result(left == right, text, stack, todo);
+}
+
+function do_check_eq(left, right, stack) {
+ Assert.equal(left, right);
+}
+
+function todo_check_eq(left, right, stack) {
+ if (!stack)
+ stack = Components.stack.caller;
+
+ _do_check_eq(left, right, stack, true);
+}
+
+function do_check_true(condition, stack) {
+ Assert.ok(condition, stack);
+}
+
+function todo_check_true(condition, stack) {
+ if (!stack)
+ stack = Components.stack.caller;
+
+ todo_check_eq(condition, true, stack);
+}
+
+function do_check_false(condition, stack) {
+ Assert.ok(!condition, stack);
+}
+
+function todo_check_false(condition, stack) {
+ if (!stack)
+ stack = Components.stack.caller;
+
+ todo_check_eq(condition, false, stack);
+}
+
+function do_check_null(condition, stack) {
+ Assert.equal(condition, null);
+}
+
+function todo_check_null(condition, stack=Components.stack.caller) {
+ todo_check_eq(condition, null, stack);
+}
+function do_check_matches(pattern, value) {
+ Assert.deepEqual(pattern, value);
+}
+
+// Check that |func| throws an nsIException that has
+// |Components.results[resultName]| as the value of its 'result' property.
+function do_check_throws_nsIException(func, resultName,
+ stack=Components.stack.caller, todo=false)
+{
+ let expected = Components.results[resultName];
+ if (typeof expected !== 'number') {
+ do_throw("do_check_throws_nsIException requires a Components.results" +
+ " property name, not " + uneval(resultName), stack);
+ }
+
+ let msg = ("do_check_throws_nsIException: func should throw" +
+ " an nsIException whose 'result' is Components.results." +
+ resultName);
+
+ try {
+ func();
+ } catch (ex) {
+ if (!(ex instanceof Components.interfaces.nsIException) ||
+ ex.result !== expected) {
+ do_report_result(false, msg + ", threw " + legible_exception(ex) +
+ " instead", stack, todo);
+ }
+
+ do_report_result(true, msg, stack, todo);
+ return;
+ }
+
+ // Call this here, not in the 'try' clause, so do_report_result's own
+ // throw doesn't get caught by our 'catch' clause.
+ do_report_result(false, msg + ", but returned normally", stack, todo);
+}
+
+// Produce a human-readable form of |exception|. This looks up
+// Components.results values, tries toString methods, and so on.
+function legible_exception(exception)
+{
+ switch (typeof exception) {
+ case 'object':
+ if (exception instanceof Components.interfaces.nsIException) {
+ return "nsIException instance: " + uneval(exception.toString());
+ }
+ return exception.toString();
+
+ case 'number':
+ for (let name in Components.results) {
+ if (exception === Components.results[name]) {
+ return "Components.results." + name;
+ }
+ }
+
+ // Fall through.
+ default:
+ return uneval(exception);
+ }
+}
+
+function do_check_instanceof(value, constructor,
+ stack=Components.stack.caller, todo=false) {
+ do_report_result(value instanceof constructor,
+ "value should be an instance of " + constructor.name,
+ stack, todo);
+}
+
+function todo_check_instanceof(value, constructor,
+ stack=Components.stack.caller) {
+ do_check_instanceof(value, constructor, stack, true);
+}
+
+function do_test_pending(aName) {
+ ++_tests_pending;
+
+ _testLogger.info("(xpcshell/head.js) | test" +
+ (aName ? " " + aName : "") +
+ " pending (" + _tests_pending + ")");
+}
+
+function do_test_finished(aName) {
+ _testLogger.info("(xpcshell/head.js) | test" +
+ (aName ? " " + aName : "") +
+ " finished (" + _tests_pending + ")");
+ if (--_tests_pending == 0)
+ _do_quit();
+}
+
+function do_get_file(path, allowNonexistent) {
+ try {
+ let lf = Components.classes["@mozilla.org/file/directory_service;1"]
+ .getService(Components.interfaces.nsIProperties)
+ .get("CurWorkD", Components.interfaces.nsILocalFile);
+
+ let bits = path.split("/");
+ for (let i = 0; i < bits.length; i++) {
+ if (bits[i]) {
+ if (bits[i] == "..")
+ lf = lf.parent;
+ else
+ lf.append(bits[i]);
+ }
+ }
+
+ if (!allowNonexistent && !lf.exists()) {
+ // Not using do_throw(): caller will continue.
+ _passed = false;
+ var stack = Components.stack.caller;
+ _testLogger.error("[" + stack.name + " : " + stack.lineNumber + "] " +
+ lf.path + " does not exist");
+ }
+
+ return lf;
+ }
+ catch (ex) {
+ do_throw(ex.toString(), Components.stack.caller);
+ }
+
+ return null;
+}
+
+// do_get_cwd() isn't exactly self-explanatory, so provide a helper
+function do_get_cwd() {
+ return do_get_file("");
+}
+
+function do_load_manifest(path) {
+ var lf = do_get_file(path);
+ const nsIComponentRegistrar = Components.interfaces.nsIComponentRegistrar;
+ do_check_true(Components.manager instanceof nsIComponentRegistrar);
+ // Previous do_check_true() is not a test check.
+ Components.manager.autoRegister(lf);
+}
+
/**
 * Parse a DOM document.
 *
 * @param aPath File path to the document.
 * @param aType Content type to use in DOMParser; must be one of
 *              application/xhtml+xml, application/xml or text/xml.
 *
 * @return Promise resolving to the nsIDOMDocument from the file.
 */
function do_parse_document(aPath, aType) {
  if (aType != "application/xhtml+xml" &&
      aType != "application/xml" &&
      aType != "text/xml") {
    do_throw("type: expected application/xhtml+xml, application/xml or text/xml," +
             " got '" + aType + "'",
             Components.stack.caller);
  }

  let docFile = do_get_file(aPath);
  let ioService = Components.classes['@mozilla.org/network/io-service;1']
                            .getService(Components.interfaces.nsIIOService);
  let url = ioService.newFileURI(docFile).spec;
  // Drop the file reference before the async load to avoid retaining it.
  docFile = null;

  return new Promise((resolve, reject) => {
    let request = new XMLHttpRequest();
    request.open("GET", url);
    request.responseType = "document";
    request.onerror = reject;
    request.onload = () => {
      resolve(request.response);
    };
    request.send();
  });
}
+
/**
 * Registers a function that will run when the test harness is done running all
 * tests.
 *
 * @param aFunction
 *        The function to be called when the test harness has finished running.
 */
function do_register_cleanup(aFunction)
{
  // Execution is deferred: the harness drains this list during shutdown.
  _cleanupFunctions[_cleanupFunctions.length] = aFunction;
}
+
/**
 * Returns the directory for a temp dir, which is created by the
 * test harness. Every test gets its own temp dir.
 *
 * @return nsILocalFile of the temporary directory
 */
function do_get_tempdir() {
  const env = Components.classes["@mozilla.org/process/environment;1"]
                        .getService(Components.interfaces.nsIEnvironment);
  // The python harness exports this variable for every test.
  const tmpPath = env.get("XPCSHELL_TEST_TEMP_DIR");
  const tmpDir = Components.classes["@mozilla.org/file/local;1"]
                           .createInstance(Components.interfaces.nsILocalFile);
  tmpDir.initWithPath(tmpPath);
  return tmpDir;
}
+
/**
 * Returns the directory for crashreporter minidumps.  Falls back to the
 * per-test temp dir when the harness did not export a dedicated location.
 *
 * @return nsILocalFile of the minidump directory
 */
function do_get_minidumpdir() {
  const env = Components.classes["@mozilla.org/process/environment;1"]
                        .getService(Components.interfaces.nsIEnvironment);
  // The python harness may set this in the environment for us.
  const dumpPath = env.get("XPCSHELL_MINIDUMP_DIR");
  if (!dumpPath) {
    return do_get_tempdir();
  }
  const dumpDir = Components.classes["@mozilla.org/file/local;1"]
                            .createInstance(Components.interfaces.nsILocalFile);
  dumpDir.initWithPath(dumpPath);
  return dumpDir;
}
+
/**
 * Registers a directory with the profile service,
 * and return the directory as an nsILocalFile.
 *
 * Parent-process only: child processes log and return null, relying on the
 * parent's profile.  The same directory is served for ProfD, ProfLD, ProfDS,
 * ProfLDS and TmpD.  Profile notifications fire at most once per process.
 *
 * @param notifyProfileAfterChange Whether to notify for "profile-after-change".
 * @return nsILocalFile of the profile directory (a clone; safe to mutate),
 *         or null in a child process.
 */
function do_get_profile(notifyProfileAfterChange = false) {
  if (!runningInParent) {
    _testLogger.info("Ignoring profile creation from child process.");
    return null;
  }

  let env = Components.classes["@mozilla.org/process/environment;1"]
                      .getService(Components.interfaces.nsIEnvironment);
  // the python harness sets this in the environment for us
  let profd = env.get("XPCSHELL_TEST_PROFILE_DIR");
  let file = Components.classes["@mozilla.org/file/local;1"]
                       .createInstance(Components.interfaces.nsILocalFile);
  file.initWithPath(profd);

  let dirSvc = Components.classes["@mozilla.org/file/directory_service;1"]
                         .getService(Components.interfaces.nsIProperties);
  let provider = {
    // Serve the test profile directory for every profile/temp key.
    getFile: function(prop, persistent) {
      persistent.value = true;
      if (prop == "ProfD" || prop == "ProfLD" || prop == "ProfDS" ||
          prop == "ProfLDS" || prop == "TmpD") {
        return file.clone();
      }
      return null;
    },
    QueryInterface: function(iid) {
      if (iid.equals(Components.interfaces.nsIDirectoryServiceProvider) ||
          iid.equals(Components.interfaces.nsISupports)) {
        return this;
      }
      throw Components.results.NS_ERROR_NO_INTERFACE;
    }
  };
  dirSvc.QueryInterface(Components.interfaces.nsIDirectoryService)
        .registerProvider(provider);

  let obsSvc = Components.classes["@mozilla.org/observer-service;1"].
    getService(Components.interfaces.nsIObserverService);

  // We need to update the crash events directory when the profile changes.
  // NOTE(review): the runningInParent check below is always true here (we
  // returned early above); harmless redundancy.
  if (runningInParent &&
      "@mozilla.org/toolkit/crash-reporter;1" in Components.classes) {
    let crashReporter =
        Components.classes["@mozilla.org/toolkit/crash-reporter;1"]
                  .getService(Components.interfaces.nsICrashReporter);
    crashReporter.UpdateCrashEventsDir();
  }

  // Fire the profile notifications only on the first call.
  if (!_profileInitialized) {
    obsSvc.notifyObservers(null, "profile-do-change", "xpcshell-do-get-profile");
    _profileInitialized = true;
    if (notifyProfileAfterChange) {
      obsSvc.notifyObservers(null, "profile-after-change", "xpcshell-do-get-profile");
    }
  }

  // The methods of 'provider' will retain this scope so null out everything
  // to avoid spurious leak reports.
  env = null;
  profd = null;
  dirSvc = null;
  provider = null;
  obsSvc = null;
  return file.clone();
}
+
/**
 * This function loads head.js (this file) in the child process, so that all
 * functions defined in this file (do_throw, etc) are available to subsequent
 * sendCommand calls.  It also sets various constants used by these functions.
 *
 * (Note that you may use sendCommand without calling this function first;  you
 *  simply won't have any of the functions in this file available.)
 */
function do_load_child_test_harness()
{
  // Make sure this isn't called from child process
  // NOTE(review): the message names run_test_in_child — the public entry
  // point that calls us — rather than this function; confirm before changing
  // the wording, as logs/tests may match on it.
  if (!runningInParent) {
    do_throw("run_test_in_child cannot be called from child!");
  }

  // Allow to be called multiple times, but only run once
  if (typeof do_load_child_test_harness.alreadyRun != "undefined")
    return;
  do_load_child_test_harness.alreadyRun = 1;

  _XPCSHELL_PROCESS = "parent";

  // Serialize the harness constants the child-side head.js expects;
  // uneval() yields source text that reconstructs each value when evaluated.
  let command =
        "const _HEAD_JS_PATH=" + uneval(_HEAD_JS_PATH) + "; "
      + "const _HEAD_FILES=" + uneval(_HEAD_FILES) + "; "
      + "const _MOZINFO_JS_PATH=" + uneval(_MOZINFO_JS_PATH) + "; "
      + "const _TAIL_FILES=" + uneval(_TAIL_FILES) + "; "
      + "const _TEST_NAME=" + uneval(_TEST_NAME) + "; "
      // We'll need more magic to get the debugger working in the child
      + "const _JSDEBUGGER_PORT=0; "
      + "const _XPCSHELL_PROCESS='child';";

  // Optional constants: only forward them when the parent has them defined.
  if (typeof _JSCOV_DIR === 'string') {
    command += " const _JSCOV_DIR=" + uneval(_JSCOV_DIR) + ";";
  }

  if (_TESTING_MODULES_DIR) {
    command += " const _TESTING_MODULES_DIR=" + uneval(_TESTING_MODULES_DIR) + ";";
  }

  command += " load(_HEAD_JS_PATH);";
  sendCommand(command);
}
+
/**
 * Runs an entire xpcshell unit test in a child process (rather than in chrome,
 * which is the default).
 *
 * This function returns immediately, before the test has completed.
 *
 * @param testFile
 *        The name of the script to run. Path format same as load().
 * @param optionalCallback.
 *        Optional function to be called (in parent) when test on child is
 *        complete. If provided, the function must call do_test_finished();
 * @return Promise Resolved when the test in the child is complete.
 */
function run_test_in_child(testFile, optionalCallback)
{
  return new Promise((resolve) => {
    const onChildDone = () => {
      resolve();
      if (typeof optionalCallback == 'undefined') {
        do_test_finished();
      } else {
        optionalCallback();
      }
    };

    do_load_child_test_harness();

    // Child-side paths always use forward slashes.
    const childPath = do_get_file(testFile).path.replace(/\\/g, "/");
    do_test_pending("run in child");
    const script = "_testLogger.info('CHILD-TEST-STARTED'); "
                 + "const _TEST_FILE=['" + childPath + "']; "
                 + "_execute_test(); "
                 + "_testLogger.info('CHILD-TEST-COMPLETED');";
    sendCommand(script, onChildDone);
  });
}
+
/**
 * Execute a given function as soon as a particular cross-process message is received.
 * Must be paired with do_send_remote_message or equivalent ProcessMessageManager calls.
 *
 * @param optionalCallback
 *        Optional callback that is invoked when the message is received. If provided,
 *        the function must call do_test_finished().
 * @return Promise Promise that is resolved when the message is received.
 */
function do_await_remote_message(name, optionalCallback)
{
  return new Promise((resolve) => {
    let mm;
    const listener = {
      receiveMessage(message) {
        if (message.name != name) {
          return;
        }
        // One-shot: detach before resolving.
        mm.removeMessageListener(name, listener);
        resolve();
        if (optionalCallback) {
          optionalCallback();
        } else {
          do_test_finished();
        }
      }
    };

    if (runningInParent) {
      mm = Cc["@mozilla.org/parentprocessmessagemanager;1"].getService(Ci.nsIMessageBroadcaster);
    } else {
      mm = Cc["@mozilla.org/childprocessmessagemanager;1"].getService(Ci.nsISyncMessageSender);
    }
    do_test_pending();
    mm.addMessageListener(name, listener);
  });
}
+
/**
 * Asynchronously send a message to all remote processes. Pairs with do_await_remote_message
 * or equivalent ProcessMessageManager listeners.
 *
 * In the parent the message is broadcast to all children; in a child it is
 * sent up to the parent.
 */
function do_send_remote_message(name) {
  if (runningInParent) {
    Cc["@mozilla.org/parentprocessmessagemanager;1"]
      .getService(Ci.nsIMessageBroadcaster)
      .broadcastAsyncMessage(name);
  } else {
    Cc["@mozilla.org/childprocessmessagemanager;1"]
      .getService(Ci.nsISyncMessageSender)
      .sendAsyncMessage(name);
  }
}
+
/**
 * Helper backing add_task.only(...) / add_test.only(...): marks the test's
 * properties with _only and records that an "only" test exists.
 *
 * @param addFunc
 *        The parent function to call, e.g. add_task or add_test.
 * @param funcOrProperties
 *        A function to be run or an object represents test properties.
 * @param func
 *        A function to be run only if the funcOrProperies is not a function.
 */
function _add_only(addFunc, funcOrProperties, func) {
  _gTestHasOnly = true;
  let properties = funcOrProperties;
  if (typeof properties == "function") {
    // Shorthand form: the "properties" argument is really the test function.
    func = properties;
    properties = {};
  }

  if (typeof properties == "object") {
    properties._only = true;
  }
  return addFunc(properties, func);
}
+
/**
 * Helper backing add_task.skip(...) / add_test.skip(...): forces the test's
 * skip_if property so the harness always skips it.
 *
 * @param addFunc
 *        The parent function to call, e.g. add_task or add_test.
 * @param funcOrProperties
 *        A function to be run or an object represents test properties.
 * @param func
 *        A function to be run only if the funcOrProperies is not a function.
 */
function _add_skip(addFunc, funcOrProperties, func) {
  let properties = funcOrProperties;
  if (typeof properties == "function") {
    // Shorthand form: the "properties" argument is really the test function.
    func = properties;
    properties = {};
  }

  if (typeof properties == "object") {
    // Unconditional skip; the harness evaluates skip_if per test.
    properties.skip_if = () => true;
  }
  return addFunc(properties, func);
}
+
+/**
+ * Add a test function to the list of tests that are to be run asynchronously.
+ *
+ * @param funcOrProperties
+ * A function to be run or an object represents test properties.
+ * Supported properties:
+ * skip_if : An arrow function which has an expression to be
+ * evaluated whether the test is skipped or not.
+ * @param func
+ * A function to be run only if the funcOrProperies is not a function.
+ *
+ * Each test function must call run_next_test() when it's done. Test files
+ * should call run_next_test() in their run_test function to execute all
+ * async tests.
+ *
+ * @return the test function that was passed in.
+ */
var _gTests = [];
function add_test(funcOrProperties, func) {
  if (typeof funcOrProperties == "function") {
    // Single-argument form: the function itself is the test.  Capture it in
    // `func` so the documented "@return the test function that was passed in"
    // contract holds (previously this form returned undefined).
    func = funcOrProperties;
    _gTests.push([{ _isTask: false }, func]);
  } else if (typeof funcOrProperties == "object") {
    funcOrProperties._isTask = false;
    _gTests.push([funcOrProperties, func]);
  } else {
    do_throw("add_test() should take a function or an object and a function");
  }
  return func;
}
// Variants usable as add_test.only(...) / add_test.skip(...); both delegate
// to add_test with the corresponding property set (_only / skip_if).
add_test.only = _add_only.bind(undefined, add_test);
add_test.skip = _add_skip.bind(undefined, add_test);
+
+/**
+ * Add a test function which is a Task function.
+ *
+ * @param funcOrProperties
+ * A generator function to be run or an object represents test
+ * properties.
+ * Supported properties:
+ * skip_if : An arrow function which has an expression to be
+ * evaluated whether the test is skipped or not.
+ * @param func
+ * A generator function to be run only if the funcOrProperies is not a
+ * function.
+ *
+ * Task functions are functions fed into Task.jsm's Task.spawn(). They are
+ * generators that emit promises.
+ *
+ * If an exception is thrown, a do_check_* comparison fails, or if a rejected
+ * promise is yielded, the test function aborts immediately and the test is
+ * reported as a failure.
+ *
+ * Unlike add_test(), there is no need to call run_next_test(). The next test
+ * will run automatically as soon the task function is exhausted. To trigger
+ * premature (but successful) termination of the function, simply return or
+ * throw a Task.Result instance.
+ *
+ * Example usage:
+ *
+ * add_task(function* test() {
+ * let result = yield Promise.resolve(true);
+ *
+ * do_check_true(result);
+ *
+ * let secondary = yield someFunctionThatReturnsAPromise(result);
+ * do_check_eq(secondary, "expected value");
+ * });
+ *
+ * add_task(function* test_early_return() {
+ * let result = yield somethingThatReturnsAPromise();
+ *
+ * if (!result) {
+ * // Test is ended immediately, with success.
+ * return;
+ * }
+ *
+ * do_check_eq(result, "foo");
+ * });
+ *
+ * add_task({
+ * skip_if: () => !("@mozilla.org/telephony/volume-service;1" in Components.classes),
+ * }, function* test_volume_service() {
+ * let volumeService = Cc["@mozilla.org/telephony/volume-service;1"]
+ * .getService(Ci.nsIVolumeService);
+ * ...
+ * });
+ */
function add_task(funcOrProperties, func) {
  const kind = typeof funcOrProperties;
  if (kind == "function") {
    // Shorthand form: the first argument is the task generator itself.
    _gTests.push([{ _isTask: true }, funcOrProperties]);
  } else if (kind == "object") {
    funcOrProperties._isTask = true;
    _gTests.push([funcOrProperties, func]);
  } else {
    do_throw("add_task() should take a function or an object and a function");
  }
}
// .only()/.skip() variants for add_task, mirroring the add_test ones above.
add_task.only = _add_only.bind(undefined, add_task);
add_task.skip = _add_skip.bind(undefined, add_task);

// Task.jsm powers add_task(); keep stacks across yields for better reporting.
var _Task = Components.utils.import("resource://gre/modules/Task.jsm", {}).Task;
_Task.Debugging.maintainStack = true;
+
+
/**
 * Runs the next test function from the list of async tests.
 *
 * Scheduling contract: the next test is queued via do_execute_soon() BEFORE
 * the previous test's do_test_finished() is issued, so the pending counter
 * never reaches zero while tests remain (do_execute_soon bumps the counter).
 * Must never be called from inside an add_task() function.
 */
var _gRunningTest = null;
var _gTestIndex = 0; // The index of the currently running test.
var _gTaskRunning = false;
var _gTestHasOnly = false;
function run_next_test()
{
  if (_gTaskRunning) {
    throw new Error("run_next_test() called from an add_task() test function. " +
                    "run_next_test() should not be called from inside add_task() " +
                    "under any circumstances!");
  }

  // Advances to the next entry in _gTests, honoring skip_if properties and
  // dispatching tasks through Task.spawn() vs. plain callback tests.
  function _run_next_test()
  {
    if (_gTestIndex < _gTests.length) {
      // Check for uncaught rejections as early and often as possible.
      _PromiseTestUtils.assertNoUncaughtRejections();
      let _properties;
      [_properties, _gRunningTest,] = _gTests[_gTestIndex++];
      if (typeof(_properties.skip_if) == "function" && _properties.skip_if()) {
        // Log the skip condition's source text for the test status message.
        let _condition = _properties.skip_if.toSource().replace(/\(\)\s*=>\s*/, "");
        let _message = _gRunningTest.name
                       + " skipped because the following conditions were"
                       + " met: (" + _condition + ")";
        _testLogger.testStatus(_TEST_NAME,
                               _gRunningTest.name,
                               "SKIP",
                               "SKIP",
                               _message);
        do_execute_soon(run_next_test);
        return;
      }
      _testLogger.info(_TEST_NAME + " | Starting " + _gRunningTest.name);
      do_test_pending(_gRunningTest.name);

      if (_properties._isTask) {
        // Task test: the next test is triggered when the task settles.
        _gTaskRunning = true;
        _Task.spawn(_gRunningTest).then(() => {
          _gTaskRunning = false;
          run_next_test();
        }, ex => {
          _gTaskRunning = false;
          try {
            do_report_unexpected_exception(ex);
          } catch (ex) {
            // The above throws NS_ERROR_ABORT and we don't want this to show up
            // as an unhandled rejection later.
          }
        });
      } else {
        // Exceptions do not kill asynchronous tests, so they'll time out.
        try {
          _gRunningTest();
        } catch (e) {
          do_throw(e);
        }
      }
    }
  }

  // For sane stacks during failures, we execute this code soon, but not now.
  // We do this now, before we call do_test_finished(), to ensure the pending
  // counter (_tests_pending) never reaches 0 while we still have tests to run
  // (do_execute_soon bumps that counter).
  do_execute_soon(_run_next_test, "run_next_test " + _gTestIndex);

  if (_gRunningTest !== null) {
    // Close the previous test do_test_pending call.
    do_test_finished(_gRunningTest.name);
  }
}
+
// Harness-wide pref setup (parent process only).  Wrapped in try/catch so a
// missing pref service in exotic shells cannot abort head.js evaluation.
try {
  if (runningInParent) {
    // Always use network provider for geolocation tests
    // so we bypass the OSX dialog raised by the corelocation provider
    let prefs = Components.classes["@mozilla.org/preferences-service;1"]
      .getService(Components.interfaces.nsIPrefBranch);

    prefs.setBoolPref("geo.provider.testing", true);
  }
} catch (e) { }
+
// We need to avoid hitting the network with certain components.
// Point every auto-update/telemetry endpoint at harness-local dummy URLs.
try {
  if (runningInParent) {
    let prefs = Components.classes["@mozilla.org/preferences-service;1"]
      .getService(Components.interfaces.nsIPrefBranch);

    prefs.setCharPref("media.gmp-manager.url.override", "http://%(server)s/dummy-gmp-manager.xml");
    // updateEnabled is a boolean pref: setCharPref(..., false) threw inside
    // this try and silently left the pref unset; use the typed setter.
    prefs.setBoolPref("media.gmp-manager.updateEnabled", false);
    prefs.setCharPref("extensions.systemAddon.update.url", "http://%(server)s/dummy-system-addons.xml");
    prefs.setCharPref("browser.selfsupport.url", "https://%(server)s/selfsupport-dummy/");
    prefs.setCharPref("toolkit.telemetry.server", "https://%(server)s/telemetry-dummy");
    prefs.setCharPref("browser.search.geoip.url", "https://%(server)s/geoip-dummy");
  }
} catch (e) { }
+
// Make tests run consistently on DevEdition (which has a lightweight theme
// selected by default).  Clearing the branches restores default theming;
// failures are non-fatal by design.
try {
  if (runningInParent) {
    let prefs = Components.classes["@mozilla.org/preferences-service;1"]
      .getService(Components.interfaces.nsIPrefBranch);

    prefs.deleteBranch("lightweightThemes.selectedThemeID");
    prefs.deleteBranch("browser.devedition.theme.enabled");
  }
} catch (e) { }
+
/**
 * Reads and JSON-decodes the mozinfo file at _MOZINFO_JS_PATH.
 *
 * @return the decoded mozinfo object.
 */
function _load_mozinfo() {
  let mozinfoFile = Components.classes["@mozilla.org/file/local;1"]
                              .createInstance(Components.interfaces.nsIFile);
  mozinfoFile.initWithPath(_MOZINFO_JS_PATH);
  let stream = Components.classes["@mozilla.org/network/file-input-stream;1"]
                         .createInstance(Components.interfaces.nsIFileInputStream);
  stream.init(mozinfoFile, -1, 0, 0);
  let json = Components.classes["@mozilla.org/dom/json;1"]
                       .createInstance(Components.interfaces.nsIJSON);
  let mozinfo;
  try {
    mozinfo = json.decodeFromStream(stream, stream.available());
  } finally {
    // Close even when decoding throws; previously the stream leaked on error.
    stream.close();
  }
  return mozinfo;
}
+
// Lazy `mozinfo` accessor: the file is only read on first use, after which
// the getter replaces itself with a plain (non-configurable) value property.
Object.defineProperty(this, "mozinfo", {
  configurable: true,
  get() {
    let _mozinfo = _load_mozinfo();
    Object.defineProperty(this, "mozinfo", {
      configurable: false,
      value: _mozinfo
    });
    return _mozinfo;
  }
});
diff --git a/testing/xpcshell/mach_commands.py b/testing/xpcshell/mach_commands.py
new file mode 100644
index 0000000000..d821ff46fb
--- /dev/null
+++ b/testing/xpcshell/mach_commands.py
@@ -0,0 +1,274 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+# Integrates the xpcshell test runner with mach.
+
+from __future__ import absolute_import, unicode_literals, print_function
+
+import argparse
+import errno
+import os
+import sys
+
+from mozlog import structured
+
+from mozbuild.base import (
+ MachCommandBase,
+ MozbuildObject,
+ MachCommandConditions as conditions,
+)
+
+from mach.decorators import (
+ CommandProvider,
+ Command,
+)
+
+from xpcshellcommandline import parser_desktop, parser_remote
+
+here = os.path.abspath(os.path.dirname(__file__))
+
# Py2/Py3 shim: the native text type (unicode on 2.x, str on 3.x).
unicode_type = unicode if sys.version_info[0] < 3 else str
+
+
+# This should probably be consolidated with similar classes in other test
+# runners.
class InvalidTestPathError(Exception):
    """Raised when a supplied test path does not resolve to a valid test."""
+
+
class XPCShellRunner(MozbuildObject):
    """Run xpcshell tests."""
    def run_suite(self, **kwargs):
        # Thin wrapper so callers can distinguish "whole suite" from
        # "single test" entry points; both funnel into the harness.
        return self._run_xpcshell_harness(**kwargs)

    def run_test(self, **kwargs):
        """Runs an individual xpcshell test."""

        # TODO Bug 794506 remove once mach integrates with virtualenv.
        build_path = os.path.join(self.topobjdir, 'build')
        if build_path not in sys.path:
            sys.path.append(build_path)

        src_build_path = os.path.join(self.topsrcdir, 'mozilla', 'build')
        if os.path.isdir(src_build_path):
            sys.path.append(src_build_path)

        return self.run_suite(**kwargs)

    def _run_xpcshell_harness(self, **kwargs):
        # Fill in defaults for every harness option that mach did not supply,
        # then hand off to runxpcshelltests.XPCShellTests.
        # Obtain a reference to the xpcshell test runner.
        import runxpcshelltests

        log = kwargs.pop("log")

        xpcshell = runxpcshelltests.XPCShellTests(log=log)
        self.log_manager.enable_unstructured()

        tests_dir = os.path.join(self.topobjdir, '_tests', 'xpcshell')
        # We want output from the test to be written immediately if we are only
        # running a single test.
        # NOTE(review): the mixed and/or below relies on precedence,
        # i.e. (A and B) or (C and D) — confirm intent before reformatting.
        single_test = (len(kwargs["testPaths"]) == 1 and
                       os.path.isfile(kwargs["testPaths"][0]) or
                       kwargs["manifest"] and
                       (len(kwargs["manifest"].test_paths()) == 1))

        if single_test:
            kwargs["verbose"] = True

        if kwargs["xpcshell"] is None:
            kwargs["xpcshell"] = self.get_binary_path('xpcshell')

        if kwargs["mozInfo"] is None:
            kwargs["mozInfo"] = os.path.join(self.topobjdir, 'mozinfo.json')

        if kwargs["symbolsPath"] is None:
            kwargs["symbolsPath"] = os.path.join(self.distdir, 'crashreporter-symbols')

        if kwargs["logfiles"] is None:
            kwargs["logfiles"] = False

        if kwargs["profileName"] is None:
            kwargs["profileName"] = "firefox"

        if kwargs["pluginsPath"] is None:
            kwargs['pluginsPath'] = os.path.join(self.distdir, 'plugins')

        if kwargs["testingModulesDir"] is None:
            kwargs["testingModulesDir"] = os.path.join(self.topobjdir, '_tests/modules')

        if kwargs["utility_path"] is None:
            kwargs['utility_path'] = self.bindir

        if kwargs["manifest"] is None:
            kwargs["manifest"] = os.path.join(tests_dir, "xpcshell.ini")

        if kwargs["failure_manifest"] is None:
            kwargs["failure_manifest"] = os.path.join(self.statedir, 'xpcshell.failures.ini')

        # Use the object directory for the temp directory to minimize the chance
        # of file scanning. The overhead from e.g. search indexers and anti-virus
        # scanners like Windows Defender can add tons of overhead to test execution.
        # We encourage people to disable these things in the object directory.
        temp_dir = os.path.join(self.topobjdir, 'temp')
        try:
            os.mkdir(temp_dir)
        except OSError as e:
            # An existing temp dir is fine; anything else is a real error.
            if e.errno != errno.EEXIST:
                raise
        kwargs['tempDir'] = temp_dir

        # Python through 2.7.2 has issues with unicode in some of the
        # arguments. Work around that.
        filtered_args = {}
        for k, v in kwargs.iteritems():
            if isinstance(v, unicode_type):
                v = v.encode('utf-8')

            if isinstance(k, unicode_type):
                k = k.encode('utf-8')

            filtered_args[k] = v

        result = xpcshell.runTests(**filtered_args)

        self.log_manager.disable_unstructured()

        if not result and not xpcshell.sequential:
            print("Tests were run in parallel. Try running with --sequential "
                  "to make sure the failures were not caused by this.")
        # runTests returns truthy on success; mach wants 0 on success.
        return int(not result)
+
+
class AndroidXPCShellRunner(MozbuildObject):
    """Get specified DeviceManager"""
    def get_devicemanager(self, devicemanager, ip, port, remote_test_root):
        # "adb" yields a DroidADB (optionally targeting a networked device);
        # anything else is treated as SUT and requires an explicit ip.
        import mozdevice
        dm = None
        if devicemanager == "adb":
            if ip:
                dm = mozdevice.DroidADB(ip, port, packageName=None, deviceRoot=remote_test_root)
            else:
                dm = mozdevice.DroidADB(packageName=None, deviceRoot=remote_test_root)
        else:
            if ip:
                dm = mozdevice.DroidSUT(ip, port, deviceRoot=remote_test_root)
            else:
                raise Exception("You must provide a device IP to connect to via the --ip option")
        return dm

    # NOTE(review): the string below is a stray expression statement, not a
    # docstring — it is attached to nothing (it sits between methods).
    """Run Android xpcshell tests."""
    def run_test(self, **kwargs):
        # Fill in Android-specific defaults and run the remote harness.
        # TODO Bug 794506 remove once mach integrates with virtualenv.
        build_path = os.path.join(self.topobjdir, 'build')
        if build_path not in sys.path:
            sys.path.append(build_path)

        import remotexpcshelltests

        dm = self.get_devicemanager(kwargs["dm_trans"], kwargs["deviceIP"], kwargs["devicePort"],
                                    kwargs["remoteTestRoot"])

        log = kwargs.pop("log")
        self.log_manager.enable_unstructured()

        if kwargs["xpcshell"] is None:
            kwargs["xpcshell"] = "xpcshell"

        if not kwargs["objdir"]:
            kwargs["objdir"] = self.topobjdir

        if not kwargs["localLib"]:
            kwargs["localLib"] = os.path.join(self.topobjdir, 'dist/fennec')

        if not kwargs["localBin"]:
            kwargs["localBin"] = os.path.join(self.topobjdir, 'dist/bin')

        if not kwargs["testingModulesDir"]:
            kwargs["testingModulesDir"] = os.path.join(self.topobjdir, '_tests/modules')

        if not kwargs["mozInfo"]:
            kwargs["mozInfo"] = os.path.join(self.topobjdir, 'mozinfo.json')

        if not kwargs["manifest"]:
            kwargs["manifest"] = os.path.join(self.topobjdir, '_tests/xpcshell/xpcshell.ini')

        if not kwargs["symbolsPath"]:
            kwargs["symbolsPath"] = os.path.join(self.distdir, 'crashreporter-symbols')

        if not kwargs["localAPK"]:
            # Use the first fennec*.apk found in dist/; the for/else raises
            # when no candidate exists.
            for file_name in os.listdir(os.path.join(kwargs["objdir"], "dist")):
                if file_name.endswith(".apk") and file_name.startswith("fennec"):
                    kwargs["localAPK"] = os.path.join(kwargs["objdir"], "dist", file_name)
                    print ("using APK: %s" % kwargs["localAPK"])
                    break
            else:
                raise Exception("APK not found in objdir. You must specify an APK.")

        # NOTE(review): this forces sequential=True whenever it is falsy, so
        # Android runs are effectively always sequential — confirm intended.
        if not kwargs["sequential"]:
            kwargs["sequential"] = True

        options = argparse.Namespace(**kwargs)
        xpcshell = remotexpcshelltests.XPCShellRemote(dm, options, log)

        result = xpcshell.runTests(testClass=remotexpcshelltests.RemoteXPCShellTestThread,
                                   mobileArgs=xpcshell.mobileArgs,
                                   **vars(options))

        self.log_manager.disable_unstructured()

        # runTests returns truthy on success; mach wants 0 on success.
        return int(not result)
+
+
+def get_parser():
+ build_obj = MozbuildObject.from_environment(cwd=here)
+ if conditions.is_android(build_obj):
+ return parser_remote()
+ else:
+ return parser_desktop()
+
+
@CommandProvider
class MachCommands(MachCommandBase):
    @Command('xpcshell-test', category='testing',
             description='Run XPCOM Shell tests (API direct unit testing)',
             conditions=[lambda *args: True],
             parser=get_parser)
    def run_xpcshell_test(self, test_objects=None, **params):
        # Entry point for `mach xpcshell-test`: installs tests, sets up
        # structured logging, then dispatches to the desktop or Android runner.
        from mozbuild.controller.building import BuildDriver

        if test_objects is not None:
            # A higher-level command (e.g. `mach test`) already resolved the
            # tests; wrap them in a manifest instead of re-parsing paths.
            from manifestparser import TestManifest
            m = TestManifest()
            m.tests.extend(test_objects)
            params['manifest'] = m

        driver = self._spawn(BuildDriver)
        driver.install_tests(test_objects)

        # We should probably have a utility function to ensure the tree is
        # ready to run tests. Until then, we just create the state dir (in
        # case the tree wasn't built with mach).
        self._ensure_state_subdir_exists('.')

        params['log'] = structured.commandline.setup_logging("XPCShellTests",
                                                             params,
                                                             {"mach": sys.stdout},
                                                             {"verbose": True})

        if conditions.is_android(self):
            from mozrunner.devices.android_device import verify_android_device
            verify_android_device(self)
            xpcshell = self._spawn(AndroidXPCShellRunner)
        else:
            xpcshell = self._spawn(XPCShellRunner)
        xpcshell.cwd = self._mach_context.cwd

        try:
            return xpcshell.run_test(**params)
        except InvalidTestPathError as e:
            # NOTE(review): e.message is Python 2 only — revisit on py3 port.
            print(e.message)
            return 1
diff --git a/testing/xpcshell/mach_test_package_commands.py b/testing/xpcshell/mach_test_package_commands.py
new file mode 100644
index 0000000000..fc7d273859
--- /dev/null
+++ b/testing/xpcshell/mach_test_package_commands.py
@@ -0,0 +1,64 @@
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+from __future__ import unicode_literals
+
+import os
+import sys
+from argparse import Namespace
+from functools import partial
+
+
+import mozlog
+from xpcshellcommandline import parser_desktop
+
+from mach.decorators import (
+ CommandProvider,
+ Command,
+)
+
+
def run_xpcshell(context, **kwargs):
    # Run xpcshell tests from an extracted test package (no objdir).
    # `context` supplies paths resolved by the mach test-package bootstrap.
    args = Namespace(**kwargs)
    args.appPath = args.appPath or os.path.dirname(context.firefox_bin)
    args.e10s = context.mozharness_config.get('e10s', args.e10s)
    args.utility_path = context.bin_dir
    args.testingModulesDir = context.modules_dir

    if not args.xpcshell:
        args.xpcshell = os.path.join(args.appPath, 'xpcshell')

    if not args.pluginsPath:
        # Look for a plugins/ directory near the app (up to two levels up).
        for path in context.ancestors(args.appPath, depth=2):
            test = os.path.join(path, 'plugins')
            if os.path.isdir(test):
                args.pluginsPath = test
                break

    log = mozlog.commandline.setup_logging("XPCShellTests",
                                           args,
                                           {"mach": sys.stdout},
                                           {"verbose": True})

    if args.testPaths:
        test_root = os.path.join(context.package_root, 'xpcshell', 'tests')
        normalize = partial(context.normalize_test_path, test_root)
        # NOTE(review): map() returns a list on py2; a py3 port needs list().
        args.testPaths = map(normalize, args.testPaths)

    import runxpcshelltests
    xpcshell = runxpcshelltests.XPCShellTests(log=log)
    return xpcshell.runTests(**vars(args))
+
+
@CommandProvider
class MochitestCommands(object):
    # NOTE(review): class name looks copy-pasted from the mochitest harness —
    # it actually registers the xpcshell-test command.  Renaming would be
    # cosmetic; confirm nothing resolves providers by class name first.

    def __init__(self, context):
        self.context = context

    @Command('xpcshell-test', category='testing',
             description='Run the xpcshell harness.',
             parser=parser_desktop)
    def xpcshell(self, **kwargs):
        # Delegate to the module-level runner with the bootstrap context.
        return run_xpcshell(self.context, **kwargs)
diff --git a/testing/xpcshell/moz-http2/http2-cert.pem b/testing/xpcshell/moz-http2/http2-cert.pem
new file mode 100644
index 0000000000..d5944e5316
--- /dev/null
+++ b/testing/xpcshell/moz-http2/http2-cert.pem
@@ -0,0 +1,79 @@
+Certificate:
+ Data:
+ Version: 3 (0x2)
+ Serial Number: 1 (0x1)
+ Signature Algorithm: sha256WithRSAEncryption
+ Issuer: C=US, ST=Maine, O=CA Example
+ Validity
+ Not Before: Apr 29 05:29:19 2015 GMT
+ Not After : Apr 26 05:29:19 2025 GMT
+ Subject: C=US, ST=Maine, O=Example Com, CN=foo.example.com
+ Subject Public Key Info:
+ Public Key Algorithm: rsaEncryption
+ Public-Key: (2048 bit)
+ Modulus:
+ 00:cf:ff:c0:27:3b:a3:11:b5:7f:5d:4f:22:f9:75:
+ 48:47:d9:3a:ce:9b:66:82:4e:e4:ae:ab:78:d3:4c:
+ 3a:9a:5c:37:97:b2:7b:4e:2a:54:77:16:2a:3e:6f:
+ 52:ee:4b:49:46:1d:6b:18:9a:ed:b1:ad:64:9f:8b:
+ e5:fa:e4:60:7b:39:0e:db:e8:b4:2d:4b:e8:ab:37:
+ e8:90:ec:eb:0f:3e:6b:40:7a:d1:da:e6:68:b3:f4:
+ f6:68:54:5b:27:90:6d:c2:c3:04:de:85:23:2b:3c:
+ 66:4e:06:79:58:93:a1:71:d7:ec:74:55:a4:84:9d:
+ 41:22:2a:7a:76:ae:56:b1:6f:15:2d:f2:f5:9c:64:
+ 3e:4f:0f:6e:8f:b6:28:66:e9:89:04:5d:1d:21:77:
+ f8:03:d3:89:ed:7c:f4:3b:42:02:c8:8d:de:47:74:
+ 1f:4a:5d:fe:8d:d1:57:37:08:54:bf:89:d8:f7:27:
+ 22:a7:2a:5d:aa:d5:b0:61:22:9b:96:75:ee:ab:09:
+ ca:a9:cb:2b:1e:88:7c:5a:53:7e:5f:88:c4:43:ea:
+ e8:a7:db:35:6c:b2:89:ad:98:e0:96:c9:83:c4:c1:
+ e7:2a:5c:f8:99:5c:9e:01:9c:e6:99:bd:18:5c:69:
+ d4:10:f1:46:88:37:0b:4e:76:5f:6a:1a:21:c2:a4:
+ 16:d1
+ Exponent: 65537 (0x10001)
+ X509v3 extensions:
+ X509v3 Subject Key Identifier:
+ 76:BC:13:90:F7:85:1B:1C:24:A1:CC:65:8A:4F:4C:0C:7F:10:D3:F5
+ X509v3 Authority Key Identifier:
+ keyid:F7:FC:76:AF:C5:1A:E9:C9:42:6C:38:DF:8B:07:9E:2B:2C:E5:8E:20
+
+ X509v3 Basic Constraints:
+ CA:FALSE
+ X509v3 Key Usage:
+ Digital Signature, Key Encipherment
+ Signature Algorithm: sha256WithRSAEncryption
+ 03:ab:2a:9e:e5:cd:5c:88:5a:6c:f7:4b:7a:7c:ef:85:2c:31:
+ df:03:79:31:a6:c5:c8:2b:c6:21:a5:33:2b:a0:4b:e2:7e:0a:
+ 86:9b:72:25:b6:75:43:41:7c:30:9f:15:b4:9f:34:50:57:eb:
+ 87:f9:1e:9f:b6:cd:81:36:92:61:66:d5:fe:e2:c5:ed:de:f1:
+ ce:85:0b:f9:6a:2b:32:4d:29:f1:a9:94:57:a3:0f:74:93:12:
+ c9:0a:28:5e:72:9f:4f:0f:78:f5:84:11:5a:9f:d7:1c:4c:fd:
+ 13:d8:3d:4c:f8:dd:4c:c6:1c:fd:63:ee:f5:d5:96:f5:00:2c:
+ e6:bb:c9:4c:d8:6a:19:59:58:2b:d4:05:ab:57:47:1c:49:d6:
+ c5:56:1a:e3:64:10:19:9b:44:3e:74:8b:19:73:28:86:96:b4:
+ d1:2a:49:23:07:25:97:64:8f:1b:1c:64:76:12:e0:df:e3:cf:
+ 55:d5:7c:e9:77:d4:69:2f:c7:9a:fd:ce:1a:29:ab:d7:88:68:
+ 93:de:75:e4:d6:85:29:e2:b6:b7:59:20:e3:b5:20:b7:e8:0b:
+ 23:9b:4c:b4:e8:d9:90:cf:e9:2f:9e:a8:22:a2:ef:6a:68:65:
+ f6:c4:81:ed:75:77:88:01:f2:47:03:1a:de:1f:44:38:47:fa:
+ aa:69:f2:98
+-----BEGIN CERTIFICATE-----
+MIIDVDCCAjygAwIBAgIBATANBgkqhkiG9w0BAQsFADAyMQswCQYDVQQGEwJVUzEO
+MAwGA1UECAwFTWFpbmUxEzARBgNVBAoMCkNBIEV4YW1wbGUwHhcNMTUwNDI5MDUy
+OTE5WhcNMjUwNDI2MDUyOTE5WjBNMQswCQYDVQQGEwJVUzEOMAwGA1UECAwFTWFp
+bmUxFDASBgNVBAoMC0V4YW1wbGUgQ29tMRgwFgYDVQQDDA9mb28uZXhhbXBsZS5j
+b20wggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDP/8AnO6MRtX9dTyL5
+dUhH2TrOm2aCTuSuq3jTTDqaXDeXsntOKlR3Fio+b1LuS0lGHWsYmu2xrWSfi+X6
+5GB7OQ7b6LQtS+irN+iQ7OsPPmtAetHa5miz9PZoVFsnkG3CwwTehSMrPGZOBnlY
+k6Fx1+x0VaSEnUEiKnp2rlaxbxUt8vWcZD5PD26Ptihm6YkEXR0hd/gD04ntfPQ7
+QgLIjd5HdB9KXf6N0Vc3CFS/idj3JyKnKl2q1bBhIpuWde6rCcqpyyseiHxaU35f
+iMRD6uin2zVssomtmOCWyYPEwecqXPiZXJ4BnOaZvRhcadQQ8UaINwtOdl9qGiHC
+pBbRAgMBAAGjWjBYMB0GA1UdDgQWBBR2vBOQ94UbHCShzGWKT0wMfxDT9TAfBgNV
+HSMEGDAWgBT3/HavxRrpyUJsON+LB54rLOWOIDAJBgNVHRMEAjAAMAsGA1UdDwQE
+AwIFoDANBgkqhkiG9w0BAQsFAAOCAQEAA6sqnuXNXIhabPdLenzvhSwx3wN5MabF
+yCvGIaUzK6BL4n4KhptyJbZ1Q0F8MJ8VtJ80UFfrh/ken7bNgTaSYWbV/uLF7d7x
+zoUL+WorMk0p8amUV6MPdJMSyQooXnKfTw949YQRWp/XHEz9E9g9TPjdTMYc/WPu
+9dWW9QAs5rvJTNhqGVlYK9QFq1dHHEnWxVYa42QQGZtEPnSLGXMohpa00SpJIwcl
+l2SPGxxkdhLg3+PPVdV86XfUaS/Hmv3OGimr14hok9515NaFKeK2t1kg47Ugt+gL
+I5tMtOjZkM/pL56oIqLvamhl9sSB7XV3iAHyRwMa3h9EOEf6qmnymA==
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/moz-http2/http2-key.pem b/testing/xpcshell/moz-http2/http2-key.pem
new file mode 100644
index 0000000000..387449ddc5
--- /dev/null
+++ b/testing/xpcshell/moz-http2/http2-key.pem
@@ -0,0 +1,28 @@
+-----BEGIN PRIVATE KEY-----
+MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQDP/8AnO6MRtX9d
+TyL5dUhH2TrOm2aCTuSuq3jTTDqaXDeXsntOKlR3Fio+b1LuS0lGHWsYmu2xrWSf
+i+X65GB7OQ7b6LQtS+irN+iQ7OsPPmtAetHa5miz9PZoVFsnkG3CwwTehSMrPGZO
+BnlYk6Fx1+x0VaSEnUEiKnp2rlaxbxUt8vWcZD5PD26Ptihm6YkEXR0hd/gD04nt
+fPQ7QgLIjd5HdB9KXf6N0Vc3CFS/idj3JyKnKl2q1bBhIpuWde6rCcqpyyseiHxa
+U35fiMRD6uin2zVssomtmOCWyYPEwecqXPiZXJ4BnOaZvRhcadQQ8UaINwtOdl9q
+GiHCpBbRAgMBAAECggEBAKqcsQQ9cdQr2S4zpI+UuVZeBFPGun32srPn6TMA2y0U
+qXEgMO574E7SepI+BHt8e70sklVbd89/WANa4Kk8vTs2IU8XAPwKwO347SY7f9BA
+Nf9s/0gcKRQ7wgbv8tlwKehQyWSxNpjXcV9dBho29n2ITOdb/Jfe2bdpuowxEuF0
+rugkKh7P7LJTG1SAw01UTIszoOGIqHU2XlmYQOws4EvRov/BRTn9axBHH33top+m
+dX+96ntgWxdHOJjTcoXLGhTu1c0ZlJgtgEaH03jjy0f+3Qc+jIgbaZ4WLZkF/oZh
+hscL56XhsT3hR2Sdtxccw2zZ0exLO+qV1RykIAlUXkECgYEA7U+ljowyPxbREHnf
+SRTauIZfJNP6IHT60MkslltlYn7jABvx+u2xCC/QhZxCJi/iAs6iNvkbXR6uK/MH
+NrXwdk67SDUXaDZ9LM3rXPqjuwmvkc+e7P5an6KRtyzQD8K8mjbze1NfxbcGgKti
+A+8GL8H3V29EQ6xp2+UxIF/3UNkCgYEA4GEm9NLbu4neP+A+1NpUS4tUgMCdTkPm
+fiOECd4jjTizPZjjrk+zTin9aP+eBRYHharIGrDP2Uj98uv4kQ8u0rQbcjPwitog
+8DgccMQ92E6DYGDGECh5Hg2Zu71+zQQNzOEJTyrFLx4Gf5SkBzLlbDZDpNhbuQc9
+zvRYBc11urkCgYBOu2Dy9SJqefhsnfJtfaS/GZ2RS16tzAG2qTfIvpPZZL2NOLhE
+hv13+N0WpuvvXW1/fuykjmr8rwQcAqo/BYe8yIwr/alBYuqOpdbTZzhRAnqkRpy0
+hgKs+bOccRqqT/Jgu6B2JwgcQYe/wpxnL7L+vzx/XqPoS9hnIxf0ZMJZqQKBgQDa
+KJuf3oQWS23z3Sw5+C2NZeK7bIuF1S795bozffBDFqXvdf+pM4S6ssjYlfAmMc0O
+gYYdrVvpf7apwhTjtUdpRgSJfUabOopcBbJhUexvq6bAxlbMzw0z0zVt/EiVPSPN
+198dQhCGR0M6OGNjPHEkTX5ngJVtyUSnO5t5yNJ2wQKBgQDheEUJYgo2UjLNsdTs
+b4og5gHkyoKS3paWV64itJQbVBuri4HWeIExM9ayBB6nSJ2VvpZPyE6XfiYYGNhR
+jOc394qlnrx+oi2KdSmIWfQU0I+rW3bMqpoyWPYxP/hN6w4LAwjnJOSOIMCACm5J
+d8IebWjY2B3Zc6FFVzbmhXtlig==
+-----END PRIVATE KEY-----
diff --git a/testing/xpcshell/moz-http2/moz-http2.js b/testing/xpcshell/moz-http2/moz-http2.js
new file mode 100644
index 0000000000..760fef1ef3
--- /dev/null
+++ b/testing/xpcshell/moz-http2/moz-http2.js
@@ -0,0 +1,786 @@
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This module is the stateful server side of test_http2.js and is meant
+// to have node be restarted in between each invocation
+
+var node_http2_root = '../node-http2';
+if (process.env.NODE_HTTP2_ROOT) {
+ node_http2_root = process.env.NODE_HTTP2_ROOT;
+}
+var http2 = require(node_http2_root);
+var fs = require('fs');
+var url = require('url');
+var crypto = require('crypto');
+
+// Hook into the decompression code to log the decompressed name-value pairs
+var compression_module = node_http2_root + "/lib/protocol/compressor";
+var http2_compression = require(compression_module);
+var HeaderSetDecompressor = http2_compression.HeaderSetDecompressor;
+var originalRead = HeaderSetDecompressor.prototype.read;
+var lastDecompressor;
+var decompressedPairs;
+HeaderSetDecompressor.prototype.read = function() {
+ if (this != lastDecompressor) {
+ lastDecompressor = this;
+ decompressedPairs = [];
+ }
+ var pair = originalRead.apply(this, arguments);
+ if (pair) {
+ decompressedPairs.push(pair);
+ }
+ return pair;
+}
+
+var connection_module = node_http2_root + "/lib/protocol/connection";
+var http2_connection = require(connection_module);
+var Connection = http2_connection.Connection;
+var originalClose = Connection.prototype.close;
+Connection.prototype.close = function (error, lastId) {
+ if (lastId !== undefined) {
+ this._lastIncomingStream = lastId;
+ }
+
+ originalClose.apply(this, arguments);
+}
+
+var framer_module = node_http2_root + "/lib/protocol/framer";
+var http2_framer = require(framer_module);
+var Serializer = http2_framer.Serializer;
+var originalTransform = Serializer.prototype._transform;
+var newTransform = function (frame, encoding, done) {
+ if (frame.type == 'DATA') {
+ // Insert our empty DATA frame
+ emptyFrame = {};
+ emptyFrame.type = 'DATA';
+ emptyFrame.data = new Buffer(0);
+ emptyFrame.flags = [];
+ emptyFrame.stream = frame.stream;
+ var buffers = [];
+ Serializer['DATA'](emptyFrame, buffers);
+ Serializer.commonHeader(emptyFrame, buffers);
+ for (var i = 0; i < buffers.length; i++) {
+ this.push(buffers[i]);
+ }
+
+ // Reset to the original version for later uses
+ Serializer.prototype._transform = originalTransform;
+ }
+ originalTransform.apply(this, arguments);
+};
+
+function getHttpContent(path) {
+ var content = '<!doctype html>' +
+ '<html>' +
+ '<head><title>HOORAY!</title></head>' +
+ '<body>You Win! (by requesting' + path + ')</body>' +
+ '</html>';
+ return content;
+}
+
+function generateContent(size) {
+ var content = '';
+ for (var i = 0; i < size; i++) {
+ content += '0';
+ }
+ return content;
+}
+
+/* This takes care of responding to the multiplexed request for us */
+var m = {
+ mp1res: null,
+ mp2res: null,
+ buf: null,
+ mp1start: 0,
+ mp2start: 0,
+
+ checkReady: function() {
+ if (this.mp1res != null && this.mp2res != null) {
+ this.buf = generateContent(30*1024);
+ this.mp1start = 0;
+ this.mp2start = 0;
+ this.send(this.mp1res, 0);
+ setTimeout(this.send.bind(this, this.mp2res, 0), 5);
+ }
+ },
+
+ send: function(res, start) {
+ var end = Math.min(start + 1024, this.buf.length);
+ var content = this.buf.substring(start, end);
+ res.write(content);
+ if (end < this.buf.length) {
+ setTimeout(this.send.bind(this, res, end), 10);
+ } else {
+ res.end();
+ }
+ }
+};
+
+var runlater = function() {};
+runlater.prototype = {
+ req : null,
+ resp : null,
+
+ onTimeout : function onTimeout() {
+ this.resp.writeHead(200);
+ this.resp.end("It's all good 750ms.");
+ }
+};
+
+var moreData = function() {};
+moreData.prototype = {
+ req : null,
+ resp : null,
+ iter: 3,
+
+ onTimeout : function onTimeout() {
+ // 1mb of data
+ content = generateContent(1024*1024);
+ this.resp.write(content); // 1mb chunk
+ this.iter--;
+ if (!this.iter) {
+ this.resp.end();
+ } else {
+ setTimeout(executeRunLater, 1, this);
+ }
+ }
+};
+
+function executeRunLater(arg) {
+ arg.onTimeout();
+}
+
+var Compressor = http2_compression.Compressor;
+var HeaderSetCompressor = http2_compression.HeaderSetCompressor;
+var originalCompressHeaders = Compressor.prototype.compress;
+
+function insertSoftIllegalHpack(headers) {
+ var originalCompressed = originalCompressHeaders.apply(this, headers);
+ var illegalLiteral = new Buffer([
+ 0x00, // Literal, no index
+ 0x08, // Name: not huffman encoded, 8 bytes long
+ 0x3a, 0x69, 0x6c, 0x6c, 0x65, 0x67, 0x61, 0x6c, // :illegal
+ 0x10, // Value: not huffman encoded, 16 bytes long
+ // REALLY NOT LEGAL
+ 0x52, 0x45, 0x41, 0x4c, 0x4c, 0x59, 0x20, 0x4e, 0x4f, 0x54, 0x20, 0x4c, 0x45, 0x47, 0x41, 0x4c
+ ]);
+ var newBufferLength = originalCompressed.length + illegalLiteral.length;
+ var concatenated = new Buffer(newBufferLength);
+ originalCompressed.copy(concatenated, 0);
+ illegalLiteral.copy(concatenated, originalCompressed.length);
+ return concatenated;
+}
+
+function insertHardIllegalHpack(headers) {
+ var originalCompressed = originalCompressHeaders.apply(this, headers);
+ // Now we have to add an invalid header
+ var illegalIndexed = HeaderSetCompressor.integer(5000, 7);
+ // The above returns an array of buffers, but there's only one buffer, so
+ // get rid of the array.
+ illegalIndexed = illegalIndexed[0];
+ // Set the first bit to 1 to signal this is an indexed representation
+ illegalIndexed[0] |= 0x80;
+ var newBufferLength = originalCompressed.length + illegalIndexed.length;
+ var concatenated = new Buffer(newBufferLength);
+ originalCompressed.copy(concatenated, 0);
+ illegalIndexed.copy(concatenated, originalCompressed.length);
+ return concatenated;
+}
+
+var h11required_conn = null;
+var h11required_header = "yes";
+var didRst = false;
+var rstConnection = null;
+var illegalheader_conn = null;
+
+function handleRequest(req, res) {
+ // We do this first to ensure nothing goes wonky in our tests that don't want
+ // the headers to have something illegal in them
+ Compressor.prototype.compress = originalCompressHeaders;
+
+ var u = url.parse(req.url);
+ var content = getHttpContent(u.pathname);
+ var push, push1, push1a, push2, push3;
+
+ // PushService tests.
+ var pushPushServer1, pushPushServer2, pushPushServer3, pushPushServer4;
+
+ if (req.httpVersionMajor === 2) {
+ res.setHeader('X-Connection-Http2', 'yes');
+ res.setHeader('X-Http2-StreamId', '' + req.stream.id);
+ } else {
+ res.setHeader('X-Connection-Http2', 'no');
+ }
+
+ if (u.pathname === '/exit') {
+ res.setHeader('Content-Type', 'text/plain');
+ res.setHeader('Connection', 'close');
+ res.writeHead(200);
+ res.end('ok');
+ process.exit();
+ }
+
+ if (u.pathname === '/750ms') {
+ var rl = new runlater();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLater, 750, rl);
+ return;
+ }
+
+ else if ((u.pathname === '/multiplex1') && (req.httpVersionMajor === 2)) {
+ res.setHeader('Content-Type', 'text/plain');
+ res.writeHead(200);
+ m.mp1res = res;
+ m.checkReady();
+ return;
+ }
+
+ else if ((u.pathname === '/multiplex2') && (req.httpVersionMajor === 2)) {
+ res.setHeader('Content-Type', 'text/plain');
+ res.writeHead(200);
+ m.mp2res = res;
+ m.checkReady();
+ return;
+ }
+
+ else if (u.pathname === "/header") {
+ var val = req.headers["x-test-header"];
+ if (val) {
+ res.setHeader("X-Received-Test-Header", val);
+ }
+ }
+
+ else if (u.pathname === "/doubleheader") {
+ res.setHeader('Content-Type', 'text/html');
+ res.writeHead(200);
+ res.write(content);
+ res.writeHead(200);
+ res.end();
+ return;
+ }
+
+ else if (u.pathname === "/cookie_crumbling") {
+ res.setHeader("X-Received-Header-Pairs", JSON.stringify(decompressedPairs));
+ }
+
+ else if (u.pathname === "/push") {
+ push = res.push('/push.js');
+ push.writeHead(200, {
+ 'content-type': 'application/javascript',
+ 'pushed' : 'yes',
+ 'content-length' : 11,
+ 'X-Connection-Http2': 'yes'
+ });
+ push.end('// comments');
+ content = '<head> <script src="push.js"/></head>body text';
+ }
+
+ else if (u.pathname === "/push2") {
+ push = res.push('/push2.js');
+ push.writeHead(200, {
+ 'content-type': 'application/javascript',
+ 'pushed' : 'yes',
+ // no content-length
+ 'X-Connection-Http2': 'yes'
+ });
+ push.end('// comments');
+ content = '<head> <script src="push2.js"/></head>body text';
+ }
+
+ else if (u.pathname === "/push5") {
+ push = res.push('/push5.js');
+ push.writeHead(200, {
+ 'content-type': 'application/javascript',
+ 'pushed' : 'yes',
+ // no content-length
+ 'X-Connection-Http2': 'yes'
+ });
+ content = generateContent(1024 * 150);
+ push.write(content);
+ push.end();
+ content = '<head> <script src="push5.js"/></head>body text';
+ }
+
+ else if (u.pathname === "/pushapi1") {
+ push1 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort, path : '/pushapi1/1', method : 'GET',
+ headers: {'x-pushed-request': 'true', 'x-foo' : 'bar'}});
+ push1.writeHead(200, {
+ 'pushed' : 'yes',
+ 'content-length' : 1,
+ 'subresource' : '1',
+ 'X-Connection-Http2': 'yes'
+ });
+ push1.end('1');
+
+ push1a = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort, path : '/pushapi1/1', method : 'GET',
+ headers: {'x-foo' : 'bar', 'x-pushed-request': 'true'}});
+ push1a.writeHead(200, {
+ 'pushed' : 'yes',
+ 'content-length' : 1,
+ 'subresource' : '1a',
+ 'X-Connection-Http2': 'yes'
+ });
+ push1a.end('1');
+
+ push2 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort, path : '/pushapi1/2', method : 'GET',
+ headers: {'x-pushed-request': 'true'}});
+ push2.writeHead(200, {
+ 'pushed' : 'yes',
+ 'subresource' : '2',
+ 'content-length' : 1,
+ 'X-Connection-Http2': 'yes'
+ });
+ push2.end('2');
+
+ push3 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort, path : '/pushapi1/3', method : 'GET',
+ headers: {'x-pushed-request': 'true'}});
+ push3.writeHead(200, {
+ 'pushed' : 'yes',
+ 'content-length' : 1,
+ 'subresource' : '3',
+ 'X-Connection-Http2': 'yes'
+ });
+ push3.end('3');
+
+ content = '0';
+ }
+
+ else if (u.pathname === "/big") {
+ content = generateContent(128 * 1024);
+ var hash = crypto.createHash('md5');
+ hash.update(content);
+ var md5 = hash.digest('hex');
+ res.setHeader("X-Expected-MD5", md5);
+ }
+
+ else if (u.pathname === "/huge") {
+ content = generateContent(1024);
+ res.setHeader('Content-Type', 'text/plain');
+ res.writeHead(200);
+ // 1mb of data
+ for (var i = 0; i < (1024 * 1); i++) {
+ res.write(content); // 1kb chunk
+ }
+ res.end();
+ return;
+ }
+
+ else if (u.pathname === "/post" || u.pathname === "/patch") {
+ if (req.method != "POST" && req.method != "PATCH") {
+ res.writeHead(405);
+ res.end('Unexpected method: ' + req.method);
+ return;
+ }
+
+ var post_hash = crypto.createHash('md5');
+ req.on('data', function receivePostData(chunk) {
+ post_hash.update(chunk.toString());
+ });
+ req.on('end', function finishPost() {
+ var md5 = post_hash.digest('hex');
+ res.setHeader('X-Calculated-MD5', md5);
+ res.writeHead(200);
+ res.end(content);
+ });
+
+ return;
+ }
+
+ else if (u.pathname === "/750msPost") {
+ if (req.method != "POST") {
+ res.writeHead(405);
+ res.end('Unexpected method: ' + req.method);
+ return;
+ }
+
+ var accum = 0;
+ req.on('data', function receivePostData(chunk) {
+ accum += chunk.length;
+ });
+ req.on('end', function finishPost() {
+ res.setHeader('X-Recvd', accum);
+ var rl = new runlater();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLater, 750, rl);
+ return;
+ });
+
+ return;
+ }
+
+ else if (u.pathname === "/h11required_stream") {
+ if (req.httpVersionMajor === 2) {
+ h11required_conn = req.stream.connection;
+ res.stream.reset('HTTP_1_1_REQUIRED');
+ return;
+ }
+ }
+
+ else if (u.pathname === "/bigdownload") {
+
+ res.setHeader('Content-Type', 'text/html');
+ res.writeHead(200);
+
+ var rl = new moreData();
+ rl.req = req;
+ rl.resp = res;
+ setTimeout(executeRunLater, 1, rl);
+ return;
+ }
+
+ else if (u.pathname === "/h11required_session") {
+ if (req.httpVersionMajor === 2) {
+ if (h11required_conn !== req.stream.connection) {
+ h11required_header = "no";
+ }
+ res.stream.connection.close('HTTP_1_1_REQUIRED', res.stream.id - 2);
+ return;
+ } else {
+ res.setHeader('X-H11Required-Stream-Ok', h11required_header);
+ }
+ }
+
+ else if (u.pathname === "/rstonce") {
+ if (!didRst && req.httpVersionMajor === 2) {
+ didRst = true;
+ rstConnection = req.stream.connection;
+ req.stream.reset('REFUSED_STREAM');
+ return;
+ }
+
+ if (rstConnection === null ||
+ rstConnection !== req.stream.connection) {
+ res.setHeader('Connection', 'close');
+ res.writeHead(400);
+ res.end("WRONG CONNECTION, HOMIE!");
+ return;
+ }
+
+ if (req.httpVersionMajor != 2) {
+ res.setHeader('Connection', 'close');
+ }
+ res.writeHead(200);
+ res.end("It's all good.");
+ return;
+ }
+
+ else if (u.pathname === "/continuedheaders") {
+ var pushRequestHeaders = {'x-pushed-request': 'true'};
+ var pushResponseHeaders = {'content-type': 'text/plain',
+ 'content-length': '2',
+ 'X-Connection-Http2': 'yes'};
+ var pushHdrTxt = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789';
+ var pullHdrTxt = pushHdrTxt.split('').reverse().join('');
+ for (var i = 0; i < 265; i++) {
+ pushRequestHeaders['X-Push-Test-Header-' + i] = pushHdrTxt;
+ res.setHeader('X-Pull-Test-Header-' + i, pullHdrTxt);
+ }
+ push = res.push({
+ hostname: 'localhost:' + serverPort,
+ port: serverPort,
+ path: '/continuedheaders/push',
+ method: 'GET',
+ headers: pushRequestHeaders
+ });
+ push.writeHead(200, pushResponseHeaders);
+ push.end("ok");
+ }
+
+ else if (u.pathname === "/altsvc1") {
+ if (req.httpVersionMajor != 2 ||
+ req.scheme != "http" ||
+ req.headers['alt-used'] != ("foo.example.com:" + serverPort)) {
+ res.writeHead(400);
+ res.end("WHAT?");
+ return;
+ }
+ // test the alt svc frame for use with altsvc2
+ res.altsvc("foo.example.com", serverPort, "h2", 3600, req.headers['x-redirect-origin']);
+ }
+
+ else if (u.pathname === "/altsvc2") {
+ if (req.httpVersionMajor != 2 ||
+ req.scheme != "http" ||
+ req.headers['alt-used'] != ("foo.example.com:" + serverPort)) {
+ res.writeHead(400);
+ res.end("WHAT?");
+ return;
+ }
+ }
+
+ // for use with test_altsvc.js
+ else if (u.pathname === "/altsvc-test") {
+ res.setHeader('Cache-Control', 'no-cache');
+ res.setHeader('Alt-Svc', 'h2=' + req.headers['x-altsvc']);
+ }
+
+ else if (u.pathname === "/.well-known/http-opportunistic") {
+ res.setHeader('Cache-Control', 'no-cache');
+ res.setHeader('Content-Type', 'application/json');
+ res.writeHead(200, "OK");
+ res.end('{"http://' + req.headers['host'] + '": { "tls-ports": [' + serverPort + '] }}');
+ return;
+ }
+
+ // for PushService tests.
+ else if (u.pathname === "/pushSubscriptionSuccess/subscribe") {
+ res.setHeader("Location",
+ 'https://localhost:' + serverPort + '/pushSubscriptionSuccesss');
+ res.setHeader("Link",
+ '</pushEndpointSuccess>; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointSuccess>; rel="urn:ietf:params:push:receipt"');
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionSuccesss") {
+ // do nothing.
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionMissingLocation/subscribe") {
+ res.setHeader("Link",
+ '</pushEndpointMissingLocation>; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointMissingLocation>; rel="urn:ietf:params:push:receipt"');
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionMissingLink/subscribe") {
+ res.setHeader("Location",
+ 'https://localhost:' + serverPort + '/subscriptionMissingLink');
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionLocationBogus/subscribe") {
+ res.setHeader("Location", '1234');
+ res.setHeader("Link",
+ '</pushEndpointLocationBogus; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointLocationBogus>; rel="urn:ietf:params:push:receipt"');
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionMissingLink1/subscribe") {
+ res.setHeader("Location",
+ 'https://localhost:' + serverPort + '/subscriptionMissingLink1');
+ res.setHeader("Link",
+ '</receiptPushEndpointMissingLink1>; rel="urn:ietf:params:push:receipt"');
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionMissingLink2/subscribe") {
+ res.setHeader("Location",
+ 'https://localhost:' + serverPort + '/subscriptionMissingLink2');
+ res.setHeader("Link",
+ '</pushEndpointMissingLink2>; rel="urn:ietf:params:push"');
+ res.writeHead(201, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/subscriptionMissingLink2") {
+ // do nothing.
+ return;
+ }
+
+ else if (u.pathname === "/pushSubscriptionNot201Code/subscribe") {
+ res.setHeader("Location",
+ 'https://localhost:' + serverPort + '/subscriptionNot2xxCode');
+ res.setHeader("Link",
+ '</pushEndpointNot201Code>; rel="urn:ietf:params:push", ' +
+ '</receiptPushEndpointNot201Code>; rel="urn:ietf:params:push:receipt"');
+ res.writeHead(200, "OK");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname ==="/pushNotifications/subscription1") {
+ pushPushServer1 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort,
+ path : '/pushNotificationsDeliver1', method : 'GET',
+ headers: { 'Encryption-Key': 'keyid="notification1"; dh="BO_tgGm-yvYAGLeRe16AvhzaUcpYRiqgsGOlXpt0DRWDRGGdzVLGlEVJMygqAUECarLnxCiAOHTP_znkedrlWoU"',
+ 'Encryption': 'keyid="notification1";salt="uAZaiXpOSfOLJxtOCZ09dA"',
+ 'Content-Encoding': 'aesgcm128',
+ }
+ });
+ pushPushServer1.writeHead(200, {
+ 'subresource' : '1'
+ });
+
+ pushPushServer1.end('370aeb3963f12c4f12bf946bd0a7a9ee7d3eaff8f7aec62b530fc25cfa', 'hex');
+ return;
+ }
+
+ else if (u.pathname ==="/pushNotifications/subscription2") {
+ pushPushServer2 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort,
+ path : '/pushNotificationsDeliver3', method : 'GET',
+ headers: { 'Encryption-Key': 'keyid="notification2"; dh="BKVdQcgfncpNyNWsGrbecX0zq3eHIlHu5XbCGmVcxPnRSbhjrA6GyBIeGdqsUL69j5Z2CvbZd-9z1UBH0akUnGQ"',
+ 'Encryption': 'keyid="notification2";salt="vFn3t3M_k42zHBdpch3VRw"',
+ 'Content-Encoding': 'aesgcm128',
+ }
+ });
+ pushPushServer2.writeHead(200, {
+ 'subresource' : '1'
+ });
+
+ pushPushServer2.end('66df5d11daa01e5c802ff97cdf7f39684b5bf7c6418a5cf9b609c6826c04b25e403823607ac514278a7da945', 'hex');
+ return;
+ }
+
+ else if (u.pathname ==="/pushNotifications/subscription3") {
+ pushPushServer3 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort,
+ path : '/pushNotificationsDeliver3', method : 'GET',
+ headers: { 'Encryption-Key': 'keyid="notification3";dh="BD3xV_ACT8r6hdIYES3BJj1qhz9wyv7MBrG9vM2UCnjPzwE_YFVpkD-SGqE-BR2--0M-Yf31wctwNsO1qjBUeMg"',
+ 'Encryption': 'keyid="notification3"; salt="DFq188piWU7osPBgqn4Nlg"; rs=24',
+ 'Content-Encoding': 'aesgcm128',
+ }
+ });
+ pushPushServer3.writeHead(200, {
+ 'subresource' : '1'
+ });
+
+ pushPushServer3.end('2caaeedd9cf1059b80c58b6c6827da8ff7de864ac8bea6d5775892c27c005209cbf9c4de0c3fbcddb9711d74eaeebd33f7275374cb42dd48c07168bc2cc9df63e045ce2d2a2408c66088a40c', 'hex');
+ return;
+ }
+
+ else if (u.pathname == "/pushNotifications/subscription4") {
+ pushPushServer4 = res.push(
+ { hostname: 'localhost:' + serverPort, port: serverPort,
+ path : '/pushNotificationsDeliver4', method : 'GET',
+ headers: { 'Crypto-Key': 'keyid="notification4";dh="BJScXUUTcs7D8jJWI1AOxSgAKkF7e56ay4Lek52TqDlWo1yGd5czaxFWfsuP4j7XNWgGYm60-LKpSUMlptxPFVQ"',
+ 'Encryption': 'keyid="notification4"; salt="sn9p2QqF3V6KBclda8vx7w"',
+ 'Content-Encoding': 'aesgcm',
+ }
+ });
+ pushPushServer4.writeHead(200, {
+ 'subresource' : '1'
+ });
+
+ pushPushServer4.end('9eba7ba6192544a39bd9e9b58e702d0748f1776b27f6616cdc55d29ed5a015a6db8f2dd82cd5751a14315546194ff1c18458ab91eb36c9760ccb042670001fd9964557a079553c3591ee131ceb259389cfffab3ab873f873caa6a72e87d262b8684c3260e5940b992234deebf57a9ff3a8775742f3cbcb152d249725a28326717e19cce8506813a155eff5df9bdba9e3ae8801d3cc2b7e7f2f1b6896e63d1fdda6f85df704b1a34db7b2dd63eba11ede154300a318c6f83c41a3d32356a196e36bc905b99195fd91ae4ff3f545c42d17f1fdc1d5bd2bf7516d0765e3a859fffac84f46160b79cedda589f74c25357cf6988cd8ba83867ebd86e4579c9d3b00a712c77fcea3b663007076e21f9819423faa830c2176ff1001c1690f34be26229a191a938517', 'hex');
+ return;
+ }
+
+ else if ((u.pathname === "/pushNotificationsDeliver1") ||
+ (u.pathname === "/pushNotificationsDeliver2") ||
+ (u.pathname === "/pushNotificationsDeliver3")) {
+ res.writeHead(410, "GONE");
+ res.end("");
+ return;
+ }
+
+ else if (u.pathname === "/illegalhpacksoft") {
+ // This will cause the compressor to compress a header that is not legal,
+ // but only affects the stream, not the session.
+ illegalheader_conn = req.stream.connection;
+ Compressor.prototype.compress = insertSoftIllegalHpack;
+ // Fall through to the default response behavior
+ }
+
+ else if (u.pathname === "/illegalhpackhard") {
+ // This will cause the compressor to insert an HPACK instruction that will
+ // cause a session failure.
+ Compressor.prototype.compress = insertHardIllegalHpack;
+ // Fall through to default response behavior
+ }
+
+ else if (u.pathname === "/illegalhpack_validate") {
+ if (req.stream.connection === illegalheader_conn) {
+ res.setHeader('X-Did-Goaway', 'no');
+ } else {
+ res.setHeader('X-Did-Goaway', 'yes');
+ }
+ // Fall through to the default response behavior
+ }
+
+ else if (u.pathname === "/foldedheader") {
+ res.setHeader('X-Folded-Header', 'this is\n folded');
+ // Fall through to the default response behavior
+ }
+
+ else if (u.pathname === "/emptydata") {
+ // Overwrite the original transform with our version that will insert an
+ // empty DATA frame at the beginning of the stream response, then fall
+ // through to the default response behavior.
+ Serializer.prototype._transform = newTransform;
+ }
+
+ // for use with test_immutable.js
+ else if (u.pathname === "/immutable-test-without-attribute") {
+ res.setHeader('Cache-Control', 'max-age=100000');
+ res.setHeader('Etag', '1');
+ if (req.headers["if-none-match"]) {
+ res.setHeader("x-conditional", "true");
+ }
+ // default response from here
+ }
+ else if (u.pathname === "/immutable-test-with-attribute") {
+ res.setHeader('Cache-Control', 'max-age=100000, immutable');
+ res.setHeader('Etag', '2');
+ if (req.headers["if-none-match"]) {
+ res.setHeader("x-conditional", "true");
+ }
+ // default response from here
+ }
+
+ res.setHeader('Content-Type', 'text/html');
+ if (req.httpVersionMajor != 2) {
+ res.setHeader('Connection', 'close');
+ }
+ res.writeHead(200);
+ res.end(content);
+}
+
+// Set up the SSL certs for our server - this server has a cert for foo.example.com
+// signed by netwerk/tests/unit/CA.cert.der
+var options = {
+ key: fs.readFileSync(__dirname + '/http2-key.pem'),
+ cert: fs.readFileSync(__dirname + '/http2-cert.pem'),
+};
+
+if (process.env.HTTP2_LOG !== undefined) {
+ var log_module = node_http2_root + "/test/util";
+ options.log = require(log_module).createLogger('server')
+}
+
+var server = http2.createServer(options, handleRequest);
+
+server.on('connection', function(socket) {
+ socket.on('error', function() {
+    // Ignoring SSL socket errors, since they usually represent a connection that was torn down
+    // by the browser because of an untrusted certificate. And this happens at least once, when
+    // the first test case is done.
+ });
+});
+
+var serverPort;
+function listenok() {
+ serverPort = server._server.address().port;
+ console.log('HTTP2 server listening on port ' + serverPort);
+}
+var portSelection = 0;
+var envport = process.env.MOZHTTP2_PORT;
+if (envport !== undefined) {
+ try {
+ portSelection = parseInt(envport, 10);
+ } catch (e) {
+ portSelection = -1;
+ }
+}
+server.listen(portSelection, "0.0.0.0", 200, listenok);
diff --git a/testing/xpcshell/moz.build b/testing/xpcshell/moz.build
new file mode 100644
index 0000000000..27d0a53c66
--- /dev/null
+++ b/testing/xpcshell/moz.build
@@ -0,0 +1,18 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+TEST_DIRS += ['example']
+
+if CONFIG['HOST_OS_ARCH'] != 'Darwin':
+ # Disabled on Mac due to our builders still being on MacOS 10.7,
+ # see bug 1255588
+ PYTHON_UNIT_TESTS += [
+ 'selftest.py',
+ ]
+
+TESTING_JS_MODULES += [
+ 'dbg-actors.js',
+]
diff --git a/testing/xpcshell/node-http2/.gitignore b/testing/xpcshell/node-http2/.gitignore
new file mode 100644
index 0000000000..bc483625e6
--- /dev/null
+++ b/testing/xpcshell/node-http2/.gitignore
@@ -0,0 +1,7 @@
+node_modules
+.idea
+coverage
+doc
+.vscode/.browse*
+npm-debug.log
+typings \ No newline at end of file
diff --git a/testing/xpcshell/node-http2/.travis.yml b/testing/xpcshell/node-http2/.travis.yml
new file mode 100644
index 0000000000..5ca377d612
--- /dev/null
+++ b/testing/xpcshell/node-http2/.travis.yml
@@ -0,0 +1,5 @@
+ language: node_js
+ node_js:
+ - "iojs"
+ - "0.12"
+
diff --git a/testing/xpcshell/node-http2/HISTORY.md b/testing/xpcshell/node-http2/HISTORY.md
new file mode 100644
index 0000000000..20bd0c3507
--- /dev/null
+++ b/testing/xpcshell/node-http2/HISTORY.md
@@ -0,0 +1,258 @@
+Version history
+===============
+
+### 3.3.6 (2016-09-16) ###
+* We were not appropriately sending HPACK context updates when receiving SETTINGS_HEADER_TABLE_SIZE. This release fixes that bug.
+
+### 3.3.5 (2016-09-06) ###
+* Fix issues with large DATA frames (https://github.com/molnarg/node-http2/issues/207)
+
+### 3.3.4 (2016-04-22) ###
+* More PR bugfixes (https://github.com/molnarg/node-http2/issues?q=milestone%3Av3.3.4)
+
+### 3.3.3 (2016-04-21) ###
+
+* Bugfixes from pull requests (https://github.com/molnarg/node-http2/search?q=milestone%3Av3.3.3&type=Issues&utf8=%E2%9C%93)
+
+### 3.3.2 (2016-01-11) ###
+
+* Fix an incompatibility with Firefox (issue 167)
+
+### 3.3.1 (2016-01-11) ###
+
+* Fix some DoS bugs (issues 145, 146, 147, and 148)
+
+### 3.3.0 (2016-01-10) ###
+
+* Bugfix updates from pull requests
+
+### 3.2.0 (2015-02-19) ###
+
+* Update ALPN token to final RFC version (h2).
+* Update altsvc implementation to draft 06: [draft-ietf-httpbis-alt-svc-06]
+
+[draft-ietf-httpbis-alt-svc-06]: http://tools.ietf.org/html/draft-ietf-httpbis-alt-svc-06
+
+### 3.1.2 (2015-02-17) ###
+
+* Update the example server to have a safe push example.
+
+### 3.1.1 (2015-01-29) ###
+
+* Bugfix release.
+* Fixes an issue sending a push promise that is large enough to fill the frame (#93).
+
+### 3.1.0 (2014-12-11) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-16]
+ * This involves some state transition changes that are technically incompatible with draft-14. If you need to be assured to interop on -14, continue using 3.0.1
+
+[draft-ietf-httpbis-http2-16]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-16
+
+### 3.0.1 (2014-11-20) ###
+
+* Bugfix release.
+* Fixed #81 and #87
+* Fixed a bug in flow control (without GitHub issue)
+
+### 3.0.0 (2014-08-25) ###
+
+* Re-join node-http2 and node-http2-protocol into one repository
+* API Changes
+ * The default versions of createServer, request, and get now enforce TLS-only
+ * The raw versions of createServer, request, and get are now under http2.raw instead of http2
+ * What was previously in the http2-protocol repository/module is now available under http2.protocol from this repo/module
+ * http2-protocol.ImplementedVersion is now http2.protocol.VERSION (the ALPN token)
+
+### 2.7.1 (2014-08-01) ###
+
+* Require protocol 0.14.1 (bugfix release)
+
+### 2.7.0 (2014-07-31) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-14]
+
+[draft-ietf-httpbis-http2-14]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-14
+
+### 2.6.0 (2014-06-18) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-13]
+
+[draft-ietf-httpbis-http2-13]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-13
+
+### 2.5.3 (2014-06-15) ###
+
+* Exposing API to send ALTSVC frames
+
+### 2.5.2 (2014-05-25) ###
+
+* Fix a bug that occurs when the ALPN negotiation is unsuccessful
+
+### 2.5.1 (2014-05-25) ###
+
+* Support for node 0.11.x
+* New cipher suite priority list with conformant ciphers on the top (only available in node >=0.11.x)
+
+### 2.5.0 (2014-04-24) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-12]
+
+[draft-ietf-httpbis-http2-12]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-12
+
+### 2.4.0 (2014-04-16) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-11]
+
+[draft-ietf-httpbis-http2-11]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-11
+
+### 2.3.0 (2014-03-12) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-10]
+
+[draft-ietf-httpbis-http2-10]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-10
+
+### 2.2.0 (2013-12-25) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-09]
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.2.0.tar.gz)
+
+[draft-ietf-httpbis-http2-09]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-09
+
+### 2.1.1 (2013-12-21) ###
+
+* Minor bugfix
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.1.1.tar.gz)
+
+### 2.1.0 (2013-11-10) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-07][draft-07]
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.1.0.tar.gz)
+
+[draft-07]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-07
+
+### 2.0.0 (2013-11-09) ###
+
+* Splitting out everything that is not related to negotiating HTTP2 or the node-like HTTP API.
+ These live in separate module from now on:
+ [http2-protocol](https://github.com/molnarg/node-http2-protocol).
+* The only backwards incompatible change: the `Endpoint` class is not exported anymore. Use the
+ http2-protocol module if you want to use this low level interface.
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-2.0.0.tar.gz)
+
+### 1.0.1 (2013-10-14) ###
+
+* Support for ALPN if node supports it (currently needs a custom build)
+* Fix for a few small issues
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-1.0.1.tar.gz)
+
+### 1.0.0 (2013-09-23) ###
+
+* Exporting Endpoint class
+* Support for 'filters' in Endpoint
+* The last time-based release
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-1.0.0.tar.gz)
+
+### 0.4.1 (2013-09-15) ###
+
+* Major performance improvements
+* Minor improvements to error handling
+* [Blog post](http://gabor.molnar.es/blog/2013/09/15/gsoc-week-number-13/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.4.1.tar.gz)
+
+### 0.4.0 (2013-09-09) ###
+
+* Upgrade to the latest draft: [draft-ietf-httpbis-http2-06][draft-06]
+* Support for HTTP trailers
+* Support for TLS SNI (Server Name Indication)
+* Improved stream scheduling algorithm
+* [Blog post](http://gabor.molnar.es/blog/2013/09/09/gsoc-week-number-12/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.4.0.tar.gz)
+
+[draft-06]: http://tools.ietf.org/html/draft-ietf-httpbis-http2-06
+
+### 0.3.1 (2013-09-03) ###
+
+* Lot of testing, bugfixes
+* [Blog post](http://gabor.molnar.es/blog/2013/09/03/gsoc-week-number-11/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.3.1.tar.gz)
+
+### 0.3.0 (2013-08-27) ###
+
+* Support for prioritization
+* Small API compatibility improvements (compatibility with the standard node.js HTTP API)
+* Minor push API change
+* Ability to pass an external bunyan logger when creating a Server or Agent
+* [Blog post](http://gabor.molnar.es/blog/2013/08/27/gsoc-week-number-10/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.3.0.tar.gz)
+
+### 0.2.1 (2013-08-20) ###
+
+* Fixing a flow control bug
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.2.1.tar.gz)
+
+### 0.2.0 (2013-08-19) ###
+
+* Exposing server push in the public API
+* Connection pooling when operating as client
+* Much better API compatibility with the standard node.js HTTPS module
+* Logging improvements
+* [Blog post](http://gabor.molnar.es/blog/2013/08/19/gsoc-week-number-9/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.2.0.tar.gz)
+
+### 0.1.1 (2013-08-12) ###
+
+* Lots of bugfixes
+* Proper flow control for outgoing frames
+* Basic flow control for incoming frames
+* [Blog post](http://gabor.molnar.es/blog/2013/08/12/gsoc-week-number-8/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.1.1.tar.gz)
+
+### 0.1.0 (2013-08-06) ###
+
+* First release with public API (similar to the standard node HTTPS module)
+* Support for NPN negotiation (no ALPN or Upgrade yet)
+* Stream number limitation is in place
+* Push streams works but not exposed yet in the public API
+* [Blog post](http://gabor.molnar.es/blog/2013/08/05/gsoc-week-number-6-and-number-7/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.1.0.tar.gz)
+
+### 0.0.6 (2013-07-19) ###
+
+* `Connection` and `Endpoint` classes are usable, but not yet ready
+* Addition of an example server and client
+* Using [istanbul](https://github.com/gotwarlost/istanbul) for measuring code coverage
+* [Blog post](http://gabor.molnar.es/blog/2013/07/19/gsoc-week-number-5/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.6.tar.gz)
+
+### 0.0.5 (2013-07-14) ###
+
+* `Stream` class is done
+* Public API stubs are in place
+* [Blog post](http://gabor.molnar.es/blog/2013/07/14/gsoc-week-number-4/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.5.tar.gz)
+
+### 0.0.4 (2013-07-08) ###
+
+* Added logging
+* Started `Stream` class implementation
+* [Blog post](http://gabor.molnar.es/blog/2013/07/08/gsoc-week-number-3/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.4.tar.gz)
+
+### 0.0.3 (2013-07-03) ###
+
+* Header compression is ready
+* [Blog post](http://gabor.molnar.es/blog/2013/07/03/the-http-slash-2-header-compression-implementation-of-node-http2/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.3.tar.gz)
+
+### 0.0.2 (2013-07-01) ###
+
+* Frame serialization and deserialization ready and updated to match the newest spec
+* Header compression implementation started
+* [Blog post](http://gabor.molnar.es/blog/2013/07/01/gsoc-week-number-2/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.2.tar.gz)
+
+### 0.0.1 (2013-06-23) ###
+
+* Frame serialization and deserialization largely done
+* [Blog post](http://gabor.molnar.es/blog/2013/06/23/gsoc-week-number-1/)
+* [Tarball](https://github.com/molnarg/node-http2/archive/node-http2-0.0.1.tar.gz)
diff --git a/testing/xpcshell/node-http2/LICENSE b/testing/xpcshell/node-http2/LICENSE
new file mode 100644
index 0000000000..9bb2e9ce57
--- /dev/null
+++ b/testing/xpcshell/node-http2/LICENSE
@@ -0,0 +1,22 @@
+The MIT License
+
+Copyright (C) 2013 Gábor Molnár <gabor@molnar.es>, Google Inc
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+'Software'), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/testing/xpcshell/node-http2/README.md b/testing/xpcshell/node-http2/README.md
new file mode 100644
index 0000000000..fa57ae9657
--- /dev/null
+++ b/testing/xpcshell/node-http2/README.md
@@ -0,0 +1,171 @@
+node-http2
+==========
+
+An HTTP/2 ([RFC 7540](http://tools.ietf.org/html/rfc7540))
+client and server implementation for node.js.
+
+![Travis CI status](https://travis-ci.org/molnarg/node-http2.svg?branch=master)
+
+Installation
+------------
+
+```
+npm install http2
+```
+
+API
+---
+
+The API is very similar to the [standard node.js HTTPS API](http://nodejs.org/api/https.html). The
+goal is the perfect API compatibility, with additional HTTP2 related extensions (like server push).
+
+Detailed API documentation is primarily maintained in the `lib/http.js` file and is [available in
+the wiki](https://github.com/molnarg/node-http2/wiki/Public-API) as well.
+
+Examples
+--------
+
+### Using as a server ###
+
+```javascript
+var options = {
+ key: fs.readFileSync('./example/localhost.key'),
+ cert: fs.readFileSync('./example/localhost.crt')
+};
+
+require('http2').createServer(options, function(request, response) {
+ response.end('Hello world!');
+}).listen(8080);
+```
+
+### Using as a client ###
+
+```javascript
+require('http2').get('https://localhost:8080/', function(response) {
+ response.pipe(process.stdout);
+});
+```
+
+### Simple static file server ###
+
+A simple static file server serving up content from its own directory is available in the `example`
+directory. Running the server:
+
+```bash
+$ node ./example/server.js
+```
+
+### Simple command line client ###
+
+An example client is also available. Downloading the server's own source code from the server:
+
+```bash
+$ node ./example/client.js 'https://localhost:8080/server.js' >/tmp/server.js
+```
+
+### Server push ###
+
+For a server push example, see the source code of the example
+[server](https://github.com/molnarg/node-http2/blob/master/example/server.js) and
+[client](https://github.com/molnarg/node-http2/blob/master/example/client.js).
+
+Status
+------
+
+* ALPN is only supported in node.js >= 5.0
+* Upgrade mechanism to start HTTP/2 over unencrypted channel is not implemented yet
+ (issue [#4](https://github.com/molnarg/node-http2/issues/4))
+* Other minor features found in
+ [this list](https://github.com/molnarg/node-http2/issues?labels=feature) are not implemented yet
+
+Development
+-----------
+
+### Development dependencies ###
+
+There are a few libraries you will need to have installed to do anything described in the following
+sections. After installing/cloning node-http2, run `npm install` in its directory to install
+development dependencies.
+
+Used libraries:
+
+* [mocha](http://visionmedia.github.io/mocha/) for tests
+* [chai](http://chaijs.com/) for assertions
+* [istanbul](https://github.com/gotwarlost/istanbul) for code coverage analysis
+* [docco](http://jashkenas.github.io/docco/) for developer documentation
+* [bunyan](https://github.com/trentm/node-bunyan) for logging
+
+For pretty printing logs, you will also need a global install of bunyan (`npm install -g bunyan`).
+
+### Developer documentation ###
+
+The developer documentation is generated from the source code using docco and can be viewed online
+[here](http://molnarg.github.io/node-http2/doc/). If you'd like to have an offline copy, just run
+`npm run-script doc`.
+
+### Running the tests ###
+
+It's easy, just run `npm test`. The tests are written in BDD style, so they are a good starting
+point to understand the code.
+
+### Test coverage ###
+
+To generate a code coverage report, run `npm test --coverage` (which runs very slowly, be patient).
+Code coverage summary as of version 3.0.1:
+```
+Statements : 92.09% ( 1759/1910 )
+Branches : 82.56% ( 696/843 )
+Functions : 91.38% ( 212/232 )
+Lines : 92.17% ( 1753/1902 )
+```
+
+There's a hosted version of the detailed (line-by-line) coverage report
+[here](http://molnarg.github.io/node-http2/coverage/lcov-report/lib/).
+
+### Logging ###
+
+Logging is turned off by default. You can turn it on by passing a bunyan logger as `log` option when
+creating a server or agent.
+
+When using the example server or client, it's very easy to turn logging on: set the `HTTP2_LOG`
+environment variable to `fatal`, `error`, `warn`, `info`, `debug` or `trace` (the logging level).
+To log every single incoming and outgoing data chunk, use `HTTP2_LOG_DATA=1` besides
+`HTTP2_LOG=trace`. Log output goes to the standard error output. If the standard error is redirected
+into a file, then the log output is in bunyan's JSON format for easier post-mortem analysis.
+
+Running the example server and client with `info` level logging output:
+
+```bash
+$ HTTP2_LOG=info node ./example/server.js
+```
+
+```bash
+$ HTTP2_LOG=info node ./example/client.js 'https://localhost:8080/server.js' >/dev/null
+```
+
+Contributors
+------------
+
+The co-maintainer of the project is [Nick Hurley](https://github.com/todesschaf).
+
+Code contributions are always welcome! People who contributed to node-http2 so far:
+
+* [Nick Hurley](https://github.com/todesschaf)
+* [Mike Belshe](https://github.com/mbelshe)
+* [Yoshihiro Iwanaga](https://github.com/iwanaga)
+* [Igor Novikov](https://github.com/vsemogutor)
+* [James Willcox](https://github.com/snorp)
+* [David Björklund](https://github.com/kesla)
+* [Patrick McManus](https://github.com/mcmanus)
+
+Special thanks to Google for financing the development of this module as part of their [Summer of
+Code program](https://developers.google.com/open-source/soc/) (project: [HTTP/2 prototype server
+implementation](https://google-melange.appspot.com/gsoc/project/details/google/gsoc2013/molnarg/5818821692620800)), and
+Nick Hurley of Mozilla, my GSoC mentor, who helped with regular code review and technical advice.
+
+License
+-------
+
+The MIT License
+
+Copyright (C) 2013 Gábor Molnár <gabor@molnar.es>
diff --git a/testing/xpcshell/node-http2/example/client.js b/testing/xpcshell/node-http2/example/client.js
new file mode 100644
index 0000000000..75a4bc011b
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/client.js
@@ -0,0 +1,48 @@
+var fs = require('fs');
+var path = require('path');
+var http2 = require('..');
+var urlParse = require('url').parse;
+
+// Setting the global logger (optional)
+http2.globalAgent = new http2.Agent({
+ rejectUnauthorized: true,
+ log: require('../test/util').createLogger('client')
+});
+
+// Sending the request
+var url = process.argv.pop();
+var options = urlParse(url);
+
+// Optionally verify self-signed certificates.
+if (options.hostname == 'localhost') {
+ options.key = fs.readFileSync(path.join(__dirname, '/localhost.key'));
+ options.ca = fs.readFileSync(path.join(__dirname, '/localhost.crt'));
+}
+
+var request = process.env.HTTP2_PLAIN ? http2.raw.get(options) : http2.get(options);
+
+// Receiving the response
+request.on('response', function(response) {
+ response.pipe(process.stdout);
+ response.on('end', finish);
+});
+
+// Receiving push streams
+request.on('push', function(pushRequest) {
+ var filename = path.join(__dirname, '/push-' + push_count);
+ push_count += 1;
+ console.error('Receiving pushed resource: ' + pushRequest.url + ' -> ' + filename);
+ pushRequest.on('response', function(pushResponse) {
+ pushResponse.pipe(fs.createWriteStream(filename)).on('finish', finish);
+ });
+});
+
+// Quitting after both the response and the associated pushed resources have arrived
+var push_count = 0;
+var finished = 0;
+function finish() {
+ finished += 1;
+ if (finished === (1 + push_count)) {
+ process.exit();
+ }
+}
diff --git a/testing/xpcshell/node-http2/example/localhost.crt b/testing/xpcshell/node-http2/example/localhost.crt
new file mode 100644
index 0000000000..c4e4d2e96d
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/localhost.crt
@@ -0,0 +1,14 @@
+-----BEGIN CERTIFICATE-----
+MIICDTCCAXYCCQC7iiBVXeTv1DANBgkqhkiG9w0BAQUFADBLMQswCQYDVQQGEwJI
+VTETMBEGA1UECBMKU29tZS1TdGF0ZTETMBEGA1UEChMKbm9kZS1odHRwMjESMBAG
+A1UEAxMJbG9jYWxob3N0MB4XDTE0MTIwMjE4NDcwNFoXDTI0MTEyOTE4NDcwNFow
+SzELMAkGA1UEBhMCSFUxEzARBgNVBAgTClNvbWUtU3RhdGUxEzARBgNVBAoTCm5v
+ZGUtaHR0cDIxEjAQBgNVBAMTCWxvY2FsaG9zdDCBnzANBgkqhkiG9w0BAQEFAAOB
+jQAwgYkCgYEA8As7rj7xdD+RuAmORju9NI+jtOScGgiAbfovaFyzTu0O0H9SCExi
+u6e2iXMRfzomTix/yjRvbdHEXfgONG1MnKUc0oC4GxHXshyMDEXq9LadgAmR/nDL
+UVT0eo7KqC21ufaca2nVS9qOdlSCE/p7IJdb2+BF1RmuC9pHpXvFW20CAwEAATAN
+BgkqhkiG9w0BAQUFAAOBgQDn8c/9ho9L08dOqEJ2WTBmv4dfRC3oTWR/0oIGsaXb
+RhQONy5CJv/ymPYE7nCFWTMaia+w8oFqMie/aNZ7VK6L+hafuUS93IjuTXVN++JP
+4948B0BBagvXGTwNtvm/1sZHLrXTkH1dbRUEF8M+KUSRUu2zJgm+e1bD8WTKQOIL
+NA==
+-----END CERTIFICATE-----
diff --git a/testing/xpcshell/node-http2/example/localhost.key b/testing/xpcshell/node-http2/example/localhost.key
new file mode 100644
index 0000000000..6e1de62642
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/localhost.key
@@ -0,0 +1,15 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIICXQIBAAKBgQDwCzuuPvF0P5G4CY5GO700j6O05JwaCIBt+i9oXLNO7Q7Qf1II
+TGK7p7aJcxF/OiZOLH/KNG9t0cRd+A40bUycpRzSgLgbEdeyHIwMRer0tp2ACZH+
+cMtRVPR6jsqoLbW59pxradVL2o52VIIT+nsgl1vb4EXVGa4L2kele8VbbQIDAQAB
+AoGAKKB+FVup2hb4PsG/RrvNphu5hWA721wdAIAbjfpCjtUocLlb1PO4sjIMfu7u
+wy3AVfLKHhsJ0Phz18OoA8+L65NMoMRsHOGaLEnGIJzJcnDLT5+uTFN5di0a1+UK
+BzB828rlHBNoQisogVCoKTYlCPJAZuI3trEzupWAV28XjTECQQD5LUEwYq4xr62L
+dEq5Qj/+c5paK/jrEBY83VZUmWzYsFgUwmpdku2ITRILQlOM33j6rk8krZZb93sb
+38ydmfwjAkEA9p30zyjOI9kKqTl9WdYNYtIXpyNGYa+Pga33o9pawTewiyS2uCYs
+wnQQV26bQ0YwQqLQhtIbo4fzCO6Ex0w7LwJBANHNbd8cp4kEX35U+3nDM3i+w477
+CUp6sA6tWrw+tqw4xuEr1T1WshOauP+r6AdsPkPsMo0yb7CdzxVoObPVbLsCQQCc
+sx0cjEb/TCeUAy186Z+zzN6umqFb7Jt4wLt7Z4EHCIWqw/c95zPFks3XYDZTdsOv
+c5igMdzR+c4ZPMUthWiNAkByx7If12G1Z/R2Y0vIB0WJq4BJnZCZ0mRR0oAmPoA+
+sZbmwctZ3IU+68Rgr4EAhrU04ygjF67IiNyXX0qqu3VH
+-----END RSA PRIVATE KEY-----
diff --git a/testing/xpcshell/node-http2/example/server.js b/testing/xpcshell/node-http2/example/server.js
new file mode 100644
index 0000000000..66d8f895d1
--- /dev/null
+++ b/testing/xpcshell/node-http2/example/server.js
@@ -0,0 +1,67 @@
+var fs = require('fs');
+var path = require('path');
+var http2 = require('..');
+
+// We cache one file to be able to do simple performance tests without waiting for the disk
+var cachedFile = fs.readFileSync(path.join(__dirname, './server.js'));
+var cachedUrl = '/server.js';
+
+// The callback to handle requests
+function onRequest(request, response) {
+ var filename = path.join(__dirname, request.url);
+
+ // Serving server.js from cache. Useful for microbenchmarks.
+ if (request.url === cachedUrl) {
+ if (response.push) {
+ // Also push down the client js, since it's possible if the requester wants
+ // one, they want both.
+ var push = response.push('/client.js');
+ push.writeHead(200);
+ fs.createReadStream(path.join(__dirname, '/client.js')).pipe(push);
+ }
+ response.end(cachedFile);
+ }
+
+ // Reading file from disk if it exists and is safe.
+ else if ((filename.indexOf(__dirname) === 0) && fs.existsSync(filename) && fs.statSync(filename).isFile()) {
+ response.writeHead(200);
+ var fileStream = fs.createReadStream(filename);
+ fileStream.pipe(response);
+ fileStream.on('finish',response.end);
+ }
+
+ // Example for testing large (boundary-sized) frames.
+ else if (request.url === "/largeframe") {
+ response.writeHead(200);
+ var body = 'a';
+ for (var i = 0; i < 14; i++) {
+ body += body;
+ }
+ body = body + 'a';
+ response.end(body);
+ }
+
+ // Otherwise responding with 404.
+ else {
+ response.writeHead(404);
+ response.end();
+ }
+}
+
+// Creating a bunyan logger (optional)
+var log = require('../test/util').createLogger('server');
+
+// Creating the server in plain or TLS mode (TLS mode is the default)
+var server;
+if (process.env.HTTP2_PLAIN) {
+ server = http2.raw.createServer({
+ log: log
+ }, onRequest);
+} else {
+ server = http2.createServer({
+ log: log,
+ key: fs.readFileSync(path.join(__dirname, '/localhost.key')),
+ cert: fs.readFileSync(path.join(__dirname, '/localhost.crt'))
+ }, onRequest);
+}
+server.listen(process.env.HTTP2_PORT || 8080);
diff --git a/testing/xpcshell/node-http2/lib/http.js b/testing/xpcshell/node-http2/lib/http.js
new file mode 100644
index 0000000000..4c4234c5cb
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/http.js
@@ -0,0 +1,1262 @@
+// Public API
+// ==========
+
+// The main governing power behind the http2 API design is that it should look very similar to the
+// existing node.js [HTTPS API][1] (which is, in turn, almost identical to the [HTTP API][2]). The
+// additional features of HTTP/2 are exposed as extensions to this API. Furthermore, node-http2
+// should fall back to using HTTP/1.1 if needed. Compatibility with undocumented or deprecated
+// elements of the node.js HTTP/HTTPS API is a non-goal.
+//
+// Additional and modified API elements
+// ------------------------------------
+//
+// - **Class: http2.Endpoint**: an API for using the raw HTTP/2 framing layer. For documentation
+// see [protocol/endpoint.js](protocol/endpoint.html).
+//
+// - **Class: http2.Server**
+// - **Event: 'connection' (socket, [endpoint])**: there's a second argument if the negotiation of
+// HTTP/2 was successful: the reference to the [Endpoint](protocol/endpoint.html) object tied to the
+// socket.
+//
+// - **http2.createServer(options, [requestListener])**: additional option:
+// - **log**: an optional [bunyan](https://github.com/trentm/node-bunyan) logger object
+//
+// - **Class: http2.ServerResponse**
+// - **response.push(options)**: initiates a server push. `options` describes the 'imaginary'
+// request to which the push stream is a response; the possible options are identical to the
+// ones accepted by `http2.request`. Returns a ServerResponse object that can be used to send
+// the response headers and content.
+//
+// - **Class: http2.Agent**
+// - **new Agent(options)**: additional option:
+// - **log**: an optional [bunyan](https://github.com/trentm/node-bunyan) logger object
+// - **agent.sockets**: only contains TCP sockets that corresponds to HTTP/1 requests.
+// - **agent.endpoints**: contains [Endpoint](protocol/endpoint.html) objects for HTTP/2 connections.
+//
+// - **http2.request(options, [callback])**:
+// - similar to http.request
+//
+// - **http2.get(options, [callback])**:
+// - similar to http.get
+//
+// - **Class: http2.ClientRequest**
+// - **Event: 'socket' (socket)**: in case of an HTTP/2 incoming message, `socket` is a reference
+// to the associated [HTTP/2 Stream](protocol/stream.html) object (and not to the TCP socket).
+// - **Event: 'push' (promise)**: signals the intention of a server push associated to this
+// request. `promise` is an IncomingPromise. If there's no listener for this event, the server
+// push is cancelled.
+// - **request.setPriority(priority)**: assign a priority to this request. `priority` is a number
+// between 0 (highest priority) and 2^31-1 (lowest priority). Default value is 2^30.
+//
+// - **Class: http2.IncomingMessage**
+// - has two subclasses for easier interface description: **IncomingRequest** and
+// **IncomingResponse**
+// - **message.socket**: in case of an HTTP/2 incoming message, it's a reference to the associated
+// [HTTP/2 Stream](protocol/stream.html) object (and not to the TCP socket).
+//
+// - **Class: http2.IncomingRequest (IncomingMessage)**
+// - **message.url**: in case of an HTTP/2 incoming request, the `url` field always contains the
+// path, and never a full url (it contains the path in most cases in the HTTPS api as well).
+// - **message.scheme**: additional field. Mandatory HTTP/2 request metadata.
+// - **message.host**: additional field. Mandatory HTTP/2 request metadata. Note that this
+// replaces the old Host header field, but node-http2 will add Host to the `message.headers` for
+// backwards compatibility.
+//
+// - **Class: http2.IncomingPromise (IncomingRequest)**
+// - contains the metadata of the 'imaginary' request to which the server push is an answer.
+// - **Event: 'response' (response)**: signals the arrival of the actual push stream. `response`
+// is an IncomingResponse.
+// - **Event: 'push' (promise)**: signals the intention of a server push associated to this
+// request. `promise` is an IncomingPromise. If there's no listener for this event, the server
+// push is cancelled.
+// - **promise.cancel()**: cancels the promised server push.
+// - **promise.setPriority(priority)**: assign a priority to this push stream. `priority` is a
+// number between 0 (highest priority) and 2^31-1 (lowest priority). Default value is 2^30.
+//
+// API elements not yet implemented
+// --------------------------------
+//
+// - **Class: http2.Server**
+// - **server.maxHeadersCount**
+//
+// API elements that are not applicable to HTTP/2
+// ----------------------------------------------
+//
+// The reason may be deprecation of certain HTTP/1.1 features, or that some API elements simply
+// don't make sense when using HTTP/2. These will not be present when a request is done with HTTP/2,
+// but will function normally when falling back to using HTTP/1.1.
+//
+// - **Class: http2.Server**
+// - **Event: 'checkContinue'**: not in the spec
+// - **Event: 'upgrade'**: upgrade is deprecated in HTTP/2
+// - **Event: 'timeout'**: HTTP/2 sockets won't timeout because of application level keepalive
+// (PING frames)
+// - **Event: 'connect'**: not yet supported
+// - **server.setTimeout(msecs, [callback])**
+// - **server.timeout**
+//
+// - **Class: http2.ServerResponse**
+// - **Event: 'close'**
+// - **Event: 'timeout'**
+// - **response.writeContinue()**
+// - **response.writeHead(statusCode, [reasonPhrase], [headers])**: reasonPhrase will always be
+// ignored since [it's not supported in HTTP/2][3]
+// - **response.setTimeout(timeout, [callback])**
+//
+// - **Class: http2.Agent**
+// - **agent.maxSockets**: only affects HTTP/1 connection pool. When using HTTP/2, there's always
+// one connection per host.
+//
+// - **Class: http2.ClientRequest**
+// - **Event: 'upgrade'**
+// - **Event: 'connect'**
+// - **Event: 'continue'**
+// - **request.setTimeout(timeout, [callback])**
+// - **request.setNoDelay([noDelay])**
+// - **request.setSocketKeepAlive([enable], [initialDelay])**
+//
+// - **Class: http2.IncomingMessage**
+// - **Event: 'close'**
+// - **message.setTimeout(timeout, [callback])**
+//
+// [1]: https://nodejs.org/api/https.html
+// [2]: https://nodejs.org/api/http.html
+// [3]: https://tools.ietf.org/html/rfc7540#section-8.1.2.4
+
+// Common server and client side code
+// ==================================
+
+var net = require('net');
+var url = require('url');
+var util = require('util');
+var EventEmitter = require('events').EventEmitter;
+var PassThrough = require('stream').PassThrough;
+var Readable = require('stream').Readable;
+var Writable = require('stream').Writable;
+var protocol = require('./protocol');
+var Endpoint = protocol.Endpoint;
+var http = require('http');
+var https = require('https');
+
+exports.STATUS_CODES = http.STATUS_CODES;
+exports.IncomingMessage = IncomingMessage;
+exports.OutgoingMessage = OutgoingMessage;
+exports.protocol = protocol;
+
+var deprecatedHeaders = [
+ 'connection',
+ 'host',
+ 'keep-alive',
+ 'proxy-connection',
+ 'transfer-encoding',
+ 'upgrade'
+];
+
+// When doing NPN/ALPN negotiation, HTTP/1.1 is used as fallback
+var supportedProtocols = [protocol.VERSION, 'http/1.1', 'http/1.0'];
+
+// Ciphersuite list based on the recommendations of https://wiki.mozilla.org/Security/Server_Side_TLS
+// The only modification is that kEDH+AESGCM were placed after DHE and ECDHE suites
+var cipherSuites = [
+ 'ECDHE-RSA-AES128-GCM-SHA256',
+ 'ECDHE-ECDSA-AES128-GCM-SHA256',
+ 'ECDHE-RSA-AES256-GCM-SHA384',
+ 'ECDHE-ECDSA-AES256-GCM-SHA384',
+ 'DHE-RSA-AES128-GCM-SHA256',
+ 'DHE-DSS-AES128-GCM-SHA256',
+ 'ECDHE-RSA-AES128-SHA256',
+ 'ECDHE-ECDSA-AES128-SHA256',
+ 'ECDHE-RSA-AES128-SHA',
+ 'ECDHE-ECDSA-AES128-SHA',
+ 'ECDHE-RSA-AES256-SHA384',
+ 'ECDHE-ECDSA-AES256-SHA384',
+ 'ECDHE-RSA-AES256-SHA',
+ 'ECDHE-ECDSA-AES256-SHA',
+ 'DHE-RSA-AES128-SHA256',
+ 'DHE-RSA-AES128-SHA',
+ 'DHE-DSS-AES128-SHA256',
+ 'DHE-RSA-AES256-SHA256',
+ 'DHE-DSS-AES256-SHA',
+ 'DHE-RSA-AES256-SHA',
+ 'kEDH+AESGCM',
+ 'AES128-GCM-SHA256',
+ 'AES256-GCM-SHA384',
+ 'ECDHE-RSA-RC4-SHA',
+ 'ECDHE-ECDSA-RC4-SHA',
+ 'AES128',
+ 'AES256',
+ 'RC4-SHA',
+ 'HIGH',
+ '!aNULL',
+ '!eNULL',
+ '!EXPORT',
+ '!DES',
+ '!3DES',
+ '!MD5',
+ '!PSK'
+].join(':');
+
+// Logging
+// -------
+
+// Logger shim, used when no logger is provided by the user.
+function noop() {}
+var defaultLogger = {
+ fatal: noop,
+ error: noop,
+ warn : noop,
+ info : noop,
+ debug: noop,
+ trace: noop,
+
+ child: function() { return this; }
+};
+
+// Bunyan serializers exported by submodules that are worth adding when creating a logger.
+exports.serializers = protocol.serializers;
+
+// IncomingMessage class
+// ---------------------
+
+function IncomingMessage(stream) {
+ // * This is basically a read-only wrapper for the [Stream](protocol/stream.html) class.
+ PassThrough.call(this);
+ stream.pipe(this);
+ this.socket = this.stream = stream;
+
+ this._log = stream._log.child({ component: 'http' });
+
+ // * HTTP/2.0 does not define a way to carry the version identifier that is included in the
+ // HTTP/1.1 request/status line. Version is always 2.0.
+ this.httpVersion = '2.0';
+ this.httpVersionMajor = 2;
+ this.httpVersionMinor = 0;
+
+ // * `this.headers` will store the regular headers (and none of the special colon headers)
+ this.headers = {};
+ this.trailers = undefined;
+ this._lastHeadersSeen = undefined;
+
+ // * Other metadata is filled in when the headers arrive.
+ stream.once('headers', this._onHeaders.bind(this));
+ stream.once('end', this._onEnd.bind(this));
+}
+IncomingMessage.prototype = Object.create(PassThrough.prototype, { constructor: { value: IncomingMessage } });
+
+// [Request Header Fields](https://tools.ietf.org/html/rfc7540#section-8.1.2.3)
+// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
+// of key-value pairs. This includes the target URI for the request, the status code for the
+// response, as well as HTTP header fields.
+IncomingMessage.prototype._onHeaders = function _onHeaders(headers) {
+ // * Detects malformed headers
+ this._validateHeaders(headers);
+
+ // * Store the _regular_ headers in `this.headers`
+ for (var name in headers) {
+ if (name[0] !== ':') {
+ if (name === 'set-cookie' && !Array.isArray(headers[name])) {
+ this.headers[name] = [headers[name]];
+ } else {
+ this.headers[name] = headers[name];
+ }
+ }
+ }
+
+ // * The last header block, if it's not the first, will represent the trailers
+ var self = this;
+ this.stream.on('headers', function(headers) {
+ self._lastHeadersSeen = headers;
+ });
+};
+
+IncomingMessage.prototype._onEnd = function _onEnd() {
+ this.trailers = this._lastHeadersSeen;
+};
+
+IncomingMessage.prototype.setTimeout = noop;
+
+IncomingMessage.prototype._checkSpecialHeader = function _checkSpecialHeader(key, value) {
+ if ((typeof value !== 'string') || (value.length === 0)) {
+ this._log.error({ key: key, value: value }, 'Invalid or missing special header field');
+ this.stream.reset('PROTOCOL_ERROR');
+ }
+
+ return value;
+};
+
+IncomingMessage.prototype._validateHeaders = function _validateHeaders(headers) {
+ // * An HTTP/2.0 request or response MUST NOT include any of the following header fields:
+ // Connection, Host, Keep-Alive, Proxy-Connection, Transfer-Encoding, and Upgrade. A server
+ // MUST treat the presence of any of these header fields as a stream error of type
+ // PROTOCOL_ERROR.
+  // If the TE header is present, its only valid value is 'trailers'
+ for (var i = 0; i < deprecatedHeaders.length; i++) {
+ var key = deprecatedHeaders[i];
+ if (key in headers || (key === 'te' && headers[key] !== 'trailers')) {
+ this._log.error({ key: key, value: headers[key] }, 'Deprecated header found');
+ this.stream.reset('PROTOCOL_ERROR');
+ return;
+ }
+ }
+
+ for (var headerName in headers) {
+ // * Empty header name field is malformed
+ if (headerName.length <= 1) {
+ this.stream.reset('PROTOCOL_ERROR');
+ return;
+ }
+ // * A request or response containing uppercase header name field names MUST be
+ // treated as malformed (Section 8.1.3.5). Implementations that detect malformed
+ // requests or responses need to ensure that the stream ends.
+ if(/[A-Z]/.test(headerName)) {
+ this.stream.reset('PROTOCOL_ERROR');
+ return;
+ }
+ }
+};
+
+// OutgoingMessage class
+// ---------------------
+
+function OutgoingMessage() {
+ // * This is basically a read-only wrapper for the [Stream](protocol/stream.html) class.
+ Writable.call(this);
+
+ this._headers = {};
+ this._trailers = undefined;
+ this.headersSent = false;
+ this.finished = false;
+
+ this.on('finish', this._finish);
+}
+OutgoingMessage.prototype = Object.create(Writable.prototype, { constructor: { value: OutgoingMessage } });
+
+OutgoingMessage.prototype._write = function _write(chunk, encoding, callback) {
+ if (this.stream) {
+ this.stream.write(chunk, encoding, callback);
+ } else {
+ this.once('socket', this._write.bind(this, chunk, encoding, callback));
+ }
+};
+
+OutgoingMessage.prototype._finish = function _finish() {
+ if (this.stream) {
+ if (this._trailers) {
+ if (this.request) {
+ this.request.addTrailers(this._trailers);
+ } else {
+ this.stream.headers(this._trailers);
+ }
+ }
+ this.finished = true;
+ this.stream.end();
+ } else {
+ this.once('socket', this._finish.bind(this));
+ }
+};
+
// Set a header; names are lower-cased. Emits 'error' (instead of throwing) if
// headers were already sent or the header is in the connection-specific
// (deprecated for HTTP/2) set.
OutgoingMessage.prototype.setHeader = function setHeader(name, value) {
  if (this.headersSent) {
    return this.emit('error', new Error('Can\'t set headers after they are sent.'));
  } else {
    name = name.toLowerCase();
    if (deprecatedHeaders.indexOf(name) !== -1) {
      return this.emit('error', new Error('Cannot set deprecated header: ' + name));
    }
    this._headers[name] = value;
  }
};

// Remove a previously set header (case-insensitive). Errors after send.
OutgoingMessage.prototype.removeHeader = function removeHeader(name) {
  if (this.headersSent) {
    return this.emit('error', new Error('Can\'t remove headers after they are sent.'));
  } else {
    delete this._headers[name.toLowerCase()];
  }
};

// Case-insensitive header lookup.
OutgoingMessage.prototype.getHeader = function getHeader(name) {
  return this._headers[name.toLowerCase()];
};

// Queue trailers; they are flushed by _finish when the message ends.
OutgoingMessage.prototype.addTrailers = function addTrailers(trailers) {
  this._trailers = trailers;
};

// Timeouts are a no-op at this level.
OutgoingMessage.prototype.setTimeout = noop;

// Pseudo-header validation logic is shared with IncomingMessage.
OutgoingMessage.prototype._checkSpecialHeader = IncomingMessage.prototype._checkSpecialHeader;
+
+// Server side
+// ===========
+
// Public server-side API surface.
exports.Server = Server;
exports.IncomingRequest = IncomingRequest;
exports.OutgoingResponse = OutgoingResponse;
exports.ServerResponse = OutgoingResponse; // for API compatibility
+
// Forward every `event` emitted on `source` to the listeners registered for
// that event on `target`. Listeners are invoked with `source` as `this`.
// Returns the forwarding function so callers can later remove it from `source`.
function forwardEvent(event, source, target) {
  function forward() {
    var listeners = target.listeners(event);
    var count = listeners.length;

    if (count === 0 && event === 'error') {
      // Nobody is listening for 'error' on the target: emit it there directly
      // so EventEmitter's default error behavior (throwing) still applies.
      var args = [event];
      args.push.apply(args, arguments);
      target.emit.apply(target, args);
      return;
    }

    var index = 0;
    while (index < count) {
      listeners[index].apply(source, arguments);
      index += 1;
    }
  }

  source.on(event, forward);

  // Hand back the listener so forwarding can be stopped later.
  return forward;
}
+
+// Server class
+// ------------
+
// HTTP/2 server. `options` may contain TLS credentials (`key` + `cert`, or
// `pfx`) for HTTP/2 over TLS with ALPN/NPN negotiation, or `plain: true` for
// HTTP/2 over cleartext TCP. Plaintext HTTP/1.1 Upgrade is not supported.
function Server(options) {
  options = util._extend({}, options);

  this._log = (options.log || defaultLogger).child({ component: 'http' });
  this._settings = options.settings;

  var start = this._start.bind(this);
  var fallback = this._fallback.bind(this);

  // HTTP2 over TLS (using NPN or ALPN)
  if ((options.key && options.cert) || options.pfx) {
    this._log.info('Creating HTTP/2 server over TLS');
    this._mode = 'tls';
    options.ALPNProtocols = supportedProtocols;
    options.NPNProtocols = supportedProtocols;
    options.ciphers = options.ciphers || cipherSuites;
    options.honorCipherOrder = (options.honorCipherOrder != false);
    this._server = https.createServer(options);
    // Stash the stock TLS connection listeners so _fallback can replay them
    // for clients that did not negotiate HTTP/2.
    this._originalSocketListeners = this._server.listeners('secureConnection');
    this._server.removeAllListeners('secureConnection');
    this._server.on('secureConnection', function(socket) {
      var negotiatedProtocol = socket.alpnProtocol || socket.npnProtocol;
      // It's true that the client MUST use SNI, but if it doesn't, we don't care, don't fall back to HTTP/1,
      // since if the ALPN negotiation is otherwise successful, the client thinks we speak HTTP/2 but we don't.
      if (negotiatedProtocol === protocol.VERSION) {
        start(socket);
      } else {
        fallback(socket);
      }
    });
    // Fallback HTTPS/1 requests are surfaced through the same 'request' event.
    this._server.on('request', this.emit.bind(this, 'request'));

    forwardEvent('error', this._server, this);
    forwardEvent('listening', this._server, this);
  }

  // HTTP2 over plain TCP
  else if (options.plain) {
    this._log.info('Creating HTTP/2 server over plain TCP');
    this._mode = 'plain';
    this._server = net.createServer(start);
  }

  // HTTP/2 with HTTP/1.1 upgrade
  else {
    this._log.error('Trying to create HTTP/2 server with Upgrade from HTTP/1.1');
    throw new Error('HTTP1.1 -> HTTP2 upgrade is not yet supported. Please provide TLS keys.');
  }

  this._server.on('close', this.emit.bind(this, 'close'));
}
Server.prototype = Object.create(EventEmitter.prototype, { constructor: { value: Server } });
+
+// Starting HTTP/2
// Bootstrap an HTTP/2 session on `socket`: create a server Endpoint, pipe it
// to the socket, and wire each new stream up as a request/response pair.
Server.prototype._start = function _start(socket) {
  var endpoint = new Endpoint(this._log, 'SERVER', this._settings);

  this._log.info({ e: endpoint,
                   client: socket.remoteAddress + ':' + socket.remotePort,
                   SNI: socket.servername
                 }, 'New incoming HTTP/2 connection');

  endpoint.pipe(socket).pipe(endpoint);

  var self = this;
  endpoint.on('stream', function _onStream(stream) {
    var response = new OutgoingResponse(stream);
    var request = new IncomingRequest(stream);

    // Some conformance to Node.js Https specs allows to distinguish clients:
    request.remoteAddress = socket.remoteAddress;
    request.remotePort = socket.remotePort;
    request.connection = request.socket = response.socket = socket;

    // 'request' is emitted only once the full header block has arrived.
    request.once('ready', self.emit.bind(self, 'request', request, response));
  });

  endpoint.on('error', this.emit.bind(this, 'clientError'));
  socket.on('error', this.emit.bind(this, 'clientError'));

  this.emit('connection', socket, endpoint);
};
+
// Handle a TLS connection that did not negotiate HTTP/2: replay the HTTPS
// server's original 'secureConnection' listeners so the socket is served as
// ordinary HTTPS/1.x.
Server.prototype._fallback = function _fallback(socket) {
  var negotiatedProtocol = socket.alpnProtocol || socket.npnProtocol;

  this._log.info({ client: socket.remoteAddress + ':' + socket.remotePort,
                   protocol: negotiatedProtocol,
                   SNI: socket.servername
                 }, 'Falling back to simple HTTPS');

  for (var i = 0; i < this._originalSocketListeners.length; i++) {
    this._originalSocketListeners[i].call(this._server, socket);
  }

  this.emit('connection', socket);
};
+
// There are [3 possible signatures][1] of the `listen` function. Every argument is forwarded to
// the backing TCP or HTTPS server.
// [1]: https://nodejs.org/api/http.html#http_server_listen_port_hostname_backlog_callback
Server.prototype.listen = function listen(port, hostname) {
  this._log.info({ on: ((typeof hostname === 'string') ? (hostname + ':' + port) : port) },
                 'Listening for incoming connections');
  this._server.listen.apply(this._server, arguments);

  return this._server;
};

// Stop accepting new connections; `callback` fires once the backing server closes.
Server.prototype.close = function close(callback) {
  this._log.info('Closing server');
  this._server.close(callback);
};

// Socket timeouts are only supported when the backing server is an HTTPS server.
Server.prototype.setTimeout = function setTimeout(timeout, callback) {
  if (this._mode === 'tls') {
    this._server.setTimeout(timeout, callback);
  }
};
+
// `timeout` mirrors `https.Server#timeout`; it is undefined in plain-TCP mode.
Object.defineProperty(Server.prototype, 'timeout', {
  get: function getTimeout() {
    if (this._mode === 'tls') {
      return this._server.timeout;
    } else {
      return undefined;
    }
  },
  set: function setTimeout(timeout) {
    if (this._mode === 'tls') {
      this._server.timeout = timeout;
    }
  }
});
+
// Overriding `EventEmitter`'s `on(event, listener)` method to forward certain subscriptions to
// `server`. There are events on the `http.Server` class where it makes a difference whether
// someone is listening on the event or not. In these cases, we can not simply forward the events
// from the `server` to `this` since that means a listener. Instead, we forward the subscriptions.
Server.prototype.on = function on(event, listener) {
  if ((event === 'upgrade') || (event === 'timeout')) {
    // Bind the listener so its `this` is the http2 Server, not the backing server.
    return this._server.on(event, listener && listener.bind(this));
  } else {
    return EventEmitter.prototype.on.call(this, event, listener);
  }
};

// `addContext` is used to add Server Name Indication contexts
// (a no-op unless running over TLS).
Server.prototype.addContext = function addContext(hostname, credentials) {
  if (this._mode === 'tls') {
    this._server.addContext(hostname, credentials);
  }
};
+
// Return the bound address info of the backing TCP/TLS server, with the same
// contract as `net.Server.prototype.address()`.
Server.prototype.address = function address() {
  // The original relied on ASI here; the statement is now explicitly terminated.
  return this._server.address();
};
+
// Create an HTTP/2 server that runs over cleartext TCP ("plain" mode).
// TLS credential options are rejected: they cannot apply without TLS.
// Signatures: createServerRaw([options][, requestListener])
function createServerRaw(options, requestListener) {
  if (typeof options === 'function') {
    // Only a listener was given.
    requestListener = options;
    options = {};
  }

  if (options.pfx || (options.key && options.cert)) {
    throw new Error('options.pfx, options.key, and options.cert are nonsensical!');
  }

  options.plain = true;

  var server = new Server(options);
  if (requestListener) {
    server.on('request', requestListener);
  }

  return server;
}
+
// Create an HTTP/2 server over TLS. TLS credentials (`pfx`, or `key` + `cert`)
// are mandatory. Signature: createServerTLS(options[, requestListener])
function createServerTLS(options, requestListener) {
  if (typeof options === 'function') {
    throw new Error('options are required!');
  }

  var hasCredentials = options.pfx || (options.key && options.cert);
  if (!hasCredentials) {
    throw new Error('options.pfx or options.key and options.cert are required!');
  }

  options.plain = false;

  var server = new Server(options);
  if (requestListener) {
    server.on('request', requestListener);
  }

  return server;
}
+
// Exposed main interfaces for HTTPS connections (the default)
exports.https = {};
exports.createServer = exports.https.createServer = createServerTLS;
exports.request = exports.https.request = requestTLS;
exports.get = exports.https.get = getTLS;

// Exposed main interfaces for raw TCP connections (not recommended)
exports.raw = {};
exports.raw.createServer = createServerRaw;
exports.raw.request = requestRaw;
exports.raw.get = getRaw;
+
+// Exposed main interfaces for HTTP plaintext upgrade connections (not implemented)
// Placeholder for the HTTP/1.1 Upgrade bootstrap, which node-http2 does not
// implement; any attempt to use the plaintext-upgrade API fails loudly.
function notImplemented() {
  throw new Error('HTTP UPGRADE is not implemented!');
}
+
// Every entry point of the plaintext-upgrade API throws via notImplemented.
exports.http = {};
exports.http.createServer = exports.http.request = exports.http.get = notImplemented;
+
+// IncomingRequest class
+// ---------------------
+
// Server-side incoming message (a request). Thin subclass of IncomingMessage
// that adds request pseudo-header handling in _onHeaders.
function IncomingRequest(stream) {
  IncomingMessage.call(this, stream);
}
IncomingRequest.prototype = Object.create(IncomingMessage.prototype, { constructor: { value: IncomingRequest } });
+
+// [Request Header Fields](https://tools.ietf.org/html/rfc7540#section-8.1.2.3)
+// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
+// of key-value pairs. This includes the target URI for the request, the status code for the
+// response, as well as HTTP header fields.
// Parse the request pseudo-headers, then delegate regular-header handling to
// IncomingMessage and announce readiness via the 'ready' event.
IncomingRequest.prototype._onHeaders = function _onHeaders(headers) {
  // * The ":method" header field includes the HTTP method
  // * The ":scheme" header field includes the scheme portion of the target URI
  // * The ":authority" header field includes the authority portion of the target URI
  // * The ":path" header field includes the path and query parts of the target URI.
  // This field MUST NOT be empty; URIs that do not contain a path component MUST include a value
  // of '/', unless the request is an OPTIONS request for '*', in which case the ":path" header
  // field MUST include '*'.
  // * All HTTP/2.0 requests MUST include exactly one valid value for all of these header fields. A
  // server MUST treat the absence of any of these header fields, presence of multiple values, or
  // an invalid value as a stream error of type PROTOCOL_ERROR.
  this.method = this._checkSpecialHeader(':method' , headers[':method']);
  this.scheme = this._checkSpecialHeader(':scheme' , headers[':scheme']);
  this.host = this._checkSpecialHeader(':authority', headers[':authority'] );
  this.url = this._checkSpecialHeader(':path' , headers[':path'] );
  if (!this.method || !this.scheme || !this.host || !this.url) {
    // This is invalid, and we've sent a RST_STREAM, so don't continue processing
    return;
  }

  // * Host header is included in the headers object for backwards compatibility.
  this.headers.host = this.host;

  // * Handling regular headers.
  IncomingMessage.prototype._onHeaders.call(this, headers);

  // * Signaling that the headers arrived.
  this._log.info({ method: this.method, scheme: this.scheme, host: this.host,
                   path: this.url, headers: this.headers }, 'Incoming request');
  this.emit('ready');
};
+
+// OutgoingResponse class
+// ----------------------
+
// Server-side outgoing message (a response), bound to its HTTP/2 `stream`
// immediately. Defaults: status 200 and an automatic Date header.
function OutgoingResponse(stream) {
  OutgoingMessage.call(this);

  this._log = stream._log.child({ component: 'http' });

  this.stream = stream;
  this.statusCode = 200;
  this.sendDate = true;

  // Remember the request's header block; push() needs its :scheme/:authority.
  this.stream.once('headers', this._onRequestHeaders.bind(this));
}
OutgoingResponse.prototype = Object.create(OutgoingMessage.prototype, { constructor: { value: OutgoingResponse } });
+
// Send the response header block. Mirrors http.ServerResponse#writeHead:
// writeHead(statusCode[, reasonPhrase][, headers]). A reason phrase is
// accepted but ignored (HTTP/2 has none). No-op if headers were already sent.
OutgoingResponse.prototype.writeHead = function writeHead(statusCode, reasonPhrase, headers) {
  if (this.headersSent) {
    return;
  }

  if (typeof reasonPhrase === 'string') {
    this._log.warn('Reason phrase argument was present but ignored by the writeHead method');
  } else {
    // Two-argument form: the second argument is actually the headers object.
    headers = reasonPhrase;
  }

  // Route through setHeader for lower-casing and deprecated-header rejection.
  for (var name in headers) {
    this.setHeader(name, headers[name]);
  }
  headers = this._headers;

  if (this.sendDate && !('date' in this._headers)) {
    headers.date = (new Date()).toUTCString();
  }

  this._log.info({ status: statusCode, headers: this._headers }, 'Sending server response');

  headers[':status'] = this.statusCode = statusCode;

  this.stream.headers(headers);
  this.headersSent = true;
};
+
// Flush default headers if the user never called writeHead explicitly.
OutgoingResponse.prototype._implicitHeaders = function _implicitHeaders() {
  if (!this.headersSent) {
    this.writeHead(this.statusCode);
  }
};

// Alias for code written against Node's http internals, which use the
// singular name.
OutgoingResponse.prototype._implicitHeader = function() {
  this._implicitHeaders();
};

// Writing body data implies that the headers must be sent first.
OutgoingResponse.prototype.write = function write() {
  this._implicitHeaders();
  return OutgoingMessage.prototype.write.apply(this, arguments);
};
+
// End the response body; headers are flushed implicitly if writeHead was
// never called, then the Writable end() machinery takes over.
OutgoingResponse.prototype.end = function end() {
  // Fix: the original set `this.finshed` (typo), leaving a dead property and
  // never marking `finished` here. `finished` is the flag the rest of the
  // OutgoingMessage machinery uses.
  this.finished = true;
  this._implicitHeaders();
  return OutgoingMessage.prototype.end.apply(this, arguments);
};
+
// Capture the request's header block when it arrives (used by push() below).
OutgoingResponse.prototype._onRequestHeaders = function _onRequestHeaders(headers) {
  this._requestHeaders = headers;
};

// Initiate a server push. `options` is a URL string or an options object; a
// `path` is mandatory, and missing scheme/authority default to those of the
// original request. Returns an OutgoingResponse for the promised stream.
OutgoingResponse.prototype.push = function push(options) {
  if (typeof options === 'string') {
    options = url.parse(options);
  }

  if (!options.path) {
    throw new Error('`path` option is mandatory.');
  }

  var promise = util._extend({
    ':method': (options.method || 'GET').toUpperCase(),
    ':scheme': (options.protocol && options.protocol.slice(0, -1)) || this._requestHeaders[':scheme'],
    ':authority': options.hostname || options.host || this._requestHeaders[':authority'],
    ':path': options.path
  }, options.headers);

  this._log.info({ method: promise[':method'], scheme: promise[':scheme'],
                   authority: promise[':authority'], path: promise[':path'],
                   headers: options.headers }, 'Promising push stream');

  var pushStream = this.stream.promise(promise);

  return new OutgoingResponse(pushStream);
};
+
// Advertise an alternative service for this stream (ALTSVC frame); an
// unspecified origin defaults to the empty string.
OutgoingResponse.prototype.altsvc = function altsvc(host, port, protocolID, maxAge, origin) {
  if (origin === undefined) {
    origin = "";
  }
  this.stream.altsvc(host, port, protocolID, maxAge, origin);
};

// Overriding `EventEmitter`'s `on(event, listener)` method to forward certain subscriptions to
// `request`. See `Server.prototype.on` for explanation.
OutgoingResponse.prototype.on = function on(event, listener) {
  if (this.request && (event === 'timeout')) {
    this.request.on(event, listener && listener.bind(this));
  } else {
    OutgoingMessage.prototype.on.call(this, event, listener);
  }
};
+
+// Client side
+// ===========
+
// Public client-side API surface.
exports.ClientRequest = OutgoingRequest; // for API compatibility
exports.OutgoingRequest = OutgoingRequest;
exports.IncomingResponse = IncomingResponse;
exports.Agent = Agent;
// Replaced with a real Agent instance once the class is defined below.
exports.globalAgent = undefined;
+
// Issue a cleartext-TCP ("plain") HTTP/2 request. Accepts a URL string or an
// options object; delegates to a caller-supplied agent when one is given.
function requestRaw(options, callback) {
  if (typeof options === "string") {
    options = url.parse(options);
  }
  options.plain = true;

  if (options.protocol && options.protocol !== "http:") {
    throw new Error('This interface only supports http-schemed URLs');
  }

  var agent = options.agent;
  if (agent && typeof agent.request === 'function') {
    // Strip the agent option before delegating, to avoid recursion.
    var agentOptions = util._extend({}, options);
    delete agentOptions.agent;
    return agent.request(agentOptions, callback);
  }

  return exports.globalAgent.request(options, callback);
}
+
// Issue a TLS HTTP/2 request. Accepts a URL string or an options object;
// delegates to a caller-supplied agent when one is given.
function requestTLS(options, callback) {
  if (typeof options === "string") {
    options = url.parse(options);
  }
  options.plain = false;

  if (options.protocol && options.protocol !== "https:") {
    throw new Error('This interface only supports https-schemed URLs');
  }

  var agent = options.agent;
  if (agent && typeof agent.request === 'function') {
    // Strip the agent option before delegating, to avoid recursion.
    var agentOptions = util._extend({}, options);
    delete agentOptions.agent;
    return agent.request(agentOptions, callback);
  }

  return exports.globalAgent.request(options, callback);
}
+
// Issue a cleartext-TCP ("plain") HTTP/2 GET request (the agent's `get`
// variant ends the request body immediately).
function getRaw(options, callback) {
  if (typeof options === "string") {
    options = url.parse(options);
  }
  options.plain = true;

  if (options.protocol && options.protocol !== "http:") {
    throw new Error('This interface only supports http-schemed URLs');
  }

  var agent = options.agent;
  if (agent && typeof agent.get === 'function') {
    // Strip the agent option before delegating, to avoid recursion.
    var agentOptions = util._extend({}, options);
    delete agentOptions.agent;
    return agent.get(agentOptions, callback);
  }

  return exports.globalAgent.get(options, callback);
}
+
// Issue a TLS HTTP/2 GET request (the agent's `get` variant ends the request
// body immediately).
function getTLS(options, callback) {
  if (typeof options === "string") {
    options = url.parse(options);
  }
  options.plain = false;

  if (options.protocol && options.protocol !== "https:") {
    throw new Error('This interface only supports https-schemed URLs');
  }

  var agent = options.agent;
  if (agent && typeof agent.get === 'function') {
    // Strip the agent option before delegating, to avoid recursion.
    var agentOptions = util._extend({}, options);
    delete agentOptions.agent;
    return agent.get(agentOptions, callback);
  }

  return exports.globalAgent.get(options, callback);
}
+
+// Agent class
+// -----------
+
// HTTP/2 client agent. Caches negotiated HTTP/2 endpoints in `this.endpoints`
// (keyed by plain/host/port) and keeps a dedicated https.Agent for fallback.
function Agent(options) {
  EventEmitter.call(this);
  // Connection outcomes are broadcast via per-key events and many requests
  // may be waiting at once, so disable the listener-count warning.
  this.setMaxListeners(0);

  options = util._extend({}, options);

  this._settings = options.settings;
  this._log = (options.log || defaultLogger).child({ component: 'http' });
  this.endpoints = {};

  // * Using an own HTTPS agent, because the global agent does not look at `NPN/ALPNProtocols` when
  // generating the key identifying the connection, so we may get useless non-negotiated TLS
  // channels even if we ask for a negotiated one. This agent will contain only negotiated
  // channels.
  options.ALPNProtocols = supportedProtocols;
  options.NPNProtocols = supportedProtocols;
  this._httpsAgent = new https.Agent(options);

  this.sockets = this._httpsAgent.sockets;
  this.requests = this._httpsAgent.requests;
}
Agent.prototype = Object.create(EventEmitter.prototype, { constructor: { value: Agent } });
+
// Issue a request. Endpoints are cached per (plain, host, port) key: an
// existing HTTP/2 connection is reused; otherwise a new one is established
// over plain TCP or via TLS ALPN/NPN negotiation with fallback to HTTPS/1.
// Returns the OutgoingRequest immediately; it starts once a transport exists.
Agent.prototype.request = function request(options, callback) {
  if (typeof options === 'string') {
    options = url.parse(options);
  } else {
    // Defensive copy: the options object is mutated below.
    options = util._extend({}, options);
  }

  options.method = (options.method || 'GET').toUpperCase();
  options.protocol = options.protocol || 'https:';
  options.host = options.hostname || options.host || 'localhost';
  options.port = options.port || 443;
  options.path = options.path || '/';

  if (!options.plain && options.protocol === 'http:') {
    this._log.error('Trying to negotiate client request with Upgrade from HTTP/1.1');
    this.emit('error', new Error('HTTP1.1 -> HTTP2 upgrade is not yet supported.'));
  }

  var request = new OutgoingRequest(this._log);

  if (callback) {
    request.on('response', callback);
  }

  // Cache/broadcast key identifying the logical connection.
  var key = [
    !!options.plain,
    options.host,
    options.port
  ].join(':');
  var self = this;

  // * There's an existing HTTP/2 connection to this host
  if (key in this.endpoints) {
    var endpoint = this.endpoints[key];
    request._start(endpoint.createStream(), options);
  }

  // * HTTP/2 over plain TCP
  else if (options.plain) {
    endpoint = new Endpoint(this._log, 'CLIENT', this._settings);
    endpoint.socket = net.connect({
      host: options.host,
      port: options.port,
      localAddress: options.localAddress
    });

    endpoint.socket.on('error', function (error) {
      self._log.error('Socket error: ' + error.toString());
      request.emit('error', error);
    });

    endpoint.on('error', function(error){
      self._log.error('Connection error: ' + error.toString());
      request.emit('error', error);
    });

    this.endpoints[key] = endpoint;
    endpoint.pipe(endpoint.socket).pipe(endpoint);
    request._start(endpoint.createStream(), options);
  }

  // * HTTP/2 over TLS negotiated using NPN or ALPN, or fallback to HTTPS1
  else {
    var started = false;
    // TLS options that affect connection identity force a dedicated agent.
    var createAgent = hasAgentOptions(options);
    options.ALPNProtocols = supportedProtocols;
    options.NPNProtocols = supportedProtocols;
    options.servername = options.host; // Server Name Indication
    options.ciphers = options.ciphers || cipherSuites;
    if (createAgent) {
      options.agent = new https.Agent(options);
    } else if (options.agent == null) {
      options.agent = this._httpsAgent;
    }
    var httpsRequest = https.request(options);

    httpsRequest.on('error', function (error) {
      self._log.error('Socket error: ' + error.toString());
      self.removeAllListeners(key);
      request.emit('error', error);
    });

    httpsRequest.on('socket', function(socket) {
      var negotiatedProtocol = socket.alpnProtocol || socket.npnProtocol;
      if (negotiatedProtocol != null) { // null in >=0.11.0, undefined in <0.11.0
        negotiated();
      } else {
        socket.on('secureConnect', negotiated);
      }
    });

    // Runs once TLS negotiation settles: decides HTTP/2 vs HTTPS/1 and
    // announces the outcome (via the per-key event) to every waiting request.
    function negotiated() {
      var endpoint;
      var negotiatedProtocol = httpsRequest.socket.alpnProtocol || httpsRequest.socket.npnProtocol;
      if (negotiatedProtocol === protocol.VERSION) {
        httpsRequest.socket.emit('agentRemove');
        unbundleSocket(httpsRequest.socket);
        endpoint = new Endpoint(self._log, 'CLIENT', self._settings);
        endpoint.socket = httpsRequest.socket;
        endpoint.pipe(endpoint.socket).pipe(endpoint);
      }
      if (started) {
        // ** In the meantime, an other connection was made to the same host...
        if (endpoint) {
          // *** and it turned out to be HTTP2 and the request was multiplexed on that one, so we should close this one
          endpoint.close();
        }
        // *** otherwise, the fallback to HTTPS1 is already done.
      } else {
        if (endpoint) {
          self._log.info({ e: endpoint, server: options.host + ':' + options.port },
                         'New outgoing HTTP/2 connection');
          self.endpoints[key] = endpoint;
          self.emit(key, endpoint);
        } else {
          self.emit(key, undefined);
        }
      }
    }

    this.once(key, function(endpoint) {
      started = true;
      if (endpoint) {
        request._start(endpoint.createStream(), options);
      } else {
        request._fallback(httpsRequest);
      }
    });
  }

  return request;
};
+
// Convenience wrapper: issue a GET request and immediately end the request
// body, mirroring Node's http `get` helper.
Agent.prototype.get = function get(options, callback) {
  var request = this.request(options, callback);
  request.end();
  return request;
};

// Tear down the agent: destroy the fallback HTTPS agent and close every
// cached HTTP/2 endpoint with the given error.
Agent.prototype.destroy = function(error) {
  if (this._httpsAgent) {
    this._httpsAgent.destroy();
  }
  var endpoints = this.endpoints;
  Object.keys(endpoints).forEach(function (key) {
    endpoints[key].close(error);
  });
};
+
// Detach a socket from the HTTPS machinery so its raw data can be piped into
// an HTTP/2 endpoint instead: drop all data-flow listeners, break existing
// pipes, and clear the legacy stream handlers.
function unbundleSocket(socket) {
  var events = ['data', 'end', 'readable', 'close', 'error'];
  events.forEach(function (name) {
    socket.removeAllListeners(name);
  });
  socket.unpipe();
  delete socket.ondata;
  delete socket.onend;
}
+
// True if `options` carries any TLS setting that affects connection identity,
// in which case Agent.request must build a dedicated https.Agent rather than
// reuse the shared one.
function hasAgentOptions(options) {
  var agentKeys = ['pfx', 'key', 'passphrase', 'cert', 'ca', 'ciphers',
                   'rejectUnauthorized', 'secureProtocol'];
  return agentKeys.some(function (name) {
    // Loose != matches the original: both null and undefined count as unset.
    return options[name] != null;
  });
}
+
// Proxy `maxSockets` through to the backing HTTPS agent.
Object.defineProperty(Agent.prototype, 'maxSockets', {
  get: function getMaxSockets() {
    return this._httpsAgent.maxSockets;
  },
  set: function setMaxSockets(value) {
    this._httpsAgent.maxSockets = value;
  }
});

// Default agent used by the module-level request/get helpers.
exports.globalAgent = new Agent();
+
+// OutgoingRequest class
+// ---------------------
+
// Client-side outgoing message (a request). `_log` and `stream` are attached
// later by `_start` (HTTP/2 path) or `_fallback` (HTTPS/1 path).
function OutgoingRequest() {
  OutgoingMessage.call(this);

  this._log = undefined;

  this.stream = undefined;
}
OutgoingRequest.prototype = Object.create(OutgoingMessage.prototype, { constructor: { value: OutgoingRequest } });
+
// Attach the request to a freshly created HTTP/2 stream and send its header
// block. `options` is the normalized options object built by Agent.request.
OutgoingRequest.prototype._start = function _start(stream, options) {
  this.stream = stream;
  this.options = options;

  this._log = stream._log.child({ component: 'http' });

  // Copy caller-supplied headers through setHeader so deprecated headers are
  // rejected and names are lower-cased.
  for (var key in options.headers) {
    this.setHeader(key, options.headers[key]);
  }
  var headers = this._headers;
  // The HTTP/1 Host header is carried by the :authority pseudo-header instead.
  delete headers.host;

  if (options.auth) {
    // Buffer.from replaces the deprecated (and security-flagged) `new Buffer`
    // constructor; for a string argument the behavior is identical.
    headers.authorization = 'Basic ' + Buffer.from(options.auth).toString('base64');
  }

  headers[':scheme'] = options.protocol.slice(0, -1);
  headers[':method'] = options.method;
  headers[':authority'] = options.host;
  headers[':path'] = options.path;

  this._log.info({ scheme: headers[':scheme'], method: headers[':method'],
                   authority: headers[':authority'], path: headers[':path'],
                   headers: (options.headers || {}) }, 'Sending request');
  this.stream.headers(headers);
  this.headersSent = true;

  // The stream doubles as the 'socket' for API compatibility; this also
  // releases any writes/calls queued while no transport was attached.
  this.emit('socket', this.stream);
  var response = new IncomingResponse(this.stream);
  response.req = this;
  response.once('ready', this.emit.bind(this, 'response', response));

  this.stream.on('promise', this._onPromise.bind(this));
};
+
// Fall back to a plain HTTPS/1 request: forward its 'response' event and pose
// the http.ClientRequest as both `stream` and `request`.
OutgoingRequest.prototype._fallback = function _fallback(request) {
  request.on('response', this.emit.bind(this, 'response'));
  this.stream = this.request = request;
  // NOTE(review): `this.socket` is not assigned on this path, so the event
  // argument is undefined here — confirm listeners rely only on the event.
  this.emit('socket', this.socket);
};
+
// Reprioritize the request's HTTP/2 stream, deferring until one is attached.
OutgoingRequest.prototype.setPriority = function setPriority(priority) {
  if (this.stream) {
    this.stream.priority(priority);
  } else {
    this.once('socket', this.setPriority.bind(this, priority));
  }
};

// Overriding `EventEmitter`'s `on(event, listener)` method to forward certain subscriptions to
// `request`. See `Server.prototype.on` for explanation.
OutgoingRequest.prototype.on = function on(event, listener) {
  if (this.request && (event === 'upgrade')) {
    this.request.on(event, listener && listener.bind(this));
  } else {
    OutgoingMessage.prototype.on.call(this, event, listener);
  }
};

// Methods only in fallback mode
// In HTTP/2 mode (this.stream set) these TCP socket options do not apply, so
// the calls are dropped; before negotiation completes they are deferred.
OutgoingRequest.prototype.setNoDelay = function setNoDelay(noDelay) {
  if (this.request) {
    this.request.setNoDelay(noDelay);
  } else if (!this.stream) {
    this.on('socket', this.setNoDelay.bind(this, noDelay));
  }
};

OutgoingRequest.prototype.setSocketKeepAlive = function setSocketKeepAlive(enable, initialDelay) {
  if (this.request) {
    this.request.setSocketKeepAlive(enable, initialDelay);
  } else if (!this.stream) {
    this.on('socket', this.setSocketKeepAlive.bind(this, enable, initialDelay));
  }
};

OutgoingRequest.prototype.setTimeout = function setTimeout(timeout, callback) {
  if (this.request) {
    this.request.setTimeout(timeout, callback);
  } else if (!this.stream) {
    this.on('socket', this.setTimeout.bind(this, timeout, callback));
  }
};
+
// Aborting the request
// In HTTP/1 fallback mode abort the underlying request; in HTTP/2 mode reset
// the stream with CANCEL; before either exists, defer the abort.
OutgoingRequest.prototype.abort = function abort() {
  if (this.request) {
    this.request.abort();
  } else if (this.stream) {
    this.stream.reset('CANCEL');
  } else {
    this.on('socket', this.abort.bind(this));
  }
};

// Receiving push promises
// Promises are surfaced via the 'push' event; with no listener attached they
// are cancelled immediately to save bandwidth.
OutgoingRequest.prototype._onPromise = function _onPromise(stream, headers) {
  this._log.info({ push_stream: stream.id }, 'Receiving push promise');

  var promise = new IncomingPromise(stream, headers);

  if (this.listeners('push').length > 0) {
    this.emit('push', promise);
  } else {
    promise.cancel();
  }
};
+
+// IncomingResponse class
+// ----------------------
+
// Client-side incoming message (a response). Thin subclass of IncomingMessage
// that adds :status pseudo-header handling in _onHeaders.
function IncomingResponse(stream) {
  IncomingMessage.call(this, stream);
}
IncomingResponse.prototype = Object.create(IncomingMessage.prototype, { constructor: { value: IncomingResponse } });
+
+// [Response Header Fields](https://tools.ietf.org/html/rfc7540#section-8.1.2.4)
+// * `headers` argument: HTTP/2.0 request and response header fields carry information as a series
+// of key-value pairs. This includes the target URI for the request, the status code for the
+// response, as well as HTTP header fields.
// Parse the response :status pseudo-header, then delegate regular-header
// handling to IncomingMessage and announce readiness via the 'ready' event.
IncomingResponse.prototype._onHeaders = function _onHeaders(headers) {
  // * A single ":status" header field is defined that carries the HTTP status code field. This
  // header field MUST be included in all responses.
  // * A client MUST treat the absence of the ":status" header field, the presence of multiple
  // values, or an invalid value as a stream error of type PROTOCOL_ERROR.
  // Note: currently, we do not enforce it strictly: we accept any format, and parse it as int
  // * HTTP/2.0 does not define a way to carry the reason phrase that is included in an HTTP/1.1
  // status line.
  // Explicit radix 10: never rely on parseInt's input-dependent base detection.
  // NOTE(review): if :status was missing/invalid, _checkSpecialHeader (defined
  // on IncomingMessage, outside this view) presumably resets the stream and
  // statusCode becomes NaN — confirm against that implementation.
  this.statusCode = parseInt(this._checkSpecialHeader(':status', headers[':status']), 10);

  // * Handling regular headers.
  IncomingMessage.prototype._onHeaders.call(this, headers);

  // * Signaling that the headers arrived.
  this._log.info({ status: this.statusCode, headers: this.headers}, 'Incoming response');
  this.emit('ready');
};
+
+// IncomingPromise class
+// -------------------------
+
// A push promise received by the client. It behaves like a completed,
// body-less IncomingRequest (carrying the promised request's headers) and
// emits 'response' when the pushed response arrives on `responseStream`.
function IncomingPromise(responseStream, promiseHeaders) {
  // Back the IncomingRequest base with an empty, already-ended readable:
  // a promised request never carries a body.
  var stream = new Readable();
  stream._read = noop;
  stream.push(null);
  stream._log = responseStream._log;

  IncomingRequest.call(this, stream);

  this._onHeaders(promiseHeaders);

  this._responseStream = responseStream;

  var response = new IncomingResponse(this._responseStream);
  response.once('ready', this.emit.bind(this, 'response', response));

  this.stream.on('promise', this._onPromise.bind(this));
}
IncomingPromise.prototype = Object.create(IncomingRequest.prototype, { constructor: { value: IncomingPromise } });

// Decline the promised stream.
IncomingPromise.prototype.cancel = function cancel() {
  this._responseStream.reset('CANCEL');
};

// Reprioritize the promised stream.
IncomingPromise.prototype.setPriority = function setPriority(priority) {
  this._responseStream.priority(priority);
};

// Nested push promises are handled the same way as on a regular request.
IncomingPromise.prototype._onPromise = OutgoingRequest.prototype._onPromise;
diff --git a/testing/xpcshell/node-http2/lib/index.js b/testing/xpcshell/node-http2/lib/index.js
new file mode 100644
index 0000000000..c67883defe
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/index.js
@@ -0,0 +1,52 @@
+// [node-http2][homepage] is an [HTTP/2][http2] implementation for [node.js][node].
+//
+// The core of the protocol is implemented in the protocol sub-directory. This directory provides
+// two important features on top of the protocol:
+//
+// * Implementation of different negotiation schemes that can be used to start a HTTP2 connection.
+// These include TLS ALPN, Upgrade and Plain TCP.
+//
+// * Providing an API very similar to the standard node.js [HTTPS module API][node-https]
+// (which is in turn very similar to the [HTTP module API][node-http]).
+//
+// [homepage]: https://github.com/molnarg/node-http2
+// [http2]: https://tools.ietf.org/html/rfc7540
+// [node]: https://nodejs.org/
+// [node-https]: https://nodejs.org/api/https.html
+// [node-http]: https://nodejs.org/api/http.html
+
// The package's public API is the node-https-like layer defined in http.js.
module.exports = require('./http');
+
+/*
+ HTTP API
+
+ | ^
+ | |
+ +-------------|------------|------------------------------------------------------+
+ | | | Server/Agent |
+ | v | |
+ | +----------+ +----------+ |
+ | | Outgoing | | Incoming | |
+ | | req/res. | | req/res. | |
+ | +----------+ +----------+ |
+ | | ^ |
+ | | | |
+ | +---------|------------|-------------------------------------+ +----- |
+ | | | | Endpoint | | |
+ | | | | | | |
+ | | v | | | |
+ | | +-----------------------+ +-------------------- | | |
+ | | | Stream | | Stream ... | | |
+ | | +-----------------------+ +-------------------- | | |
+ | | | | |
+ | +------------------------------------------------------------+ +----- |
+ | | | |
+ | | | |
+ | v | |
+ | +------------------------------------------------------------+ +----- |
+ | | TCP stream | | ... |
+ | +------------------------------------------------------------+ +----- |
+ | |
+ +---------------------------------------------------------------------------------+
+
+*/
diff --git a/testing/xpcshell/node-http2/lib/protocol/compressor.js b/testing/xpcshell/node-http2/lib/protocol/compressor.js
new file mode 100644
index 0000000000..3923a91073
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/compressor.js
@@ -0,0 +1,1366 @@
+// The implementation of the [HTTP/2 Header Compression][http2-compression] spec is separated from
+// the 'integration' part which handles HEADERS and PUSH_PROMISE frames. The compression itself is
+// implemented in the first part of the file, and consists of three classes: `HeaderTable`,
+// `HeaderSetDecompressor` and `HeaderSetCompressor`. The two latter classes are
+// [Transform Stream][node-transform] subclasses that operate in [object mode][node-objectmode].
+// These transform chunks of binary data into `[name, value]` pairs and vice versa, and store their
+// state in `HeaderTable` instances.
+//
+// The 'integration' part is also implemented by two [Transform Stream][node-transform] subclasses
+// that operate in [object mode][node-objectmode]: the `Compressor` and the `Decompressor`. These
+// provide a layer between the [framer](framer.html) and the
+// [connection handling component](connection.html).
+//
+// [node-transform]: https://nodejs.org/api/stream.html#stream_class_stream_transform
+// [node-objectmode]: https://nodejs.org/api/stream.html#stream_new_stream_readable_options
+// [http2-compression]: https://tools.ietf.org/html/rfc7541
+
// Public interface of this module: the compression primitives (the combined
// static/dynamic header table, the Huffman table, and the per-header-set
// transform streams) plus the frame-level `Compressor`/`Decompressor` streams
// that sit between the framer and the connection handling component.
exports.HeaderTable = HeaderTable;
exports.HuffmanTable = HuffmanTable;
exports.HeaderSetCompressor = HeaderSetCompressor;
exports.HeaderSetDecompressor = HeaderSetDecompressor;
exports.Compressor = Compressor;
exports.Decompressor = Decompressor;

var TransformStream = require('stream').Transform;
var assert = require('assert');
var util = require('util');
+
+// Header compression
+// ==================
+
+// The HeaderTable class
+// ---------------------
+
+// The [Header Table] is a component used to associate headers to index values. It is basically an
+// ordered list of `[name, value]` pairs, so it's implemented as a subclass of `Array`.
+// In this implementation, the Header Table and the [Static Table] are handled as a single table.
+// [Header Table]: https://tools.ietf.org/html/rfc7541#section-2.3.2
+// [Static Table]: https://tools.ietf.org/html/rfc7541#section-2.3.1
// Creates the combined Static + Header (dynamic) Table: an Array of
// `[name, value]` entries (static entries first) with bookkeeping fields and
// methods attached directly onto the array instance.
//
// * `log`: logger instance (stored for later use)
// * `limit`: optional table size limit in octets; falls back to
//   DEFAULT_HEADER_TABLE_LIMIT when absent
function HeaderTable(log, limit) {
  var self = HeaderTable.staticTable.map(entryFromPair);
  self._log = log;
  self._limit = limit || DEFAULT_HEADER_TABLE_LIMIT;
  self._staticLength = self.length;
  self._size = 0;
  self._enforceLimit = HeaderTable.prototype._enforceLimit;
  self.add = HeaderTable.prototype.add;
  self.setSizeLimit = HeaderTable.prototype.setSizeLimit;
  return self;
}

// Converts a `[name, value]` pair to a table entry by caching its RFC 7541
// entry size on the copied pair.
function entryFromPair(pair) {
  var entry = pair.slice();
  entry._size = size(entry);
  return entry;
}

// The encoder decides how to update the header table and as such can control how much memory is
// used by the header table. To limit the memory requirements on the decoder side, the header table
// size is bounded.
//
// * The default header table size limit is 4096 bytes.
// * The size of an entry is defined as follows: the size of an entry is the sum of its name's
//   length in bytes, of its value's length in bytes and of 32 bytes.
// * The size of a header table is the sum of the size of its entries.
var DEFAULT_HEADER_TABLE_LIMIT = 4096;

// RFC 7541 section 4.1: entry size = octet length of the name + octet length
// of the value + 32. `Buffer.byteLength` measures the UTF-8 length directly,
// avoiding the deprecated `new Buffer(...)` constructor and a throwaway
// allocation on every size computation.
function size(entry) {
  return Buffer.byteLength(entry[0] + entry[1], 'utf8') + 32;
}

// The `add(entry)` method can be used to [manage the header table][tablemgmt]:
// [tablemgmt]: https://tools.ietf.org/html/rfc7541#section-4
//
// * it pushes the new `entry` at the beginning of the dynamic part of the table
// * before doing such a modification, it has to be ensured that the header table size will stay
//   lower than or equal to the header table size limit. To achieve this, entries are evicted from
//   the end of the header table until the size of the header table is less than or equal to
//   `(this._limit - entry.size)`, or until the table is empty.
//
//              <---------- Index Address Space ---------->
//              <-- Static  Table -->  <-- Header  Table -->
//              +---+-----------+---+  +---+-----------+---+
//              | 0 |    ...    | k |  |k+1|    ...    | n |
//              +---+-----------+---+  +---+-----------+---+
//                                     ^                   |
//                                     |                   V
//                              Insertion Point       Drop Point

// Evicts entries from the end of the table until `_size` drops to `limit` (or
// the dynamic part is empty); returns the dropped entries in table order.
HeaderTable.prototype._enforceLimit = function _enforceLimit(limit) {
  var droppedEntries = [];
  while ((this._size > 0) && (this._size > limit)) {
    var dropped = this.pop();
    this._size -= dropped._size;
    droppedEntries.unshift(dropped);
  }
  return droppedEntries;
};

HeaderTable.prototype.add = function(entry) {
  var limit = this._limit - entry._size;
  var droppedEntries = this._enforceLimit(limit);

  // An entry larger than the limit is not inserted at all: eviction above may
  // have emptied the dynamic table without making enough room for it.
  if (this._size <= limit) {
    this.splice(this._staticLength, 0, entry);
    this._size += entry._size;
  }

  return droppedEntries;
};

// The table size limit can be changed externally. In this case, the same eviction algorithm is used
HeaderTable.prototype.setSizeLimit = function setSizeLimit(limit) {
  this._limit = limit;
  this._enforceLimit(this._limit);
};
+
+// [The Static Table](https://tools.ietf.org/html/rfc7541#section-2.3.1)
+// ------------------
+
+// The table is generated with feeding the table from the spec to the following sed command:
+//
+// sed -re "s/\s*\| [0-9]+\s*\| ([^ ]*)/ [ '\1'/g" -e "s/\|\s([^ ]*)/, '\1'/g" -e 's/ \|/],/g'
+
// The 61 static entries of RFC 7541 Appendix A, in spec order. Indexes here
// are 0-based; `HeaderSetCompressor.header`/`HeaderSetDecompressor.header`
// convert to the 1-based wire indexes.
HeaderTable.staticTable = [
  [ ':authority'                  , ''            ],
  [ ':method'                     , 'GET'         ],
  [ ':method'                     , 'POST'        ],
  [ ':path'                       , '/'           ],
  [ ':path'                       , '/index.html' ],
  [ ':scheme'                     , 'http'        ],
  [ ':scheme'                     , 'https'       ],
  [ ':status'                     , '200'         ],
  [ ':status'                     , '204'         ],
  [ ':status'                     , '206'         ],
  [ ':status'                     , '304'         ],
  [ ':status'                     , '400'         ],
  [ ':status'                     , '404'         ],
  [ ':status'                     , '500'         ],
  [ 'accept-charset'              , ''            ],
  [ 'accept-encoding'             , 'gzip, deflate'],
  [ 'accept-language'             , ''            ],
  [ 'accept-ranges'               , ''            ],
  [ 'accept'                      , ''            ],
  [ 'access-control-allow-origin' , ''            ],
  [ 'age'                         , ''            ],
  [ 'allow'                       , ''            ],
  [ 'authorization'               , ''            ],
  [ 'cache-control'               , ''            ],
  [ 'content-disposition'         , ''            ],
  [ 'content-encoding'            , ''            ],
  [ 'content-language'            , ''            ],
  [ 'content-length'              , ''            ],
  [ 'content-location'            , ''            ],
  [ 'content-range'               , ''            ],
  [ 'content-type'                , ''            ],
  [ 'cookie'                      , ''            ],
  [ 'date'                        , ''            ],
  [ 'etag'                        , ''            ],
  [ 'expect'                      , ''            ],
  [ 'expires'                     , ''            ],
  [ 'from'                        , ''            ],
  [ 'host'                        , ''            ],
  [ 'if-match'                    , ''            ],
  [ 'if-modified-since'           , ''            ],
  [ 'if-none-match'               , ''            ],
  [ 'if-range'                    , ''            ],
  [ 'if-unmodified-since'         , ''            ],
  [ 'last-modified'               , ''            ],
  [ 'link'                        , ''            ],
  [ 'location'                    , ''            ],
  [ 'max-forwards'                , ''            ],
  [ 'proxy-authenticate'          , ''            ],
  [ 'proxy-authorization'         , ''            ],
  [ 'range'                       , ''            ],
  [ 'referer'                     , ''            ],
  [ 'refresh'                     , ''            ],
  [ 'retry-after'                 , ''            ],
  [ 'server'                      , ''            ],
  [ 'set-cookie'                  , ''            ],
  [ 'strict-transport-security'   , ''            ],
  [ 'transfer-encoding'           , ''            ],
  [ 'user-agent'                  , ''            ],
  [ 'vary'                        , ''            ],
  [ 'via'                         , ''            ],
  [ 'www-authenticate'            , ''            ]
];
+
+// The HeaderSetDecompressor class
+// -------------------------------
+
+// A `HeaderSetDecompressor` instance is a transform stream that can be used to *decompress a
+// single header set*. Its input is a stream of binary data chunks and its output is a stream of
+// `[name, value]` pairs.
+//
// Currently, it is not a proper streaming decompressor implementation, since it buffers its input
// until the end of the stream, and then processes the whole header block at once.
+
util.inherits(HeaderSetDecompressor, TransformStream);

// Object-mode transform stream decompressing *one* header set: binary chunks
// go in, `[name, value]` pairs come out. Input chunks are merely buffered
// here; the actual decoding happens in `_flush` once the input stream ends.
function HeaderSetDecompressor(log, table) {
  TransformStream.call(this, { objectMode: true });

  this._log = log.child({ component: 'compressor' });
  this._table = table;
  this._chunks = [];
}

// Part of the TransformStream contract: collect each incoming chunk for the
// deferred processing in `_flush`.
// https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
HeaderSetDecompressor.prototype._transform = function _transform(chunk, encoding, callback) {
  this._chunks.push(chunk);
  callback();
};

// `_execute(rep)` applies one [header representation][representation] to the
// table and emits the resulting header pair (if any).
// [representation]: https://tools.ietf.org/html/rfc7541#section-6
//
// The *JavaScript object representation* of a header representation:
//
//     {
//       name: String || Integer,  // string literal or index
//       value: String || Integer, // string literal or index
//       index: Boolean            // with or without indexing
//     }
//
// *Important:* to ease the indexing of the header table, indexes start at 0 instead of 1.
//
// Examples:
//
//     Indexed:
//     { name: 2  , value: 2  , index: false }
//     Literal:
//     { name: 2  , value: 'X', index: false } // without indexing
//     { name: 2  , value: 'Y', index: true  } // with indexing
//     { name: 'A', value: 'Z', index: true  } // with indexing, literal name
HeaderSetDecompressor.prototype._execute = function _execute(rep) {
  this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
                  'Executing header representation');

  // A context update only resizes the table; no header is emitted.
  if (rep.contextUpdate) {
    this._table.setSizeLimit(rep.newMaxSize);
    return;
  }

  // An indexed representation: emit a copy of the referenced table entry.
  if (typeof rep.value === 'number') {
    this.push(this._table[rep.value].slice());
    return;
  }

  // A literal representation: the name is either a table index or a literal
  // string. Literals "with incremental indexing" are also inserted into the
  // table before being emitted.
  var name = (typeof rep.name === 'number') ? this._table[rep.name][0] : rep.name;
  var emitted = [name, rep.value];

  if (rep.index) {
    this._table.add(entryFromPair(emitted));
  }

  this.push(emitted);
};

// Part of the TransformStream contract: the whole decompression runs here,
// once the input stream has ended.
// https://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetDecompressor.prototype._flush = function _flush(callback) {
  var buffer = concat(this._chunks);

  // Decode header representations one by one until the block is exhausted.
  buffer.cursor = 0;
  while (buffer.cursor < buffer.length) {
    this._execute(HeaderSetDecompressor.header(buffer));
  }

  callback();
};
+
+// The HeaderSetCompressor class
+// -----------------------------
+
+// A `HeaderSetCompressor` instance is a transform stream that can be used to *compress a single
+// header set*. Its input is a stream of `[name, value]` pairs and its output is a stream of
+// binary data chunks.
+//
+// It is a real streaming compressor, since it does not wait until the header set is complete.
+//
+// The compression algorithm is (intentionally) not specified by the spec. Therefore, the current
+// compression algorithm can probably be improved in the future.
+
util.inherits(HeaderSetCompressor, TransformStream);

// Object-mode transform stream compressing a *single* header set:
// `[name, value]` pairs go in, binary chunks come out. Unlike the
// decompressor, this is a true streaming component: each pair is encoded as
// soon as it arrives.
function HeaderSetCompressor(log, table) {
  TransformStream.call(this, { objectMode: true });

  this._log = log.child({ component: 'compressor' });
  this._table = table;
  // Pre-bound so `send` can hand it directly to forEach as a callback.
  this.push = TransformStream.prototype.push.bind(this);
}

// Serializes one header representation (lazily, unless `rep.chunks` is
// already populated) and pushes the resulting buffers downstream.
HeaderSetCompressor.prototype.send = function send(rep) {
  this._log.trace({ key: rep.name, value: rep.value, index: rep.index },
                  'Emitting header representation');

  if (!rep.chunks) {
    rep.chunks = HeaderSetCompressor.header(rep);
  }
  rep.chunks.forEach(this.push);
};

// Part of the TransformStream contract: encodes one `[name, value]` pair.
// https://nodejs.org/api/stream.html#stream_transform_transform_chunk_encoding_callback
HeaderSetCompressor.prototype._transform = function _transform(pair, encoding, callback) {
  var name = pair[0].toLowerCase();
  var value = pair[1];
  var entry, rep;

  // Look for a full (name, value) match in the table, remembering the first
  // name-only match as a fallback.
  var nameMatch = -1, fullMatch = -1;
  for (var i = 0; i < this._table.length; i++) {
    entry = this._table[i];
    if (entry[0] === name) {
      if (entry[1] === value) {
        fullMatch = i;
        break;
      } else if (nameMatch === -1) {
        nameMatch = i;
      }
    }
  }

  // Short cookie/set-cookie values and authorization headers are kept out of
  // the compression context entirely (never-indexed literals), presumably to
  // limit compression-based secret recovery attacks on these headers.
  var mustNeverIndex = ((name === 'cookie' && value.length < 20) ||
                        (name === 'set-cookie' && value.length < 20) ||
                        name === 'authorization');

  if (fullMatch !== -1 && !mustNeverIndex) {
    // Full match: a compact indexed representation suffices.
    this.send({ name: fullMatch, value: fullMatch, index: false });
  }

  // Otherwise a literal representation is used (with a name index if there
  // was a name match).
  else {
    entry = entryFromPair(pair);

    // Only index entries that are small relative to the table limit, so that
    // a single huge header cannot evict the whole table.
    var indexing = (entry._size < this._table._limit / 2) && !mustNeverIndex;

    if (indexing) {
      this._table.add(entry);
    }

    this.send({ name: (nameMatch !== -1) ? nameMatch : name, value: value, index: indexing,
                mustNeverIndex: mustNeverIndex, contextUpdate: false });
  }

  callback();
};

// Part of the TransformStream contract: everything was already emitted from
// `_transform`, so there is no final step.
// https://nodejs.org/api/stream.html#stream_transform_flush_callback
HeaderSetCompressor.prototype._flush = function _flush(callback) {
  callback();
};

// [Detailed Format](https://tools.ietf.org/html/rfc7541#section-5)
// -----------------

// ### Integer representation ###
//
// The algorithm to represent an integer I is as follows:
//
// 1. If I < 2^N - 1, encode I on N bits
// 2. Else, encode 2^N - 1 on N bits and do the following steps:
//    1. Set I to (I - (2^N - 1)) and Q to 1
//    2. While Q > 0
//       1. Compute Q and R, quotient and remainder of I divided by 2^7
//       2. If Q is strictly greater than 0, write one 1 bit; otherwise, write one 0 bit
//       3. Encode R on the next 7 bits
//       4. I = Q

// Writes integer `I` with an `N`-bit prefix (RFC 7541 section 5.1). Returns
// an array containing a single buffer (callers collect these arrays).
HeaderSetCompressor.integer = function writeInteger(I, N) {
  var limit = Math.pow(2, N) - 1;
  if (I < limit) {
    // `Buffer.from` replaces the deprecated `new Buffer(...)` constructor.
    return [Buffer.from([I])];
  }

  var bytes = [];
  if (N !== 0) {
    bytes.push(limit);
  }
  I -= limit;

  var Q = 1, R;
  while (Q > 0) {
    Q = Math.floor(I / 128);
    R = I % 128;

    // The most significant bit of each continuation byte signals that more
    // bytes follow.
    if (Q > 0) {
      R += 128;
    }
    bytes.push(R);

    I = Q;
  }

  return [Buffer.from(bytes)];
};
+
+// The inverse algorithm:
+//
+// 1. Set I to the number coded on the lower N bits of the first byte
// 2. If I is smaller than 2^N - 1 then return I
// 3. Else the number is encoded on more than one byte, so do the following steps:
+// 1. Set M to 0
+// 2. While returning with I
+// 1. Let B be the next byte (the first byte if N is 0)
+// 2. Read out the lower 7 bits of B and multiply it with 2^M
+// 3. Increase I with this number
+// 4. Increase M by 7
+// 5. Return I if the most significant bit of B is 0
+
// Reads an `N`-bit prefix coded integer from `buffer`, starting at
// `buffer.cursor`, and advances the cursor past every consumed byte.
HeaderSetDecompressor.integer = function readInteger(buffer, N) {
  var limit = Math.pow(2, N) - 1;

  // The first (partial) byte carries the low N bits. When N is 0 the value
  // starts on the next whole byte, so the prefix byte is not consumed here.
  var value = buffer[buffer.cursor] & limit;
  if (N !== 0) {
    buffer.cursor += 1;
  }

  // A saturated prefix means continuation bytes follow: 7 value bits each,
  // least significant group first, MSB set while more bytes remain.
  if (value === limit) {
    var shift = 0;
    var byte;
    do {
      byte = buffer[buffer.cursor];
      buffer.cursor += 1;
      value += (byte & 127) << shift;
      shift += 7;
    } while (byte & 128);
  }

  return value;
};
+
+// ### Huffman Encoding ###
+
// Builds the lookup structures for a canonical Huffman code, given as an
// array of bit strings (array index = symbol, string = that symbol's code).
//
// * `tree`: nested binary arrays used for decoding; a leaf is a one-element
//   array holding the symbol index.
// * `codes` / `lengths`: per-symbol integer code and bit length, used for
//   encoding.
function HuffmanTable(table) {
  function createTree(codes, position) {
    if (codes.length === 1) {
      return [table.indexOf(codes[0])];
    }

    else {
      position = position || 0;
      var zero = [];
      var one = [];
      for (var i = 0; i < codes.length; i++) {
        var string = codes[i];
        if (string[position] === '0') {
          zero.push(string);
        } else {
          one.push(string);
        }
      }
      return [createTree(zero, position + 1), createTree(one, position + 1)];
    }
  }

  this.tree = createTree(table);

  this.codes = table.map(function(bits) {
    return parseInt(bits, 2);
  });
  this.lengths = table.map(function(bits) {
    return bits.length;
  });
}

// Huffman-encodes `buffer` symbol by symbol, packing codes MSB-first. A final
// partial byte is padded with the most significant bits of the code at index
// 256 (the EOS symbol in the HPACK table), per RFC 7541 section 5.2.
HuffmanTable.prototype.encode = function encode(buffer) {
  var result = [];
  var space = 8;  // free bits remaining in the current output byte

  function add(data) {
    if (space === 8) {
      result.push(data);
    } else {
      result[result.length - 1] |= data;
    }
  }

  for (var i = 0; i < buffer.length; i++) {
    var byte = buffer[i];
    var code = this.codes[byte];
    var length = this.lengths[byte];

    while (length !== 0) {
      if (space >= length) {
        // The rest of the code fits into the current byte.
        add(code << (space - length));
        code = 0;
        space -= length;
        length = 0;
      } else {
        // Emit as many of the code's most significant bits as fit.
        var shift = length - space;
        var msb = code >> shift;
        add(msb);
        code -= msb << shift;
        length -= space;
        space = 0;
      }

      if (space === 0) {
        space = 8;
      }
    }
  }

  if (space !== 8) {
    add(this.codes[256] >> (this.lengths[256] - space));
  }

  // `Buffer.from` replaces the deprecated `new Buffer(...)` constructor.
  return Buffer.from(result);
};

// Decodes a Huffman-encoded `buffer` by walking the code tree bit by bit.
// Trailing padding bits never complete a code path, so they are dropped
// naturally.
HuffmanTable.prototype.decode = function decode(buffer) {
  var result = [];
  var subtree = this.tree;

  for (var i = 0; i < buffer.length; i++) {
    var byte = buffer[i];

    for (var j = 0; j < 8; j++) {
      var bit = (byte & 128) ? 1 : 0;
      byte = byte << 1;

      subtree = subtree[bit];
      if (subtree.length === 1) {
        result.push(subtree[0]);
        subtree = this.tree;
      }
    }
  }

  return Buffer.from(result);
};
+
+// The initializer arrays for the Huffman tables are generated with feeding the tables from the
+// spec to this sed command:
+//
+// sed -e "s/^.* [|]//g" -e "s/|//g" -e "s/ .*//g" -e "s/^/ '/g" -e "s/$/',/g"
+
// The HPACK Huffman code of RFC 7541 Appendix B: 257 entries — one code per
// byte value 0-255, plus the EOS symbol at index 256 (used by `encode` for
// padding the final partial byte).
HuffmanTable.huffmanTable = new HuffmanTable([
  '1111111111000',
  '11111111111111111011000',
  '1111111111111111111111100010',
  '1111111111111111111111100011',
  '1111111111111111111111100100',
  '1111111111111111111111100101',
  '1111111111111111111111100110',
  '1111111111111111111111100111',
  '1111111111111111111111101000',
  '111111111111111111101010',
  '111111111111111111111111111100',
  '1111111111111111111111101001',
  '1111111111111111111111101010',
  '111111111111111111111111111101',
  '1111111111111111111111101011',
  '1111111111111111111111101100',
  '1111111111111111111111101101',
  '1111111111111111111111101110',
  '1111111111111111111111101111',
  '1111111111111111111111110000',
  '1111111111111111111111110001',
  '1111111111111111111111110010',
  '111111111111111111111111111110',
  '1111111111111111111111110011',
  '1111111111111111111111110100',
  '1111111111111111111111110101',
  '1111111111111111111111110110',
  '1111111111111111111111110111',
  '1111111111111111111111111000',
  '1111111111111111111111111001',
  '1111111111111111111111111010',
  '1111111111111111111111111011',
  '010100',
  '1111111000',
  '1111111001',
  '111111111010',
  '1111111111001',
  '010101',
  '11111000',
  '11111111010',
  '1111111010',
  '1111111011',
  '11111001',
  '11111111011',
  '11111010',
  '010110',
  '010111',
  '011000',
  '00000',
  '00001',
  '00010',
  '011001',
  '011010',
  '011011',
  '011100',
  '011101',
  '011110',
  '011111',
  '1011100',
  '11111011',
  '111111111111100',
  '100000',
  '111111111011',
  '1111111100',
  '1111111111010',
  '100001',
  '1011101',
  '1011110',
  '1011111',
  '1100000',
  '1100001',
  '1100010',
  '1100011',
  '1100100',
  '1100101',
  '1100110',
  '1100111',
  '1101000',
  '1101001',
  '1101010',
  '1101011',
  '1101100',
  '1101101',
  '1101110',
  '1101111',
  '1110000',
  '1110001',
  '1110010',
  '11111100',
  '1110011',
  '11111101',
  '1111111111011',
  '1111111111111110000',
  '1111111111100',
  '11111111111100',
  '100010',
  '111111111111101',
  '00011',
  '100011',
  '00100',
  '100100',
  '00101',
  '100101',
  '100110',
  '100111',
  '00110',
  '1110100',
  '1110101',
  '101000',
  '101001',
  '101010',
  '00111',
  '101011',
  '1110110',
  '101100',
  '01000',
  '01001',
  '101101',
  '1110111',
  '1111000',
  '1111001',
  '1111010',
  '1111011',
  '111111111111110',
  '11111111100',
  '11111111111101',
  '1111111111101',
  '1111111111111111111111111100',
  '11111111111111100110',
  '1111111111111111010010',
  '11111111111111100111',
  '11111111111111101000',
  '1111111111111111010011',
  '1111111111111111010100',
  '1111111111111111010101',
  '11111111111111111011001',
  '1111111111111111010110',
  '11111111111111111011010',
  '11111111111111111011011',
  '11111111111111111011100',
  '11111111111111111011101',
  '11111111111111111011110',
  '111111111111111111101011',
  '11111111111111111011111',
  '111111111111111111101100',
  '111111111111111111101101',
  '1111111111111111010111',
  '11111111111111111100000',
  '111111111111111111101110',
  '11111111111111111100001',
  '11111111111111111100010',
  '11111111111111111100011',
  '11111111111111111100100',
  '111111111111111011100',
  '1111111111111111011000',
  '11111111111111111100101',
  '1111111111111111011001',
  '11111111111111111100110',
  '11111111111111111100111',
  '111111111111111111101111',
  '1111111111111111011010',
  '111111111111111011101',
  '11111111111111101001',
  '1111111111111111011011',
  '1111111111111111011100',
  '11111111111111111101000',
  '11111111111111111101001',
  '111111111111111011110',
  '11111111111111111101010',
  '1111111111111111011101',
  '1111111111111111011110',
  '111111111111111111110000',
  '111111111111111011111',
  '1111111111111111011111',
  '11111111111111111101011',
  '11111111111111111101100',
  '111111111111111100000',
  '111111111111111100001',
  '1111111111111111100000',
  '111111111111111100010',
  '11111111111111111101101',
  '1111111111111111100001',
  '11111111111111111101110',
  '11111111111111111101111',
  '11111111111111101010',
  '1111111111111111100010',
  '1111111111111111100011',
  '1111111111111111100100',
  '11111111111111111110000',
  '1111111111111111100101',
  '1111111111111111100110',
  '11111111111111111110001',
  '11111111111111111111100000',
  '11111111111111111111100001',
  '11111111111111101011',
  '1111111111111110001',
  '1111111111111111100111',
  '11111111111111111110010',
  '1111111111111111101000',
  '1111111111111111111101100',
  '11111111111111111111100010',
  '11111111111111111111100011',
  '11111111111111111111100100',
  '111111111111111111111011110',
  '111111111111111111111011111',
  '11111111111111111111100101',
  '111111111111111111110001',
  '1111111111111111111101101',
  '1111111111111110010',
  '111111111111111100011',
  '11111111111111111111100110',
  '111111111111111111111100000',
  '111111111111111111111100001',
  '11111111111111111111100111',
  '111111111111111111111100010',
  '111111111111111111110010',
  '111111111111111100100',
  '111111111111111100101',
  '11111111111111111111101000',
  '11111111111111111111101001',
  '1111111111111111111111111101',
  '111111111111111111111100011',
  '111111111111111111111100100',
  '111111111111111111111100101',
  '11111111111111101100',
  '111111111111111111110011',
  '11111111111111101101',
  '111111111111111100110',
  '1111111111111111101001',
  '111111111111111100111',
  '111111111111111101000',
  '11111111111111111110011',
  '1111111111111111101010',
  '1111111111111111101011',
  '1111111111111111111101110',
  '1111111111111111111101111',
  '111111111111111111110100',
  '111111111111111111110101',
  '11111111111111111111101010',
  '11111111111111111110100',
  '11111111111111111111101011',
  '111111111111111111111100110',
  '11111111111111111111101100',
  '11111111111111111111101101',
  '111111111111111111111100111',
  '111111111111111111111101000',
  '111111111111111111111101001',
  '111111111111111111111101010',
  '111111111111111111111101011',
  '1111111111111111111111111110',
  '111111111111111111111101100',
  '111111111111111111111101101',
  '111111111111111111111101110',
  '111111111111111111111101111',
  '111111111111111111111110000',
  '11111111111111111111101110',
  '111111111111111111111111111111'
]);
+
+// ### String literal representation ###
+//
+// Literal **strings** can represent header names or header values. There's two variant of the
+// string encoding:
+//
+// String literal with Huffman encoding:
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 1 | Value Length Prefix (7) |
+// +---+---+---+---+---+---+---+---+
+// | Value Length (0-N bytes) |
+// +---+---+---+---+---+---+---+---+
+// ...
+// +---+---+---+---+---+---+---+---+
+// | Huffman Encoded Data |Padding|
+// +---+---+---+---+---+---+---+---+
+//
+// String literal without Huffman encoding:
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | Value Length Prefix (7) |
+// +---+---+---+---+---+---+---+---+
+// | Value Length (0-N bytes) |
+// +---+---+---+---+---+---+---+---+
+// ...
+// +---+---+---+---+---+---+---+---+
+// | Field Bytes Without Encoding |
+// +---+---+---+---+---+---+---+---+
+
// Serializes a string literal (RFC 7541 section 5.2). The shorter of the raw
// UTF-8 form and the Huffman-encoded form is used; the H bit (0x80) on the
// length prefix signals Huffman encoding. Returns an array of buffers.
HeaderSetCompressor.string = function writeString(str) {
  // `Buffer.from` replaces the deprecated `new Buffer(str, 'utf8')` form;
  // keeping the parameter untouched also avoids reusing `str` for two types.
  var raw = Buffer.from(str, 'utf8');

  var huffman = HuffmanTable.huffmanTable.encode(raw);
  if (huffman.length < raw.length) {
    var huffmanLength = HeaderSetCompressor.integer(huffman.length, 7);
    huffmanLength[0][0] |= 128;
    return huffmanLength.concat(huffman);
  }

  // Raw form wins ties, matching the `<` comparison above.
  var rawLength = HeaderSetCompressor.integer(raw.length, 7);
  return rawLength.concat(raw);
};
+
// Reads a length-prefixed string literal from `buffer` at `buffer.cursor`,
// Huffman-decoding it when the H bit of the length prefix is set, and
// advances the cursor past the literal.
HeaderSetDecompressor.string = function readString(buffer) {
  var isHuffman = (buffer[buffer.cursor] & 128) !== 0;
  var length = HeaderSetDecompressor.integer(buffer, 7);
  var bytes = buffer.slice(buffer.cursor, buffer.cursor + length);
  buffer.cursor += length;

  if (isHuffman) {
    bytes = HuffmanTable.huffmanTable.decode(bytes);
  }
  return bytes.toString('utf8');
};
+
+// ### Header represenations ###
+
+// The JavaScript object representation is described near the
+// `HeaderSetDecompressor.prototype._execute()` method definition.
+//
+// **All binary header representations** start with a prefix signaling the representation type and
+// an index represented using prefix coded integers:
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 1 | Index (7+) | Indexed Representation
+// +---+---------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 1 | Index (6+) |
+// +---+---+---+-------------------+ Literal w/ Indexing
+// | Value Length (8+) |
+// +-------------------------------+ w/ Indexed Name
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 1 | 0 |
+// +---+---+---+-------------------+
+// | Name Length (8+) |
+// +-------------------------------+ Literal w/ Indexing
+// | Name String (Length octets) |
+// +-------------------------------+ w/ New Name
+// | Value Length (8+) |
+// +-------------------------------+
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 0 | Index (4+) |
+// +---+---+---+-------------------+ Literal w/o Incremental Indexing
+// | Value Length (8+) |
+// +-------------------------------+ w/ Indexed Name
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 0 | 0 |
+// +---+---+---+-------------------+
+// | Name Length (8+) |
+// +-------------------------------+ Literal w/o Incremental Indexing
+// | Name String (Length octets) |
+// +-------------------------------+ w/ New Name
+// | Value Length (8+) |
+// +-------------------------------+
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 1 | Index (4+) |
+// +---+---+---+-------------------+ Literal never indexed
+// | Value Length (8+) |
+// +-------------------------------+ w/ Indexed Name
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// 0 1 2 3 4 5 6 7
+// +---+---+---+---+---+---+---+---+
+// | 0 | 0 | 0 | 1 | 0 |
+// +---+---+---+-------------------+
+// | Name Length (8+) |
+// +-------------------------------+ Literal never indexed
+// | Name String (Length octets) |
+// +-------------------------------+ w/ New Name
+// | Value Length (8+) |
+// +-------------------------------+
+// | Value String (Length octets) |
+// +-------------------------------+
+//
+// The **Indexed Representation** consists of the 1-bit prefix and the Index that is represented as
+// a 7-bit prefix coded integer and nothing else.
+//
+// After the first bits, **all literal representations** specify the header name, either as a
+// pointer to the Header Table (Index) or a string literal. When the string literal representation
+// is used, the Index is set to 0 and the string literal starts at the second byte.
+//
+// For **all literal representations**, the specification of the header value comes next. It is
+// always represented as a string.
+
// Representation type lookup: `prefix` is the bit width of the prefix coded
// integer that follows (RFC 7541 section 5.1), and `pattern` is the fixed
// high-order bit pattern identifying the representation in its first byte.
var representations = {
  indexed             : { prefix: 7, pattern: 0x80 },
  literalIncremental  : { prefix: 6, pattern: 0x40 },
  contextUpdate       : { prefix: 0, pattern: 0x20 },
  literalNeverIndexed : { prefix: 4, pattern: 0x10 },
  literal             : { prefix: 4, pattern: 0x00 }
};
+
// Serializes one header representation (see the diagrams above) into a flat
// array of buffers. The input object format is documented next to
// `HeaderSetDecompressor.prototype._execute()`.
HeaderSetCompressor.header = function writeHeader(header) {
  var buffers = [];

  // Select the representation type from the header object's properties.
  var representation;
  if (header.contextUpdate) {
    representation = representations.contextUpdate;
  } else if (typeof header.value === 'number') {
    representation = representations.indexed;
  } else if (header.index) {
    representation = representations.literalIncremental;
  } else if (header.mustNeverIndex) {
    representation = representations.literalNeverIndexed;
  } else {
    representation = representations.literal;
  }

  if (representation === representations.contextUpdate) {
    buffers.push(HeaderSetCompressor.integer(header.newMaxSize, 5));
  } else if (representation === representations.indexed) {
    // Indexes are 0-based internally but 1-based on the wire.
    buffers.push(HeaderSetCompressor.integer(header.value + 1, representation.prefix));
  } else {
    if (typeof header.name === 'number') {
      buffers.push(HeaderSetCompressor.integer(header.name + 1, representation.prefix));
    } else {
      // A zero index signals that a literal name string follows.
      buffers.push(HeaderSetCompressor.integer(0, representation.prefix));
      buffers.push(HeaderSetCompressor.string(header.name));
    }
    buffers.push(HeaderSetCompressor.string(header.value));
  }

  // Stamp the representation's fixed bit pattern onto the very first byte.
  buffers[0][0][0] |= representation.pattern;

  // Array of arrays of buffers -> flat array of buffers.
  return Array.prototype.concat.apply([], buffers);
};
+
// Parses one binary header representation from `buffer` at `buffer.cursor`
// into the JavaScript object format documented next to
// `HeaderSetDecompressor.prototype._execute()`, advancing the cursor.
HeaderSetDecompressor.header = function readHeader(buffer) {
  // Defaults; overwritten below depending on the representation type.
  var header = {
    value: -1,
    name: -1,
    index: false,
    contextUpdate: false,
    newMaxSize: 0,
    mustNeverIndex: false
  };

  // The representation type is signalled by the high-order bits of the first
  // byte, checked from the most to the least specific pattern.
  var firstByte = buffer[buffer.cursor];
  var representation;
  if (firstByte & 0x80) {
    representation = representations.indexed;
  } else if (firstByte & 0x40) {
    representation = representations.literalIncremental;
  } else if (firstByte & 0x20) {
    representation = representations.contextUpdate;
  } else if (firstByte & 0x10) {
    representation = representations.literalNeverIndexed;
  } else {
    representation = representations.literal;
  }

  if (representation === representations.contextUpdate) {
    header.contextUpdate = true;
    header.newMaxSize = HeaderSetDecompressor.integer(buffer, 5);
    return header;
  }

  if (representation === representations.indexed) {
    // Wire indexes are 1-based; convert to the 0-based internal convention.
    header.value = header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
    return header;
  }

  // Literal representations: a wire index of 0 (-1 after conversion) means
  // the name is given as a string literal right after the prefix.
  header.name = HeaderSetDecompressor.integer(buffer, representation.prefix) - 1;
  if (header.name === -1) {
    header.name = HeaderSetDecompressor.string(buffer);
  }
  header.value = HeaderSetDecompressor.string(buffer);
  header.index = (representation === representations.literalIncremental);
  header.mustNeverIndex = (representation === representations.literalNeverIndexed);

  return header;
};
+
+// Integration with HTTP/2
+// =======================
+
+// This section describes the interaction between the compressor/decompressor and the rest of the
+// HTTP/2 implementation. The `Compressor` and the `Decompressor` makes up a layer between the
+// [framer](framer.html) and the [connection handling component](connection.html). They let most
+// frames pass through, except HEADERS and PUSH_PROMISE frames. They convert the frames between
+// these two representations:
+//
+// { {
+// type: 'HEADERS', type: 'HEADERS',
+// flags: {}, flags: {},
+// stream: 1, <===> stream: 1,
+// headers: { data: Buffer
+// N1: 'V1', }
+// N2: ['V1', 'V2', ...],
+// // ...
+// }
+// }
+//
// There are possibly several binary frames that belong to a single non-binary frame.
+
// Largest payload a single emitted frame may carry; `Compressor._transform` cuts header
// blocks into chunks of at most this size.
var MAX_HTTP_PAYLOAD_SIZE = 16384;
+
+// The Compressor class
+// --------------------
+
// The Compressor transform stream is basically stateless.
util.inherits(Compressor, TransformStream);

// Creates a compressor for one direction of a connection.
//
// * `log`: parent bunyan logger; a child logger tagged 'compressor' is created from it
// * `type`: 'REQUEST' or 'RESPONSE'; only validated by the assert below, not otherwise used here
function Compressor(log, type) {
  TransformStream.call(this, { objectMode: true });

  this._log = log.child({ component: 'compressor' });

  assert((type === 'REQUEST') || (type === 'RESPONSE'));
  this._table = new HeaderTable(this._log);

  // State for signaling header table size changes at the start of the next header block
  // (see `setTableSizeLimit` and `compress`).
  this.tableSizeChangePending = false;
  this.lowestTableSizePending = 0;
  this.tableSizeSetting = DEFAULT_HEADER_TABLE_LIMIT;
}
+
// Changes the header table size limit. The smallest limit in effect since the last emitted
// header block is tracked alongside the final setting, so that `compress` can signal both
// through context updates.
Compressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
  this._table.setSizeLimit(size);
  var noChangeQueued = !this.tableSizeChangePending;
  if (noChangeQueued || (size < this.lowestTableSizePending)) {
    this.lowestTableSizePending = size;
  }
  this.tableSizeSetting = size;
  this.tableSizeChangePending = true;
};
+
// `compress` takes a header set, and compresses it using a new `HeaderSetCompressor` stream
// instance. This means that from now on, the advantages of streaming header encoding are lost,
// but the API becomes simpler.
//
// `headers` maps header names to a value or an array of values; the return value is a single
// buffer holding the complete encoded header block.
Compressor.prototype.compress = function compress(headers) {
  var compressor = new HeaderSetCompressor(this._log, this._table);

  // A pending table size change must be signaled at the start of the next header block; if the
  // limit temporarily dipped below the final setting, the lowest value is signaled first
  // (see `setTableSizeLimit`).
  if (this.tableSizeChangePending) {
    if (this.lowestTableSizePending < this.tableSizeSetting) {
      compressor.send({contextUpdate: true, newMaxSize: this.lowestTableSizePending,
                       name: "", value: "", index: 0});
    }
    compressor.send({contextUpdate: true, newMaxSize: this.tableSizeSetting,
                     name: "", value: "", index: 0});
    this.tableSizeChangePending = false;
  }
  var colonHeaders = [];
  var nonColonHeaders = [];

  // To ensure we send colon headers first
  for (var name in headers) {
    if (name.trim()[0] === ':') {
      colonHeaders.push(name);
    } else {
      nonColonHeaders.push(name);
    }
  }

  function compressHeader(name) {
    var value = headers[name];
    // Names are encoded lowercase.
    name = String(name).toLowerCase();

    // * To allow for better compression efficiency, the Cookie header field MAY be split into
    //   separate header fields, each with one or more cookie-pairs.
    if (name == 'cookie') {
      if (!(value instanceof Array)) {
        value = [value];
      }
      value = Array.prototype.concat.apply([], value.map(function(cookie) {
        return String(cookie).split(';').map(trim);
      }));
    }

    // A multi-valued header is written as one name/value pair per value.
    if (value instanceof Array) {
      for (var i = 0; i < value.length; i++) {
        compressor.write([name, String(value[i])]);
      }
    } else {
      compressor.write([name, String(value)]);
    }
  }

  colonHeaders.forEach(compressHeader);
  nonColonHeaders.forEach(compressHeader);

  compressor.end();

  // Drain the compressor synchronously and concatenate its output into one buffer.
  var chunk, chunks = [];
  while (chunk = compressor.read()) {
    chunks.push(chunk);
  }
  return concat(chunks);
};
+
// When a `frame` arrives
Compressor.prototype._transform = function _transform(frame, encoding, done) {
  // * and it is a HEADERS or PUSH_PROMISE frame
  // * it generates a header block using the compress method
  // * cuts the header block into `chunks` that are not larger than `MAX_HTTP_PAYLOAD_SIZE`
  // * for each chunk, it pushes out a chunk frame that is identical to the original, except
  //   the `data` property which holds the given chunk, the type of the frame which is always
  //   CONTINUATION except for the first frame, and the END_HEADERS/END_PUSH_STREAM flag that
  //   marks the last frame and the END_STREAM flag which is always false before the end
  if (frame.type === 'HEADERS' || frame.type === 'PUSH_PROMISE') {
    var buffer = this.compress(frame.headers);

    // This will result in CONTINUATIONs from a PUSH_PROMISE being 4 bytes shorter than they could
    // be, but that's not the end of the world, and it prevents us from going over MAX_HTTP_PAYLOAD_SIZE
    // on the initial PUSH_PROMISE frame.
    var adjustment = frame.type === 'PUSH_PROMISE' ? 4 : 0;
    var chunks = cut(buffer, MAX_HTTP_PAYLOAD_SIZE - adjustment);

    for (var i = 0; i < chunks.length; i++) {
      var chunkFrame;
      var first = (i === 0);
      var last = (i === chunks.length - 1);

      if (first) {
        // Shallow copies of the frame and its flags, so the chunk frame can be modified
        // without touching the original.
        chunkFrame = util._extend({}, frame);
        chunkFrame.flags = util._extend({}, frame.flags);
        chunkFrame.flags['END_' + frame.type] = last;
      } else {
        chunkFrame = {
          type: 'CONTINUATION',
          flags: { END_HEADERS: last },
          stream: frame.stream
        };
      }
      chunkFrame.data = chunks[i];

      this.push(chunkFrame);
    }
  }

  // * otherwise, the frame is forwarded without taking any action
  else {
    this.push(frame);
  }

  done();
};
+
+// The Decompressor class
+// ----------------------
+
+// The Decompressor is a stateful transform stream, since it has to collect multiple frames first,
+// and the decoding comes after unifying the payload of those frames.
+//
+// If there's a frame in progress, `this._inProgress` is `true`. The frames are collected in
+// `this._frames`, and the type of the frame and the stream identifier is stored in `this._type`
+// and `this._stream` respectively.
util.inherits(Decompressor, TransformStream);

// Creates a decompressor for one direction of a connection.
//
// * `log`: parent bunyan logger
// * `type`: 'REQUEST' or 'RESPONSE'; only validated by the assert below
function Decompressor(log, type) {
  TransformStream.call(this, { objectMode: true });

  // NOTE(review): the child logger is tagged 'compressor', same as in the Compressor
  // constructor, which makes the two components indistinguishable in logs — confirm whether
  // this is intentional before changing the tag (it alters log output).
  this._log = log.child({ component: 'compressor' });

  assert((type === 'REQUEST') || (type === 'RESPONSE'));
  this._table = new HeaderTable(this._log);

  // Frame collection state: `_inProgress` is true while a HEADERS/PUSH_PROMISE series is being
  // collected, and `_base` holds a copy of the first frame of that series (see `_transform`).
  this._inProgress = false;
  this._base = undefined;
}

// Changing the header table size limit
Decompressor.prototype.setTableSizeLimit = function setTableSizeLimit(size) {
  this._table.setSizeLimit(size);
};
+
// `decompress` takes a full header block, and decompresses it using a new `HeaderSetDecompressor`
// stream instance. This means that from now on, the advantages of streaming header decoding are
// lost, but the API becomes simpler.
//
// Returns a name -> value map; repeated names are collected into arrays of values. Emits
// 'error' with PROTOCOL_ERROR (returning the headers read so far) when a ':'-prefixed
// pseudo-header appears after a regular header.
Decompressor.prototype.decompress = function decompress(block) {
  var decompressor = new HeaderSetDecompressor(this._log, this._table);
  decompressor.end(block);

  var seenNonColonHeader = false;
  var headers = {};
  var pair;
  while (pair = decompressor.read()) {
    var name = pair[0];
    var value = pair[1];
    var isColonHeader = (name.trim()[0] === ':');
    // Pseudo-headers must precede all regular headers.
    if (seenNonColonHeader && isColonHeader) {
      this.emit('error', 'PROTOCOL_ERROR');
      return headers;
    }
    seenNonColonHeader = !isColonHeader;
    if (name in headers) {
      if (headers[name] instanceof Array) {
        headers[name].push(value);
      } else {
        headers[name] = [headers[name], value];
      }
    } else {
      headers[name] = value;
    }
  }

  // * If there are multiple Cookie header fields after decompression, these MUST be concatenated
  //   into a single octet string using the two octet delimiter of 0x3B, 0x20 (the ASCII
  //   string "; ").
  if (('cookie' in headers) && (headers['cookie'] instanceof Array)) {
    headers['cookie'] = headers['cookie'].join('; ');
  }

  return headers;
};
+
// When a `frame` arrives
Decompressor.prototype._transform = function _transform(frame, encoding, done) {
  // * and the collection process is already `_inProgress`, the frame is simply stored, except if
  //   it's an illegal frame
  if (this._inProgress) {
    if ((frame.type !== 'CONTINUATION') || (frame.stream !== this._base.stream)) {
      this._log.error('A series of HEADER frames were not continuous');
      this.emit('error', 'PROTOCOL_ERROR');
      // NOTE(review): this path returns without calling done(), stalling the transform
      // stream — presumably acceptable since the connection is being torn down; confirm.
      return;
    }
    this._frames.push(frame);
  }

  // * and the collection process is not `_inProgress`, but the new frame's type is HEADERS or
  //   PUSH_PROMISE, a new collection process begins
  else if ((frame.type === 'HEADERS') || (frame.type === 'PUSH_PROMISE')) {
    this._inProgress = true;
    this._base = util._extend({}, frame);
    this._frames = [frame];
  }

  // * otherwise, the frame is forwarded without taking any action
  else {
    this.push(frame);
  }

  // * When the frame signals that it's the last in the series, the header block chunks are
  //   concatenated, the headers are decompressed, and a new frame gets pushed out with the
  //   decompressed headers.
  if (this._inProgress && (frame.flags.END_HEADERS || frame.flags.END_PUSH_PROMISE)) {
    var buffer = concat(this._frames.map(function(frame) {
      return frame.data;
    }));
    try {
      var headers = this.decompress(buffer);
    } catch(error) {
      this._log.error({ err: error }, 'Header decompression error');
      this.emit('error', 'COMPRESSION_ERROR');
      // NOTE(review): done() is skipped on this error path too — see the note above.
      return;
    }
    // `_base` (the shallow copy of the first frame of the series) becomes the decompressed frame.
    this.push(util._extend(this._base, { headers: headers }));
    this._inProgress = false;
  }

  done();
};
+
+// Helper functions
+// ================
+
// Concatenate an array of buffers into a new buffer.
//
// `Buffer.concat` does exactly what the previous hand-rolled implementation did (computes the
// total length, allocates a new Buffer, and copies every input into it; an empty list yields a
// zero-length Buffer), so delegate to the standard library. This also drops the use of the
// deprecated `new Buffer(size)` constructor.
function concat(buffers) {
  return Buffer.concat(buffers);
}
+
// Cut `buffer` into chunks not larger than `size`. An empty buffer yields a single empty
// chunk (the loop body always runs at least once), so the framing code still emits one
// frame for an empty header block.
function cut(buffer, size) {
  var pieces = [];
  var offset = 0;
  do {
    var end = Math.min(offset + size, buffer.length);
    pieces.push(buffer.slice(offset, end));
    offset = end;
  } while (offset < buffer.length);
  return pieces;
}
+
// Strip leading and trailing whitespace; used as a `map` callback when splitting Cookie
// header values in `compress`.
function trim(str) {
  return str.trim();
}
diff --git a/testing/xpcshell/node-http2/lib/protocol/connection.js b/testing/xpcshell/node-http2/lib/protocol/connection.js
new file mode 100644
index 0000000000..2b86b7f1c7
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/connection.js
@@ -0,0 +1,619 @@
+var assert = require('assert');
+
+// The Connection class
+// ====================
+
+// The Connection class manages HTTP/2 connections. Each instance corresponds to one transport
+// stream (TCP stream). It operates by sending and receiving frames and is implemented as a
+// [Flow](flow.html) subclass.
+
+var Flow = require('./flow').Flow;
+
+exports.Connection = Connection;
+
+// Public API
+// ----------
+
+// * **new Connection(log, firstStreamId, settings)**: create a new Connection
+//
+// * **Event: 'error' (type)**: signals a connection level error made by the other end
+//
+// * **Event: 'peerError' (type)**: signals the receipt of a GOAWAY frame that contains an error
+// code other than NO_ERROR
+//
+// * **Event: 'stream' (stream)**: signals that there's an incoming stream
+//
+// * **createStream(): stream**: initiate a new stream
+//
+// * **set(settings, callback)**: change the value of one or more settings according to the
+// key-value pairs of `settings`. The callback is called after the peer acknowledged the changes.
+//
+// * **ping([callback])**: send a ping and call callback when the answer arrives
+//
// * **close([error])**: close the connection with an error code
+
+// Constructor
+// -----------
+
+// The main aspects of managing the connection are:
// Creates a new Connection.
//
// * `log`: parent bunyan logger; a child logger tagged 'connection' is created from it
// * `firstStreamId`: ID of the first locally initiated stream; subsequent IDs step by 2, so
//   this also fixes the parity of all outbound stream IDs (see `_allocateId`)
// * `settings`: initial settings to send, or undefined to fall back to `defaultSettings`
function Connection(log, firstStreamId, settings) {
  // * initializing the base class
  Flow.call(this, 0);

  // * logging: every method uses the common logger object
  this._log = log.child({ component: 'connection' });

  // * stream management
  this._initializeStreamManagement(firstStreamId);

  // * lifecycle management
  this._initializeLifecycleManagement();

  // * flow control
  this._initializeFlowControl();

  // * settings management
  this._initializeSettingsManagement(settings);

  // * multiplexing
  this._initializeMultiplexing();
}
Connection.prototype = Object.create(Flow.prototype, { constructor: { value: Connection } });
+
+// Overview
+// --------
+
+// | ^ | ^
+// v | v |
+// +--------------+ +--------------+
+// +---| stream1 |---| stream2 |---- .... ---+
+// | | +----------+ | | +----------+ | |
+// | | | stream1. | | | | stream2. | | |
+// | +-| upstream |-+ +-| upstream |-+ |
+// | +----------+ +----------+ |
+// | | ^ | ^ |
+// | v | v | |
+// | +-----+-------------+-----+-------- .... |
+// | ^ | | | |
+// | | v | | |
+// | +--------------+ | | |
+// | | stream0 | | | |
+// | | connection | | | |
+// | | management | multiplexing |
+// | +--------------+ flow control |
+// | | ^ |
+// | _read() | | _write() |
+// | v | |
+// | +------------+ +-----------+ |
+// | |output queue| |input queue| |
+// +----------------+------------+-+-----------+-----------------+
+// | ^
+// read() | | write()
+// v |
+
+// Stream management
+// -----------------
+
+var Stream = require('./stream').Stream;
+
+// Initialization:
Connection.prototype._initializeStreamManagement = function _initializeStreamManagement(firstStreamId) {
  // * streams are stored in two data structures (both sparse arrays indexed by number):
  //   * `_streamIds` is an id -> stream map of the streams that are allowed to receive frames.
  //   * `_streamPriorities` is a priority -> [stream] map of stream that allowed to send frames.
  this._streamIds = [];
  this._streamPriorities = [];

  // * The next outbound stream ID and the last inbound stream id
  this._nextStreamId = firstStreamId;
  this._lastIncomingStream = 0;

  // * Calling `_writeControlFrame` when there's an incoming stream with 0 as stream ID
  //   (slot 0 is a minimal stub, not a real Stream object)
  this._streamIds[0] = { upstream: { write: this._writeControlFrame.bind(this) } };

  // * By default, the number of concurrent outbound streams is not limited. The `_streamLimit` can
  //   be set by the SETTINGS_MAX_CONCURRENT_STREAMS setting.
  this._streamSlotsFree = Infinity;
  this._streamLimit = Infinity;
  this.on('RECEIVING_SETTINGS_MAX_CONCURRENT_STREAMS', this._updateStreamLimit);
};
+
// `_writeControlFrame` is called when there's an incoming frame in the `_control` stream. It
// broadcasts the message by creating an event on it.
Connection.prototype._writeControlFrame = function _writeControlFrame(frame) {
  // Frame types that may legally arrive on stream 0.
  var connectionLevelTypes = ['SETTINGS', 'PING', 'GOAWAY', 'WINDOW_UPDATE', 'ALTSVC'];
  if (connectionLevelTypes.indexOf(frame.type) !== -1) {
    this._log.debug({ frame: frame }, 'Receiving connection level frame');
    this.emit(frame.type, frame);
  } else {
    this._log.error({ frame: frame }, 'Invalid connection level frame');
    this.emit('error', 'PROTOCOL_ERROR');
  }
};
+
// Methods to manage the stream slot pool:

// Applies a new SETTINGS_MAX_CONCURRENT_STREAMS limit, adjusting the free slot count by the
// difference and waking the multiplexer if slots just became available again.
Connection.prototype._updateStreamLimit = function _updateStreamLimit(newStreamLimit) {
  // The check must precede the counter updates: a wakeup is only needed when the pool was
  // exhausted and the limit grew.
  var shouldWakeup = (this._streamSlotsFree === 0) && (newStreamLimit > this._streamLimit);
  this._streamSlotsFree += newStreamLimit - this._streamLimit;
  this._streamLimit = newStreamLimit;
  if (shouldWakeup) {
    this.emit('wakeup');
  }
};
+
// Adjusts the number of free stream slots by `change` (positive values consume slots,
// negative values release them); no-op when `change` is falsy.
Connection.prototype._changeStreamCount = function _changeStreamCount(change) {
  if (!change) {
    return;
  }
  this._log.trace({ free: this._streamSlotsFree, change: change },
                  'Changing active stream count.');
  // Wake the multiplexer only when the pool was exhausted and a slot is being released.
  var shouldWakeup = (this._streamSlotsFree === 0) && (change < 0);
  this._streamSlotsFree -= change;
  if (shouldWakeup) {
    this.emit('wakeup');
  }
};
+
+// Creating a new *inbound or outbound* stream with the given `id` (which is undefined in case of
+// an outbound stream) consists of three steps:
+//
+// 1. var stream = new Stream(this._log, this);
+// 2. this._allocateId(stream, id);
+// 2. this._allocatePriority(stream);
+
// Allocating an ID to a stream. `id` is undefined for outbound streams (a fresh ID is taken
// from `_nextStreamId`); for inbound streams it is validated first. Returns the allocated ID,
// or undefined after emitting a PROTOCOL_ERROR for an invalid inbound ID.
Connection.prototype._allocateId = function _allocateId(stream, id) {
  // * initiated stream without definite ID: stepping by 2 preserves the parity fixed by
  //   `firstStreamId`
  if (id === undefined) {
    id = this._nextStreamId;
    this._nextStreamId += 2;
  }

  // * incoming stream with a legitimate ID (larger than any previous and different parity than ours)
  else if ((id > this._lastIncomingStream) && ((id - this._nextStreamId) % 2 !== 0)) {
    this._lastIncomingStream = id;
  }

  // * incoming stream with invalid ID
  else {
    this._log.error({ stream_id: id, lastIncomingStream: this._lastIncomingStream },
                    'Invalid incoming stream ID.');
    this.emit('error', 'PROTOCOL_ERROR');
    return undefined;
  }

  assert(!(id in this._streamIds));

  // * adding to `this._streamIds`
  this._log.trace({ s: stream, stream_id: id }, 'Allocating ID for stream.');
  this._streamIds[id] = stream;
  stream.id = id;
  this.emit('new_stream', stream, id);

  // * forwarding connection errors from streams
  stream.on('connectionError', this.emit.bind(this, 'error'));

  return id;
};
+
// Allocating a priority to a stream, and managing priority changes
Connection.prototype._allocatePriority = function _allocatePriority(stream) {
  this._log.trace({ s: stream }, 'Allocating priority for stream.');
  this._insert(stream, stream._priority);
  // Re-file the stream on priority changes, and treat readable upstream data as a reason to
  // wake up the multiplexer.
  stream.on('priority', this._reprioritize.bind(this, stream));
  stream.upstream.on('readable', this.emit.bind(this, 'wakeup'));
  this.emit('wakeup');
};
+
// Files `stream` into the priority bucket for `priority`, creating the bucket on demand.
Connection.prototype._insert = function _insert(stream, priority) {
  var bucket = this._streamPriorities[priority];
  if (bucket) {
    bucket.push(stream);
  } else {
    this._streamPriorities[priority] = [stream];
  }
};
+
// Moves `stream` from its current priority bucket to the one for `priority`, deleting the
// old bucket when it becomes empty.
Connection.prototype._reprioritize = function _reprioritize(stream, priority) {
  var oldBucket = this._streamPriorities[stream._priority];
  var position = oldBucket.indexOf(stream);
  assert(position !== -1);
  oldBucket.splice(position, 1);
  if (oldBucket.length === 0) {
    delete this._streamPriorities[stream._priority];
  }

  this._insert(stream, priority);
};
+
// Creating an *inbound* stream with the given ID. It is called when there's an incoming frame to
// a previously nonexistent stream.
Connection.prototype._createIncomingStream = function _createIncomingStream(id) {
  this._log.debug({ stream_id: id }, 'New incoming stream.');

  var stream = new Stream(this._log, this);
  this._allocateId(stream, id);
  this._allocatePriority(stream);
  this.emit('stream', stream, id);

  return stream;
};

// Creating an *outbound* stream
Connection.prototype.createStream = function createStream() {
  this._log.trace('Creating new outbound stream.');

  // * Receiving is enabled immediately, and an ID gets assigned to the stream
  //   (no ID is allocated here; `_send` calls `_allocateId` lazily when the stream's first
  //   frame is forwarded)
  var stream = new Stream(this._log, this);
  this._allocatePriority(stream);

  return stream;
};
+
+// Multiplexing
+// ------------
+
Connection.prototype._initializeMultiplexing = function _initializeMultiplexing() {
  // A connection-level window update may unblock sending, so treat it as a wakeup event.
  this.on('window_update', this.emit.bind(this, 'wakeup'));
  // Collapses multiple `_send` calls within one turn into a single deferred call (see `_send`).
  this._sendScheduled = false;
  // The first incoming frame gets special validation (see `_onFirstFrameReceived`).
  this._firstFrameReceived = false;
};
+
// The `_send` method is a virtual method of the [Flow class](flow.html) that has to be implemented
// by child classes. It reads frames from streams and pushes them to the output buffer.
Connection.prototype._send = function _send(immediate) {
  // * Do not do anything if the connection is already closed
  if (this._closed) {
    return;
  }

  // * Collapsing multiple calls in a turn into a single deferred call
  if (immediate) {
    this._sendScheduled = false;
  } else {
    if (!this._sendScheduled) {
      this._sendScheduled = true;
      setImmediate(this._send.bind(this, true));
    }
    return;
  }

  this._log.trace('Starting forwarding frames from streams.');

  // * Looping through priority `bucket`s in priority order.
priority_loop:
  for (var priority in this._streamPriorities) {
    var bucket = this._streamPriorities[priority];
    var nextBucket = [];

    // * Forwarding frames from buckets with round-robin scheduling.
    //   1. pulling out frame
    //   2. if there's no frame, skip this stream
    //   3. if forwarding this frame would make `streamCount` greater than `streamLimit`, skip
    //      this stream
    //   4. adding stream to the bucket of the next round
    //   5. assigning an ID to the frame (allocating an ID to the stream if there isn't already)
    //   6. if forwarding a PUSH_PROMISE, allocate ID to the promised stream
    //   7. forwarding the frame, changing `streamCount` as appropriate
    //   8. stepping to the next stream if there's still more frame needed in the output buffer
    //   9. switching to the bucket of the next round
    while (bucket.length > 0) {
      for (var index = 0; index < bucket.length; index++) {
        var stream = bucket[index];
        var frame = stream.upstream.read((this._window > 0) ? this._window : -1);

        if (!frame) {
          continue;
        } else if (frame.count_change > this._streamSlotsFree) {
          stream.upstream.unshift(frame);
          continue;
        }

        nextBucket.push(stream);

        if (frame.stream === undefined) {
          frame.stream = stream.id || this._allocateId(stream);
        }

        if (frame.type === 'PUSH_PROMISE') {
          this._allocatePriority(frame.promised_stream);
          frame.promised_stream = this._allocateId(frame.promised_stream);
        }

        this._log.trace({ s: stream, frame: frame }, 'Forwarding outgoing frame');
        var moreNeeded = this.push(frame);
        this._changeStreamCount(frame.count_change);

        assert(moreNeeded !== null); // The frame shouldn't be unforwarded
        if (moreNeeded === false) {
          break priority_loop;
        }
      }

      bucket = nextBucket;
      nextBucket = [];
    }
  }

  // * if we couldn't forward any frame, then sleep until window update, or some other wakeup event
  //   (`moreNeeded` is function-scoped via `var` hoisting: it is still `undefined` here exactly
  //   when no frame was pushed in the loops above)
  if (moreNeeded === undefined) {
    this.once('wakeup', this._send.bind(this));
  }

  this._log.trace({ moreNeeded: moreNeeded }, 'Stopping forwarding frames from streams.');
};
+
// The `_receive` method is another virtual method of the [Flow class](flow.html) that has to be
// implemented by child classes. It forwards the given frame to the appropriate stream:
Connection.prototype._receive = function _receive(frame, done) {
  this._log.trace({ frame: frame }, 'Forwarding incoming frame');

  // * first frame needs to be checked by the `_onFirstFrameReceived` method
  if (!this._firstFrameReceived) {
    this._firstFrameReceived = true;
    this._onFirstFrameReceived(frame);
  }

  // Do some sanity checking here before we create a stream: connection-level frames must arrive
  // on stream 0, stream-level frames must not. (Strict comparisons — `frame.stream` is numeric,
  // so the previous coercing `==`/`!=` gained nothing and invited type-confusion bugs.)
  if ((frame.type === 'SETTINGS' ||
       frame.type === 'PING' ||
       frame.type === 'GOAWAY') &&
      frame.stream !== 0) {
    // Got connection-level frame on a stream - EEP!
    this.close('PROTOCOL_ERROR');
    // NOTE(review): returns without calling done(), halting further receive processing while
    // the connection shuts down — presumably intentional; confirm.
    return;
  } else if ((frame.type === 'DATA' ||
              frame.type === 'HEADERS' ||
              frame.type === 'PRIORITY' ||
              frame.type === 'RST_STREAM' ||
              frame.type === 'PUSH_PROMISE' ||
              frame.type === 'CONTINUATION') &&
             frame.stream === 0) {
    // Got stream-level frame on connection - EEP!
    this.close('PROTOCOL_ERROR');
    return;
  }
  // WINDOW_UPDATE can be on either stream or connection

  // * gets the appropriate stream from the stream registry
  var stream = this._streamIds[frame.stream];

  // * or creates one if it's not in `this.streams`
  if (!stream) {
    stream = this._createIncomingStream(frame.stream);
  }

  // * in case of PUSH_PROMISE, replaces the promised stream id with a new incoming stream
  if (frame.type === 'PUSH_PROMISE') {
    frame.promised_stream = this._createIncomingStream(frame.promised_stream);
  }

  frame.count_change = this._changeStreamCount.bind(this);

  // * and writes it to the `stream`'s `upstream`
  stream.upstream.write(frame);

  done();
};
+
+// Settings management
+// -------------------
+
// Settings used when the caller does not provide any; currently empty, i.e. every setting
// keeps its protocol-defined default.
var defaultSettings = {
};

// Settings management initialization:
Connection.prototype._initializeSettingsManagement = function _initializeSettingsManagement(settings) {
  // * Setting up the callback queue for setting acknowledgements
  //   (consumed in FIFO order by `_receiveSettings`)
  this._settingsAckCallbacks = [];

  // * Sending the initial settings.
  this._log.debug({ settings: settings },
                  'Sending the first SETTINGS frame as part of the connection header.');
  this.set(settings || defaultSettings);

  // * Forwarding SETTINGS frames to the `_receiveSettings` method
  this.on('SETTINGS', this._receiveSettings);
  this.on('RECEIVING_SETTINGS_MAX_FRAME_SIZE', this._sanityCheckMaxFrameSize);
};
+
// * Checking that the first frame the other endpoint sends is SETTINGS
Connection.prototype._onFirstFrameReceived = function _onFirstFrameReceived(frame) {
  var isConnectionHeader = (frame.stream === 0) && (frame.type === 'SETTINGS');
  if (!isConnectionHeader) {
    this._log.fatal({ frame: frame }, 'Invalid connection header: first frame is not SETTINGS.');
    this.emit('error', 'PROTOCOL_ERROR');
    return;
  }
  this._log.debug('Receiving the first SETTINGS frame as part of the connection header.');
};
+
// Handling of incoming SETTINGS frames.
Connection.prototype._receiveSettings = function _receiveSettings(frame) {
  // * If it's an ACK, the peer confirmed our oldest outstanding SETTINGS frame: fire its
  //   queued callback (if any).
  if (frame.flags.ACK) {
    var callback = this._settingsAckCallbacks.shift();
    if (callback) {
      callback();
    }
    return;
  }

  // * Otherwise it's a change request: acknowledge it (unless the connection is already
  //   closed), then broadcast every changed setting as a RECEIVING_* event.
  if (!this._closed) {
    this.push({
      type: 'SETTINGS',
      flags: { ACK: true },
      stream: 0,
      settings: {}
    });
  }
  for (var name in frame.settings) {
    this.emit('RECEIVING_' + name, frame.settings[name]);
  }
};
+
// Sanity checking the SETTINGS_MAX_FRAME_SIZE value advertised by the peer. RFC 7540 §6.5.2
// only allows values between 2^14 and 2^24 - 1; anything else is a connection error of type
// PROTOCOL_ERROR.
Connection.prototype._sanityCheckMaxFrameSize = function _sanityCheckMaxFrameSize(value) {
  if ((value < 0x4000) || (value >= 0x01000000)) {
    this._log.fatal('Received invalid value for max frame size: ' + value);
    // Previously emitted 'error' with no error code, which downstream handlers treat like an
    // unspecified error (and close() would translate into a NO_ERROR GOAWAY); a protocol
    // violation must carry PROTOCOL_ERROR, matching every other validation in this file.
    this.emit('error', 'PROTOCOL_ERROR');
  }
};
+
// Changing one or more settings value and sending out a SETTINGS frame
//
// * `settings`: name -> value map of the settings to change
// * `callback`: optional; invoked once the peer has ACKed this SETTINGS frame
Connection.prototype.set = function set(settings, callback) {
  // * Calling the callback and emitting event when the change is acknowledged
  //   (the queue is consumed in FIFO order by `_receiveSettings`)
  var self = this;
  this._settingsAckCallbacks.push(function() {
    for (var name in settings) {
      self.emit('ACKNOWLEDGED_' + name, settings[name]);
    }
    if (callback) {
      callback();
    }
  });

  // * Sending out the SETTINGS frame
  this.push({
    type: 'SETTINGS',
    flags: { ACK: false },
    stream: 0,
    settings: settings
  });
  for (var name in settings) {
    this.emit('SENDING_' + name, settings[name]);
  }
};
+
+// Lifecycle management
+// --------------------
+
+// The main responsibilities of lifecycle management code:
+//
+// * keeping the connection alive by
+// * sending PINGs when the connection is idle
+// * answering PINGs
+// * ending the connection
+
Connection.prototype._initializeLifecycleManagement = function _initializeLifecycleManagement() {
  // Outstanding PINGs, keyed by their 16-hex-digit payload; values are the answer callbacks.
  this._pings = {};
  this._closed = false;
  this.on('PING', this._receivePing);
  this.on('GOAWAY', this._receiveGoaway);
};
+
// Generating a string of length 16 with random hexadecimal digits, retrying (in the unlikely
// event of a collision) until it is not already a key of `_pings`.
Connection.prototype._generatePingId = function _generatePingId() {
  var id;
  do {
    id = '';
    for (var i = 0; i < 16; i++) {
      id += Math.floor(Math.random() * 16).toString(16);
    }
  } while (id in this._pings);
  return id;
};
+
// Sending a ping and calling `callback` when the answer arrives
Connection.prototype.ping = function ping(callback) {
  var id = this._generatePingId();
  // The 16 hex digits become the 8-byte PING payload; the echoed payload is used by
  // `_receivePing` to look the callback up again.
  // NOTE(review): `new Buffer(...)` is deprecated in current Node (Buffer.from(id, 'hex') is
  // the replacement) — left unchanged here to match the rest of the file.
  var data = new Buffer(id, 'hex');
  this._pings[id] = callback;

  this._log.debug({ data: data }, 'Sending PING.');
  this.push({
    type: 'PING',
    flags: {
      ACK: false
    },
    stream: 0,
    data: data
  });
};
+
// Answering pings
Connection.prototype._receivePing = function _receivePing(frame) {
  if (frame.flags.ACK) {
    // An ACK answers one of our own PINGs: look the callback up by the echoed payload.
    var id = frame.data.toString('hex');
    if (id in this._pings) {
      this._log.debug({ data: frame.data }, 'Receiving answer for a PING.');
      var callback = this._pings[id];
      // `ping` may have been called without a callback, so the entry can be undefined.
      if (callback) {
        callback();
      }
      delete this._pings[id];
    } else {
      this._log.warn({ data: frame.data }, 'Unsolicited PING answer.');
    }

  } else {
    // A non-ACK PING from the peer: echo its payload back with the ACK flag set.
    this._log.debug({ data: frame.data }, 'Answering PING.');
    this.push({
      type: 'PING',
      flags: {
        ACK: true
      },
      stream: 0,
      data: frame.data
    });
  }
};
+
// Terminating the connection: sends a GOAWAY carrying `error` (NO_ERROR when omitted), ends
// the outgoing side, and marks the connection closed. Closing twice only logs a warning.
Connection.prototype.close = function close(error) {
  if (this._closed) {
    this._log.warn('Trying to close an already closed connection');
    return;
  }

  this._log.debug({ error: error }, 'Closing the connection');
  var goawayFrame = {
    type: 'GOAWAY',
    flags: {},
    stream: 0,
    last_stream: this._lastIncomingStream,
    error: error || 'NO_ERROR'
  };
  this.push(goawayFrame);
  this.push(null);
  this._closed = true;
};
+
// The peer sent a GOAWAY: end the outgoing side, mark the connection closed, and surface any
// non-NO_ERROR code as a 'peerError' event.
Connection.prototype._receiveGoaway = function _receiveGoaway(frame) {
  this._log.debug({ error: frame.error }, 'Other end closed the connection');
  this.push(null);
  this._closed = true;
  var peerReportedFailure = (frame.error !== 'NO_ERROR');
  if (peerReportedFailure) {
    this.emit('peerError', frame.error);
  }
};
+
+// Flow control
+// ------------
+
Connection.prototype._initializeFlowControl = function _initializeFlowControl() {
  // Handling of initial window size of individual streams.
  this._initialStreamWindowSize = INITIAL_STREAM_WINDOW_SIZE;
  // Node's EventEmitter invokes listeners with the emitter as `this`, so `this` inside the
  // callback below is the connection itself.
  this.on('new_stream', function(stream) {
    stream.upstream.setInitialWindow(this._initialStreamWindowSize);
  });
  this.on('RECEIVING_SETTINGS_INITIAL_WINDOW_SIZE', this._setInitialStreamWindowSize);
  // Stream 0 is the connection-level stub; its window must not be touched by the per-stream
  // logic, so its setInitialWindow is replaced with a noop.
  this._streamIds[0].upstream.setInitialWindow = function noop() {};
};

// The initial connection flow control window is 65535 bytes.
var INITIAL_STREAM_WINDOW_SIZE = 65535;
+
// A SETTINGS frame can alter the initial flow control window size for all current streams. When
// the value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the window size of
// all streams by calling the `_setInitialStreamWindowSize` method. The window size has to be
// modified by the difference between the new value and the old value.
Connection.prototype._setInitialStreamWindowSize = function _setInitialStreamWindowSize(size) {
  if ((this._initialStreamWindowSize === Infinity) && (size !== Infinity)) {
    this._log.error('Trying to manipulate initial flow control window size after flow control was turned off.');
    this.emit('error', 'FLOW_CONTROL_ERROR');
  } else {
    this._log.debug({ size: size }, 'Changing stream initial window size.');
    this._initialStreamWindowSize = size;
    // `_streamIds` is a sparse array, so `forEach` skips the holes; slot 0's
    // setInitialWindow was replaced with a noop in `_initializeFlowControl`.
    this._streamIds.forEach(function(stream) {
      stream.upstream.setInitialWindow(size);
    });
  }
};
diff --git a/testing/xpcshell/node-http2/lib/protocol/endpoint.js b/testing/xpcshell/node-http2/lib/protocol/endpoint.js
new file mode 100644
index 0000000000..a218db040a
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/endpoint.js
@@ -0,0 +1,262 @@
+var assert = require('assert');
+
+var Serializer = require('./framer').Serializer;
+var Deserializer = require('./framer').Deserializer;
+var Compressor = require('./compressor').Compressor;
+var Decompressor = require('./compressor').Decompressor;
+var Connection = require('./connection').Connection;
+var Duplex = require('stream').Duplex;
+var Transform = require('stream').Transform;
+
+exports.Endpoint = Endpoint;
+
+// The Endpoint class
+// ==================
+
+// Public API
+// ----------
+
+// - **new Endpoint(log, role, settings, filters)**: create a new Endpoint.
+//
+// - `log`: bunyan logger of the parent
+// - `role`: 'CLIENT' or 'SERVER'
+// - `settings`: initial HTTP/2 settings
+// - `filters`: a map of functions that filter the traffic between components (for debugging or
+// intentional failure injection).
+//
+// Filter functions get three arguments:
+// 1. `frame`: the current frame
+// 2. `forward(frame)`: function that can be used to forward a frame to the next component
+// 3. `done()`: callback to signal the end of the filter process
+//
+// Valid filter names and their position in the stack:
+// - `beforeSerialization`: after compression, before serialization
+// - `beforeCompression`: after multiplexing, before compression
+// - `afterDeserialization`: after deserialization, before decompression
+// - `afterDecompression`: after decompression, before multiplexing
+//
+// * **Event: 'stream' (Stream)**: 'stream' event forwarded from the underlying Connection
+//
+// * **Event: 'error' (type)**: signals an error
+//
+// * **createStream(): Stream**: initiate a new stream (forwarded to the underlying Connection)
+//
+// * **close([error])**: close the connection with an error code
+
+// Constructor
+// -----------
+
+// The process of initialization:
// Endpoint binds the whole protocol stack (framer, compressor, connection)
// together behind a single binary Duplex stream.
//
// @param {Object} log       bunyan-style logger of the parent (must have `child()`)
// @param {String} role      'CLIENT' or 'SERVER' (asserted below)
// @param {Object} settings  initial HTTP/2 settings, handed to Connection
// @param {Object} [filters] optional traffic filter functions (see module docs)
function Endpoint(log, role, settings, filters) {
  Duplex.call(this);

  // * Initializing logging infrastructure
  this._log = log.child({ component: 'endpoint', e: this });

  // * First part of the handshake process: sending (CLIENT) or receiving
  //   (SERVER) the client connection header prelude.
  assert((role === 'CLIENT') || role === 'SERVER');
  if (role === 'CLIENT') {
    this._writePrelude();
  } else {
    this._readPrelude();
  }

  // * Initialization of component. This includes the second part of the handshake process:
  //   sending the first SETTINGS frame. This is done by the connection class right after
  //   initialization.
  this._initializeDataFlow(role, settings, filters || {});

  // * Initialization of management code.
  this._initializeManagement();

  // * Initializing error handling.
  this._initializeErrorHandling();
}
Endpoint.prototype = Object.create(Duplex.prototype, { constructor: { value: Endpoint } });
+
+// Handshake
+// ---------
+
// The fixed 24-octet client connection preface (RFC 7540, section 3.5).
var CLIENT_PRELUDE = new Buffer('PRI * HTTP/2.0\r\n\r\nSM\r\n\r\n');

// Writing the client header is simple and synchronous: it is simply pushed
// onto the readable (outgoing) side of the endpoint.
Endpoint.prototype._writePrelude = function _writePrelude() {
  this._log.debug('Sending the client connection header prelude.');
  this.push(CLIENT_PRELUDE);
};
+
+// The asynchronous process of reading the client header:
// The asynchronous process of reading and validating the client header:
Endpoint.prototype._readPrelude = function _readPrelude() {
  // * progress in the header is tracked using a `cursor`
  var cursor = 0;

  // * `_write` is temporarily replaced (shadowed as an own property) by the
  //   comparator function
  this._write = function _temporalWrite(chunk, encoding, done) {
    // * which compares the stored header with the current `chunk` byte by byte and emits the
    //   'error' event if there's a byte that doesn't match
    var offset = cursor;
    while(cursor < CLIENT_PRELUDE.length && (cursor - offset) < chunk.length) {
      if (CLIENT_PRELUDE[cursor] !== chunk[cursor - offset]) {
        this._log.fatal({ cursor: cursor, offset: offset, chunk: chunk },
                        'Client connection header prelude does not match.');
        this._error('handshake', 'PROTOCOL_ERROR');
        return;
      }
      cursor += 1;
    }

    // * if the whole header is over, and there were no errors, then restore the original `_write`
    //   (deleting the own property re-exposes the prototype's `_write`) and call it with the
    //   remaining part of the current chunk
    if (cursor === CLIENT_PRELUDE.length) {
      this._log.debug('Successfully received the client connection header prelude.');
      delete this._write;
      chunk = chunk.slice(cursor - offset);
      this._write(chunk, encoding, done);
    }
  };
};
+
+// Data flow
+// ---------
+
+// +---------------------------------------------+
+// | |
+// | +-------------------------------------+ |
+// | | +---------+ +---------+ +---------+ | |
+// | | | stream1 | | stream2 | | ... | | |
+// | | +---------+ +---------+ +---------+ | |
+// | | connection | |
+// | +-------------------------------------+ |
+// | | ^ |
+// | pipe | | pipe |
+// | v | |
+// | +------------------+------------------+ |
+// | | compressor | decompressor | |
+// | +------------------+------------------+ |
+// | | ^ |
+// | pipe | | pipe |
+// | v | |
+// | +------------------+------------------+ |
+// | | serializer | deserializer | |
+// | +------------------+------------------+ |
+// | | ^ |
+// | _read() | | _write() |
+// | v | |
+// | +------------+ +-----------+ |
+// | |output queue| |input queue| |
+// +------+------------+-----+-----------+-------+
+// | ^
+// read() | | write()
+// v |
+
// Wraps a filter function in an object-mode Transform stream so it can be
// spliced into a pipe chain. The filter receives each frame, a `forward`
// callback to pass frames downstream, and a `done` completion callback.
function createTransformStream(filter) {
  var transform = new Transform({ objectMode: true });
  var forward = transform.push.bind(transform);
  transform._transform = function _transform(frame, encoding, done) {
    filter(frame, forward, done);
  };
  return transform;
}
+
// Pipes `stream1` into `stream2`, optionally routing the traffic through a
// filter Transform when a filter function is supplied.
function pipeAndFilter(stream1, stream2, filter) {
  if (!filter) {
    stream1.pipe(stream2);
    return;
  }
  stream1.pipe(createTransformStream(filter)).pipe(stream2);
}
+
// Builds the component pipeline (connection <-> compressor/decompressor <->
// serializer/deserializer) and wires the optional debug filters in between.
Endpoint.prototype._initializeDataFlow = function _initializeDataFlow(role, settings, filters) {
  // Clients own the odd stream ids and compress requests; servers own the
  // even ids and compress responses.
  var isClient = (role === 'CLIENT');
  var firstStreamId = isClient ? 1 : 2;
  var compressorRole = isClient ? 'REQUEST' : 'RESPONSE';
  var decompressorRole = isClient ? 'RESPONSE' : 'REQUEST';

  this._serializer = new Serializer(this._log);
  this._deserializer = new Deserializer(this._log);
  this._compressor = new Compressor(this._log, compressorRole);
  this._decompressor = new Decompressor(this._log, decompressorRole);
  this._connection = new Connection(this._log, firstStreamId, settings);

  // Outgoing leg: connection -> compressor -> serializer.
  pipeAndFilter(this._connection, this._compressor, filters.beforeCompression);
  pipeAndFilter(this._compressor, this._serializer, filters.beforeSerialization);
  // Incoming leg: deserializer -> decompressor -> connection.
  pipeAndFilter(this._deserializer, this._decompressor, filters.afterDeserialization);
  pipeAndFilter(this._decompressor, this._connection, filters.afterDecompression);

  // Keep the HPACK table size limits in sync with the negotiated settings.
  this._connection.on('ACKNOWLEDGED_SETTINGS_HEADER_TABLE_SIZE',
                      this._decompressor.setTableSizeLimit.bind(this._decompressor));
  this._connection.on('RECEIVING_SETTINGS_HEADER_TABLE_SIZE',
                      this._compressor.setTableSizeLimit.bind(this._compressor));
};
+
// Unique sentinel distinguishing "no read attempted yet" from push() results.
var noread = {};
// Pulls serialized buffers from the serializer into the endpoint's output
// queue until back-pressure kicks in or the serializer runs dry.
Endpoint.prototype._read = function _read() {
  // `sync` guards against reentrant _read calls while draining.
  this._readableState.sync = true;
  var moreNeeded = noread, chunk;
  while (moreNeeded && (chunk = this._serializer.read())) {
    moreNeeded = this.push(chunk);
  }
  if (moreNeeded === noread) {
    // Nothing was available at all: retry once the serializer has data.
    this._serializer.once('readable', this._read.bind(this));
  }
  this._readableState.sync = false;
};
+
// Incoming bytes from the socket are fed straight into the deserializer.
Endpoint.prototype._write = function _write(chunk, encoding, done) {
  this._deserializer.write(chunk, encoding, done);
};
+
+// Management
+// --------------
+
// Forwards the connection's 'stream' events so users can listen on the endpoint.
Endpoint.prototype._initializeManagement = function _initializeManagement() {
  this._connection.on('stream', this.emit.bind(this, 'stream'));
};

// Initiates a new stream (delegated to the underlying Connection).
Endpoint.prototype.createStream = function createStream() {
  return this._connection.createStream();
};
+
+// Error handling
+// --------------
+
// Funnels every component's 'error' event into the common _error handler,
// tagged with the name of the failing component; peer errors are re-emitted.
Endpoint.prototype._initializeErrorHandling = function _initializeErrorHandling() {
  var components = {
    serializer: this._serializer,
    deserializer: this._deserializer,
    compressor: this._compressor,
    decompressor: this._decompressor,
    connection: this._connection
  };
  for (var name in components) {
    components[name].on('error', this._error.bind(this, name));
  }

  this._connection.on('peerError', this.emit.bind(this, 'peerError'));
};
+
// Fatal error handler: logs, initiates connection shutdown, then emits
// 'error' asynchronously so the close sequence gets queued first.
Endpoint.prototype._error = function _error(component, error) {
  this._log.fatal({ source: component, message: error }, 'Fatal error, closing connection');
  this.close(error);
  setImmediate(this.emit.bind(this, 'error', error));
};
+
// Closes the connection, optionally with the given error code.
Endpoint.prototype.close = function close(error) {
  this._connection.close(error);
};
+
+// Bunyan serializers
+// ------------------
+
exports.serializers = {};

// Bunyan log serializer for endpoints: stamps each endpoint object with a
// small process-unique numeric id on first use and logs that id instead of
// the (huge) endpoint object itself.
var nextId = 0;
exports.serializers.e = function(endpoint) {
  if (!('id' in endpoint)) {
    endpoint.id = nextId++;
  }
  return endpoint.id;
};
diff --git a/testing/xpcshell/node-http2/lib/protocol/flow.js b/testing/xpcshell/node-http2/lib/protocol/flow.js
new file mode 100644
index 0000000000..4ec5649bee
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/flow.js
@@ -0,0 +1,353 @@
+var assert = require('assert');
+
+// The Flow class
+// ==============
+
+// Flow is a [Duplex stream][1] subclass which implements HTTP/2 flow control. It is designed to be
+// subclassed by [Connection](connection.html) and the `upstream` component of [Stream](stream.html).
+// [1]: https://nodejs.org/api/stream.html#stream_class_stream_duplex
+
+var Duplex = require('stream').Duplex;
+
+exports.Flow = Flow;
+
+// Public API
+// ----------
+
+// * **Event: 'error' (type)**: signals an error
+//
+// * **setInitialWindow(size)**: the initial flow control window size can be changed *any time*
+// ([as described in the standard][1]) using this method
+//
+// [1]: https://tools.ietf.org/html/rfc7540#section-6.9.2
+
+// API for child classes
+// ---------------------
+
+// * **new Flow([flowControlId])**: creating a new flow that will listen for WINDOW_UPDATES frames
+// with the given `flowControlId` (or every update frame if not given)
+//
+// * **_send()**: called when more frames should be pushed. The child class is expected to override
+// this (instead of the `_read` method of the Duplex class).
+//
+// * **_receive(frame, readyCallback)**: called when there's an incoming frame. The child class is
+// expected to override this (instead of the `_write` method of the Duplex class).
+//
+// * **push(frame): bool**: schedules `frame` for sending.
+//
+// Returns `true` if it needs more frames in the output queue, `false` if the output queue is
+// full, and `null` if did not push the frame into the output queue (instead, it pushed it into
+// the flow control queue).
+//
+// * **read(limit): frame**: like the regular `read`, but the 'flow control size' (0 for non-DATA
+// frames, length of the payload for DATA frames) of the returned frame will be under `limit`.
+// Small exception: pass -1 as `limit` if the max. flow control size is 0. `read(0)` means the
+// same thing as [in the original API](https://nodejs.org/api/stream.html#stream_stream_read_0).
+//
+// * **getLastQueuedFrame(): frame**: returns the last frame in output buffers
+//
+// * **_log**: the Flow class uses the `_log` object of the parent
+
+// Constructor
+// -----------
+
+// When a HTTP/2.0 connection is first established, new streams are created with an initial flow
+// control window size of 65535 bytes.
var INITIAL_WINDOW_SIZE = 65535;

// `flowControlId` is needed if only specific WINDOW_UPDATEs should be watched;
// when omitted, every WINDOW_UPDATE frame is honored.
function Flow(flowControlId) {
  Duplex.call(this, { objectMode: true });

  this._window = this._initialWindow = INITIAL_WINDOW_SIZE;
  this._flowControlId = flowControlId; // stream id this flow belongs to
  this._queue = [];                    // frames held back by flow control
  this._ended = false;                 // saw END_STREAM or RST_STREAM
  this._received = 0;                  // unacknowledged incoming DATA octets
  this._blocked = false;               // a BLOCKED frame is outstanding
}
Flow.prototype = Object.create(Duplex.prototype, { constructor: { value: Flow } });
+
+// Incoming frames
+// ---------------
+
// `_receive` is called when there's an incoming frame. Child classes MUST
// override this (instead of `_write`); `callback` signals readiness for more.
Flow.prototype._receive = function _receive(frame, callback) {
  throw new Error('The _receive(frame, callback) method has to be overridden by the child class!');
};
+
+// `_receive` is called by `_write` which in turn is [called by Duplex][1] when someone `write()`s
+// to the flow. It emits the 'receiving' event and notifies the window size tracking code if the
+// incoming frame is a WINDOW_UPDATE.
+// [1]: https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback_1
// `_receive` is called by `_write` which in turn is called by Duplex when someone `write()`s
// to the flow. It notifies the window size tracking code if the incoming frame is a
// WINDOW_UPDATE, and schedules acknowledgement of received DATA octets.
Flow.prototype._write = function _write(frame, encoding, callback) {
  // Frames addressed to this flow (matching stream id, or any frame when no
  // id was configured).
  var sentToUs = (this._flowControlId === undefined) || (frame.stream === this._flowControlId);

  if (sentToUs && (frame.flags.END_STREAM || (frame.type === 'RST_STREAM'))) {
    this._ended = true;
  }

  if ((frame.type === 'DATA') && (frame.data.length > 0)) {
    // Count the octets and batch the WINDOW_UPDATE acknowledgement via
    // setImmediate so many small frames produce a single update.
    this._receive(frame, function() {
      this._received += frame.data.length;
      if (!this._restoreWindowTimer) {
        this._restoreWindowTimer = setImmediate(this._restoreWindow.bind(this));
      }
      callback();
    }.bind(this));
  }

  else {
    this._receive(frame, callback);
  }

  if (sentToUs && (frame.type === 'WINDOW_UPDATE')) {
    this._updateWindow(frame);
  }
};
+
// Acknowledges the DATA octets received since the last call by sending a
// WINDOW_UPDATE that restores the flow control window of the remote end.
// TODO: push this directly into the output queue. No need to wait for DATA frames in the queue.
Flow.prototype._restoreWindow = function _restoreWindow() {
  delete this._restoreWindowTimer;
  if (this._ended || (this._received <= 0)) {
    return;
  }
  this.push({
    type: 'WINDOW_UPDATE',
    flags: {},
    stream: this._flowControlId,
    window_size: this._received
  });
  this._received = 0;
};
+
+// Outgoing frames - sending procedure
+// -----------------------------------
+
+// flow
+// +-------------------------------------------------+
+// | |
+// +--------+ +---------+ |
+// read() | output | _read() | flow | _send() |
+// <----------| |<----------| control |<------------- |
+// | buffer | | buffer | |
+// +--------+ +---------+ |
+// | input | |
+// ---------->| |-----------------------------------> |
+// write() | buffer | _write() _receive() |
+// +--------+ |
+// | |
+// +-------------------------------------------------+
+
// `_send` is called when more frames should be pushed to the output buffer.
// Child classes MUST override this (instead of the `_read` method of Duplex).
Flow.prototype._send = function _send() {
  throw new Error('The _send() method has to be overridden by the child class!');
};
+
+// `_send` is called by `_read` which is in turn [called by Duplex][1] when it wants to have more
+// items in the output queue.
+// [1]: https://nodejs.org/api/stream.html#stream_writable_write_chunk_encoding_callback_1
// Called by Duplex when it wants more items in the output queue: either asks
// the child class for frames, drains the flow control queue, or announces
// being BLOCKED and waits for a window update.
Flow.prototype._read = function _read() {
  // * if the flow control queue is empty, then let the user push more frames
  if (this._queue.length === 0) {
    this._send();
  }

  // * if there are items in the flow control queue, then let's put them into the output queue (to
  //   the extent it is possible with respect to the window size and output queue feedback)
  else if (this._window > 0) {
    this._blocked = false;
    this._readableState.sync = true; // to avoid reentrant calls
    do {
      var moreNeeded = this._push(this._queue[0]);
      if (moreNeeded !== null) {
        this._queue.shift();
      }
    } while (moreNeeded && (this._queue.length > 0));
    this._readableState.sync = false;

    // `moreNeeded` is function-scoped (var hoisting), so it remains visible
    // after the do-while loop.
    assert((!moreNeeded) ||                                     // * output queue is full
           (this._queue.length === 0) ||                        // * flow control queue is empty
           (!this._window && (this._queue[0].type === 'DATA'))); // * waiting for window update
  }

  // * otherwise, come back when the flow control window is positive
  else if (!this._blocked) {
    this._parentPush({
      type: 'BLOCKED',
      flags: {},
      stream: this._flowControlId
    });
    this.once('window_update', this._read);
    this._blocked = true;
  }
};
+
var MAX_PAYLOAD_SIZE = 4096; // Must not be greater than MAX_HTTP_PAYLOAD_SIZE which is 16383

// `read(limit)` is like the `read` of the Readable class, but it guarantees that the 'flow control
// size' (0 for non-DATA frames, length of the payload for DATA frames) of the returned frame will
// be under `limit`.
Flow.prototype.read = function read(limit) {
  if (limit === 0) {
    return Duplex.prototype.read.call(this, 0);
  } else if (limit === -1) {
    // -1 means "max flow control size 0": only non-DATA frames may come out.
    limit = 0;
  } else if ((limit === undefined) || (limit > MAX_PAYLOAD_SIZE)) {
    limit = MAX_PAYLOAD_SIZE;
  }

  // * Looking at the first frame in the queue without pulling it out if possible.
  // NOTE(review): this reaches into `_readableState.buffer`, a stream
  // internal that is an array in the Node versions this targets — newer Node
  // releases use a BufferList here; verify against the targeted runtime.
  var frame = this._readableState.buffer[0];
  if (!frame && !this._readableState.ended) {
    this._read();
    frame = this._readableState.buffer[0];
  }

  if (frame && (frame.type === 'DATA')) {
    // * If the frame is DATA, then there are two special cases:
    //   * if the limit is 0, we shouldn't return anything
    //   * if the size of the frame is larger than limit, then the frame should be split
    if (limit === 0) {
      return Duplex.prototype.read.call(this, 0);
    }

    else if (frame.data.length > limit) {
      this._log.trace({ frame: frame, size: frame.data.length, forwardable: limit },
                      'Splitting out forwardable part of a DATA frame.');
      this.unshift({
        type: 'DATA',
        flags: {},
        stream: frame.stream,
        data: frame.data.slice(0, limit)
      });
      frame.data = frame.data.slice(limit);
    }
  }

  return Duplex.prototype.read.call(this);
};
+
+// `_parentPush` pushes the given `frame` into the output queue
// `_parentPush` pushes the given `frame` into the output queue, decreasing
// the flow control window by the payload size of outgoing DATA frames.
Flow.prototype._parentPush = function _parentPush(frame) {
  this._log.trace({ frame: frame }, 'Pushing frame into the output queue');

  var isDataFrame = Boolean(frame) && (frame.type === 'DATA');
  if (isDataFrame && (this._window !== Infinity)) {
    this._log.trace({ window: this._window, by: frame.data.length },
                    'Decreasing flow control window size.');
    this._window -= frame.data.length;
    assert(this._window >= 0);
  }

  return Duplex.prototype.push.call(this, frame);
};
+
+// `_push(frame)` pushes `frame` into the output queue and decreases the flow control window size.
+// It is capable of splitting DATA frames into smaller parts, if the window size is not enough to
+// push the whole frame. The return value is similar to `push` except that it returns `null` if it
+// did not push the whole frame to the output queue (but maybe it did push part of the frame).
// `_push(frame)` pushes `frame` into the output queue and decreases the flow
// control window size. It splits DATA frames larger than the available window
// (or 16384 octets), and returns `null` when the frame (or its remainder)
// could not be pushed.
Flow.prototype._push = function _push(frame) {
  // 'Flow control payload' of the frame: only DATA frames carry one (`data`
  // is falsy for every other type).
  var data = frame && (frame.type === 'DATA') && frame.data;
  // A single outgoing DATA frame carries at most 16384 octets and never more
  // than the current window allows.
  var maxFrameLength = (this._window < 16384) ? this._window : 16384;

  if (!data || (data.length <= maxFrameLength)) {
    // Non-DATA frame, or DATA that fits entirely: forward as-is.
    return this._parentPush(frame);
  }

  else if (this._window <= 0) {
    // No window at all: the caller keeps the frame queued.
    return null;
  }

  else {
    // Partial fit: push the forwardable prefix, keep the rest in `frame`.
    this._log.trace({ frame: frame, size: frame.data.length, forwardable: this._window },
                    'Splitting out forwardable part of a DATA frame.');
    frame.data = data.slice(maxFrameLength);
    this._parentPush({
      type: 'DATA',
      flags: {},
      stream: frame.stream,
      data: data.slice(0, maxFrameLength)
    });
    return null;
  }
};
+
+// Push `frame` into the flow control queue, or if it's empty, then directly into the output queue
// Pushes `frame` into the flow control queue, or, if that queue is empty,
// directly into the output queue. Returns the `push` feedback, or `null`
// when the frame (or part of it) had to be queued for later.
Flow.prototype.push = function push(frame) {
  if (frame === null) {
    this._log.debug('Enqueueing outgoing End Of Stream');
  } else {
    this._log.debug({ frame: frame }, 'Enqueueing outgoing frame');
  }

  var queueWasEmpty = (this._queue.length === 0);
  var moreNeeded = queueWasEmpty ? this._push(frame) : null;

  if (moreNeeded === null) {
    this._queue.push(frame);
  }

  return moreNeeded;
};
+
+// `getLastQueuedFrame` returns the last frame in output buffers. This is primarily used by the
+// [Stream](stream.html) class to mark the last frame with END_STREAM flag.
// Returns the last frame sitting in the output buffers: the tail of the flow
// control queue if any, otherwise the tail of the readable-side queue. Used by
// the Stream class to mark the last frame with the END_STREAM flag.
Flow.prototype.getLastQueuedFrame = function getLastQueuedFrame() {
  var flowControlTail = this._queue[this._queue.length - 1];
  var readableQueue = this._readableState.buffer;
  // `||` keeps the original fallthrough: a queued `null` (End Of Stream)
  // falls back to the readable queue's tail.
  return flowControlTail || readableQueue[readableQueue.length - 1];
};
+
+// Outgoing frames - managing the window size
+// ------------------------------------------
+
+// Flow control window size is manipulated using the `_increaseWindow` method.
+//
+// * Invoking it with `Infinite` means turning off flow control. Flow control cannot be enabled
+// again once disabled. Any attempt to re-enable flow control MUST be rejected with a
+// FLOW_CONTROL_ERROR error code.
+// * A sender MUST NOT allow a flow control window to exceed 2^31 - 1 bytes. The action taken
+// depends on it being a stream or the connection itself.
+
// A flow control window must never exceed 2^31 - 1 bytes.
var WINDOW_SIZE_LIMIT = Math.pow(2, 31) - 1;

// Adjusts the flow control window by `size` bytes.
//
// * Calling it with `Infinity` turns flow control off. Once disabled it
//   cannot be re-enabled: any attempt yields a FLOW_CONTROL_ERROR.
// * Growing the window past WINDOW_SIZE_LIMIT is also a FLOW_CONTROL_ERROR.
// * 'window_update' is emitted on every effective (non-zero) change so that
//   blocked senders can resume.
Flow.prototype._increaseWindow = function _increaseWindow(size) {
  if ((this._window === Infinity) && (size !== Infinity)) {
    this._log.error('Trying to increase flow control window after flow control was turned off.');
    this.emit('error', 'FLOW_CONTROL_ERROR');
  } else {
    this._log.trace({ window: this._window, by: size }, 'Increasing flow control window size.');
    this._window += size;
    if ((this._window !== Infinity) && (this._window > WINDOW_SIZE_LIMIT)) {
      this._log.error('Flow control window grew too large.');
      this.emit('error', 'FLOW_CONTROL_ERROR');
    } else if (size !== 0) {
      // Strict inequality (was loose `!=`): `size` is always numeric here,
      // and the file uses ===/!== everywhere else.
      this.emit('window_update');
    }
  }
};
+
+// The `_updateWindow` method gets called every time there's an incoming WINDOW_UPDATE frame. It
+// modifies the flow control window:
+//
+// * Flow control can be disabled for an individual stream by sending a WINDOW_UPDATE with the
+// END_FLOW_CONTROL flag set. The payload of a WINDOW_UPDATE frame that has the END_FLOW_CONTROL
+// flag set is ignored.
+// * A sender that receives a WINDOW_UPDATE frame updates the corresponding window by the amount
+// specified in the frame.
// Handles an incoming WINDOW_UPDATE frame: END_FLOW_CONTROL disables flow
// control entirely (window becomes Infinity), otherwise the window grows by
// the frame's `window_size`.
Flow.prototype._updateWindow = function _updateWindow(frame) {
  this._increaseWindow(frame.flags.END_FLOW_CONTROL ? Infinity : frame.window_size);
};
+
+// A SETTINGS frame can alter the initial flow control window size for all current streams. When the
+// value of SETTINGS_INITIAL_WINDOW_SIZE changes, a receiver MUST adjust the size of all stream by
+// calling the `setInitialWindow` method. The window size has to be modified by the difference
+// between the new value and the old value.
// Applies a changed SETTINGS_INITIAL_WINDOW_SIZE: the current window moves by
// the difference between the new and the previously configured initial value.
Flow.prototype.setInitialWindow = function setInitialWindow(initialWindow) {
  var delta = initialWindow - this._initialWindow;
  this._initialWindow = initialWindow;
  this._increaseWindow(delta);
};
diff --git a/testing/xpcshell/node-http2/lib/protocol/framer.js b/testing/xpcshell/node-http2/lib/protocol/framer.js
new file mode 100644
index 0000000000..244e60ae16
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/framer.js
@@ -0,0 +1,1165 @@
+// The framer consists of two [Transform Stream][1] subclasses that operate in [object mode][2]:
+// the Serializer and the Deserializer
+// [1]: https://nodejs.org/api/stream.html#stream_class_stream_transform
+// [2]: https://nodejs.org/api/stream.html#stream_new_stream_readable_options
+var assert = require('assert');
+
+var Transform = require('stream').Transform;
+
exports.Serializer = Serializer;
exports.Deserializer = Deserializer;

// Set HTTP2_LOG_DATA in the environment to also log raw frame octets.
var logData = Boolean(process.env.HTTP2_LOG_DATA);

// Default maximum frame payload size (the RFC 7540 initial value).
var MAX_PAYLOAD_SIZE = 16384;
// A WINDOW_UPDATE payload is always exactly 4 octets.
var WINDOW_UPDATE_PAYLOAD_SIZE = 4;
+
+// Serializer
+// ----------
+//
+// Frame Objects
+// * * * * * * * --+---------------------------
+// | |
+// v v Buffers
+// [] -----> Payload Ser. --[buffers]--> Header Ser. --> * * * *
+// empty adds payload adds header
+// array buffers buffer
+
// Serializer: an object-mode Transform turning frame objects into wire
// buffers. `log` is a bunyan-style logger of the parent.
function Serializer(log) {
  Transform.call(this, { objectMode: true });
  this._log = log.child({ component: 'serializer' });
}
Serializer.prototype = Object.create(Transform.prototype, { constructor: { value: Serializer } });
+
+// When there's an incoming frame object, it first generates the frame type specific part of the
+// frame (payload), and then then adds the header part which holds fields that are common to all
+// frame types (like the length of the payload).
// Serializes one outgoing frame: the type-specific serializer emits the
// payload buffers, the common header is prepended, and every resulting
// buffer is pushed downstream.
Serializer.prototype._transform = function _transform(frame, encoding, done) {
  this._log.trace({ frame: frame }, 'Outgoing frame');

  assert(frame.type in Serializer, 'Unknown frame type: ' + frame.type);

  var buffers = [];
  Serializer[frame.type](frame, buffers);
  var length = Serializer.commonHeader(frame, buffers);

  assert(length <= MAX_PAYLOAD_SIZE, 'Frame too large!');

  buffers.forEach(function(buffer) {
    if (logData) {
      this._log.trace({ data: buffer }, 'Outgoing data');
    }
    this.push(buffer);
  }, this);

  done();
};
+
+// Deserializer
+// ------------
+//
+// Buffers
+// * * * * --------+-------------------------
+// | |
+// v v Frame Objects
+// {} -----> Header Des. --{frame}--> Payload Des. --> * * * * * * *
+// empty adds parsed adds parsed
+// object header properties payload properties
+
// Deserializer: an object-mode Transform turning wire bytes into frame
// objects.
//
// @param {Object} log  bunyan-style logger of the parent
// @param {String} role 'CLIENT' or 'SERVER'; forwarded to type-specific parsers
function Deserializer(log, role) {
  this._role = role;
  this._log = log.child({ component: 'deserializer' });
  Transform.call(this, { objectMode: true });
  // Start out waiting for a frame header.
  this._next(COMMON_HEADER_SIZE);
}
Deserializer.prototype = Object.create(Transform.prototype, { constructor: { value: Deserializer } });
+
// The Deserializer is stateful, and its two main alternating states are: *waiting for header* and
// *waiting for payload*. The state is stored in the boolean property `_waitingForHeader`.
//
// When entering a new state, a `_buffer` is created that will hold the accumulated data (header or
// payload). The `_cursor` is used to track the progress.
Deserializer.prototype._next = function(size) {
  this._cursor = 0;
  this._buffer = new Buffer(size);
  // Each call flips between header- and payload-parsing mode.
  this._waitingForHeader = !this._waitingForHeader;
  if (this._waitingForHeader) {
    // A new frame begins: start with a fresh frame object.
    this._frame = {};
  }
};
+
+// Parsing an incoming buffer is an iterative process because it can hold multiple frames if it's
+// large enough. A `cursor` is used to track the progress in parsing the incoming `chunk`.
// Parsing an incoming buffer is an iterative process because it can hold multiple frames if it's
// large enough. A `cursor` is used to track the progress in parsing the incoming `chunk`.
Deserializer.prototype._transform = function _transform(chunk, encoding, done) {
  var cursor = 0;

  if (logData) {
    this._log.trace({ data: chunk }, 'Incoming data');
  }

  while(cursor < chunk.length) {
    // The content of an incoming buffer is first copied to `_buffer`. If it can't hold the full
    // chunk, then only a part of it is copied.
    var toCopy = Math.min(chunk.length - cursor, this._buffer.length - this._cursor);
    chunk.copy(this._buffer, this._cursor, cursor, cursor + toCopy);
    this._cursor += toCopy;
    cursor += toCopy;

    // When `_buffer` is full, its content gets parsed either as header or payload depending on
    // the actual state.

    // If it's header then the parsed data is stored in a temporary variable and then the
    // deserializer waits for the specified length payload.
    if ((this._cursor === this._buffer.length) && this._waitingForHeader) {
      var payloadSize = Deserializer.commonHeader(this._buffer, this._frame);
      if (payloadSize <= MAX_PAYLOAD_SIZE) {
        this._next(payloadSize);
      } else {
        // Also reached when commonHeader returned the 'FRAME_SIZE_ERROR'
        // string: a string is never <= a number.
        this.emit('error', 'FRAME_SIZE_ERROR');
        return;
      }
    }

    // If it's payload then the frame object is finalized and then gets pushed out.
    // Unknown frame types are ignored.
    //
    // Note: If we just finished the parsing of a header and the payload length is 0, this branch
    // will also run.
    if ((this._cursor === this._buffer.length) && !this._waitingForHeader) {
      if (this._frame.type) {
        var error = Deserializer[this._frame.type](this._buffer, this._frame, this._role);
        if (error) {
          this._log.error('Incoming frame parsing error: ' + error);
          this.emit('error', error);
        } else {
          this._log.trace({ frame: this._frame }, 'Incoming frame');
          this.push(this._frame);
        }
      } else {
        this._log.error('Unknown type incoming frame');
        // Ignore it other than logging
      }
      this._next(COMMON_HEADER_SIZE);
    }
  }

  done();
};
+
+// [Frame Header](https://tools.ietf.org/html/rfc7540#section-4.1)
+// --------------------------------------------------------------
+//
+// HTTP/2 frames share a common base format consisting of a 9-byte header followed by 0 to 2^24 - 1
+// bytes of data.
+//
+// Additional size limits can be set by specific application uses. HTTP limits the frame size to
+// 16,384 octets by default, though this can be increased by a receiver.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Length (24) |
+// +---------------+---------------+---------------+
+// | Type (8) | Flags (8) |
+// +-+-----------------------------+---------------+---------------+
+// |R| Stream Identifier (31) |
+// +-+-------------------------------------------------------------+
+// | Frame Data (0...) ...
+// +---------------------------------------------------------------+
+//
+// The fields of the frame header are defined as:
+//
+// * Length:
+// The length of the frame data expressed as an unsigned 24-bit integer. The 9 bytes of the frame
+// header are not included in this value.
+//
+// * Type:
+// The 8-bit type of the frame. The frame type determines how the remainder of the frame header
+// and data are interpreted. Implementations MUST ignore unsupported and unrecognized frame types.
+//
+// * Flags:
+// An 8-bit field reserved for frame-type specific boolean flags.
+//
+// Flags are assigned semantics specific to the indicated frame type. Flags that have no defined
+// semantics for a particular frame type MUST be ignored, and MUST be left unset (0) when sending.
+//
+// * R:
+// A reserved 1-bit field. The semantics of this bit are undefined and the bit MUST remain unset
+// (0) when sending and MUST be ignored when receiving.
+//
+// * Stream Identifier:
+// A 31-bit stream identifier. The value 0 is reserved for frames that are associated with the
+// connection as a whole as opposed to an individual stream.
+//
+// The structure and content of the remaining frame data is dependent entirely on the frame type.
+
// The common frame header is 9 octets: 3 length, 1 type, 1 flags, 4 stream id.
var COMMON_HEADER_SIZE = 9;

// Registry of frame type names, indexed by the wire type code.
var frameTypes = [];

// Registry of valid flag names per frame type, indexed by bit position.
var frameFlags = {};

// Attributes common to every frame object.
var genericAttributes = ['type', 'flags', 'stream'];

// Per-type frame object attributes (used by logging; doubles as documentation).
var typeSpecificAttributes = {};
+
// Prepends the 9-octet common frame header (RFC 7540, section 4.1) to the
// already-serialized payload `buffers`.
//
// @param {Object} frame   frame object (`type`, `flags`, `stream`)
// @param {Array}  buffers payload buffers; the header is unshifted in front
// @returns {Number} the payload size written into the length field
Serializer.commonHeader = function writeCommonHeader(frame, buffers) {
  var headerBuffer = new Buffer(COMMON_HEADER_SIZE);

  var size = 0;
  for (var i = 0; i < buffers.length; i++) {
    size += buffers[i].length;
  }
  // The length field is a 24-bit integer: write the high octet and the low
  // 16 bits separately. (The previous code hard-wired the high octet to 0,
  // which silently truncated any payload of 65536 octets or more.)
  headerBuffer.writeUInt8((size >>> 16) & 0xff, 0);
  headerBuffer.writeUInt16BE(size & 0xffff, 1);

  var typeId = frameTypes.indexOf(frame.type); // If we are here then the type is valid for sure
  headerBuffer.writeUInt8(typeId, 3);

  var flagByte = 0;
  for (var flag in frame.flags) {
    var position = frameFlags[frame.type].indexOf(flag);
    assert(position !== -1, 'Unknown flag for frame type ' + frame.type + ': ' + flag);
    if (frame.flags[flag]) {
      flagByte |= (1 << position);
    }
  }
  headerBuffer.writeUInt8(flagByte, 4);

  // Stream ids are 31-bit: 0x7fffffff itself is a valid id, so the bound is
  // inclusive (the previous strict `<` rejected the largest legal stream id).
  assert((0 <= frame.stream) && (frame.stream <= 0x7fffffff), frame.stream);
  headerBuffer.writeUInt32BE(frame.stream || 0, 5);

  buffers.unshift(headerBuffer);

  return size;
};
+
// Parses the 9-octet common frame header into `frame` (type, flags, stream).
// Returns the payload length on success, or the string 'FRAME_SIZE_ERROR' if
// the buffer is too short. For unknown frame types, `frame.type` is left
// undefined (the caller ignores the frame) but the length is still returned
// so parsing can continue.
Deserializer.commonHeader = function readCommonHeader(buffer, frame) {
  if (buffer.length < 9) {
    return 'FRAME_SIZE_ERROR';
  }

  var totallyWastedByte = buffer.readUInt8(0);
  var length = buffer.readUInt16BE(1);
  // We do this just for sanity checking later on, to make sure no one sent us a
  // frame that's super large.
  length += totallyWastedByte << 16;

  frame.type = frameTypes[buffer.readUInt8(3)];
  if (!frame.type) {
    // We are required to ignore unknown frame types
    return length;
  }

  frame.flags = {};
  var flagByte = buffer.readUInt8(4);
  var definedFlags = frameFlags[frame.type];
  for (var i = 0; i < definedFlags.length; i++) {
    frame.flags[definedFlags[i]] = Boolean(flagByte & (1 << i));
  }

  // The top bit of the stream id is reserved and must be ignored on receipt.
  frame.stream = buffer.readUInt32BE(5) & 0x7fffffff;

  return length;
};
+
+// Frame types
+// ===========
+
+// Every frame type is registered in the following places:
+//
+// * `frameTypes`: a register of frame type codes (used by `commonHeader()`)
+// * `frameFlags`: a register of valid flags for frame types (used by `commonHeader()`)
+// * `typeSpecificAttributes`: a register of frame specific frame object attributes (used by
+// logging code and also serves as documentation for frame objects)
+
+// [DATA Frames](https://tools.ietf.org/html/rfc7540#section-6.1)
+// ------------------------------------------------------------
+//
+// DATA frames (type=0x0) convey arbitrary, variable-length sequences of octets associated with a
+// stream.
+//
+// The DATA frame defines the following flags:
+//
+// * END_STREAM (0x1):
+// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
+// identified stream.
+// * PADDED (0x08):
+// Bit 4 being set indicates that the Pad Length field is present.
+
// Register DATA under type code 0x0. The flag array index is the bit
// position in the flag byte; the attribute list drives the log serializer.
frameTypes[0x0] = 'DATA';

frameFlags.DATA = ['END_STREAM', 'RESERVED2', 'RESERVED4', 'PADDED'];

typeSpecificAttributes.DATA = ['data'];
+
// A DATA payload is the opaque `data` buffer itself; append it unchanged.
Serializer.DATA = function writeData(frame, buffers) {
  buffers.push(frame.data);
};
+
// Extract the DATA payload from `buffer`, stripping padding when the PADDED
// flag is set. Returns an error name on malformed input.
Deserializer.DATA = function readData(buffer, frame) {
  var offset = 0;
  var padLength = 0;

  if (frame.flags.PADDED) {
    if (buffer.length < 1) {
      // Padded frame without the Pad Length octet - bad peer!
      return 'FRAME_SIZE_ERROR';
    }
    padLength = buffer.readUInt8(0) & 0xff;
    offset = 1;
  }

  if (!padLength) {
    frame.data = buffer.slice(offset);
    return;
  }

  if (padLength >= (buffer.length - 1)) {
    // The advertised padding does not fit inside the payload - bad peer!
    return 'FRAME_SIZE_ERROR';
  }
  frame.data = buffer.slice(offset, -1 * padLength);
};
+
+// [HEADERS](https://tools.ietf.org/html/rfc7540#section-6.2)
+// --------------------------------------------------------------
+//
+// The HEADERS frame (type=0x1) allows the sender to create a stream.
+//
+// The HEADERS frame defines the following flags:
+//
+// * END_STREAM (0x1):
+// Bit 1 being set indicates that this frame is the last that the endpoint will send for the
+// identified stream.
+// * END_HEADERS (0x4):
+// The END_HEADERS bit indicates that this frame contains the entire payload necessary to provide
+// a complete set of headers.
+// * PADDED (0x08):
+// Bit 4 being set indicates that the Pad Length field is present.
+// * PRIORITY (0x20):
//   Bit 6 being set indicates that the Exclusive Flag (E), Stream Dependency, and Weight fields are
+// present.
+
// Register HEADERS under type code 0x1 with its flag bit layout (array index
// = bit position) and its loggable attributes.
frameTypes[0x1] = 'HEADERS';

frameFlags.HEADERS = ['END_STREAM', 'RESERVED2', 'END_HEADERS', 'PADDED', 'RESERVED5', 'PRIORITY'];

typeSpecificAttributes.HEADERS = ['priorityDependency', 'priorityWeight', 'exclusiveDependency', 'headers', 'data'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Pad Length? (8)|
+// +-+-------------+---------------+-------------------------------+
+// |E| Stream Dependency? (31) |
+// +-+-------------+-----------------------------------------------+
+// | Weight? (8) |
+// +-+-------------+-----------------------------------------------+
+// | Header Block Fragment (*) ...
+// +---------------------------------------------------------------+
+// | Padding (*) ...
+// +---------------------------------------------------------------+
+//
+// The payload of a HEADERS frame contains a Headers Block
+
// Serialize a HEADERS frame: an optional 5-octet priority block followed by
// the (already compressed) header block fragment in `frame.data`.
Serializer.HEADERS = function writeHeadersPriority(frame, buffers) {
  if (frame.flags.PRIORITY) {
    var priorityBlock = new Buffer(5);
    var dependency = frame.priorityDependency;
    assert((0 <= dependency) && (dependency <= 0x7fffffff), dependency);
    priorityBlock.writeUInt32BE(dependency, 0);
    if (frame.exclusiveDependency) {
      // The exclusive bit lives in the MSB of the dependency word.
      priorityBlock[0] |= 0x80;
    }
    assert((0 <= frame.priorityWeight) && (frame.priorityWeight <= 0xff), frame.priorityWeight);
    priorityBlock.writeUInt8(frame.priorityWeight, 4);
    buffers.push(priorityBlock);
  }
  buffers.push(frame.data);
};
+
// Parse a HEADERS frame: optional Pad Length octet, optional 5-octet
// priority block, then the header block fragment minus any padding.
Deserializer.HEADERS = function readHeadersPriority(buffer, frame) {
  var required = 0;
  if (frame.flags.PADDED) {
    required += 1; // Pad Length octet
  }
  if (frame.flags.PRIORITY) {
    required += 5; // E flag + 31-bit dependency (4) and weight (1)
  }
  if (buffer.length < required) {
    // Peer didn't send enough data - bad peer!
    return 'FRAME_SIZE_ERROR';
  }

  var offset = 0;
  var padLength = 0;
  if (frame.flags.PADDED) {
    padLength = buffer.readUInt8(offset) & 0xff;
    offset = 1;
  }

  if (frame.flags.PRIORITY) {
    // Copy out the dependency word so the E bit can be masked off without
    // mutating the input buffer.
    var dep = new Buffer(4);
    buffer.copy(dep, 0, offset, offset + 4);
    offset += 4;
    frame.exclusiveDependency = !!(dep[0] & 0x80);
    dep[0] &= 0x7f;
    frame.priorityDependency = dep.readUInt32BE(0);
    frame.priorityWeight = buffer.readUInt8(offset);
    offset += 1;
  }

  if (!padLength) {
    frame.data = buffer.slice(offset);
  } else if ((buffer.length - offset) < padLength) {
    // Not enough data left to satisfy the advertised padding - bad peer!
    return 'FRAME_SIZE_ERROR';
  } else {
    frame.data = buffer.slice(offset, -1 * padLength);
  }
};
+
+// [PRIORITY](https://tools.ietf.org/html/rfc7540#section-6.3)
+// -------------------------------------------------------
+//
+// The PRIORITY frame (type=0x2) specifies the sender-advised priority of a stream.
+//
+// The PRIORITY frame does not define any flags.
+
// Register PRIORITY under type code 0x2; it defines no flags.
frameTypes[0x2] = 'PRIORITY';

frameFlags.PRIORITY = [];

typeSpecificAttributes.PRIORITY = ['priorityDependency', 'priorityWeight', 'exclusiveDependency'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |E| Stream Dependency? (31) |
+// +-+-------------+-----------------------------------------------+
+// | Weight? (8) |
+// +-+-------------+
+//
+// The payload of a PRIORITY frame contains an exclusive bit, a 31-bit dependency, and an 8-bit weight
+
// Serialize the fixed 5-octet PRIORITY payload: E bit + 31-bit dependency,
// then the 8-bit weight.
Serializer.PRIORITY = function writePriority(frame, buffers) {
  var payload = new Buffer(5);

  var dependency = frame.priorityDependency;
  assert((0 <= dependency) && (dependency <= 0x7fffffff), dependency);
  payload.writeUInt32BE(dependency, 0);
  if (frame.exclusiveDependency) {
    // The exclusive bit is the MSB of the dependency word.
    payload[0] |= 0x80;
  }

  assert((0 <= frame.priorityWeight) && (frame.priorityWeight <= 0xff), frame.priorityWeight);
  payload.writeUInt8(frame.priorityWeight, 4);

  buffers.push(payload);
};
+
// Parse the fixed 5-octet PRIORITY payload.
Deserializer.PRIORITY = function readPriority(buffer, frame) {
  if (buffer.length < 5) {
    // PRIORITY frames are 5 bytes long. Bad peer!
    return 'FRAME_SIZE_ERROR';
  }
  // Copy the dependency word so the E bit can be stripped without touching
  // the input buffer.
  var dep = new Buffer(4);
  buffer.copy(dep, 0, 0, 4);
  frame.exclusiveDependency = !!(dep[0] & 0x80);
  dep[0] &= 0x7f;
  frame.priorityDependency = dep.readUInt32BE(0);
  frame.priorityWeight = buffer.readUInt8(4);
};
+
+// [RST_STREAM](https://tools.ietf.org/html/rfc7540#section-6.4)
+// -----------------------------------------------------------
+//
+// The RST_STREAM frame (type=0x3) allows for abnormal termination of a stream.
+//
+// No type-flags are defined.
+
// Register RST_STREAM under type code 0x3; it defines no flags.
frameTypes[0x3] = 'RST_STREAM';

frameFlags.RST_STREAM = [];

typeSpecificAttributes.RST_STREAM = ['error'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Error Code (32) |
+// +---------------------------------------------------------------+
+//
+// The RST_STREAM frame contains a single unsigned, 32-bit integer identifying the error
+// code (see Error Codes). The error code indicates why the stream is being terminated.
+
// Serialize the 32-bit error code; `frame.error` must be one of the names
// in `errorCodes` (its index is the wire value).
Serializer.RST_STREAM = function writeRstStream(frame, buffers) {
  var payload = new Buffer(4);
  var code = errorCodes.indexOf(frame.error);
  assert((0 <= code) && (code <= 0xffffffff), code);
  payload.writeUInt32BE(code, 0);
  buffers.push(payload);
};
+
// Parse the 32-bit error code of an RST_STREAM frame.
Deserializer.RST_STREAM = function readRstStream(buffer, frame) {
  if (buffer.length < 4) {
    // RST_STREAM is 4 bytes long. Bad peer!
    return 'FRAME_SIZE_ERROR';
  }
  // Unknown error codes are considered equivalent to INTERNAL_ERROR.
  frame.error = errorCodes[buffer.readUInt32BE(0)] || 'INTERNAL_ERROR';
};
+
+// [SETTINGS](https://tools.ietf.org/html/rfc7540#section-6.5)
+// -------------------------------------------------------
+//
+// The SETTINGS frame (type=0x4) conveys configuration parameters that affect how endpoints
+// communicate.
+//
+// The SETTINGS frame defines the following flag:
+
+// * ACK (0x1):
+// Bit 1 being set indicates that this frame acknowledges receipt and application of the peer's
+// SETTINGS frame.
// Register SETTINGS under type code 0x4 with its single ACK flag.
frameTypes[0x4] = 'SETTINGS';

frameFlags.SETTINGS = ['ACK'];

typeSpecificAttributes.SETTINGS = ['settings'];
+
+// The payload of a SETTINGS frame consists of zero or more settings. Each setting consists of a
+// 16-bit identifier, and an unsigned 32-bit value.
+//
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Identifier(16) | Value (32) |
+// +-----------------+---------------------------------------------+
+// ...Value |
+// +---------------------------------+
+//
+// Each setting in a SETTINGS frame replaces the existing value for that setting. Settings are
+// processed in the order in which they appear, and a receiver of a SETTINGS frame does not need to
+// maintain any state other than the current value of settings. Therefore, the value of a setting
+// is the last value that is seen by a receiver. This permits the inclusion of the same settings
+// multiple times in the same SETTINGS frame, though doing so does nothing other than waste
+// connection capacity.
+
// Serialize `frame.settings` as a sequence of 6-octet (id, value) pairs.
// Asserts if the frame contains a setting name not in `definedSettings`.
Serializer.SETTINGS = function writeSettings(frame, buffers) {
  var entries = [];
  var unknown = Object.keys(frame.settings);
  definedSettings.forEach(function(setting, id) {
    if (setting.name in frame.settings) {
      unknown.splice(unknown.indexOf(setting.name), 1);
      var value = frame.settings[setting.name];
      entries.push({ id: id, value: setting.flag ? Boolean(value) : value });
    }
  });
  assert(unknown.length === 0, 'Unknown settings: ' + unknown.join(', '));

  var buffer = new Buffer(entries.length * 6);
  entries.forEach(function(entry, i) {
    buffer.writeUInt16BE(entry.id & 0xffff, i * 6);
    buffer.writeUInt32BE(entry.value, i * 6 + 2);
  });

  buffers.push(buffer);
};
+
// Parse a SETTINGS payload into `frame.settings`. Unknown setting ids are
// silently ignored; `role` lets a client reject SETTINGS_ENABLE_PUSH.
Deserializer.SETTINGS = function readSettings(buffer, frame, role) {
  frame.settings = {};

  // Receipt of a SETTINGS frame with the ACK flag set and a length
  // field value other than 0 MUST be treated as a connection error
  // (Section 5.4.1) of type FRAME_SIZE_ERROR.
  if (frame.flags.ACK && buffer.length != 0) {
    return 'FRAME_SIZE_ERROR';
  }

  // The payload must be a whole number of 6-octet (id, value) pairs.
  if (buffer.length % 6 !== 0) {
    return 'PROTOCOL_ERROR';
  }

  var count = buffer.length / 6;
  for (var index = 0; index < count; index++) {
    var offset = index * 6;
    var setting = definedSettings[buffer.readUInt16BE(offset) & 0xffff];
    if (!setting) {
      continue; // unknown settings are ignored
    }
    if (role == 'CLIENT' && setting.name == 'SETTINGS_ENABLE_PUSH') {
      return 'SETTINGS frame on client got SETTINGS_ENABLE_PUSH';
    }
    var value = buffer.readUInt32BE(offset + 2);
    frame.settings[setting.name] = setting.flag ? Boolean(value & 0x1) : value;
  }
};
+
+// The following settings are defined:
// The following settings are defined (array index = wire identifier, used
// by both the serializer and the deserializer above; `flag` marks boolean
// settings):
var definedSettings = [];

// * SETTINGS_HEADER_TABLE_SIZE (1):
//   Allows the sender to inform the remote endpoint of the size of the header compression table
//   used to decode header blocks.
definedSettings[1] = { name: 'SETTINGS_HEADER_TABLE_SIZE', flag: false };

// * SETTINGS_ENABLE_PUSH (2):
//   This setting can be used to disable server push. An endpoint MUST NOT send a PUSH_PROMISE frame
//   if it receives this setting set to a value of 0. The default value is 1, which indicates that
//   push is permitted.
definedSettings[2] = { name: 'SETTINGS_ENABLE_PUSH', flag: true };

// * SETTINGS_MAX_CONCURRENT_STREAMS (3):
//   indicates the maximum number of concurrent streams that the sender will allow.
definedSettings[3] = { name: 'SETTINGS_MAX_CONCURRENT_STREAMS', flag: false };

// * SETTINGS_INITIAL_WINDOW_SIZE (4):
//   indicates the sender's initial stream window size (in bytes) for new streams.
definedSettings[4] = { name: 'SETTINGS_INITIAL_WINDOW_SIZE', flag: false };

// * SETTINGS_MAX_FRAME_SIZE (5):
//   indicates the maximum size of a frame the receiver will allow.
definedSettings[5] = { name: 'SETTINGS_MAX_FRAME_SIZE', flag: false };
+
+// [PUSH_PROMISE](https://tools.ietf.org/html/rfc7540#section-6.6)
+// ---------------------------------------------------------------
+//
+// The PUSH_PROMISE frame (type=0x5) is used to notify the peer endpoint in advance of streams the
+// sender intends to initiate.
+//
+// The PUSH_PROMISE frame defines the following flags:
+//
+// * END_PUSH_PROMISE (0x4):
+// The END_PUSH_PROMISE bit indicates that this frame contains the entire payload necessary to
+// provide a complete set of headers.
+
// Register PUSH_PROMISE under type code 0x5 with its flag bit layout.
frameTypes[0x5] = 'PUSH_PROMISE';

frameFlags.PUSH_PROMISE = ['RESERVED1', 'RESERVED2', 'END_PUSH_PROMISE', 'PADDED'];

typeSpecificAttributes.PUSH_PROMISE = ['promised_stream', 'headers', 'data'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |Pad Length? (8)|
+// +-+-------------+-----------------------------------------------+
+// |X| Promised-Stream-ID (31) |
+// +-+-------------------------------------------------------------+
+// | Header Block Fragment (*) ...
+// +---------------------------------------------------------------+
+// | Padding (*) ...
+// +---------------------------------------------------------------+
+//
+// The PUSH_PROMISE frame includes the unsigned 31-bit identifier of
+// the stream the endpoint plans to create along with a minimal set of headers that provide
+// additional context for the stream.
+
// Serialize a PUSH_PROMISE frame: the 31-bit promised stream id followed by
// the header block fragment in `frame.data`.
Serializer.PUSH_PROMISE = function writePushPromise(frame, buffers) {
  var payload = new Buffer(4);

  var promised = frame.promised_stream;
  assert((0 <= promised) && (promised <= 0x7fffffff), promised);
  payload.writeUInt32BE(promised, 0);

  buffers.push(payload);
  buffers.push(frame.data);
};
+
// Parse a PUSH_PROMISE frame: optional Pad Length octet, the promised
// stream id (reserved bit masked off), then the header block fragment.
Deserializer.PUSH_PROMISE = function readPushPromise(buffer, frame) {
  if (buffer.length < 4) {
    return 'FRAME_SIZE_ERROR';
  }

  var offset = 0;
  var padLength = 0;
  if (frame.flags.PADDED) {
    if (buffer.length < 5) {
      // Need room for both the Pad Length octet and the stream id.
      return 'FRAME_SIZE_ERROR';
    }
    padLength = buffer.readUInt8(offset) & 0xff;
    offset = 1;
  }

  frame.promised_stream = buffer.readUInt32BE(offset) & 0x7fffffff;
  offset += 4;

  if (!padLength) {
    frame.data = buffer.slice(offset);
  } else if ((buffer.length - offset) < padLength) {
    return 'FRAME_SIZE_ERROR';
  } else {
    frame.data = buffer.slice(offset, -1 * padLength);
  }
};
+
+// [PING](https://tools.ietf.org/html/rfc7540#section-6.7)
+// -----------------------------------------------
+//
+// The PING frame (type=0x6) is a mechanism for measuring a minimal round-trip time from the
+// sender, as well as determining whether an idle connection is still functional.
+//
+// The PING frame defines one type-specific flag:
+//
+// * ACK (0x1):
+// Bit 1 being set indicates that this PING frame is a PING response.
+
// Register PING under type code 0x6 with its single ACK flag.
frameTypes[0x6] = 'PING';

frameFlags.PING = ['ACK'];

typeSpecificAttributes.PING = ['data'];
+
+// In addition to the frame header, PING frames MUST contain 8 additional octets of opaque data.
+
// The 8-octet opaque payload is carried verbatim in `frame.data`.
Serializer.PING = function writePing(frame, buffers) {
  buffers.push(frame.data);
};

// A PING payload must be exactly 8 octets; anything else is a frame size error.
Deserializer.PING = function readPing(buffer, frame) {
  if (buffer.length !== 8) {
    return 'FRAME_SIZE_ERROR';
  }
  frame.data = buffer;
};
+
+// [GOAWAY](https://tools.ietf.org/html/rfc7540#section-6.8)
+// ---------------------------------------------------
+//
+// The GOAWAY frame (type=0x7) informs the remote peer to stop creating streams on this connection.
+//
+// The GOAWAY frame does not define any flags.
+
// Register GOAWAY under type code 0x7; it defines no flags.
frameTypes[0x7] = 'GOAWAY';

frameFlags.GOAWAY = [];

typeSpecificAttributes.GOAWAY = ['last_stream', 'error'];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// |X| Last-Stream-ID (31) |
+// +-+-------------------------------------------------------------+
+// | Error Code (32) |
+// +---------------------------------------------------------------+
+//
+// The last stream identifier in the GOAWAY frame contains the highest numbered stream identifier
+// for which the sender of the GOAWAY frame has received frames on and might have taken some action
+// on.
+//
+// The GOAWAY frame also contains a 32-bit error code (see Error Codes) that contains the reason for
+// closing the connection.
+
// Serialize the fixed 8-octet GOAWAY payload: last stream id + error code.
Serializer.GOAWAY = function writeGoaway(frame, buffers) {
  var payload = new Buffer(8);

  var lastStream = frame.last_stream;
  assert((0 <= lastStream) && (lastStream <= 0x7fffffff), lastStream);
  payload.writeUInt32BE(lastStream, 0);

  // The error code is the index of the name in `errorCodes`.
  var code = errorCodes.indexOf(frame.error);
  assert((0 <= code) && (code <= 0xffffffff), code);
  payload.writeUInt32BE(code, 4);

  buffers.push(payload);
};
+
// Parse the fixed 8-octet GOAWAY payload.
Deserializer.GOAWAY = function readGoaway(buffer, frame) {
  if (buffer.length !== 8) {
    // GOAWAY must have 8 bytes
    return 'FRAME_SIZE_ERROR';
  }
  frame.last_stream = buffer.readUInt32BE(0) & 0x7fffffff;
  // Unknown error codes are to be considered equivalent to INTERNAL_ERROR.
  frame.error = errorCodes[buffer.readUInt32BE(4)] || 'INTERNAL_ERROR';
};
+
+// [WINDOW_UPDATE](https://tools.ietf.org/html/rfc7540#section-6.9)
+// -----------------------------------------------------------------
+//
+// The WINDOW_UPDATE frame (type=0x8) is used to implement flow control.
+//
+// The WINDOW_UPDATE frame does not define any flags.
+
// Register WINDOW_UPDATE under type code 0x8; it defines no flags.
frameTypes[0x8] = 'WINDOW_UPDATE';

frameFlags.WINDOW_UPDATE = [];

typeSpecificAttributes.WINDOW_UPDATE = ['window_size'];
+
+// The payload of a WINDOW_UPDATE frame is a 32-bit value indicating the additional number of bytes
+// that the sender can transmit in addition to the existing flow control window. The legal range
+// for this field is 1 to 2^31 - 1 (0x7fffffff) bytes; the most significant bit of this value is
+// reserved.
+
// Serialize the 31-bit window increment; legal range is 1..2^31-1.
Serializer.WINDOW_UPDATE = function writeWindowUpdate(frame, buffers) {
  var payload = new Buffer(4);

  var increment = frame.window_size;
  assert((0 < increment) && (increment <= 0x7fffffff), increment);
  payload.writeUInt32BE(increment, 0);

  buffers.push(payload);
};
+
// Parse the window increment; the reserved MSB is masked off.
// NOTE(review): WINDOW_UPDATE_PAYLOAD_SIZE is declared elsewhere in this
// file — presumably 4; confirm against its definition.
Deserializer.WINDOW_UPDATE = function readWindowUpdate(buffer, frame) {
  if (buffer.length !== WINDOW_UPDATE_PAYLOAD_SIZE) {
    return 'FRAME_SIZE_ERROR';
  }
  frame.window_size = buffer.readUInt32BE(0) & 0x7fffffff;
  // A zero increment is a protocol violation (RFC 7540, Section 6.9).
  if (frame.window_size === 0) {
    return 'PROTOCOL_ERROR';
  }
};
+
+// [CONTINUATION](https://tools.ietf.org/html/rfc7540#section-6.10)
+// ------------------------------------------------------------
+//
+// The CONTINUATION frame (type=0x9) is used to continue a sequence of header block fragments.
+//
+// The CONTINUATION frame defines the following flag:
+//
+// * END_HEADERS (0x4):
+// The END_HEADERS bit indicates that this frame ends the sequence of header block fragments
+// necessary to provide a complete set of headers.
+
// Register CONTINUATION under type code 0x9.
frameTypes[0x9] = 'CONTINUATION';

frameFlags.CONTINUATION = ['RESERVED1', 'RESERVED2', 'END_HEADERS'];

typeSpecificAttributes.CONTINUATION = ['headers', 'data'];

// The payload is just a header block fragment, carried verbatim in `data`.
Serializer.CONTINUATION = function writeContinuation(frame, buffers) {
  buffers.push(frame.data);
};

Deserializer.CONTINUATION = function readContinuation(buffer, frame) {
  frame.data = buffer;
};
+
+// [ALTSVC](https://tools.ietf.org/html/rfc7838#section-4)
+// ------------------------------------------------------------
+//
+// The ALTSVC frame (type=0xA) advertises the availability of an alternative service to the client.
+//
+// The ALTSVC frame does not define any flags.
+
// Register ALTSVC (RFC 7838) under type code 0xA; it defines no flags.
frameTypes[0xA] = 'ALTSVC';

frameFlags.ALTSVC = [];
+
+// 0 1 2 3
+// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+// | Origin-Len (16) | Origin? (*) ...
+// +-------------------------------+----------------+--------------+
+// | Alt-Svc-Field-Value (*) ...
+// +---------------------------------------------------------------+
+//
+// The ALTSVC frame contains the following fields:
+//
+// Origin-Len: An unsigned, 16-bit integer indicating the length, in
+// octets, of the Origin field.
+//
+// Origin: An OPTIONAL sequence of characters containing ASCII
+// serialisation of an origin ([RFC6454](https://tools.ietf.org/html/rfc6454),
+// Section 6.2) that the alternate service is applicable to.
+//
+// Alt-Svc-Field-Value: A sequence of octets (length determined by
+// subtracting the length of all preceding fields from the frame
+// length) containing a value identical to the Alt-Svc field value
+// defined in (Section 3)[https://tools.ietf.org/html/rfc7838#section-3]
+// (ABNF production "Alt-Svc").
+
// Fields parsed out of an ALTSVC frame (note: no 'data' attribute).
typeSpecificAttributes.ALTSVC = ['maxAge', 'port', 'protocolID', 'host',
                                 'origin'];
+
// True when `c` is an RFC 7230 "tchar", i.e. allowed unescaped in the
// ALTSVC protocol identifier.
function istchar(c) {
  var TCHAR = '!#$&\'*+-.^_`|~1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz';
  return TCHAR.indexOf(c) > -1;
}

// Percent-encode every non-tchar character of `s` as '%' followed by the
// hex of its byte(s); tchars pass through unchanged.
function hexencode(s) {
  var encoded = '';
  for (var i = 0; i < s.length; i++) {
    var ch = s[i];
    encoded += istchar(ch) ? ch : ('%' + new Buffer(ch).toString('hex'));
  }
  return encoded;
}
+
// Serialize an ALTSVC frame: 16-bit Origin-Len, the origin, then the
// Alt-Svc-Field-Value built from protocolID/host/port (and ma when not
// the default).
Serializer.ALTSVC = function writeAltSvc(frame, buffers) {
  var originLength = new Buffer(2);
  originLength.writeUInt16BE(frame.origin.length, 0);
  buffers.push(originLength);
  buffers.push(new Buffer(frame.origin, 'ascii'));

  var fieldValue = hexencode(frame.protocolID) + '="' + frame.host + ':' + frame.port + '"';
  if (frame.maxAge !== 86400) { // 86400 is the default
    fieldValue += "; ma=" + frame.maxAge;
  }

  buffers.push(new Buffer(fieldValue, 'ascii'));
};
+
// Remove any run of leading and trailing double-quote characters from `s`.
// Returns '' when nothing but quotes remains.
function stripquotes(s) {
  var first = 0;
  var last = s.length;
  while ((first < last) && (s[first] === '"')) {
    first += 1;
  }
  while ((last > first) && (s[last - 1] === '"')) {
    last -= 1;
  }
  return (first >= last) ? "" : s.substring(first, last);
}

// Split a 'name=value' pair on the first '=' that is outside double quotes.
// Returns { name, value: null } when there is no such '='.
function splitNameValue(nvpair) {
  var eqIndex = -1;
  var quoted = false;

  for (var i = 0; i < nvpair.length; i++) {
    var ch = nvpair[i];
    if (ch === '"') {
      quoted = !quoted;
    } else if (!quoted && (ch === '=')) {
      eqIndex = i;
      break;
    }
  }

  if (eqIndex === -1) {
    return {'name': nvpair, 'value': null};
  }

  return {
    'name': stripquotes(nvpair.substring(0, eqIndex).trim()),
    'value': stripquotes(nvpair.substring(eqIndex + 1).trim())
  };
}
+
// Split one Alt-Svc alternative into its ';'-separated name=value parameters.
function splitHeaderParameters(hv) {
  return parseHeaderValue(hv, ';', splitNameValue);
}
+
// Split header value `hv` on `separator` (ignoring separators inside double
// quotes), trim each piece, drop empty pieces, and map the rest through
// `callback`. Returns the resulting array.
function parseHeaderValue(hv, separator, callback) {
  var values = [];
  var segmentStart = 0;
  var inQuotes = false;

  // Trim, filter and transform the segment ending at `end`.
  function flush(end) {
    var piece = hv.substring(segmentStart, end).trim();
    if (piece.length > 0) {
      values.push(callback(piece));
    }
  }

  for (var i = 0; i < hv.length; i++) {
    var ch = hv[i];
    if (ch === '"') {
      inQuotes = !inQuotes;
    } else if (!inQuotes && (ch === separator)) {
      flush(i);
      segmentStart = i + 1;
    }
  }
  flush(hv.length);

  return values;
}
+
// Like String.split but starting from the right, performing at most `count`
// splits on the single-character delimiter `delim`.
function rsplit(s, delim, count) {
  var pieces = [];
  var splits = 0;
  var sliceEnd = s.length;

  for (var i = s.length - 1; i >= 0; i--) {
    if (s[i] !== delim) {
      continue;
    }
    pieces.unshift(s.substring(i + 1, sliceEnd));
    sliceEnd = i;
    splits += 1;
    if (splits === count) {
      break;
    }
  }

  // Whatever is left of the last delimiter becomes the head piece.
  if (sliceEnd !== 0) {
    pieces.unshift(s.substring(0, sliceEnd));
  }
  return pieces;
}
+
// True when `c` is a single hexadecimal digit.
function ishex(c) {
  return '0123456789ABCDEFabcdef'.indexOf(c) > -1;
}

// Decode %XX percent-escapes (the inverse of hexencode). A '%' not followed
// by two hex digits is passed through literally.
function unescape(s) {
  var out = '';
  var i = 0;
  while (i < s.length) {
    if ((s[i] === '%') && ishex(s[i + 1]) && ishex(s[i + 2])) {
      // Both hex digits are guaranteed present by the guard above.
      out += new Buffer(s.substring(i + 1, i + 3), 'hex').toString();
      i += 3;
    } else {
      out += s[i];
      i += 1;
    }
  }
  return out;
}
+
// Parse an ALTSVC frame: Origin-Len, origin, then the Alt-Svc field value.
// Only the first advertised alternative is used; extra parameters other
// than 'ma' are ignored.
Deserializer.ALTSVC = function readAltSvc(buffer, frame) {
  if (buffer.length < 2) {
    return 'FRAME_SIZE_ERROR';
  }
  var originLength = buffer.readUInt16BE(0);
  if ((buffer.length - 2) < originLength) {
    return 'FRAME_SIZE_ERROR';
  }
  frame.origin = buffer.toString('ascii', 2, 2 + originLength);

  var fieldValue = buffer.toString('ascii', 2 + originLength);
  var alternatives = parseHeaderValue(fieldValue, ',', splitHeaderParameters);
  if (alternatives.length === 0) {
    // Well that's a malformed frame. Just ignore it.
    return;
  }
  // TODO - warn when more than one alternative is listed; we only use the first.

  var chosen = alternatives[0];
  frame.maxAge = 86400; // Default
  chosen.forEach(function(param, index) {
    if (index === 0) {
      // The first parameter is protocolID="<host>:<port>".
      frame.protocolID = unescape(param.name);
      var hostport = rsplit(param.value, ':', 1);
      frame.host = hostport[0];
      frame.port = parseInt(hostport[1], 10);
    } else if (param.name == 'ma') {
      frame.maxAge = parseInt(param.value, 10);
    }
    // Otherwise, we just ignore this parameter.
  });
};
+
+// BLOCKED
+// ------------------------------------------------------------
+//
+// The BLOCKED frame (type=0xB) indicates that the sender is unable to send data
+// due to a closed flow control window.
+//
+// The BLOCKED frame does not define any flags and contains no payload.
+
// Register BLOCKED under type code 0xB; no flags, no attributes.
frameTypes[0xB] = 'BLOCKED';

frameFlags.BLOCKED = [];

typeSpecificAttributes.BLOCKED = [];

// BLOCKED carries no payload, so there is nothing to write...
Serializer.BLOCKED = function writeBlocked(frame, buffers) {
};

// ...and nothing to read.
Deserializer.BLOCKED = function readBlocked(buffer, frame) {
};
+
+// [Error Codes](https://tools.ietf.org/html/rfc7540#section-7)
+// ------------------------------------------------------------
+
// HTTP/2 error code names; the array index is the numeric code sent on the
// wire (looked up via indexOf() when serializing RST_STREAM/GOAWAY and by
// index when deserializing).
var errorCodes = [
  'NO_ERROR',
  'PROTOCOL_ERROR',
  'INTERNAL_ERROR',
  'FLOW_CONTROL_ERROR',
  'SETTINGS_TIMEOUT',
  'STREAM_CLOSED',
  'FRAME_SIZE_ERROR',
  'REFUSED_STREAM',
  'CANCEL',
  'COMPRESSION_ERROR',
  'CONNECT_ERROR',
  'ENHANCE_YOUR_CALM',
  'INADEQUATE_SECURITY',
  'HTTP_1_1_REQUIRED'
];
+
+// Logging
+// -------
+
+// [Bunyan serializers](https://github.com/trentm/node-bunyan#serializers) to improve logging output
+// for debug messages emitted in this component.
exports.serializers = {};

// * `frame` serializer: it transforms data attributes from Buffers to hex strings and filters out
//   flags that are not present. Each frame gets a numeric `id` the first time
//   it is logged; later logs of the same frame emit just that id.
var frameCounter = 0;
exports.serializers.frame = function(frame) {
  if (!frame) {
    return null;
  }

  if ('id' in frame) {
    return frame.id;
  }

  frame.id = frameCounter;
  frameCounter += 1;

  var logEntry = { id: frame.id };
  genericAttributes.concat(typeSpecificAttributes[frame.type]).forEach(function(name) {
    logEntry[name] = frame[name];
  });

  if (frame.data instanceof Buffer) {
    // Bug fix: read the length from frame.data, not logEntry.data —
    // logEntry.data is undefined for frame types whose attribute list does
    // not include 'data' (e.g. ALTSVC), which used to throw a TypeError here.
    if (frame.data.length > 50) {
      logEntry.data = frame.data.slice(0, 47).toString('hex') + '...';
    } else {
      logEntry.data = frame.data.toString('hex');
    }

    if (!('length' in logEntry)) {
      logEntry.length = frame.data.length;
    }
  }

  if (frame.promised_stream instanceof Object) {
    logEntry.promised_stream = 'stream-' + frame.promised_stream.id;
  }

  // Only flags that are actually set are worth logging.
  logEntry.flags = Object.keys(frame.flags || {}).filter(function(name) {
    return frame.flags[name] === true;
  });

  return logEntry;
};
+
// * `data` serializer: it simply transforms a buffer to a hex string.
exports.serializers.data = function(data) {
  return data.toString('hex');
};
diff --git a/testing/xpcshell/node-http2/lib/protocol/index.js b/testing/xpcshell/node-http2/lib/protocol/index.js
new file mode 100644
index 0000000000..0f3720e2ce
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/index.js
@@ -0,0 +1,91 @@
+// This is an implementation of the [HTTP/2][http2]
+// framing layer for [node.js][node].
+//
+// The main building blocks are [node.js streams][node-stream] that are connected through pipes.
+//
+// The main components are:
+//
+// * [Endpoint](endpoint.html): represents an HTTP/2 endpoint (client or server). It's
//   responsible for the first part of the handshake process (sending/receiving the
+// [connection header][http2-connheader]) and manages other components (framer, compressor,
+// connection, streams) that make up a client or server.
+//
+// * [Connection](connection.html): multiplexes the active HTTP/2 streams, manages connection
+// lifecycle and settings, and responsible for enforcing the connection level limits (flow
+// control, initiated stream limit)
+//
+// * [Stream](stream.html): implementation of the [HTTP/2 stream concept][http2-stream].
+// Implements the [stream state machine][http2-streamstate] defined by the standard, provides
+// management methods and events for using the stream (sending/receiving headers, data, etc.),
+// and enforces stream level constraints (flow control, sending only legal frames).
+//
+// * [Flow](flow.html): implements flow control for Connection and Stream as parent class.
+//
+// * [Compressor and Decompressor](compressor.html): compression and decompression of HEADER and
+// PUSH_PROMISE frames
+//
+// * [Serializer and Deserializer](framer.html): the lowest layer in the stack that transforms
+// between the binary and the JavaScript object representation of HTTP/2 frames
+//
+// [http2]: https://tools.ietf.org/html/rfc7540
+// [http2-connheader]: https://tools.ietf.org/html/rfc7540#section-3.5
+// [http2-stream]: https://tools.ietf.org/html/rfc7540#section-5
+// [http2-streamstate]: https://tools.ietf.org/html/rfc7540#section-5.1
+// [node]: https://nodejs.org/
+// [node-stream]: https://nodejs.org/api/stream.html
+// [node-https]: https://nodejs.org/api/https.html
+// [node-http]: https://nodejs.org/api/http.html
+
exports.VERSION = 'h2';

exports.Endpoint = require('./endpoint').Endpoint;

/* Bunyan serializers exported by submodules that are worth adding when creating a logger. */
exports.serializers = {};
['./framer', './compressor', './flow', './connection', './stream', './endpoint']
  .forEach(function(path) {
    var submodule = require(path);
    for (var name in submodule.serializers) {
      exports.serializers[name] = submodule.serializers[name];
    }
  });
+
+/*
+ Stream API Endpoint API
+ Stream data
+
+ | ^ | ^
+ | | | |
+ | | | |
+ +-----------|------------|---------------------------------------+
+ | | | Endpoint |
+ | | | |
+ | +-------|------------|-----------------------------------+ |
+ | | | | Connection | |
+ | | v | | |
+ | | +-----------------------+ +-------------------- | |
+ | | | Stream | | Stream ... | |
+ | | +-----------------------+ +-------------------- | |
+ | | | ^ | ^ | |
+ | | v | v | | |
+ | | +------------+--+--------+--+------------+- ... | |
+ | | | ^ | |
+ | | | | | |
+ | +-----------------------|--------|-----------------------+ |
+ | | | |
+ | v | |
+ | +--------------------------+ +--------------------------+ |
+ | | Compressor | | Decompressor | |
+ | +--------------------------+ +--------------------------+ |
+ | | ^ |
+ | v | |
+ | +--------------------------+ +--------------------------+ |
+ | | Serializer | | Deserializer | |
+ | +--------------------------+ +--------------------------+ |
+ | | ^ |
+ +---------------------------|--------|---------------------------+
+ | |
+ v |
+
+ Raw data
+
+*/
diff --git a/testing/xpcshell/node-http2/lib/protocol/stream.js b/testing/xpcshell/node-http2/lib/protocol/stream.js
new file mode 100644
index 0000000000..6d520b9496
--- /dev/null
+++ b/testing/xpcshell/node-http2/lib/protocol/stream.js
@@ -0,0 +1,659 @@
+var assert = require('assert');
+
+// The Stream class
+// ================
+
+// Stream is a [Duplex stream](https://nodejs.org/api/stream.html#stream_class_stream_duplex)
+// subclass that implements the [HTTP/2 Stream](https://tools.ietf.org/html/rfc7540#section-5)
+// concept. It has two 'sides': one that is used by the user to send/receive data (the `stream`
+// object itself) and one that is used by a Connection to read/write frames to/from the other peer
+// (`stream.upstream`).
+
+var Duplex = require('stream').Duplex;
+
+exports.Stream = Stream;
+
+// Public API
+// ----------
+
+// * **new Stream(log, connection)**: create a new Stream
+//
+// * **Event: 'headers' (headers)**: signals incoming headers
+//
+// * **Event: 'promise' (stream, headers)**: signals an incoming push promise
+//
+// * **Event: 'priority' (priority)**: signals a priority change. `priority` is a number between 0
+// (highest priority) and 2^31-1 (lowest priority). Default value is 2^30.
+//
+// * **Event: 'error' (type)**: signals an error
+//
+// * **headers(headers)**: send headers
+//
+// * **promise(headers): Stream**: promise a stream
+//
+// * **priority(priority)**: set the priority of the stream. Priority can be changed by the peer
+// too, but once it is set locally, it can not be changed remotely.
+//
+// * **reset(error)**: reset the stream with an error code
+//
+// * **upstream**: a [Flow](flow.js) that is used by the parent connection to write/read frames
+// that are to be sent/arrived to/from the peer and are related to this stream.
+//
+// Headers are always in the [regular node.js header format][1].
+// [1]: https://nodejs.org/api/http.html#http_message_headers
+
+// Constructor
+// -----------
+
// The main aspects of managing the stream are:
//
// `log` is a bunyan-style parent logger; `connection` is the Connection that
// owns this stream. NOTE: `this._log` must be assigned before the
// `_initialize*` calls run, since `_initializeDataFlow` copies it onto the
// upstream Flow.
function Stream(log, connection) {
  Duplex.call(this);

  // * logging: a child logger tagged with this stream (via the `s` serializer)
  this._log = log.child({ component: 'stream', s: this });

  // * receiving and sending stream management commands
  this._initializeManagement();

  // * sending and receiving frames to/from the upstream connection
  this._initializeDataFlow();

  // * maintaining the state of the stream (idle, open, closed, etc.) and error detection
  this._initializeState();

  this.connection = connection;
}
+
+Stream.prototype = Object.create(Duplex.prototype, { constructor: { value: Stream } });
+
+// Managing the stream
+// -------------------
+
// Stream priority constants: priorities live in the 0 .. 2^31-1 range
// (0 is the highest, 2^31-1 the lowest); a fresh stream sits in the middle.
var DEFAULT_PRIORITY = Math.pow(2, 30);
var MAX_PRIORITY = Math.pow(2, 31) - 1;

// Sets up the bookkeeping used by the stream management commands.
// Incoming PUSH_PROMISE and HEADERS frames are forwarded to the user
// through events.
Stream.prototype._initializeManagement = function _initializeManagement() {
  this._letPeerPrioritize = true;
  this._priority = DEFAULT_PRIORITY;
  this._resetSent = false;
};
+
// Initiates a server push: creates the promised stream with a slightly lower
// priority than this one, queues a PUSH_PROMISE frame carrying the promised
// request headers, and returns the new Stream.
Stream.prototype.promise = function promise(headers) {
  var promised = new Stream(this._log, this.connection);
  var lowered = this._priority + 1;
  promised._priority = (lowered > MAX_PRIORITY) ? MAX_PRIORITY : lowered;
  this._pushUpstream({
    type: 'PUSH_PROMISE',
    flags: {},
    stream: this.id,
    promised_stream: promised,
    headers: headers
  });
  return promised;
};

// An incoming PUSH_PROMISE is surfaced to the user as a 'promise' event.
Stream.prototype._onPromise = function _onPromise(frame) {
  this.emit('promise', frame.promised_stream, frame.headers);
};
+
// Queues a HEADERS frame carrying the given header set.
Stream.prototype.headers = function headers(headers) {
  var frame = {
    type: 'HEADERS',
    flags: {},
    stream: this.id,
    headers: headers
  };
  this._pushUpstream(frame);
};

// Incoming HEADERS: apply any piggybacked priority first (marked as a
// peer-initiated change), then hand the header set to the user.
Stream.prototype._onHeaders = function _onHeaders(frame) {
  var incomingPriority = frame.priority;
  if (incomingPriority !== undefined) {
    this.priority(incomingPriority, true);
  }
  this.emit('headers', frame.headers);
};
+
// Changes the priority of the stream. `peer` is true when the change was
// requested by the remote endpoint. Once the local side has set a priority
// explicitly, later peer-initiated changes are ignored.
Stream.prototype.priority = function priority(priority, peer) {
  if (peer && !this._letPeerPrioritize) {
    return;
  }

  if (!peer) {
    this._letPeerPrioritize = false;

    // Piggyback on the last queued HEADERS/PRIORITY frame when possible
    // instead of queueing a separate PRIORITY frame.
    var lastFrame = this.upstream.getLastQueuedFrame();
    if (lastFrame && ((lastFrame.type === 'HEADERS') || (lastFrame.type === 'PRIORITY'))) {
      lastFrame.priority = priority;
    } else {
      this._pushUpstream({
        type: 'PRIORITY',
        flags: {},
        stream: this.id,
        priority: priority
      });
    }
  }

  this._log.debug({ priority: priority }, 'Changing priority');
  this.emit('priority', priority);
  this._priority = priority;
};

// A PRIORITY frame from the peer is simply a peer-initiated priority change.
Stream.prototype._onPriority = function _onPriority(frame) {
  this.priority(frame.priority, true);
};
+
// Resets the stream with the given error code. An endpoint SHOULD NOT send
// more than one RST_STREAM frame for any stream, so repeated calls are
// no-ops.
Stream.prototype.reset = function reset(error) {
  if (this._resetSent) {
    return;
  }
  this._resetSent = true;
  this._pushUpstream({
    type: 'RST_STREAM',
    flags: {},
    stream: this.id,
    error: error
  });
};
+
// Advertises an alternate service for the origin of this stream (ALTSVC
// frame). When `origin` is given, the advertisement is sent on stream 0;
// otherwise it is scoped to this stream.
Stream.prototype.altsvc = function altsvc(host, port, protocolID, maxAge, origin) {
  this._pushUpstream({
    type: 'ALTSVC',
    flags: {},
    stream: origin ? 0 : this.id,
    host: host,
    port: port,
    protocolID: protocolID,
    origin: origin,
    maxAge: maxAge
  });
};
+
+// Data flow
+// ---------
+
+// The incoming and the generated outgoing frames are received/transmitted on the `this.upstream`
+// [Flow](flow.html). The [Connection](connection.html) object instantiating the stream will read
+// and write frames to/from it. The stream itself is a regular [Duplex stream][1], and is used by
+// the user to write or read the body of the request.
+// [1]: https://nodejs.org/api/stream.html#stream_class_stream_duplex
+
+// upstream side stream user side
+//
+// +------------------------------------+
+// | |
+// +------------------+ |
+// | upstream | |
+// | | |
+// +--+ | +--|
+// read() | | _send() | _write() | | write(buf)
+// <--------------|B |<--------------|--------------| B|<------------
+// | | | | |
+// frames +--+ | +--| buffers
+// | | | | |
+// -------------->|B |---------------|------------->| B|------------>
+// write(frame) | | _receive() | _read() | | read()
+// +--+ | +--|
+// | | |
+// | | |
+// +------------------+ |
+// | |
+// +------------------------------------+
+//
+// B: input or output buffer
+
+var Flow = require('./flow').Flow;
+
Stream.prototype._initializeDataFlow = function _initializeDataFlow() {
  // The stream ID is assigned later by the parent Connection.
  this.id = undefined;

  this._ended = false;

  // `upstream` is the Flow the parent Connection reads/writes frames on. Its
  // hooks are redirected to this Stream's implementations, and its `write`
  // is overridden so control frames are processed immediately on arrival.
  var upstream = new Flow();
  upstream._log = this._log;
  upstream._send = this._send.bind(this);
  upstream._receive = this._receive.bind(this);
  upstream.write = this._writeUpstream.bind(this);
  upstream.on('error', this.emit.bind(this, 'error'));
  this.upstream = upstream;

  this.on('finish', this._finishing);
};
+
// Queues an outgoing frame on the upstream Flow and runs the state machine
// for it.
//
// Returns the value of the underlying `push()` call so that callers can
// observe backpressure. The previous implementation returned `undefined`,
// which made `_write`'s `moreNeeded` check always falsy, so the user side was
// always parked waiting for a `_send` drain even when the upstream queue
// could accept more frames.
Stream.prototype._pushUpstream = function _pushUpstream(frame) {
  var moreNeeded = this.upstream.push(frame);
  this._transition(true, frame);
  return moreNeeded;
};
+
// Overriding the upstream's `write` allows us to act immediately instead of
// waiting for the input queue to empty. This is important in case of control
// frames.
Stream.prototype._writeUpstream = function _writeUpstream(frame) {
  this._log.debug({ frame: frame }, 'Receiving frame');

  var moreNeeded = Flow.prototype.write.call(this.upstream, frame);

  // * Transition to a new state if that's the effect of receiving the frame
  this._transition(false, frame);

  // * Dispatch control frames to their handlers. DATA, WINDOW_UPDATE and
  //   RST_STREAM need no extra handling here; any other type is not a valid
  //   stream level frame.
  switch (frame.type) {
    case 'HEADERS':
      if (this._processedHeaders && !frame.flags['END_STREAM']) {
        this.emit('error', 'PROTOCOL_ERROR');
      }
      this._processedHeaders = true;
      this._onHeaders(frame);
      break;
    case 'PUSH_PROMISE':
      this._onPromise(frame);
      break;
    case 'PRIORITY':
      this._onPriority(frame);
      break;
    case 'ALTSVC':
    case 'BLOCKED':
      // TODO: not handled yet
      break;
    case 'DATA':
    case 'WINDOW_UPDATE':
    case 'RST_STREAM':
      break;
    default:
      this._log.error({ frame: frame }, 'Invalid stream level frame');
      this.emit('error', 'PROTOCOL_ERROR');
  }

  return moreNeeded;
};
+
// The `_receive` method (= `upstream._receive`) gets called when there's an incoming frame.
Stream.prototype._receive = function _receive(frame, ready) {
  // * If it's a DATA frame, then push the payload into the output buffer on the other side.
  //   Call ready when the other side is ready to receive more. If `push()` reports backpressure,
  //   `ready` is parked in `_receiveMore` and released later by `_read`.
  if (!this._ended && (frame.type === 'DATA')) {
    var moreNeeded = this.push(frame.data);
    if (!moreNeeded) {
      this._receiveMore = ready;
    }
  }

  // * Any frame may signal the end of the stream with the END_STREAM flag
  if (!this._ended && (frame.flags.END_STREAM || (frame.type === 'RST_STREAM'))) {
    this.push(null);
    this._ended = true;
  }

  // * Postpone calling `ready` if `push()` returned a falsy value: `ready` is called right here
  //   only when it was NOT parked in `_receiveMore` above.
  if (this._receiveMore !== ready) {
    ready();
  }
};
+
// Called when the user side is ready to receive more data. If there's a
// pending write on the upstream, release its parked ready callback so more
// frames flow in.
Stream.prototype._read = function _read() {
  var pending = this._receiveMore;
  if (pending) {
    delete this._receiveMore;
    pending();
  }
};
+
// Handles a write request coming from the user: wraps the buffer in a DATA
// frame and queues it upstream. Chunking is done by the upstream Flow.
Stream.prototype._write = function _write(buffer, encoding, ready) {
  var moreNeeded = this._pushUpstream({
    type: 'DATA',
    flags: {},
    stream: this.id,
    data: buffer
  });

  // Park the callback until `_send` fires when upstream is saturated,
  // otherwise signal readiness immediately.
  if (!moreNeeded) {
    this._sendMore = ready;
  } else {
    ready();
  }
};
+
// The `_send` (= `upstream._send`) method is called when upstream is ready to
// receive more frames. If there's a pending write on the user side, release
// its parked ready callback so more writes come in.
Stream.prototype._send = function _send() {
  var pending = this._sendMore;
  if (pending) {
    delete this._sendMore;
    pending();
  }
};
+
// Runs when the user calls `end()` on the stream: the last outgoing frame
// must carry the END_STREAM flag. If the last queued frame can carry it
// (DATA or HEADERS), set the flag there and only run the state machine;
// otherwise queue a 0-length DATA frame that exists just to carry the flag.
// Piggybacking on an existing frame is a nice optimization.
var emptyBuffer = new Buffer(0);
Stream.prototype._finishing = function _finishing() {
  var endFrame = {
    type: 'DATA',
    flags: { END_STREAM: true },
    stream: this.id,
    data: emptyBuffer
  };
  var lastFrame = this.upstream.getLastQueuedFrame();
  var canPiggyback = lastFrame &&
                     ((lastFrame.type === 'DATA') || (lastFrame.type === 'HEADERS'));
  if (canPiggyback) {
    this._log.debug({ frame: lastFrame }, 'Marking last frame with END_STREAM flag.');
    lastFrame.flags.END_STREAM = true;
    this._transition(true, endFrame);
  } else {
    this._pushUpstream(endFrame);
  }
};
+
+// [Stream States](https://tools.ietf.org/html/rfc7540#section-5.1)
+// ----------------
+//
+// +--------+
+// PP | | PP
+// ,--------| idle |--------.
+// / | | \
+// v +--------+ v
+// +----------+ | +----------+
+// | | | H | |
+// ,---| reserved | | | reserved |---.
+// | | (local) | v | (remote) | |
+// | +----------+ +--------+ +----------+ |
+// | | ES | | ES | |
+// | | H ,-------| open |-------. | H |
+// | | / | | \ | |
+// | v v +--------+ v v |
+// | +----------+ | +----------+ |
+// | | half | | | half | |
+// | | closed | | R | closed | |
+// | | (remote) | | | (local) | |
+// | +----------+ | +----------+ |
+// | | v | |
+// | | ES / R +--------+ ES / R | |
+// | `----------->| |<-----------' |
+// | R | closed | R |
+// `-------------------->| |<--------------------'
+// +--------+
+
// Streams begin in the IDLE state; transitions happen when there's an
// incoming or outgoing frame (see `_transition`).
Stream.prototype._initializeState = function _initializeState() {
  this.state = 'IDLE';
  this._processedHeaders = false;
  this._initiated = undefined;
  this._closedByUs = undefined;
  this._closedWithRst = undefined;
};
+
// Only `_setState` should change `this.state` directly. It also logs the state change and notifies
// interested parties using the 'state' event. Asserts that the transition is a real change:
// callers must not "transition" to the current state.
Stream.prototype._setState = function transition(state) {
  assert(this.state !== state);
  this._log.debug({ from: this.state, to: state }, 'State transition');
  this.state = state;
  this.emit('state', state);
};
+
// A state is 'active' if a stream in that state counts towards the
// concurrency limit: "open" and both "half closed" states qualify.
function activeState(state) {
  switch (state) {
    case 'OPEN':
    case 'HALF_CLOSED_LOCAL':
    case 'HALF_CLOSED_REMOTE':
      return true;
    default:
      return false;
  }
}
+
// `_transition` is called every time there's an incoming or outgoing frame. It manages state
// transitions, and detects stream errors. A stream error is always caused by a frame that is not
// allowed in the current state.
//
// `sending` is true for outgoing frames, false for incoming ones. On error, the outcome differs
// by direction: sending an illegal frame emits an Error (a local bug); receiving one emits
// 'connectionError' or resets the stream with a stream error (see the tail of this function).
Stream.prototype._transition = function transition(sending, frame) {
  var receiving = !sending;
  var connectionError;
  var streamError;

  // One boolean flag per frame type keeps the per-state conditions below readable.
  var DATA = false, HEADERS = false, PRIORITY = false, ALTSVC = false, BLOCKED = false;
  var RST_STREAM = false, PUSH_PROMISE = false, WINDOW_UPDATE = false;
  switch(frame.type) {
    case 'DATA'         : DATA          = true; break;
    case 'HEADERS'      : HEADERS       = true; break;
    case 'PRIORITY'     : PRIORITY      = true; break;
    case 'RST_STREAM'   : RST_STREAM    = true; break;
    case 'PUSH_PROMISE' : PUSH_PROMISE  = true; break;
    case 'WINDOW_UPDATE': WINDOW_UPDATE = true; break;
    case 'ALTSVC'       : ALTSVC        = true; break;
    case 'BLOCKED'      : BLOCKED       = true; break;
  }

  var previousState = this.state;

  switch (this.state) {
    // All streams start in the **idle** state. In this state, no frames have been exchanged.
    //
    // * Sending or receiving a HEADERS frame causes the stream to become "open".
    //
    // When the HEADERS frame contains the END_STREAM flags, then two state transitions happen.
    case 'IDLE':
      if (HEADERS) {
        this._setState('OPEN');
        if (frame.flags.END_STREAM) {
          this._setState(sending ? 'HALF_CLOSED_LOCAL' : 'HALF_CLOSED_REMOTE');
        }
        this._initiated = sending;
      } else if (sending && RST_STREAM) {
        this._setState('CLOSED');
      } else if (PRIORITY) {
        /* No state change */
      } else {
        connectionError = 'PROTOCOL_ERROR';
      }
      break;

    // A stream in the **reserved (local)** state is one that has been promised by sending a
    // PUSH_PROMISE frame.
    //
    // * The endpoint can send a HEADERS frame. This causes the stream to open in a "half closed
    //   (remote)" state.
    // * Either endpoint can send a RST_STREAM frame to cause the stream to become "closed". This
    //   releases the stream reservation.
    // * An endpoint may receive PRIORITY frame in this state.
    // * An endpoint MUST NOT send any other type of frame in this state.
    case 'RESERVED_LOCAL':
      if (sending && HEADERS) {
        this._setState('HALF_CLOSED_REMOTE');
      } else if (RST_STREAM) {
        this._setState('CLOSED');
      } else if (PRIORITY) {
        /* No state change */
      } else {
        connectionError = 'PROTOCOL_ERROR';
      }
      break;

    // A stream in the **reserved (remote)** state has been reserved by a remote peer.
    //
    // * Either endpoint can send a RST_STREAM frame to cause the stream to become "closed". This
    //   releases the stream reservation.
    // * Receiving a HEADERS frame causes the stream to transition to "half closed (local)".
    // * An endpoint MAY send PRIORITY frames in this state to reprioritize the stream.
    // * Receiving any other type of frame MUST be treated as a stream error of type PROTOCOL_ERROR.
    case 'RESERVED_REMOTE':
      if (RST_STREAM) {
        this._setState('CLOSED');
      } else if (receiving && HEADERS) {
        this._setState('HALF_CLOSED_LOCAL');
      } else if (BLOCKED || PRIORITY) {
        /* No state change */
      } else {
        connectionError = 'PROTOCOL_ERROR';
      }
      break;

    // The **open** state is where both peers can send frames. In this state, sending peers observe
    // advertised stream level flow control limits.
    //
    // * From this state either endpoint can send a frame with a END_STREAM flag set, which causes
    //   the stream to transition into one of the "half closed" states: an endpoint sending a
    //   END_STREAM flag causes the stream state to become "half closed (local)"; an endpoint
    //   receiving a END_STREAM flag causes the stream state to become "half closed (remote)".
    // * Either endpoint can send a RST_STREAM frame from this state, causing it to transition
    //   immediately to "closed".
    case 'OPEN':
      if (frame.flags.END_STREAM) {
        this._setState(sending ? 'HALF_CLOSED_LOCAL' : 'HALF_CLOSED_REMOTE');
      } else if (RST_STREAM) {
        this._setState('CLOSED');
      } else {
        /* No state change */
      }
      break;

    // A stream that is **half closed (local)** cannot be used for sending frames.
    //
    // * A stream transitions from this state to "closed" when a frame that contains a END_STREAM
    //   flag is received, or when either peer sends a RST_STREAM frame.
    // * An endpoint MAY send or receive PRIORITY frames in this state to reprioritize the stream.
    // * WINDOW_UPDATE can be sent by a peer that has sent a frame bearing the END_STREAM flag.
    case 'HALF_CLOSED_LOCAL':
      if (RST_STREAM || (receiving && frame.flags.END_STREAM)) {
        this._setState('CLOSED');
      } else if (BLOCKED || ALTSVC || receiving || PRIORITY || (sending && WINDOW_UPDATE)) {
        /* No state change */
      } else {
        connectionError = 'PROTOCOL_ERROR';
      }
      break;

    // A stream that is **half closed (remote)** is no longer being used by the peer to send frames.
    // In this state, an endpoint is no longer obligated to maintain a receiver flow control window
    // if it performs flow control.
    //
    // * If an endpoint receives additional frames for a stream that is in this state it MUST
    //   respond with a stream error of type STREAM_CLOSED.
    // * A stream can transition from this state to "closed" by sending a frame that contains a
    //   END_STREAM flag, or when either peer sends a RST_STREAM frame.
    // * An endpoint MAY send or receive PRIORITY frames in this state to reprioritize the stream.
    // * A receiver MAY receive a WINDOW_UPDATE frame on a "half closed (remote)" stream.
    case 'HALF_CLOSED_REMOTE':
      if (RST_STREAM || (sending && frame.flags.END_STREAM)) {
        this._setState('CLOSED');
      } else if (BLOCKED || ALTSVC || sending || PRIORITY || (receiving && WINDOW_UPDATE)) {
        /* No state change */
      } else {
        connectionError = 'PROTOCOL_ERROR';
      }
      break;

    // The **closed** state is the terminal state.
    //
    // * An endpoint MUST NOT send frames on a closed stream. An endpoint that receives a frame
    //   after receiving a RST_STREAM or a frame containing a END_STREAM flag on that stream MUST
    //   treat that as a stream error of type STREAM_CLOSED.
    // * WINDOW_UPDATE, PRIORITY or RST_STREAM frames can be received in this state for a short
    //   period after a frame containing an END_STREAM flag is sent. Until the remote peer receives
    //   and processes the frame bearing the END_STREAM flag, it might send either frame type.
    //   Endpoints MUST ignore WINDOW_UPDATE frames received in this state, though endpoints MAY
    //   choose to treat WINDOW_UPDATE frames that arrive a significant time after sending
    //   END_STREAM as a connection error of type PROTOCOL_ERROR.
    // * If this state is reached as a result of sending a RST_STREAM frame, the peer that receives
    //   the RST_STREAM might have already sent - or enqueued for sending - frames on the stream
    //   that cannot be withdrawn. An endpoint that sends a RST_STREAM frame MUST ignore frames that
    //   it receives on closed streams after it has sent a RST_STREAM frame. An endpoint MAY choose
    //   to limit the period over which it ignores frames and treat frames that arrive after this
    //   time as being in error.
    // * An endpoint might receive a PUSH_PROMISE frame after it sends RST_STREAM. PUSH_PROMISE
    //   causes a stream to become "reserved". If promised streams are not desired, a RST_STREAM
    //   can be used to close any of those streams.
    case 'CLOSED':
      if (PRIORITY || (sending && RST_STREAM) ||
          (receiving && WINDOW_UPDATE) ||
          (receiving && this._closedByUs &&
           (this._closedWithRst || RST_STREAM || ALTSVC))) {
        /* No state change */
      } else {
        streamError = 'STREAM_CLOSED';
      }
      break;
  }

  // Noting that the connection was closed by the other endpoint. It may be important in edge cases.
  // For example, when the peer tries to cancel a promised stream, but we already sent every data
  // on it, then the stream is in CLOSED state, yet we want to ignore the incoming RST_STREAM.
  if ((this.state === 'CLOSED') && (previousState !== 'CLOSED')) {
    this._closedByUs = sending;
    this._closedWithRst = RST_STREAM;
  }

  // Sending/receiving a PUSH_PROMISE
  //
  // * Sending a PUSH_PROMISE frame marks the associated stream for later use. The stream state
  //   for the reserved stream transitions to "reserved (local)".
  // * Receiving a PUSH_PROMISE frame marks the associated stream as reserved by the remote peer.
  //   The state of the stream becomes "reserved (remote)".
  if (PUSH_PROMISE && !connectionError && !streamError) {
    /* This assertion must hold, because _transition is called immediately when a frame is written
       to the stream. If it would be called when a frame gets out of the input queue, the state
       of the reserved could have been changed by then. */
    assert(frame.promised_stream.state === 'IDLE', frame.promised_stream.state);
    frame.promised_stream._setState(sending ? 'RESERVED_LOCAL' : 'RESERVED_REMOTE');
    frame.promised_stream._initiated = sending;
  }

  // Signaling how sending/receiving this frame changes the active stream count (-1, 0 or +1)
  // NOTE(review): on the sending path `count_change` is set as a numeric property; on the
  // receiving path it is invoked as a function — presumably a callback installed by the
  // Connection before delivery. Confirm against connection.js.
  if (this._initiated) {
    var change = (activeState(this.state) - activeState(previousState));
    if (sending) {
      frame.count_change = change;
    } else {
      frame.count_change(change);
    }
  } else if (sending) {
    frame.count_change = 0;
  }

  // Common error handling.
  if (connectionError || streamError) {
    var info = {
      error: connectionError,
      frame: frame,
      state: this.state,
      closedByUs: this._closedByUs,
      closedWithRst: this._closedWithRst
    };

    // * When sending something invalid, throwing an exception, since it is probably a bug.
    if (sending) {
      this._log.error(info, 'Sending illegal frame.');
      return this.emit('error', new Error('Sending illegal frame (' + frame.type + ') in ' + this.state + ' state.'));
    }

    // * In case of a serious problem, emitting and error and letting someone else handle it
    //   (e.g. closing the connection)
    // * When receiving something invalid, sending an RST_STREAM using the `reset` method.
    //   This will automatically cause a transition to the CLOSED state.
    else {
      this._log.error(info, 'Received illegal frame.');
      if (connectionError) {
        this.emit('connectionError', connectionError);
      } else {
        this.reset(streamError);
        this.emit('error', streamError);
      }
    }
  }
};
+
// Bunyan serializers
// ------------------

exports.serializers = {};

// Streams are identified in log output by a lazily assigned numeric id.
var nextId = 0;
exports.serializers.s = function(stream) {
  if (!('_id' in stream)) {
    stream._id = nextId++;
  }
  return stream._id;
};
diff --git a/testing/xpcshell/node-http2/package.json b/testing/xpcshell/node-http2/package.json
new file mode 100644
index 0000000000..5372f17f7c
--- /dev/null
+++ b/testing/xpcshell/node-http2/package.json
@@ -0,0 +1,46 @@
+{
+ "name": "http2",
+ "version": "3.3.6",
+ "description": "An HTTP/2 client and server implementation",
+ "main": "lib/index.js",
+ "engines" : {
+ "node" : ">=0.12.0"
+ },
+ "devDependencies": {
+ "istanbul": "*",
+ "chai": "*",
+ "mocha": "*",
+ "docco": "*",
+ "bunyan": "*"
+ },
+ "scripts": {
+ "test": "istanbul test _mocha -- --reporter spec --slow 500 --timeout 15000",
+ "doc": "docco lib/* --output doc --layout parallel --template root.jst --css doc/docco.css && docco lib/protocol/* --output doc/protocol --layout parallel --template protocol.jst --css doc/docco.css"
+ },
+ "repository": {
+ "type": "git",
+ "url": "https://github.com/molnarg/node-http2.git"
+ },
+ "homepage": "https://github.com/molnarg/node-http2",
+ "bugs": {
+ "url": "https://github.com/molnarg/node-http2/issues"
+ },
+ "keywords": [
+ "http",
+ "http2",
+ "client",
+ "server"
+ ],
+ "author": "Gábor Molnár <gabor@molnar.es> (http://gabor.molnar.es)",
+ "contributors": [
+ "Nick Hurley",
+ "Mike Belshe",
+ "Yoshihiro Iwanaga",
+ "Igor Novikov",
+ "James Willcox",
+ "David Björklund",
+ "Patrick McManus"
+ ],
+ "license": "MIT",
+ "readmeFilename": "README.md"
+}
diff --git a/testing/xpcshell/node-http2/test/compressor.js b/testing/xpcshell/node-http2/test/compressor.js
new file mode 100644
index 0000000000..f86baf5db9
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/compressor.js
@@ -0,0 +1,575 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var compressor = require('../lib/protocol/compressor');
+var HeaderTable = compressor.HeaderTable;
+var HuffmanTable = compressor.HuffmanTable;
+var HeaderSetCompressor = compressor.HeaderSetCompressor;
+var HeaderSetDecompressor = compressor.HeaderSetDecompressor;
+var Compressor = compressor.Compressor;
+var Decompressor = compressor.Decompressor;
+
// Test vectors for integer encoding: `N` is the bit length of the prefix, `I` the
// integer to encode, `buffer` the expected encoded bytes. The 1337/5-bit case
// matches the worked example in RFC 7541 Appendix C.1 (31, 154, 10) —
// 128 + 26 === 154. N: 0 appears to mean "no prefix constraint" — confirm
// against the compressor implementation.
var test_integers = [{
  N: 5,
  I: 10,
  buffer: new Buffer([10])
}, {
  N: 0,
  I: 10,
  buffer: new Buffer([10])
}, {
  N: 5,
  I: 1337,
  buffer: new Buffer([31, 128 + 26, 10])
}, {
  N: 0,
  I: 1337,
  buffer: new Buffer([128 + 57, 10])
}];
+
// Test vectors for string encoding: `string` is the value, `buffer` the expected
// wire bytes. The first entry's leading byte 0x89 has the high bit set
// (Huffman-coded, length 9); the second's leading 0x13 does not (plain
// literal, 19 UTF-8 bytes follow).
var test_strings = [{
  string: 'www.foo.com',
  buffer: new Buffer('89f1e3c2f29ceb90f4ff', 'hex')
}, {
  string: 'éáűőúöüó€',
  buffer: new Buffer('13c3a9c3a1c5b1c591c3bac3b6c3bcc3b3e282ac', 'hex')
}];
+
// Expected Huffman encodings (hex) of typical request header values.
// Previously assigned without `var`, which created an implicit global — a
// ReferenceError under strict mode / ES modules and an accidental global
// otherwise.
var test_huffman_request = {
  'GET': 'c5837f',
  'http': '9d29af',
  '/': '63',
  'www.foo.com': 'f1e3c2f29ceb90f4ff',
  'https': '9d29ad1f',
  'www.bar.com': 'f1e3c2f18ec5c87a7f',
  'no-cache': 'a8eb10649cbf',
  '/custom-path.css': '6096a127a56ac699d72211',
  'custom-key': '25a849e95ba97d7f',
  'custom-value': '25a849e95bb8e8b4bf'
};
+
// Expected Huffman encodings (hex) of typical response header values.
// Previously assigned without `var` (implicit global; ReferenceError in strict
// mode / ES modules). Note: the 'OCt' capitalization in the date keys is
// intentional test data — the hex vectors encode exactly those bytes.
var test_huffman_response = {
  '302': '6402',
  'private': 'aec3771a4b',
  'Mon, 21 OCt 2013 20:13:21 GMT': 'd07abe941054d5792a0801654102e059b820a98b46ff',
  ': https://www.bar.com': 'b8a4e94d68b8c31e3c785e31d8b90f4f',
  '200': '1001',
  'Mon, 21 OCt 2013 20:13:22 GMT': 'd07abe941054d5792a0801654102e059b821298b46ff',
  'https://www.bar.com': '9d29ad171863c78f0bc63b1721e9',
  'gzip': '9bd9ab',
  'foo=AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA\
AAAAAAAAAAAAAAAAAAAAAAAAAALASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKHQWOEIUAL\
QWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKH\
QWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEO\
IUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOP\
IUAXQWEOIUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234ZZZZZZZZZZ\
ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ1234 m\
ax-age=3600; version=1': '94e7821861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861873c3bafe5cd8f666bbfbf9ab672c1ab5e4e10fe6ce583564e10fe67cb9b1ece5ab064e10e7d9cb06ac9c21fccfb307087f33e7cd961dd7f672c1ab86487f34844cb59e1dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab27087f33e5cd8f672d583270873ece583564e10fe67d983843f99f3e6cb0eebfb3960d5c3243f9a42265acf0eebf97363d99aefefe6ad9cb06ad793843f9b3960d593843f99f2e6c7b396ac1938439f672c1ab27087f33ecc1c21fccf9f3658775fd9cb06ae1921fcd21132d678775fcb9b1eccd77f7f356ce58356bc9c21fcd9cb06ac9c21fccf97363d9cb560c9c21cfb3960d593843f99f660e10fe67cf9b2c3bafece583570c90fe6908996bf7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f42265a5291f9587316065c003ed4ee5b1063d5007f',
  'foo=ZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZZ\
ZZZZZZZZZZZZZZZZZZZZZZZZZZLASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKHQWOEIUAL\
QWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEOIUAXLJKH\
QWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOPIUAXQWEO\
IUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234LASDJKHQKBZXOQWEOP\
IUAXQWEOIUAXLJKHQWOEIUALQWEOIUAXLQEUAXLLKJASDQWEOUIAXN1234AAAAAAAAAA\
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA1234 m\
ax-age=3600; version=1': '94e783f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f7f73c3bafe5cd8f666bbfbf9ab672c1ab5e4e10fe6ce583564e10fe67cb9b1ece5ab064e10e7d9cb06ac9c21fccfb307087f33e7cd961dd7f672c1ab86487f34844cb59e1dd7f2e6c7b335dfdfcd5b3960d5af27087f3672c1ab27087f33e5cd8f672d583270873ece583564e10fe67d983843f99f3e6cb0eebfb3960d5c3243f9a42265acf0eebf97363d99aefefe6ad9cb06ad793843f9b3960d593843f99f2e6c7b396ac1938439f672c1ab27087f33ecc1c21fccf9f3658775fd9cb06ae1921fcd21132d678775fcb9b1eccd77f7f356ce58356bc9c21fcd9cb06ac9c21fccf97363d9cb560c9c21cfb3960d593843f99f660e10fe67cf9b2c3bafece583570c90fe6908996a1861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861861842265a5291f9587316065c003ed4ee5b1063d5007f'
};
+
+var test_headers = [{
+ // index
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('82', 'hex')
+}, {
+ // index
+ header: {
+ name: 5,
+ value: 5,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('86', 'hex')
+}, {
+ // index
+ header: {
+ name: 3,
+ value: 3,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('84', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 0,
+ value: 'www.foo.com',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('41' + '89f1e3c2f29ceb90f4ff', 'hex')
+}, {
+ // indexed
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('82', 'hex')
+}, {
+ // indexed
+ header: {
+ name: 6,
+ value: 6,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('87', 'hex')
+}, {
+ // indexed
+ header: {
+ name: 3,
+ value: 3,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('84', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 0,
+ value: 'www.bar.com',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('41' + '89f1e3c2f18ec5c87a7f', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 23,
+ value: 'no-cache',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('58' + '86a8eb10649cbf', 'hex')
+}, {
+ // index
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('82', 'hex')
+}, {
+ // index
+ header: {
+ name: 6,
+ value: 6,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('87', 'hex')
+}, {
+ // literal w/index, name index
+ header: {
+ name: 3,
+ value: '/custom-path.css',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('44' + '8b6096a127a56ac699d72211', 'hex')
+}, {
+ // index
+ header: {
+ name: 63,
+ value: 63,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('C0', 'hex')
+}, {
+ // literal w/index, new name & value
+ header: {
+ name: 'custom-key',
+ value: 'custom-value',
+ index: true,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('40' + '8825a849e95ba97d7f' + '8925a849e95bb8e8b4bf', 'hex')
+}, {
+ // index
+ header: {
+ name: 1,
+ value: 1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('82', 'hex')
+}, {
+ // index
+ header: {
+ name: 6,
+ value: 6,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('87', 'hex')
+}, {
+ // index
+ header: {
+ name: 62,
+ value: 62,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('BF', 'hex')
+}, {
+ // index
+ header: {
+ name: 65,
+ value: 65,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('C2', 'hex')
+}, {
+ // index
+ header: {
+ name: 64,
+ value: 64,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('C1', 'hex')
+}, {
+ // index
+ header: {
+ name: 61,
+ value: 61,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('BE', 'hex')
+}, {
+ // Literal w/o index, name index
+ header: {
+ name: 6,
+ value: "whatever",
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('07' + '86f138d25ee5b3', 'hex')
+}, {
+ // Literal w/o index, new name & value
+ header: {
+ name: "foo",
+ value: "bar",
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('00' + '8294e7' + '03626172', 'hex')
+}, {
+ // Literal never indexed, name index
+ header: {
+ name: 6,
+ value: "whatever",
+ index: false,
+ mustNeverIndex: true,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('17' + '86f138d25ee5b3', 'hex')
+}, {
+ // Literal never indexed, new name & value
+ header: {
+ name: "foo",
+ value: "bar",
+ index: false,
+ mustNeverIndex: true,
+ contextUpdate: false,
+ newMaxSize: 0
+ },
+ buffer: new Buffer('10' + '8294e7' + '03626172', 'hex')
+}, {
+ header: {
+ name: -1,
+ value: -1,
+ index: false,
+ mustNeverIndex: false,
+ contextUpdate: true,
+ newMaxSize: 100
+ },
+ buffer: new Buffer('3F45', 'hex')
+}];
+
+var test_header_sets = [{
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'http',
+ ':path': '/',
+ ':authority': 'www.foo.com'
+ },
+ buffer: util.concat(test_headers.slice(0, 4).map(function(test) { return test.buffer; }))
+}, {
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'https',
+ ':path': '/',
+ ':authority': 'www.bar.com',
+ 'cache-control': 'no-cache'
+ },
+ buffer: util.concat(test_headers.slice(4, 9).map(function(test) { return test.buffer; }))
+}, {
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'https',
+ ':path': '/custom-path.css',
+ ':authority': 'www.bar.com',
+ 'custom-key': 'custom-value'
+ },
+ buffer: util.concat(test_headers.slice(9, 14).map(function(test) { return test.buffer; }))
+}, {
+ headers: {
+ ':method': 'GET',
+ ':scheme': 'https',
+ ':path': '/custom-path.css',
+ ':authority': ['www.foo.com', 'www.bar.com'],
+ 'custom-key': 'custom-value'
+ },
+ buffer: util.concat(test_headers.slice(14, 19).map(function(test) { return test.buffer; }))
+}];
+
+describe('compressor.js', function() {
+ describe('HeaderTable', function() {
+ });
+
+ describe('HuffmanTable', function() {
+ describe('method encode(buffer)', function() {
+ it('should return the Huffman encoded version of the input buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var decoded in test_huffman_request) {
+ var encoded = test_huffman_request[decoded];
+ expect(table.encode(new Buffer(decoded)).toString('hex')).to.equal(encoded);
+ }
+ table = HuffmanTable.huffmanTable;
+ for (decoded in test_huffman_response) {
+ encoded = test_huffman_response[decoded];
+ expect(table.encode(new Buffer(decoded)).toString('hex')).to.equal(encoded);
+ }
+ });
+ });
+ describe('method decode(buffer)', function() {
+ it('should return the Huffman decoded version of the input buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var decoded in test_huffman_request) {
+ var encoded = test_huffman_request[decoded];
+ expect(table.decode(new Buffer(encoded, 'hex')).toString()).to.equal(decoded);
+ }
+ table = HuffmanTable.huffmanTable;
+ for (decoded in test_huffman_response) {
+ encoded = test_huffman_response[decoded];
+ expect(table.decode(new Buffer(encoded, 'hex')).toString()).to.equal(decoded);
+ }
+ });
+ });
+ });
+
+ describe('HeaderSetCompressor', function() {
+ describe('static method .integer(I, N)', function() {
+ it('should return an array of buffers that represent the N-prefix coded form of the integer I', function() {
+ for (var i = 0; i < test_integers.length; i++) {
+ var test = test_integers[i];
+ test.buffer.cursor = 0;
+ expect(util.concat(HeaderSetCompressor.integer(test.I, test.N))).to.deep.equal(test.buffer);
+ }
+ });
+ });
+ describe('static method .string(string)', function() {
+ it('should return an array of buffers that represent the encoded form of the string', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_strings.length; i++) {
+ var test = test_strings[i];
+ expect(util.concat(HeaderSetCompressor.string(test.string, table))).to.deep.equal(test.buffer);
+ }
+ });
+ });
+ describe('static method .header({ name, value, index })', function() {
+ it('should return an array of buffers that represent the encoded form of the header', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_headers.length; i++) {
+ var test = test_headers[i];
+ expect(util.concat(HeaderSetCompressor.header(test.header, table))).to.deep.equal(test.buffer);
+ }
+ });
+ });
+ });
+
+ describe('HeaderSetDecompressor', function() {
+ describe('static method .integer(buffer, N)', function() {
+ it('should return the parsed N-prefix coded number and increase the cursor property of buffer', function() {
+ for (var i = 0; i < test_integers.length; i++) {
+ var test = test_integers[i];
+ test.buffer.cursor = 0;
+ expect(HeaderSetDecompressor.integer(test.buffer, test.N)).to.equal(test.I);
+ expect(test.buffer.cursor).to.equal(test.buffer.length);
+ }
+ });
+ });
+ describe('static method .string(buffer)', function() {
+ it('should return the parsed string and increase the cursor property of buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_strings.length; i++) {
+ var test = test_strings[i];
+ test.buffer.cursor = 0;
+ expect(HeaderSetDecompressor.string(test.buffer, table)).to.equal(test.string);
+ expect(test.buffer.cursor).to.equal(test.buffer.length);
+ }
+ });
+ });
+ describe('static method .header(buffer)', function() {
+ it('should return the parsed header and increase the cursor property of buffer', function() {
+ var table = HuffmanTable.huffmanTable;
+ for (var i = 0; i < test_headers.length; i++) {
+ var test = test_headers[i];
+ test.buffer.cursor = 0;
+ expect(HeaderSetDecompressor.header(test.buffer, table)).to.deep.equal(test.header);
+ expect(test.buffer.cursor).to.equal(test.buffer.length);
+ }
+ });
+ });
+ });
+ describe('Decompressor', function() {
+ describe('method decompress(buffer)', function() {
+ it('should return the parsed header set in { name1: value1, name2: [value2, value3], ... } format', function() {
+ var decompressor = new Decompressor(util.log, 'REQUEST');
+ for (var i = 0; i < test_header_sets.length - 1; i++) {
+ var header_set = test_header_sets[i];
+ expect(decompressor.decompress(header_set.buffer)).to.deep.equal(header_set.headers);
+ }
+ });
+ });
+ describe('transform stream', function() {
+ it('should emit an error event if a series of header frames is interleaved with other frames', function() {
+ var decompressor = new Decompressor(util.log, 'REQUEST');
+ var error_occured = false;
+ decompressor.on('error', function() {
+ error_occured = true;
+ });
+ decompressor.write({
+ type: 'HEADERS',
+ flags: {
+ END_HEADERS: false
+ },
+ data: new Buffer(5)
+ });
+ decompressor.write({
+ type: 'DATA',
+ flags: {},
+ data: new Buffer(5)
+ });
+ expect(error_occured).to.be.equal(true);
+ });
+ });
+ });
+
+ describe('invariant', function() {
+ describe('decompressor.decompress(compressor.compress(headerset)) === headerset', function() {
+ it('should be true for any header set if the states are synchronized', function() {
+ var compressor = new Compressor(util.log, 'REQUEST');
+ var decompressor = new Decompressor(util.log, 'REQUEST');
+ var n = test_header_sets.length;
+ for (var i = 0; i < 10; i++) {
+ var headers = test_header_sets[i%n].headers;
+ var compressed = compressor.compress(headers);
+ var decompressed = decompressor.decompress(compressed);
+ expect(decompressed).to.deep.equal(headers);
+ expect(compressor._table).to.deep.equal(decompressor._table);
+ }
+ });
+ });
+ describe('source.pipe(compressor).pipe(decompressor).pipe(destination)', function() {
+ it('should behave like source.pipe(destination) for a stream of frames', function(done) {
+ var compressor = new Compressor(util.log, 'RESPONSE');
+ var decompressor = new Decompressor(util.log, 'RESPONSE');
+ var n = test_header_sets.length;
+ compressor.pipe(decompressor);
+ for (var i = 0; i < 10; i++) {
+ compressor.write({
+ type: i%2 ? 'HEADERS' : 'PUSH_PROMISE',
+ flags: {},
+ headers: test_header_sets[i%n].headers
+ });
+ }
+ setTimeout(function() {
+ for (var j = 0; j < 10; j++) {
+ expect(decompressor.read().headers).to.deep.equal(test_header_sets[j%n].headers);
+ }
+ done();
+ }, 10);
+ });
+ });
+ describe('huffmanTable.decompress(huffmanTable.compress(buffer)) === buffer', function() {
+ it('should be true for any buffer', function() {
+ for (var i = 0; i < 10; i++) {
+ var buffer = [];
+ while (Math.random() > 0.1) {
+ buffer.push(Math.floor(Math.random() * 256))
+ }
+ buffer = new Buffer(buffer);
+ var table = HuffmanTable.huffmanTable;
+ var result = table.decode(table.encode(buffer));
+ expect(result).to.deep.equal(buffer);
+ }
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/connection.js b/testing/xpcshell/node-http2/test/connection.js
new file mode 100644
index 0000000000..2c68857f7f
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/connection.js
@@ -0,0 +1,237 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var Connection = require('../lib/protocol/connection').Connection;
+
+var settings = {
+ SETTINGS_MAX_CONCURRENT_STREAMS: 100,
+ SETTINGS_INITIAL_WINDOW_SIZE: 100000
+};
+
+var MAX_PRIORITY = Math.pow(2, 31) - 1;
+var MAX_RANDOM_PRIORITY = 10;
+
+function randomPriority() {
+ return Math.floor(Math.random() * (MAX_RANDOM_PRIORITY + 1));
+}
+
+function expectPriorityOrder(priorities) {
+ priorities.forEach(function(bucket, priority) {
+ bucket.forEach(function(stream) {
+ expect(stream._priority).to.be.equal(priority);
+ });
+ });
+}
+
+describe('connection.js', function() {
+ describe('Connection class', function() {
+ describe('method ._insert(stream)', function() {
+ it('should insert the stream in _streamPriorities in a place determined by stream._priority', function() {
+ var streams = [];
+ var connection = Object.create(Connection.prototype, { _streamPriorities: { value: streams }});
+ var streamCount = 10;
+
+ for (var i = 0; i < streamCount; i++) {
+ var stream = { _priority: randomPriority() };
+ connection._insert(stream, stream._priority);
+ expect(connection._streamPriorities[stream._priority]).to.include(stream);
+ }
+
+ expectPriorityOrder(connection._streamPriorities);
+ });
+ });
+ describe('method ._reprioritize(stream)', function() {
+ it('should eject and then insert the stream in _streamPriorities in a place determined by stream._priority', function() {
+ var streams = [];
+ var connection = Object.create(Connection.prototype, { _streamPriorities: { value: streams }});
+ var streamCount = 10;
+ var oldPriority, newPriority, stream;
+
+ for (var i = 0; i < streamCount; i++) {
+ oldPriority = randomPriority();
+ while ((newPriority = randomPriority()) === oldPriority);
+ stream = { _priority: oldPriority };
+ connection._insert(stream, oldPriority);
+ connection._reprioritize(stream, newPriority);
+ stream._priority = newPriority;
+
+ expect(connection._streamPriorities[newPriority]).to.include(stream);
+ expect(connection._streamPriorities[oldPriority] || []).to.not.include(stream);
+ }
+
+ expectPriorityOrder(streams);
+ });
+ });
+ describe('invalid operation', function() {
+ describe('unsolicited ping answer', function() {
+ it('should be ignored', function() {
+ var connection = new Connection(util.log, 1, settings);
+
+ connection._receivePing({
+ stream: 0,
+ type: 'PING',
+ flags: {
+ 'PONG': true
+ },
+ data: new Buffer(8)
+ });
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ var c, s;
+ beforeEach(function() {
+ c = new Connection(util.log.child({ role: 'client' }), 1, settings);
+ s = new Connection(util.log.child({ role: 'client' }), 2, settings);
+ c.pipe(s).pipe(c);
+ });
+
+ describe('connection setup', function() {
+ it('should work as expected', function(done) {
+ setTimeout(function() {
+ // If there are no exception until this, then we're done
+ done();
+ }, 10);
+ });
+ });
+ describe('sending/receiving a request', function() {
+ it('should work as expected', function(done) {
+ // Request and response data
+ var request_headers = {
+ ':method': 'GET',
+ ':path': '/'
+ };
+ var request_data = new Buffer(0);
+ var response_headers = {
+ ':status': '200'
+ };
+ var response_data = new Buffer('12345678', 'hex');
+
+ // Setting up server
+ s.on('stream', function(server_stream) {
+ server_stream.on('headers', function(headers) {
+ expect(headers).to.deep.equal(request_headers);
+ server_stream.headers(response_headers);
+ server_stream.end(response_data);
+ });
+ });
+
+ // Sending request
+ var client_stream = c.createStream();
+ client_stream.headers(request_headers);
+ client_stream.end(request_data);
+
+ // Waiting for answer
+ done = util.callNTimes(2, done);
+ client_stream.on('headers', function(headers) {
+ expect(headers).to.deep.equal(response_headers);
+ done();
+ });
+ client_stream.on('data', function(data) {
+ expect(data).to.deep.equal(response_data);
+ done();
+ });
+ });
+ });
+ describe('server push', function() {
+ it('should work as expected', function(done) {
+ var request_headers = { ':method': 'get', ':path': '/' };
+ var response_headers = { ':status': '200' };
+ var push_request_headers = { ':method': 'get', ':path': '/x' };
+ var push_response_headers = { ':status': '200' };
+ var response_content = new Buffer(10);
+ var push_content = new Buffer(10);
+
+ done = util.callNTimes(5, done);
+
+ s.on('stream', function(response) {
+ response.headers(response_headers);
+
+ var pushed = response.promise(push_request_headers);
+ pushed.headers(push_response_headers);
+ pushed.end(push_content);
+
+ response.end(response_content);
+ });
+
+ var request = c.createStream();
+ request.headers(request_headers);
+ request.end();
+ request.on('headers', function(headers) {
+ expect(headers).to.deep.equal(response_headers);
+ done();
+ });
+ request.on('data', function(data) {
+ expect(data).to.deep.equal(response_content);
+ done();
+ });
+ request.on('promise', function(pushed, headers) {
+ expect(headers).to.deep.equal(push_request_headers);
+ pushed.on('headers', function(headers) {
+ expect(headers).to.deep.equal(response_headers);
+ done();
+ });
+ pushed.on('data', function(data) {
+ expect(data).to.deep.equal(push_content);
+ done();
+ });
+ pushed.on('end', done);
+ });
+ });
+ });
+ describe('ping from client', function() {
+ it('should work as expected', function(done) {
+ c.ping(function() {
+ done();
+ });
+ });
+ });
+ describe('ping from server', function() {
+ it('should work as expected', function(done) {
+ s.ping(function() {
+ done();
+ });
+ });
+ });
+ describe('creating two streams and then using them in reverse order', function() {
+ it('should not result in non-monotonous local ID ordering', function() {
+ var s1 = c.createStream();
+ var s2 = c.createStream();
+ s2.headers({ ':method': 'get', ':path': '/' });
+ s1.headers({ ':method': 'get', ':path': '/' });
+ });
+ });
+ describe('creating two promises and then using them in reverse order', function() {
+ it('should not result in non-monotonous local ID ordering', function(done) {
+ s.on('stream', function(response) {
+ response.headers({ ':status': '200' });
+
+ var p1 = s.createStream();
+ var p2 = s.createStream();
+ response.promise(p2, { ':method': 'get', ':path': '/p2' });
+ response.promise(p1, { ':method': 'get', ':path': '/p1' });
+ p2.headers({ ':status': '200' });
+ p1.headers({ ':status': '200' });
+ });
+
+ var request = c.createStream();
+ request.headers({ ':method': 'get', ':path': '/' });
+
+ done = util.callNTimes(2, done);
+ request.on('promise', function() {
+ done();
+ });
+ });
+ });
+ describe('closing the connection on one end', function() {
+ it('should result in closed streams on both ends', function(done) {
+ done = util.callNTimes(2, done);
+ c.on('end', done);
+ s.on('end', done);
+
+ c.close();
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/endpoint.js b/testing/xpcshell/node-http2/test/endpoint.js
new file mode 100644
index 0000000000..bdd2569d42
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/endpoint.js
@@ -0,0 +1,41 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var endpoint = require('../lib/protocol/endpoint');
+var Endpoint = endpoint.Endpoint;
+
+var settings = {
+ SETTINGS_MAX_CONCURRENT_STREAMS: 100,
+ SETTINGS_INITIAL_WINDOW_SIZE: 100000
+};
+
+describe('endpoint.js', function() {
+ describe('scenario', function() {
+ describe('connection setup', function() {
+ it('should work as expected', function(done) {
+ var c = new Endpoint(util.log.child({ role: 'client' }), 'CLIENT', settings);
+ var s = new Endpoint(util.log.child({ role: 'client' }), 'SERVER', settings);
+
+ util.log.debug('Test initialization over, starting piping.');
+ c.pipe(s).pipe(c);
+
+ setTimeout(function() {
+ // If there are no exception until this, then we're done
+ done();
+ }, 10);
+ });
+ });
+ });
+ describe('bunyan serializer', function() {
+ describe('`e`', function() {
+ var format = endpoint.serializers.e;
+ it('should assign a unique ID to each endpoint', function() {
+ var c = new Endpoint(util.log.child({ role: 'client' }), 'CLIENT', settings);
+ var s = new Endpoint(util.log.child({ role: 'client' }), 'SERVER', settings);
+ expect(format(c)).to.not.equal(format(s));
+ expect(format(c)).to.equal(format(c));
+ expect(format(s)).to.equal(format(s));
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/flow.js b/testing/xpcshell/node-http2/test/flow.js
new file mode 100644
index 0000000000..a077c68451
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/flow.js
@@ -0,0 +1,260 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var Flow = require('../lib/protocol/flow').Flow;
+
+var MAX_PAYLOAD_SIZE = 16384;
+
+function createFlow(log) {
+ var flowControlId = util.random(10, 100);
+ var flow = new Flow(flowControlId);
+ flow._log = util.log.child(log || {});
+ return flow;
+}
+
+describe('flow.js', function() {
+ describe('Flow class', function() {
+ var flow;
+ beforeEach(function() {
+ flow = createFlow();
+ });
+
+ describe('._receive(frame, callback) method', function() {
+ it('is called when there\'s a frame in the input buffer to be consumed', function(done) {
+ var frame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ flow._receive = function _receive(receivedFrame, callback) {
+ expect(receivedFrame).to.equal(frame);
+ callback();
+ };
+ flow.write(frame, done);
+ });
+ it('has to be overridden by the child class, otherwise it throws', function() {
+ expect(flow._receive.bind(flow)).to.throw(Error);
+ });
+ });
+ describe('._send() method', function() {
+ it('is called when the output buffer should be filled with more frames and the flow' +
+ 'control queue is empty', function() {
+ var notFlowControlledFrame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ flow._send = function _send() {
+ this.push(notFlowControlledFrame);
+ };
+ expect(flow.read()).to.equal(notFlowControlledFrame);
+
+ flow._window = 0;
+ flow._queue.push({ type: 'DATA', flags: {}, data: { length: 1 } });
+ var frame = flow.read();
+ while (frame.type === notFlowControlledFrame.type) frame = flow.read();
+ expect(frame.type).to.equal('BLOCKED');
+ expect(flow.read()).to.equal(null);
+ });
+ it('has to be overridden by the child class, otherwise it throws', function() {
+ expect(flow._send.bind(flow)).to.throw(Error);
+ });
+ });
+ describe('._increaseWindow(size) method', function() {
+ it('should increase `this._window` by `size`', function() {
+ flow._send = util.noop;
+ flow._window = 0;
+
+ var increase1 = util.random(0,100);
+ var increase2 = util.random(0,100);
+ flow._increaseWindow(increase1);
+ flow._increaseWindow(increase2);
+ expect(flow._window).to.equal(increase1 + increase2);
+
+ flow._increaseWindow(Infinity);
+ expect(flow._window).to.equal(Infinity);
+ });
+ it('should emit error when increasing with a finite `size` when `_window` is infinite', function() {
+ flow._send = util.noop;
+ flow._increaseWindow(Infinity);
+ var increase = util.random(1,100);
+
+ expect(flow._increaseWindow.bind(flow, increase)).to.throw('Uncaught, unspecified "error" event.');
+ });
+ it('should emit error when `_window` grows over the window limit', function() {
+ var WINDOW_SIZE_LIMIT = Math.pow(2, 31) - 1;
+ flow._send = util.noop;
+ flow._window = 0;
+
+ flow._increaseWindow(WINDOW_SIZE_LIMIT);
+ expect(flow._increaseWindow.bind(flow, 1)).to.throw('Uncaught, unspecified "error" event.');
+
+ });
+ });
+ describe('.read() method', function() {
+ describe('when the flow control queue is not empty', function() {
+ it('should return the first item in the queue if the window is enough', function() {
+ var priorityFrame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ var dataFrame = { type: 'DATA', flags: {}, data: { length: 10 } };
+ flow._send = util.noop;
+ flow._window = 10;
+ flow._queue = [priorityFrame, dataFrame];
+
+ expect(flow.read()).to.equal(priorityFrame);
+ expect(flow.read()).to.equal(dataFrame);
+ });
+ it('should also split DATA frames when needed', function() {
+ var buffer = new Buffer(10);
+ var dataFrame = { type: 'DATA', flags: {}, stream: util.random(0, 100), data: buffer };
+ flow._send = util.noop;
+ flow._window = 5;
+ flow._queue = [dataFrame];
+
+ var expectedFragment = { flags: {}, type: 'DATA', stream: dataFrame.stream, data: buffer.slice(0,5) };
+ expect(flow.read()).to.deep.equal(expectedFragment);
+ expect(dataFrame.data).to.deep.equal(buffer.slice(5));
+ });
+ });
+ });
+ describe('.push(frame) method', function() {
+ it('should push `frame` into the output queue or the flow control queue', function() {
+ var priorityFrame = { type: 'PRIORITY', flags: {}, priority: 1 };
+ var dataFrame = { type: 'DATA', flags: {}, data: { length: 10 } };
+ flow._window = 10;
+
+ flow.push(dataFrame); // output queue
+ flow.push(dataFrame); // flow control queue, because of depleted window
+ flow.push(priorityFrame); // flow control queue, because it's not empty
+
+ expect(flow.read()).to.be.equal(dataFrame);
+ expect(flow._queue[0]).to.be.equal(dataFrame);
+ expect(flow._queue[1]).to.be.equal(priorityFrame);
+ });
+ });
+ describe('.write() method', function() {
+ it('call with a DATA frame should trigger sending WINDOW_UPDATE if remote flow control is not' +
+ 'disabled', function(done) {
+ flow._window = 100;
+ flow._send = util.noop;
+ flow._receive = function(frame, callback) {
+ callback();
+ };
+
+ var buffer = new Buffer(util.random(10, 100));
+ flow.write({ type: 'DATA', flags: {}, data: buffer });
+ flow.once('readable', function() {
+ expect(flow.read()).to.be.deep.equal({
+ type: 'WINDOW_UPDATE',
+ flags: {},
+ stream: flow._flowControlId,
+ window_size: buffer.length
+ });
+ done();
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ var flow1, flow2;
+ beforeEach(function() {
+ flow1 = createFlow({ flow: 1 });
+ flow2 = createFlow({ flow: 2 });
+ flow1._flowControlId = flow2._flowControlId;
+ flow1._send = flow2._send = util.noop;
+ flow1._receive = flow2._receive = function(frame, callback) { callback(); };
+ });
+
+ describe('sending a large data stream', function() {
+ it('should work as expected', function(done) {
+ // Sender side
+ var frameNumber = util.random(5, 8);
+ var input = [];
+ flow1._send = function _send() {
+ if (input.length >= frameNumber) {
+ this.push({ type: 'DATA', flags: { END_STREAM: true }, data: new Buffer(0) });
+ this.push(null);
+ } else {
+ var buffer = new Buffer(util.random(1000, 100000));
+ input.push(buffer);
+ this.push({ type: 'DATA', flags: {}, data: buffer });
+ }
+ };
+
+ // Receiver side
+ var output = [];
+ flow2._receive = function _receive(frame, callback) {
+ if (frame.type === 'DATA') {
+ expect(frame.data.length).to.be.lte(MAX_PAYLOAD_SIZE);
+ output.push(frame.data);
+ }
+ if (frame.flags.END_STREAM) {
+ this.emit('end_stream');
+ }
+ callback();
+ };
+
+ // Checking results
+ flow2.on('end_stream', function() {
+ input = util.concat(input);
+ output = util.concat(output);
+
+ expect(input).to.deep.equal(output);
+
+ done();
+ });
+
+ // Start piping
+ flow1.pipe(flow2).pipe(flow1);
+ });
+ });
+
+ describe('when running out of window', function() {
+ it('should send a BLOCKED frame', function(done) {
+ // Sender side
+ var frameNumber = util.random(5, 8);
+ var input = [];
+ flow1._send = function _send() {
+ if (input.length >= frameNumber) {
+ this.push({ type: 'DATA', flags: { END_STREAM: true }, data: new Buffer(0) });
+ this.push(null);
+ } else {
+ var buffer = new Buffer(util.random(1000, 100000));
+ input.push(buffer);
+ this.push({ type: 'DATA', flags: {}, data: buffer });
+ }
+ };
+
+ // Receiver side
+ // Do not send WINDOW_UPDATESs except when the other side sends BLOCKED
+ var output = [];
+ flow2._restoreWindow = util.noop;
+ flow2._receive = function _receive(frame, callback) {
+ if (frame.type === 'DATA') {
+ expect(frame.data.length).to.be.lte(MAX_PAYLOAD_SIZE);
+ output.push(frame.data);
+ }
+ if (frame.flags.END_STREAM) {
+ this.emit('end_stream');
+ }
+ if (frame.type === 'BLOCKED') {
+ setTimeout(function() {
+ this._push({
+ type: 'WINDOW_UPDATE',
+ flags: {},
+ stream: this._flowControlId,
+ window_size: this._received
+ });
+ this._received = 0;
+ }.bind(this), 20);
+ }
+ callback();
+ };
+
+ // Checking results
+ flow2.on('end_stream', function() {
+ input = util.concat(input);
+ output = util.concat(output);
+
+ expect(input).to.deep.equal(output);
+
+ done();
+ });
+
+ // Start piping
+ flow1.pipe(flow2).pipe(flow1);
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/framer.js b/testing/xpcshell/node-http2/test/framer.js
new file mode 100644
index 0000000000..59cc711ba0
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/framer.js
@@ -0,0 +1,395 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var framer = require('../lib/protocol/framer');
+var Serializer = framer.Serializer;
+var Deserializer = framer.Deserializer;
+
+var frame_types = {
+ DATA: ['data'],
+ HEADERS: ['priority_information', 'data'],
+ PRIORITY: ['priority_information'],
+ RST_STREAM: ['error'],
+ SETTINGS: ['settings'],
+ PUSH_PROMISE: ['promised_stream', 'data'],
+ PING: ['data'],
+ GOAWAY: ['last_stream', 'error'],
+ WINDOW_UPDATE: ['window_size'],
+ CONTINUATION: ['data'],
+ ALTSVC: ['protocolID', 'host', 'port', 'origin', 'maxAge']
+};
+
+var test_frames = [{
+ frame: {
+ type: 'DATA',
+ flags: { END_STREAM: false, RESERVED2: false, RESERVED4: false,
+ PADDED: false },
+ stream: 10,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + content
+ buffer: new Buffer('000004' + '00' + '00' + '0000000A' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: false, RESERVED5: false, PRIORITY: false },
+ stream: 15,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ buffer: new Buffer('000004' + '01' + '00' + '0000000F' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: false, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: false,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ buffer: new Buffer('000009' + '01' + '20' + '0000000F' + '0000000A' + '05' + '12345678', 'hex')
+
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: false, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: true,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ buffer: new Buffer('000009' + '01' + '20' + '0000000F' + '8000000A' + '05' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'PRIORITY',
+ flags: { },
+ stream: 10,
+
+ priorityDependency: 9,
+ priorityWeight: 5,
+ exclusiveDependency: false
+ },
+ buffer: new Buffer('000005' + '02' + '00' + '0000000A' + '00000009' + '05', 'hex')
+
+}, {
+ frame: {
+ type: 'PRIORITY',
+ flags: { },
+ stream: 10,
+
+ priorityDependency: 9,
+ priorityWeight: 5,
+ exclusiveDependency: true
+ },
+ buffer: new Buffer('000005' + '02' + '00' + '0000000A' + '80000009' + '05', 'hex')
+
+}, {
+ frame: {
+ type: 'RST_STREAM',
+ flags: { },
+ stream: 10,
+
+ error: 'INTERNAL_ERROR'
+ },
+ buffer: new Buffer('000004' + '03' + '00' + '0000000A' + '00000002', 'hex')
+
+}, {
+ frame: {
+ type: 'SETTINGS',
+ flags: { ACK: false },
+ stream: 10,
+
+ settings: {
+ SETTINGS_HEADER_TABLE_SIZE: 0x12345678,
+ SETTINGS_ENABLE_PUSH: true,
+ SETTINGS_MAX_CONCURRENT_STREAMS: 0x01234567,
+ SETTINGS_INITIAL_WINDOW_SIZE: 0x89ABCDEF,
+ SETTINGS_MAX_FRAME_SIZE: 0x00010000
+ }
+ },
+ buffer: new Buffer('00001E' + '04' + '00' + '0000000A' + '0001' + '12345678' +
+ '0002' + '00000001' +
+ '0003' + '01234567' +
+ '0004' + '89ABCDEF' +
+ '0005' + '00010000', 'hex')
+
+}, {
+ frame: {
+ type: 'PUSH_PROMISE',
+ flags: { RESERVED1: false, RESERVED2: false, END_PUSH_PROMISE: false,
+ PADDED: false },
+ stream: 15,
+
+ promised_stream: 3,
+ data: new Buffer('12345678', 'hex')
+ },
+ buffer: new Buffer('000008' + '05' + '00' + '0000000F' + '00000003' + '12345678', 'hex')
+
+}, {
+ frame: {
+ type: 'PING',
+ flags: { ACK: false },
+ stream: 15,
+
+ data: new Buffer('1234567887654321', 'hex')
+ },
+ buffer: new Buffer('000008' + '06' + '00' + '0000000F' + '1234567887654321', 'hex')
+
+}, {
+ frame: {
+ type: 'GOAWAY',
+ flags: { },
+ stream: 10,
+
+ last_stream: 0x12345678,
+ error: 'PROTOCOL_ERROR'
+ },
+ buffer: new Buffer('000008' + '07' + '00' + '0000000A' + '12345678' + '00000001', 'hex')
+
+}, {
+ frame: {
+ type: 'WINDOW_UPDATE',
+ flags: { },
+ stream: 10,
+
+ window_size: 0x12345678
+ },
+ buffer: new Buffer('000004' + '08' + '00' + '0000000A' + '12345678', 'hex')
+}, {
+ frame: {
+ type: 'CONTINUATION',
+ flags: { RESERVED1: false, RESERVED2: false, END_HEADERS: true },
+ stream: 10,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + content
+ buffer: new Buffer('000004' + '09' + '04' + '0000000A' + '12345678', 'hex')
+}, {
+ frame: {
+ type: 'ALTSVC',
+ flags: { },
+ stream: 0,
+
+ maxAge: 31536000,
+ port: 4443,
+ protocolID: "h2",
+ host: "altsvc.example.com",
+ origin: ""
+ },
+ buffer: new Buffer(new Buffer('00002B' + '0A' + '00' + '00000000' + '0000', 'hex') + new Buffer('h2="altsvc.example.com:4443"; ma=31536000', 'ascii'))
+}, {
+ frame: {
+ type: 'ALTSVC',
+ flags: { },
+ stream: 0,
+
+ maxAge: 31536000,
+ port: 4443,
+ protocolID: "h2",
+ host: "altsvc.example.com",
+ origin: "https://onlyme.example.com"
+ },
+ buffer: new Buffer(new Buffer('000045' + '0A' + '00' + '00000000' + '001A', 'hex') + new Buffer('https://onlyme.example.comh2="altsvc.example.com:4443"; ma=31536000', 'ascii'))
+
+}, {
+ frame: {
+ type: 'BLOCKED',
+ flags: { },
+ stream: 10
+ },
+ buffer: new Buffer('000000' + '0B' + '00' + '0000000A', 'hex')
+}];
+
+var deserializer_test_frames = test_frames.slice(0);
+var padded_test_frames = [{
+ frame: {
+ type: 'DATA',
+ flags: { END_STREAM: false, RESERVED2: false, RESERVED4: false,
+ PADDED: true },
+ stream: 10,
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + content + padding
+ buffer: new Buffer('00000B' + '00' + '08' + '0000000A' + '06' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: true, RESERVED5: false, PRIORITY: false },
+ stream: 15,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + data + padding
+ buffer: new Buffer('00000B' + '01' + '08' + '0000000F' + '06' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: true, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: false,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + priority dependency + priority weight + data + padding
+ buffer: new Buffer('000010' + '01' + '28' + '0000000F' + '06' + '0000000A' + '05' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'HEADERS',
+ flags: { END_STREAM: false, RESERVED2: false, END_HEADERS: false,
+ PADDED: true, RESERVED5: false, PRIORITY: true },
+ stream: 15,
+ priorityDependency: 10,
+ priorityWeight: 5,
+ exclusiveDependency: true,
+
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + priority dependency + priority weight + data + padding
+ buffer: new Buffer('000010' + '01' + '28' + '0000000F' + '06' + '8000000A' + '05' + '12345678' + '000000000000', 'hex')
+
+}, {
+ frame: {
+ type: 'PUSH_PROMISE',
+ flags: { RESERVED1: false, RESERVED2: false, END_PUSH_PROMISE: false,
+ PADDED: true },
+ stream: 15,
+
+ promised_stream: 3,
+ data: new Buffer('12345678', 'hex')
+ },
+ // length + type + flags + stream + pad length + promised stream + data + padding
+ buffer: new Buffer('00000F' + '05' + '08' + '0000000F' + '06' + '00000003' + '12345678' + '000000000000', 'hex')
+
+}];
+for (var idx = 0; idx < padded_test_frames.length; idx++) {
+ deserializer_test_frames.push(padded_test_frames[idx]);
+}
+
+
+describe('framer.js', function() {
+ describe('Serializer', function() {
+ describe('static method .commonHeader({ type, flags, stream }, buffer_array)', function() {
+ it('should add the appropriate 9 byte header buffer in front of the others', function() {
+ for (var i = 0; i < test_frames.length; i++) {
+ var test = test_frames[i];
+ var buffers = [test.buffer.slice(9)];
+ var header_buffer = test.buffer.slice(0,9);
+ Serializer.commonHeader(test.frame, buffers);
+ expect(buffers[0]).to.deep.equal(header_buffer);
+ }
+ });
+ });
+
+ Object.keys(frame_types).forEach(function(type) {
+ var tests = test_frames.filter(function(test) { return test.frame.type === type; });
+ var frame_shape = '{ ' + frame_types[type].join(', ') + ' }';
+ describe('static method .' + type + '(' + frame_shape + ', buffer_array)', function() {
+ it('should push buffers to the array that make up a ' + type + ' type payload', function() {
+ for (var i = 0; i < tests.length; i++) {
+ var test = tests[i];
+ var buffers = [];
+ Serializer[type](test.frame, buffers);
+ expect(util.concat(buffers)).to.deep.equal(test.buffer.slice(9));
+ }
+ });
+ });
+ });
+
+ describe('transform stream', function() {
+ it('should transform frame objects to appropriate buffers', function() {
+ var stream = new Serializer(util.log);
+
+ for (var i = 0; i < test_frames.length; i++) {
+ var test = test_frames[i];
+ stream.write(test.frame);
+ var chunk, buffer = new Buffer(0);
+ while (chunk = stream.read()) {
+ buffer = util.concat([buffer, chunk]);
+ }
+ expect(buffer).to.be.deep.equal(test.buffer);
+ }
+ });
+ });
+ });
+
+ describe('Deserializer', function() {
+ describe('static method .commonHeader(header_buffer, frame)', function() {
+ it('should augment the frame object with these properties: { type, flags, stream })', function() {
+ for (var i = 0; i < deserializer_test_frames.length; i++) {
+ var test = deserializer_test_frames[i], frame = {};
+ Deserializer.commonHeader(test.buffer.slice(0,9), frame);
+ expect(frame).to.deep.equal({
+ type: test.frame.type,
+ flags: test.frame.flags,
+ stream: test.frame.stream
+ });
+ }
+ });
+ });
+
+ Object.keys(frame_types).forEach(function(type) {
+ var tests = deserializer_test_frames.filter(function(test) { return test.frame.type === type; });
+ var frame_shape = '{ ' + frame_types[type].join(', ') + ' }';
+ describe('static method .' + type + '(payload_buffer, frame)', function() {
+ it('should augment the frame object with these properties: ' + frame_shape, function() {
+ for (var i = 0; i < tests.length; i++) {
+ var test = tests[i];
+ var frame = {
+ type: test.frame.type,
+ flags: test.frame.flags,
+ stream: test.frame.stream
+ };
+ Deserializer[type](test.buffer.slice(9), frame);
+ expect(frame).to.deep.equal(test.frame);
+ }
+ });
+ });
+ });
+
+ describe('transform stream', function() {
+ it('should transform buffers to appropriate frame object', function() {
+ var stream = new Deserializer(util.log);
+
+ var shuffled = util.shuffleBuffers(deserializer_test_frames.map(function(test) { return test.buffer; }));
+ shuffled.forEach(stream.write.bind(stream));
+
+ for (var j = 0; j < deserializer_test_frames.length; j++) {
+ expect(stream.read()).to.be.deep.equal(deserializer_test_frames[j].frame);
+ }
+ });
+ });
+ });
+
+ describe('bunyan formatter', function() {
+ describe('`frame`', function() {
+ var format = framer.serializers.frame;
+ it('should assign a unique ID to each frame', function() {
+ var frame1 = { type: 'DATA', data: new Buffer(10) };
+ var frame2 = { type: 'PRIORITY', priority: 1 };
+      expect(format(frame1).id).to.be.equal(format(frame1).id);
+      expect(format(frame2).id).to.be.equal(format(frame2).id);
+      expect(format(frame1).id).to.not.be.equal(format(frame2).id);
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/http.js b/testing/xpcshell/node-http2/test/http.js
new file mode 100644
index 0000000000..95a074e4a0
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/http.js
@@ -0,0 +1,793 @@
+var expect = require('chai').expect;
+var util = require('./util');
+var fs = require('fs');
+var path = require('path');
+var url = require('url');
+var net = require('net');
+
+var http2 = require('../lib/http');
+var https = require('https');
+
+var serverOptions = {
+ key: fs.readFileSync(path.join(__dirname, '../example/localhost.key')),
+ cert: fs.readFileSync(path.join(__dirname, '../example/localhost.crt')),
+ rejectUnauthorized: true,
+ log: util.serverLog
+};
+
+var agentOptions = {
+ key: serverOptions.key,
+ ca: serverOptions.cert,
+ rejectUnauthorized: true,
+ log: util.clientLog
+};
+
+var globalAgent = new http2.Agent(agentOptions);
+
+describe('http.js', function() {
+ beforeEach(function() {
+ http2.globalAgent = globalAgent;
+ });
+ describe('Server', function() {
+ describe('new Server(options)', function() {
+ it('should throw if called without \'plain\' or TLS options', function() {
+ expect(function() {
+ new http2.Server();
+ }).to.throw(Error);
+ expect(function() {
+ http2.createServer(util.noop);
+ }).to.throw(Error);
+ });
+ });
+ describe('method `listen()`', function () {
+ it('should emit `listening` event', function (done) {
+ var server = http2.createServer(serverOptions);
+
+ server.on('listening', function () {
+ server.close();
+
+ done();
+ })
+
+ server.listen(0);
+ });
+ it('should emit `error` on failure', function (done) {
+ var server = http2.createServer(serverOptions);
+
+ // This TCP server is used to explicitly take a port to make
+ // server.listen() fails.
+ var net = require('net').createServer();
+
+ server.on('error', function () {
+ net.close()
+
+ done();
+ });
+
+ net.listen(0, function () {
+ server.listen(this.address().port);
+ });
+ });
+ });
+ describe('property `timeout`', function() {
+ it('should be a proxy for the backing HTTPS server\'s `timeout` property', function() {
+ var server = new http2.Server(serverOptions);
+ var backingServer = server._server;
+ var newTimeout = 10;
+ server.timeout = newTimeout;
+ expect(server.timeout).to.be.equal(newTimeout);
+ expect(backingServer.timeout).to.be.equal(newTimeout);
+ });
+ });
+ describe('method `setTimeout(timeout, [callback])`', function() {
+ it('should be a proxy for the backing HTTPS server\'s `setTimeout` method', function() {
+ var server = new http2.Server(serverOptions);
+ var backingServer = server._server;
+ var newTimeout = 10;
+ var newCallback = util.noop;
+ backingServer.setTimeout = function(timeout, callback) {
+ expect(timeout).to.be.equal(newTimeout);
+ expect(callback).to.be.equal(newCallback);
+ };
+ server.setTimeout(newTimeout, newCallback);
+ });
+ });
+ });
+ describe('Agent', function() {
+ describe('property `maxSockets`', function() {
+ it('should be a proxy for the backing HTTPS agent\'s `maxSockets` property', function() {
+ var agent = new http2.Agent({ log: util.clientLog });
+ var backingAgent = agent._httpsAgent;
+ var newMaxSockets = backingAgent.maxSockets + 1;
+ agent.maxSockets = newMaxSockets;
+ expect(agent.maxSockets).to.be.equal(newMaxSockets);
+ expect(backingAgent.maxSockets).to.be.equal(newMaxSockets);
+ });
+ });
+ describe('method `request(options, [callback])`', function() {
+ it('should use a new agent for request-specific TLS settings', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1234, function() {
+ var options = url.parse('https://localhost:1234' + path);
+ options.key = agentOptions.key;
+ options.ca = agentOptions.ca;
+ options.rejectUnauthorized = true;
+
+ http2.globalAgent = new http2.Agent({ log: util.clientLog });
+ http2.get(options, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ it('should throw when trying to use with \'http\' scheme', function() {
+ expect(function() {
+ var agent = new http2.Agent({ log: util.clientLog });
+ agent.request({ protocol: 'http:' });
+ }).to.throw(Error);
+ });
+ });
+ });
+ describe('OutgoingRequest', function() {
+ function testFallbackProxyMethod(name, originalArguments, done) {
+ var request = new http2.OutgoingRequest();
+
+ // When in HTTP/2 mode, this call should be ignored
+ request.stream = { reset: util.noop };
+ request[name].apply(request, originalArguments);
+ delete request.stream;
+
+ // When in fallback mode, this call should be forwarded
+ request[name].apply(request, originalArguments);
+ var mockFallbackRequest = { on: util.noop };
+ mockFallbackRequest[name] = function() {
+ expect(Array.prototype.slice.call(arguments)).to.deep.equal(originalArguments);
+ done();
+ };
+ request._fallback(mockFallbackRequest);
+ }
+ describe('method `setNoDelay(noDelay)`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `setNoDelay` method', function(done) {
+ testFallbackProxyMethod('setNoDelay', [true], done);
+ });
+ });
+ describe('method `setSocketKeepAlive(enable, initialDelay)`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `setSocketKeepAlive` method', function(done) {
+ testFallbackProxyMethod('setSocketKeepAlive', [true, util.random(10, 100)], done);
+ });
+ });
+ describe('method `setTimeout(timeout, [callback])`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `setTimeout` method', function(done) {
+ testFallbackProxyMethod('setTimeout', [util.random(10, 100), util.noop], done);
+ });
+ });
+ describe('method `abort()`', function() {
+ it('should act as a proxy for the backing HTTPS agent\'s `abort` method', function(done) {
+ testFallbackProxyMethod('abort', [], done);
+ });
+ });
+ });
+ describe('OutgoingResponse', function() {
+ it('should throw error when writeHead is called multiple times on it', function() {
+ var called = false;
+ var stream = { _log: util.log, headers: function () {
+ if (called) {
+ throw new Error('Should not send headers twice');
+ } else {
+ called = true;
+ }
+ }, once: util.noop };
+ var response = new http2.OutgoingResponse(stream);
+
+ response.writeHead(200);
+ response.writeHead(404);
+ });
+ it('field finished should be Boolean', function(){
+ var stream = { _log: util.log, headers: function () {}, once: util.noop };
+ var response = new http2.OutgoingResponse(stream);
+ expect(response.finished).to.be.a('Boolean');
+ });
+ it('field finished should initially be false and then go to true when response completes',function(done){
+ var res;
+ var server = http2.createServer(serverOptions, function(request, response) {
+ res = response;
+ expect(res.finished).to.be.false;
+ response.end('HiThere');
+ });
+ server.listen(1236, function() {
+ http2.get('https://localhost:1236/finished-test', function(response) {
+ response.on('data', function(data){
+ var sink = data; //
+ });
+ response.on('end',function(){
+ expect(res.finished).to.be.true;
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ describe('simple request', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1234, function() {
+ http2.get('https://localhost:1234' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('2 simple request in parallel', function() {
+ it('should work as expected', function(originalDone) {
+ var path = '/x';
+ var message = 'Hello world';
+ var done = util.callNTimes(2, function() {
+ server.close();
+ originalDone();
+ });
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1234, function() {
+ http2.get('https://localhost:1234' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ http2.get('https://localhost:1234' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('100 simple request in a series', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ var n = 100;
+ server.listen(1242, function() {
+ doRequest();
+ function doRequest() {
+ http2.get('https://localhost:1242' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ if (n) {
+ n -= 1;
+ doRequest();
+ } else {
+ server.close();
+ done();
+ }
+ });
+ });
+ }
+ });
+ });
+ });
+ describe('request with payload', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ request.once('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ response.end();
+ });
+ });
+
+ server.listen(1240, function() {
+ var request = http2.request({
+ host: 'localhost',
+ port: 1240,
+ path: path
+ });
+ request.write(message);
+ request.end();
+ request.on('response', function() {
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ describe('request with custom status code and headers', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+ var headerName = 'name';
+ var headerValue = 'value';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ // Request URL and headers
+ expect(request.url).to.equal(path);
+ expect(request.headers[headerName]).to.equal(headerValue);
+
+ // A header to be overwritten later
+ response.setHeader(headerName, 'to be overwritten');
+ expect(response.getHeader(headerName)).to.equal('to be overwritten');
+
+ // A header to be deleted
+ response.setHeader('nonexistent', 'x');
+ response.removeHeader('nonexistent');
+ expect(response.getHeader('nonexistent')).to.equal(undefined);
+
+ // A set-cookie header which should always be an array
+ response.setHeader('set-cookie', 'foo');
+
+ // Don't send date
+ response.sendDate = false;
+
+ // Specifying more headers, the status code and a reason phrase with `writeHead`
+ var moreHeaders = {};
+ moreHeaders[headerName] = headerValue;
+ response.writeHead(600, 'to be discarded', moreHeaders);
+ expect(response.getHeader(headerName)).to.equal(headerValue);
+
+ // Empty response body
+ response.end(message);
+ });
+
+ server.listen(1239, function() {
+ var headers = {};
+ headers[headerName] = headerValue;
+ var request = http2.request({
+ host: 'localhost',
+ port: 1239,
+ path: path,
+ headers: headers
+ });
+ request.end();
+ request.on('response', function(response) {
+ expect(response.headers[headerName]).to.equal(headerValue);
+ expect(response.headers['nonexistent']).to.equal(undefined);
+ expect(response.headers['set-cookie']).to.an.instanceof(Array)
+ expect(response.headers['set-cookie']).to.deep.equal(['foo'])
+ expect(response.headers['date']).to.equal(undefined);
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('request over plain TCP', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.raw.createServer({
+ log: util.serverLog
+ }, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1237, function() {
+ var request = http2.raw.request({
+ plain: true,
+ host: 'localhost',
+ port: 1237,
+ path: path
+ }, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ request.end();
+ });
+ });
+ });
+ describe('get over plain TCP', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.raw.createServer({
+ log: util.serverLog
+ }, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1237, function() {
+ var request = http2.raw.get('http://localhost:1237/x', function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ server.close();
+ done();
+ });
+ });
+ request.end();
+ });
+ });
+ });
+ describe('request to an HTTPS/1 server', function() {
+ it('should fall back to HTTPS/1 successfully', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = https.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(5678, function() {
+ http2.get('https://localhost:5678' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('2 parallel request to an HTTPS/1 server', function() {
+ it('should fall back to HTTPS/1 successfully', function(originalDone) {
+ var path = '/x';
+ var message = 'Hello world';
+ var done = util.callNTimes(2, function() {
+ server.close();
+ originalDone();
+ });
+
+ var server = https.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(6789, function() {
+ http2.get('https://localhost:6789' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ http2.get('https://localhost:6789' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('HTTPS/1 request to a HTTP/2 server', function() {
+ it('should fall back to HTTPS/1 successfully', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1236, function() {
+ var options = url.parse('https://localhost:1236' + path);
+ options.agent = new https.Agent(agentOptions);
+ https.get(options, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('two parallel request', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1237, function() {
+ done = util.callNTimes(2, done);
+ // 1. request
+ http2.get('https://localhost:1237' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ // 2. request
+ http2.get('https://localhost:1237' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('two subsequent request', function() {
+ it('should use the same HTTP/2 connection', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ response.end(message);
+ });
+
+ server.listen(1238, function() {
+ // 1. request
+ http2.get('https://localhost:1238' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+
+ // 2. request
+ http2.get('https://localhost:1238' + path, function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ });
+ });
+ });
+ });
+ });
+ });
+ describe('https server node module specification conformance', function() {
+ it('should provide API for remote HTTP 1.1 client address', function(done) {
+ var remoteAddress = null;
+ var remotePort = null;
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ // HTTPS 1.1 client with Node 0.10 server
+ if (!request.remoteAddress) {
+ if (request.socket.socket) {
+ remoteAddress = request.socket.socket.remoteAddress;
+ remotePort = request.socket.socket.remotePort;
+ } else {
+ remoteAddress = request.socket.remoteAddress;
+ remotePort = request.socket.remotePort;
+ }
+ } else {
+ // HTTPS 1.1/2.0 client with Node 0.12 server
+ remoteAddress = request.remoteAddress;
+ remotePort = request.remotePort;
+ }
+ response.write('Pong');
+ response.end();
+ });
+
+ server.listen(1259, 'localhost', function() {
+ var request = https.request({
+ host: 'localhost',
+ port: 1259,
+ path: '/',
+ ca: serverOptions.cert
+ });
+ request.write('Ping');
+ request.end();
+ request.on('response', function(response) {
+ response.on('data', function(data) {
+ var localAddress = response.socket.address();
+ expect(remoteAddress).to.equal(localAddress.address);
+ expect(remotePort).to.equal(localAddress.port);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ it('should provide API for remote HTTP 2.0 client address', function(done) {
+ var remoteAddress = null;
+ var remotePort = null;
+ var localAddress = null;
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ remoteAddress = request.remoteAddress;
+ remotePort = request.remotePort;
+ response.write('Pong');
+ response.end();
+ });
+
+ server.listen(1258, 'localhost', function() {
+ var request = http2.request({
+ host: 'localhost',
+ port: 1258,
+ path: '/'
+ });
+ request.write('Ping');
+ globalAgent.on('false:localhost:1258', function(endpoint) {
+ localAddress = endpoint.socket.address();
+ });
+ request.end();
+ request.on('response', function(response) {
+ response.on('data', function(data) {
+ expect(remoteAddress).to.equal(localAddress.address);
+ expect(remotePort).to.equal(localAddress.port);
+ server.close();
+ done();
+ });
+ });
+ });
+ });
+ it('should expose net.Socket as .socket and .connection', function(done) {
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.socket).to.equal(request.connection);
+ expect(request.socket).to.be.instanceof(net.Socket);
+ response.write('Pong');
+ response.end();
+ done();
+ });
+
+ server.listen(1248, 'localhost', function() {
+ var request = https.request({
+ host: 'localhost',
+ port: 1248,
+ path: '/',
+ ca: serverOptions.cert
+ });
+ request.write('Ping');
+ request.end();
+ });
+ });
+ });
+ describe('request and response with trailers', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+ var requestTrailers = { 'content-md5': 'x' };
+ var responseTrailers = { 'content-md5': 'y' };
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ request.on('data', util.noop);
+ request.once('end', function() {
+ expect(request.trailers).to.deep.equal(requestTrailers);
+ response.write(message);
+ response.addTrailers(responseTrailers);
+ response.end();
+ });
+ });
+
+ server.listen(1241, function() {
+ var request = http2.request('https://localhost:1241' + path);
+ request.addTrailers(requestTrailers);
+ request.end();
+ request.on('response', function(response) {
+ response.on('data', util.noop);
+ response.once('end', function() {
+ expect(response.trailers).to.deep.equal(responseTrailers);
+ done();
+ });
+ });
+ });
+ });
+ });
+ describe('Handle socket error', function () {
+ it('HTTPS on Connection Refused error', function (done) {
+ var path = '/x';
+ var request = http2.request('https://127.0.0.1:6666' + path);
+
+ request.on('error', function (err) {
+ expect(err.errno).to.equal('ECONNREFUSED');
+ done();
+ });
+
+ request.on('response', function (response) {
+ server._server._handle.destroy();
+
+ response.on('data', util.noop);
+
+ response.once('end', function () {
+ done(new Error('Request should have failed'));
+ });
+ });
+
+ request.end();
+
+ });
+ it('HTTP on Connection Refused error', function (done) {
+ var path = '/x';
+
+ var request = http2.raw.request('http://127.0.0.1:6666' + path);
+
+ request.on('error', function (err) {
+ expect(err.errno).to.equal('ECONNREFUSED');
+ done();
+ });
+
+ request.on('response', function (response) {
+ server._server._handle.destroy();
+
+ response.on('data', util.noop);
+
+ response.once('end', function () {
+ done(new Error('Request should have failed'));
+ });
+ });
+
+ request.end();
+ });
+ });
+ describe('server push', function() {
+ it('should work as expected', function(done) {
+ var path = '/x';
+ var message = 'Hello world';
+ var pushedPath = '/y';
+ var pushedMessage = 'Hello world 2';
+
+ var server = http2.createServer(serverOptions, function(request, response) {
+ expect(request.url).to.equal(path);
+ var push1 = response.push('/y');
+ push1.end(pushedMessage);
+ var push2 = response.push({ path: '/y', protocol: 'https:' });
+ push2.end(pushedMessage);
+ response.end(message);
+ });
+
+ server.listen(1235, function() {
+ var request = http2.get('https://localhost:1235' + path);
+ done = util.callNTimes(5, done);
+
+ request.on('response', function(response) {
+ response.on('data', function(data) {
+ expect(data.toString()).to.equal(message);
+ done();
+ });
+ response.on('end', done);
+ });
+
+ request.on('push', function(promise) {
+ expect(promise.url).to.be.equal(pushedPath);
+ promise.on('response', function(pushStream) {
+ pushStream.on('data', function(data) {
+ expect(data.toString()).to.equal(pushedMessage);
+ done();
+ });
+ pushStream.on('end', done);
+ });
+ });
+ });
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/stream.js b/testing/xpcshell/node-http2/test/stream.js
new file mode 100644
index 0000000000..90e0ef64b0
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/stream.js
@@ -0,0 +1,413 @@
+var expect = require('chai').expect;
+var util = require('./util');
+
+var stream = require('../lib/protocol/stream');
+var Stream = stream.Stream;
+
+function createStream() {
+ var stream = new Stream(util.log, null);
+ stream.upstream._window = Infinity;
+ return stream;
+}
+
+// Execute a list of commands and assertions
+var recorded_events = ['state', 'error', 'window_update', 'headers', 'promise'];
+function execute_sequence(stream, sequence, done) {
+ if (!done) {
+ done = sequence;
+ sequence = stream;
+ stream = createStream();
+ }
+
+ var outgoing_frames = [];
+
+ var emit = stream.emit, events = [];
+ stream.emit = function(name) {
+ if (recorded_events.indexOf(name) !== -1) {
+ events.push({ name: name, data: Array.prototype.slice.call(arguments, 1) });
+ }
+ return emit.apply(this, arguments);
+ };
+
+ var commands = [], checks = [];
+ sequence.forEach(function(step) {
+ if ('method' in step || 'incoming' in step || 'outgoing' in step || 'wait' in step || 'set_state' in step) {
+ commands.push(step);
+ }
+
+ if ('outgoing' in step || 'event' in step || 'active' in step) {
+ checks.push(step);
+ }
+ });
+
+ var activeCount = 0;
+ function count_change(change) {
+ activeCount += change;
+ }
+
+ function execute(callback) {
+ var command = commands.shift();
+ if (command) {
+ if ('method' in command) {
+ var value = stream[command.method.name].apply(stream, command.method.arguments);
+ if (command.method.ret) {
+ command.method.ret(value);
+ }
+ execute(callback);
+ } else if ('incoming' in command) {
+ command.incoming.count_change = count_change;
+ stream.upstream.write(command.incoming);
+ execute(callback);
+ } else if ('outgoing' in command) {
+ outgoing_frames.push(stream.upstream.read());
+ execute(callback);
+ } else if ('set_state' in command) {
+ stream.state = command.set_state;
+ execute(callback);
+ } else if ('wait' in command) {
+ setTimeout(execute.bind(null, callback), command.wait);
+ } else {
+ throw new Error('Invalid command', command);
+ }
+ } else {
+ setTimeout(callback, 5);
+ }
+ }
+
+ function check() {
+ checks.forEach(function(check) {
+ if ('outgoing' in check) {
+ var frame = outgoing_frames.shift();
+ for (var key in check.outgoing) {
+ expect(frame).to.have.property(key).that.deep.equals(check.outgoing[key]);
+ }
+ count_change(frame.count_change);
+ } else if ('event' in check) {
+ var event = events.shift();
+ expect(event.name).to.be.equal(check.event.name);
+ check.event.data.forEach(function(data, index) {
+ expect(event.data[index]).to.deep.equal(data);
+ });
+ } else if ('active' in check) {
+ expect(activeCount).to.be.equal(check.active);
+ } else {
+ throw new Error('Invalid check', check);
+ }
+ });
+ done();
+ }
+
+ setImmediate(execute.bind(null, check));
+}
+
+var example_frames = [
+ { type: 'PRIORITY', flags: {}, priority: 1 },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'RST_STREAM', flags: {}, error: 'CANCEL' },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {}, promised_stream: new Stream(util.log, null) }
+];
+
+var invalid_incoming_frames = {
+ IDLE: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'RST_STREAM', flags: {}, error: 'CANCEL' }
+ ],
+ RESERVED_LOCAL: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ RESERVED_REMOTE: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ OPEN: [
+ ],
+ HALF_CLOSED_LOCAL: [
+ ],
+ HALF_CLOSED_REMOTE: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} }
+ ]
+};
+
+var invalid_outgoing_frames = {
+ IDLE: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} }
+ ],
+ RESERVED_LOCAL: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ RESERVED_REMOTE: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} },
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} }
+ ],
+ OPEN: [
+ ],
+ HALF_CLOSED_LOCAL: [
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {} }
+ ],
+ HALF_CLOSED_REMOTE: [
+ ],
+ CLOSED: [
+ { type: 'WINDOW_UPDATE', flags: {}, settings: {} },
+ { type: 'HEADERS', flags: {}, headers: {}, priority: undefined },
+ { type: 'DATA', flags: {}, data: new Buffer(5) },
+ { type: 'PUSH_PROMISE', flags: {}, headers: {}, promised_stream: new Stream(util.log, null) }
+ ]
+};
+
+describe('stream.js', function() {
+ describe('Stream class', function() {
+ describe('._transition(sending, frame) method', function() {
+ it('should emit error, and answer RST_STREAM for invalid incoming frames', function() {
+ Object.keys(invalid_incoming_frames).forEach(function(state) {
+ invalid_incoming_frames[state].forEach(function(invalid_frame) {
+ var stream = createStream();
+ var connectionErrorHappened = false;
+ stream.state = state;
+ stream.once('connectionError', function() { connectionErrorHappened = true; });
+ stream._transition(false, invalid_frame);
+ expect(connectionErrorHappened);
+ });
+ });
+
+ // CLOSED state as a result of incoming END_STREAM (or RST_STREAM)
+ var stream = createStream();
+ stream.headers({});
+ stream.end();
+ stream.upstream.write({ type: 'HEADERS', headers:{}, flags: { END_STREAM: true }, count_change: util.noop });
+ example_frames.slice(2).forEach(function(invalid_frame) {
+ invalid_frame.count_change = util.noop;
+ expect(stream._transition.bind(stream, false, invalid_frame)).to.throw('Uncaught, unspecified "error" event.');
+ });
+
+ // CLOSED state as a result of outgoing END_STREAM
+ stream = createStream();
+ stream.upstream.write({ type: 'HEADERS', headers:{}, flags: { END_STREAM: true }, count_change: util.noop });
+ stream.headers({});
+ stream.end();
+ example_frames.slice(3).forEach(function(invalid_frame) {
+ invalid_frame.count_change = util.noop;
+ expect(stream._transition.bind(stream, false, invalid_frame)).to.throw('Uncaught, unspecified "error" event.');
+ });
+ });
+ it('should throw exception for invalid outgoing frames', function() {
+ Object.keys(invalid_outgoing_frames).forEach(function(state) {
+ invalid_outgoing_frames[state].forEach(function(invalid_frame) {
+ var stream = createStream();
+ stream.state = state;
+ expect(stream._transition.bind(stream, true, invalid_frame)).to.throw(Error);
+ });
+ });
+ });
+ it('should close the stream when there\'s an incoming or outgoing RST_STREAM', function() {
+ [
+ 'RESERVED_LOCAL',
+ 'RESERVED_REMOTE',
+ 'OPEN',
+ 'HALF_CLOSED_LOCAL',
+ 'HALF_CLOSED_REMOTE'
+ ].forEach(function(state) {
+ [true, false].forEach(function(sending) {
+ var stream = createStream();
+ stream.state = state;
+ stream._transition(sending, { type: 'RST_STREAM', flags: {} });
+ expect(stream.state).to.be.equal('CLOSED');
+ });
+ });
+ });
+ it('should ignore any incoming frame after sending reset', function() {
+ var stream = createStream();
+ stream.reset();
+ example_frames.forEach(stream._transition.bind(stream, false));
+ });
+ it('should ignore certain incoming frames after closing the stream with END_STREAM', function() {
+ var stream = createStream();
+ stream.upstream.write({ type: 'HEADERS', flags: { END_STREAM: true }, headers:{} });
+ stream.headers({});
+ stream.end();
+ example_frames.slice(0,3).forEach(function(frame) {
+ frame.count_change = util.noop;
+ stream._transition(false, frame);
+ });
+ });
+ });
+ });
+ describe('test scenario', function() {
+ describe('sending request', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ execute_sequence([
+ { method : { name: 'headers', arguments: [{ ':path': '/' }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+
+ { wait : 5 },
+ { method : { name: 'end', arguments: [] } },
+ { event : { name: 'state', data: ['HALF_CLOSED_LOCAL'] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: new Buffer(0) } },
+
+ { wait : 10 },
+ { incoming: { type: 'HEADERS', flags: { }, headers: { ':status': 200 } } },
+ { incoming: { type: 'DATA' , flags: { END_STREAM: true }, data: new Buffer(5) } },
+ { event : { name: 'headers', data: [{ ':status': 200 }] } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+ });
+ });
+ describe('answering request', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ var payload = new Buffer(5);
+ execute_sequence([
+ { incoming: { type: 'HEADERS', flags: { }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+ { event : { name: 'headers', data: [{ ':path': '/' }] } },
+
+ { wait : 5 },
+ { incoming: { type: 'DATA', flags: { }, data: new Buffer(5) } },
+ { incoming: { type: 'DATA', flags: { END_STREAM: true }, data: new Buffer(10) } },
+ { event : { name: 'state', data: ['HALF_CLOSED_REMOTE'] } },
+
+ { wait : 5 },
+ { method : { name: 'headers', arguments: [{ ':status': 200 }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':status': 200 } } },
+
+ { wait : 5 },
+ { method : { name: 'end', arguments: [payload] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+ });
+ });
+ describe('sending push stream', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ var payload = new Buffer(5);
+ var pushStream;
+
+ execute_sequence([
+ // receiving request
+ { incoming: { type: 'HEADERS', flags: { END_STREAM: true }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+ { event : { name: 'state', data: ['HALF_CLOSED_REMOTE'] } },
+ { event : { name: 'headers', data: [{ ':path': '/' }] } },
+
+ // sending response headers
+ { wait : 5 },
+ { method : { name: 'headers', arguments: [{ ':status': '200' }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':status': '200' } } },
+
+ // sending push promise
+ { method : { name: 'promise', arguments: [{ ':path': '/' }], ret: function(str) { pushStream = str; } } },
+ { outgoing: { type: 'PUSH_PROMISE', flags: { }, headers: { ':path': '/' } } },
+
+ // sending response data
+ { method : { name: 'end', arguments: [payload] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], function() {
+ // initial state of the promised stream
+ expect(pushStream.state).to.equal('RESERVED_LOCAL');
+
+ execute_sequence(pushStream, [
+ // push headers
+ { wait : 5 },
+ { method : { name: 'headers', arguments: [{ ':status': '200' }] } },
+ { outgoing: { type: 'HEADERS', flags: { }, headers: { ':status': '200' } } },
+ { event : { name: 'state', data: ['HALF_CLOSED_REMOTE'] } },
+
+ // push data
+ { method : { name: 'end', arguments: [payload] } },
+ { outgoing: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 1 }
+ ], done);
+ });
+ });
+ });
+ describe('receiving push stream', function() {
+ it('should trigger the appropriate state transitions and outgoing frames', function(done) {
+ var payload = new Buffer(5);
+ var original_stream = createStream();
+ var promised_stream = createStream();
+
+ done = util.callNTimes(2, done);
+
+ execute_sequence(original_stream, [
+ // sending request headers
+ { method : { name: 'headers', arguments: [{ ':path': '/' }] } },
+ { method : { name: 'end', arguments: [] } },
+ { outgoing: { type: 'HEADERS', flags: { END_STREAM: true }, headers: { ':path': '/' } } },
+ { event : { name: 'state', data: ['OPEN'] } },
+ { event : { name: 'state', data: ['HALF_CLOSED_LOCAL'] } },
+
+ // receiving response headers
+ { wait : 10 },
+ { incoming: { type: 'HEADERS', flags: { }, headers: { ':status': 200 } } },
+ { event : { name: 'headers', data: [{ ':status': 200 }] } },
+
+ // receiving push promise
+ { incoming: { type: 'PUSH_PROMISE', flags: { }, headers: { ':path': '/2.html' }, promised_stream: promised_stream } },
+ { event : { name: 'promise', data: [promised_stream, { ':path': '/2.html' }] } },
+
+ // receiving response data
+ { incoming: { type: 'DATA' , flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+
+ execute_sequence(promised_stream, [
+ // initial state of the promised stream
+ { event : { name: 'state', data: ['RESERVED_REMOTE'] } },
+
+ // push headers
+ { wait : 10 },
+ { incoming: { type: 'HEADERS', flags: { END_STREAM: false }, headers: { ':status': 200 } } },
+ { event : { name: 'state', data: ['HALF_CLOSED_LOCAL'] } },
+ { event : { name: 'headers', data: [{ ':status': 200 }] } },
+
+ // push data
+ { incoming: { type: 'DATA', flags: { END_STREAM: true }, data: payload } },
+ { event : { name: 'state', data: ['CLOSED'] } },
+
+ { active : 0 }
+ ], done);
+ });
+ });
+ });
+
+ describe('bunyan formatter', function() {
+ describe('`s`', function() {
+ var format = stream.serializers.s;
+ it('should assign a unique ID to each frame', function() {
+ var stream1 = createStream();
+ var stream2 = createStream();
+ expect(format(stream1)).to.be.equal(format(stream1));
+ expect(format(stream2)).to.be.equal(format(stream2));
+ expect(format(stream1)).to.not.be.equal(format(stream2));
+ });
+ });
+ });
+});
diff --git a/testing/xpcshell/node-http2/test/util.js b/testing/xpcshell/node-http2/test/util.js
new file mode 100644
index 0000000000..52c6a1be36
--- /dev/null
+++ b/testing/xpcshell/node-http2/test/util.js
@@ -0,0 +1,89 @@
+var path = require('path');
+var fs = require('fs');
+var spawn = require('child_process').spawn;
+
+function noop() {}
+exports.noop = noop;
+
+if (process.env.HTTP2_LOG) {
+ var logOutput = process.stderr;
+ if (process.stderr.isTTY) {
+ var bin = path.resolve(path.dirname(require.resolve('bunyan')), '..', 'bin', 'bunyan');
+ if(bin && fs.existsSync(bin)) {
+ logOutput = spawn(bin, ['-o', 'short'], {
+ stdio: [null, process.stderr, process.stderr]
+ }).stdin;
+ }
+ }
+ exports.createLogger = function(name) {
+ return require('bunyan').createLogger({
+ name: name,
+ stream: logOutput,
+ level: process.env.HTTP2_LOG,
+ serializers: require('../lib/http').serializers
+ });
+ };
+ exports.log = exports.createLogger('test');
+ exports.clientLog = exports.createLogger('client');
+ exports.serverLog = exports.createLogger('server');
+} else {
+ exports.createLogger = function() {
+ return exports.log;
+ };
+ exports.log = exports.clientLog = exports.serverLog = {
+ fatal: noop,
+ error: noop,
+ warn : noop,
+ info : noop,
+ debug: noop,
+ trace: noop,
+
+ child: function() { return this; }
+ };
+}
+
+exports.callNTimes = function callNTimes(limit, done) {
+ if (limit === 0) {
+ done();
+ } else {
+ var i = 0;
+ return function() {
+ i += 1;
+ if (i === limit) {
+ done();
+ }
+ };
+ }
+};
+
+// Concatenate an array of buffers into a new buffer
+exports.concat = function concat(buffers) {
+ var size = 0;
+ for (var i = 0; i < buffers.length; i++) {
+ size += buffers[i].length;
+ }
+
+ var concatenated = new Buffer(size);
+ for (var cursor = 0, j = 0; j < buffers.length; cursor += buffers[j].length, j++) {
+ buffers[j].copy(concatenated, cursor);
+ }
+
+ return concatenated;
+};
+
+exports.random = function random(min, max) {
+ return min + Math.floor(Math.random() * (max - min + 1));
+};
+
+// Concatenate an array of buffers and then cut them into random size buffers
+exports.shuffleBuffers = function shuffleBuffers(buffers) {
+ var concatenated = exports.concat(buffers), output = [], written = 0;
+
+ while (written < concatenated.length) {
+ var chunk_size = Math.min(concatenated.length - written, Math.ceil(Math.random()*20));
+ output.push(concatenated.slice(written, written + chunk_size));
+ written += chunk_size;
+ }
+
+ return output;
+};
diff --git a/testing/xpcshell/remotexpcshelltests.py b/testing/xpcshell/remotexpcshelltests.py
new file mode 100644
index 0000000000..c5124880f8
--- /dev/null
+++ b/testing/xpcshell/remotexpcshelltests.py
@@ -0,0 +1,614 @@
+#!/usr/bin/env python
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import posixpath
+import sys, os
+import subprocess
+import runxpcshelltests as xpcshell
+import tempfile
+import time
+from zipfile import ZipFile
+from mozlog import commandline
+import shutil
+import mozdevice
+import mozfile
+import mozinfo
+
+from xpcshellcommandline import parser_remote
+
+here = os.path.dirname(os.path.abspath(__file__))
+
+def remoteJoin(path1, path2):
+ return posixpath.join(path1, path2)
+
+class RemoteXPCShellTestThread(xpcshell.XPCShellTestThread):
+ def __init__(self, *args, **kwargs):
+ xpcshell.XPCShellTestThread.__init__(self, *args, **kwargs)
+
+ self.shellReturnCode = None
+ # embed the mobile params from the harness into the TestThread
+ mobileArgs = kwargs.get('mobileArgs')
+ for key in mobileArgs:
+ setattr(self, key, mobileArgs[key])
+
+ def buildCmdTestFile(self, name):
+ remoteDir = self.remoteForLocal(os.path.dirname(name))
+ if remoteDir == self.remoteHere:
+ remoteName = os.path.basename(name)
+ else:
+ remoteName = remoteJoin(remoteDir, os.path.basename(name))
+ return ['-e', 'const _TEST_FILE = ["%s"];' %
+ remoteName.replace('\\', '/')]
+
+ def remoteForLocal(self, local):
+ for mapping in self.pathMapping:
+ if (os.path.abspath(mapping.local) == os.path.abspath(local)):
+ return mapping.remote
+ return local
+
+ def setupTempDir(self):
+ # make sure the temp dir exists
+ self.clearRemoteDir(self.remoteTmpDir)
+ # env var is set in buildEnvironment
+ return self.remoteTmpDir
+
+ def setupPluginsDir(self):
+ if not os.path.isdir(self.pluginsPath):
+ return None
+
+ # making sure tmp dir is set up
+ self.setupTempDir()
+
+ pluginsDir = remoteJoin(self.remoteTmpDir, "plugins")
+ self.device.pushDir(self.pluginsPath, pluginsDir)
+ if self.interactive:
+ self.log.info("plugins dir is %s" % pluginsDir)
+ return pluginsDir
+
+ def setupProfileDir(self):
+ self.clearRemoteDir(self.profileDir)
+ if self.interactive or self.singleFile:
+ self.log.info("profile dir is %s" % self.profileDir)
+ return self.profileDir
+
+ def setupMozinfoJS(self):
+ local = tempfile.mktemp()
+ mozinfo.output_to_file(local)
+ mozInfoJSPath = remoteJoin(self.profileDir, "mozinfo.json")
+ self.device.pushFile(local, mozInfoJSPath)
+ os.remove(local)
+ return mozInfoJSPath
+
+ def logCommand(self, name, completeCmd, testdir):
+ self.log.info("%s | full command: %r" % (name, completeCmd))
+ self.log.info("%s | current directory: %r" % (name, self.remoteHere))
+ self.log.info("%s | environment: %s" % (name, self.env))
+
+ def getHeadAndTailFiles(self, test):
+ """Override parent method to find files on remote device.
+
+ Obtains lists of head- and tail files. Returns a tuple containing
+ a list of head files and a list of tail files.
+ """
+ def sanitize_list(s, kind):
+ for f in s.strip().split(' '):
+ f = f.strip()
+ if len(f) < 1:
+ continue
+
+ path = remoteJoin(self.remoteHere, f)
+
+ # skip check for file existence: the convenience of discovering
+ # a missing file does not justify the time cost of the round trip
+ # to the device
+ yield path
+
+ self.remoteHere = self.remoteForLocal(test['here'])
+
+ headlist = test.get('head', '')
+ taillist = test.get('tail', '')
+ return (list(sanitize_list(headlist, 'head')),
+ list(sanitize_list(taillist, 'tail')))
+
+ def buildXpcsCmd(self):
+ # change base class' paths to remote paths and use base class to build command
+ self.xpcshell = remoteJoin(self.remoteBinDir, "xpcw")
+ self.headJSPath = remoteJoin(self.remoteScriptsDir, 'head.js')
+ self.httpdJSPath = remoteJoin(self.remoteComponentsDir, 'httpd.js')
+ self.httpdManifest = remoteJoin(self.remoteComponentsDir, 'httpd.manifest')
+ self.testingModulesDir = self.remoteModulesDir
+ self.testharnessdir = self.remoteScriptsDir
+ xpcshell.XPCShellTestThread.buildXpcsCmd(self)
+ # remove "-g <dir> -a <dir>" and add "--greomni <apk>"
+ del(self.xpcsCmd[1:5])
+ if self.options.localAPK:
+ self.xpcsCmd.insert(3, '--greomni')
+ self.xpcsCmd.insert(4, self.remoteAPK)
+
+ if self.remoteDebugger:
+ # for example, "/data/local/gdbserver" "localhost:12345"
+ self.xpcsCmd = [
+ self.remoteDebugger,
+ self.remoteDebuggerArgs,
+ self.xpcsCmd]
+
+ def killTimeout(self, proc):
+ self.kill(proc)
+
+ def launchProcess(self, cmd, stdout, stderr, env, cwd, timeout=None):
+ self.timedout = False
+ cmd.insert(1, self.remoteHere)
+ outputFile = "xpcshelloutput"
+ with open(outputFile, 'w+') as f:
+ try:
+ self.shellReturnCode = self.device.shell(cmd, f, timeout=timeout+10)
+ except mozdevice.DMError as e:
+ if self.timedout:
+ # If the test timed out, there is a good chance the SUTagent also
+ # timed out and failed to return a return code, generating a
+ # DMError. Ignore the DMError to simplify the error report.
+ self.shellReturnCode = None
+ pass
+ else:
+ raise e
+ # The device manager may have timed out waiting for xpcshell.
+ # Guard against an accumulation of hung processes by killing
+ # them here. Note also that IPC tests may spawn new instances
+ # of xpcshell.
+ self.device.killProcess("xpcshell")
+ return outputFile
+
+ def checkForCrashes(self,
+ dump_directory,
+ symbols_path,
+ test_name=None):
+ if not self.device.dirExists(self.remoteMinidumpDir):
+ # The minidumps directory is automatically created when Fennec
+ # (first) starts, so its lack of presence is a hint that
+ # something went wrong.
+ print "Automation Error: No crash directory (%s) found on remote device" % self.remoteMinidumpDir
+ # Whilst no crash was found, the run should still display as a failure
+ return True
+ with mozfile.TemporaryDirectory() as dumpDir:
+ self.device.getDirectory(self.remoteMinidumpDir, dumpDir)
+ crashed = xpcshell.XPCShellTestThread.checkForCrashes(self, dumpDir, symbols_path, test_name)
+ self.clearRemoteDir(self.remoteMinidumpDir)
+ return crashed
+
+ def communicate(self, proc):
+ f = open(proc, "r")
+ contents = f.read()
+ f.close()
+ os.remove(proc)
+ return contents, ""
+
+ def poll(self, proc):
+ if self.device.processExist("xpcshell") is None:
+ return self.getReturnCode(proc)
+ # Process is still running
+ return None
+
+ def kill(self, proc):
+ return self.device.killProcess("xpcshell", True)
+
+ def getReturnCode(self, proc):
+ if self.shellReturnCode is not None:
+ return self.shellReturnCode
+ else:
+ return -1
+
+ def removeDir(self, dirname):
+ self.device.removeDir(dirname)
+
+ def clearRemoteDir(self, remoteDir):
+ out = ""
+ try:
+ out = self.device.shellCheckOutput([self.remoteClearDirScript, remoteDir])
+ except mozdevice.DMError:
+ self.log.info("unable to delete %s: '%s'" % (remoteDir, str(out)))
+ self.log.info("retrying after 10 seconds...")
+ time.sleep(10)
+ try:
+ out = self.device.shellCheckOutput([self.remoteClearDirScript, remoteDir])
+ except mozdevice.DMError:
+ self.log.error("failed to delete %s: '%s'" % (remoteDir, str(out)))
+
+ #TODO: consider creating a separate log dir. We don't have the test file structure,
+ # so we use filename.log. Would rather see ./logs/filename.log
+ def createLogFile(self, test, stdout):
+ try:
+ f = None
+ filename = test.replace('\\', '/').split('/')[-1] + ".log"
+ f = open(filename, "w")
+ f.write(stdout)
+
+ finally:
+ if f is not None:
+ f.close()
+
+
+# A specialization of XPCShellTests that runs tests on an Android device
+# via devicemanager.
+class XPCShellRemote(xpcshell.XPCShellTests, object):
+
+ def __init__(self, devmgr, options, log):
+ xpcshell.XPCShellTests.__init__(self, log)
+
+ # Add Android version (SDK level) to mozinfo so that manifest entries
+ # can be conditional on android_version.
+ androidVersion = devmgr.shellCheckOutput(['getprop', 'ro.build.version.sdk'])
+ mozinfo.info['android_version'] = androidVersion
+
+ self.localLib = options.localLib
+ self.localBin = options.localBin
+ self.options = options
+ self.device = devmgr
+ self.pathMapping = []
+ self.remoteTestRoot = "%s/xpc" % self.device.deviceRoot
+ # remoteBinDir contains xpcshell and its wrapper script, both of which must
+ # be executable. Since +x permissions cannot usually be set on /mnt/sdcard,
+ # and the test root may be on /mnt/sdcard, remoteBinDir is set to be on
+ # /data/local, always.
+ self.remoteBinDir = "/data/local/xpcb"
+ # Terse directory names are used here ("c" for the components directory)
+ # to minimize the length of the command line used to execute
+ # xpcshell on the remote device. adb has a limit to the number
+ # of characters used in a shell command, and the xpcshell command
+ # line can be quite complex.
+ self.remoteTmpDir = remoteJoin(self.remoteTestRoot, "tmp")
+ self.remoteScriptsDir = self.remoteTestRoot
+ self.remoteComponentsDir = remoteJoin(self.remoteTestRoot, "c")
+ self.remoteModulesDir = remoteJoin(self.remoteTestRoot, "m")
+ self.remoteMinidumpDir = remoteJoin(self.remoteTestRoot, "minidumps")
+ self.remoteClearDirScript = remoteJoin(self.remoteBinDir, "cleardir")
+ self.profileDir = remoteJoin(self.remoteTestRoot, "p")
+ self.remoteDebugger = options.debugger
+ self.remoteDebuggerArgs = options.debuggerArgs
+ self.testingModulesDir = options.testingModulesDir
+
+ self.env = {}
+
+ if self.options.objdir:
+ self.xpcDir = os.path.join(self.options.objdir, "_tests/xpcshell")
+ elif os.path.isdir(os.path.join(here, 'tests')):
+ self.xpcDir = os.path.join(here, 'tests')
+ else:
+ print >> sys.stderr, "Couldn't find local xpcshell test directory"
+ sys.exit(1)
+
+ if options.localAPK:
+ self.localAPKContents = ZipFile(options.localAPK)
+ if options.setup:
+ self.setupTestDir()
+ self.setupUtilities()
+ self.setupModules()
+ self.setupMinidumpDir()
+ self.remoteAPK = None
+ if options.localAPK:
+ self.remoteAPK = remoteJoin(self.remoteBinDir, os.path.basename(options.localAPK))
+ self.setAppRoot()
+
+ # data that needs to be passed to the RemoteXPCShellTestThread
+ self.mobileArgs = {
+ 'device': self.device,
+ 'remoteBinDir': self.remoteBinDir,
+ 'remoteScriptsDir': self.remoteScriptsDir,
+ 'remoteComponentsDir': self.remoteComponentsDir,
+ 'remoteModulesDir': self.remoteModulesDir,
+ 'options': self.options,
+ 'remoteDebugger': self.remoteDebugger,
+ 'pathMapping': self.pathMapping,
+ 'profileDir': self.profileDir,
+ 'remoteTmpDir': self.remoteTmpDir,
+ 'remoteMinidumpDir': self.remoteMinidumpDir,
+ 'remoteClearDirScript': self.remoteClearDirScript,
+ }
+ if self.remoteAPK:
+ self.mobileArgs['remoteAPK'] = self.remoteAPK
+
+ def setLD_LIBRARY_PATH(self):
+ self.env["LD_LIBRARY_PATH"] = self.remoteBinDir
+
+ def pushWrapper(self):
+ # Rather than executing xpcshell directly, this wrapper script is
+ # used. By setting environment variables and the cwd in the script,
+ # the length of the per-test command line is shortened. This is
+ # often important when using ADB, as there is a limit to the length
+ # of the ADB command line.
+ localWrapper = tempfile.mktemp()
+ f = open(localWrapper, "w")
+ f.write("#!/system/bin/sh\n")
+ for envkey, envval in self.env.iteritems():
+ f.write("export %s=%s\n" % (envkey, envval))
+ f.writelines([
+ "cd $1\n",
+ "echo xpcw: cd $1\n",
+ "shift\n",
+ "echo xpcw: xpcshell \"$@\"\n",
+ "%s/xpcshell \"$@\"\n" % self.remoteBinDir])
+ f.close()
+ remoteWrapper = remoteJoin(self.remoteBinDir, "xpcw")
+ self.device.pushFile(localWrapper, remoteWrapper)
+ os.remove(localWrapper)
+
+ # Removing and re-creating a directory is a common operation which
+ # can be implemented more efficiently with a shell script.
+ localWrapper = tempfile.mktemp()
+ f = open(localWrapper, "w")
+ # The directory may not exist initially, so rm may fail. 'rm -f' is not
+ # supported on some Androids. Similarly, 'test' and 'if [ -d ]' are not
+ # universally available, so we just ignore errors from rm.
+ f.writelines([
+ "#!/system/bin/sh\n",
+ "rm -r \"$1\"\n",
+ "mkdir \"$1\"\n"])
+ f.close()
+ self.device.pushFile(localWrapper, self.remoteClearDirScript)
+ os.remove(localWrapper)
+
+ self.device.chmodDir(self.remoteBinDir)
+
+ def buildEnvironment(self):
+ self.buildCoreEnvironment()
+ self.setLD_LIBRARY_PATH()
+ self.env["MOZ_LINKER_CACHE"] = self.remoteBinDir
+ if self.options.localAPK and self.appRoot:
+ self.env["GRE_HOME"] = self.appRoot
+ self.env["XPCSHELL_TEST_PROFILE_DIR"] = self.profileDir
+ self.env["TMPDIR"] = self.remoteTmpDir
+ self.env["HOME"] = self.profileDir
+ self.env["XPCSHELL_TEST_TEMP_DIR"] = self.remoteTmpDir
+ self.env["XPCSHELL_MINIDUMP_DIR"] = self.remoteMinidumpDir
+ if self.options.setup:
+ self.pushWrapper()
+
+ def setAppRoot(self):
+ # Determine the application root directory associated with the package
+ # name used by the Fennec APK.
+ self.appRoot = None
+ packageName = None
+ if self.options.localAPK:
+ try:
+ packageName = self.localAPKContents.read("package-name.txt")
+ if packageName:
+ self.appRoot = self.device.getAppRoot(packageName.strip())
+ except Exception as detail:
+ print "unable to determine app root: " + str(detail)
+ pass
+ return None
+
    def setupUtilities(self):
        """Create the remote working directories and push the harness support
        files onto the device: head.js, the test-server binaries, the httpd
        component files, and (when provided) the APK itself."""
        if (not self.device.dirExists(self.remoteBinDir)):
            # device.mkDir may fail here where shellCheckOutput may succeed -- see bug 817235
            try:
                self.device.shellCheckOutput(["mkdir", self.remoteBinDir]);
            except mozdevice.DMError:
                # Might get a permission error; try again as root, if available
                self.device.shellCheckOutput(["mkdir", self.remoteBinDir], root=True);
                self.device.shellCheckOutput(["chmod", "777", self.remoteBinDir], root=True);

        remotePrefDir = remoteJoin(self.remoteBinDir, "defaults/pref")
        # Always start from a clean temporary directory.
        if (self.device.dirExists(self.remoteTmpDir)):
            self.device.removeDir(self.remoteTmpDir)
        self.device.mkDir(self.remoteTmpDir)
        if (not self.device.dirExists(remotePrefDir)):
            self.device.mkDirs(remoteJoin(remotePrefDir, "extra"))
        if (not self.device.dirExists(self.remoteScriptsDir)):
            self.device.mkDir(self.remoteScriptsDir)
        if (not self.device.dirExists(self.remoteComponentsDir)):
            self.device.mkDir(self.remoteComponentsDir)

        # The harness bootstrap script, shipped next to this file.
        local = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'head.js')
        remoteFile = remoteJoin(self.remoteScriptsDir, "head.js")
        self.device.pushFile(local, remoteFile)

        # The xpcshell binary is required for all tests. Additional binaries
        # are required for some tests. This list should be similar to
        # TEST_HARNESS_BINS in testing/mochitest/Makefile.in.
        binaries = ["xpcshell",
                    "ssltunnel",
                    "certutil",
                    "pk12util",
                    "BadCertServer",
                    "OCSPStaplingServer",
                    "GenerateOCSPResponse"]
        for fname in binaries:
            local = os.path.join(self.localBin, fname)
            if os.path.isfile(local):
                print >> sys.stderr, "Pushing %s.." % fname
                remoteFile = remoteJoin(self.remoteBinDir, fname)
                self.device.pushFile(local, remoteFile)
            else:
                # Missing binaries only produce a warning here; tests that
                # need them will fail later.
                print >> sys.stderr, "*** Expected binary %s not found in %s!" % (fname, self.localBin)

        # The httpd.js web server component, its manifest and typelib.
        local = os.path.join(self.localBin, "components/httpd.js")
        remoteFile = remoteJoin(self.remoteComponentsDir, "httpd.js")
        self.device.pushFile(local, remoteFile)

        local = os.path.join(self.localBin, "components/httpd.manifest")
        remoteFile = remoteJoin(self.remoteComponentsDir, "httpd.manifest")
        self.device.pushFile(local, remoteFile)

        local = os.path.join(self.localBin, "components/test_necko.xpt")
        remoteFile = remoteJoin(self.remoteComponentsDir, "test_necko.xpt")
        self.device.pushFile(local, remoteFile)

        if self.options.localAPK:
            remoteFile = remoteJoin(self.remoteBinDir, os.path.basename(self.options.localAPK))
            self.device.pushFile(self.options.localAPK, remoteFile)

        self.pushLibs()
+
+ def pushLibs(self):
+ pushed_libs_count = 0
+ if self.options.localAPK:
+ try:
+ dir = tempfile.mkdtemp()
+ for info in self.localAPKContents.infolist():
+ if info.filename.endswith(".so"):
+ print >> sys.stderr, "Pushing %s.." % info.filename
+ remoteFile = remoteJoin(self.remoteBinDir, os.path.basename(info.filename))
+ self.localAPKContents.extract(info, dir)
+ localFile = os.path.join(dir, info.filename)
+ with open(localFile) as f:
+ # Decompress xz-compressed file.
+ if f.read(5)[1:] == '7zXZ':
+ cmd = ['xz', '-df', '--suffix', '.so', localFile]
+ subprocess.check_output(cmd)
+ # xz strips the ".so" file suffix.
+ os.rename(localFile[:-3], localFile)
+ self.device.pushFile(localFile, remoteFile)
+ pushed_libs_count += 1
+ finally:
+ shutil.rmtree(dir)
+ return pushed_libs_count
+
+ for file in os.listdir(self.localLib):
+ if (file.endswith(".so")):
+ print >> sys.stderr, "Pushing %s.." % file
+ if 'libxul' in file:
+ print >> sys.stderr, "This is a big file, it could take a while."
+ localFile = os.path.join(self.localLib, file)
+ remoteFile = remoteJoin(self.remoteBinDir, file)
+ self.device.pushFile(localFile, remoteFile)
+ pushed_libs_count += 1
+
+ # Additional libraries may be found in a sub-directory such as "lib/armeabi-v7a"
+ localArmLib = os.path.join(self.localLib, "lib")
+ if os.path.exists(localArmLib):
+ for root, dirs, files in os.walk(localArmLib):
+ for file in files:
+ if (file.endswith(".so")):
+ print >> sys.stderr, "Pushing %s.." % file
+ localFile = os.path.join(root, file)
+ remoteFile = remoteJoin(self.remoteBinDir, file)
+ self.device.pushFile(localFile, remoteFile)
+ pushed_libs_count += 1
+
+ return pushed_libs_count
+
+ def setupModules(self):
+ if self.testingModulesDir:
+ self.device.pushDir(self.testingModulesDir, self.remoteModulesDir)
+
    def setupTestDir(self):
        """Push the entire local xpcshell test tree to the device."""
        print 'pushing %s' % self.xpcDir
        try:
            # The tests directory can be quite large: 5000 files and growing!
            # Sometimes - like on a low-end aws instance running an emulator - the push
            # may exceed the default 5 minute timeout, so we increase it here to 10 minutes.
            self.device.pushDir(self.xpcDir, self.remoteScriptsDir, timeout=600, retryLimit=10)
        except TypeError:
            # Foopies have an older mozdevice ver without retryLimit
            self.device.pushDir(self.xpcDir, self.remoteScriptsDir)
+
+ def setupMinidumpDir(self):
+ if self.device.dirExists(self.remoteMinidumpDir):
+ self.device.removeDir(self.remoteMinidumpDir)
+ self.device.mkDir(self.remoteMinidumpDir)
+
+ def buildTestList(self, test_tags=None, test_paths=None):
+ xpcshell.XPCShellTests.buildTestList(self, test_tags=test_tags, test_paths=test_paths)
+ uniqueTestPaths = set([])
+ for test in self.alltests:
+ uniqueTestPaths.add(test['here'])
+ for testdir in uniqueTestPaths:
+ abbrevTestDir = os.path.relpath(testdir, self.xpcDir)
+ remoteScriptDir = remoteJoin(self.remoteScriptsDir, abbrevTestDir)
+ self.pathMapping.append(PathMapping(testdir, remoteScriptDir))
+
def verifyRemoteOptions(parser, options):
    """Fill in default locations for the local library and binary
    directories when not given; calls parser.error() (which exits) when a
    required directory cannot be located. Returns the updated options."""
    if options.localLib is None:
        if options.localAPK and options.objdir:
            # Fennec builds keep libraries in one of these objdir subdirs.
            for subdir in ('dist/fennec', 'fennec/lib'):
                options.localLib = os.path.join(options.objdir, subdir)
                if os.path.isdir(options.localLib):
                    break
            else:
                # for/else: no candidate directory existed.
                parser.error("Couldn't find local library dir, specify --local-lib-dir")
        elif options.objdir:
            options.localLib = os.path.join(options.objdir, 'dist/bin')
        elif os.path.isfile(os.path.join(here, '..', 'bin', 'xpcshell')):
            # assume tests are being run from a tests.zip
            options.localLib = os.path.abspath(os.path.join(here, '..', 'bin'))
        else:
            parser.error("Couldn't find local library dir, specify --local-lib-dir")

    if options.localBin is None:
        if options.objdir:
            for subdir in ('dist/bin', 'bin'):
                options.localBin = os.path.join(options.objdir, subdir)
                if os.path.isdir(options.localBin):
                    break
            else:
                parser.error("Couldn't find local binary dir, specify --local-bin-dir")
        elif os.path.isfile(os.path.join(here, '..', 'bin', 'xpcshell')):
            # assume tests are being run from a tests.zip
            options.localBin = os.path.abspath(os.path.join(here, '..', 'bin'))
        else:
            parser.error("Couldn't find local binary dir, specify --local-bin-dir")
    return options
+
class PathMapping:
    """Associates a local test directory with its pushed remote counterpart."""

    def __init__(self, localDir, remoteDir):
        self.local = localDir
        self.remote = remoteDir
+
def main():
    """Command-line entry point: parse options, locate the APK, connect to
    the device over adb or SUT, and run the remote xpcshell suite."""
    if sys.version_info < (2,7):
        print >>sys.stderr, "Error: You must use python version 2.7 or newer but less than 3.0"
        sys.exit(1)

    parser = parser_remote()
    options = parser.parse_args()
    if not options.localAPK:
        # No APK given explicitly: use the first fennec*.apk in the objdir.
        for file in os.listdir(os.path.join(options.objdir, "dist")):
            if (file.endswith(".apk") and file.startswith("fennec")):
                options.localAPK = os.path.join(options.objdir, "dist")
                options.localAPK = os.path.join(options.localAPK, file)
                print >>sys.stderr, "using APK: " + options.localAPK
                break
        else:
            # for/else: no matching APK was found in the objdir.
            print >>sys.stderr, "Error: please specify an APK"
            sys.exit(1)

    options = verifyRemoteOptions(parser, options)
    log = commandline.setup_logging("Remote XPCShell",
                                    options,
                                    {"tbpl": sys.stdout})

    # Pick the device-manager transport: adb (with or without TCP/IP), or SUT.
    if options.dm_trans == "adb":
        if options.deviceIP:
            dm = mozdevice.DroidADB(options.deviceIP, options.devicePort, packageName=None, deviceRoot=options.remoteTestRoot)
        else:
            dm = mozdevice.DroidADB(packageName=None, deviceRoot=options.remoteTestRoot)
    else:
        # The SUT transport always needs an explicit device address.
        if not options.deviceIP:
            print "Error: you must provide a device IP to connect to via the --device option"
            sys.exit(1)
        dm = mozdevice.DroidSUT(options.deviceIP, options.devicePort, deviceRoot=options.remoteTestRoot)

    if options.interactive and not options.testPath:
        print >>sys.stderr, "Error: You must specify a test filename in interactive mode!"
        sys.exit(1)

    if options.xpcshell is None:
        options.xpcshell = "xpcshell"

    xpcsh = XPCShellRemote(dm, options, log)

    # we don't run concurrent tests on mobile
    options.sequential = True

    if not xpcsh.runTests(testClass=RemoteXPCShellTestThread,
                          mobileArgs=xpcsh.mobileArgs,
                          **vars(options)):
        sys.exit(1)
diff --git a/testing/xpcshell/runxpcshelltests.py b/testing/xpcshell/runxpcshelltests.py
new file mode 100755
index 0000000000..7c88343dc9
--- /dev/null
+++ b/testing/xpcshell/runxpcshelltests.py
@@ -0,0 +1,1501 @@
+#!/usr/bin/env python
+#
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+import copy
+import importlib
+import json
+import math
+import mozdebug
+import mozinfo
+import os
+import os.path
+import random
+import re
+import shutil
+import signal
+import subprocess
+import sys
+import tempfile
+import time
+import traceback
+
+from collections import deque, namedtuple
+from distutils import dir_util
+from distutils.version import LooseVersion
+from multiprocessing import cpu_count
+from argparse import ArgumentParser
+from subprocess import Popen, PIPE, STDOUT
+from tempfile import mkdtemp, gettempdir
+from threading import (
+ Timer,
+ Thread,
+ Event,
+ current_thread,
+)
+
+try:
+ import psutil
+ HAVE_PSUTIL = True
+except Exception:
+ HAVE_PSUTIL = False
+
+from automation import Automation
+from xpcshellcommandline import parser_desktop
+
# Fully-resolved directory containing this harness script.
SCRIPT_DIR = os.path.abspath(os.path.realpath(os.path.dirname(__file__)))

# Default per-test timeout enforced by the harness, in seconds.
HARNESS_TIMEOUT = 5 * 60

# benchmarking on tbpl revealed that this works best for now
NUM_THREADS = int(cpu_count() * 4)

# Structured-log actions the harness interprets; any other JSON line from
# the test process is treated as raw output.
EXPECTED_LOG_ACTIONS = set([
    "test_status",
    "log",
])
+
# --------------------------------------------------------------
# TODO: this is a hack for mozbase without virtualenv, remove with bug 849900
#
here = os.path.dirname(__file__)
mozbase = os.path.realpath(os.path.join(os.path.dirname(here), 'mozbase'))

# When running from a source layout, put every mozbase package directory
# on sys.path so the imports below resolve without installation.
if os.path.isdir(mozbase):
    for package in os.listdir(mozbase):
        sys.path.append(os.path.join(mozbase, package))
+
+from manifestparser import TestManifest
+from manifestparser.filters import chunk_by_slice, tags, pathprefix
+from mozlog import commandline
+import mozcrash
+import mozinfo
+from mozrunner.utils import get_stack_fixer_function
+
+# --------------------------------------------------------------
+
+# TODO: perhaps this should be in a more generally shared location?
+# This regex matches all of the C0 and C1 control characters
+# (U+0000 through U+001F; U+007F; U+0080 through U+009F),
+# except TAB (U+0009), CR (U+000D), LF (U+000A) and backslash (U+005C).
+# A raw string is deliberately not used.
+_cleanup_encoding_re = re.compile(u'[\x00-\x08\x0b\x0c\x0e-\x1f\x7f-\x9f\\\\]')
+def _cleanup_encoding_repl(m):
+ c = m.group(0)
+ return '\\\\' if c == '\\' else '\\x{0:02X}'.format(ord(c))
def cleanup_encoding(s):
    """S is either a byte or unicode string. Either way it may
    contain control characters, unpaired surrogates, reserved code
    points, etc. If it is a byte string, it is assumed to be
    UTF-8, but it may not be *correct* UTF-8. Return a
    sanitized unicode object."""
    # Non-string values (ints, None, ...) are simply stringified.
    if not isinstance(s, basestring):
        return unicode(s)
    if not isinstance(s, unicode):
        # Byte string: decode as UTF-8, mapping invalid bytes to U+FFFD.
        s = s.decode('utf-8', 'replace')
    # Replace all C0 and C1 control characters with \xNN escapes.
    return _cleanup_encoding_re.sub(_cleanup_encoding_repl, s)
+
""" Control-C handling """
# Module-level flag polled by the harness between tests; the handler only
# sets it so that interruption is handled at a safe point.
gotSIGINT = False
def markGotSIGINT(signum, stackFrame):
    global gotSIGINT
    gotSIGINT = True
+
+class XPCShellTestThread(Thread):
+ def __init__(self, test_object, event, cleanup_dir_list, retry=True,
+ app_dir_key=None, interactive=False,
+ verbose=False, pStdout=None, pStderr=None, keep_going=False,
+ log=None, usingTSan=False, **kwargs):
+ Thread.__init__(self)
+ self.daemon = True
+
+ self.test_object = test_object
+ self.cleanup_dir_list = cleanup_dir_list
+ self.retry = retry
+
+ self.appPath = kwargs.get('appPath')
+ self.xrePath = kwargs.get('xrePath')
+ self.testingModulesDir = kwargs.get('testingModulesDir')
+ self.debuggerInfo = kwargs.get('debuggerInfo')
+ self.jsDebuggerInfo = kwargs.get('jsDebuggerInfo')
+ self.pluginsPath = kwargs.get('pluginsPath')
+ self.httpdManifest = kwargs.get('httpdManifest')
+ self.httpdJSPath = kwargs.get('httpdJSPath')
+ self.headJSPath = kwargs.get('headJSPath')
+ self.testharnessdir = kwargs.get('testharnessdir')
+ self.profileName = kwargs.get('profileName')
+ self.singleFile = kwargs.get('singleFile')
+ self.env = copy.deepcopy(kwargs.get('env'))
+ self.symbolsPath = kwargs.get('symbolsPath')
+ self.logfiles = kwargs.get('logfiles')
+ self.xpcshell = kwargs.get('xpcshell')
+ self.xpcsRunArgs = kwargs.get('xpcsRunArgs')
+ self.failureManifest = kwargs.get('failureManifest')
+ self.jscovdir = kwargs.get('jscovdir')
+ self.stack_fixer_function = kwargs.get('stack_fixer_function')
+ self._rootTempDir = kwargs.get('tempDir')
+
+ self.app_dir_key = app_dir_key
+ self.interactive = interactive
+ self.verbose = verbose
+ self.pStdout = pStdout
+ self.pStderr = pStderr
+ self.keep_going = keep_going
+ self.log = log
+ self.usingTSan = usingTSan
+
+ # only one of these will be set to 1. adding them to the totals in
+ # the harness
+ self.passCount = 0
+ self.todoCount = 0
+ self.failCount = 0
+
+ # Context for output processing
+ self.output_lines = []
+ self.has_failure_output = False
+ self.saw_proc_start = False
+ self.saw_proc_end = False
+ self.complete_command = None
+ self.harness_timeout = kwargs.get('harness_timeout')
+ self.timedout = False
+
+ # event from main thread to signal work done
+ self.event = event
+ self.done = False # explicitly set flag so we don't rely on thread.isAlive
+
    def run(self):
        """Thread entry point: run the test, capture any harness-level
        exception for the main thread to report, then signal completion."""
        try:
            self.run_test()
        except Exception as e:
            # A worker must never let an exception escape; stash it instead.
            self.exception = e
            self.traceback = traceback.format_exc()
        else:
            self.exception = None
            self.traceback = None
        if self.retry:
            # run_test() clears self.retry on a final outcome, so reaching
            # here with it still set means the test will be queued again.
            self.log.info("%s failed or timed out, will retry." %
                          self.test_object['id'])
        self.done = True
        self.event.set()
+
+ def kill(self, proc):
+ """
+ Simple wrapper to kill a process.
+ On a remote system, this is overloaded to handle remote process communication.
+ """
+ return proc.kill()
+
+ def removeDir(self, dirname):
+ """
+ Simple wrapper to remove (recursively) a given directory.
+ On a remote system, we need to overload this to work on the remote filesystem.
+ """
+ shutil.rmtree(dirname)
+
+ def poll(self, proc):
+ """
+ Simple wrapper to check if a process has terminated.
+ On a remote system, this is overloaded to handle remote process communication.
+ """
+ return proc.poll()
+
+ def createLogFile(self, test_file, stdout):
+ """
+ For a given test file and stdout buffer, create a log file.
+ On a remote system we have to fix the test name since it can contain directories.
+ """
+ with open(test_file + ".log", "w") as f:
+ f.write(stdout)
+
+ def getReturnCode(self, proc):
+ """
+ Simple wrapper to get the return code for a given process.
+ On a remote system we overload this to work with the remote process management.
+ """
+ return proc.returncode
+
    def communicate(self, proc):
        """
        Simple wrapper to communicate with a process.
        On a remote system, this is overloaded to handle remote process communication.
        """
        # Processing of incremental output put here to
        # sidestep issues on remote platforms, where what we know
        # as proc is a file pulled off of a device.
        if proc.stdout:
            while True:
                line = proc.stdout.readline()
                if not line:
                    break
                self.process_line(line)

            # A child that announced CHILD-TEST-STARTED but never
            # CHILD-TEST-COMPLETED died mid-run: count that as failure output.
            if self.saw_proc_start and not self.saw_proc_end:
                self.has_failure_output = True

        return proc.communicate()
+
    def launchProcess(self, cmd, stdout, stderr, env, cwd, timeout=None):
        """
        Simple wrapper to launch a process.
        On a remote system, this is more complex and we need to overload this function.
        """
        # timeout is needed by remote xpcshell to extend the
        # devicemanager.shell() timeout. It is not used in this function.
        # Prefer psutil's Popen wrapper when available (see HAVE_PSUTIL);
        # otherwise fall back to subprocess.Popen.
        if HAVE_PSUTIL:
            popen_func = psutil.Popen
        else:
            popen_func = Popen
        proc = popen_func(cmd, stdout=stdout, stderr=stderr,
                          env=env, cwd=cwd)
        return proc
+
    def checkForCrashes(self,
                        dump_directory,
                        symbols_path,
                        test_name=None):
        """
        Simple wrapper to check for crashes.
        On a remote system, this is more complex and we need to overload this function.
        """
        # Delegates to mozcrash, which scans dump_directory for minidumps
        # and symbolicates them against symbols_path.
        return mozcrash.check_for_crashes(dump_directory, symbols_path, test_name=test_name)
+
    def logCommand(self, name, completeCmd, testdir):
        """Log the full command line, working directory, and environment
        delta used to launch a test (verbose mode only)."""
        self.log.info("%s | full command: %r" % (name, completeCmd))
        self.log.info("%s | current directory: %r" % (name, testdir))
        # Show only those environment variables that are changed from
        # the ambient environment.
        changedEnv = (set("%s=%s" % i for i in self.env.iteritems())
                      - set("%s=%s" % i for i in os.environ.iteritems()))
        self.log.info("%s | environment: %s" % (name, list(changedEnv)))
+
    def killTimeout(self, proc):
        # Kill a hung xpcshell and capture a native stack for diagnosis
        # (no screenshot: xpcshell has no UI).
        Automation().killAndGetStackNoScreenshot(proc.pid,
                                                 self.appPath,
                                                 self.debuggerInfo)
+
    def postCheck(self, proc):
        """Checks for a still-running test process, kills it and fails the test if found.
        We can sometimes get here before the process has terminated, which would
        cause removeDir() to fail - so check for the process and kill it if needed.
        """
        if proc and self.poll(proc) is None:
            self.kill(proc)
            message = "%s | Process still running after test!" % self.test_object['id']
            if self.retry:
                # The test will be re-run, so don't count this as a failure yet.
                self.log.info(message)
                return

            self.log.error(message)
            self.log_full_output()
            self.failCount = 1
+
    def testTimeout(self, proc):
        """Watchdog-timer callback: report the test as TIMEOUT, kill the
        process, and clean up its temporary directories."""
        if self.test_object['expected'] == 'pass':
            expected = 'PASS'
        else:
            expected = 'FAIL'

        if self.retry:
            # A first-attempt timeout is reported as expected so it doesn't
            # count against the run yet; the test will be retried.
            self.log.test_end(self.test_object['id'], 'TIMEOUT',
                              expected='TIMEOUT',
                              message="Test timed out")
        else:
            self.failCount = 1
            self.log.test_end(self.test_object['id'], 'TIMEOUT',
                              expected=expected,
                              message="Test timed out")
            self.log_full_output()

        # Mark the thread done *before* killing so run_test's bookkeeping
        # sees the timeout.
        self.done = True
        self.timedout = True
        self.killTimeout(proc)
        self.log.info("xpcshell return code: %s" % self.getReturnCode(proc))
        self.postCheck(proc)
        self.clean_temp_dirs(self.test_object['path'])
+
+ def buildCmdTestFile(self, name):
+ """
+ Build the command line arguments for the test file.
+ On a remote system, this may be overloaded to use a remote path structure.
+ """
+ return ['-e', 'const _TEST_FILE = ["%s"];' %
+ name.replace('\\', '/')]
+
+ def setupTempDir(self):
+ tempDir = mkdtemp(prefix='xpc-other-', dir=self._rootTempDir)
+ self.env["XPCSHELL_TEST_TEMP_DIR"] = tempDir
+ if self.interactive:
+ self.log.info("temp dir is %s" % tempDir)
+ return tempDir
+
    def setupPluginsDir(self):
        """Copy the configured plugins into a per-test temporary directory.
        Returns the new directory path, or None when the source plugins
        path does not exist."""
        if not os.path.isdir(self.pluginsPath):
            return None

        pluginsDir = mkdtemp(prefix='xpc-plugins-', dir=self._rootTempDir)
        # shutil.copytree requires dst to not exist. Deleting the tempdir
        # would make a race condition possible in a concurrent environment,
        # so we are using dir_utils.copy_tree which accepts an existing dst
        dir_util.copy_tree(self.pluginsPath, pluginsDir)
        if self.interactive:
            self.log.info("plugins dir is %s" % pluginsDir)
        return pluginsDir
+
+ def setupProfileDir(self):
+ """
+ Create a temporary folder for the profile and set appropriate environment variables.
+ When running check-interactive and check-one, the directory is well-defined and
+ retained for inspection once the tests complete.
+
+ On a remote system, this may be overloaded to use a remote path structure.
+ """
+ if self.interactive or self.singleFile:
+ profileDir = os.path.join(gettempdir(), self.profileName, "xpcshellprofile")
+ try:
+ # This could be left over from previous runs
+ self.removeDir(profileDir)
+ except:
+ pass
+ os.makedirs(profileDir)
+ else:
+ profileDir = mkdtemp(prefix='xpc-profile-', dir=self._rootTempDir)
+ self.env["XPCSHELL_TEST_PROFILE_DIR"] = profileDir
+ if self.interactive or self.singleFile:
+ self.log.info("profile dir is %s" % profileDir)
+ return profileDir
+
    def setupMozinfoJS(self):
        """Dump the current mozinfo dictionary into the profile as
        mozinfo.json and return its JS-escaped path."""
        mozInfoJSPath = os.path.join(self.profileDir, 'mozinfo.json')
        # Escape backslashes so the path can be embedded in a JS string.
        mozInfoJSPath = mozInfoJSPath.replace('\\', '\\\\')
        mozinfo.output_to_file(mozInfoJSPath)
        return mozInfoJSPath
+
+ def buildCmdHead(self, headfiles, tailfiles, xpcscmd):
+ """
+ Build the command line arguments for the head and tail files,
+ along with the address of the webserver which some tests require.
+
+ On a remote system, this is overloaded to resolve quoting issues over a secondary command line.
+ """
+ cmdH = ", ".join(['"' + f.replace('\\', '/') + '"'
+ for f in headfiles])
+ cmdT = ", ".join(['"' + f.replace('\\', '/') + '"'
+ for f in tailfiles])
+
+ dbgport = 0 if self.jsDebuggerInfo is None else self.jsDebuggerInfo.port
+
+ return xpcscmd + \
+ ['-e', 'const _SERVER_ADDR = "localhost"',
+ '-e', 'const _HEAD_FILES = [%s];' % cmdH,
+ '-e', 'const _TAIL_FILES = [%s];' % cmdT,
+ '-e', 'const _JSDEBUGGER_PORT = %d;' % dbgport,
+ ]
+
+ def getHeadAndTailFiles(self, test):
+ """Obtain lists of head- and tail files. Returns a tuple
+ containing a list of head files and a list of tail files.
+ """
+ def sanitize_list(s, kind):
+ for f in s.strip().split(' '):
+ f = f.strip()
+ if len(f) < 1:
+ continue
+
+ path = os.path.normpath(os.path.join(test['here'], f))
+ if not os.path.exists(path):
+ raise Exception('%s file does not exist: %s' % (kind, path))
+
+ if not os.path.isfile(path):
+ raise Exception('%s file is not a file: %s' % (kind, path))
+
+ yield path
+
+ headlist = test.get('head', '')
+ taillist = test.get('tail', '')
+ return (list(sanitize_list(headlist, 'head')),
+ list(sanitize_list(taillist, 'tail')))
+
    def buildXpcsCmd(self):
        """
        Load the root head.js file as the first file in our test path, before other head, test, and tail files.
        On a remote system, we overload this to add additional command line arguments, so this gets overloaded.
        """
        # - NOTE: if you rename/add any of the constants set here, update
        # do_load_child_test_harness() in head.js
        if not self.appPath:
            self.appPath = self.xrePath

        self.xpcsCmd = [
            self.xpcshell,
            '-g', self.xrePath,
            '-a', self.appPath,
            '-r', self.httpdManifest,
            '-m',
            '-s',
            '-e', 'const _HEAD_JS_PATH = "%s";' % self.headJSPath,
            '-e', 'const _MOZINFO_JS_PATH = "%s";' % self.mozInfoJSPath,
        ]

        if self.testingModulesDir:
            # Escape backslashes in string literal.
            sanitized = self.testingModulesDir.replace('\\', '\\\\')
            self.xpcsCmd.extend([
                '-e',
                'const _TESTING_MODULES_DIR = "%s";' % sanitized
            ])

        # The shared harness bootstrap script loads before per-test files.
        self.xpcsCmd.extend(['-f', os.path.join(self.testharnessdir, 'head.js')])

        if self.debuggerInfo:
            # Wrap the command so xpcshell runs under the native debugger.
            self.xpcsCmd = [self.debuggerInfo.path] + self.debuggerInfo.args + self.xpcsCmd

        # Automation doesn't specify a pluginsPath and xpcshell defaults to
        # $APPDIR/plugins. We do the same here so we can carry on with
        # setting up every test with its own plugins directory.
        if not self.pluginsPath:
            self.pluginsPath = os.path.join(self.appPath, 'plugins')

        self.pluginsDir = self.setupPluginsDir()
        if self.pluginsDir:
            self.xpcsCmd.extend(['-p', self.pluginsDir])
+
    def cleanupDir(self, directory, name):
        """Remove directory, retrying once per second for up to TRY_LIMIT
        seconds; directories that still can't be removed are queued on
        cleanup_dir_list for a final pass at the end of the run."""
        if not os.path.exists(directory):
            return

        TRY_LIMIT = 25 # up to TRY_LIMIT attempts (one every second), because
                       # the Windows filesystem is slow to react to the changes
        try_count = 0
        while try_count < TRY_LIMIT:
            try:
                self.removeDir(directory)
            except OSError:
                self.log.info("Failed to remove directory: %s. Waiting." % directory)
                # We suspect the filesystem may still be making changes. Wait a
                # little bit and try again.
                time.sleep(1)
                try_count += 1
            else:
                # removed fine
                return

        # we try cleaning up again later at the end of the run
        self.cleanup_dir_list.append(directory)
+
+ def clean_temp_dirs(self, name):
+ # We don't want to delete the profile when running check-interactive
+ # or check-one.
+ if self.profileDir and not self.interactive and not self.singleFile:
+ self.cleanupDir(self.profileDir, name)
+
+ self.cleanupDir(self.tempDir, name)
+
+ if self.pluginsDir:
+ self.cleanupDir(self.pluginsDir, name)
+
+ def parse_output(self, output):
+ """Parses process output for structured messages and saves output as it is
+ read. Sets self.has_failure_output in case of evidence of a failure"""
+ for line_string in output.splitlines():
+ self.process_line(line_string)
+
+ if self.saw_proc_start and not self.saw_proc_end:
+ self.has_failure_output = True
+
+ def fix_text_output(self, line):
+ line = cleanup_encoding(line)
+ if self.stack_fixer_function is not None:
+ return self.stack_fixer_function(line)
+ return line
+
    def log_line(self, line):
        """Log a line of output (either a parser json object or text output from
        the test process"""
        if isinstance(line, basestring):
            # Plain text: sanitize encoding / fix stacks before logging.
            line = self.fix_text_output(line).rstrip('\r\n')
            self.log.process_output(self.proc_ident,
                                    line,
                                    command=self.complete_command)
        else:
            # Structured (mozlog) message: sanitize its text and tag it with
            # the originating thread (plus child process id, when present).
            if 'message' in line:
                line['message'] = self.fix_text_output(line['message'])
            if 'xpcshell_process' in line:
                line['thread'] = ' '.join([current_thread().name, line['xpcshell_process']])
            else:
                line['thread'] = current_thread().name
            self.log.log_raw(line)
+
+ def log_full_output(self):
+ """Logs any buffered output from the test process, and clears the buffer."""
+ if not self.output_lines:
+ return
+ self.log.info(">>>>>>>")
+ for line in self.output_lines:
+ self.log_line(line)
+ self.log.info("<<<<<<<")
+ self.output_lines = []
+
+ def report_message(self, message):
+ """Stores or logs a json log message in mozlog format."""
+ if self.verbose:
+ self.log_line(message)
+ else:
+ self.output_lines.append(message)
+
    def process_line(self, line_string):
        """ Parses a single line of output, determining its significance and
        reporting a message.
        """
        if not line_string.strip():
            return

        try:
            line_object = json.loads(line_string)
            if not isinstance(line_object, dict):
                # Valid JSON, but not a structured log message.
                self.report_message(line_string)
                return
        except ValueError:
            # Not JSON at all: treat as raw test output.
            self.report_message(line_string)
            return

        if ('action' not in line_object or
            line_object['action'] not in EXPECTED_LOG_ACTIONS):
            # The test process output JSON.
            self.report_message(line_string)
            return

        action = line_object['action']

        # An 'expected' field or an ERROR-level log marks failure output.
        self.has_failure_output = (self.has_failure_output or
                                   'expected' in line_object or
                                   action == 'log' and line_object['level'] == 'ERROR')

        self.report_message(line_object)

        # Track child-process lifecycle markers so communicate()/parse_output()
        # can detect a child that started but never completed.
        if action == 'log' and line_object['message'] == 'CHILD-TEST-STARTED':
            self.saw_proc_start = True
        elif action == 'log' and line_object['message'] == 'CHILD-TEST-COMPLETED':
            self.saw_proc_end = True
+
    def run_test(self):
        """Run an individual xpcshell test."""
        global gotSIGINT

        name = self.test_object['id']
        path = self.test_object['path']

        # Check for skipped tests
        if 'disabled' in self.test_object:
            message = self.test_object['disabled']
            if not message:
                message = 'disabled from xpcshell manifest'
            self.log.test_start(name)
            self.log.test_end(name, 'SKIP', message=message)

            self.retry = False
            self.keep_going = True
            return

        # Check for known-fail tests
        expect_pass = self.test_object['expected'] == 'pass'

        # By default self.appPath will equal the gre dir. If specified in the
        # xpcshell.ini file, set a different app dir for this test.
        if self.app_dir_key and self.app_dir_key in self.test_object:
            rel_app_dir = self.test_object[self.app_dir_key]
            rel_app_dir = os.path.join(self.xrePath, rel_app_dir)
            self.appPath = os.path.abspath(rel_app_dir)
        else:
            self.appPath = None

        test_dir = os.path.dirname(path)

        # Create a profile and a temp dir that the JS harness can stick
        # a profile and temporary data in
        self.profileDir = self.setupProfileDir()
        self.tempDir = self.setupTempDir()
        self.mozInfoJSPath = self.setupMozinfoJS()

        self.buildXpcsCmd()
        head_files, tail_files = self.getHeadAndTailFiles(self.test_object)
        cmdH = self.buildCmdHead(head_files, tail_files, self.xpcsCmd)

        # The test file will have to be loaded after the head files.
        cmdT = self.buildCmdTestFile(path)

        args = self.xpcsRunArgs[:]
        if 'debug' in self.test_object:
            args.insert(0, '-d')

        # The test name to log
        cmdI = ['-e', 'const _TEST_NAME = "%s"' % name]

        # Directory for javascript code coverage output, null by default.
        cmdC = ['-e', 'const _JSCOV_DIR = null']
        if self.jscovdir:
            cmdC = ['-e', 'const _JSCOV_DIR = "%s"' % self.jscovdir.replace('\\', '/')]
            self.complete_command = cmdH + cmdT + cmdI + cmdC + args
        else:
            self.complete_command = cmdH + cmdT + cmdI + args

        if self.test_object.get('dmd') == 'true':
            # Configure the environment for a DMD (dark matter detector) build.
            # NOTE(review): only linux, osx/darwin and win32 are handled; on any
            # other platform preloadEnvVar/libdmd would be unbound and raise
            # NameError below -- presumably DMD tests only run on these
            # platforms; confirm before relying on this elsewhere.
            if sys.platform.startswith('linux'):
                preloadEnvVar = 'LD_PRELOAD'
                libdmd = os.path.join(self.xrePath, 'libdmd.so')
            elif sys.platform == 'osx' or sys.platform == 'darwin':
                preloadEnvVar = 'DYLD_INSERT_LIBRARIES'
                # self.xrePath is <prefix>/Contents/Resources.
                # We need <prefix>/Contents/MacOS/libdmd.dylib.
                contents_dir = os.path.dirname(self.xrePath)
                libdmd = os.path.join(contents_dir, 'MacOS', 'libdmd.dylib')
            elif sys.platform == 'win32':
                preloadEnvVar = 'MOZ_REPLACE_MALLOC_LIB'
                libdmd = os.path.join(self.xrePath, 'dmd.dll')

            self.env['PYTHON'] = sys.executable
            self.env['BREAKPAD_SYMBOLS_PATH'] = self.symbolsPath
            self.env['DMD_PRELOAD_VAR'] = preloadEnvVar
            self.env['DMD_PRELOAD_VALUE'] = libdmd

        if self.test_object.get('subprocess') == 'true':
            self.env['PYTHON'] = sys.executable

        testTimeoutInterval = self.harness_timeout
        # Allow a test to request a multiple of the timeout if it is expected to take long
        if 'requesttimeoutfactor' in self.test_object:
            testTimeoutInterval *= int(self.test_object['requesttimeoutfactor'])

        testTimer = None
        # No watchdog timer when a human or a debugger is attached.
        if not self.interactive and not self.debuggerInfo and not self.jsDebuggerInfo:
            testTimer = Timer(testTimeoutInterval, lambda: self.testTimeout(proc))
            testTimer.start()

        proc = None
        process_output = None

        try:
            self.log.test_start(name)
            if self.verbose:
                self.logCommand(name, self.complete_command, test_dir)

            proc = self.launchProcess(self.complete_command,
                stdout=self.pStdout, stderr=self.pStderr, env=self.env, cwd=test_dir, timeout=testTimeoutInterval)

            if hasattr(proc, "pid"):
                self.proc_ident = proc.pid
            else:
                # On mobile, "proc" is just a file.
                self.proc_ident = name

            if self.interactive:
                self.log.info("%s | Process ID: %d" % (name, self.proc_ident))

            # Communicate returns a tuple of (stdout, stderr), however we always
            # redirect stderr to stdout, so the second element is ignored.
            process_output, _ = self.communicate(proc)

            if self.interactive:
                # Not sure what else to do here...
                self.keep_going = True
                return

            if testTimer:
                testTimer.cancel()

            if process_output:
                # For the remote case, stdout is not yet depleted, so we parse
                # it here all at once.
                self.parse_output(process_output)

            return_code = self.getReturnCode(proc)

            # TSan'd processes return 66 if races are detected. This isn't
            # good in the sense that there's no way to distinguish between
            # a process that would normally have returned zero but has races,
            # and a race-free process that returns 66. But I don't see how
            # to do better. This ambiguity is at least constrained to the
            # with-TSan case. It doesn't affect normal builds.
            #
            # This also assumes that the magic value 66 isn't overridden by
            # a TSAN_OPTIONS=exitcode=<number> environment variable setting.
            #
            TSAN_EXIT_CODE_WITH_RACES = 66

            return_code_ok = (return_code == 0 or
                              (self.usingTSan and
                               return_code == TSAN_EXIT_CODE_WITH_RACES))
            passed = (not self.has_failure_output) and return_code_ok

            status = 'PASS' if passed else 'FAIL'
            expected = 'PASS' if expect_pass else 'FAIL'
            message = 'xpcshell return code: %d' % return_code

            if self.timedout:
                # testTimeout() already reported and cleaned up this test.
                return

            if status != expected:
                if self.retry:
                    self.log.test_end(name, status, expected=status,
                                      message="Test failed or timed out, will retry")
                    self.clean_temp_dirs(path)
                    return

                self.log.test_end(name, status, expected=expected, message=message)
                self.log_full_output()

                self.failCount += 1

                if self.failureManifest:
                    # Record the failing test's manifest entry for later re-runs.
                    with open(self.failureManifest, 'a') as f:
                        f.write('[%s]\n' % self.test_object['path'])
                        for k, v in self.test_object.items():
                            f.write('%s = %s\n' % (k, v))

            else:
                # If TSan reports a race, dump the output, else we can't
                # diagnose what the problem was. See comments above about
                # the significance of TSAN_EXIT_CODE_WITH_RACES.
                if self.usingTSan and return_code == TSAN_EXIT_CODE_WITH_RACES:
                    self.log_full_output()

                self.log.test_end(name, status, expected=expected, message=message)
                if self.verbose:
                    self.log_full_output()

                self.retry = False

                if expect_pass:
                    self.passCount = 1
                else:
                    self.todoCount = 1

            if self.checkForCrashes(self.tempDir, self.symbolsPath, test_name=name):
                if self.retry:
                    self.clean_temp_dirs(path)
                    return

                # If we assert during shutdown there's a chance the test has passed
                # but we haven't logged full output, so do so here.
                self.log_full_output()
                self.failCount = 1

            if self.logfiles and process_output:
                self.createLogFile(name, process_output)

        finally:
            self.postCheck(proc)
            self.clean_temp_dirs(path)

        if gotSIGINT:
            self.log.error("Received SIGINT (control-C) during test execution")
            if self.keep_going:
                # User asked to keep going: swallow this SIGINT and continue.
                gotSIGINT = False
            else:
                self.keep_going = False
                return

        self.keep_going = True
+
+class XPCShellTests(object):
+
+ def __init__(self, log=None):
+ """ Initializes node status and logger. """
+ self.log = log
+ self.harness_timeout = HARNESS_TIMEOUT
+ self.nodeProc = {}
+
    def getTestManifest(self, manifest):
        """
        Resolve |manifest| to a TestManifest instance.

        |manifest| may be an existing TestManifest (returned as-is), a path
        to a manifest file, a path to a directory expected to contain
        xpcshell.ini, or None (fall back to the default tests/xpcshell.ini
        next to this script).  Exits the process if no manifest file can
        be found.
        """
        if isinstance(manifest, TestManifest):
            return manifest
        elif manifest is not None:
            manifest = os.path.normpath(os.path.abspath(manifest))
            if os.path.isfile(manifest):
                return TestManifest([manifest], strict=True)
            else:
                # Treat the argument as a directory containing xpcshell.ini.
                ini_path = os.path.join(manifest, "xpcshell.ini")
        else:
            # No manifest given: use the in-tree default.
            ini_path = os.path.join(SCRIPT_DIR, "tests", "xpcshell.ini")

        if os.path.exists(ini_path):
            return TestManifest([ini_path], strict=True)
        else:
            print >> sys.stderr, ("Failed to find manifest at %s; use --manifest "
                                  "to set path explicitly." % (ini_path,))
            sys.exit(1)
+
    def buildTestList(self, test_tags=None, test_paths=None):
        """
        Read the xpcshell.ini manifest and set self.alltests to be
        an array of test objects.

        If we are chunking tests, it will be done here as well.
        """

        if test_paths is None:
            test_paths = []

        # A single .js path selects exactly one test file; remember its
        # basename so the scheduler can force a sequential run later.
        if len(test_paths) == 1 and test_paths[0].endswith(".js"):
            self.singleFile = os.path.basename(test_paths[0])
        else:
            self.singleFile = None

        mp = self.getTestManifest(self.manifest)

        # Build the manifestparser filter chain: tag filters, path-prefix
        # filters, and (when not running a single file) chunking.
        filters = []
        if test_tags:
            filters.append(tags(test_tags))

        if test_paths:
            filters.append(pathprefix(test_paths))

        if self.singleFile is None and self.totalChunks > 1:
            filters.append(chunk_by_slice(self.thisChunk, self.totalChunks))
        try:
            self.alltests = mp.active_tests(filters=filters, **mozinfo.info)
        except TypeError:
            # active_tests() receives mozinfo.info as kwargs; dump it so a
            # bad key or value is diagnosable from the log.
            sys.stderr.write("*** offending mozinfo.info: %s\n" % repr(mozinfo.info))
            raise

        if len(self.alltests) == 0:
            self.log.error("no tests to run using specified "
                           "combination of filters: {}".format(
                               mp.fmt_filters()))

        if self.dump_tests:
            # --dump-tests mode: write the resolved test list as JSON and
            # exit without running anything.
            self.dump_tests = os.path.expanduser(self.dump_tests)
            assert os.path.exists(os.path.dirname(self.dump_tests))
            with open(self.dump_tests, 'w') as dumpFile:
                dumpFile.write(json.dumps({'active_tests': self.alltests}))

            self.log.info("Dumping active_tests to %s file." % self.dump_tests)
            sys.exit()
+
    def setAbsPath(self):
        """
        Set the absolute path for xpcshell, httpdjspath and xrepath.
        These 3 variables depend on input from the command line and we need to allow for absolute paths.
        This function is overloaded for a remote solution as os.path* won't work remotely.
        """
        self.testharnessdir = os.path.dirname(os.path.abspath(__file__))
        # head.js is loaded by xpcshell itself, which expects forward slashes.
        self.headJSPath = self.testharnessdir.replace("\\", "/") + "/head.js"
        self.xpcshell = os.path.abspath(self.xpcshell)

        if self.xrePath is None:
            # Default the XRE directory to the one containing the binary.
            self.xrePath = os.path.dirname(self.xpcshell)
            if mozinfo.isMac:
                # Check if we're run from an OSX app bundle and override
                # self.xrePath if we are.
                appBundlePath = os.path.join(os.path.dirname(os.path.dirname(self.xpcshell)), 'Resources')
                if os.path.exists(os.path.join(appBundlePath, 'application.ini')):
                    self.xrePath = appBundlePath
        else:
            self.xrePath = os.path.abspath(self.xrePath)

        # httpd.js belongs in xrePath/components, which is Contents/Resources on mac
        self.httpdJSPath = os.path.join(self.xrePath, 'components', 'httpd.js')
        self.httpdJSPath = self.httpdJSPath.replace('\\', '/')

        self.httpdManifest = os.path.join(self.xrePath, 'components', 'httpd.manifest')
        self.httpdManifest = self.httpdManifest.replace('\\', '/')

        if self.mozInfo is None:
            # No explicit mozinfo file: use the one shipped with the harness.
            self.mozInfo = os.path.join(self.testharnessdir, "mozinfo.json")
+
+ def buildCoreEnvironment(self):
+ """
+ Add environment variables likely to be used across all platforms, including remote systems.
+ """
+ # Make assertions fatal
+ self.env["XPCOM_DEBUG_BREAK"] = "stack-and-abort"
+ # Crash reporting interferes with debugging
+ if not self.debuggerInfo:
+ self.env["MOZ_CRASHREPORTER"] = "1"
+ # Don't launch the crash reporter client
+ self.env["MOZ_CRASHREPORTER_NO_REPORT"] = "1"
+ # Don't permit remote connections by default.
+ # MOZ_DISABLE_NONLOCAL_CONNECTIONS can be set to "0" to temporarily
+ # enable non-local connections for the purposes of local testing.
+ # Don't override the user's choice here. See bug 1049688.
+ self.env.setdefault('MOZ_DISABLE_NONLOCAL_CONNECTIONS', '1')
+
    def buildEnvironment(self):
        """
        Create and return a dictionary of self.env to include all the appropriate env variables and values.
        On a remote system, we overload this to set different values and are missing things like os.environ and PATH.
        """
        self.env = dict(os.environ)
        self.buildCoreEnvironment()
        # Make sure the dynamic loader can find the XRE libraries; the
        # variable differs per platform.
        if sys.platform == 'win32':
            self.env["PATH"] = self.env["PATH"] + ";" + self.xrePath
        elif sys.platform in ('os2emx', 'os2knix'):
            os.environ["BEGINLIBPATH"] = self.xrePath + ";" + self.env["BEGINLIBPATH"]
            os.environ["LIBPATHSTRICT"] = "T"
        elif sys.platform == 'osx' or sys.platform == "darwin":
            self.env["DYLD_LIBRARY_PATH"] = os.path.join(os.path.dirname(self.xrePath), 'MacOS')
        else: # unix or linux?
            if not "LD_LIBRARY_PATH" in self.env or self.env["LD_LIBRARY_PATH"] is None:
                self.env["LD_LIBRARY_PATH"] = self.xrePath
            else:
                self.env["LD_LIBRARY_PATH"] = ":".join([self.xrePath, self.env["LD_LIBRARY_PATH"]])

        usingASan = "asan" in self.mozInfo and self.mozInfo["asan"]
        usingTSan = "tsan" in self.mozInfo and self.mozInfo["tsan"]
        if usingASan or usingTSan:
            # symbolizer support
            llvmsym = os.path.join(self.xrePath, "llvm-symbolizer")
            if os.path.isfile(llvmsym):
                if usingASan:
                    self.env["ASAN_SYMBOLIZER_PATH"] = llvmsym
                else:
                    # Prepend to any TSAN_OPTIONS the user already set.
                    oldTSanOptions = self.env.get("TSAN_OPTIONS", "")
                    self.env["TSAN_OPTIONS"] = "external_symbolizer_path={} {}".format(llvmsym, oldTSanOptions)
                self.log.info("runxpcshelltests.py | using symbolizer at %s" % llvmsym)
            else:
                # Sanitizer traces will be unsymbolized; flag it loudly.
                self.log.error("TEST-UNEXPECTED-FAIL | runxpcshelltests.py | Failed to find symbolizer at %s" % llvmsym)

        return self.env
+
+ def getPipes(self):
+ """
+ Determine the value of the stdout and stderr for the test.
+ Return value is a list (pStdout, pStderr).
+ """
+ if self.interactive:
+ pStdout = None
+ pStderr = None
+ else:
+ if (self.debuggerInfo and self.debuggerInfo.interactive):
+ pStdout = None
+ pStderr = None
+ else:
+ if sys.platform == 'os2emx':
+ pStdout = None
+ else:
+ pStdout = PIPE
+ pStderr = STDOUT
+ return pStdout, pStderr
+
+ def verifyDirPath(self, dirname):
+ """
+ Simple wrapper to get the absolute path for a given directory name.
+ On a remote system, we need to overload this to work on the remote filesystem.
+ """
+ return os.path.abspath(dirname)
+
    def trySetupNode(self):
        """
        Run node for HTTP/2 tests, if available, and update mozinfo as
        appropriate (sets the 'hasNode' key that manifests filter on).
        """
        nodeMozInfo = {'hasNode': False} # Assume the worst
        nodeBin = None

        # We try to find the node executable in the path given to us by the user in
        # the MOZ_NODE_PATH environment variable
        localPath = os.getenv('MOZ_NODE_PATH', None)
        if localPath and os.path.exists(localPath) and os.path.isfile(localPath):
            try:
                version_str = subprocess.check_output([localPath, "--version"],
                                                      stderr=subprocess.STDOUT)
                # nodejs prefixes its version strings with "v"
                version = LooseVersion(version_str.lstrip('v'))
                # Use node only if node version is >=5.0.0 because
                # node did not support ALPN until this version.
                if version >= LooseVersion("5.0.0"):
                    nodeBin = localPath
            except (subprocess.CalledProcessError, OSError), e:
                self.log.error('Could not retrieve node version: %s' % str(e))

        if os.getenv('MOZ_ASSUME_NODE_RUNNING', None):
            # Caller promises the servers are up; skip spawning our own.
            self.log.info('Assuming required node servers are already running')
            nodeMozInfo['hasNode'] = True
        elif nodeBin:
            self.log.info('Found node at %s' % (nodeBin,))

            def startServer(name, serverJs):
                # Launch one node server script and record it in
                # self.nodeProc so shutdownNode() can stop it later.
                if os.path.exists(serverJs):
                    # OK, we found our server, let's try to get it running
                    self.log.info('Found %s at %s' % (name, serverJs))
                    try:
                        # We pipe stdin to node because the server will exit when its
                        # stdin reaches EOF
                        process = Popen([nodeBin, serverJs], stdin=PIPE, stdout=PIPE,
                                        stderr=PIPE, env=self.env, cwd=os.getcwd())
                        self.nodeProc[name] = process

                        # Check to make sure the server starts properly by waiting for it to
                        # tell us it's started
                        msg = process.stdout.readline()
                        if 'server listening' in msg:
                            nodeMozInfo['hasNode'] = True
                            # Export the chosen port so tests can connect to it.
                            searchObj = re.search( r'HTTP2 server listening on port (.*)', msg, 0)
                            if searchObj:
                                self.env["MOZHTTP2_PORT"] = searchObj.group(1)
                    except OSError, e:
                        # This occurs if the subprocess couldn't be started
                        self.log.error('Could not run %s server: %s' % (name, str(e)))

            myDir = os.path.split(os.path.abspath(__file__))[0]
            startServer('moz-http2', os.path.join(myDir, 'moz-http2', 'moz-http2.js'))

        mozinfo.update(nodeMozInfo)
+
    def shutdownNode(self):
        """
        Shut down our node process(es), if any exist, and dump whatever
        they wrote to stdout/stderr into the harness log.
        """
        for name, proc in self.nodeProc.iteritems():
            self.log.info('Node %s server shutting down ...' % name)
            if proc.poll() is not None:
                self.log.info('Node server %s already dead %s' % (name, proc.poll()))
            else:
                proc.terminate()

            def dumpOutput(fd, label):
                # Print a "Process <label>" banner before the first line only,
                # so empty streams produce no noise.
                firstTime = True
                for msg in fd:
                    if firstTime:
                        firstTime = False;
                        self.log.info('Process %s' % label)
                    self.log.info(msg)

            # NOTE(review): this reads the pipes right after terminate()
            # without wait(); presumably node flushes and closes quickly
            # enough in practice — confirm there is no hang risk here.
            dumpOutput(proc.stdout, "stdout")
            dumpOutput(proc.stderr, "stderr")
+
+ def buildXpcsRunArgs(self):
+ """
+ Add arguments to run the test or make it interactive.
+ """
+ if self.interactive:
+ self.xpcsRunArgs = [
+ '-e', 'print("To start the test, type |_execute_test();|.");',
+ '-i']
+ else:
+ self.xpcsRunArgs = ['-e', '_execute_test(); quit(0);']
+
+ def addTestResults(self, test):
+ self.passCount += test.passCount
+ self.failCount += test.failCount
+ self.todoCount += test.todoCount
+
+ def makeTestId(self, test_object):
+ """Calculate an identifier for a test based on its path or a combination of
+ its path and the source manifest."""
+
+ relpath_key = 'file_relpath' if 'file_relpath' in test_object else 'relpath'
+ path = test_object[relpath_key].replace('\\', '/');
+ if 'dupe-manifest' in test_object and 'ancestor-manifest' in test_object:
+ return '%s:%s' % (os.path.basename(test_object['ancestor-manifest']), path)
+ return path
+
    def runTests(self, xpcshell=None, xrePath=None, appPath=None, symbolsPath=None,
                 manifest=None, testPaths=None, mobileArgs=None, tempDir=None,
                 interactive=False, verbose=False, keepGoing=False, logfiles=True,
                 thisChunk=1, totalChunks=1, debugger=None,
                 debuggerArgs=None, debuggerInteractive=False,
                 profileName=None, mozInfo=None, sequential=False, shuffle=False,
                 testingModulesDir=None, pluginsPath=None,
                 testClass=XPCShellTestThread, failureManifest=None,
                 log=None, stream=None, jsDebugger=False, jsDebuggerPort=0,
                 test_tags=None, dump_tests=None, utility_path=None,
                 rerun_failures=False, failure_manifest=None, jscovdir=None, **otherOptions):
        """Run xpcshell tests.

        |xpcshell|, is the xpcshell executable to use to run the tests.
        |xrePath|, if provided, is the path to the XRE to use.
        |appPath|, if provided, is the path to an application directory.
        |symbolsPath|, if provided is the path to a directory containing
          breakpad symbols for processing crashes in tests.
        |manifest|, if provided, is a file containing a list of
          test directories to run.
        |testPaths|, if provided, is a list of paths to files or directories containing
          tests to run.
        |pluginsPath|, if provided, custom plugins directory to be returned from
          the xpcshell dir svc provider for NS_APP_PLUGINS_DIR_LIST.
        |interactive|, if set to True, indicates to provide an xpcshell prompt
          instead of automatically executing the test.
        |verbose|, if set to True, will cause stdout/stderr from tests to
          be printed always
        |logfiles|, if set to False, indicates not to save output to log files.
          Non-interactive only option.
        |debugger|, if set, specifies the name of the debugger that will be used
          to launch xpcshell.
        |debuggerArgs|, if set, specifies arguments to use with the debugger.
        |debuggerInteractive|, if set, allows the debugger to be run in interactive
          mode.
        |profileName|, if set, specifies the name of the application for the profile
          directory if running only a subset of tests.
        |mozInfo|, if set, specifies build configuration information, either as a filename containing JSON, or a dict.
        |shuffle|, if True, execute tests in random order.
        |testingModulesDir|, if provided, specifies where JS modules reside.
          xpcshell will register a resource handler mapping this path.
        |tempDir|, if provided, specifies a temporary directory to use.
        |otherOptions| may be present for the convenience of subclasses

        Returns True when every test passed (failCount == 0), False on
        failure or early abort (bad mozinfo file, SIGINT, --jsdebugger
        misuse).
        """

        global gotSIGINT

        # Try to guess modules directory.
        # This somewhat grotesque hack allows the buildbot machines to find the
        # modules directory without having to configure the buildbot hosts. This
        # code path should never be executed in local runs because the build system
        # should always set this argument.
        if not testingModulesDir:
            possible = os.path.join(here, os.path.pardir, 'modules')

            if os.path.isdir(possible):
                testingModulesDir = possible

        if rerun_failures:
            # Replace the manifest with one generated from the previous
            # run's failures, then forget those failures.
            if os.path.exists(failure_manifest):
                rerun_manifest = os.path.join(os.path.dirname(failure_manifest), "rerun.ini")
                shutil.copyfile(failure_manifest, rerun_manifest)
                os.remove(failure_manifest)
                manifest = rerun_manifest
            else:
                print >> sys.stderr, "No failures were found to re-run."
                sys.exit(1)

        if testingModulesDir:
            # The resource loader expects native paths. Depending on how we were
            # invoked, a UNIX style path may sneak in on Windows. We try to
            # normalize that.
            testingModulesDir = os.path.normpath(testingModulesDir)

            if not os.path.isabs(testingModulesDir):
                testingModulesDir = os.path.abspath(testingModulesDir)

            if not testingModulesDir.endswith(os.path.sep):
                testingModulesDir += os.path.sep

        self.debuggerInfo = None

        if debugger:
            self.debuggerInfo = mozdebug.get_debugger_info(debugger, debuggerArgs, debuggerInteractive)

        self.jsDebuggerInfo = None
        if jsDebugger:
            # A namedtuple lets us keep .port instead of ['port']
            JSDebuggerInfo = namedtuple('JSDebuggerInfo', ['port'])
            self.jsDebuggerInfo = JSDebuggerInfo(port=jsDebuggerPort)

        self.xpcshell = xpcshell
        self.xrePath = xrePath
        self.appPath = appPath
        self.symbolsPath = symbolsPath
        self.tempDir = os.path.normpath(tempDir or tempfile.gettempdir())
        self.manifest = manifest
        self.dump_tests = dump_tests
        self.interactive = interactive
        self.verbose = verbose
        self.keepGoing = keepGoing
        self.logfiles = logfiles
        self.totalChunks = totalChunks
        self.thisChunk = thisChunk
        self.profileName = profileName or "xpcshell"
        self.mozInfo = mozInfo
        self.testingModulesDir = testingModulesDir
        self.pluginsPath = pluginsPath
        self.sequential = sequential
        self.failure_manifest = failure_manifest
        self.jscovdir = jscovdir

        self.testCount = 0
        self.passCount = 0
        self.failCount = 0
        self.todoCount = 0

        self.setAbsPath()
        self.buildXpcsRunArgs()

        # Event used by test threads to signal the scheduler loop below.
        self.event = Event()

        # Handle filenames in mozInfo
        if not isinstance(self.mozInfo, dict):
            mozInfoFile = self.mozInfo
            if not os.path.isfile(mozInfoFile):
                self.log.error("Error: couldn't find mozinfo.json at '%s'. Perhaps you need to use --build-info-json?" % mozInfoFile)
                return False
            # NOTE(review): the file handle from open() is never closed
            # explicitly here; it is reclaimed by the GC — confirm this is
            # acceptable or wrap in a with-statement in a follow-up.
            self.mozInfo = json.load(open(mozInfoFile))

        # mozinfo.info is used as kwargs. Some builds are done with
        # an older Python that can't handle Unicode keys in kwargs.
        # All of the keys in question should be ASCII.
        fixedInfo = {}
        for k, v in self.mozInfo.items():
            if isinstance(k, unicode):
                k = k.encode('ascii')
            fixedInfo[k] = v
        self.mozInfo = fixedInfo

        mozinfo.update(self.mozInfo)

        self.stack_fixer_function = None
        if utility_path and os.path.exists(utility_path):
            self.stack_fixer_function = get_stack_fixer_function(utility_path, self.symbolsPath)

        # buildEnvironment() needs mozInfo, so we call it after mozInfo is initialized.
        self.buildEnvironment()

        # The appDirKey is a optional entry in either the default or individual test
        # sections that defines a relative application directory for test runs. If
        # defined we pass 'grePath/$appDirKey' for the -a parameter of the xpcshell
        # test harness.
        appDirKey = None
        if "appname" in self.mozInfo:
            appDirKey = self.mozInfo["appname"] + "-appdir"

        # We have to do this before we build the test list so we know whether or
        # not to run tests that depend on having the node http/2 server
        self.trySetupNode()

        pStdout, pStderr = self.getPipes()

        self.buildTestList(test_tags, testPaths)
        if self.singleFile:
            self.sequential = True

        if shuffle:
            random.shuffle(self.alltests)

        self.cleanup_dir_list = []
        self.try_again_list = []

        # Keyword arguments shared by every test thread we create below.
        kwargs = {
            'appPath': self.appPath,
            'xrePath': self.xrePath,
            'testingModulesDir': self.testingModulesDir,
            'debuggerInfo': self.debuggerInfo,
            'jsDebuggerInfo': self.jsDebuggerInfo,
            'pluginsPath': self.pluginsPath,
            'httpdManifest': self.httpdManifest,
            'httpdJSPath': self.httpdJSPath,
            'headJSPath': self.headJSPath,
            'tempDir': self.tempDir,
            'testharnessdir': self.testharnessdir,
            'profileName': self.profileName,
            'singleFile': self.singleFile,
            'env': self.env, # making a copy of this in the testthreads
            'symbolsPath': self.symbolsPath,
            'logfiles': self.logfiles,
            'xpcshell': self.xpcshell,
            'xpcsRunArgs': self.xpcsRunArgs,
            'failureManifest': self.failure_manifest,
            'jscovdir': self.jscovdir,
            'harness_timeout': self.harness_timeout,
            'stack_fixer_function': self.stack_fixer_function,
        }

        if self.sequential:
            # Allow user to kill hung xpcshell subprocess with SIGINT
            # when we are only running tests sequentially.
            signal.signal(signal.SIGINT, markGotSIGINT)

        if self.debuggerInfo:
            # Force a sequential run
            self.sequential = True

            # If we have an interactive debugger, disable SIGINT entirely.
            if self.debuggerInfo.interactive:
                signal.signal(signal.SIGINT, lambda signum, frame: None)

            if "lldb" in self.debuggerInfo.path:
                # Ask people to start debugging using 'process launch', see bug 952211.
                self.log.info("It appears that you're using LLDB to debug this test. " +
                              "Please use the 'process launch' command instead of the 'run' command to start xpcshell.")

        if self.jsDebuggerInfo:
            # The js debugger magic needs more work to do the right thing
            # if debugging multiple files.
            if len(self.alltests) != 1:
                self.log.error("Error: --jsdebugger can only be used with a single test!")
                return False

        # The test itself needs to know whether it is a tsan build, since
        # that has an effect on interpretation of the process return value.
        usingTSan = "tsan" in self.mozInfo and self.mozInfo["tsan"]

        # create a queue of all tests that will run
        tests_queue = deque()
        # also a list for the tests that need to be run sequentially
        sequential_tests = []
        for test_object in self.alltests:
            # Test identifiers are provided for the convenience of logging. These
            # start as path names but are rewritten in case tests from the same path
            # are re-run.

            path = test_object['path']
            test_object['id'] = self.makeTestId(test_object)

            if self.singleFile and not path.endswith(self.singleFile):
                continue

            self.testCount += 1

            test = testClass(test_object, self.event, self.cleanup_dir_list,
                             app_dir_key=appDirKey,
                             interactive=interactive,
                             verbose=verbose or test_object.get("verbose") == "true",
                             pStdout=pStdout, pStderr=pStderr,
                             keep_going=keepGoing, log=self.log, usingTSan=usingTSan,
                             mobileArgs=mobileArgs, **kwargs)
            if 'run-sequentially' in test_object or self.sequential:
                sequential_tests.append(test)
            else:
                tests_queue.append(test)

        if self.sequential:
            self.log.info("Running tests sequentially.")
        else:
            self.log.info("Using at most %d threads." % NUM_THREADS)

        # keep a set of NUM_THREADS running tests and start running the
        # tests in the queue at most NUM_THREADS at a time
        running_tests = set()
        keep_going = True
        exceptions = []
        tracebacks = []
        self.log.suite_start([t['id'] for t in self.alltests])

        while tests_queue or running_tests:
            # if we're not supposed to continue and all of the running tests
            # are done, stop
            if not keep_going and not running_tests:
                break

            # if there's room to run more tests, start running them
            while keep_going and tests_queue and (len(running_tests) < NUM_THREADS):
                test = tests_queue.popleft()
                running_tests.add(test)
                test.start()

            # queue is full (for now) or no more new tests,
            # process the finished tests so far

            # wait for at least one of the tests to finish
            self.event.wait(1)
            self.event.clear()

            # find what tests are done (might be more than 1)
            done_tests = set()
            for test in running_tests:
                if test.done:
                    done_tests.add(test)
                    test.join(1) # join with timeout so we don't hang on blocked threads
                    # if the test had trouble, we will try running it again
                    # at the end of the run
                    if test.retry or test.is_alive():
                        # if the join call timed out, test.is_alive => True
                        self.try_again_list.append(test.test_object)
                        continue
                    # did the test encounter any exception?
                    if test.exception:
                        exceptions.append(test.exception)
                        tracebacks.append(test.traceback)
                        # we won't add any more tests, will just wait for
                        # the currently running ones to finish
                        keep_going = False
                    keep_going = keep_going and test.keep_going
                    self.addTestResults(test)

            # make room for new tests to run
            running_tests.difference_update(done_tests)

        if keep_going:
            # run the other tests sequentially
            for test in sequential_tests:
                if not keep_going:
                    self.log.error("TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so stopped run. " \
                                   "(Use --keep-going to keep running tests after killing one with SIGINT)")
                    break
                # we don't want to retry these tests
                test.retry = False
                test.start()
                test.join()
                self.addTestResults(test)
                # did the test encounter any exception?
                if test.exception:
                    exceptions.append(test.exception)
                    tracebacks.append(test.traceback)
                    break
                keep_going = test.keep_going

        # retry tests that failed when run in parallel
        if self.try_again_list:
            self.log.info("Retrying tests that failed when run in parallel.")
        for test_object in self.try_again_list:
            test = testClass(test_object, self.event, self.cleanup_dir_list,
                             retry=False,
                             app_dir_key=appDirKey, interactive=interactive,
                             verbose=verbose, pStdout=pStdout, pStderr=pStderr,
                             keep_going=keepGoing, log=self.log, mobileArgs=mobileArgs,
                             **kwargs)
            test.start()
            test.join()
            self.addTestResults(test)
            # did the test encounter any exception?
            if test.exception:
                exceptions.append(test.exception)
                tracebacks.append(test.traceback)
                break
            keep_going = test.keep_going

        # restore default SIGINT behaviour
        signal.signal(signal.SIGINT, signal.SIG_DFL)

        self.shutdownNode()
        # Clean up any slacker directories that might be lying around
        # Some might fail because of windows taking too long to unlock them.
        # We don't do anything if this fails because the test slaves will have
        # their $TEMP dirs cleaned up on reboot anyway.
        for directory in self.cleanup_dir_list:
            try:
                shutil.rmtree(directory)
            except:
                self.log.info("%s could not be cleaned up." % directory)

        if exceptions:
            self.log.info("Following exceptions were raised:")
            for t in tracebacks:
                self.log.error(t)
            raise exceptions[0]

        if self.testCount == 0:
            self.log.error("No tests run. Did you pass an invalid --test-path?")
            self.failCount = 1

        self.log.info("INFO | Result summary:")
        self.log.info("INFO | Passed: %d" % self.passCount)
        self.log.info("INFO | Failed: %d" % self.failCount)
        self.log.info("INFO | Todo: %d" % self.todoCount)
        self.log.info("INFO | Retried: %d" % len(self.try_again_list))

        if gotSIGINT and not keepGoing:
            self.log.error("TEST-UNEXPECTED-FAIL | Received SIGINT (control-C), so stopped run. " \
                           "(Use --keep-going to keep running tests after killing one with SIGINT)")
            return False

        self.log.suite_end()
        return self.failCount == 0
+
+
def main():
    """Command-line entry point: parse arguments, set up structured logging,
    and run the xpcshell test suite, exiting non-zero on failure."""
    parser = parser_desktop()
    options = parser.parse_args()

    log = commandline.setup_logging("XPCShell", options, {"tbpl": sys.stdout})

    if options.xpcshell is None:
        # Without an xpcshell binary there is nothing to run; bail out now
        # instead of crashing later in setAbsPath() on a None path.
        print >> sys.stderr, """Must provide path to xpcshell using --xpcshell"""
        sys.exit(1)

    xpcsh = XPCShellTests(log)

    if options.interactive and not options.testPath:
        print >>sys.stderr, "Error: You must specify a test filename in interactive mode!"
        sys.exit(1)

    if not xpcsh.runTests(**vars(options)):
        sys.exit(1)
+
# Allow invoking the harness directly from the command line.
if __name__ == '__main__':
    main()
diff --git a/testing/xpcshell/selftest.py b/testing/xpcshell/selftest.py
new file mode 100755
index 0000000000..856f505fc1
--- /dev/null
+++ b/testing/xpcshell/selftest.py
@@ -0,0 +1,1344 @@
+#!/usr/bin/env python
+#
+# Any copyright is dedicated to the Public Domain.
+# http://creativecommons.org/publicdomain/zero/1.0/
+#
+
+import mozinfo
+import mozunit
+import os
+import pprint
+import re
+import shutil
+import sys
+import tempfile
+import unittest
+
+from buildconfig import substs
+from StringIO import StringIO
+from mozlog import structured
+from mozbuild.base import MozbuildObject
# Drop MOZ_OBJDIR before constructing the build object — presumably so
# MozbuildObject.from_environment() does its own objdir discovery rather
# than trusting a stale value; confirm against mozbuild docs.
os.environ.pop('MOZ_OBJDIR', None)
build_obj = MozbuildObject.from_environment()

from runxpcshelltests import XPCShellTests

mozinfo.find_and_update_from_json()

objdir = build_obj.topobjdir.encode("utf-8")

# Locate the xpcshell binary inside the objdir; the layout differs per
# platform (app bundle on Mac, dist/bin elsewhere, .exe suffix on Windows).
if mozinfo.isMac:
    xpcshellBin = os.path.join(objdir, "dist", substs['MOZ_MACBUNDLE_NAME'], "Contents", "MacOS", "xpcshell")
else:
    xpcshellBin = os.path.join(objdir, "dist", "bin", "xpcshell")
    if sys.platform == "win32":
        xpcshellBin += ".exe"
+
# Marker strings the selftests grep for in harness output.
TEST_PASS_STRING = "TEST-PASS"
TEST_FAIL_STRING = "TEST-UNEXPECTED-FAIL"

# Minimal passing/failing xpcshell test bodies.
SIMPLE_PASSING_TEST = "function run_test() { do_check_true(true); }"
SIMPLE_FAILING_TEST = "function run_test() { do_check_true(false); }"

# An uncaught DOM Promise rejection in an otherwise-passing test.
SIMPLE_UNCAUGHT_REJECTION_TEST = '''
function run_test() {
  Promise.reject(new Error("Test rejection."));
  do_check_true(true);
}
'''

# Same, but using Promise.jsm instead of the DOM Promise implementation.
SIMPLE_UNCAUGHT_REJECTION_JSM_TEST = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

Promise.reject(new Error("Test rejection."));

function run_test() {
  do_check_true(true);
}
'''

# Passing test written in the add_test() style.
ADD_TEST_SIMPLE = '''
function run_test() { run_next_test(); }

add_test(function test_simple() {
  do_check_true(true);
  run_next_test();
});
'''

# Failing test written in the add_test() style.
ADD_TEST_FAILING = '''
function run_test() { run_next_test(); }

add_test(function test_failing() {
  do_check_true(false);
  run_next_test();
});
'''

# Uncaught rejection inside an add_test() body (DOM Promise).
ADD_TEST_UNCAUGHT_REJECTION = '''
function run_test() { run_next_test(); }

add_test(function test_uncaught_rejection() {
  Promise.reject(new Error("Test rejection."));
  run_next_test();
});
'''

# Uncaught rejection inside an add_test() body (Promise.jsm).
ADD_TEST_UNCAUGHT_REJECTION_JSM = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

function run_test() { run_next_test(); }

add_test(function test_uncaught_rejection() {
  Promise.reject(new Error("Test rejection."));
  run_next_test();
});
'''

# Parent test that runs a passing test in a child process.
CHILD_TEST_PASSING = '''
function run_test () { run_next_test(); }

add_test(function test_child_simple () {
  run_test_in_child("test_pass.js");
  run_next_test();
});
'''

# Parent test that runs a failing test in a child process.
CHILD_TEST_FAILING = '''
function run_test () { run_next_test(); }

add_test(function test_child_simple () {
  run_test_in_child("test_fail.js");
  run_next_test();
});
'''

# Drives the child test harness directly via sendCommand().
CHILD_HARNESS_SIMPLE = '''
function run_test () { run_next_test(); }

add_test(function test_child_assert () {
  do_load_child_test_harness();
  do_test_pending("test child assertion");
  sendCommand("Assert.ok(true);", do_test_finished);
  run_next_test();
});
'''

# Child command that never completes, exercising hang detection.
# NOTE(review): the stray leading "+" on the continuation line below makes
# the JS concatenate a unary-plus expression into the command string —
# presumably intentional for this hang fixture; confirm against the
# selftest that consumes it before "fixing".
CHILD_TEST_HANG = '''
function run_test () { run_next_test(); }

add_test(function test_child_simple () {
  do_test_pending("hang test");
  do_load_child_test_harness();
  sendCommand("_testLogger.info('CHILD-TEST-STARTED'); " +
              + "const _TEST_FILE=['test_pass.js']; _execute_test(); ",
              do_test_finished);
  run_next_test();
});
'''

# Never calls do_test_finished(), so the harness must time it out.
SIMPLE_LOOPING_TEST = '''
function run_test () { run_next_test(); }

add_test(function test_loop () {
  do_test_pending()
});
'''

# Exercises non-ASCII output from a passing test.
PASSING_TEST_UNICODE = '''
function run_test () { run_next_test(); }

add_test(function test_unicode_print () {
  do_check_eq("\u201c\u201d", "\u201c\u201d");
  run_next_test();
});
'''

# A single generator-style add_task() task.
ADD_TASK_SINGLE = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

function run_test() { run_next_test(); }

add_task(function test_task() {
  yield Promise.resolve(true);
  yield Promise.resolve(false);
});
'''

# Two add_task() tasks in one file.
ADD_TASK_MULTIPLE = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

function run_test() { run_next_test(); }

add_task(function test_task() {
  yield Promise.resolve(true);
});

add_task(function test_2() {
  yield Promise.resolve(true);
});
'''

# A task that fails by yielding a rejected promise.
ADD_TASK_REJECTED = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

function run_test() { run_next_test(); }

add_task(function test_failing() {
  yield Promise.reject(new Error("I fail."));
});
'''

# A task that fails an assertion on a yielded value.
ADD_TASK_FAILURE_INSIDE = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

function run_test() { run_next_test(); }

add_task(function test() {
  let result = yield Promise.resolve(false);

  do_check_true(result);
});
'''

# Calling run_next_test() from inside add_task() is a usage error the
# harness should report.
ADD_TASK_RUN_NEXT_TEST = '''
function run_test() { run_next_test(); }

add_task(function () {
  Assert.ok(true);

  run_next_test();
});
'''
+
# A failing star-generator task, for stack-trace formatting checks.
ADD_TASK_STACK_TRACE = '''
Components.utils.import("resource://gre/modules/Promise.jsm", this);

function run_test() { run_next_test(); }

add_task(function* this_test_will_fail() {
  for (let i = 0; i < 10; ++i) {
    yield Promise.resolve();
  }
  Assert.ok(false);
});
'''

# Same, but written as a legacy (non-star) generator function.
ADD_TASK_STACK_TRACE_WITHOUT_STAR = '''
Components.utils.import("resource://gre/modules/Promise.jsm", this);

function run_test() { run_next_test(); }

add_task(function this_test_will_fail() {
  for (let i = 0; i < 10; ++i) {
    yield Promise.resolve();
  }
  Assert.ok(false);
});
'''

# do_throw() with a bare string argument.
ADD_TEST_THROW_STRING = '''
function run_test() {do_throw("Passing a string to do_throw")};
'''

# do_throw() with a plain object carrying message/fileName/stack.
ADD_TEST_THROW_OBJECT = '''
let error = {
  message: "Error object",
  fileName: "failure.js",
  stack: "ERROR STACK",
  toString: function() {return this.message;}
};
function run_test() {do_throw(error)};
'''

# do_report_unexpected_exception() with a plain object.
ADD_TEST_REPORT_OBJECT = '''
let error = {
  message: "Error object",
  fileName: "failure.js",
  stack: "ERROR STACK",
  toString: function() {return this.message;}
};
function run_test() {do_report_unexpected_exception(error)};
'''

# Emits a do_print() message, visible only in verbose mode.
ADD_TEST_VERBOSE = '''
function run_test() {do_print("a message from do_print")};
'''

# A test for genuine JS-generated Error objects
ADD_TEST_REPORT_REF_ERROR = '''
function run_test() {
  let obj = {blah: 0};
  try {
    obj.noSuchFunction();
  }
  catch (error) {
    do_report_unexpected_exception(error);
  }
};
'''

# A test for failure to load a test due to a syntax error
LOAD_ERROR_SYNTAX_ERROR = '''
function run_test(
'''

# A test for failure to load a test due to an error other than a syntax error
LOAD_ERROR_OTHER_ERROR = '''
function run_test() {
  yield "foo";
  return "foo"; // can't use return in a generator!
};
'''

# A test for asynchronous cleanup functions
ASYNC_CLEANUP = '''
function run_test() {
  Components.utils.import("resource://gre/modules/Promise.jsm", this);

  // The list of checkpoints in the order we encounter them.
  let checkpoints = [];

  // Cleanup tasks, in reverse order
  do_register_cleanup(function cleanup_checkout() {
    do_check_eq(checkpoints.join(""), "1234");
    do_print("At this stage, the test has succeeded");
    do_throw("Throwing an error to force displaying the log");
  });

  do_register_cleanup(function sync_cleanup_2() {
    checkpoints.push(4);
  });

  do_register_cleanup(function async_cleanup_2() {
    let deferred = Promise.defer();
    do_execute_soon(deferred.resolve);
    return deferred.promise.then(function() {
      checkpoints.push(3);
    });
  });

  do_register_cleanup(function sync_cleanup() {
    checkpoints.push(2);
  });

  do_register_cleanup(function async_cleanup() {
    let deferred = Promise.defer();
    do_execute_soon(deferred.resolve);
    return deferred.promise.then(function() {
      checkpoints.push(1);
    });
  });

}
'''

# A test to check that add_test() tests run without run_test()
NO_RUN_TEST_ADD_TEST = '''
add_test(function no_run_test_add_test() {
  do_check_true(true);
  run_next_test();
});
'''

# A test to check that add_task() tests run without run_test()
NO_RUN_TEST_ADD_TASK = '''
add_task(function no_run_test_add_task() {
  do_check_true(true);
});
'''

# A test to check that both add_task() and add_test() work without run_test()
NO_RUN_TEST_ADD_TEST_ADD_TASK = '''
add_test(function no_run_test_add_test() {
  do_check_true(true);
  run_next_test();
});

add_task(function no_run_test_add_task() {
  do_check_true(true);
});
'''

# A test to check that an empty test file without run_test(),
# add_test() or add_task() works.
NO_RUN_TEST_EMPTY_TEST = '''
// This is an empty test file.
'''

# Failing add_test() without run_test().
NO_RUN_TEST_ADD_TEST_FAIL = '''
add_test(function no_run_test_add_test_fail() {
  do_check_true(false);
  run_next_test();
});
'''

# Failing add_task() without run_test().
NO_RUN_TEST_ADD_TASK_FAIL = '''
add_task(function no_run_test_add_task_fail() {
  do_check_true(false);
});
'''

# Multiple add_task() tasks without run_test().
NO_RUN_TEST_ADD_TASK_MULTIPLE = '''
Components.utils.import("resource://gre/modules/Promise.jsm");

add_task(function test_task() {
  yield Promise.resolve(true);
});

add_task(function test_2() {
  yield Promise.resolve(true);
});
'''

# Verifies that the harness exposes mozinfo to the running test.
LOAD_MOZINFO = '''
function run_test() {
  do_check_neq(typeof mozinfo, undefined);
  do_check_neq(typeof mozinfo.os, undefined);
}
'''
+
+CHILD_MOZINFO = '''
+function run_test () { run_next_test(); }
+
+add_test(function test_child_mozinfo () {
+ run_test_in_child("test_mozinfo.js");
+ run_next_test();
+});
+'''
+class XPCShellTestsTests(unittest.TestCase):
+ """
+ Yes, these are unit tests for a unit test harness.
+ """
+ def setUp(self):
+ self.log = StringIO()
+ self.tempdir = tempfile.mkdtemp()
+ self.utility_path = os.path.join(objdir, 'dist', 'bin')
+ logger = structured.commandline.setup_logging("selftest%s" % id(self),
+ {},
+ {"tbpl": self.log})
+ self.x = XPCShellTests(logger)
+ self.x.harness_timeout = 15
+ self.symbols_path = None
+ candidate_path = os.path.join(build_obj.distdir, 'crashreporter-symbols')
+ if (os.path.isdir(candidate_path)):
+ self.symbols_path = candidate_path
+
+ def tearDown(self):
+ shutil.rmtree(self.tempdir)
+
+ def writeFile(self, name, contents):
+ """
+ Write |contents| to a file named |name| in the temp directory,
+ and return the full path to the file.
+ """
+ fullpath = os.path.join(self.tempdir, name)
+ with open(fullpath, "w") as f:
+ f.write(contents)
+ return fullpath
+
+ def writeManifest(self, tests):
+ """
+ Write an xpcshell.ini in the temp directory and set
+ self.manifest to its pathname. |tests| is a list containing
+ either strings (for test names), or tuples with a test name
+ as the first element and manifest conditions as the following
+ elements.
+ """
+ testlines = []
+ for t in tests:
+ testlines.append("[%s]" % (t if isinstance(t, basestring)
+ else t[0]))
+ if isinstance(t, tuple):
+ testlines.extend(t[1:])
+ self.manifest = self.writeFile("xpcshell.ini", """
+[DEFAULT]
+head =
+tail =
+
+""" + "\n".join(testlines))
+
+ def assertTestResult(self, expected, shuffle=False, verbose=False):
+ """
+ Assert that self.x.runTests with manifest=self.manifest
+ returns |expected|.
+ """
+ self.assertEquals(expected,
+ self.x.runTests(xpcshellBin,
+ symbolsPath=self.symbols_path,
+ manifest=self.manifest,
+ mozInfo=mozinfo.info,
+ shuffle=shuffle,
+ verbose=verbose,
+ sequential=True,
+ testingModulesDir=os.path.join(objdir, '_tests', 'modules'),
+ utility_path=self.utility_path),
+ msg="""Tests should have %s, log:
+========
+%s
+========
+""" % ("passed" if expected else "failed", self.log.getvalue()))
+
+ def _assertLog(self, s, expected):
+ l = self.log.getvalue()
+ self.assertEqual(expected, s in l,
+ msg="""Value %s %s in log:
+========
+%s
+========""" % (s, "expected" if expected else "not expected", l))
+
+ def assertInLog(self, s):
+ """
+ Assert that the string |s| is contained in self.log.
+ """
+ self._assertLog(s, True)
+
+ def assertNotInLog(self, s):
+ """
+ Assert that the string |s| is not contained in self.log.
+ """
+ self._assertLog(s, False)
+
+ def testPass(self):
+ """
+ Check that a simple test without any manifest conditions passes.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest(["test_basic.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testFail(self):
+ """
+ Check that a simple failing test without any manifest conditions fails.
+ """
+ self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
+ self.writeManifest(["test_basic.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ @unittest.skipIf(mozinfo.isWin or not mozinfo.info.get('debug'),
+ 'We don\'t have a stack fixer on hand for windows.')
+ def testAssertStack(self):
+ """
+ When an assertion is hit, we should produce a useful stack.
+ """
+ self.writeFile("test_assert.js", '''
+ add_test(function test_asserts_immediately() {
+ Components.classes["@mozilla.org/xpcom/debug;1"]
+ .getService(Components.interfaces.nsIDebug2)
+ .assertion("foo", "assertion failed", "test.js", 1)
+ run_next_test();
+ });
+ ''')
+
+ self.writeManifest(["test_assert.js"])
+ self.assertTestResult(False)
+
+ self.assertInLog("###!!! ASSERTION")
+ log_lines = self.log.getvalue().splitlines()
+ line_pat = "#\d\d:"
+ unknown_pat = "#\d\d\: \?\?\?\[.* \+0x[a-f0-9]+\]"
+ self.assertFalse(any(re.search(unknown_pat, line) for line in log_lines),
+                         "A stack frame without symbols was found in\n%s" % pprint.pformat(log_lines))
+ self.assertTrue(any(re.search(line_pat, line) for line in log_lines),
+ "No line resembling a stack frame was found in\n%s" % pprint.pformat(log_lines))
+
+ def testChildPass(self):
+ """
+ Check that a simple test running in a child process passes.
+ """
+ self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
+ self.writeFile("test_child_pass.js", CHILD_TEST_PASSING)
+ self.writeManifest(["test_child_pass.js"])
+
+ self.assertTestResult(True, verbose=True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog("CHILD-TEST-STARTED")
+ self.assertInLog("CHILD-TEST-COMPLETED")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+
+ def testChildFail(self):
+ """
+ Check that a simple failing test running in a child process fails.
+ """
+ self.writeFile("test_fail.js", SIMPLE_FAILING_TEST)
+ self.writeFile("test_child_fail.js", CHILD_TEST_FAILING)
+ self.writeManifest(["test_child_fail.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("CHILD-TEST-STARTED")
+ self.assertInLog("CHILD-TEST-COMPLETED")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testChildHang(self):
+ """
+ Check that incomplete output from a child process results in a
+ test failure.
+ """
+ self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
+ self.writeFile("test_child_hang.js", CHILD_TEST_HANG)
+ self.writeManifest(["test_child_hang.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("CHILD-TEST-STARTED")
+ self.assertNotInLog("CHILD-TEST-COMPLETED")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testChild(self):
+ """
+ Checks that calling do_load_child_test_harness without run_test_in_child
+ results in a usable test state. This test has a spurious failure when
+ run using |mach python-test|. See bug 1103226.
+ """
+ self.writeFile("test_child_assertions.js", CHILD_HARNESS_SIMPLE)
+ self.writeManifest(["test_child_assertions.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testSkipForAddTest(self):
+ """
+ Check that add_test is skipped if |skip_if| condition is true
+ """
+ self.writeFile("test_skip.js", """
+add_test({
+ skip_if: () => true,
+}, function test_should_be_skipped() {
+ do_check_true(false);
+ run_next_test();
+});
+""")
+ self.writeManifest(["test_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNotSkipForAddTask(self):
+ """
+ Check that add_task is not skipped if |skip_if| condition is false
+ """
+ self.writeFile("test_not_skip.js", """
+add_task({
+ skip_if: () => false,
+}, function test_should_not_be_skipped() {
+ do_check_true(true);
+});
+""")
+ self.writeManifest(["test_not_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testSkipForAddTask(self):
+ """
+ Check that add_task is skipped if |skip_if| condition is true
+ """
+ self.writeFile("test_skip.js", """
+add_task({
+ skip_if: () => true,
+}, function test_should_be_skipped() {
+ do_check_true(false);
+});
+""")
+ self.writeManifest(["test_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNotSkipForAddTest(self):
+ """
+ Check that add_test is not skipped if |skip_if| condition is false
+ """
+ self.writeFile("test_not_skip.js", """
+add_test({
+ skip_if: () => false,
+}, function test_should_not_be_skipped() {
+ do_check_true(true);
+ run_next_test();
+});
+""")
+ self.writeManifest(["test_not_skip.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog("TEST-SKIP")
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testSyntaxError(self):
+ """
+ Check that running a test file containing a syntax error produces
+ a test failure and expected output.
+ """
+ self.writeFile("test_syntax_error.js", '"')
+ self.writeManifest(["test_syntax_error.js"])
+
+ self.assertTestResult(False, verbose=True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testUnicodeInAssertMethods(self):
+ """
+ Check that passing unicode characters through an assertion method works.
+ """
+ self.writeFile("test_unicode_assert.js", PASSING_TEST_UNICODE)
+ self.writeManifest(["test_unicode_assert.js"])
+
+ self.assertTestResult(True, verbose=True)
+
+ @unittest.skipIf('MOZ_AUTOMATION' in os.environ,
+ 'Timeout code path occasionally times out (bug 1098121)')
+ def testHangingTimeout(self):
+ """
+ Check that a test that never finishes results in the correct error log.
+ """
+ self.writeFile("test_loop.js", SIMPLE_LOOPING_TEST)
+ self.writeManifest(["test_loop.js"])
+
+ old_timeout = self.x.harness_timeout
+ self.x.harness_timeout = 1
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog("TEST-UNEXPECTED-TIMEOUT")
+
+ self.x.harness_timeout = old_timeout
+
+ def testPassFail(self):
+ """
+ Check that running more than one test works.
+ """
+ self.writeFile("test_pass.js", SIMPLE_PASSING_TEST)
+ self.writeFile("test_fail.js", SIMPLE_FAILING_TEST)
+ self.writeManifest(["test_pass.js", "test_fail.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(2, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertInLog(TEST_FAIL_STRING)
+
+ def testSkip(self):
+ """
+ Check that a simple failing test skipped in the manifest does
+ not cause failure.
+ """
+ self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
+ self.writeManifest([("test_basic.js", "skip-if = true")])
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertNotInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testKnownFail(self):
+ """
+ Check that a simple failing test marked as known-fail in the manifest
+ does not cause failure.
+ """
+ self.writeFile("test_basic.js", SIMPLE_FAILING_TEST)
+ self.writeManifest([("test_basic.js", "fail-if = true")])
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(1, self.x.todoCount)
+ self.assertInLog("TEST-FAIL")
+ # This should be suppressed because the harness doesn't include
+ # the full log from the xpcshell run when things pass.
+ self.assertNotInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testUnexpectedPass(self):
+ """
+ Check that a simple failing test marked as known-fail in the manifest
+ that passes causes an unexpected pass.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest([("test_basic.js", "fail-if = true")])
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ # From the outer (Python) harness
+ self.assertInLog("TEST-UNEXPECTED-PASS")
+ self.assertNotInLog("TEST-KNOWN-FAIL")
+
+ def testReturnNonzero(self):
+ """
+ Check that a test where xpcshell returns nonzero fails.
+ """
+ self.writeFile("test_error.js", "throw 'foo'")
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testUncaughtRejection(self):
+ """
+ Ensure a simple test with an uncaught rejection is reported.
+ """
+ self.writeFile("test_simple_uncaught_rejection.js", SIMPLE_UNCAUGHT_REJECTION_TEST)
+ self.writeManifest(["test_simple_uncaught_rejection.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ if not substs.get('RELEASE_OR_BETA'):
+ # async stacks are currently not enabled in release builds.
+ self.assertInLog("test_simple_uncaught_rejection.js:3:3")
+ self.assertInLog("Test rejection.")
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testUncaughtRejectionJSM(self):
+ """
+ Ensure a simple test with an uncaught rejection from Promise.jsm is reported.
+ """
+ self.writeFile("test_simple_uncaught_rejection_jsm.js", SIMPLE_UNCAUGHT_REJECTION_JSM_TEST)
+ self.writeManifest(["test_simple_uncaught_rejection_jsm.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("test_simple_uncaught_rejection_jsm.js:4:16")
+ self.assertInLog("Test rejection.")
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTestSimple(self):
+ """
+ Ensure simple add_test() works.
+ """
+ self.writeFile("test_add_test_simple.js", ADD_TEST_SIMPLE)
+ self.writeManifest(["test_add_test_simple.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+
+ def testLogCorrectFileName(self):
+ """
+ Make sure a meaningful filename and line number is logged
+ by a passing test.
+ """
+ self.writeFile("test_add_test_simple.js", ADD_TEST_SIMPLE)
+ self.writeManifest(["test_add_test_simple.js"])
+
+ self.assertTestResult(True, verbose=True)
+ self.assertInLog("true == true")
+ self.assertNotInLog("[do_check_true :")
+ self.assertInLog("[test_simple : 5]")
+
+ def testAddTestFailing(self):
+ """
+ Ensure add_test() with a failing test is reported.
+ """
+ self.writeFile("test_add_test_failing.js", ADD_TEST_FAILING)
+ self.writeManifest(["test_add_test_failing.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTestUncaughtRejection(self):
+ """
+ Ensure add_test() with an uncaught rejection is reported.
+ """
+ self.writeFile("test_add_test_uncaught_rejection.js", ADD_TEST_UNCAUGHT_REJECTION)
+ self.writeManifest(["test_add_test_uncaught_rejection.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTestUncaughtRejectionJSM(self):
+ """
+ Ensure add_test() with an uncaught rejection from Promise.jsm is reported.
+ """
+ self.writeFile("test_add_test_uncaught_rejection_jsm.js", ADD_TEST_UNCAUGHT_REJECTION_JSM)
+ self.writeManifest(["test_add_test_uncaught_rejection_jsm.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTaskTestSingle(self):
+ """
+        Ensure add_task() with a single passing test works.
+ """
+ self.writeFile("test_add_task_simple.js", ADD_TASK_SINGLE)
+ self.writeManifest(["test_add_task_simple.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+
+ def testAddTaskTestMultiple(self):
+ """
+        Ensure multiple calls to add_task() work as expected.
+ """
+ self.writeFile("test_add_task_multiple.js",
+ ADD_TASK_MULTIPLE)
+ self.writeManifest(["test_add_task_multiple.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+
+ def testAddTaskTestRejected(self):
+ """
+ Ensure rejected task reports as failure.
+ """
+ self.writeFile("test_add_task_rejected.js",
+ ADD_TASK_REJECTED)
+ self.writeManifest(["test_add_task_rejected.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTaskTestFailureInside(self):
+ """
+ Ensure tests inside task are reported as failures.
+ """
+ self.writeFile("test_add_task_failure_inside.js",
+ ADD_TASK_FAILURE_INSIDE)
+ self.writeManifest(["test_add_task_failure_inside.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTaskRunNextTest(self):
+ """
+ Calling run_next_test() from inside add_task() results in failure.
+ """
+ self.writeFile("test_add_task_run_next_test.js",
+ ADD_TASK_RUN_NEXT_TEST)
+ self.writeManifest(["test_add_task_run_next_test.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+
+ def testAddTaskStackTrace(self):
+ """
+ Ensuring that calling Assert.ok(false) from inside add_task()
+ results in a human-readable stack trace.
+ """
+ self.writeFile("test_add_task_stack_trace.js",
+ ADD_TASK_STACK_TRACE)
+ self.writeManifest(["test_add_task_stack_trace.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog("this_test_will_fail")
+ self.assertInLog("run_next_test")
+ self.assertInLog("run_test")
+ self.assertNotInLog("Task.jsm")
+
+ def testAddTaskStackTraceWithoutStar(self):
+ """
+ Ensuring that calling Assert.ok(false) from inside add_task()
+ results in a human-readable stack trace. This variant uses deprecated
+ `function()` syntax instead of now standard `function*()`.
+ """
+ self.writeFile("test_add_task_stack_trace_without_star.js",
+ ADD_TASK_STACK_TRACE)
+ self.writeManifest(["test_add_task_stack_trace_without_star.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog("this_test_will_fail")
+ self.assertInLog("run_next_test")
+ self.assertInLog("run_test")
+ self.assertNotInLog("Task.jsm")
+
+ def testMissingHeadFile(self):
+ """
+ Ensure that missing head file results in fatal error.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest([("test_basic.js", "head = missing.js")])
+
+ raised = False
+
+ try:
+ # The actual return value is never checked because we raise.
+ self.assertTestResult(True)
+ except Exception, ex:
+ raised = True
+ self.assertEquals(ex.message[0:9], "head file")
+
+ self.assertTrue(raised)
+
+ def testMissingTailFile(self):
+ """
+ Ensure that missing tail file results in fatal error.
+ """
+ self.writeFile("test_basic.js", SIMPLE_PASSING_TEST)
+ self.writeManifest([("test_basic.js", "tail = missing.js")])
+
+ raised = False
+
+ try:
+ self.assertTestResult(True)
+ except Exception, ex:
+ raised = True
+ self.assertEquals(ex.message[0:9], "tail file")
+
+ self.assertTrue(raised)
+
+ def testRandomExecution(self):
+ """
+ Check that random execution doesn't break.
+ """
+ manifest = []
+ for i in range(0, 10):
+ filename = "test_pass_%d.js" % i
+ self.writeFile(filename, SIMPLE_PASSING_TEST)
+ manifest.append(filename)
+
+ self.writeManifest(manifest)
+ self.assertTestResult(True, shuffle=True)
+ self.assertEquals(10, self.x.testCount)
+ self.assertEquals(10, self.x.passCount)
+
+ def testDoThrowString(self):
+ """
+ Check that do_throw produces reasonable messages when the
+ input is a string instead of an object
+ """
+ self.writeFile("test_error.js", ADD_TEST_THROW_STRING)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("Passing a string to do_throw")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoThrowForeignObject(self):
+ """
+ Check that do_throw produces reasonable messages when the
+ input is a generic object with 'filename', 'message' and 'stack' attributes
+ but 'object instanceof Error' returns false
+ """
+ self.writeFile("test_error.js", ADD_TEST_THROW_OBJECT)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("failure.js")
+ self.assertInLog("Error object")
+ self.assertInLog("ERROR STACK")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportForeignObject(self):
+ """
+ Check that do_report_unexpected_exception produces reasonable messages when the
+ input is a generic object with 'filename', 'message' and 'stack' attributes
+ but 'object instanceof Error' returns false
+ """
+ self.writeFile("test_error.js", ADD_TEST_REPORT_OBJECT)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("failure.js")
+ self.assertInLog("Error object")
+ self.assertInLog("ERROR STACK")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportRefError(self):
+ """
+ Check that do_report_unexpected_exception produces reasonable messages when the
+ input is a JS-generated Error
+ """
+ self.writeFile("test_error.js", ADD_TEST_REPORT_REF_ERROR)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("test_error.js")
+ self.assertInLog("obj.noSuchFunction is not a function")
+ self.assertInLog("run_test@")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportSyntaxError(self):
+ """
+ Check that attempting to load a test file containing a syntax error
+ generates details of the error in the log
+ """
+ self.writeFile("test_error.js", LOAD_ERROR_SYNTAX_ERROR)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("test_error.js:3")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoReportNonSyntaxError(self):
+ """
+ Check that attempting to load a test file containing an error other
+ than a syntax error generates details of the error in the log
+ """
+ self.writeFile("test_error.js", LOAD_ERROR_OTHER_ERROR)
+ self.writeManifest(["test_error.js"])
+
+ self.assertTestResult(False)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertInLog("TypeError: generator function run_test returns a value at")
+ self.assertInLog("test_error.js:4")
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testDoPrintWhenVerboseNotExplicit(self):
+ """
+ Check that do_print() and similar calls that generate output do
+ not have the output when not run verbosely.
+ """
+ self.writeFile("test_verbose.js", ADD_TEST_VERBOSE)
+ self.writeManifest(["test_verbose.js"])
+
+ self.assertTestResult(True)
+ self.assertNotInLog("a message from do_print")
+
+ def testDoPrintWhenVerboseExplicit(self):
+ """
+ Check that do_print() and similar calls that generate output have the
+ output shown when run verbosely.
+ """
+ self.writeFile("test_verbose.js", ADD_TEST_VERBOSE)
+ self.writeManifest(["test_verbose.js"])
+ self.assertTestResult(True, verbose=True)
+ self.assertInLog("a message from do_print")
+
+ def testDoPrintWhenVerboseInManifest(self):
+ """
+ Check that do_print() and similar calls that generate output have the
+ output shown when 'verbose = true' is in the manifest, even when
+ not run verbosely.
+ """
+ self.writeFile("test_verbose.js", ADD_TEST_VERBOSE)
+ self.writeManifest([("test_verbose.js", "verbose = true")])
+
+ self.assertTestResult(True)
+ self.assertInLog("a message from do_print")
+
+ def testAsyncCleanup(self):
+ """
+        Check that do_register_cleanup nicely handles cleanup tasks that
+        return a promise
+ """
+ self.writeFile("test_asyncCleanup.js", ASYNC_CLEANUP)
+ self.writeManifest(["test_asyncCleanup.js"])
+ self.assertTestResult(False)
+ self.assertInLog("\"1234\" == \"1234\"")
+ self.assertInLog("At this stage, the test has succeeded")
+ self.assertInLog("Throwing an error to force displaying the log")
+
+ def testNoRunTestAddTest(self):
+ """
+ Check that add_test() works fine without run_test() in the test file.
+ """
+ self.writeFile("test_noRunTestAddTest.js", NO_RUN_TEST_ADD_TEST)
+ self.writeManifest(["test_noRunTestAddTest.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestAddTask(self):
+ """
+ Check that add_task() works fine without run_test() in the test file.
+ """
+ self.writeFile("test_noRunTestAddTask.js", NO_RUN_TEST_ADD_TASK)
+ self.writeManifest(["test_noRunTestAddTask.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestAddTestAddTask(self):
+ """
+ Check that both add_test() and add_task() work without run_test()
+ in the test file.
+ """
+ self.writeFile("test_noRunTestAddTestAddTask.js", NO_RUN_TEST_ADD_TEST_ADD_TASK)
+ self.writeManifest(["test_noRunTestAddTestAddTask.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestEmptyTest(self):
+ """
+ Check that the test passes on an empty file that contains neither
+ run_test() nor add_test(), add_task().
+ """
+ self.writeFile("test_noRunTestEmptyTest.js", NO_RUN_TEST_EMPTY_TEST)
+ self.writeManifest(["test_noRunTestEmptyTest.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testNoRunTestAddTestFail(self):
+ """
+ Check that test fails on using add_test() without run_test().
+ """
+ self.writeFile("test_noRunTestAddTestFail.js", NO_RUN_TEST_ADD_TEST_FAIL)
+ self.writeManifest(["test_noRunTestAddTestFail.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testNoRunTestAddTaskFail(self):
+ """
+ Check that test fails on using add_task() without run_test().
+ """
+ self.writeFile("test_noRunTestAddTaskFail.js", NO_RUN_TEST_ADD_TASK_FAIL)
+ self.writeManifest(["test_noRunTestAddTaskFail.js"])
+
+ self.assertTestResult(False)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(0, self.x.passCount)
+ self.assertEquals(1, self.x.failCount)
+ self.assertInLog(TEST_FAIL_STRING)
+ self.assertNotInLog(TEST_PASS_STRING)
+
+ def testNoRunTestAddTaskMultiple(self):
+ """
+        Check that multiple add_task() tests work without run_test().
+ """
+ self.writeFile("test_noRunTestAddTaskMultiple.js", NO_RUN_TEST_ADD_TASK_MULTIPLE)
+ self.writeManifest(["test_noRunTestAddTaskMultiple.js"])
+
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testMozinfo(self):
+ """
+ Check that mozinfo.json is loaded
+ """
+ self.writeFile("test_mozinfo.js", LOAD_MOZINFO)
+ self.writeManifest(["test_mozinfo.js"])
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+ def testChildMozinfo(self):
+ """
+ Check that mozinfo.json is loaded in child process
+ """
+ self.writeFile("test_mozinfo.js", LOAD_MOZINFO)
+ self.writeFile("test_child_mozinfo.js", CHILD_MOZINFO)
+ self.writeManifest(["test_child_mozinfo.js"])
+ self.assertTestResult(True)
+ self.assertEquals(1, self.x.testCount)
+ self.assertEquals(1, self.x.passCount)
+ self.assertEquals(0, self.x.failCount)
+ self.assertEquals(0, self.x.todoCount)
+ self.assertInLog(TEST_PASS_STRING)
+ self.assertNotInLog(TEST_FAIL_STRING)
+
+if __name__ == "__main__":
+ mozunit.main()
diff --git a/testing/xpcshell/xpcshell.eslintrc.js b/testing/xpcshell/xpcshell.eslintrc.js
new file mode 100644
index 0000000000..4f8fc3d518
--- /dev/null
+++ b/testing/xpcshell/xpcshell.eslintrc.js
@@ -0,0 +1,57 @@
+// Parent config file for all xpcshell files.
+module.exports = {
+ rules: {
+ "mozilla/import-headjs-globals": "warn",
+ "mozilla/mark-test-function-used": "warn",
+ "no-shadow": "error",
+ },
+
+ // All globals made available in the test environment.
+ "globals": {
+ "add_task": false,
+ "add_test": false,
+ "Assert": false,
+ "deepEqual": false,
+ "do_check_eq": false,
+ "do_check_false": false,
+ "do_check_matches": false,
+ "do_check_neq": false,
+ "do_check_null": false,
+ "do_check_true": false,
+ "do_execute_soon": false,
+ "do_get_cwd": false,
+ "do_get_file": false,
+ "do_get_idle": false,
+ "do_get_profile": false,
+ "do_get_tempdir": false,
+ "do_load_manifest": false,
+ "do_load_module": false,
+ "do_parse_document": false,
+ "do_print": false,
+ "do_register_cleanup": false,
+ "do_report_unexpected_exception": false,
+ "do_test_finished": false,
+ "do_test_pending": false,
+ "do_throw": false,
+ "do_timeout": false,
+ "equal": false,
+ "greater": false,
+ "greaterOrEqual": false,
+ "less": false,
+ "lessOrEqual": false,
+ "load": false,
+ "mozinfo": false,
+ "notDeepEqual": false,
+ "notEqual": false,
+ "notStrictEqual": false,
+ "ok": false,
+ "run_next_test": false,
+ "run_test": false,
+ "run_test_in_child": false,
+ "strictEqual": false,
+ "throws": false,
+ "todo": false,
+ "todo_check_false": false,
+ "todo_check_true": false,
+ }
+};
diff --git a/testing/xpcshell/xpcshellcommandline.py b/testing/xpcshell/xpcshellcommandline.py
new file mode 100644
index 0000000000..d2a8e6bc13
--- /dev/null
+++ b/testing/xpcshell/xpcshellcommandline.py
@@ -0,0 +1,166 @@
+import argparse
+
+from mozlog import commandline
+
+
+def add_common_arguments(parser):
+ parser.add_argument("--app-path",
+ type=unicode, dest="appPath", default=None,
+ help="application directory (as opposed to XRE directory)")
+ parser.add_argument("--interactive",
+ action="store_true", dest="interactive", default=False,
+ help="don't automatically run tests, drop to an xpcshell prompt")
+ parser.add_argument("--verbose",
+ action="store_true", dest="verbose", default=False,
+ help="always print stdout and stderr from tests")
+ parser.add_argument("--keep-going",
+ action="store_true", dest="keepGoing", default=False,
+ help="continue running tests after test killed with control-C (SIGINT)")
+ parser.add_argument("--logfiles",
+ action="store_true", dest="logfiles", default=True,
+ help="create log files (default, only used to override --no-logfiles)")
+ parser.add_argument("--dump-tests", type=str, dest="dump_tests", default=None,
+ help="Specify path to a filename to dump all the tests that will be run")
+ parser.add_argument("--manifest",
+ type=unicode, dest="manifest", default=None,
+ help="Manifest of test directories to use")
+ parser.add_argument("--no-logfiles",
+ action="store_false", dest="logfiles",
+ help="don't create log files")
+ parser.add_argument("--sequential",
+ action="store_true", dest="sequential", default=False,
+ help="Run all tests sequentially")
+ parser.add_argument("--temp-dir",
+ dest="tempDir", default=None,
+ help="Directory to use for temporary files")
+ parser.add_argument("--testing-modules-dir",
+ dest="testingModulesDir", default=None,
+ help="Directory where testing modules are located.")
+ parser.add_argument("--test-plugin-path",
+ type=str, dest="pluginsPath", default=None,
+ help="Path to the location of a plugins directory containing the test plugin or plugins required for tests. "
+ "By default xpcshell's dir svc provider returns gre/plugins. Use test-plugin-path to add a directory "
+ "to return for NS_APP_PLUGINS_DIR_LIST when queried.")
+ parser.add_argument("--total-chunks",
+ type=int, dest="totalChunks", default=1,
+ help="how many chunks to split the tests up into")
+ parser.add_argument("--this-chunk",
+ type=int, dest="thisChunk", default=1,
+ help="which chunk to run between 1 and --total-chunks")
+ parser.add_argument("--profile-name",
+ type=str, dest="profileName", default=None,
+ help="name of application profile being tested")
+ parser.add_argument("--build-info-json",
+ type=str, dest="mozInfo", default=None,
+ help="path to a mozinfo.json including information about the build configuration. defaults to looking for mozinfo.json next to the script.")
+ parser.add_argument("--shuffle",
+ action="store_true", dest="shuffle", default=False,
+ help="Execute tests in random order")
+ parser.add_argument("--xre-path",
+ action="store", type=str, dest="xrePath",
+ # individual scripts will set a sane default
+ default=None,
+ help="absolute path to directory containing XRE (probably xulrunner)")
+ parser.add_argument("--symbols-path",
+ action="store", type=str, dest="symbolsPath",
+ default=None,
+ help="absolute path to directory containing breakpad symbols, or the URL of a zip file containing symbols")
+ parser.add_argument("--jscov-dir-prefix",
+ action="store", type=str, dest="jscovdir",
+ default=argparse.SUPPRESS,
+ help="Directory to store per-test javascript line coverage data as json.")
+ parser.add_argument("--debugger",
+ action="store", dest="debugger",
+ help="use the given debugger to launch the application")
+ parser.add_argument("--debugger-args",
+ action="store", dest="debuggerArgs",
+ help="pass the given args to the debugger _before_ "
+ "the application on the command line")
+ parser.add_argument("--debugger-interactive",
+ action="store_true", dest="debuggerInteractive",
+ help="prevents the test harness from redirecting "
+ "stdout and stderr for interactive debuggers")
+ parser.add_argument("--jsdebugger", dest="jsDebugger", action="store_true",
+ help="Waits for a devtools JS debugger to connect before "
+ "starting the test.")
+ parser.add_argument("--jsdebugger-port", type=int, dest="jsDebuggerPort",
+ default=6000,
+ help="The port to listen on for a debugger connection if "
+ "--jsdebugger is specified.")
+ parser.add_argument("--tag",
+ action="append", dest="test_tags",
+ default=None,
+ help="filter out tests that don't have the given tag. Can be "
+ "used multiple times in which case the test must contain "
+ "at least one of the given tags.")
+ parser.add_argument("--utility-path",
+ action="store", dest="utility_path",
+ default=None,
+ help="Path to a directory containing utility programs, such "
+ "as stack fixer scripts.")
+ parser.add_argument("--xpcshell",
+ action="store", dest="xpcshell",
+ default=None,
+ help="Path to xpcshell binary")
+ # This argument can be just present, or the path to a manifest file. The
+ # just-present case is usually used for mach which can provide a default
+ # path to the failure file from the previous run
+ parser.add_argument("--rerun-failures",
+ action="store_true",
+ help="Rerun failures from the previous run, if any")
+ parser.add_argument("--failure-manifest",
+ action="store",
+ help="Path to a manifest file from which to rerun failures "
+ "(with --rerun-failure) or in which to record failed tests")
+ parser.add_argument("testPaths", nargs="*", default=None,
+ help="Paths of tests to run.")
+
+def add_remote_arguments(parser):
+ parser.add_argument("--deviceIP", action="store", type=str, dest="deviceIP",
+ help="ip address of remote device to test")
+
+ parser.add_argument("--devicePort", action="store", type=str, dest="devicePort",
+ default=20701, help="port of remote device to test")
+
+ parser.add_argument("--dm_trans", action="store", type=str, dest="dm_trans",
+ choices=["adb", "sut"], default="adb",
+ help="the transport to use to communicate with device: [adb|sut]; default=adb")
+
+ parser.add_argument("--objdir", action="store", type=str, dest="objdir",
+ help="local objdir, containing xpcshell binaries")
+
+
+ parser.add_argument("--apk", action="store", type=str, dest="localAPK",
+ help="local path to Fennec APK")
+
+
+ parser.add_argument("--noSetup", action="store_false", dest="setup", default=True,
+ help="do not copy any files to device (to be used only if device is already setup)")
+
+ parser.add_argument("--local-lib-dir", action="store", type=str, dest="localLib",
+ help="local path to library directory")
+
+ parser.add_argument("--local-bin-dir", action="store", type=str, dest="localBin",
+ help="local path to bin directory")
+
+ parser.add_argument("--remoteTestRoot", action="store", type=str, dest="remoteTestRoot",
+ help="remote directory to use as test root (eg. /mnt/sdcard/tests or /data/local/tests)")
+
+
+def parser_desktop():
+ parser = argparse.ArgumentParser()
+ add_common_arguments(parser)
+ commandline.add_logging_group(parser)
+
+ return parser
+
+
+def parser_remote():
+ parser = argparse.ArgumentParser()
+ common = parser.add_argument_group("Common Options")
+ add_common_arguments(common)
+ remote = parser.add_argument_group("Remote Options")
+ add_remote_arguments(remote)
+ commandline.add_logging_group(parser)
+
+ return parser