summaryrefslogtreecommitdiff
path: root/js/public
diff options
context:
space:
mode:
authorMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
committerMatt A. Tobin <mattatobin@localhost.localdomain>2018-02-02 04:16:08 -0500
commit5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree10027f336435511475e392454359edea8e25895d /js/public
parent49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
downloaduxp-5f8de423f190bbb79a62f804151bc24824fa32d8.tar.gz
Add m-esr52 at 52.6.0
Diffstat (limited to 'js/public')
-rw-r--r--js/public/CallArgs.h369
-rw-r--r--js/public/CallNonGenericMethod.h117
-rw-r--r--js/public/CharacterEncoding.h338
-rw-r--r--js/public/Class.h995
-rw-r--r--js/public/Conversions.h581
-rw-r--r--js/public/Date.h170
-rw-r--r--js/public/Debug.h384
-rw-r--r--js/public/GCAPI.h723
-rw-r--r--js/public/GCAnnotations.h57
-rw-r--r--js/public/GCHashTable.h399
-rw-r--r--js/public/GCPolicyAPI.h164
-rw-r--r--js/public/GCVariant.h198
-rw-r--r--js/public/GCVector.h249
-rw-r--r--js/public/HashTable.h1880
-rw-r--r--js/public/HeapAPI.h406
-rw-r--r--js/public/Id.h207
-rw-r--r--js/public/Initialization.h125
-rw-r--r--js/public/LegacyIntTypes.h59
-rw-r--r--js/public/MemoryMetrics.h971
-rw-r--r--js/public/Principals.h132
-rw-r--r--js/public/ProfilingFrameIterator.h206
-rw-r--r--js/public/ProfilingStack.h208
-rw-r--r--js/public/Proxy.h632
-rw-r--r--js/public/Realm.h42
-rw-r--r--js/public/RequiredDefines.h34
-rw-r--r--js/public/RootingAPI.h1330
-rw-r--r--js/public/SliceBudget.h91
-rw-r--r--js/public/StructuredClone.h359
-rw-r--r--js/public/SweepingAPI.h65
-rw-r--r--js/public/TraceKind.h212
-rw-r--r--js/public/TracingAPI.h403
-rw-r--r--js/public/TrackedOptimizationInfo.h285
-rw-r--r--js/public/TypeDecls.h79
-rw-r--r--js/public/UbiNode.h1146
-rw-r--r--js/public/UbiNodeBreadthFirst.h244
-rw-r--r--js/public/UbiNodeCensus.h252
-rw-r--r--js/public/UbiNodeDominatorTree.h677
-rw-r--r--js/public/UbiNodePostOrder.h191
-rw-r--r--js/public/UbiNodeShortestPaths.h350
-rw-r--r--js/public/UniquePtr.h61
-rw-r--r--js/public/Utility.h577
-rw-r--r--js/public/Value.h1509
-rw-r--r--js/public/Vector.h45
-rw-r--r--js/public/WeakMapPtr.h46
44 files changed, 17568 insertions, 0 deletions
diff --git a/js/public/CallArgs.h b/js/public/CallArgs.h
new file mode 100644
index 0000000000..6e6164e55a
--- /dev/null
+++ b/js/public/CallArgs.h
@@ -0,0 +1,369 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Helper classes encapsulating access to the callee, |this| value, arguments,
+ * and argument count for a call/construct operation.
+ *
+ * JS::CallArgs encapsulates access to a JSNative's un-abstracted
+ * |unsigned argc, Value* vp| arguments. The principal way to create a
+ * JS::CallArgs is using JS::CallArgsFromVp:
+ *
+ * // If provided no arguments or a non-numeric first argument, return zero.
+ * // Otherwise return |this| exactly as given, without boxing.
+ * static bool
+ * Func(JSContext* cx, unsigned argc, JS::Value* vp)
+ * {
+ * JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+ *
+ * // Guard against no arguments or a non-numeric arg0.
+ * if (args.length() == 0 || !args[0].isNumber()) {
+ * args.rval().setInt32(0);
+ * return true;
+ * }
+ *
+ * // Access to the callee must occur before accessing/setting
+ * // the return value.
+ * JSObject& callee = args.callee();
+ * args.rval().setObject(callee);
+ *
+ * // callee() and calleev() will now assert.
+ *
+ * // It's always fine to access thisv().
+ * HandleValue thisv = args.thisv();
+ * args.rval().set(thisv);
+ *
+ * // As the return value was last set to |this|, returns |this|.
+ * return true;
+ * }
+ *
+ * CallArgs is exposed publicly and used internally. Not all parts of its
+ * public interface are meant to be used by embedders! See inline comments to
+ * for details.
+ *
+ * It's possible (albeit deprecated) to manually index into |vp| to access the
+ * callee, |this|, and arguments of a function, and to set its return value.
+ * It's also possible to use the supported API of JS_CALLEE, JS_THIS, JS_ARGV,
+ * JS_RVAL, and JS_SET_RVAL to the same ends.
+ *
+ * But neither API has the error-handling or moving-GC correctness of CallArgs.
+ * New code should use CallArgs instead whenever possible.
+ *
+ * The eventual plan is to change JSNative to take |const CallArgs&| directly,
+ * for automatic assertion of correct use and to make calling functions more
+ * efficient. Embedders should start internally switching away from using
+ * |argc| and |vp| directly, except to create a |CallArgs|. Then, when an
+ * eventual release making that change occurs, porting efforts will require
+ * changing methods' signatures but won't require invasive changes to the
+ * methods' implementations, potentially under time pressure.
+ */
+
+#ifndef js_CallArgs_h
+#define js_CallArgs_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/TypeTraits.h"
+
+#include "jstypes.h"
+
+#include "js/RootingAPI.h"
+#include "js/Value.h"
+
+/* Typedef for native functions called by the JS VM. */
+typedef bool
+(* JSNative)(JSContext* cx, unsigned argc, JS::Value* vp);
+
+namespace JS {
+
+extern JS_PUBLIC_DATA(const HandleValue) UndefinedHandleValue;
+
+namespace detail {
+
+/*
+ * Compute |this| for the |vp| inside a JSNative, either boxing primitives or
+ * replacing with the global object as necessary.
+ */
+extern JS_PUBLIC_API(Value)
+ComputeThis(JSContext* cx, JS::Value* vp);
+
+#ifdef JS_DEBUG
+extern JS_PUBLIC_API(void)
+CheckIsValidConstructible(const Value& v);
+#endif
+
+class MOZ_STACK_CLASS IncludeUsedRval
+{
+ protected:
+#ifdef JS_DEBUG
+ mutable bool usedRval_;
+ void setUsedRval() const { usedRval_ = true; }
+ void clearUsedRval() const { usedRval_ = false; }
+ void assertUnusedRval() const { MOZ_ASSERT(!usedRval_); }
+#else
+ void setUsedRval() const {}
+ void clearUsedRval() const {}
+ void assertUnusedRval() const {}
+#endif
+};
+
+class MOZ_STACK_CLASS NoUsedRval
+{
+ protected:
+ void setUsedRval() const {}
+ void clearUsedRval() const {}
+ void assertUnusedRval() const {}
+};
+
+template<class WantUsedRval>
+class MOZ_STACK_CLASS CallArgsBase : public WantUsedRval
+{
+ static_assert(mozilla::IsSame<WantUsedRval, IncludeUsedRval>::value ||
+ mozilla::IsSame<WantUsedRval, NoUsedRval>::value,
+ "WantUsedRval can only be IncludeUsedRval or NoUsedRval");
+
+ protected:
+ Value* argv_;
+ unsigned argc_;
+ bool constructing_;
+
+ public:
+ // CALLEE ACCESS
+
+ /*
+ * Returns the function being called, as a value. Must not be called after
+ * rval() has been used!
+ */
+ HandleValue calleev() const {
+ this->assertUnusedRval();
+ return HandleValue::fromMarkedLocation(&argv_[-2]);
+ }
+
+ /*
+ * Returns the function being called, as an object. Must not be called
+ * after rval() has been used!
+ */
+ JSObject& callee() const {
+ return calleev().toObject();
+ }
+
+ // CALLING/CONSTRUCTING-DIFFERENTIATIONS
+
+ bool isConstructing() const {
+ if (!argv_[-1].isMagic())
+ return false;
+
+#ifdef JS_DEBUG
+ if (!this->usedRval_)
+ CheckIsValidConstructible(calleev());
+#endif
+
+ return true;
+ }
+
+ MutableHandleValue newTarget() const {
+ MOZ_ASSERT(constructing_);
+ return MutableHandleValue::fromMarkedLocation(&this->argv_[argc_]);
+ }
+
+ /*
+ * Returns the |this| value passed to the function. This method must not
+ * be called when the function is being called as a constructor via |new|.
+ * The value may or may not be an object: it is the individual function's
+ * responsibility to box the value if needed.
+ */
+ HandleValue thisv() const {
+ // Some internal code uses thisv() in constructing cases, so don't do
+ // this yet.
+ // MOZ_ASSERT(!argv_[-1].isMagic(JS_IS_CONSTRUCTING));
+ return HandleValue::fromMarkedLocation(&argv_[-1]);
+ }
+
+ Value computeThis(JSContext* cx) const {
+ if (thisv().isObject())
+ return thisv();
+
+ return ComputeThis(cx, base());
+ }
+
+ // ARGUMENTS
+
+ /* Returns the number of arguments. */
+ unsigned length() const { return argc_; }
+
+ /* Returns the i-th zero-indexed argument. */
+ MutableHandleValue operator[](unsigned i) const {
+ MOZ_ASSERT(i < argc_);
+ return MutableHandleValue::fromMarkedLocation(&this->argv_[i]);
+ }
+
+ /*
+ * Returns the i-th zero-indexed argument, or |undefined| if there's no
+ * such argument.
+ */
+ HandleValue get(unsigned i) const {
+ return i < length()
+ ? HandleValue::fromMarkedLocation(&this->argv_[i])
+ : UndefinedHandleValue;
+ }
+
+ /*
+ * Returns true if the i-th zero-indexed argument is present and is not
+ * |undefined|.
+ */
+ bool hasDefined(unsigned i) const {
+ return i < argc_ && !this->argv_[i].isUndefined();
+ }
+
+ // RETURN VALUE
+
+ /*
+ * Returns the currently-set return value. The initial contents of this
+ * value are unspecified. Once this method has been called, callee() and
+ * calleev() can no longer be used. (If you're compiling against a debug
+ * build of SpiderMonkey, these methods will assert to aid debugging.)
+ *
+ * If the method you're implementing succeeds by returning true, you *must*
+ * set this. (SpiderMonkey doesn't currently assert this, but it will do
+ * so eventually.) You don't need to use or change this if your method
+ * fails.
+ */
+ MutableHandleValue rval() const {
+ this->setUsedRval();
+ return MutableHandleValue::fromMarkedLocation(&argv_[-2]);
+ }
+
+ public:
+ // These methods are publicly exposed, but they are *not* to be used when
+ // implementing a JSNative method and encapsulating access to |vp| within
+ // it. You probably don't want to use these!
+
+ void setCallee(const Value& aCalleev) const {
+ this->clearUsedRval();
+ argv_[-2] = aCalleev;
+ }
+
+ void setThis(const Value& aThisv) const {
+ argv_[-1] = aThisv;
+ }
+
+ MutableHandleValue mutableThisv() const {
+ return MutableHandleValue::fromMarkedLocation(&argv_[-1]);
+ }
+
+ public:
+ // These methods are publicly exposed, but we're unsure of the interfaces
+ // (because they're hackish and drop assertions). Avoid using these if you
+ // can.
+
+ Value* array() const { return argv_; }
+ Value* end() const { return argv_ + argc_ + constructing_; }
+
+ public:
+ // These methods are only intended for internal use. Embedders shouldn't
+ // use them!
+
+ Value* base() const { return argv_ - 2; }
+
+ Value* spAfterCall() const {
+ this->setUsedRval();
+ return argv_ - 1;
+ }
+};
+
+} // namespace detail
+
+class MOZ_STACK_CLASS CallArgs : public detail::CallArgsBase<detail::IncludeUsedRval>
+{
+ private:
+ friend CallArgs CallArgsFromVp(unsigned argc, Value* vp);
+ friend CallArgs CallArgsFromSp(unsigned stackSlots, Value* sp, bool constructing);
+
+ static CallArgs create(unsigned argc, Value* argv, bool constructing) {
+ CallArgs args;
+ args.clearUsedRval();
+ args.argv_ = argv;
+ args.argc_ = argc;
+ args.constructing_ = constructing;
+#ifdef DEBUG
+ for (unsigned i = 0; i < argc; ++i)
+ MOZ_ASSERT_IF(argv[i].isMarkable(), !GCThingIsMarkedGray(GCCellPtr(argv[i])));
+#endif
+ return args;
+ }
+
+ public:
+ /*
+ * Returns true if there are at least |required| arguments passed in. If
+ * false, it reports an error message on the context.
+ */
+ JS_PUBLIC_API(bool) requireAtLeast(JSContext* cx, const char* fnname, unsigned required) const;
+
+};
+
+MOZ_ALWAYS_INLINE CallArgs
+CallArgsFromVp(unsigned argc, Value* vp)
+{
+ return CallArgs::create(argc, vp + 2, vp[1].isMagic(JS_IS_CONSTRUCTING));
+}
+
+// This method is only intended for internal use in SpiderMonkey. We may
+// eventually move it to an internal header. Embedders should use
+// JS::CallArgsFromVp!
+MOZ_ALWAYS_INLINE CallArgs
+CallArgsFromSp(unsigned stackSlots, Value* sp, bool constructing = false)
+{
+ return CallArgs::create(stackSlots - constructing, sp - stackSlots, constructing);
+}
+
+} // namespace JS
+
+/*
+ * Macros to hide interpreter stack layout details from a JSNative using its
+ * JS::Value* vp parameter. DO NOT USE THESE! Instead use JS::CallArgs and
+ * friends, above. These macros will be removed when we change JSNative to
+ * take a const JS::CallArgs&.
+ */
+
+/*
+ * Return |this| if |this| is an object. Otherwise, return the global object
+ * if |this| is null or undefined, and finally return a boxed version of any
+ * other primitive.
+ *
+ * Note: if this method returns null, an error has occurred and must be
+ * propagated or caught.
+ */
+MOZ_ALWAYS_INLINE JS::Value
+JS_THIS(JSContext* cx, JS::Value* vp)
+{
+ return vp[1].isPrimitive() ? JS::detail::ComputeThis(cx, vp) : vp[1];
+}
+
+/*
+ * A note on JS_THIS_OBJECT: no equivalent method is part of the CallArgs
+ * interface, and we're unlikely to add one (functions shouldn't be implicitly
+ * exposing the global object to arbitrary callers). Continue using |vp|
+ * directly for this case, but be aware this API will eventually be replaced
+ * with a function that operates directly upon |args.thisv()|.
+ */
+#define JS_THIS_OBJECT(cx,vp) (JS_THIS(cx,vp).toObjectOrNull())
+
+/*
+ * |this| is passed to functions in ES5 without change. Functions themselves
+ * do any post-processing they desire to box |this|, compute the global object,
+ * &c. This macro retrieves a function's unboxed |this| value.
+ *
+ * This macro must not be used in conjunction with JS_THIS or JS_THIS_OBJECT,
+ * or vice versa. Either use the provided this value with this macro, or
+ * compute the boxed |this| value using those. JS_THIS_VALUE must not be used
+ * if the function is being called as a constructor.
+ *
+ * But: DO NOT USE THIS! Instead use JS::CallArgs::thisv(), above.
+ *
+ */
+#define JS_THIS_VALUE(cx,vp) ((vp)[1])
+
+#endif /* js_CallArgs_h */
diff --git a/js/public/CallNonGenericMethod.h b/js/public/CallNonGenericMethod.h
new file mode 100644
index 0000000000..9a1cf01024
--- /dev/null
+++ b/js/public/CallNonGenericMethod.h
@@ -0,0 +1,117 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_CallNonGenericMethod_h
+#define js_CallNonGenericMethod_h
+
+#include "jstypes.h"
+
+#include "js/CallArgs.h"
+
+namespace JS {
+
+// Returns true if |v| is considered an acceptable this-value.
+typedef bool (*IsAcceptableThis)(HandleValue v);
+
+// Implements the guts of a method; guaranteed to be provided an acceptable
+// this-value, as determined by a corresponding IsAcceptableThis method.
+typedef bool (*NativeImpl)(JSContext* cx, const CallArgs& args);
+
+namespace detail {
+
+// DON'T CALL THIS DIRECTLY. It's for use only by CallNonGenericMethod!
+extern JS_PUBLIC_API(bool)
+CallMethodIfWrapped(JSContext* cx, IsAcceptableThis test, NativeImpl impl, const CallArgs& args);
+
+} // namespace detail
+
+// Methods usually act upon |this| objects only from a single global object and
+// compartment. Sometimes, however, a method must act upon |this| values from
+// multiple global objects or compartments. In such cases the |this| value a
+// method might see will be wrapped, such that various access to the object --
+// to its class, its private data, its reserved slots, and so on -- will not
+// work properly without entering that object's compartment. This method
+// implements a solution to this problem.
+//
+// To implement a method that accepts |this| values from multiple compartments,
+// define two functions. The first function matches the IsAcceptableThis type
+// and indicates whether the provided value is an acceptable |this| for the
+// method; it must be a pure function only of its argument.
+//
+// static const JSClass AnswerClass = { ... };
+//
+// static bool
+// IsAnswerObject(const Value& v)
+// {
+// if (!v.isObject())
+// return false;
+// return JS_GetClass(&v.toObject()) == &AnswerClass;
+// }
+//
+// The second function implements the NativeImpl signature and defines the
+// behavior of the method when it is provided an acceptable |this| value.
+// Aside from some typing niceties -- see the CallArgs interface for details --
+// its interface is the same as that of JSNative.
+//
+// static bool
+// answer_getAnswer_impl(JSContext* cx, JS::CallArgs args)
+// {
+// args.rval().setInt32(42);
+// return true;
+// }
+//
+// The implementation function is guaranteed to be called *only* with a |this|
+// value which is considered acceptable.
+//
+// Now to implement the actual method, write a JSNative that calls the method
+// declared below, passing the appropriate template and runtime arguments.
+//
+// static bool
+// answer_getAnswer(JSContext* cx, unsigned argc, JS::Value* vp)
+// {
+// JS::CallArgs args = JS::CallArgsFromVp(argc, vp);
+// return JS::CallNonGenericMethod<IsAnswerObject, answer_getAnswer_impl>(cx, args);
+// }
+//
+// Note that, because they are used as template arguments, the predicate
+// and implementation functions must have external linkage. (This is
+// unfortunate, but GCC wasn't inlining things as one would hope when we
+// passed them as function arguments.)
+//
+// JS::CallNonGenericMethod will test whether |args.thisv()| is acceptable. If
+// it is, it will call the provided implementation function, which will return
+// a value and indicate success. If it is not, it will attempt to unwrap
+// |this| and call the implementation function on the unwrapped |this|. If
+// that succeeds, all well and good. If it doesn't succeed, a TypeError will
+// be thrown.
+//
+// Note: JS::CallNonGenericMethod will only work correctly if it's called in
+// tail position in a JSNative. Do not call it from any other place.
+//
+template<IsAcceptableThis Test, NativeImpl Impl>
+MOZ_ALWAYS_INLINE bool
+CallNonGenericMethod(JSContext* cx, const CallArgs& args)
+{
+ HandleValue thisv = args.thisv();
+ if (Test(thisv))
+ return Impl(cx, args);
+
+ return detail::CallMethodIfWrapped(cx, Test, Impl, args);
+}
+
+MOZ_ALWAYS_INLINE bool
+CallNonGenericMethod(JSContext* cx, IsAcceptableThis Test, NativeImpl Impl, const CallArgs& args)
+{
+ HandleValue thisv = args.thisv();
+ if (Test(thisv))
+ return Impl(cx, args);
+
+ return detail::CallMethodIfWrapped(cx, Test, Impl, args);
+}
+
+} // namespace JS
+
+#endif /* js_CallNonGenericMethod_h */
diff --git a/js/public/CharacterEncoding.h b/js/public/CharacterEncoding.h
new file mode 100644
index 0000000000..90a31d1889
--- /dev/null
+++ b/js/public/CharacterEncoding.h
@@ -0,0 +1,338 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_CharacterEncoding_h
+#define js_CharacterEncoding_h
+
+#include "mozilla/Range.h"
+
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+
+namespace js {
+class ExclusiveContext;
+} // namespace js
+
+class JSFlatString;
+
+namespace JS {
+
+/*
+ * By default, all C/C++ 1-byte-per-character strings passed into the JSAPI
+ * are treated as ISO/IEC 8859-1, also known as Latin-1. That is, each
+ * byte is treated as a 2-byte character, and there is no way to pass in a
+ * string containing characters beyond U+00FF.
+ */
+class Latin1Chars : public mozilla::Range<Latin1Char>
+{
+ typedef mozilla::Range<Latin1Char> Base;
+
+ public:
+ using CharT = Latin1Char;
+
+ Latin1Chars() : Base() {}
+ Latin1Chars(char* aBytes, size_t aLength) : Base(reinterpret_cast<Latin1Char*>(aBytes), aLength) {}
+ Latin1Chars(const Latin1Char* aBytes, size_t aLength)
+ : Base(const_cast<Latin1Char*>(aBytes), aLength)
+ {}
+ Latin1Chars(const char* aBytes, size_t aLength)
+ : Base(reinterpret_cast<Latin1Char*>(const_cast<char*>(aBytes)), aLength)
+ {}
+};
+
+/*
+ * A Latin1Chars, but with \0 termination for C compatibility.
+ */
+class Latin1CharsZ : public mozilla::RangedPtr<Latin1Char>
+{
+ typedef mozilla::RangedPtr<Latin1Char> Base;
+
+ public:
+ using CharT = Latin1Char;
+
+ Latin1CharsZ() : Base(nullptr, 0) {}
+
+ Latin1CharsZ(char* aBytes, size_t aLength)
+ : Base(reinterpret_cast<Latin1Char*>(aBytes), aLength)
+ {
+ MOZ_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ Latin1CharsZ(Latin1Char* aBytes, size_t aLength)
+ : Base(aBytes, aLength)
+ {
+ MOZ_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ using Base::operator=;
+
+ char* c_str() { return reinterpret_cast<char*>(get()); }
+};
+
+class UTF8Chars : public mozilla::Range<unsigned char>
+{
+ typedef mozilla::Range<unsigned char> Base;
+
+ public:
+ using CharT = unsigned char;
+
+ UTF8Chars() : Base() {}
+ UTF8Chars(char* aBytes, size_t aLength)
+ : Base(reinterpret_cast<unsigned char*>(aBytes), aLength)
+ {}
+ UTF8Chars(const char* aBytes, size_t aLength)
+ : Base(reinterpret_cast<unsigned char*>(const_cast<char*>(aBytes)), aLength)
+ {}
+};
+
+/*
+ * SpiderMonkey also deals directly with UTF-8 encoded text in some places.
+ */
+class UTF8CharsZ : public mozilla::RangedPtr<unsigned char>
+{
+ typedef mozilla::RangedPtr<unsigned char> Base;
+
+ public:
+ using CharT = unsigned char;
+
+ UTF8CharsZ() : Base(nullptr, 0) {}
+
+ UTF8CharsZ(char* aBytes, size_t aLength)
+ : Base(reinterpret_cast<unsigned char*>(aBytes), aLength)
+ {
+ MOZ_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ UTF8CharsZ(unsigned char* aBytes, size_t aLength)
+ : Base(aBytes, aLength)
+ {
+ MOZ_ASSERT(aBytes[aLength] == '\0');
+ }
+
+ using Base::operator=;
+
+ char* c_str() { return reinterpret_cast<char*>(get()); }
+};
+
+/*
+ * A wrapper for a "const char*" that is encoded using UTF-8.
+ * This class does not manage ownership of the data; that is left
+ * to others. This differs from UTF8CharsZ in that the chars are
+ * const and it allows assignment.
+ */
+class JS_PUBLIC_API(ConstUTF8CharsZ)
+{
+ const char* data_;
+
+ public:
+ using CharT = unsigned char;
+
+ ConstUTF8CharsZ() : data_(nullptr)
+ {}
+
+ ConstUTF8CharsZ(const char* aBytes, size_t aLength)
+ : data_(aBytes)
+ {
+ MOZ_ASSERT(aBytes[aLength] == '\0');
+#ifdef DEBUG
+ validate(aLength);
+#endif
+ }
+
+ const void* get() const { return data_; }
+
+ const char* c_str() const { return data_; }
+
+ explicit operator bool() const { return data_ != nullptr; }
+
+ private:
+#ifdef DEBUG
+ void validate(size_t aLength);
+#endif
+};
+
+/*
+ * SpiderMonkey uses a 2-byte character representation: it is a
+ * 2-byte-at-a-time view of a UTF-16 byte stream. This is similar to UCS-2,
+ * but unlike UCS-2, we do not strip UTF-16 extension bytes. This allows a
+ * sufficiently dedicated JavaScript program to be fully unicode-aware by
+ * manually interpreting UTF-16 extension characters embedded in the JS
+ * string.
+ */
+class TwoByteChars : public mozilla::Range<char16_t>
+{
+ typedef mozilla::Range<char16_t> Base;
+
+ public:
+ using CharT = char16_t;
+
+ TwoByteChars() : Base() {}
+ TwoByteChars(char16_t* aChars, size_t aLength) : Base(aChars, aLength) {}
+ TwoByteChars(const char16_t* aChars, size_t aLength) : Base(const_cast<char16_t*>(aChars), aLength) {}
+};
+
+/*
+ * A TwoByteChars, but \0 terminated for compatibility with JSFlatString.
+ */
+class TwoByteCharsZ : public mozilla::RangedPtr<char16_t>
+{
+ typedef mozilla::RangedPtr<char16_t> Base;
+
+ public:
+ using CharT = char16_t;
+
+ TwoByteCharsZ() : Base(nullptr, 0) {}
+
+ TwoByteCharsZ(char16_t* chars, size_t length)
+ : Base(chars, length)
+ {
+ MOZ_ASSERT(chars[length] == '\0');
+ }
+
+ using Base::operator=;
+};
+
+typedef mozilla::RangedPtr<const char16_t> ConstCharPtr;
+
+/*
+ * Like TwoByteChars, but the chars are const.
+ */
+class ConstTwoByteChars : public mozilla::Range<const char16_t>
+{
+ typedef mozilla::Range<const char16_t> Base;
+
+ public:
+ using CharT = char16_t;
+
+ ConstTwoByteChars() : Base() {}
+ ConstTwoByteChars(const char16_t* aChars, size_t aLength) : Base(aChars, aLength) {}
+};
+
+/*
+ * Convert a 2-byte character sequence to "ISO-Latin-1". This works by
+ * truncating each 2-byte pair in the sequence to a 1-byte pair. If the source
+ * contains any UTF-16 extension characters, then this may give invalid Latin1
+ * output. The returned string is zero terminated. The returned string or the
+ * returned string's |start()| must be freed with JS_free or js_free,
+ * respectively. If allocation fails, an OOM error will be set and the method
+ * will return a nullptr chars (which can be tested for with the ! operator).
+ * This method cannot trigger GC.
+ */
+extern Latin1CharsZ
+LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx,
+ const mozilla::Range<const char16_t> tbchars);
+
+inline Latin1CharsZ
+LossyTwoByteCharsToNewLatin1CharsZ(js::ExclusiveContext* cx, const char16_t* begin, size_t length)
+{
+ const mozilla::Range<const char16_t> tbchars(begin, length);
+ return JS::LossyTwoByteCharsToNewLatin1CharsZ(cx, tbchars);
+}
+
+template <typename CharT>
+extern UTF8CharsZ
+CharsToNewUTF8CharsZ(js::ExclusiveContext* maybeCx, const mozilla::Range<CharT> chars);
+
+JS_PUBLIC_API(uint32_t)
+Utf8ToOneUcs4Char(const uint8_t* utf8Buffer, int utf8Length);
+
+/*
+ * Inflate bytes in UTF-8 encoding to char16_t.
+ * - On error, returns an empty TwoByteCharsZ.
+ * - On success, returns a malloc'd TwoByteCharsZ, and updates |outlen| to hold
+ * its length; the length value excludes the trailing null.
+ */
+extern JS_PUBLIC_API(TwoByteCharsZ)
+UTF8CharsToNewTwoByteCharsZ(JSContext* cx, const UTF8Chars utf8, size_t* outlen);
+
+/*
+ * Like UTF8CharsToNewTwoByteCharsZ, but for ConstUTF8CharsZ.
+ */
+extern JS_PUBLIC_API(TwoByteCharsZ)
+UTF8CharsToNewTwoByteCharsZ(JSContext* cx, const ConstUTF8CharsZ& utf8, size_t* outlen);
+
+/*
+ * The same as UTF8CharsToNewTwoByteCharsZ(), except that any malformed UTF-8 characters
+ * will be replaced by \uFFFD. No exception will be thrown for malformed UTF-8
+ * input.
+ */
+extern JS_PUBLIC_API(TwoByteCharsZ)
+LossyUTF8CharsToNewTwoByteCharsZ(JSContext* cx, const UTF8Chars utf8, size_t* outlen);
+
+extern JS_PUBLIC_API(TwoByteCharsZ)
+LossyUTF8CharsToNewTwoByteCharsZ(JSContext* cx, const ConstUTF8CharsZ& utf8, size_t* outlen);
+
+/*
+ * Returns the length of the char buffer required to encode |s| as UTF8.
+ * Does not include the null-terminator.
+ */
+JS_PUBLIC_API(size_t)
+GetDeflatedUTF8StringLength(JSFlatString* s);
+
+/*
+ * Encode |src| as UTF8. The caller must either ensure |dst| has enough space
+ * to encode the entire string or pass the length of the buffer as |dstlenp|,
+ * in which case the function will encode characters from the string until
+ * the buffer is exhausted. Does not write the null terminator.
+ *
+ * If |dstlenp| is provided, it will be updated to hold the number of bytes
+ * written to the buffer. If |numcharsp| is provided, it will be updated to hold
+ * the number of Unicode characters written to the buffer (which can be less
+ * than the length of the string, if the buffer is exhausted before the string
+ * is fully encoded).
+ */
+JS_PUBLIC_API(void)
+DeflateStringToUTF8Buffer(JSFlatString* src, mozilla::RangedPtr<char> dst,
+ size_t* dstlenp = nullptr, size_t* numcharsp = nullptr);
+
+/*
+ * The smallest character encoding capable of fully representing a particular
+ * string.
+ */
+enum class SmallestEncoding {
+ ASCII,
+ Latin1,
+ UTF16
+};
+
+/*
+ * Returns the smallest encoding possible for the given string: if all
+ * codepoints are <128 then ASCII, otherwise if all codepoints are <256
+ * Latin-1, else UTF16.
+ */
+JS_PUBLIC_API(SmallestEncoding)
+FindSmallestEncoding(UTF8Chars utf8);
+
+/*
+ * Return a null-terminated Latin-1 string copied from the input string,
+ * storing its length (excluding null terminator) in |*outlen|. Fail and
+ * report an error if the string contains non-Latin-1 codepoints. Returns
+ * Latin1CharsZ() on failure.
+ */
+extern JS_PUBLIC_API(Latin1CharsZ)
+UTF8CharsToNewLatin1CharsZ(JSContext* cx, const UTF8Chars utf8, size_t* outlen);
+
+/*
+ * Return a null-terminated Latin-1 string copied from the input string,
+ * storing its length (excluding null terminator) in |*outlen|. Non-Latin-1
+ * codepoints are replaced by '?'. Returns Latin1CharsZ() on failure.
+ */
+extern JS_PUBLIC_API(Latin1CharsZ)
+LossyUTF8CharsToNewLatin1CharsZ(JSContext* cx, const UTF8Chars utf8, size_t* outlen);
+
+/*
+ * Returns true if all characters in the given null-terminated string are
+ * ASCII, i.e. < 0x80, false otherwise.
+ */
+extern JS_PUBLIC_API(bool)
+StringIsASCII(const char* s);
+
+} // namespace JS
+
+inline void JS_free(JS::Latin1CharsZ& ptr) { js_free((void*)ptr.get()); }
+inline void JS_free(JS::UTF8CharsZ& ptr) { js_free((void*)ptr.get()); }
+
+#endif /* js_CharacterEncoding_h */
diff --git a/js/public/Class.h b/js/public/Class.h
new file mode 100644
index 0000000000..3b5023875e
--- /dev/null
+++ b/js/public/Class.h
@@ -0,0 +1,995 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JSClass definition and its component types, plus related interfaces. */
+
+#ifndef js_Class_h
+#define js_Class_h
+
+#include "jstypes.h"
+
+#include "js/CallArgs.h"
+#include "js/Id.h"
+#include "js/TypeDecls.h"
+
+/*
+ * A JSClass acts as a vtable for JS objects that allows JSAPI clients to
+ * control various aspects of the behavior of an object like property lookup.
+ * js::Class is an engine-private extension that allows more control over
+ * object behavior and, e.g., allows custom slow layout.
+ */
+
+struct JSAtomState;
+struct JSFreeOp;
+struct JSFunctionSpec;
+
+namespace js {
+
+struct Class;
+class FreeOp;
+class Shape;
+
+// This is equal to JSFunction::class_. Use it in places where you don't want
+// to #include jsfun.h.
+extern JS_FRIEND_DATA(const js::Class* const) FunctionClassPtr;
+
+} // namespace js
+
+namespace JS {
+
+class AutoIdVector;
+
+/**
+ * The answer to a successful query as to whether an object is an Array per
+ * ES6's internal |IsArray| operation (as exposed by |Array.isArray|).
+ */
+enum class IsArrayAnswer
+{
+ Array,
+ NotArray,
+ RevokedProxy
+};
+
+/**
+ * ES6 7.2.2.
+ *
+ * Returns false on failure, otherwise returns true and sets |*isArray|
+ * indicating whether the object passes ECMAScript's IsArray test. This is the
+ * same test performed by |Array.isArray|.
+ *
+ * This is NOT the same as asking whether |obj| is an Array or a wrapper around
+ * one. If |obj| is a proxy created by |Proxy.revocable()| and has been
+ * revoked, or if |obj| is a proxy whose target (at any number of hops) is a
+ * revoked proxy, this method throws a TypeError and returns false.
+ */
+extern JS_PUBLIC_API(bool)
+IsArray(JSContext* cx, HandleObject obj, bool* isArray);
+
+/**
+ * Identical to IsArray above, but the nature of the object (if successfully
+ * determined) is communicated via |*answer|. In particular this method
+ * returns true and sets |*answer = IsArrayAnswer::RevokedProxy| when called on
+ * a revoked proxy.
+ *
+ * Most users will want the overload above, not this one.
+ */
+extern JS_PUBLIC_API(bool)
+IsArray(JSContext* cx, HandleObject obj, IsArrayAnswer* answer);
+
+/**
+ * Per ES6, the [[DefineOwnProperty]] internal method has three different
+ * possible outcomes:
+ *
+ * - It can throw an exception (which we indicate by returning false).
+ *
+ * - It can return true, indicating unvarnished success.
+ *
+ * - It can return false, indicating "strict failure". The property could
+ * not be defined. It's an error, but no exception was thrown.
+ *
+ * It's not just [[DefineOwnProperty]]: all the mutating internal methods have
+ * the same three outcomes. (The other affected internal methods are [[Set]],
+ * [[Delete]], [[SetPrototypeOf]], and [[PreventExtensions]].)
+ *
+ * If you think this design is awful, you're not alone. But as it's the
+ * standard, we must represent these boolean "success" values somehow.
+ * ObjectOpResult is the class for this. It's like a bool, but when it's false
+ * it also stores an error code.
+ *
+ * Typical usage:
+ *
+ * ObjectOpResult result;
+ * if (!DefineProperty(cx, obj, id, ..., result))
+ * return false;
+ * if (!result)
+ * return result.reportError(cx, obj, id);
+ *
+ * Users don't have to call `result.reportError()`; another possible ending is:
+ *
+ * argv.rval().setBoolean(bool(result));
+ * return true;
+ */
+class ObjectOpResult
+{
+ private:
+ /**
+ * code_ is either one of the special codes OkCode or Uninitialized, or
+ * an error code. For now the error codes are private to the JS engine;
+ * they're defined in js/src/js.msg.
+ *
+ * code_ is uintptr_t (rather than uint32_t) for the convenience of the
+ * JITs, which would otherwise have to deal with either padding or stack
+ * alignment on 64-bit platforms.
+ */
+ uintptr_t code_;
+
+ public:
+ enum SpecialCodes : uintptr_t {
+ OkCode = 0,
+ Uninitialized = uintptr_t(-1)
+ };
+
+ ObjectOpResult() : code_(Uninitialized) {}
+
+ /* Return true if succeed() was called. */
+ bool ok() const {
+ MOZ_ASSERT(code_ != Uninitialized);
+ return code_ == OkCode;
+ }
+
+ explicit operator bool() const { return ok(); }
+
+ /* Set this ObjectOpResult to true and return true. */
+ bool succeed() {
+ code_ = OkCode;
+ return true;
+ }
+
+ /*
+ * Set this ObjectOpResult to false with an error code.
+ *
+ * Always returns true, as a convenience. Typical usage will be:
+ *
+ * if (funny condition)
+ * return result.fail(JSMSG_CANT_DO_THE_THINGS);
+ *
+ * The true return value indicates that no exception is pending, and it
+ * would be OK to ignore the failure and continue.
+ */
+ bool fail(uint32_t msg) {
+ MOZ_ASSERT(msg != OkCode);
+ code_ = msg;
+ return true;
+ }
+
+ JS_PUBLIC_API(bool) failCantRedefineProp();
+ JS_PUBLIC_API(bool) failReadOnly();
+ JS_PUBLIC_API(bool) failGetterOnly();
+ JS_PUBLIC_API(bool) failCantDelete();
+
+ JS_PUBLIC_API(bool) failCantSetInterposed();
+ JS_PUBLIC_API(bool) failCantDefineWindowElement();
+ JS_PUBLIC_API(bool) failCantDeleteWindowElement();
+ JS_PUBLIC_API(bool) failCantDeleteWindowNamedProperty();
+ JS_PUBLIC_API(bool) failCantPreventExtensions();
+ JS_PUBLIC_API(bool) failCantSetProto();
+ JS_PUBLIC_API(bool) failNoNamedSetter();
+ JS_PUBLIC_API(bool) failNoIndexedSetter();
+
+ uint32_t failureCode() const {
+ MOZ_ASSERT(!ok());
+ return uint32_t(code_);
+ }
+
+ /*
+ * Report an error or warning if necessary; return true to proceed and
+ * false if an error was reported. Call this when failure should cause
+ * a warning if extraWarnings are enabled.
+ *
+ * The precise rules are like this:
+ *
+ * - If ok(), then we succeeded. Do nothing and return true.
+ * - Otherwise, if |strict| is true, or if cx has both extraWarnings and
+ * werrorOption enabled, throw a TypeError and return false.
+ * - Otherwise, if cx has extraWarnings enabled, emit a warning and
+ * return true.
+ * - Otherwise, do nothing and return true.
+ */
+ bool checkStrictErrorOrWarning(JSContext* cx, HandleObject obj, HandleId id, bool strict) {
+ if (ok())
+ return true;
+ return reportStrictErrorOrWarning(cx, obj, id, strict);
+ }
+
+ /*
+     * The same as checkStrictErrorOrWarning(cx, obj, id, strict), except the
+ * operation is not associated with a particular property id. This is
+ * used for [[PreventExtensions]] and [[SetPrototypeOf]]. failureCode()
+ * must not be an error that has "{0}" in the error message.
+ */
+ bool checkStrictErrorOrWarning(JSContext* cx, HandleObject obj, bool strict) {
+ return ok() || reportStrictErrorOrWarning(cx, obj, strict);
+ }
+
+ /* Throw a TypeError. Call this only if !ok(). */
+ bool reportError(JSContext* cx, HandleObject obj, HandleId id) {
+ return reportStrictErrorOrWarning(cx, obj, id, true);
+ }
+
+ /*
+ * The same as reportError(cx, obj, id), except the operation is not
+ * associated with a particular property id.
+ */
+ bool reportError(JSContext* cx, HandleObject obj) {
+ return reportStrictErrorOrWarning(cx, obj, true);
+ }
+
+ /* Helper function for checkStrictErrorOrWarning's slow path. */
+ JS_PUBLIC_API(bool) reportStrictErrorOrWarning(JSContext* cx, HandleObject obj, HandleId id, bool strict);
+ JS_PUBLIC_API(bool) reportStrictErrorOrWarning(JSContext* cx, HandleObject obj, bool strict);
+
+ /*
+ * Convenience method. Return true if ok() or if strict is false; otherwise
+ * throw a TypeError and return false.
+ */
+ bool checkStrict(JSContext* cx, HandleObject obj, HandleId id) {
+ return checkStrictErrorOrWarning(cx, obj, id, true);
+ }
+
+ /*
+     * Convenience method. The same as checkStrict(cx, obj, id), except the
+ * operation is not associated with a particular property id.
+ */
+ bool checkStrict(JSContext* cx, HandleObject obj) {
+ return checkStrictErrorOrWarning(cx, obj, true);
+ }
+};
+
+} // namespace JS
+
+// JSClass operation signatures.
+
+/**
+ * Get a property named by id in obj. Note the jsid id type -- id may
+ * be a string (Unicode property identifier) or an int (element index). The
+ * *vp out parameter, on success, is the new property value after the action.
+ */
+typedef bool
+(* JSGetterOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::MutableHandleValue vp);
+
+/** Add a property named by id to obj. */
+typedef bool
+(* JSAddPropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id, JS::HandleValue v);
+
+/**
+ * Set a property named by id in obj. Note the jsid id type -- id may be a
+ * string (Unicode property identifier) or an int (element index). The *vp
+ * out parameter, on success, is the new property value after the set;
+ * strict failure of the assignment is communicated through |result| rather
+ * than through a boolean strict flag.
+ */
+typedef bool
+(* JSSetterOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::MutableHandleValue vp, JS::ObjectOpResult& result);
+
+/**
+ * Delete a property named by id in obj.
+ *
+ * If an error occurred, return false as per normal JSAPI error practice.
+ *
+ * If no error occurred, but the deletion attempt wasn't allowed (perhaps
+ * because the property was non-configurable), call result.fail() and
+ * return true. This will cause |delete obj[id]| to evaluate to false in
+ * non-strict mode code, and to throw a TypeError in strict mode code.
+ *
+ * If no error occurred and the deletion wasn't disallowed (this is *not* the
+ * same as saying that a deletion actually occurred -- deleting a non-existent
+ * property, or an inherited property, is allowed -- it's just pointless),
+ * call result.succeed() and return true.
+ */
+typedef bool
+(* JSDeletePropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::ObjectOpResult& result);
+
+/**
+ * The type of ObjectOps::enumerate. This callback overrides a portion of
+ * SpiderMonkey's default [[Enumerate]] internal method. When an ordinary object
+ * is enumerated, that object and each object on its prototype chain is tested
+ * for an enumerate op, and those ops are called in order. The properties each
+ * op adds to the 'properties' vector are added to the set of values the for-in
+ * loop will iterate over. All of this is nonstandard.
+ *
+ * An object is "enumerated" when it's the target of a for-in loop or
+ * JS_Enumerate(). The callback's job is to populate 'properties' with the
+ * object's property keys. If `enumerableOnly` is true, the callback should only
+ * add enumerable properties.
+ */
+typedef bool
+(* JSNewEnumerateOp)(JSContext* cx, JS::HandleObject obj, JS::AutoIdVector& properties,
+ bool enumerableOnly);
+
+/**
+ * The old-style JSClass.enumerate op should define all lazy properties not
+ * yet reflected in obj.
+ */
+typedef bool
+(* JSEnumerateOp)(JSContext* cx, JS::HandleObject obj);
+
+/**
+ * The type of ObjectOps::funToString. This callback allows an object to
+ * provide a custom string to use when Function.prototype.toString is invoked on
+ * that object. A null return value means OOM.
+ */
+typedef JSString*
+(* JSFunToStringOp)(JSContext* cx, JS::HandleObject obj, unsigned indent);
+
+/**
+ * Resolve a lazy property named by id in obj by defining it directly in obj.
+ * Lazy properties are those reflected from some peer native property space
+ * (e.g., the DOM attributes for a given node reflected as obj) on demand.
+ *
+ * JS looks for a property in an object, and if not found, tries to resolve
+ * the given id. *resolvedp should be set to true iff the property was defined
+ * on |obj|.
+ */
+typedef bool
+(* JSResolveOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ bool* resolvedp);
+
+/**
+ * A class with a resolve hook can optionally have a mayResolve hook. This hook
+ * must have no side effects and must return true for a given id if the resolve
+ * hook may resolve this id. This is useful when we're doing a "pure" lookup: if
+ * mayResolve returns false, we know we don't have to call the effectful resolve
+ * hook.
+ *
+ * maybeObj, if non-null, is the object on which we're doing the lookup. This
+ * can be nullptr: during JIT compilation we sometimes know the Class but not
+ * the object.
+ */
+typedef bool
+(* JSMayResolveOp)(const JSAtomState& names, jsid id, JSObject* maybeObj);
+
+/**
+ * Finalize obj, which the garbage collector has determined to be unreachable
+ * from other live objects or from GC roots. Obviously, finalizers must never
+ * store a reference to obj.
+ */
+typedef void
+(* JSFinalizeOp)(JSFreeOp* fop, JSObject* obj);
+
+/** Finalizes external strings created by JS_NewExternalString. */
+struct JSStringFinalizer {
+ void (*finalize)(JS::Zone* zone, const JSStringFinalizer* fin, char16_t* chars);
+};
+
+/**
+ * Check whether v is an instance of obj. Return false on error or exception,
+ * true on success with true in *bp if v is an instance of obj, false in
+ * *bp otherwise.
+ */
+typedef bool
+(* JSHasInstanceOp)(JSContext* cx, JS::HandleObject obj, JS::MutableHandleValue vp,
+ bool* bp);
+
+/**
+ * Function type for trace operation of the class called to enumerate all
+ * traceable things reachable from obj's private data structure. For each such
+ * thing, a trace implementation must call JS::TraceEdge on the thing's
+ * location.
+ *
+ * A JSTraceOp implementation can assume that no other thread mutates object
+ * state. It must not change the state of the object or of the corresponding
+ * native structures. The only exception to this rule is the case when the
+ * embedding needs tight integration with the GC. In that case the embedding
+ * can check whether the traversal is part of the marking phase by calling
+ * JS_IsGCMarkingTracer, and apply special code such as emptying caches or
+ * marking its native structures.
+ */
+typedef void
+(* JSTraceOp)(JSTracer* trc, JSObject* obj);
+
+typedef JSObject*
+(* JSWeakmapKeyDelegateOp)(JSObject* obj);
+
+typedef void
+(* JSObjectMovedOp)(JSObject* obj, const JSObject* old);
+
+/* js::Class operation signatures. */
+
+namespace js {
+
+typedef bool
+(* LookupPropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::MutableHandleObject objp, JS::MutableHandle<Shape*> propp);
+typedef bool
+(* DefinePropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::Handle<JS::PropertyDescriptor> desc,
+ JS::ObjectOpResult& result);
+typedef bool
+(* HasPropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id, bool* foundp);
+typedef bool
+(* GetPropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleValue receiver, JS::HandleId id,
+ JS::MutableHandleValue vp);
+typedef bool
+(* SetPropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id, JS::HandleValue v,
+ JS::HandleValue receiver, JS::ObjectOpResult& result);
+typedef bool
+(* GetOwnPropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::MutableHandle<JS::PropertyDescriptor> desc);
+typedef bool
+(* DeletePropertyOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id,
+ JS::ObjectOpResult& result);
+
+typedef bool
+(* WatchOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id, JS::HandleObject callable);
+
+typedef bool
+(* UnwatchOp)(JSContext* cx, JS::HandleObject obj, JS::HandleId id);
+
+class JS_FRIEND_API(ElementAdder)
+{
+ public:
+ enum GetBehavior {
+ // Check if the element exists before performing the Get and preserve
+ // holes.
+ CheckHasElemPreserveHoles,
+
+ // Perform a Get operation, like obj[index] in JS.
+ GetElement
+ };
+
+ private:
+ // Only one of these is used.
+ JS::RootedObject resObj_;
+ JS::Value* vp_;
+
+ uint32_t index_;
+#ifdef DEBUG
+ uint32_t length_;
+#endif
+ GetBehavior getBehavior_;
+
+ public:
+ ElementAdder(JSContext* cx, JSObject* obj, uint32_t length, GetBehavior behavior)
+ : resObj_(cx, obj), vp_(nullptr), index_(0),
+#ifdef DEBUG
+ length_(length),
+#endif
+ getBehavior_(behavior)
+ {}
+ ElementAdder(JSContext* cx, JS::Value* vp, uint32_t length, GetBehavior behavior)
+ : resObj_(cx), vp_(vp), index_(0),
+#ifdef DEBUG
+ length_(length),
+#endif
+ getBehavior_(behavior)
+ {}
+
+ GetBehavior getBehavior() const { return getBehavior_; }
+
+ bool append(JSContext* cx, JS::HandleValue v);
+ void appendHole();
+};
+
+typedef bool
+(* GetElementsOp)(JSContext* cx, JS::HandleObject obj, uint32_t begin, uint32_t end,
+ ElementAdder* adder);
+
+typedef void
+(* FinalizeOp)(FreeOp* fop, JSObject* obj);
+
+// The special treatment of |finalize| and |trace| is necessary because if we
+// assign either of those hooks to a local variable and then call it -- as is
+// done with the other hooks -- the GC hazard analysis gets confused.
+#define JS_CLASS_MEMBERS(ClassOpsType, FreeOpType) \
+ const char* name; \
+ uint32_t flags; \
+ const ClassOpsType* cOps; \
+ \
+ JSAddPropertyOp getAddProperty() const { return cOps ? cOps->addProperty : nullptr; } \
+ JSDeletePropertyOp getDelProperty() const { return cOps ? cOps->delProperty : nullptr; } \
+ JSGetterOp getGetProperty() const { return cOps ? cOps->getProperty : nullptr; } \
+ JSSetterOp getSetProperty() const { return cOps ? cOps->setProperty : nullptr; } \
+ JSEnumerateOp getEnumerate() const { return cOps ? cOps->enumerate : nullptr; } \
+ JSResolveOp getResolve() const { return cOps ? cOps->resolve : nullptr; } \
+ JSMayResolveOp getMayResolve() const { return cOps ? cOps->mayResolve : nullptr; } \
+ JSNative getCall() const { return cOps ? cOps->call : nullptr; } \
+ JSHasInstanceOp getHasInstance() const { return cOps ? cOps->hasInstance : nullptr; } \
+ JSNative getConstruct() const { return cOps ? cOps->construct : nullptr; } \
+ \
+ bool hasFinalize() const { return cOps && cOps->finalize; } \
+ bool hasTrace() const { return cOps && cOps->trace; } \
+ \
+ bool isTrace(JSTraceOp trace) const { return cOps && cOps->trace == trace; } \
+ \
+ void doFinalize(FreeOpType* fop, JSObject* obj) const { \
+ MOZ_ASSERT(cOps && cOps->finalize); \
+ cOps->finalize(fop, obj); \
+ } \
+ void doTrace(JSTracer* trc, JSObject* obj) const { \
+ MOZ_ASSERT(cOps && cOps->trace); \
+ cOps->trace(trc, obj); \
+ }
+
+struct ClassOps
+{
+ /* Function pointer members (may be null). */
+ JSAddPropertyOp addProperty;
+ JSDeletePropertyOp delProperty;
+ JSGetterOp getProperty;
+ JSSetterOp setProperty;
+ JSEnumerateOp enumerate;
+ JSResolveOp resolve;
+ JSMayResolveOp mayResolve;
+ FinalizeOp finalize;
+ JSNative call;
+ JSHasInstanceOp hasInstance;
+ JSNative construct;
+ JSTraceOp trace;
+};
+
+/** Callback for the creation of constructor and prototype objects. */
+typedef JSObject* (*ClassObjectCreationOp)(JSContext* cx, JSProtoKey key);
+
+/** Callback for custom post-processing after class initialization via ClassSpec. */
+typedef bool (*FinishClassInitOp)(JSContext* cx, JS::HandleObject ctor,
+ JS::HandleObject proto);
+
+const size_t JSCLASS_CACHED_PROTO_WIDTH = 6;
+
+struct ClassSpec
+{
+ // All properties except flags should be accessed through accessor.
+ ClassObjectCreationOp createConstructor_;
+ ClassObjectCreationOp createPrototype_;
+ const JSFunctionSpec* constructorFunctions_;
+ const JSPropertySpec* constructorProperties_;
+ const JSFunctionSpec* prototypeFunctions_;
+ const JSPropertySpec* prototypeProperties_;
+ FinishClassInitOp finishInit_;
+ uintptr_t flags;
+
+ static const size_t ProtoKeyWidth = JSCLASS_CACHED_PROTO_WIDTH;
+
+ static const uintptr_t ProtoKeyMask = (1 << ProtoKeyWidth) - 1;
+ static const uintptr_t DontDefineConstructor = 1 << ProtoKeyWidth;
+ static const uintptr_t IsDelegated = 1 << (ProtoKeyWidth + 1);
+
+ bool defined() const { return !!createConstructor_; }
+
+ bool delegated() const {
+ return (flags & IsDelegated);
+ }
+
+ // The ProtoKey this class inherits from.
+ JSProtoKey inheritanceProtoKey() const {
+ MOZ_ASSERT(defined());
+ static_assert(JSProto_Null == 0, "zeroed key must be null");
+
+ // Default: Inherit from Object.
+ if (!(flags & ProtoKeyMask))
+ return JSProto_Object;
+
+ return JSProtoKey(flags & ProtoKeyMask);
+ }
+
+ bool shouldDefineConstructor() const {
+ MOZ_ASSERT(defined());
+ return !(flags & DontDefineConstructor);
+ }
+
+ const ClassSpec* delegatedClassSpec() const {
+ MOZ_ASSERT(delegated());
+ return reinterpret_cast<ClassSpec*>(createConstructor_);
+ }
+
+ ClassObjectCreationOp createConstructorHook() const {
+ if (delegated())
+ return delegatedClassSpec()->createConstructorHook();
+ return createConstructor_;
+ }
+ ClassObjectCreationOp createPrototypeHook() const {
+ if (delegated())
+ return delegatedClassSpec()->createPrototypeHook();
+ return createPrototype_;
+ }
+ const JSFunctionSpec* constructorFunctions() const {
+ if (delegated())
+ return delegatedClassSpec()->constructorFunctions();
+ return constructorFunctions_;
+ }
+ const JSPropertySpec* constructorProperties() const {
+ if (delegated())
+ return delegatedClassSpec()->constructorProperties();
+ return constructorProperties_;
+ }
+ const JSFunctionSpec* prototypeFunctions() const {
+ if (delegated())
+ return delegatedClassSpec()->prototypeFunctions();
+ return prototypeFunctions_;
+ }
+ const JSPropertySpec* prototypeProperties() const {
+ if (delegated())
+ return delegatedClassSpec()->prototypeProperties();
+ return prototypeProperties_;
+ }
+ FinishClassInitOp finishInitHook() const {
+ if (delegated())
+ return delegatedClassSpec()->finishInitHook();
+ return finishInit_;
+ }
+};
+
+struct ClassExtension
+{
+ /**
+ * If an object is used as a key in a weakmap, it may be desirable for the
+ * garbage collector to keep that object around longer than it otherwise
+ * would. A common case is when the key is a wrapper around an object in
+ * another compartment, and we want to avoid collecting the wrapper (and
+ * removing the weakmap entry) as long as the wrapped object is alive. In
+ * that case, the wrapped object is returned by the wrapper's
+ * weakmapKeyDelegateOp hook. As long as the wrapper is used as a weakmap
+ * key, it will not be collected (and remain in the weakmap) until the
+ * wrapped object is collected.
+ */
+ JSWeakmapKeyDelegateOp weakmapKeyDelegateOp;
+
+ /**
+ * Optional hook called when an object is moved by a compacting GC.
+ *
+ * There may exist weak pointers to an object that are not traced through
+ * when the normal trace APIs are used, for example objects in the wrapper
+ * cache. This hook allows these pointers to be updated.
+ *
+ * Note that this hook can be called before JS_NewObject() returns if a GC
+ * is triggered during construction of the object. This can happen for
+ * global objects for example.
+ */
+ JSObjectMovedOp objectMovedOp;
+};
+
+inline ClassObjectCreationOp DELEGATED_CLASSSPEC(const ClassSpec* spec) {
+ return reinterpret_cast<ClassObjectCreationOp>(const_cast<ClassSpec*>(spec));
+}
+
+#define JS_NULL_CLASS_SPEC nullptr
+#define JS_NULL_CLASS_EXT nullptr
+
+struct ObjectOps
+{
+ LookupPropertyOp lookupProperty;
+ DefinePropertyOp defineProperty;
+ HasPropertyOp hasProperty;
+ GetPropertyOp getProperty;
+ SetPropertyOp setProperty;
+ GetOwnPropertyOp getOwnPropertyDescriptor;
+ DeletePropertyOp deleteProperty;
+ WatchOp watch;
+ UnwatchOp unwatch;
+ GetElementsOp getElements;
+ JSNewEnumerateOp enumerate;
+ JSFunToStringOp funToString;
+};
+
+#define JS_NULL_OBJECT_OPS nullptr
+
+} // namespace js
+
+// Classes, objects, and properties.
+
+typedef void (*JSClassInternal)();
+
+struct JSClassOps
+{
+ /* Function pointer members (may be null). */
+ JSAddPropertyOp addProperty;
+ JSDeletePropertyOp delProperty;
+ JSGetterOp getProperty;
+ JSSetterOp setProperty;
+ JSEnumerateOp enumerate;
+ JSResolveOp resolve;
+ JSMayResolveOp mayResolve;
+ JSFinalizeOp finalize;
+ JSNative call;
+ JSHasInstanceOp hasInstance;
+ JSNative construct;
+ JSTraceOp trace;
+};
+
+#define JS_NULL_CLASS_OPS nullptr
+
+struct JSClass {
+ JS_CLASS_MEMBERS(JSClassOps, JSFreeOp);
+
+ void* reserved[3];
+};
+
+#define JSCLASS_HAS_PRIVATE (1<<0) // objects have private slot
+#define JSCLASS_DELAY_METADATA_BUILDER (1<<1) // class's initialization code
+ // will call
+ // SetNewObjectMetadata itself
+#define JSCLASS_IS_WRAPPED_NATIVE (1<<2) // class is an XPCWrappedNative.
+ // WeakMaps use this to override
+ // the wrapper disposal
+ // mechanism.
+#define JSCLASS_PRIVATE_IS_NSISUPPORTS (1<<3) // private is (nsISupports*)
+#define JSCLASS_IS_DOMJSCLASS (1<<4) // objects are DOM
+#define JSCLASS_HAS_XRAYED_CONSTRUCTOR (1<<5) // if wrapped by an xray
+ // wrapper, the builtin
+ // class's constructor won't
+ // be unwrapped and invoked.
+ // Instead, the constructor is
+ // resolved in the caller's
+ // compartment and invoked
+ // with a wrapped newTarget.
+ // The constructor has to
+ // detect and handle this
+ // situation.
+ // See PromiseConstructor for
+ // details.
+#define JSCLASS_EMULATES_UNDEFINED (1<<6) // objects of this class act
+ // like the value undefined,
+ // in some contexts
+#define JSCLASS_USERBIT1 (1<<7) // Reserved for embeddings.
+
+// To reserve slots fetched and stored via JS_Get/SetReservedSlot, bitwise-or
+// JSCLASS_HAS_RESERVED_SLOTS(n) into the initializer for JSClass.flags, where
+// n is a constant in [1, 255]. Reserved slots are indexed from 0 to n-1.
+#define JSCLASS_RESERVED_SLOTS_SHIFT 8 // room for 8 flags below
+#define JSCLASS_RESERVED_SLOTS_WIDTH 8 // and 16 above this field
+#define JSCLASS_RESERVED_SLOTS_MASK JS_BITMASK(JSCLASS_RESERVED_SLOTS_WIDTH)
+#define JSCLASS_HAS_RESERVED_SLOTS(n) (((n) & JSCLASS_RESERVED_SLOTS_MASK) \
+ << JSCLASS_RESERVED_SLOTS_SHIFT)
+#define JSCLASS_RESERVED_SLOTS(clasp) (((clasp)->flags \
+ >> JSCLASS_RESERVED_SLOTS_SHIFT) \
+ & JSCLASS_RESERVED_SLOTS_MASK)
+
+#define JSCLASS_HIGH_FLAGS_SHIFT (JSCLASS_RESERVED_SLOTS_SHIFT + \
+ JSCLASS_RESERVED_SLOTS_WIDTH)
+
+#define JSCLASS_IS_ANONYMOUS (1<<(JSCLASS_HIGH_FLAGS_SHIFT+0))
+#define JSCLASS_IS_GLOBAL (1<<(JSCLASS_HIGH_FLAGS_SHIFT+1))
+#define JSCLASS_INTERNAL_FLAG2 (1<<(JSCLASS_HIGH_FLAGS_SHIFT+2))
+#define JSCLASS_INTERNAL_FLAG3 (1<<(JSCLASS_HIGH_FLAGS_SHIFT+3))
+
+#define JSCLASS_IS_PROXY (1<<(JSCLASS_HIGH_FLAGS_SHIFT+4))
+
+#define JSCLASS_SKIP_NURSERY_FINALIZE (1<<(JSCLASS_HIGH_FLAGS_SHIFT+5))
+
+// Reserved for embeddings.
+#define JSCLASS_USERBIT2 (1<<(JSCLASS_HIGH_FLAGS_SHIFT+6))
+#define JSCLASS_USERBIT3 (1<<(JSCLASS_HIGH_FLAGS_SHIFT+7))
+
+#define JSCLASS_BACKGROUND_FINALIZE (1<<(JSCLASS_HIGH_FLAGS_SHIFT+8))
+#define JSCLASS_FOREGROUND_FINALIZE (1<<(JSCLASS_HIGH_FLAGS_SHIFT+9))
+
+// Bits 26 through 31 are reserved for the CACHED_PROTO_KEY mechanism, see
+// below.
+
+// ECMA-262 requires that most constructors used internally create objects
+// with "the original Foo.prototype value" as their [[Prototype]] (__proto__)
+// member initial value. The "original ... value" verbiage is there because
+// in ECMA-262, global properties naming class objects are read/write and
+// deleteable, for the most part.
+//
+// Implementing this efficiently requires that global objects have classes
+// with the following flags. Failure to use JSCLASS_GLOBAL_FLAGS was
+// previously allowed, but is now an ES5 violation and thus unsupported.
+//
+// JSCLASS_GLOBAL_APPLICATION_SLOTS is the number of slots reserved at
+// the beginning of every global object's slots for use by the
+// application.
+#define JSCLASS_GLOBAL_APPLICATION_SLOTS 5
+#define JSCLASS_GLOBAL_SLOT_COUNT \
+ (JSCLASS_GLOBAL_APPLICATION_SLOTS + JSProto_LIMIT * 2 + 39)
+#define JSCLASS_GLOBAL_FLAGS_WITH_SLOTS(n) \
+ (JSCLASS_IS_GLOBAL | JSCLASS_HAS_RESERVED_SLOTS(JSCLASS_GLOBAL_SLOT_COUNT + (n)))
+#define JSCLASS_GLOBAL_FLAGS \
+ JSCLASS_GLOBAL_FLAGS_WITH_SLOTS(0)
+#define JSCLASS_HAS_GLOBAL_FLAG_AND_SLOTS(clasp) \
+ (((clasp)->flags & JSCLASS_IS_GLOBAL) \
+ && JSCLASS_RESERVED_SLOTS(clasp) >= JSCLASS_GLOBAL_SLOT_COUNT)
+
+// Fast access to the original value of each standard class's prototype.
+#define JSCLASS_CACHED_PROTO_SHIFT (JSCLASS_HIGH_FLAGS_SHIFT + 10)
+#define JSCLASS_CACHED_PROTO_MASK JS_BITMASK(js::JSCLASS_CACHED_PROTO_WIDTH)
+#define JSCLASS_HAS_CACHED_PROTO(key) (uint32_t(key) << JSCLASS_CACHED_PROTO_SHIFT)
+#define JSCLASS_CACHED_PROTO_KEY(clasp) ((JSProtoKey) \
+ (((clasp)->flags \
+ >> JSCLASS_CACHED_PROTO_SHIFT) \
+ & JSCLASS_CACHED_PROTO_MASK))
+
+// Initializer for unused members of statically initialized JSClass structs.
+#define JSCLASS_NO_INTERNAL_MEMBERS {0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}
+#define JSCLASS_NO_OPTIONAL_MEMBERS 0,0,0,0,0,JSCLASS_NO_INTERNAL_MEMBERS
+
+namespace js {
+
+struct Class
+{
+ JS_CLASS_MEMBERS(js::ClassOps, FreeOp);
+ const ClassSpec* spec;
+ const ClassExtension* ext;
+ const ObjectOps* oOps;
+
+ /*
+ * Objects of this class aren't native objects. They don't have Shapes that
+ * describe their properties and layout. Classes using this flag must
+ * provide their own property behavior, either by being proxy classes (do
+ * this) or by overriding all the ObjectOps except getElements, watch and
+ * unwatch (don't do this).
+ */
+ static const uint32_t NON_NATIVE = JSCLASS_INTERNAL_FLAG2;
+
+ bool isNative() const {
+ return !(flags & NON_NATIVE);
+ }
+
+ bool hasPrivate() const {
+ return !!(flags & JSCLASS_HAS_PRIVATE);
+ }
+
+ bool emulatesUndefined() const {
+ return flags & JSCLASS_EMULATES_UNDEFINED;
+ }
+
+ bool isJSFunction() const {
+ return this == js::FunctionClassPtr;
+ }
+
+ bool nonProxyCallable() const {
+ MOZ_ASSERT(!isProxy());
+ return isJSFunction() || getCall();
+ }
+
+ bool isProxy() const {
+ return flags & JSCLASS_IS_PROXY;
+ }
+
+ bool isDOMClass() const {
+ return flags & JSCLASS_IS_DOMJSCLASS;
+ }
+
+ bool shouldDelayMetadataBuilder() const {
+ return flags & JSCLASS_DELAY_METADATA_BUILDER;
+ }
+
+ bool isWrappedNative() const {
+ return flags & JSCLASS_IS_WRAPPED_NATIVE;
+ }
+
+ static size_t offsetOfFlags() { return offsetof(Class, flags); }
+
+ bool specDefined() const { return spec ? spec->defined() : false; }
+ JSProtoKey specInheritanceProtoKey()
+ const { return spec ? spec->inheritanceProtoKey() : JSProto_Null; }
+ bool specShouldDefineConstructor()
+ const { return spec ? spec->shouldDefineConstructor() : true; }
+ ClassObjectCreationOp specCreateConstructorHook()
+ const { return spec ? spec->createConstructorHook() : nullptr; }
+ ClassObjectCreationOp specCreatePrototypeHook()
+ const { return spec ? spec->createPrototypeHook() : nullptr; }
+ const JSFunctionSpec* specConstructorFunctions()
+ const { return spec ? spec->constructorFunctions() : nullptr; }
+ const JSPropertySpec* specConstructorProperties()
+ const { return spec ? spec->constructorProperties() : nullptr; }
+ const JSFunctionSpec* specPrototypeFunctions()
+ const { return spec ? spec->prototypeFunctions() : nullptr; }
+ const JSPropertySpec* specPrototypeProperties()
+ const { return spec ? spec->prototypeProperties() : nullptr; }
+ FinishClassInitOp specFinishInitHook()
+ const { return spec ? spec->finishInitHook() : nullptr; }
+
+ JSWeakmapKeyDelegateOp extWeakmapKeyDelegateOp()
+ const { return ext ? ext->weakmapKeyDelegateOp : nullptr; }
+ JSObjectMovedOp extObjectMovedOp()
+ const { return ext ? ext->objectMovedOp : nullptr; }
+
+ LookupPropertyOp getOpsLookupProperty() const { return oOps ? oOps->lookupProperty : nullptr; }
+ DefinePropertyOp getOpsDefineProperty() const { return oOps ? oOps->defineProperty : nullptr; }
+ HasPropertyOp getOpsHasProperty() const { return oOps ? oOps->hasProperty : nullptr; }
+ GetPropertyOp getOpsGetProperty() const { return oOps ? oOps->getProperty : nullptr; }
+ SetPropertyOp getOpsSetProperty() const { return oOps ? oOps->setProperty : nullptr; }
+ GetOwnPropertyOp getOpsGetOwnPropertyDescriptor()
+ const { return oOps ? oOps->getOwnPropertyDescriptor
+ : nullptr; }
+ DeletePropertyOp getOpsDeleteProperty() const { return oOps ? oOps->deleteProperty : nullptr; }
+ WatchOp getOpsWatch() const { return oOps ? oOps->watch : nullptr; }
+ UnwatchOp getOpsUnwatch() const { return oOps ? oOps->unwatch : nullptr; }
+ GetElementsOp getOpsGetElements() const { return oOps ? oOps->getElements : nullptr; }
+ JSNewEnumerateOp getOpsEnumerate() const { return oOps ? oOps->enumerate : nullptr; }
+ JSFunToStringOp getOpsFunToString() const { return oOps ? oOps->funToString : nullptr; }
+};
+
+static_assert(offsetof(JSClassOps, addProperty) == offsetof(ClassOps, addProperty),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, delProperty) == offsetof(ClassOps, delProperty),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, getProperty) == offsetof(ClassOps, getProperty),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, setProperty) == offsetof(ClassOps, setProperty),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, enumerate) == offsetof(ClassOps, enumerate),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, resolve) == offsetof(ClassOps, resolve),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, mayResolve) == offsetof(ClassOps, mayResolve),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, finalize) == offsetof(ClassOps, finalize),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, call) == offsetof(ClassOps, call),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, construct) == offsetof(ClassOps, construct),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, hasInstance) == offsetof(ClassOps, hasInstance),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(offsetof(JSClassOps, trace) == offsetof(ClassOps, trace),
+ "ClassOps and JSClassOps must be consistent");
+static_assert(sizeof(JSClassOps) == sizeof(ClassOps),
+ "ClassOps and JSClassOps must be consistent");
+
+static_assert(offsetof(JSClass, name) == offsetof(Class, name),
+ "Class and JSClass must be consistent");
+static_assert(offsetof(JSClass, flags) == offsetof(Class, flags),
+ "Class and JSClass must be consistent");
+static_assert(offsetof(JSClass, cOps) == offsetof(Class, cOps),
+ "Class and JSClass must be consistent");
+static_assert(sizeof(JSClass) == sizeof(Class),
+ "Class and JSClass must be consistent");
+
+static MOZ_ALWAYS_INLINE const JSClass*
+Jsvalify(const Class* c)
+{
+ return (const JSClass*)c;
+}
+
+static MOZ_ALWAYS_INLINE const Class*
+Valueify(const JSClass* c)
+{
+ return (const Class*)c;
+}
+
+/**
+ * Enumeration describing possible values of the [[Class]] internal property
+ * value of objects.
+ */
+enum class ESClass {
+ Object,
+ Array,
+ Number,
+ String,
+ Boolean,
+ RegExp,
+ ArrayBuffer,
+ SharedArrayBuffer,
+ Date,
+ Set,
+ Map,
+ Promise,
+ MapIterator,
+ SetIterator,
+ Arguments,
+ Error,
+
+ /** None of the above. */
+ Other
+};
+
+/* Fills |vp| with the unboxed value for boxed types, or undefined otherwise. */
+bool
+Unbox(JSContext* cx, JS::HandleObject obj, JS::MutableHandleValue vp);
+
+#ifdef DEBUG
+JS_FRIEND_API(bool)
+HasObjectMovedOp(JSObject* obj);
+#endif
+
+} /* namespace js */
+
+#endif /* js_Class_h */
diff --git a/js/public/Conversions.h b/js/public/Conversions.h
new file mode 100644
index 0000000000..1cee31c561
--- /dev/null
+++ b/js/public/Conversions.h
@@ -0,0 +1,581 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* ECMAScript conversion operations. */
+
+#ifndef js_Conversions_h
+#define js_Conversions_h
+
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/TypeTraits.h"
+
+#include <math.h>
+
+#include "jspubtd.h"
+
+#include "js/RootingAPI.h"
+#include "js/Value.h"
+
+struct JSContext;
+
+namespace js {
+
+/* DO NOT CALL THIS. Use JS::ToBoolean. */
+extern JS_PUBLIC_API(bool)
+ToBooleanSlow(JS::HandleValue v);
+
+/* DO NOT CALL THIS. Use JS::ToNumber. */
+extern JS_PUBLIC_API(bool)
+ToNumberSlow(JSContext* cx, JS::HandleValue v, double* dp);
+
+/* DO NOT CALL THIS. Use JS::ToInt8. */
+extern JS_PUBLIC_API(bool)
+ToInt8Slow(JSContext* cx, JS::HandleValue v, int8_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToUint8. */
+extern JS_PUBLIC_API(bool)
+ToUint8Slow(JSContext* cx, JS::HandleValue v, uint8_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToInt16. */
+extern JS_PUBLIC_API(bool)
+ToInt16Slow(JSContext* cx, JS::HandleValue v, int16_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToInt32. */
+extern JS_PUBLIC_API(bool)
+ToInt32Slow(JSContext* cx, JS::HandleValue v, int32_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToUint32. */
+extern JS_PUBLIC_API(bool)
+ToUint32Slow(JSContext* cx, JS::HandleValue v, uint32_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToUint16. */
+extern JS_PUBLIC_API(bool)
+ToUint16Slow(JSContext* cx, JS::HandleValue v, uint16_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToInt64. */
+extern JS_PUBLIC_API(bool)
+ToInt64Slow(JSContext* cx, JS::HandleValue v, int64_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToUint64. */
+extern JS_PUBLIC_API(bool)
+ToUint64Slow(JSContext* cx, JS::HandleValue v, uint64_t* out);
+
+/* DO NOT CALL THIS. Use JS::ToString. */
+extern JS_PUBLIC_API(JSString*)
+ToStringSlow(JSContext* cx, JS::HandleValue v);
+
+/* DO NOT CALL THIS. Use JS::ToObject. */
+extern JS_PUBLIC_API(JSObject*)
+ToObjectSlow(JSContext* cx, JS::HandleValue v, bool reportScanStack);
+
+} // namespace js
+
+namespace JS {
+
+namespace detail {
+
+#ifdef JS_DEBUG
+/**
+ * Assert that we're not doing GC on cx, that we're in a request as
+ * needed, and that the compartments for cx and v are correct.
+ * Also check that GC would be safe at this point.
+ */
+extern JS_PUBLIC_API(void)
+AssertArgumentsAreSane(JSContext* cx, HandleValue v);
+#else
+/* In release builds the sanity check compiles away to nothing. */
+inline void AssertArgumentsAreSane(JSContext* cx, HandleValue v)
+{}
+#endif /* JS_DEBUG */
+
+} // namespace detail
+
+/**
+ * ES6 draft 20141224, 7.1.1, second algorithm.
+ *
+ * Most users shouldn't call this -- use JS::ToBoolean, ToNumber, or ToString
+ * instead. This will typically only be called from custom convert hooks that
+ * wish to fall back to the ES6 default conversion behavior shared by most
+ * objects in JS, codified as OrdinaryToPrimitive.
+ */
+extern JS_PUBLIC_API(bool)
+OrdinaryToPrimitive(JSContext* cx, HandleObject obj, JSType type, MutableHandleValue vp);
+
+/* ES6 draft 20141224, 7.1.2. */
+// Infallible (note: no JSContext parameter); only strings and objects
+// require the out-of-line slow path.
+MOZ_ALWAYS_INLINE bool
+ToBoolean(HandleValue v)
+{
+    if (v.isBoolean())
+        return v.toBoolean();
+    if (v.isInt32())
+        return v.toInt32() != 0;
+    if (v.isNullOrUndefined())
+        return false;
+    if (v.isDouble()) {
+        double d = v.toDouble();
+        // NaN and both zeroes convert to false.
+        return !mozilla::IsNaN(d) && d != 0;
+    }
+    if (v.isSymbol())
+        return true;
+
+    /* The slow path handles strings and objects. */
+    return js::ToBooleanSlow(v);
+}
+
+/* ES6 draft 20141224, 7.1.3. */
+MOZ_ALWAYS_INLINE bool
+ToNumber(JSContext* cx, HandleValue v, double* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Anything that isn't already a number takes the out-of-line path.
+    if (!v.isNumber())
+        return js::ToNumberSlow(cx, v, out);
+
+    *out = v.toNumber();
+    return true;
+}
+
+/* ES6 draft 20141224, ToInteger (specialized for doubles). */
+inline double
+ToInteger(double d)
+{
+    // NaN maps to +0; zeroes and infinities pass through unchanged
+    // (a zero input keeps its sign).
+    if (mozilla::IsNaN(d))
+        return 0;
+    if (d == 0 || !mozilla::IsFinite(d))
+        return d;
+
+    // Truncate toward zero: floor for positive values, ceil for negative.
+    return d < 0 ? ceil(d) : floor(d);
+}
+
+/* ES6 draft 20141224, 7.1.5. */
+MOZ_ALWAYS_INLINE bool
+ToInt32(JSContext* cx, HandleValue v, int32_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: an int32 payload needs no conversion at all.
+    if (v.isInt32()) {
+        *out = v.toInt32();
+        return true;
+    }
+    return js::ToInt32Slow(cx, v, out);
+}
+
+/* ES6 draft 20141224, 7.1.6. */
+MOZ_ALWAYS_INLINE bool
+ToUint32(JSContext* cx, HandleValue v, uint32_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: reinterpret an int32 payload modulo 2**32.
+    if (v.isInt32()) {
+        *out = uint32_t(v.toInt32());
+        return true;
+    }
+    return js::ToUint32Slow(cx, v, out);
+}
+
+/* ES6 draft 20141224, 7.1.7. */
+MOZ_ALWAYS_INLINE bool
+ToInt16(JSContext* cx, HandleValue v, int16_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: truncate an int32 payload modulo 2**16 (signed).
+    if (v.isInt32()) {
+        *out = int16_t(v.toInt32());
+        return true;
+    }
+    return js::ToInt16Slow(cx, v, out);
+}
+
+/* ES6 draft 20141224, 7.1.8. */
+MOZ_ALWAYS_INLINE bool
+ToUint16(JSContext* cx, HandleValue v, uint16_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: truncate an int32 payload modulo 2**16 (unsigned).
+    if (v.isInt32()) {
+        *out = uint16_t(v.toInt32());
+        return true;
+    }
+    return js::ToUint16Slow(cx, v, out);
+}
+
+/* ES6 draft 20141224, 7.1.9. */
+MOZ_ALWAYS_INLINE bool
+ToInt8(JSContext* cx, HandleValue v, int8_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: truncate an int32 payload modulo 2**8 (signed).
+    if (v.isInt32()) {
+        *out = int8_t(v.toInt32());
+        return true;
+    }
+    return js::ToInt8Slow(cx, v, out);
+}
+
+/* ES6 ECMA-262, 7.1.10. */
+MOZ_ALWAYS_INLINE bool
+ToUint8(JSContext* cx, HandleValue v, uint8_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: truncate an int32 payload modulo 2**8 (unsigned).
+    if (v.isInt32()) {
+        *out = uint8_t(v.toInt32());
+        return true;
+    }
+    return js::ToUint8Slow(cx, v, out);
+}
+
+/*
+ * Non-standard, with behavior similar to that of ToInt32, except in its
+ * producing an int64_t.
+ */
+MOZ_ALWAYS_INLINE bool
+ToInt64(JSContext* cx, HandleValue v, int64_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: an int32 payload widens losslessly.
+    if (v.isInt32()) {
+        *out = int64_t(v.toInt32());
+        return true;
+    }
+    return js::ToInt64Slow(cx, v, out);
+}
+
+/*
+ * Non-standard, with behavior similar to that of ToUint32, except in its
+ * producing a uint64_t.
+ */
+MOZ_ALWAYS_INLINE bool
+ToUint64(JSContext* cx, HandleValue v, uint64_t* out)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    // Fast path: a negative int32 sign-extends, then wraps modulo 2**64.
+    if (v.isInt32()) {
+        *out = uint64_t(v.toInt32());
+        return true;
+    }
+    return js::ToUint64Slow(cx, v, out);
+}
+
+/* ES6 draft 20141224, 7.1.12. */
+// Returns the string form of |v|.  On slow-path failure this presumably
+// returns nullptr — verify against ToStringSlow's implementation.
+MOZ_ALWAYS_INLINE JSString*
+ToString(JSContext* cx, HandleValue v)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    if (v.isString())
+        return v.toString();
+    return js::ToStringSlow(cx, v);
+}
+
+/* ES6 draft 20141224, 7.1.13. */
+inline JSObject*
+ToObject(JSContext* cx, HandleValue v)
+{
+    detail::AssertArgumentsAreSane(cx, v);
+
+    if (v.isObject())
+        return &v.toObject();
+    // |false| is the reportScanStack argument; its exact reporting behavior
+    // lives in ToObjectSlow.
+    return js::ToObjectSlow(cx, v, false);
+}
+
+namespace detail {
+
+/*
+ * Convert a double value to ResultType (an unsigned integral type) using
+ * ECMAScript-style semantics (that is, in like manner to how ECMAScript's
+ * ToInt32 converts to int32_t).
+ *
+ *   If d is infinite or NaN, return 0.
+ *   Otherwise compute d2 = sign(d) * floor(abs(d)), and return the ResultType
+ *   value congruent to d2 mod 2**(bit width of ResultType).
+ *
+ * The algorithm below is inspired by that found in
+ * <http://trac.webkit.org/changeset/67825/trunk/JavaScriptCore/runtime/JSValue.cpp>
+ * but has been generalized to all integer widths.
+ */
+template<typename ResultType>
+inline ResultType
+ToUintWidth(double d)
+{
+    static_assert(mozilla::IsUnsigned<ResultType>::value,
+                  "ResultType must be an unsigned type");
+
+    // Work on the raw IEEE-754 bit pattern; everything below is integral.
+    uint64_t bits = mozilla::BitwiseCast<uint64_t>(d);
+    unsigned DoubleExponentShift = mozilla::FloatingPoint<double>::kExponentShift;
+
+    // Extract the exponent component. (Be careful here! It's not technically
+    // the exponent in NaN, infinities, and subnormals.)
+    int_fast16_t exp =
+        int_fast16_t((bits & mozilla::FloatingPoint<double>::kExponentBits) >> DoubleExponentShift) -
+        int_fast16_t(mozilla::FloatingPoint<double>::kExponentBias);
+
+    // If the exponent's less than zero, abs(d) < 1, so the result is 0. (This
+    // also handles subnormals.)
+    if (exp < 0)
+        return 0;
+
+    uint_fast16_t exponent = mozilla::AssertedCast<uint_fast16_t>(exp);
+
+    // If the exponent is greater than or equal to the bits of precision of a
+    // double plus ResultType's width, the number is either infinite, NaN, or
+    // too large to have lower-order bits in the congruent value. (Example:
+    // 2**84 is exactly representable as a double. The next exact double is
+    // 2**84 + 2**32. Thus if ResultType is int32_t, an exponent >= 84 implies
+    // floor(abs(d)) == 0 mod 2**32.) Return 0 in all these cases.
+    const size_t ResultWidth = CHAR_BIT * sizeof(ResultType);
+    if (exponent >= DoubleExponentShift + ResultWidth)
+        return 0;
+
+    // The significand contains the bits that will determine the final result.
+    // Shift those bits left or right, according to the exponent, to their
+    // locations in the unsigned binary representation of floor(abs(d)).
+    static_assert(sizeof(ResultType) <= sizeof(uint64_t),
+                  "Left-shifting below would lose upper bits");
+    ResultType result = (exponent > DoubleExponentShift)
+                        ? ResultType(bits << (exponent - DoubleExponentShift))
+                        : ResultType(bits >> (DoubleExponentShift - exponent));
+
+    // Two further complications remain. First, |result| may contain bogus
+    // sign/exponent bits. Second, IEEE-754 numbers' significands (excluding
+    // subnormals, but we already handled those) have an implicit leading 1
+    // which may affect the final result.
+    //
+    // It may appear that there's complexity here depending on how ResultWidth
+    // and DoubleExponentShift relate, but it turns out there's not.
+    //
+    // Assume ResultWidth < DoubleExponentShift:
+    //   Only right-shifts leave bogus bits in |result|. For this to happen,
+    //   we must right-shift by > |DoubleExponentShift - ResultWidth|, implying
+    //   |exponent < ResultWidth|.
+    //   The implicit leading bit only matters if it appears in the final
+    //   result -- if |2**exponent mod 2**ResultWidth != 0|. This implies
+    //   |exponent < ResultWidth|.
+    // Otherwise assume ResultWidth >= DoubleExponentShift:
+    //   Any left-shift less than |ResultWidth - DoubleExponentShift| leaves
+    //   bogus bits in |result|. This implies |exponent < ResultWidth|. Any
+    //   right-shift less than |ResultWidth| does too, which implies
+    //   |DoubleExponentShift - ResultWidth < exponent|. By assumption, then,
+    //   |exponent| is negative, but we excluded that above. So bogus bits
+    //   need only |exponent < ResultWidth|.
+    //   The implicit leading bit matters identically to the other case, so
+    //   again, |exponent < ResultWidth|.
+    if (exponent < ResultWidth) {
+        ResultType implicitOne = ResultType(1) << exponent;
+        result &= implicitOne - 1; // remove bogus bits
+        result += implicitOne; // add the implicit bit
+    }
+
+    // If the sign bit was set, negate in two's complement to produce the
+    // value congruent to -floor(abs(d)) mod 2**ResultWidth.
+    return (bits & mozilla::FloatingPoint<double>::kSignBit) ? ~result + 1 : result;
+}
+
+template<typename ResultType>
+inline ResultType
+ToIntWidth(double d)
+{
+    static_assert(mozilla::IsSigned<ResultType>::value,
+                  "ResultType must be a signed type");
+
+    // Computed in uint64_t arithmetic (1ULL) so the shift is well-defined for
+    // every ResultType up to 64 bits wide.
+    const ResultType MaxValue = (1ULL << (CHAR_BIT * sizeof(ResultType) - 1)) - 1;
+    const ResultType MinValue = -MaxValue - 1;
+
+    typedef typename mozilla::MakeUnsigned<ResultType>::Type UnsignedResult;
+    UnsignedResult u = ToUintWidth<UnsignedResult>(d);
+    // Values at or below MaxValue map directly; anything larger represents a
+    // negative number in two's complement.
+    if (u <= UnsignedResult(MaxValue))
+        return static_cast<ResultType>(u);
+    // Carefully ordered so every intermediate value fits in ResultType
+    // without triggering signed overflow (which would be UB).
+    return (MinValue + static_cast<ResultType>(u - MaxValue)) - 1;
+}
+
+} // namespace detail
+
+/* ES5 9.5 ToInt32 (specialized for doubles). */
+inline int32_t
+ToInt32(double d)
+{
+    // clang crashes compiling this when targeting arm:
+    // https://llvm.org/bugs/show_bug.cgi?id=22974
+#if defined (__arm__) && defined (__GNUC__) && !defined(__clang__)
+    int32_t i;
+    uint32_t tmp0;
+    uint32_t tmp1;
+    uint32_t tmp2;
+    asm (
+    // We use a pure integer solution here. In the 'softfp' ABI, the argument
+    // will start in r0 and r1, and VFP can't do all of the necessary ECMA
+    // conversions by itself so some integer code will be required anyway. A
+    // hybrid solution is faster on A9, but this pure integer solution is
+    // notably faster for A8.
+
+    // %0 is the result register, and may alias either of the %[QR]1 registers.
+    // %Q4 holds the lower part of the mantissa.
+    // %R4 holds the sign, exponent, and the upper part of the mantissa.
+    // %1, %2 and %3 are used as temporary values.
+
+    // Extract the exponent.
+" mov %1, %R4, LSR #20\n"
+" bic %1, %1, #(1 << 11)\n" // Clear the sign.
+
+    // Set the implicit top bit of the mantissa. This clobbers a bit of the
+    // exponent, but we have already extracted that.
+" orr %R4, %R4, #(1 << 20)\n"
+
+    // Special Cases
+    // We should return zero in the following special cases:
+    // - Exponent is 0x000 - 1023: +/-0 or subnormal.
+    // - Exponent is 0x7ff - 1023: +/-INFINITY or NaN
+    //   - This case is implicitly handled by the standard code path anyway,
+    //     as shifting the mantissa up by the exponent will result in '0'.
+    //
+    // The result is composed of the mantissa, prepended with '1' and
+    // bit-shifted left by the (decoded) exponent. Note that because the r1[20]
+    // is the bit with value '1', r1 is effectively already shifted (left) by
+    // 20 bits, and r0 is already shifted by 52 bits.
+
+    // Adjust the exponent to remove the encoding offset. If the decoded
+    // exponent is negative, quickly bail out with '0' as such values round to
+    // zero anyway. This also catches +/-0 and subnormals.
+" sub %1, %1, #0xff\n"
+" subs %1, %1, #0x300\n"
+" bmi 8f\n"
+
+    //  %1 = (decoded) exponent >= 0
+    //  %R4 = upper mantissa and sign
+
+    // ---- Lower Mantissa ----
+" subs %3, %1, #52\n" // Calculate exp-52
+" bmi 1f\n"
+
+    // Shift r0 left by exp-52.
+    // Ensure that we don't overflow ARM's 8-bit shift operand range.
+    // We need to handle anything up to an 11-bit value here as we know that
+    // 52 <= exp <= 1024 (0x400). Any shift beyond 31 bits results in zero
+    // anyway, so as long as we don't touch the bottom 5 bits, we can use
+    // a logical OR to push long shifts into the 32 <= (exp&0xff) <= 255 range.
+" bic %2, %3, #0xff\n"
+" orr %3, %3, %2, LSR #3\n"
+    // We can now perform a straight shift, avoiding the need for any
+    // conditional instructions or extra branches.
+" mov %Q4, %Q4, LSL %3\n"
+" b 2f\n"
+"1:\n" // Shift r0 right by 52-exp.
+    // We know that 0 <= exp < 52, and we can shift up to 255 bits so 52-exp
+    // will always be a valid shift and we can skip the range check for this case.
+" rsb %3, %1, #52\n"
+" mov %Q4, %Q4, LSR %3\n"
+
+    //  %1 = (decoded) exponent
+    //  %R4 = upper mantissa and sign
+    //  %Q4 = partially-converted integer
+
+"2:\n"
+    // ---- Upper Mantissa ----
+    // This is much the same as the lower mantissa, with a few different
+    // boundary checks and some masking to hide the exponent & sign bit in the
+    // upper word.
+    // Note that the upper mantissa is pre-shifted by 20 in %R4, but we shift
+    // it left more to remove the sign and exponent so it is effectively
+    // pre-shifted by 31 bits.
+" subs %3, %1, #31\n" // Calculate exp-31
+" mov %1, %R4, LSL #11\n" // Re-use %1 as a temporary register.
+" bmi 3f\n"
+
+    // Shift %R4 left by exp-31.
+    // Avoid overflowing the 8-bit shift range, as before.
+" bic %2, %3, #0xff\n"
+" orr %3, %3, %2, LSR #3\n"
+    // Perform the shift.
+" mov %2, %1, LSL %3\n"
+" b 4f\n"
+"3:\n" // Shift r1 right by 31-exp.
+    // We know that 0 <= exp < 31, and we can shift up to 255 bits so 31-exp
+    // will always be a valid shift and we can skip the range check for this case.
+" rsb %3, %3, #0\n" // Calculate 31-exp from -(exp-31)
+" mov %2, %1, LSR %3\n" // Thumb-2 can't do "LSR %3" in "orr".
+
+    //  %Q4 = partially-converted integer (lower)
+    //  %R4 = upper mantissa and sign
+    //  %2 = partially-converted integer (upper)
+
+"4:\n"
+    // Combine the converted parts.
+" orr %Q4, %Q4, %2\n"
+    // Negate the result if we have to, and move it to %0 in the process. To
+    // avoid conditionals, we can do this by inverting on %R4[31], then adding
+    // %R4[31]>>31.
+" eor %Q4, %Q4, %R4, ASR #31\n"
+" add %0, %Q4, %R4, LSR #31\n"
+" b 9f\n"
+"8:\n"
+    // +/-INFINITY, +/-0, subnormals, NaNs, and anything else out-of-range that
+    // will result in a conversion of '0'.
+" mov %0, #0\n"
+"9:\n"
+    : "=r" (i), "=&r" (tmp0), "=&r" (tmp1), "=&r" (tmp2), "=&r" (d)
+    : "4" (d)
+    : "cc"
+    );
+    return i;
+#else
+    // Portable path: modular conversion via the generalized helper.
+    return detail::ToIntWidth<int32_t>(d);
+#endif
+}
+
+/* ES5 9.6 (specialized for doubles). */
+inline uint32_t
+ToUint32(double d)
+{
+    // Modular conversion: NaN and infinities map to 0; see ToUintWidth.
+    return detail::ToUintWidth<uint32_t>(d);
+}
+
+/* WEBIDL 4.2.4 */
+inline int8_t
+ToInt8(double d)
+{
+    // Wraps modulo 2**8 into the signed range; NaN and infinities map to 0.
+    return detail::ToIntWidth<int8_t>(d);
+}
+
+/* ECMA-262 7.1.10 ToUint8() specialized for doubles. */
+inline uint8_t
+ToUint8(double d)
+{
+    // Return type is uint8_t to match the unsigned conversion performed
+    // below (the earlier int8_t declaration contradicted it).
+    return detail::ToUintWidth<uint8_t>(d);
+}
+
+/* WEBIDL 4.2.6 */
+inline int16_t
+ToInt16(double d)
+{
+    // Wraps modulo 2**16 into the signed range; NaN and infinities map to 0.
+    return detail::ToIntWidth<int16_t>(d);
+}
+
+/* ToUint16 specialized for doubles (cf. ES6 draft 20141224, 7.1.8). */
+inline uint16_t
+ToUint16(double d)
+{
+    return detail::ToUintWidth<uint16_t>(d);
+}
+
+/* WEBIDL 4.2.10 */
+inline int64_t
+ToInt64(double d)
+{
+    // Wraps modulo 2**64 into the signed range; NaN and infinities map to 0.
+    return detail::ToIntWidth<int64_t>(d);
+}
+
+/* WEBIDL 4.2.11 */
+inline uint64_t
+ToUint64(double d)
+{
+    // Modular conversion to the full unsigned 64-bit range.
+    return detail::ToUintWidth<uint64_t>(d);
+}
+
+} // namespace JS
+
+#endif /* js_Conversions_h */
diff --git a/js/public/Date.h b/js/public/Date.h
new file mode 100644
index 0000000000..cba0ea875d
--- /dev/null
+++ b/js/public/Date.h
@@ -0,0 +1,170 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JavaScript date/time computation and creation functions. */
+
+#ifndef js_Date_h
+#define js_Date_h
+
+/*
+ * Dates in JavaScript are defined by IEEE-754 double precision numbers from
+ * the set:
+ *
+ * { t ∈ ℕ : -8.64e15 ≤ t ≤ +8.64e15 } ∪ { NaN }
+ *
+ * The single NaN value represents any invalid-date value. All other values
+ * represent idealized durations in milliseconds since the UTC epoch. (Leap
+ * seconds are ignored; leap days are not.) +0 is the only zero in this set.
+ * The limit represented by 8.64e15 milliseconds is 100 million days either
+ * side of 00:00 January 1, 1970 UTC.
+ *
+ * Dates in the above set are represented by the |ClippedTime| class. The
+ * double type is a superset of the above set, so it *may* (but need not)
+ * represent a date. Use ECMAScript's |TimeClip| method to produce a date from
+ * a double.
+ *
+ * Date *objects* are simply wrappers around |TimeClip|'d numbers, with a bunch
+ * of accessor methods to the various aspects of the represented date.
+ */
+
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/MathAlgorithms.h"
+
+#include "js/Conversions.h"
+#include "js/Value.h"
+
+struct JSContext;
+
+namespace JS {
+
+/**
+ * Re-query the system to determine the current time zone adjustment from UTC,
+ * including any component due to DST. If the time zone has changed, this will
+ * cause all Date object non-UTC methods and formatting functions to produce
+ * appropriately adjusted results.
+ *
+ * Left to its own devices, SpiderMonkey itself may occasionally call this
+ * method to attempt to keep up with system time changes. However, no
+ * particular frequency of checking is guaranteed. Embedders unable to accept
+ * occasional inaccuracies should call this method in response to system time
+ * changes, or immediately before operations requiring instantaneous
+ * correctness, to guarantee correct behavior.
+ */
+extern JS_PUBLIC_API(void)
+ResetTimeZone();
+
+class ClippedTime;
+inline ClippedTime TimeClip(double time);
+
+/*
+ * |ClippedTime| represents the limited subset of dates/times described above.
+ *
+ * An invalid date/time may be created through the |ClippedTime::invalid|
+ * method. Otherwise, a |ClippedTime| may be created using the |TimeClip|
+ * method.
+ *
+ * In typical use, the user might wish to manipulate a timestamp. The user
+ * performs a series of operations on it, but the final value might not be a
+ * date as defined above -- it could have overflowed, acquired a fractional
+ * component, &c. So as a *final* step, the user passes that value through
+ * |TimeClip| to produce a number restricted to JavaScript's date range.
+ *
+ * APIs that accept a JavaScript date value thus accept a |ClippedTime|, not a
+ * double. This ensures that date/time APIs will only ever receive acceptable
+ * JavaScript dates. This also forces users to perform any desired clipping,
+ * as only the user knows what behavior is desired when clipping occurs.
+ */
+class ClippedTime
+{
+    // Milliseconds since the UTC epoch; NaN marks the invalid date.
+    double ms;
+
+    explicit ClippedTime(double time) : ms(time) {}
+    friend ClippedTime TimeClip(double time);
+
+  public:
+    // The no-argument form produces an invalid date; prefer the named
+    // factory below when that is what you mean.
+    ClippedTime() : ms(mozilla::UnspecifiedNaN<double>()) {}
+
+    // Explicitly-named constructor for an invalid date/time.
+    static ClippedTime invalid() { return ClippedTime(); }
+
+    // A valid ClippedTime holds any non-NaN (clipped) millisecond count.
+    bool isValid() const { return !mozilla::IsNaN(ms); }
+
+    double toDouble() const { return ms; }
+};
+
+// ES6 20.3.1.15.
+//
+// Clip a double to JavaScript's date range (or to an invalid date) using the
+// ECMAScript TimeClip algorithm.
+inline ClippedTime
+TimeClip(double time)
+{
+    // Steps 1-2: non-finite values and magnitudes beyond 8.64e15 ms
+    // (100 million days either side of the epoch) become the invalid date.
+    const double MaxTimeMagnitude = 8.64e15;
+    if (!mozilla::IsFinite(time) || mozilla::Abs(time) > MaxTimeMagnitude)
+        return ClippedTime(mozilla::UnspecifiedNaN<double>());
+
+    // Step 3: truncate toward zero; adding +0.0 normalizes a -0 result to
+    // +0, the only zero in the date value set.
+    return ClippedTime(ToInteger(time) + (+0.0));
+}
+
+// Produce a double Value from the given time. Because times may be NaN,
+// prefer using this to manual canonicalization: CanonicalizeNaN ensures only
+// the canonical NaN bit pattern is stored in the Value.
+inline Value
+TimeValue(ClippedTime time)
+{
+    return DoubleValue(JS::CanonicalizeNaN(time.toDouble()));
+}
+
+// Create a new Date object whose [[DateValue]] internal slot contains the
+// clipped |time|. (Users who must represent times outside that range must use
+// another representation.)
+extern JS_PUBLIC_API(JSObject*)
+NewDateObject(JSContext* cx, ClippedTime time);
+
+// Year is a year, month is 0-11, day is 1-based. The return value is a number
+// of milliseconds since the epoch.
+//
+// Consistent with the MakeDate algorithm defined in ECMAScript, this value is
+// *not* clipped! Use JS::TimeClip if you need a clipped date.
+JS_PUBLIC_API(double)
+MakeDate(double year, unsigned month, unsigned day);
+
+// Takes an integer number of milliseconds since the epoch and returns the
+// year. Can return NaN, and will do so if NaN is passed in.
+JS_PUBLIC_API(double)
+YearFromTime(double time);
+
+// Takes an integer number of milliseconds since the epoch and returns the
+// month (0-11). Can return NaN, and will do so if NaN is passed in.
+JS_PUBLIC_API(double)
+MonthFromTime(double time);
+
+// Takes an integer number of milliseconds since the epoch and returns the
+// day (1-based). Can return NaN, and will do so if NaN is passed in.
+JS_PUBLIC_API(double)
+DayFromTime(double time);
+
+// Takes an integer year and returns the number of days from epoch to the given
+// year.
+// NOTE: The calculation performed by this function is literally that given in
+// the ECMAScript specification. Nonfinite years, years containing fractional
+// components, and years outside ECMAScript's date range are not handled with
+// any particular intelligence. Garbage in, garbage out.
+JS_PUBLIC_API(double)
+DayFromYear(double year);
+
+// Takes an integer number of milliseconds since the epoch and an integer year,
+// returns the number of days in that year. If |time| is nonfinite, returns NaN.
+// Otherwise |time| *must* correspond to a time within the valid year |year|.
+// This should usually be ensured by computing |year| as |JS::DayFromYear(time)|.
+JS_PUBLIC_API(double)
+DayWithinYear(double time, double year);
+
+} // namespace JS
+
+#endif /* js_Date_h */
diff --git a/js/public/Debug.h b/js/public/Debug.h
new file mode 100644
index 0000000000..9ebc38d4a4
--- /dev/null
+++ b/js/public/Debug.h
@@ -0,0 +1,384 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// Interfaces by which the embedding can interact with the Debugger API.
+
+#ifndef js_Debug_h
+#define js_Debug_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/MemoryReporting.h"
+
+#include "jsapi.h"
+#include "jspubtd.h"
+
+#include "js/GCAPI.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+
+namespace js {
+class Debugger;
+} // namespace js
+
+namespace JS {
+namespace dbg {
+
+// Helping embedding code build objects for Debugger
+// -------------------------------------------------
+//
+// Some Debugger API features lean on the embedding application to construct
+// their result values. For example, Debugger.Frame.prototype.scriptEntryReason
+// calls hooks provided by the embedding to construct values explaining why it
+// invoked JavaScript; if F is a frame called from a mouse click event handler,
+// F.scriptEntryReason would return an object of the form:
+//
+// { eventType: "mousedown", event: <object> }
+//
+// where <object> is a Debugger.Object whose referent is the event being
+// dispatched.
+//
+// However, Debugger implements a trust boundary. Debuggee code may be
+// considered untrusted; debugger code needs to be protected from debuggee
+// getters, setters, proxies, Object.watch watchpoints, and any other feature
+// that might accidentally cause debugger code to set the debuggee running. The
+// Debugger API tries to make it easy to write safe debugger code by only
+// offering access to debuggee objects via Debugger.Object instances, which
+// ensure that only those operations whose explicit purpose is to invoke
+// debuggee code do so. But this protective membrane is only helpful if we
+// interpose Debugger.Object instances in all the necessary spots.
+//
+// SpiderMonkey's compartment system also implements a trust boundary. The
+// debuggee and debugger are always in different compartments. Inter-compartment
+// work requires carefully tracking which compartment each JSObject or JS::Value
+// belongs to, and ensuring that it is correctly wrapped for each operation.
+//
+// It seems precarious to expect the embedding's hooks to implement these trust
+// boundaries. Instead, the JS::dbg::Builder API segregates the code which
+// constructs trusted objects from that which deals with untrusted objects.
+// Trusted objects have an entirely different C++ type, so code that improperly
+// mixes trusted and untrusted objects is caught at compile time.
+//
+// In the structure shown above, there are two trusted objects, and one
+// untrusted object:
+//
+// - The overall object, with the 'eventType' and 'event' properties, is a
+// trusted object. We're going to return it to D.F.p.scriptEntryReason's
+// caller, which will handle it directly.
+//
+// - The Debugger.Object instance appearing as the value of the 'event' property
+// is a trusted object. It belongs to the same Debugger instance as the
+// Debugger.Frame instance whose scriptEntryReason accessor was called, and
+// presents a safe reflection-oriented API for inspecting its referent, which
+// is:
+//
+// - The actual event object, an untrusted object, and the referent of the
+// Debugger.Object above. (Content can do things like replacing accessors on
+// Event.prototype.)
+//
+// Using JS::dbg::Builder, all objects and values the embedding deals with
+// directly are considered untrusted, and are assumed to be debuggee values. The
+// only way to construct trusted objects is to use Builder's own methods, which
+// return a separate Object type. The only way to set a property on a trusted
+// object is through that Object type. The actual trusted object is never
+// exposed to the embedding.
+//
+// So, for example, the embedding might use code like the following to construct
+// the object shown above, given a Builder passed to it by Debugger:
+//
+// bool
+// MyScriptEntryReason::explain(JSContext* cx,
+// Builder& builder,
+// Builder::Object& result)
+// {
+// JSObject* eventObject = ... obtain debuggee event object somehow ...;
+// if (!eventObject)
+// return false;
+// result = builder.newObject(cx);
+// return result &&
+// result.defineProperty(cx, "eventType", SafelyFetchType(eventObject)) &&
+// result.defineProperty(cx, "event", eventObject);
+// }
+//
+//
+// Object::defineProperty also accepts an Object as the value to store on the
+// property. By its type, we know that the value is trusted, so we set it
+// directly as the property's value, without interposing a Debugger.Object
+// wrapper. This allows the embedding to build nested structures of trusted
+// objects.
+//
+// The Builder and Builder::Object methods take care of doing whatever
+// compartment switching and wrapping are necessary to construct the trusted
+// values in the Debugger's compartment.
+//
+// The Object type is self-rooting. Construction, assignment, and destruction
+// all properly root the referent object.
+
+class BuilderOrigin;
+
+class Builder {
+    // The Debugger instance whose client we are building a value for. We build
+    // objects in this object's compartment.
+    PersistentRootedObject debuggerObject;
+
+    // debuggerObject's Debugger structure, for convenience.
+    js::Debugger* debugger;
+
+    // Check that |thing| is in the same compartment as our debuggerObject. Used
+    // for assertions when constructing BuiltThings. We can overload this as we
+    // add more instantiations of BuiltThing.
+    // NOTE(review): this is "#if DEBUG", not "#ifdef DEBUG" — it relies on
+    // DEBUG being defined to a nonzero value in debug builds; confirm against
+    // the build configuration (Conversions.h uses JS_DEBUG instead).
+#if DEBUG
+    void assertBuilt(JSObject* obj);
+#else
+    void assertBuilt(JSObject* obj) { }
+#endif
+
+  protected:
+    // A reference to a trusted object or value. At the moment, we only use it
+    // with JSObject*.
+    template<typename T>
+    class BuiltThing {
+        friend class BuilderOrigin;
+
+      protected:
+        // The Builder to which this trusted thing belongs.
+        Builder& owner;
+
+        // A rooted reference to our value.
+        PersistentRooted<T> value;
+
+        BuiltThing(JSContext* cx, Builder& owner_, T value_ = GCPolicy<T>::initial())
+          : owner(owner_), value(cx, value_)
+        {
+            owner.assertBuilt(value_);
+        }
+
+        // Forward some things from our owner, for convenience.
+        js::Debugger* debugger() const { return owner.debugger; }
+        JSObject* debuggerObject() const { return owner.debuggerObject; }
+
+      public:
+        BuiltThing(const BuiltThing& rhs) : owner(rhs.owner), value(rhs.value) { }
+        BuiltThing& operator=(const BuiltThing& rhs) {
+            MOZ_ASSERT(&owner == &rhs.owner);
+            owner.assertBuilt(rhs.value);
+            value = rhs.value;
+            return *this;
+        }
+
+        explicit operator bool() const {
+            // If we ever instantiate BuiltThing<Value>, this might not suffice.
+            return value;
+        }
+
+      private:
+        BuiltThing() = delete;
+    };
+
+  public:
+    // A reference to a trusted object, possibly null. Instances of Object are
+    // always properly rooted. They can be copied and assigned, as if they were
+    // pointers.
+    class Object: private BuiltThing<JSObject*> {
+        friend class Builder;        // for construction
+        friend class BuilderOrigin;  // for unwrapping
+
+        typedef BuiltThing<JSObject*> Base;
+
+        // This is private, because only Builders can create Objects that
+        // actually point to something (hence the 'friend' declaration).
+        Object(JSContext* cx, Builder& owner_, HandleObject obj) : Base(cx, owner_, obj.get()) { }
+
+        bool definePropertyToTrusted(JSContext* cx, const char* name,
+                                     JS::MutableHandleValue value);
+
+      public:
+        Object(JSContext* cx, Builder& owner_) : Base(cx, owner_, nullptr) { }
+        Object(const Object& rhs) : Base(rhs) { }
+
+        // Our automatically-generated assignment operator can see our base
+        // class's assignment operator, so we don't need to write one out here.
+
+        // Set the property named |name| on this object to |value|.
+        //
+        // If |value| is a string or primitive, re-wrap it for the debugger's
+        // compartment.
+        //
+        // If |value| is an object, assume it is a debuggee object and make a
+        // Debugger.Object instance referring to it. Set that as the property's
+        // value.
+        //
+        // If |value| is another trusted object, store it directly as the
+        // property's value.
+        //
+        // On error, report the problem on cx and return false.
+        bool defineProperty(JSContext* cx, const char* name, JS::HandleValue value);
+        bool defineProperty(JSContext* cx, const char* name, JS::HandleObject value);
+        bool defineProperty(JSContext* cx, const char* name, Object& value);
+
+        using Base::operator bool;
+    };
+
+    // Build an empty object for direct use by debugger code, owned by this
+    // Builder. If an error occurs, report it on cx and return a false Object.
+    Object newObject(JSContext* cx);
+
+  protected:
+    Builder(JSContext* cx, js::Debugger* debugger);
+};
+
+// Debugger itself instantiates this subclass of Builder, which can unwrap
+// BuiltThings that belong to it.
+class BuilderOrigin : public Builder {
+ template<typename T>
+ T unwrapAny(const BuiltThing<T>& thing) {
+ MOZ_ASSERT(&thing.owner == this);
+ return thing.value.get();
+ }
+
+ public:
+ BuilderOrigin(JSContext* cx, js::Debugger* debugger_)
+ : Builder(cx, debugger_)
+ { }
+
+ JSObject* unwrap(Object& object) { return unwrapAny(object); }
+};
+
+
+
+// Finding the size of blocks allocated with malloc
+// ------------------------------------------------
+//
+// Debugger.Memory wants to be able to report how many bytes items in memory are
+// consuming. To do this, it needs a function that accepts a pointer to a block,
+// and returns the number of bytes allocated to that block. SpiderMonkey itself
+// doesn't know which function is appropriate to use, but the embedding does.
+
+// Tell Debuggers in |cx| to use |mallocSizeOf| to find the size of
+// malloc'd blocks.
+JS_PUBLIC_API(void)
+SetDebuggerMallocSizeOf(JSContext* cx, mozilla::MallocSizeOf mallocSizeOf);
+
+// Get the MallocSizeOf function that the given context is using to find the
+// size of malloc'd blocks.
+JS_PUBLIC_API(mozilla::MallocSizeOf)
+GetDebuggerMallocSizeOf(JSContext* cx);
+
+
+
+// Debugger and Garbage Collection Events
+// --------------------------------------
+//
+// The Debugger wants to report about its debuggees' GC cycles, however entering
+// JS after a GC is troublesome since SpiderMonkey will often do something like
+// force a GC and then rely on the nursery being empty. If we call into some
+// Debugger's hook after the GC, then JS runs and the nursery won't be
+// empty. Instead, we rely on embedders to call back into SpiderMonkey after a
+// GC and notify Debuggers to call their onGarbageCollection hook.
+
+
+// For each Debugger that observed a debuggee involved in the given GC event,
+// call its `onGarbageCollection` hook.
+JS_PUBLIC_API(bool)
+FireOnGarbageCollectionHook(JSContext* cx, GarbageCollectionEvent::Ptr&& data);
+
+
+
+// Handlers for observing Promises
+// -------------------------------
+//
+// The Debugger wants to observe behavior of promises, which are implemented by
+// Gecko with webidl and which SpiderMonkey knows nothing about. On the other
+// hand, Gecko knows nothing about which (if any) debuggers are observing a
+// promise's global. The compromise is that Gecko is responsible for calling
+// these handlers at the appropriate times, and SpiderMonkey will handle
+// notifying any Debugger instances that are observing the given promise's
+// global.
+
+// Notify any Debugger instances observing this promise's global that a new
+// promise was allocated.
+JS_PUBLIC_API(void)
+onNewPromise(JSContext* cx, HandleObject promise);
+
+// Notify any Debugger instances observing this promise's global that the
+// promise has settled (ie, it has either been fulfilled or rejected). Note that
+// this is *not* equivalent to the promise resolution (ie, the promise's fate
+// getting locked in) because you can resolve a promise with another pending
+// promise, in which case neither promise has settled yet.
+//
+// It is Gecko's responsibility to ensure that this is never called on the same
+// promise more than once (because a promise can only make the transition from
+// unsettled to settled once).
+JS_PUBLIC_API(void)
+onPromiseSettled(JSContext* cx, HandleObject promise);
+
+
+
+// Return true if the given value is a Debugger object, false otherwise.
+JS_PUBLIC_API(bool)
+IsDebugger(JSObject& obj);
+
+// Append each of the debuggee global objects observed by the Debugger object
+// |dbgObj| to |vector|. Returns true on success, false on failure.
+JS_PUBLIC_API(bool)
+GetDebuggeeGlobals(JSContext* cx, JSObject& dbgObj, AutoObjectVector& vector);
+
+
+// Hooks for reporting where JavaScript execution began.
+//
+// Our performance tools would like to be able to label blocks of JavaScript
+// execution with the function name and source location where execution began:
+// the event handler, the callback, etc.
+//
+// Construct an instance of this class on the stack, providing a JSContext
+// belonging to the runtime in which execution will occur. Each time we enter
+// JavaScript --- specifically, each time we push a JavaScript stack frame that
+// has no older JS frames younger than this AutoEntryMonitor --- we will
+// call the appropriate |Entry| member function to indicate where we've begun
+// execution.
+
+class MOZ_STACK_CLASS JS_PUBLIC_API(AutoEntryMonitor) {
+ JSRuntime* runtime_;
+ AutoEntryMonitor* savedMonitor_;
+
+ public:
+ explicit AutoEntryMonitor(JSContext* cx);
+ ~AutoEntryMonitor();
+
+ // SpiderMonkey reports the JavaScript entry points occuring within this
+ // AutoEntryMonitor's scope to the following member functions, which the
+ // embedding is expected to override.
+ //
+ // It is important to note that |asyncCause| is owned by the caller and its
+ // lifetime must outlive the lifetime of the AutoEntryMonitor object. It is
+ // strongly encouraged that |asyncCause| be a string constant or similar
+ // statically allocated string.
+
+ // We have begun executing |function|. Note that |function| may not be the
+ // actual closure we are running, but only the canonical function object to
+ // which the script refers.
+ virtual void Entry(JSContext* cx, JSFunction* function,
+ HandleValue asyncStack,
+ const char* asyncCause) = 0;
+
+ // Execution has begun at the entry point of |script|, which is not a
+ // function body. (This is probably being executed by 'eval' or some
+ // JSAPI equivalent.)
+ virtual void Entry(JSContext* cx, JSScript* script,
+ HandleValue asyncStack,
+ const char* asyncCause) = 0;
+
+ // Execution of the function or script has ended.
+ virtual void Exit(JSContext* cx) { }
+};
+
+
+
+} // namespace dbg
+} // namespace JS
+
+
+#endif /* js_Debug_h */
diff --git a/js/public/GCAPI.h b/js/public/GCAPI.h
new file mode 100644
index 0000000000..7a6675ca72
--- /dev/null
+++ b/js/public/GCAPI.h
@@ -0,0 +1,723 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_GCAPI_h
+#define js_GCAPI_h
+
+#include "mozilla/Vector.h"
+
+#include "js/GCAnnotations.h"
+#include "js/HeapAPI.h"
+#include "js/UniquePtr.h"
+
+namespace js {
+namespace gc {
+class GCRuntime;
+} // namespace gc
+namespace gcstats {
+struct Statistics;
+} // namespace gcstats
+} // namespace js
+
+typedef enum JSGCMode {
+ /** Perform only global GCs. */
+ JSGC_MODE_GLOBAL = 0,
+
+ /** Perform per-zone GCs until too much garbage has accumulated. */
+ JSGC_MODE_ZONE = 1,
+
+ /**
+ * Collect in short time slices rather than all at once. Implies
+ * JSGC_MODE_ZONE.
+ */
+ JSGC_MODE_INCREMENTAL = 2
+} JSGCMode;
+
+/**
+ * Kinds of js_GC invocation.
+ */
+typedef enum JSGCInvocationKind {
+ /* Normal invocation. */
+ GC_NORMAL = 0,
+
+ /* Minimize GC triggers and release empty GC chunks right away. */
+ GC_SHRINK = 1
+} JSGCInvocationKind;
+
+namespace JS {
+
+#define GCREASONS(D) \
+ /* Reasons internal to the JS engine */ \
+ D(API) \
+ D(EAGER_ALLOC_TRIGGER) \
+ D(DESTROY_RUNTIME) \
+ D(UNUSED0) \
+ D(LAST_DITCH) \
+ D(TOO_MUCH_MALLOC) \
+ D(ALLOC_TRIGGER) \
+ D(DEBUG_GC) \
+ D(COMPARTMENT_REVIVED) \
+ D(RESET) \
+ D(OUT_OF_NURSERY) \
+ D(EVICT_NURSERY) \
+ D(FULL_STORE_BUFFER) \
+ D(SHARED_MEMORY_LIMIT) \
+ D(UNUSED1) \
+ D(INCREMENTAL_TOO_SLOW) \
+ D(ABORT_GC) \
+ \
+ /* These are reserved for future use. */ \
+ D(RESERVED0) \
+ D(RESERVED1) \
+ D(RESERVED2) \
+ D(RESERVED3) \
+ D(RESERVED4) \
+ D(RESERVED5) \
+ D(RESERVED6) \
+ D(RESERVED7) \
+ D(RESERVED8) \
+ D(RESERVED9) \
+ D(RESERVED10) \
+ D(RESERVED11) \
+ D(RESERVED12) \
+ D(RESERVED13) \
+ D(RESERVED14) \
+ D(RESERVED15) \
+ \
+ /* Reasons from Firefox */ \
+ D(DOM_WINDOW_UTILS) \
+ D(COMPONENT_UTILS) \
+ D(MEM_PRESSURE) \
+ D(CC_WAITING) \
+ D(CC_FORCED) \
+ D(LOAD_END) \
+ D(POST_COMPARTMENT) \
+ D(PAGE_HIDE) \
+ D(NSJSCONTEXT_DESTROY) \
+ D(SET_NEW_DOCUMENT) \
+ D(SET_DOC_SHELL) \
+ D(DOM_UTILS) \
+ D(DOM_IPC) \
+ D(DOM_WORKER) \
+ D(INTER_SLICE_GC) \
+ D(REFRESH_FRAME) \
+ D(FULL_GC_TIMER) \
+ D(SHUTDOWN_CC) \
+ D(FINISH_LARGE_EVALUATE) \
+ D(USER_INACTIVE) \
+ D(XPCONNECT_SHUTDOWN)
+
+namespace gcreason {
+
+/* GCReasons will end up looking like JSGC_MAYBEGC */
+enum Reason {
+#define MAKE_REASON(name) name,
+ GCREASONS(MAKE_REASON)
+#undef MAKE_REASON
+ NO_REASON,
+ NUM_REASONS,
+
+ /*
+ * For telemetry, we want to keep a fixed max bucket size over time so we
+ * don't have to switch histograms. 100 is conservative; as of this writing
+ * there are 52. But the cost of extra buckets seems to be low while the
+ * cost of switching histograms is high.
+ */
+ NUM_TELEMETRY_REASONS = 100
+};
+
+/**
+ * Get a statically allocated C string explaining the given GC reason.
+ */
+extern JS_PUBLIC_API(const char*)
+ExplainReason(JS::gcreason::Reason reason);
+
+} /* namespace gcreason */
+
+/*
+ * Zone GC:
+ *
+ * SpiderMonkey's GC is capable of performing a collection on an arbitrary
+ * subset of the zones in the system. This allows an embedding to minimize
+ * collection time by only collecting zones that have run code recently,
+ * ignoring the parts of the heap that are unlikely to have changed.
+ *
+ * When triggering a GC using one of the functions below, it is first necessary
+ * to select the zones to be collected. To do this, you can call
+ * PrepareZoneForGC on each zone, or you can call PrepareForFullGC to select
+ * all zones. Failing to select any zone is an error.
+ */
+
+/**
+ * Schedule the given zone to be collected as part of the next GC.
+ */
+extern JS_PUBLIC_API(void)
+PrepareZoneForGC(Zone* zone);
+
+/**
+ * Schedule all zones to be collected in the next GC.
+ */
+extern JS_PUBLIC_API(void)
+PrepareForFullGC(JSContext* cx);
+
+/**
+ * When performing an incremental GC, the zones that were selected for the
+ * previous incremental slice must be selected in subsequent slices as well.
+ * This function selects those slices automatically.
+ */
+extern JS_PUBLIC_API(void)
+PrepareForIncrementalGC(JSContext* cx);
+
+/**
+ * Returns true if any zone in the system has been scheduled for GC with one of
+ * the functions above or by the JS engine.
+ */
+extern JS_PUBLIC_API(bool)
+IsGCScheduled(JSContext* cx);
+
+/**
+ * Undoes the effect of the Prepare methods above. The given zone will not be
+ * collected in the next GC.
+ */
+extern JS_PUBLIC_API(void)
+SkipZoneForGC(Zone* zone);
+
+/*
+ * Non-Incremental GC:
+ *
+ * The following functions perform a non-incremental GC.
+ */
+
+/**
+ * Performs a non-incremental collection of all selected zones.
+ *
+ * If the gckind argument is GC_NORMAL, then some objects that are unreachable
+ * from the program may still be alive afterwards because of internal
+ * references; if GC_SHRINK is passed then caches and other temporary references
+ * to objects will be cleared and all unreferenced objects will be removed from
+ * the system.
+ */
+extern JS_PUBLIC_API(void)
+GCForReason(JSContext* cx, JSGCInvocationKind gckind, gcreason::Reason reason);
+
+/*
+ * Incremental GC:
+ *
+ * Incremental GC divides the full mark-and-sweep collection into multiple
+ * slices, allowing client JavaScript code to run between each slice. This
+ * allows interactive apps to avoid long collection pauses. Incremental GC does
+ * not make collection take less time, it merely spreads that time out so that
+ * the pauses are less noticable.
+ *
+ * For a collection to be carried out incrementally the following conditions
+ * must be met:
+ * - The collection must be run by calling JS::IncrementalGC() rather than
+ * JS_GC().
+ * - The GC mode must have been set to JSGC_MODE_INCREMENTAL with
+ * JS_SetGCParameter().
+ *
+ * Note: Even if incremental GC is enabled and working correctly,
+ * non-incremental collections can still happen when low on memory.
+ */
+
+/**
+ * Begin an incremental collection and perform one slice worth of work. When
+ * this function returns, the collection may not be complete.
+ * IncrementalGCSlice() must be called repeatedly until
+ * !IsIncrementalGCInProgress(cx).
+ *
+ * Note: SpiderMonkey's GC is not realtime. Slices in practice may be longer or
+ * shorter than the requested interval.
+ */
+extern JS_PUBLIC_API(void)
+StartIncrementalGC(JSContext* cx, JSGCInvocationKind gckind, gcreason::Reason reason,
+ int64_t millis = 0);
+
+/**
+ * Perform a slice of an ongoing incremental collection. When this function
+ * returns, the collection may not be complete. It must be called repeatedly
+ * until !IsIncrementalGCInProgress(cx).
+ *
+ * Note: SpiderMonkey's GC is not realtime. Slices in practice may be longer or
+ * shorter than the requested interval.
+ */
+extern JS_PUBLIC_API(void)
+IncrementalGCSlice(JSContext* cx, gcreason::Reason reason, int64_t millis = 0);
+
+/**
+ * If IsIncrementalGCInProgress(cx), this call finishes the ongoing collection
+ * by performing an arbitrarily long slice. If !IsIncrementalGCInProgress(cx),
+ * this is equivalent to GCForReason. When this function returns,
+ * IsIncrementalGCInProgress(cx) will always be false.
+ */
+extern JS_PUBLIC_API(void)
+FinishIncrementalGC(JSContext* cx, gcreason::Reason reason);
+
+/**
+ * If IsIncrementalGCInProgress(cx), this call aborts the ongoing collection and
+ * performs whatever work needs to be done to return the collector to its idle
+ * state. This may take an arbitrarily long time. When this function returns,
+ * IsIncrementalGCInProgress(cx) will always be false.
+ */
+extern JS_PUBLIC_API(void)
+AbortIncrementalGC(JSContext* cx);
+
+namespace dbg {
+
+// The `JS::dbg::GarbageCollectionEvent` class is essentially a view of the
+// `js::gcstats::Statistics` data without the uber implementation-specific bits.
+// It should generally be palatable for web developers.
+class GarbageCollectionEvent
+{
+ // The major GC number of the GC cycle this data pertains to.
+ uint64_t majorGCNumber_;
+
+ // Reference to a non-owned, statically allocated C string. This is a very
+ // short reason explaining why a GC was triggered.
+ const char* reason;
+
+ // Reference to a nullable, non-owned, statically allocated C string. If the
+ // collection was forced to be non-incremental, this is a short reason of
+ // why the GC could not perform an incremental collection.
+ const char* nonincrementalReason;
+
+ // Represents a single slice of a possibly multi-slice incremental garbage
+ // collection.
+ struct Collection {
+ double startTimestamp;
+ double endTimestamp;
+ };
+
+ // The set of garbage collection slices that made up this GC cycle.
+ mozilla::Vector<Collection> collections;
+
+ GarbageCollectionEvent(const GarbageCollectionEvent& rhs) = delete;
+ GarbageCollectionEvent& operator=(const GarbageCollectionEvent& rhs) = delete;
+
+ public:
+ explicit GarbageCollectionEvent(uint64_t majorGCNum)
+ : majorGCNumber_(majorGCNum)
+ , reason(nullptr)
+ , nonincrementalReason(nullptr)
+ , collections()
+ { }
+
+ using Ptr = js::UniquePtr<GarbageCollectionEvent>;
+ static Ptr Create(JSRuntime* rt, ::js::gcstats::Statistics& stats, uint64_t majorGCNumber);
+
+ JSObject* toJSObject(JSContext* cx) const;
+
+ uint64_t majorGCNumber() const { return majorGCNumber_; }
+};
+
+} // namespace dbg
+
+enum GCProgress {
+ /*
+ * During non-incremental GC, the GC is bracketed by JSGC_CYCLE_BEGIN/END
+ * callbacks. During an incremental GC, the sequence of callbacks is as
+ * follows:
+ * JSGC_CYCLE_BEGIN, JSGC_SLICE_END (first slice)
+ * JSGC_SLICE_BEGIN, JSGC_SLICE_END (second slice)
+ * ...
+ * JSGC_SLICE_BEGIN, JSGC_CYCLE_END (last slice)
+ */
+
+ GC_CYCLE_BEGIN,
+ GC_SLICE_BEGIN,
+ GC_SLICE_END,
+ GC_CYCLE_END
+};
+
+struct JS_PUBLIC_API(GCDescription) {
+ bool isZone_;
+ JSGCInvocationKind invocationKind_;
+ gcreason::Reason reason_;
+
+ GCDescription(bool isZone, JSGCInvocationKind kind, gcreason::Reason reason)
+ : isZone_(isZone), invocationKind_(kind), reason_(reason) {}
+
+ char16_t* formatSliceMessage(JSContext* cx) const;
+ char16_t* formatSummaryMessage(JSContext* cx) const;
+ char16_t* formatJSON(JSContext* cx, uint64_t timestamp) const;
+
+ JS::dbg::GarbageCollectionEvent::Ptr toGCEvent(JSContext* cx) const;
+};
+
+typedef void
+(* GCSliceCallback)(JSContext* cx, GCProgress progress, const GCDescription& desc);
+
+/**
+ * The GC slice callback is called at the beginning and end of each slice. This
+ * callback may be used for GC notifications as well as to perform additional
+ * marking.
+ */
+extern JS_PUBLIC_API(GCSliceCallback)
+SetGCSliceCallback(JSContext* cx, GCSliceCallback callback);
+
+/**
+ * Describes the progress of an observed nursery collection.
+ */
+enum class GCNurseryProgress {
+ /**
+ * The nursery collection is starting.
+ */
+ GC_NURSERY_COLLECTION_START,
+ /**
+ * The nursery collection is ending.
+ */
+ GC_NURSERY_COLLECTION_END
+};
+
+/**
+ * A nursery collection callback receives the progress of the nursery collection
+ * and the reason for the collection.
+ */
+using GCNurseryCollectionCallback = void(*)(JSContext* cx, GCNurseryProgress progress,
+ gcreason::Reason reason);
+
+/**
+ * Set the nursery collection callback for the given runtime. When set, it will
+ * be called at the start and end of every nursery collection.
+ */
+extern JS_PUBLIC_API(GCNurseryCollectionCallback)
+SetGCNurseryCollectionCallback(JSContext* cx, GCNurseryCollectionCallback callback);
+
+typedef void
+(* DoCycleCollectionCallback)(JSContext* cx);
+
+/**
+ * The purge gray callback is called after any COMPARTMENT_REVIVED GC in which
+ * the majority of compartments have been marked gray.
+ */
+extern JS_PUBLIC_API(DoCycleCollectionCallback)
+SetDoCycleCollectionCallback(JSContext* cx, DoCycleCollectionCallback callback);
+
+/**
+ * Incremental GC defaults to enabled, but may be disabled for testing or in
+ * embeddings that have not yet implemented barriers on their native classes.
+ * There is not currently a way to re-enable incremental GC once it has been
+ * disabled on the runtime.
+ */
+extern JS_PUBLIC_API(void)
+DisableIncrementalGC(JSContext* cx);
+
+/**
+ * Returns true if incremental GC is enabled. Simply having incremental GC
+ * enabled is not sufficient to ensure incremental collections are happening.
+ * See the comment "Incremental GC" above for reasons why incremental GC may be
+ * suppressed. Inspection of the "nonincremental reason" field of the
+ * GCDescription returned by GCSliceCallback may help narrow down the cause if
+ * collections are not happening incrementally when expected.
+ */
+extern JS_PUBLIC_API(bool)
+IsIncrementalGCEnabled(JSContext* cx);
+
+/**
+ * Returns true while an incremental GC is ongoing, both when actively
+ * collecting and between slices.
+ */
+extern JS_PUBLIC_API(bool)
+IsIncrementalGCInProgress(JSContext* cx);
+
+/*
+ * Returns true when writes to GC things must call an incremental (pre) barrier.
+ * This is generally only true when running mutator code in-between GC slices.
+ * At other times, the barrier may be elided for performance.
+ */
+extern JS_PUBLIC_API(bool)
+IsIncrementalBarrierNeeded(JSContext* cx);
+
+/*
+ * Notify the GC that a reference to a GC thing is about to be overwritten.
+ * These methods must be called if IsIncrementalBarrierNeeded.
+ */
+extern JS_PUBLIC_API(void)
+IncrementalReferenceBarrier(GCCellPtr thing);
+
+extern JS_PUBLIC_API(void)
+IncrementalValueBarrier(const Value& v);
+
+extern JS_PUBLIC_API(void)
+IncrementalObjectBarrier(JSObject* obj);
+
+/**
+ * Returns true if the most recent GC ran incrementally.
+ */
+extern JS_PUBLIC_API(bool)
+WasIncrementalGC(JSContext* cx);
+
+/*
+ * Generational GC:
+ *
+ * Note: Generational GC is not yet enabled by default. The following class
+ * is non-functional unless SpiderMonkey was configured with
+ * --enable-gcgenerational.
+ */
+
+/** Ensure that generational GC is disabled within some scope. */
+class JS_PUBLIC_API(AutoDisableGenerationalGC)
+{
+ js::gc::GCRuntime* gc;
+
+ public:
+ explicit AutoDisableGenerationalGC(JSRuntime* rt);
+ ~AutoDisableGenerationalGC();
+};
+
+/**
+ * Returns true if generational allocation and collection is currently enabled
+ * on the given runtime.
+ */
+extern JS_PUBLIC_API(bool)
+IsGenerationalGCEnabled(JSRuntime* rt);
+
+/**
+ * Returns the GC's "number". This does not correspond directly to the number
+ * of GCs that have been run, but is guaranteed to be monotonically increasing
+ * with GC activity.
+ */
+extern JS_PUBLIC_API(size_t)
+GetGCNumber();
+
+/**
+ * Pass a subclass of this "abstract" class to callees to require that they
+ * never GC. Subclasses can use assertions or the hazard analysis to ensure no
+ * GC happens.
+ */
+class JS_PUBLIC_API(AutoRequireNoGC)
+{
+ protected:
+ AutoRequireNoGC() {}
+ ~AutoRequireNoGC() {}
+};
+
+/**
+ * Diagnostic assert (see MOZ_DIAGNOSTIC_ASSERT) that GC cannot occur while this
+ * class is live. This class does not disable the static rooting hazard
+ * analysis.
+ *
+ * This works by entering a GC unsafe region, which is checked on allocation and
+ * on GC.
+ */
+class JS_PUBLIC_API(AutoAssertNoGC) : public AutoRequireNoGC
+{
+ js::gc::GCRuntime* gc;
+ size_t gcNumber;
+
+ public:
+ AutoAssertNoGC();
+ explicit AutoAssertNoGC(JSRuntime* rt);
+ explicit AutoAssertNoGC(JSContext* cx);
+ ~AutoAssertNoGC();
+};
+
+/**
+ * Assert if an allocation of a GC thing occurs while this class is live. This
+ * class does not disable the static rooting hazard analysis.
+ */
+class JS_PUBLIC_API(AutoAssertNoAlloc)
+{
+#ifdef JS_DEBUG
+ js::gc::GCRuntime* gc;
+
+ public:
+ AutoAssertNoAlloc() : gc(nullptr) {}
+ explicit AutoAssertNoAlloc(JSContext* cx);
+ void disallowAlloc(JSRuntime* rt);
+ ~AutoAssertNoAlloc();
+#else
+ public:
+ AutoAssertNoAlloc() {}
+ explicit AutoAssertNoAlloc(JSContext* cx) {}
+ void disallowAlloc(JSRuntime* rt) {}
+#endif
+};
+
+/**
+ * Assert if a GC barrier is invoked while this class is live. This class does
+ * not disable the static rooting hazard analysis.
+ */
+class JS_PUBLIC_API(AutoAssertOnBarrier)
+{
+ JSContext* context;
+ bool prev;
+
+ public:
+ explicit AutoAssertOnBarrier(JSContext* cx);
+ ~AutoAssertOnBarrier();
+};
+
+/**
+ * Disable the static rooting hazard analysis in the live region and assert if
+ * any allocation that could potentially trigger a GC occurs while this guard
+ * object is live. This is most useful to help the exact rooting hazard analysis
+ * in complex regions, since it cannot understand dataflow.
+ *
+ * Note: GC behavior is unpredictable even when deterministic and is generally
+ * non-deterministic in practice. The fact that this guard has not
+ * asserted is not a guarantee that a GC cannot happen in the guarded
+ * region. As a rule, anyone performing a GC unsafe action should
+ * understand the GC properties of all code in that region and ensure
+ * that the hazard analysis is correct for that code, rather than relying
+ * on this class.
+ */
+class JS_PUBLIC_API(AutoSuppressGCAnalysis) : public AutoAssertNoAlloc
+{
+ public:
+ AutoSuppressGCAnalysis() : AutoAssertNoAlloc() {}
+ explicit AutoSuppressGCAnalysis(JSContext* cx) : AutoAssertNoAlloc(cx) {}
+} JS_HAZ_GC_SUPPRESSED;
+
+/**
+ * Assert that code is only ever called from a GC callback, disable the static
+ * rooting hazard analysis and assert if any allocation that could potentially
+ * trigger a GC occurs while this guard object is live.
+ *
+ * This is useful to make the static analysis ignore code that runs in GC
+ * callbacks.
+ */
+class JS_PUBLIC_API(AutoAssertGCCallback) : public AutoSuppressGCAnalysis
+{
+ public:
+ explicit AutoAssertGCCallback(JSObject* obj);
+};
+
+/**
+ * Place AutoCheckCannotGC in scopes that you believe can never GC. These
+ * annotations will be verified both dynamically via AutoAssertNoGC, and
+ * statically with the rooting hazard analysis (implemented by making the
+ * analysis consider AutoCheckCannotGC to be a GC pointer, and therefore
+ * complain if it is live across a GC call.) It is useful when dealing with
+ * internal pointers to GC things where the GC thing itself may not be present
+ * for the static analysis: e.g. acquiring inline chars from a JSString* on the
+ * heap.
+ *
+ * We only do the assertion checking in DEBUG builds.
+ */
+#ifdef DEBUG
+class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoAssertNoGC
+{
+ public:
+ AutoCheckCannotGC() : AutoAssertNoGC() {}
+ explicit AutoCheckCannotGC(JSContext* cx) : AutoAssertNoGC(cx) {}
+} JS_HAZ_GC_INVALIDATED;
+#else
+class JS_PUBLIC_API(AutoCheckCannotGC) : public AutoRequireNoGC
+{
+ public:
+ AutoCheckCannotGC() {}
+ explicit AutoCheckCannotGC(JSContext* cx) {}
+} JS_HAZ_GC_INVALIDATED;
+#endif
+
+/**
+ * Unsets the gray bit for anything reachable from |thing|. |kind| should not be
+ * JS::TraceKind::Shape. |thing| should be non-null. The return value indicates
+ * if anything was unmarked.
+ */
+extern JS_FRIEND_API(bool)
+UnmarkGrayGCThingRecursively(GCCellPtr thing);
+
+} /* namespace JS */
+
+namespace js {
+namespace gc {
+
+static MOZ_ALWAYS_INLINE void
+ExposeGCThingToActiveJS(JS::GCCellPtr thing)
+{
+ // GC things residing in the nursery cannot be gray: they have no mark bits.
+ // All live objects in the nursery are moved to tenured at the beginning of
+ // each GC slice, so the gray marker never sees nursery things.
+ if (IsInsideNursery(thing.asCell()))
+ return;
+
+ // There's nothing to do for permanent GC things that might be owned by
+ // another runtime.
+ if (thing.mayBeOwnedByOtherRuntime())
+ return;
+
+ JS::shadow::Runtime* rt = detail::GetCellRuntime(thing.asCell());
+ MOZ_DIAGNOSTIC_ASSERT(rt->allowGCBarriers());
+
+ if (IsIncrementalBarrierNeededOnTenuredGCThing(rt, thing))
+ JS::IncrementalReferenceBarrier(thing);
+ else if (!thing.mayBeOwnedByOtherRuntime() && js::gc::detail::CellIsMarkedGray(thing.asCell()))
+ JS::UnmarkGrayGCThingRecursively(thing);
+}
+
+static MOZ_ALWAYS_INLINE void
+MarkGCThingAsLive(JSRuntime* aRt, JS::GCCellPtr thing)
+{
+ // Any object in the nursery will not be freed during any GC running at that
+ // time.
+ if (IsInsideNursery(thing.asCell()))
+ return;
+
+ // There's nothing to do for permanent GC things that might be owned by
+ // another runtime.
+ if (thing.mayBeOwnedByOtherRuntime())
+ return;
+
+ JS::shadow::Runtime* rt = JS::shadow::Runtime::asShadowRuntime(aRt);
+ MOZ_DIAGNOSTIC_ASSERT(rt->allowGCBarriers());
+
+ if (IsIncrementalBarrierNeededOnTenuredGCThing(rt, thing))
+ JS::IncrementalReferenceBarrier(thing);
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+namespace JS {
+
+/*
+ * This should be called when an object that is marked gray is exposed to the JS
+ * engine (by handing it to running JS code or writing it into live JS
+ * data). During incremental GC, since the gray bits haven't been computed yet,
+ * we conservatively mark the object black.
+ */
+static MOZ_ALWAYS_INLINE void
+ExposeObjectToActiveJS(JSObject* obj)
+{
+ MOZ_ASSERT(obj);
+ js::gc::ExposeGCThingToActiveJS(GCCellPtr(obj));
+}
+
+static MOZ_ALWAYS_INLINE void
+ExposeScriptToActiveJS(JSScript* script)
+{
+ js::gc::ExposeGCThingToActiveJS(GCCellPtr(script));
+}
+
+/*
+ * If a GC is currently marking, mark the string black.
+ */
+static MOZ_ALWAYS_INLINE void
+MarkStringAsLive(Zone* zone, JSString* string)
+{
+ JSRuntime* rt = JS::shadow::Zone::asShadowZone(zone)->runtimeFromMainThread();
+ js::gc::MarkGCThingAsLive(rt, GCCellPtr(string));
+}
+
+/*
+ * Internal to Firefox.
+ *
+ * Note: this is not related to the PokeGC in nsJSEnvironment.
+ */
+extern JS_FRIEND_API(void)
+PokeGC(JSContext* cx);
+
+/*
+ * Internal to Firefox.
+ */
+extern JS_FRIEND_API(void)
+NotifyDidPaint(JSContext* cx);
+
+} /* namespace JS */
+
+#endif /* js_GCAPI_h */
diff --git a/js/public/GCAnnotations.h b/js/public/GCAnnotations.h
new file mode 100644
index 0000000000..366d787bf4
--- /dev/null
+++ b/js/public/GCAnnotations.h
@@ -0,0 +1,57 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_GCAnnotations_h
+#define js_GCAnnotations_h
+
+// Set of annotations for the rooting hazard analysis, used to categorize types
+// and functions.
+#ifdef XGILL_PLUGIN
+
+// Mark a type as being a GC thing (eg js::gc::Cell has this annotation).
+# define JS_HAZ_GC_THING __attribute__((tag("GC Thing")))
+
+// Mark a type as holding a pointer to a GC thing (eg JS::Value has this
+// annotation.)
+# define JS_HAZ_GC_POINTER __attribute__((tag("GC Pointer")))
+
+// Mark a type as a rooted pointer, suitable for use on the stack (eg all
+// Rooted<T> instantiations should have this.)
+# define JS_HAZ_ROOTED __attribute__((tag("Rooted Pointer")))
+
+// Mark a type as something that should not be held live across a GC, but which
+// is not itself a GC pointer.
+# define JS_HAZ_GC_INVALIDATED __attribute__((tag("Invalidated by GC")))
+
+// Mark a type that would otherwise be considered a GC Pointer (eg because it
+// contains a JS::Value field) as a non-GC pointer. It is handled almost the
+// same in the analysis as a rooted pointer, except it will not be reported as
+// an unnecessary root if used across a GC call. This should rarely be used,
+// but makes sense for something like ErrorResult, which only contains a GC
+// pointer when it holds an exception (and it does its own rooting,
+// conditionally.)
+# define JS_HAZ_NON_GC_POINTER __attribute__((tag("Suppressed GC Pointer")))
+
+// Mark a function as something that runs a garbage collection, potentially
+// invalidating GC pointers.
+# define JS_HAZ_GC_CALL __attribute__((tag("GC Call")))
+
+// Mark an RAII class as suppressing GC within its scope.
+# define JS_HAZ_GC_SUPPRESSED __attribute__((tag("Suppress GC")))
+
+#else
+
+# define JS_HAZ_GC_THING
+# define JS_HAZ_GC_POINTER
+# define JS_HAZ_ROOTED
+# define JS_HAZ_GC_INVALIDATED
+# define JS_HAZ_NON_GC_POINTER
+# define JS_HAZ_GC_CALL
+# define JS_HAZ_GC_SUPPRESSED
+
+#endif
+
+#endif /* js_GCAnnotations_h */
diff --git a/js/public/GCHashTable.h b/js/public/GCHashTable.h
new file mode 100644
index 0000000000..d6c2ce75b3
--- /dev/null
+++ b/js/public/GCHashTable.h
@@ -0,0 +1,399 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef GCHashTable_h
+#define GCHashTable_h
+
+#include "js/GCPolicyAPI.h"
+#include "js/HashTable.h"
+#include "js/RootingAPI.h"
+#include "js/SweepingAPI.h"
+#include "js/TracingAPI.h"
+
+namespace JS {
+
+// Define a reasonable default GC policy for GC-aware Maps.
+template <typename Key, typename Value>
+struct DefaultMapSweepPolicy {
+    // Drop a map entry when either its key or its value is about to be
+    // finalized, as reported by the respective GCPolicy. The || short-circuits,
+    // so the value's needsSweep is only consulted when the key survives.
+    static bool needsSweep(Key* key, Value* value) {
+        return GCPolicy<Key>::needsSweep(key) || GCPolicy<Value>::needsSweep(value);
+    }
+};
+
+// A GCHashMap is a GC-aware HashMap, meaning that it has additional trace and
+// sweep methods that know how to visit all keys and values in the table.
+// HashMaps that contain GC pointers will generally want to use this GCHashMap
+// specialization instead of HashMap, because this conveniently supports tracing
+// keys and values, and cleaning up weak entries.
+//
+// GCHashMap::trace applies GCPolicy<T>::trace to each entry's key and value.
+// Most types of GC pointers already have appropriate specializations of
+// GCPolicy, so they should just work as keys and values. Any struct type with a
+// default constructor and trace and sweep functions should work as well. If you
+// need to define your own GCPolicy specialization, generic helpers can be found
+// in js/public/TracingAPI.h.
+//
+// The MapSweepPolicy template parameter controls how the table drops entries
+// when swept. GCHashMap::sweep applies MapSweepPolicy::needsSweep to each table
+// entry; if it returns true, the entry is dropped. The default MapSweepPolicy
+// drops the entry if either the key or value is about to be finalized,
+// according to its GCPolicy<T>::needsSweep method. (This default is almost
+// always fine: it's hard to imagine keeping such an entry around anyway.)
+//
+// Note that this HashMap only knows *how* to trace and sweep, but it does not
+// itself cause tracing or sweeping to be invoked. For tracing, it must be used
+// with Rooted or PersistentRooted, or barriered and traced manually. For
+// sweeping, currently it requires an explicit call to <map>.sweep().
+template <typename Key,
+          typename Value,
+          typename HashPolicy = js::DefaultHasher<Key>,
+          typename AllocPolicy = js::TempAllocPolicy,
+          typename MapSweepPolicy = DefaultMapSweepPolicy<Key, Value>>
+class GCHashMap : public js::HashMap<Key, Value, HashPolicy, AllocPolicy>
+{
+    using Base = js::HashMap<Key, Value, HashPolicy, AllocPolicy>;
+
+  public:
+    explicit GCHashMap(AllocPolicy a = AllocPolicy()) : Base(a) {}
+
+    // Static form used by the rooting machinery; forwards to the member trace.
+    static void trace(GCHashMap* map, JSTracer* trc) { map->trace(trc); }
+    // Trace every key and value through GCPolicy. No-op on an uninitialized
+    // table. Keys are traced via mutableKey() so a moving GC may update them.
+    void trace(JSTracer* trc) {
+        if (!this->initialized())
+            return;
+        for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
+            GCPolicy<Value>::trace(trc, &e.front().value(), "hashmap value");
+            GCPolicy<Key>::trace(trc, &e.front().mutableKey(), "hashmap key");
+        }
+    }
+
+    // Drop every entry that MapSweepPolicy reports as needing sweeping. Must
+    // be invoked explicitly; see the class comment above.
+    void sweep() {
+        if (!this->initialized())
+            return;
+
+        for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
+            if (MapSweepPolicy::needsSweep(&e.front().mutableKey(), &e.front().value()))
+                e.removeFront();
+        }
+    }
+
+    // GCHashMap is movable
+    GCHashMap(GCHashMap&& rhs) : Base(mozilla::Move(rhs)) {}
+    void operator=(GCHashMap&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        Base::operator=(mozilla::Move(rhs));
+    }
+
+  private:
+    // GCHashMap is not copyable or assignable
+    GCHashMap(const GCHashMap& hm) = delete;
+    GCHashMap& operator=(const GCHashMap& hm) = delete;
+};
+
+} // namespace JS
+
+namespace js {
+
+// HashMap that supports rekeying.
+//
+// If your keys are pointers to something like JSObject that can be tenured or
+// compacted, prefer to use GCHashMap with MovableCellHasher, which takes
+// advantage of the Zone's stable id table to make rekeying unnecessary.
+template <typename Key,
+          typename Value,
+          typename HashPolicy = DefaultHasher<Key>,
+          typename AllocPolicy = TempAllocPolicy,
+          typename MapSweepPolicy = JS::DefaultMapSweepPolicy<Key, Value>>
+class GCRekeyableHashMap : public JS::GCHashMap<Key, Value, HashPolicy, AllocPolicy, MapSweepPolicy>
+{
+    // Base must name the *actual* base class, including MapSweepPolicy.
+    // Previously the last parameter was omitted, so with a non-default
+    // MapSweepPolicy |Base| named a different instantiation than the real
+    // base, breaking the move constructor/assignment below.
+    using Base = JS::GCHashMap<Key, Value, HashPolicy, AllocPolicy, MapSweepPolicy>;
+
+  public:
+    explicit GCRekeyableHashMap(AllocPolicy a = AllocPolicy()) : Base(a) {}
+
+    // Sweep the table; entries whose key was updated in place (e.g. by a
+    // compacting GC, via MapSweepPolicy::needsSweep) are re-keyed rather than
+    // dropped.
+    void sweep() {
+        if (!this->initialized())
+            return;
+
+        for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
+            Key key(e.front().key());
+            if (MapSweepPolicy::needsSweep(&key, &e.front().value()))
+                e.removeFront();
+            else if (!HashPolicy::match(key, e.front().key()))
+                e.rekeyFront(key);
+        }
+    }
+
+    // GCRekeyableHashMap is movable
+    GCRekeyableHashMap(GCRekeyableHashMap&& rhs) : Base(mozilla::Move(rhs)) {}
+    void operator=(GCRekeyableHashMap&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        Base::operator=(mozilla::Move(rhs));
+    }
+};
+
+// Read-only operations exposed by Rooted/Handle-style wrappers around a
+// GCHashMap. Outer is the wrapper type (CRTP); it must provide get().
+template <typename Outer, typename... Args>
+class GCHashMapOperations
+{
+    using Map = JS::GCHashMap<Args...>;
+    using Lookup = typename Map::Lookup;
+
+    const Map& map() const { return static_cast<const Outer*>(this)->get(); }
+
+  public:
+    using AddPtr = typename Map::AddPtr;
+    using Ptr = typename Map::Ptr;
+    using Range = typename Map::Range;
+
+    bool initialized() const { return map().initialized(); }
+    Ptr lookup(const Lookup& l) const { return map().lookup(l); }
+    AddPtr lookupForAdd(const Lookup& l) const { return map().lookupForAdd(l); }
+    Range all() const { return map().all(); }
+    bool empty() const { return map().empty(); }
+    uint32_t count() const { return map().count(); }
+    size_t capacity() const { return map().capacity(); }
+    bool has(const Lookup& l) const { return map().lookup(l).found(); }
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return map().sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + map().sizeOfExcludingThis(mallocSizeOf);
+    }
+};
+
+// Mutating operations, additionally exposed by Rooted/MutableHandle/WeakCache
+// wrappers around a GCHashMap.
+template <typename Outer, typename... Args>
+class MutableGCHashMapOperations
+  : public GCHashMapOperations<Outer, Args...>
+{
+    using Map = JS::GCHashMap<Args...>;
+    using Lookup = typename Map::Lookup;
+
+    Map& map() { return static_cast<Outer*>(this)->get(); }
+
+  public:
+    using AddPtr = typename Map::AddPtr;
+    // Enumerator over the wrapped map; mirrors HashMap::Enum.
+    struct Enum : public Map::Enum { explicit Enum(Outer& o) : Map::Enum(o.map()) {} };
+    using Ptr = typename Map::Ptr;
+    using Range = typename Map::Range;
+
+    bool init(uint32_t len = 16) { return map().init(len); }
+    void clear() { map().clear(); }
+    void finish() { map().finish(); }
+    void remove(Ptr p) { map().remove(p); }
+
+    template<typename KeyInput, typename ValueInput>
+    bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+        return map().add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
+    }
+
+    // Add with a value-initialized Value.
+    template<typename KeyInput>
+    bool add(AddPtr& p, KeyInput&& k) {
+        return map().add(p, mozilla::Forward<KeyInput>(k), Map::Value());
+    }
+
+    template<typename KeyInput, typename ValueInput>
+    bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+        // |k| is used both as the lookup and as the forwarded key. Forward is
+        // only a cast, so nothing is moved from at the call site; NOTE(review):
+        // this relies on HashMap::relookupOrAdd consuming the lookup before
+        // constructing the new key — confirm against HashTable.h.
+        return map().relookupOrAdd(p, k,
+                                   mozilla::Forward<KeyInput>(k),
+                                   mozilla::Forward<ValueInput>(v));
+    }
+
+    template<typename KeyInput, typename ValueInput>
+    bool put(KeyInput&& k, ValueInput&& v) {
+        return map().put(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
+    }
+
+    template<typename KeyInput, typename ValueInput>
+    bool putNew(KeyInput&& k, ValueInput&& v) {
+        return map().putNew(mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
+    }
+};
+
+// Hook GCHashMap into the Rooted/Handle/MutableHandle/WeakCache machinery:
+// each wrapper inherits the appropriate (Mutable)GCHashMapOperations mixin.
+template <typename A, typename B, typename C, typename D, typename E>
+class RootedBase<JS::GCHashMap<A,B,C,D,E>>
+  : public MutableGCHashMapOperations<JS::Rooted<JS::GCHashMap<A,B,C,D,E>>, A,B,C,D,E>
+{};
+
+template <typename A, typename B, typename C, typename D, typename E>
+class MutableHandleBase<JS::GCHashMap<A,B,C,D,E>>
+  : public MutableGCHashMapOperations<JS::MutableHandle<JS::GCHashMap<A,B,C,D,E>>, A,B,C,D,E>
+{};
+
+template <typename A, typename B, typename C, typename D, typename E>
+class HandleBase<JS::GCHashMap<A,B,C,D,E>>
+  : public GCHashMapOperations<JS::Handle<JS::GCHashMap<A,B,C,D,E>>, A,B,C,D,E>
+{};
+
+template <typename A, typename B, typename C, typename D, typename E>
+class WeakCacheBase<JS::GCHashMap<A,B,C,D,E>>
+  : public MutableGCHashMapOperations<JS::WeakCache<JS::GCHashMap<A,B,C,D,E>>, A,B,C,D,E>
+{};
+
+} // namespace js
+
+namespace JS {
+
+// A GCHashSet is a HashSet with an additional trace method that knows how to
+// visit all set elements. Sets whose elements are GC pointers that must be
+// traced to be kept alive will generally want to use this GCHashSet
+// specialization in lieu of HashSet.
+//
+// Most types of GC pointers can be traced with no extra infrastructure. For
+// structs and non-gc-pointer members, ensure that there is a specialization of
+// GCPolicy<T> with an appropriate trace method available to handle the custom
+// type. Generic helpers can be found in js/public/TracingAPI.h.
+//
+// Note that although this HashSet's trace will deal correctly with moved
+// elements, it does not itself know when to barrier or trace elements. To
+// function properly it must either be used with Rooted or barriered and traced
+// manually.
+template <typename T,
+          typename HashPolicy = js::DefaultHasher<T>,
+          typename AllocPolicy = js::TempAllocPolicy>
+class GCHashSet : public js::HashSet<T, HashPolicy, AllocPolicy>
+{
+    using Base = js::HashSet<T, HashPolicy, AllocPolicy>;
+
+  public:
+    explicit GCHashSet(AllocPolicy a = AllocPolicy()) : Base(a) {}
+
+    // Static form used by the rooting machinery; forwards to the member trace.
+    static void trace(GCHashSet* set, JSTracer* trc) { set->trace(trc); }
+    // Trace every element through GCPolicy, via mutableFront() so a moving GC
+    // may update stored pointers. No-op on an uninitialized table.
+    void trace(JSTracer* trc) {
+        if (!this->initialized())
+            return;
+        for (typename Base::Enum e(*this); !e.empty(); e.popFront())
+            GCPolicy<T>::trace(trc, &e.mutableFront(), "hashset element");
+    }
+
+    // Drop every element whose GCPolicy reports it needs sweeping. Must be
+    // invoked explicitly.
+    void sweep() {
+        if (!this->initialized())
+            return;
+        for (typename Base::Enum e(*this); !e.empty(); e.popFront()) {
+            if (GCPolicy<T>::needsSweep(&e.mutableFront()))
+                e.removeFront();
+        }
+    }
+
+    // GCHashSet is movable
+    GCHashSet(GCHashSet&& rhs) : Base(mozilla::Move(rhs)) {}
+    void operator=(GCHashSet&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        Base::operator=(mozilla::Move(rhs));
+    }
+
+  private:
+    // GCHashSet is not copyable or assignable
+    GCHashSet(const GCHashSet& hs) = delete;
+    GCHashSet& operator=(const GCHashSet& hs) = delete;
+};
+
+} // namespace JS
+
+namespace js {
+
+// Read-only operations exposed by Rooted/Handle-style wrappers around a
+// GCHashSet. Outer is the wrapper type (CRTP); it must provide get().
+template <typename Outer, typename... Args>
+class GCHashSetOperations
+{
+    using Set = JS::GCHashSet<Args...>;
+    using Lookup = typename Set::Lookup;
+
+    const Set& set() const { return static_cast<const Outer*>(this)->get(); }
+
+  public:
+    using AddPtr = typename Set::AddPtr;
+    using Entry = typename Set::Entry;
+    using Ptr = typename Set::Ptr;
+    using Range = typename Set::Range;
+
+    bool initialized() const { return set().initialized(); }
+    Ptr lookup(const Lookup& l) const { return set().lookup(l); }
+    AddPtr lookupForAdd(const Lookup& l) const { return set().lookupForAdd(l); }
+    Range all() const { return set().all(); }
+    bool empty() const { return set().empty(); }
+    uint32_t count() const { return set().count(); }
+    size_t capacity() const { return set().capacity(); }
+    bool has(const Lookup& l) const { return set().lookup(l).found(); }
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return set().sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + set().sizeOfExcludingThis(mallocSizeOf);
+    }
+};
+
+// Mutating operations, additionally exposed by Rooted/MutableHandle/WeakCache
+// wrappers around a GCHashSet.
+template <typename Outer, typename... Args>
+class MutableGCHashSetOperations
+  : public GCHashSetOperations<Outer, Args...>
+{
+    using Set = JS::GCHashSet<Args...>;
+    using Lookup = typename Set::Lookup;
+
+    Set& set() { return static_cast<Outer*>(this)->get(); }
+
+  public:
+    using AddPtr = typename Set::AddPtr;
+    using Entry = typename Set::Entry;
+    // Enumerator over the wrapped set; mirrors HashSet::Enum.
+    struct Enum : public Set::Enum { explicit Enum(Outer& o) : Set::Enum(o.set()) {} };
+    using Ptr = typename Set::Ptr;
+    using Range = typename Set::Range;
+
+    bool init(uint32_t len = 16) { return set().init(len); }
+    void clear() { set().clear(); }
+    void finish() { set().finish(); }
+    void remove(Ptr p) { set().remove(p); }
+    void remove(const Lookup& l) { set().remove(l); }
+
+    template<typename TInput>
+    bool add(AddPtr& p, TInput&& t) {
+        return set().add(p, mozilla::Forward<TInput>(t));
+    }
+
+    template<typename TInput>
+    bool relookupOrAdd(AddPtr& p, const Lookup& l, TInput&& t) {
+        return set().relookupOrAdd(p, l, mozilla::Forward<TInput>(t));
+    }
+
+    template<typename TInput>
+    bool put(TInput&& t) {
+        return set().put(mozilla::Forward<TInput>(t));
+    }
+
+    template<typename TInput>
+    bool putNew(TInput&& t) {
+        return set().putNew(mozilla::Forward<TInput>(t));
+    }
+
+    // putNew with an explicit lookup, avoiding reconstruction of the key.
+    template<typename TInput>
+    bool putNew(const Lookup& l, TInput&& t) {
+        return set().putNew(l, mozilla::Forward<TInput>(t));
+    }
+};
+
+// Hook GCHashSet into the Rooted/Handle/MutableHandle/WeakCache machinery:
+// each wrapper inherits the appropriate (Mutable)GCHashSetOperations mixin.
+template <typename T, typename HP, typename AP>
+class RootedBase<JS::GCHashSet<T, HP, AP>>
+  : public MutableGCHashSetOperations<JS::Rooted<JS::GCHashSet<T, HP, AP>>, T, HP, AP>
+{
+};
+
+template <typename T, typename HP, typename AP>
+class MutableHandleBase<JS::GCHashSet<T, HP, AP>>
+  : public MutableGCHashSetOperations<JS::MutableHandle<JS::GCHashSet<T, HP, AP>>, T, HP, AP>
+{
+};
+
+template <typename T, typename HP, typename AP>
+class HandleBase<JS::GCHashSet<T, HP, AP>>
+  : public GCHashSetOperations<JS::Handle<JS::GCHashSet<T, HP, AP>>, T, HP, AP>
+{
+};
+
+template <typename T, typename HP, typename AP>
+class WeakCacheBase<JS::GCHashSet<T, HP, AP>>
+  : public MutableGCHashSetOperations<JS::WeakCache<JS::GCHashSet<T, HP, AP>>, T, HP, AP>
+{
+};
+
+} /* namespace js */
+
+#endif /* GCHashTable_h */
diff --git a/js/public/GCPolicyAPI.h b/js/public/GCPolicyAPI.h
new file mode 100644
index 0000000000..054e397af4
--- /dev/null
+++ b/js/public/GCPolicyAPI.h
@@ -0,0 +1,164 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// GC Policy Mechanism
+
+// A GCPolicy controls how the GC interacts with both direct pointers to GC
+// things (e.g. JSObject* or JSString*), tagged and/or optional pointers to GC
+// things (e.g. Value or jsid), and C++ container types (e.g.
+// JSPropertyDescriptor or GCHashMap).
+//
+// The GCPolicy provides at a minimum:
+//
+// static T initial()
+// - Construct and return an empty T.
+//
+//   static void trace(JSTracer* trc, T* tp, const char* name)
+// - Trace the edge |*tp|, calling the edge |name|. Containers like
+// GCHashMap and GCHashSet use this method to trace their children.
+//
+// static bool needsSweep(T* tp)
+// - Return true if |*tp| is about to be finalized. Otherwise, update the
+// edge for moving GC, and return false. Containers like GCHashMap and
+// GCHashSet use this method to decide when to remove an entry: if this
+// function returns true on a key/value/member/etc, its entry is dropped
+// from the container. Specializing this method is the standard way to
+// get custom weak behavior from a container type.
+//
+// The default GCPolicy<T> assumes that T has a default constructor and |trace|
+// and |needsSweep| methods, and forwards to them. GCPolicy has appropriate
+// specializations for pointers to GC things and pointer-like types like
+// JS::Heap<T> and mozilla::UniquePtr<T>.
+//
+// There are some stock structs your specializations can inherit from.
+// IgnoreGCPolicy<T> does nothing. StructGCPolicy<T> forwards the methods to the
+// referent type T.
+
+#ifndef GCPolicyAPI_h
+#define GCPolicyAPI_h
+
+#include "mozilla/UniquePtr.h"
+
+#include "js/TraceKind.h"
+#include "js/TracingAPI.h"
+
+// Expand the given macro D for each public GC pointer.
+#define FOR_EACH_PUBLIC_GC_POINTER_TYPE(D) \
+ D(JS::Symbol*) \
+ D(JSAtom*) \
+ D(JSFunction*) \
+ D(JSObject*) \
+ D(JSScript*) \
+ D(JSString*)
+
+// Expand the given macro D for each public tagged GC pointer type.
+#define FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(D) \
+ D(JS::Value) \
+ D(jsid)
+
+#define FOR_EACH_PUBLIC_AGGREGATE_GC_POINTER_TYPE(D) \
+ D(JSPropertyDescriptor)
+
+class JSAtom;
+class JSFunction;
+class JSObject;
+class JSScript;
+class JSString;
+namespace JS {
+class Symbol;
+}
+
+namespace JS {
+
+// Defines a policy for container types with non-GC, i.e. C storage. This
+// policy dispatches to the underlying struct for GC interactions.
+template <typename T>
+struct StructGCPolicy
+{
+    // Value-initialize an empty T.
+    static T initial() {
+        return T();
+    }
+
+    // Forward tracing to the struct's own trace method.
+    static void trace(JSTracer* trc, T* tp, const char* name) {
+        tp->trace(trc);
+    }
+
+    // Forward sweeping to the struct's own sweep method.
+    static void sweep(T* tp) {
+        return tp->sweep();
+    }
+
+    // The struct decides for itself whether it should be dropped by a
+    // sweeping container.
+    static bool needsSweep(T* tp) {
+        return tp->needsSweep();
+    }
+};
+
+// The default GC policy attempts to defer to methods on the underlying type.
+// Most C++ structures that contain a default constructor, a trace function and
+// a sweep function will work out of the box with Rooted, Handle, GCVector,
+// and GCHash{Set,Map}.
+template <typename T> struct GCPolicy : public StructGCPolicy<T> {};
+
+// This policy ignores any GC interaction, e.g. for non-GC types.
+template <typename T>
+struct IgnoreGCPolicy {
+    static T initial() { return T(); }
+    static void trace(JSTracer* trc, T* t, const char* name) {}
+    static bool needsSweep(T* v) { return false; }
+};
+// Plain integers hold no GC pointers, so they never need tracing or sweeping.
+template <> struct GCPolicy<uint32_t> : public IgnoreGCPolicy<uint32_t> {};
+template <> struct GCPolicy<uint64_t> : public IgnoreGCPolicy<uint64_t> {};
+
+// Policy for raw pointers to GC things (e.g. JSObject*). Null pointers are
+// skipped by both trace and needsSweep.
+template <typename T>
+struct GCPointerPolicy
+{
+    static T initial() { return nullptr; }
+    static void trace(JSTracer* trc, T* vp, const char* name) {
+        if (*vp)
+            js::UnsafeTraceManuallyBarrieredEdge(trc, vp, name);
+    }
+    // Defer to the GC: true when the pointee is about to be finalized.
+    static bool needsSweep(T* vp) {
+        if (*vp)
+            return js::gc::IsAboutToBeFinalizedUnbarriered(vp);
+        return false;
+    }
+};
+// Specializations for each public GC pointer type.
+template <> struct GCPolicy<JS::Symbol*> : public GCPointerPolicy<JS::Symbol*> {};
+template <> struct GCPolicy<JSAtom*> : public GCPointerPolicy<JSAtom*> {};
+template <> struct GCPolicy<JSFunction*> : public GCPointerPolicy<JSFunction*> {};
+template <> struct GCPolicy<JSObject*> : public GCPointerPolicy<JSObject*> {};
+template <> struct GCPolicy<JSScript*> : public GCPointerPolicy<JSScript*> {};
+template <> struct GCPolicy<JSString*> : public GCPointerPolicy<JSString*> {};
+
+// Policy for JS::Heap<T> wrappers. Note: no initial() is provided here.
+template <typename T>
+struct GCPolicy<JS::Heap<T>>
+{
+    static void trace(JSTracer* trc, JS::Heap<T>* thingp, const char* name) {
+        TraceEdge(trc, thingp, name);
+    }
+    static bool needsSweep(JS::Heap<T>* thingp) {
+        return js::gc::EdgeNeedsSweep(thingp);
+    }
+};
+
+// GCPolicy<UniquePtr<T>> forwards the contained pointer to GCPolicy<T>.
+template <typename T, typename D>
+struct GCPolicy<mozilla::UniquePtr<T, D>>
+{
+    // An empty (null) UniquePtr.
+    static mozilla::UniquePtr<T,D> initial() { return mozilla::UniquePtr<T,D>(); }
+    // Trace through to the pointee; a null pointer is skipped.
+    static void trace(JSTracer* trc, mozilla::UniquePtr<T,D>* tp, const char* name) {
+        if (tp->get())
+            GCPolicy<T>::trace(trc, tp->get(), name);
+    }
+    // A null UniquePtr never needs sweeping; otherwise defer to the pointee.
+    static bool needsSweep(mozilla::UniquePtr<T,D>* tp) {
+        if (tp->get())
+            return GCPolicy<T>::needsSweep(tp->get());
+        return false;
+    }
+};
+
+} // namespace JS
+
+#endif // GCPolicyAPI_h
diff --git a/js/public/GCVariant.h b/js/public/GCVariant.h
new file mode 100644
index 0000000000..31ab23f54c
--- /dev/null
+++ b/js/public/GCVariant.h
@@ -0,0 +1,198 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_GCVariant_h
+#define js_GCVariant_h
+
+#include "mozilla/Variant.h"
+
+#include "js/GCPolicyAPI.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+
+namespace JS {
+
+// These template specializations allow Variant to be used inside GC wrappers.
+//
+// When matching on GC wrappers around Variants, matching should be done on
+// the wrapper itself. The matcher class's methods should take Handles or
+// MutableHandles. For example,
+//
+// struct MyMatcher
+// {
+// using ReturnType = const char*;
+// ReturnType match(HandleObject o) { return "object"; }
+// ReturnType match(HandleScript s) { return "script"; }
+// };
+//
+// Rooted<Variant<JSObject*, JSScript*>> v(cx, someScript);
+// MyMatcher mm;
+// v.match(mm);
+//
+// If you get compile errors about inability to upcast subclasses (e.g., from
+// NativeObject* to JSObject*) and are inside js/src, be sure to also include
+// "gc/Policy.h".
+
+namespace detail {
+
+template <typename... Ts>
+struct GCVariantImplementation;
+
+// The base case.
+template <typename T>
+struct GCVariantImplementation<T>
+{
+    // Trace the contained value, which in the single-type base case must be a
+    // T. Null GC pointers are skipped.
+    template <typename ConcreteVariant>
+    static void trace(JSTracer* trc, ConcreteVariant* v, const char* name) {
+        T& thing = v->template as<T>();
+        if (!mozilla::IsPointer<T>::value || thing)
+            GCPolicy<T>::trace(trc, &thing, name);
+    }
+
+    // Dispatch the matcher on the contained value, re-wrapped as a Handle<T>.
+    template <typename Matcher, typename ConcreteVariant>
+    static typename Matcher::ReturnType
+    match(Matcher& matcher, Handle<ConcreteVariant> v) {
+        const T& thing = v.get().template as<T>();
+        return matcher.match(Handle<T>::fromMarkedLocation(&thing));
+    }
+
+    // As above, for mutable access.
+    template <typename Matcher, typename ConcreteVariant>
+    static typename Matcher::ReturnType
+    match(Matcher& matcher, MutableHandle<ConcreteVariant> v) {
+        T& thing = v.get().template as<T>();
+        return matcher.match(MutableHandle<T>::fromMarkedLocation(&thing));
+    }
+};
+
+// The inductive case.
+template <typename T, typename... Ts>
+struct GCVariantImplementation<T, Ts...>
+{
+    using Next = GCVariantImplementation<Ts...>;
+
+    // If the variant currently holds a T, trace it (skipping null GC
+    // pointers); otherwise recurse on the remaining alternatives.
+    template <typename ConcreteVariant>
+    static void trace(JSTracer* trc, ConcreteVariant* v, const char* name) {
+        if (v->template is<T>()) {
+            T& thing = v->template as<T>();
+            if (!mozilla::IsPointer<T>::value || thing)
+                GCPolicy<T>::trace(trc, &thing, name);
+        } else {
+            Next::trace(trc, v, name);
+        }
+    }
+
+    // Dispatch the matcher on the currently-held alternative, re-wrapped as a
+    // Handle of that alternative's type.
+    template <typename Matcher, typename ConcreteVariant>
+    static typename Matcher::ReturnType
+    match(Matcher& matcher, Handle<ConcreteVariant> v) {
+        if (v.get().template is<T>()) {
+            const T& thing = v.get().template as<T>();
+            return matcher.match(Handle<T>::fromMarkedLocation(&thing));
+        }
+        return Next::match(matcher, v);
+    }
+
+    // As above, for mutable access.
+    template <typename Matcher, typename ConcreteVariant>
+    static typename Matcher::ReturnType
+    match(Matcher& matcher, MutableHandle<ConcreteVariant> v) {
+        if (v.get().template is<T>()) {
+            T& thing = v.get().template as<T>();
+            return matcher.match(MutableHandle<T>::fromMarkedLocation(&thing));
+        }
+        return Next::match(matcher, v);
+    }
+};
+
+} // namespace detail
+
+template <typename... Ts>
+struct GCPolicy<mozilla::Variant<Ts...>>
+{
+    using Impl = detail::GCVariantImplementation<Ts...>;
+
+    // Variants do not provide initial(). They do not have a default initial
+    // value and one must be provided.
+
+    // Trace whichever alternative the variant currently holds.
+    static void trace(JSTracer* trc, mozilla::Variant<Ts...>* v, const char* name) {
+        Impl::trace(trc, v, name);
+    }
+};
+
+} // namespace JS
+
+namespace js {
+
+// Read-only operations exposed by Rooted/Handle-style wrappers around a
+// mozilla::Variant. Outer is the wrapper type (CRTP); it must provide get().
+template <typename Outer, typename... Ts>
+class GCVariantOperations
+{
+    using Impl = JS::detail::GCVariantImplementation<Ts...>;
+    using Variant = mozilla::Variant<Ts...>;
+
+    const Variant& variant() const { return static_cast<const Outer*>(this)->get(); }
+
+  public:
+    template <typename T>
+    bool is() const {
+        return variant().template is<T>();
+    }
+
+    // View the contained value as a Handle<T>; the variant must currently
+    // hold a T. Handle is qualified as JS::Handle for correct lookup — this
+    // class is declared in namespace js, consistent with the other uses below.
+    template <typename T>
+    JS::Handle<T> as() const {
+        return JS::Handle<T>::fromMarkedLocation(&variant().template as<T>());
+    }
+
+    // Dispatch the matcher on the currently-held alternative.
+    template <typename Matcher>
+    typename Matcher::ReturnType
+    match(Matcher& matcher) const {
+        return Impl::match(matcher, JS::Handle<Variant>::fromMarkedLocation(&variant()));
+    }
+};
+
+// Mutating operations, additionally exposed by Rooted/MutableHandle wrappers
+// around a mozilla::Variant.
+template <typename Outer, typename... Ts>
+class MutableGCVariantOperations
+  : public GCVariantOperations<Outer, Ts...>
+{
+    using Impl = JS::detail::GCVariantImplementation<Ts...>;
+    using Variant = mozilla::Variant<Ts...>;
+
+    const Variant& variant() const { return static_cast<const Outer*>(this)->get(); }
+    Variant& variant() { return static_cast<Outer*>(this)->get(); }
+
+  public:
+    // View the contained value as a MutableHandle<T>; the variant must
+    // currently hold a T.
+    template <typename T>
+    JS::MutableHandle<T> as() {
+        return JS::MutableHandle<T>::fromMarkedLocation(&variant().template as<T>());
+    }
+
+    // Dispatch the matcher on the currently-held alternative, mutably.
+    template <typename Matcher>
+    typename Matcher::ReturnType
+    match(Matcher& matcher) {
+        return Impl::match(matcher, JS::MutableHandle<Variant>::fromMarkedLocation(&variant()));
+    }
+};
+
+// Hook mozilla::Variant into the Rooted/Handle/MutableHandle/PersistentRooted
+// machinery via the (Mutable)GCVariantOperations mixins.
+template <typename... Ts>
+class RootedBase<mozilla::Variant<Ts...>>
+  : public MutableGCVariantOperations<JS::Rooted<mozilla::Variant<Ts...>>, Ts...>
+{ };
+
+template <typename... Ts>
+class MutableHandleBase<mozilla::Variant<Ts...>>
+  : public MutableGCVariantOperations<JS::MutableHandle<mozilla::Variant<Ts...>>, Ts...>
+{ };
+
+template <typename... Ts>
+class HandleBase<mozilla::Variant<Ts...>>
+  : public GCVariantOperations<JS::Handle<mozilla::Variant<Ts...>>, Ts...>
+{ };
+
+template <typename... Ts>
+class PersistentRootedBase<mozilla::Variant<Ts...>>
+  : public MutableGCVariantOperations<JS::PersistentRooted<mozilla::Variant<Ts...>>, Ts...>
+{ };
+
+} // namespace js
+
+#endif // js_GCVariant_h
diff --git a/js/public/GCVector.h b/js/public/GCVector.h
new file mode 100644
index 0000000000..2668e65b2c
--- /dev/null
+++ b/js/public/GCVector.h
@@ -0,0 +1,249 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_GCVector_h
+#define js_GCVector_h
+
+#include "mozilla/Vector.h"
+
+#include "js/GCPolicyAPI.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+#include "js/Vector.h"
+
+namespace JS {
+
+// A GCVector is a Vector with an additional trace method that knows how
+// to visit all of the items stored in the Vector. For vectors that contain GC
+// things, this is usually more convenient than manually iterating and marking
+// the contents.
+//
+// Most types of GC pointers as keys and values can be traced with no extra
+// infrastructure. For structs and non-gc-pointer members, ensure that there is
+// a specialization of GCPolicy<T> with an appropriate trace method available
+// to handle the custom type. Generic helpers can be found in
+// js/public/TracingAPI.h.
+//
+// Note that although this Vector's trace will deal correctly with moved items,
+// it does not itself know when to barrier or trace items. To function properly
+// it must either be used with Rooted, or barriered and traced manually.
+template <typename T,
+          size_t MinInlineCapacity = 0,
+          typename AllocPolicy = js::TempAllocPolicy>
+class GCVector
+{
+    // Storage is delegated (not inherited) so only the forwarded subset of
+    // Vector's interface is exposed.
+    mozilla::Vector<T, MinInlineCapacity, AllocPolicy> vector;
+
+  public:
+    explicit GCVector(AllocPolicy alloc = AllocPolicy())
+      : vector(alloc)
+    {}
+
+    // GCVector is movable.
+    GCVector(GCVector&& vec)
+      : vector(mozilla::Move(vec.vector))
+    {}
+
+    GCVector& operator=(GCVector&& vec) {
+        vector = mozilla::Move(vec.vector);
+        return *this;
+    }
+
+    // Size and element accessors, forwarded to the underlying Vector.
+    size_t length() const { return vector.length(); }
+    bool empty() const { return vector.empty(); }
+    size_t capacity() const { return vector.capacity(); }
+
+    T* begin() { return vector.begin(); }
+    const T* begin() const { return vector.begin(); }
+
+    T* end() { return vector.end(); }
+    const T* end() const { return vector.end(); }
+
+    T& operator[](size_t i) { return vector[i]; }
+    const T& operator[](size_t i) const { return vector[i]; }
+
+    T& back() { return vector.back(); }
+    const T& back() const { return vector.back(); }
+
+    // Capacity management, forwarded to the underlying Vector.
+    bool initCapacity(size_t cap) { return vector.initCapacity(cap); }
+    bool reserve(size_t req) { return vector.reserve(req); }
+    void shrinkBy(size_t amount) { return vector.shrinkBy(amount); }
+    bool growBy(size_t amount) { return vector.growBy(amount); }
+    bool resize(size_t newLen) { return vector.resize(newLen); }
+
+    void clear() { return vector.clear(); }
+
+    // Fallible append operations.
+    template<typename U> bool append(U&& item) { return vector.append(mozilla::Forward<U>(item)); }
+
+    template<typename... Args>
+    bool
+    emplaceBack(Args&&... args) {
+        return vector.emplaceBack(mozilla::Forward<Args>(args)...);
+    }
+
+    // Infallible appends: capacity must already have been reserved.
+    template<typename U>
+    void infallibleAppend(U&& aU) {
+        return vector.infallibleAppend(mozilla::Forward<U>(aU));
+    }
+    void infallibleAppendN(const T& aT, size_t aN) {
+        return vector.infallibleAppendN(aT, aN);
+    }
+    template<typename U> void
+    infallibleAppend(const U* aBegin, const U* aEnd) {
+        return vector.infallibleAppend(aBegin, aEnd);
+    }
+    template<typename U> void infallibleAppend(const U* aBegin, size_t aLength) {
+        return vector.infallibleAppend(aBegin, aLength);
+    }
+
+    template<typename U, size_t O, class BP>
+    bool appendAll(const mozilla::Vector<U, O, BP>& aU) { return vector.appendAll(aU); }
+    template<typename U, size_t O, class BP>
+    bool appendAll(const GCVector<U, O, BP>& aU) { return vector.append(aU.begin(), aU.length()); }
+
+    bool appendN(const T& val, size_t count) { return vector.appendN(val, count); }
+
+    template<typename U> bool append(const U* aBegin, const U* aEnd) {
+        return vector.append(aBegin, aEnd);
+    }
+    template<typename U> bool append(const U* aBegin, size_t aLength) {
+        return vector.append(aBegin, aLength);
+    }
+
+    void popBack() { return vector.popBack(); }
+    T popCopy() { return vector.popCopy(); }
+
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return vector.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return vector.sizeOfIncludingThis(mallocSizeOf);
+    }
+
+    // Static form used by the rooting machinery; forwards to the member trace.
+    static void trace(GCVector* vec, JSTracer* trc) { vec->trace(trc); }
+
+    // Trace every element through GCPolicy<T>.
+    void trace(JSTracer* trc) {
+        for (auto& elem : vector)
+            GCPolicy<T>::trace(trc, &elem, "vector element");
+    }
+};
+
+} // namespace JS
+
+namespace js {
+
+// Read-only operations exposed by Rooted/Handle-style wrappers around a
+// GCVector. Outer is the wrapper type (CRTP); it must provide get().
+template <typename Outer, typename T, size_t Capacity, typename AllocPolicy>
+class GCVectorOperations
+{
+    using Vec = JS::GCVector<T, Capacity, AllocPolicy>;
+    const Vec& vec() const { return static_cast<const Outer*>(this)->get(); }
+
+  public:
+    const AllocPolicy& allocPolicy() const { return vec().allocPolicy(); }
+    size_t length() const { return vec().length(); }
+    bool empty() const { return vec().empty(); }
+    size_t capacity() const { return vec().capacity(); }
+    const T* begin() const { return vec().begin(); }
+    const T* end() const { return vec().end(); }
+    const T& back() const { return vec().back(); }
+
+    // Elements are handed out as Handles so callers cannot bypass the GC.
+    JS::Handle<T> operator[](size_t aIndex) const {
+        return JS::Handle<T>::fromMarkedLocation(&vec().operator[](aIndex));
+    }
+};
+
+// Mutating operations, additionally exposed by Rooted/MutableHandle/
+// PersistentRooted wrappers around a GCVector. Forwards the full mutable
+// Vector-style interface to the wrapped GCVector.
+template <typename Outer, typename T, size_t Capacity, typename AllocPolicy>
+class MutableGCVectorOperations
+  : public GCVectorOperations<Outer, T, Capacity, AllocPolicy>
+{
+    using Vec = JS::GCVector<T, Capacity, AllocPolicy>;
+    const Vec& vec() const { return static_cast<const Outer*>(this)->get(); }
+    Vec& vec() { return static_cast<Outer*>(this)->get(); }
+
+  public:
+    const AllocPolicy& allocPolicy() const { return vec().allocPolicy(); }
+    AllocPolicy& allocPolicy() { return vec().allocPolicy(); }
+    const T* begin() const { return vec().begin(); }
+    T* begin() { return vec().begin(); }
+    const T* end() const { return vec().end(); }
+    T* end() { return vec().end(); }
+    const T& back() const { return vec().back(); }
+    T& back() { return vec().back(); }
+
+    // Elements are handed out as (Mutable)Handles so callers cannot bypass
+    // the GC.
+    JS::Handle<T> operator[](size_t aIndex) const {
+        return JS::Handle<T>::fromMarkedLocation(&vec().operator[](aIndex));
+    }
+    JS::MutableHandle<T> operator[](size_t aIndex) {
+        return JS::MutableHandle<T>::fromMarkedLocation(&vec().operator[](aIndex));
+    }
+
+    bool initCapacity(size_t aRequest) { return vec().initCapacity(aRequest); }
+    bool reserve(size_t aRequest) { return vec().reserve(aRequest); }
+    void shrinkBy(size_t aIncr) { vec().shrinkBy(aIncr); }
+    bool growBy(size_t aIncr) { return vec().growBy(aIncr); }
+    bool resize(size_t aNewLength) { return vec().resize(aNewLength); }
+    bool growByUninitialized(size_t aIncr) { return vec().growByUninitialized(aIncr); }
+    void infallibleGrowByUninitialized(size_t aIncr) { vec().infallibleGrowByUninitialized(aIncr); }
+    bool resizeUninitialized(size_t aNewLength) { return vec().resizeUninitialized(aNewLength); }
+    void clear() { vec().clear(); }
+    void clearAndFree() { vec().clearAndFree(); }
+    template<typename U> bool append(U&& aU) { return vec().append(mozilla::Forward<U>(aU)); }
+    template<typename... Args> bool emplaceBack(Args&&... aArgs) {
+        // Fixed pack expansion: was mozilla::Forward<Args...>(aArgs...), which
+        // expands the pack inside Forward's single template argument and fails
+        // to compile for more than one argument. Each argument must be
+        // forwarded individually, as GCVector::emplaceBack does.
+        return vec().emplaceBack(mozilla::Forward<Args>(aArgs)...);
+    }
+    template<typename U, size_t O, class BP>
+    bool appendAll(const mozilla::Vector<U, O, BP>& aU) { return vec().appendAll(aU); }
+    template<typename U, size_t O, class BP>
+    bool appendAll(const JS::GCVector<U, O, BP>& aU) { return vec().appendAll(aU); }
+    bool appendN(const T& aT, size_t aN) { return vec().appendN(aT, aN); }
+    template<typename U> bool append(const U* aBegin, const U* aEnd) {
+        return vec().append(aBegin, aEnd);
+    }
+    template<typename U> bool append(const U* aBegin, size_t aLength) {
+        return vec().append(aBegin, aLength);
+    }
+    // Infallible appends: capacity must already have been reserved.
+    template<typename U> void infallibleAppend(U&& aU) {
+        vec().infallibleAppend(mozilla::Forward<U>(aU));
+    }
+    void infallibleAppendN(const T& aT, size_t aN) { vec().infallibleAppendN(aT, aN); }
+    template<typename U> void infallibleAppend(const U* aBegin, const U* aEnd) {
+        vec().infallibleAppend(aBegin, aEnd);
+    }
+    template<typename U> void infallibleAppend(const U* aBegin, size_t aLength) {
+        vec().infallibleAppend(aBegin, aLength);
+    }
+    void popBack() { vec().popBack(); }
+    T popCopy() { return vec().popCopy(); }
+    template<typename U> T* insert(T* aP, U&& aVal) {
+        return vec().insert(aP, mozilla::Forward<U>(aVal));
+    }
+    void erase(T* aT) { vec().erase(aT); }
+    void erase(T* aBegin, T* aEnd) { vec().erase(aBegin, aEnd); }
+};
+
+// Hook GCVector into the Rooted/Handle/MutableHandle/PersistentRooted
+// machinery via the (Mutable)GCVectorOperations mixins.
+template <typename T, size_t N, typename AP>
+class RootedBase<JS::GCVector<T,N,AP>>
+  : public MutableGCVectorOperations<JS::Rooted<JS::GCVector<T,N,AP>>, T,N,AP>
+{};
+
+template <typename T, size_t N, typename AP>
+class MutableHandleBase<JS::GCVector<T,N,AP>>
+  : public MutableGCVectorOperations<JS::MutableHandle<JS::GCVector<T,N,AP>>, T,N,AP>
+{};
+
+template <typename T, size_t N, typename AP>
+class HandleBase<JS::GCVector<T,N,AP>>
+  : public GCVectorOperations<JS::Handle<JS::GCVector<T,N,AP>>, T,N,AP>
+{};
+
+template <typename T, size_t N, typename AP>
+class PersistentRootedBase<JS::GCVector<T,N,AP>>
+  : public MutableGCVectorOperations<JS::PersistentRooted<JS::GCVector<T,N,AP>>, T,N,AP>
+{};
+} // namespace js
+
+#endif // js_GCVector_h
diff --git a/js/public/HashTable.h b/js/public/HashTable.h
new file mode 100644
index 0000000000..5d4c0665d7
--- /dev/null
+++ b/js/public/HashTable.h
@@ -0,0 +1,1880 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_HashTable_h
+#define js_HashTable_h
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/HashFunctions.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Move.h"
+#include "mozilla/Opaque.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/ReentrancyGuard.h"
+#include "mozilla/TemplateLib.h"
+#include "mozilla/TypeTraits.h"
+#include "mozilla/UniquePtr.h"
+
+#include "js/Utility.h"
+
+namespace js {
+
+class TempAllocPolicy;
+template <class> struct DefaultHasher;
+template <class, class> class HashMapEntry;
+namespace detail {
+ template <class T> class HashTableEntry;
+ template <class T, class HashPolicy, class AllocPolicy> class HashTable;
+} // namespace detail
+
+/*****************************************************************************/
+
+// The "generation" of a hash table is an opaque value indicating the state of
+// modification of the hash table through its lifetime. If the generation of
+// a hash table compares equal at times T1 and T2, then lookups in the hash
+// table, pointers to (or into) hash table entries, etc. at time T1 are valid
+// at time T2. If the generation compares unequal, these computations are all
+// invalid and must be performed again to be used.
+//
+// Generations are meaningfully comparable only with respect to a single hash
+// table. It's always nonsensical to compare the generation of distinct hash
+// tables H1 and H2.
+using Generation = mozilla::Opaque<uint64_t>;
+
+// A JS-friendly, STL-like container providing a hash-based map from keys to
+// values. In particular, HashMap calls constructors and destructors of all
+// objects added so non-PODs may be used safely.
+//
+// Key/Value requirements:
+// - movable, destructible, assignable
+// HashPolicy requirements:
+// - see Hash Policy section below
+// AllocPolicy:
+// - see jsalloc.h
+//
+// Note:
+// - HashMap is not reentrant: Key/Value/HashPolicy/AllocPolicy members
+// called by HashMap must not call back into the same HashMap object.
+// - Due to the lack of exception handling, the user must call |init()|.
+template <class Key,
+          class Value,
+          class HashPolicy = DefaultHasher<Key>,
+          class AllocPolicy = TempAllocPolicy>
+class HashMap
+{
+    typedef HashMapEntry<Key, Value> TableEntry;
+
+    // Adapts the user-supplied HashPolicy (keyed on Key) to the key/value
+    // TableEntry type the underlying detail::HashTable actually stores.
+    struct MapHashPolicy : HashPolicy
+    {
+        using Base = HashPolicy;
+        typedef Key KeyType;
+        static const Key& getKey(TableEntry& e) { return e.key(); }
+        static void setKey(TableEntry& e, Key& k) { HashPolicy::rekey(e.mutableKey(), k); }
+    };
+
+    typedef detail::HashTable<TableEntry, MapHashPolicy, AllocPolicy> Impl;
+    Impl impl;
+
+  public:
+    typedef typename HashPolicy::Lookup Lookup;
+    typedef TableEntry Entry;
+
+    // HashMap construction is fallible (due to OOM); thus the user must call
+    // init after constructing a HashMap and check the return value.
+    explicit HashMap(AllocPolicy a = AllocPolicy()) : impl(a) {}
+    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+    bool initialized() const { return impl.initialized(); }
+
+    // Return whether the given lookup value is present in the map. E.g.:
+    //
+    // typedef HashMap<int,char> HM;
+    // HM h;
+    // if (HM::Ptr p = h.lookup(3)) {
+    // const HM::Entry& e = *p; // p acts like a pointer to Entry
+    // assert(p->key == 3); // Entry contains the key
+    // char val = p->value; // and value
+    // }
+    //
+    // Also see the definition of Ptr in HashTable above (with T = Entry).
+    typedef typename Impl::Ptr Ptr;
+    Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+
+    // Like lookup, but does not assert if two threads call lookup at the same
+    // time. Only use this method when none of the threads will modify the map.
+    Ptr readonlyThreadsafeLookup(const Lookup& l) const { return impl.readonlyThreadsafeLookup(l); }
+
+    // Assuming |p.found()|, remove |*p|.
+    void remove(Ptr p) { impl.remove(p); }
+
+    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+    // insertion of Key |k| (where |HashPolicy::match(k,l) == true|) using
+    // |add(p,k,v)|. After |add(p,k,v)|, |p| points to the new Entry. E.g.:
+    //
+    // typedef HashMap<int,char> HM;
+    // HM h;
+    // HM::AddPtr p = h.lookupForAdd(3);
+    // if (!p) {
+    // if (!h.add(p, 3, 'a'))
+    // return false;
+    // }
+    // const HM::Entry& e = *p; // p acts like a pointer to Entry
+    // assert(p->key == 3); // Entry contains the key
+    // char val = p->value; // and value
+    //
+    // Also see the definition of AddPtr in HashTable above (with T = Entry).
+    //
+    // N.B. The caller must ensure that no mutating hash table operations
+    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+    // looking up the key a second time, the caller may use the more efficient
+    // relookupOrAdd method. This method reuses part of the hashing computation
+    // to more efficiently insert the key if it has not been added. For
+    // example, a mutation-handling version of the previous example:
+    //
+    // HM::AddPtr p = h.lookupForAdd(3);
+    // if (!p) {
+    // call_that_may_mutate_h();
+    // if (!h.relookupOrAdd(p, 3, 'a'))
+    // return false;
+    // }
+    // const HM::Entry& e = *p;
+    // assert(p->key == 3);
+    // char val = p->value;
+    typedef typename Impl::AddPtr AddPtr;
+    AddPtr lookupForAdd(const Lookup& l) const {
+        return impl.lookupForAdd(l);
+    }
+
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+        return impl.add(p,
+                        mozilla::Forward<KeyInput>(k),
+                        mozilla::Forward<ValueInput>(v));
+    }
+
+    // As above, but map |k| to a default-constructed Value().
+    template<typename KeyInput>
+    MOZ_MUST_USE bool add(AddPtr& p, KeyInput&& k) {
+        return impl.add(p, mozilla::Forward<KeyInput>(k), Value());
+    }
+
+    // |k| serves double duty here: it is passed once (unforwarded) as the
+    // lookup value and once forwarded as the key to insert on miss.
+    // mozilla::Forward is only a cast, so both arguments alias the same
+    // still-live object at the call site.
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, KeyInput&& k, ValueInput&& v) {
+        return impl.relookupOrAdd(p, k,
+                                  mozilla::Forward<KeyInput>(k),
+                                  mozilla::Forward<ValueInput>(v));
+    }
+
+    // |all()| returns a Range containing |count()| elements. E.g.:
+    //
+    // typedef HashMap<int,char> HM;
+    // HM h;
+    // for (HM::Range r = h.all(); !r.empty(); r.popFront())
+    // char c = r.front().value();
+    //
+    // Also see the definition of Range in HashTable above (with T = Entry).
+    typedef typename Impl::Range Range;
+    Range all() const { return impl.all(); }
+
+    // Typedef for the enumeration class. An Enum may be used to examine and
+    // remove table entries:
+    //
+    // typedef HashMap<int,char> HM;
+    // HM s;
+    // for (HM::Enum e(s); !e.empty(); e.popFront())
+    // if (e.front().value() == 'l')
+    // e.removeFront();
+    //
+    // Table resize may occur in Enum's destructor. Also see the definition of
+    // Enum in HashTable above (with T = Entry).
+    typedef typename Impl::Enum Enum;
+
+    // Remove all entries. This does not shrink the table. For that consider
+    // using the finish() method.
+    void clear() { impl.clear(); }
+
+    // Remove all the entries and release all internal buffers. The map must
+    // be initialized again before any use.
+    void finish() { impl.finish(); }
+
+    // Does the table contain any entries?
+    bool empty() const { return impl.empty(); }
+
+    // Number of live elements in the map.
+    uint32_t count() const { return impl.count(); }
+
+    // Total number of allocation in the dynamic table. Note: resize will
+    // happen well before count() == capacity().
+    size_t capacity() const { return impl.capacity(); }
+
+    // Don't just call |impl.sizeOfExcludingThis()| because there's no
+    // guarantee that |impl| is the first field in HashMap.
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    Generation generation() const {
+        return impl.generation();
+    }
+
+    /************************************************** Shorthand operations */
+
+    bool has(const Lookup& l) const {
+        return impl.lookup(l).found();
+    }
+
+    // Overwrite existing value with v. Return false on oom.
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool put(KeyInput&& k, ValueInput&& v) {
+        AddPtr p = lookupForAdd(k);
+        if (p) {
+            p->value() = mozilla::Forward<ValueInput>(v);
+            return true;
+        }
+        return add(p, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
+    }
+
+    // Like put, but assert that the given key is not already present.
+    template<typename KeyInput, typename ValueInput>
+    MOZ_MUST_USE bool putNew(KeyInput&& k, ValueInput&& v) {
+        // As in relookupOrAdd, |k| is both the lookup and the forwarded key.
+        return impl.putNew(k, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
+    }
+
+    // Only call this to populate an empty map after reserving space with init().
+    template<typename KeyInput, typename ValueInput>
+    void putNewInfallible(KeyInput&& k, ValueInput&& v) {
+        impl.putNewInfallible(k, mozilla::Forward<KeyInput>(k), mozilla::Forward<ValueInput>(v));
+    }
+
+    // Add (k,defaultValue) if |k| is not found. Return a false-y Ptr on oom.
+    Ptr lookupWithDefault(const Key& k, const Value& defaultValue) {
+        AddPtr p = lookupForAdd(k);
+        if (p)
+            return p;
+        bool ok = add(p, k, defaultValue);
+        MOZ_ASSERT_IF(!ok, !p); // p is left false-y on oom.
+        (void)ok;
+        return p;
+    }
+
+    // Remove if present.
+    void remove(const Lookup& l) {
+        if (Ptr p = lookup(l))
+            remove(p);
+    }
+
+    // Infallibly rekey one entry, if necessary.
+    // Requires template parameters Key and HashPolicy::Lookup to be the same type.
+    void rekeyIfMoved(const Key& old_key, const Key& new_key) {
+        if (old_key != new_key)
+            rekeyAs(old_key, new_key, new_key);
+    }
+
+    // Infallibly rekey one entry if present, and return whether that happened.
+    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const Key& new_key) {
+        if (Ptr p = lookup(old_lookup)) {
+            impl.rekeyAndMaybeRehash(p, new_lookup, new_key);
+            return true;
+        }
+        return false;
+    }
+
+    // HashMap is movable
+    HashMap(HashMap&& rhs) : impl(mozilla::Move(rhs.impl)) {}
+    void operator=(HashMap&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        impl = mozilla::Move(rhs.impl);
+    }
+
+  private:
+    // HashMap is not copyable or assignable
+    HashMap(const HashMap& hm) = delete;
+    HashMap& operator=(const HashMap& hm) = delete;
+
+    friend class Impl::Enum;
+};
+
+/*****************************************************************************/
+
+// A JS-friendly, STL-like container providing a hash-based set of values. In
+// particular, HashSet calls constructors and destructors of all objects added
+// so non-PODs may be used safely.
+//
+// T requirements:
+// - movable, destructible, assignable
+// HashPolicy requirements:
+// - see Hash Policy section below
+// AllocPolicy:
+// - see jsalloc.h
+//
+// Note:
+// - HashSet is not reentrant: T/HashPolicy/AllocPolicy members called by
+// HashSet must not call back into the same HashSet object.
+// - Due to the lack of exception handling, the user must call |init()|.
+template <class T,
+          class HashPolicy = DefaultHasher<T>,
+          class AllocPolicy = TempAllocPolicy>
+class HashSet
+{
+    // Adapts the user-supplied HashPolicy to the entry interface expected by
+    // detail::HashTable: for a set, the stored element is its own key.
+    struct SetOps : HashPolicy
+    {
+        using Base = HashPolicy;
+        typedef T KeyType;
+        static const KeyType& getKey(const T& t) { return t; }
+        static void setKey(T& t, KeyType& k) { HashPolicy::rekey(t, k); }
+    };
+
+    // Entries are stored const so set members cannot be mutated in place
+    // (which would silently invalidate their hash); see replaceKey below.
+    typedef detail::HashTable<const T, SetOps, AllocPolicy> Impl;
+    Impl impl;
+
+  public:
+    typedef typename HashPolicy::Lookup Lookup;
+    typedef T Entry;
+
+    // HashSet construction is fallible (due to OOM); thus the user must call
+    // init after constructing a HashSet and check the return value.
+    explicit HashSet(AllocPolicy a = AllocPolicy()) : impl(a) {}
+    MOZ_MUST_USE bool init(uint32_t len = 16) { return impl.init(len); }
+    bool initialized() const { return impl.initialized(); }
+
+    // Return whether the given lookup value is present in the map. E.g.:
+    //
+    // typedef HashSet<int> HS;
+    // HS h;
+    // if (HS::Ptr p = h.lookup(3)) {
+    // assert(*p == 3); // p acts like a pointer to int
+    // }
+    //
+    // Also see the definition of Ptr in HashTable above.
+    typedef typename Impl::Ptr Ptr;
+    Ptr lookup(const Lookup& l) const { return impl.lookup(l); }
+
+    // Like lookup, but does not assert if two threads call lookup at the same
+    // time. Only use this method when none of the threads will modify the map.
+    Ptr readonlyThreadsafeLookup(const Lookup& l) const { return impl.readonlyThreadsafeLookup(l); }
+
+    // Assuming |p.found()|, remove |*p|.
+    void remove(Ptr p) { impl.remove(p); }
+
+    // Like |lookup(l)|, but on miss, |p = lookupForAdd(l)| allows efficient
+    // insertion of T value |t| (where |HashPolicy::match(t,l) == true|) using
+    // |add(p,t)|. After |add(p,t)|, |p| points to the new element. E.g.:
+    //
+    // typedef HashSet<int> HS;
+    // HS h;
+    // HS::AddPtr p = h.lookupForAdd(3);
+    // if (!p) {
+    // if (!h.add(p, 3))
+    // return false;
+    // }
+    // assert(*p == 3); // p acts like a pointer to int
+    //
+    // Also see the definition of AddPtr in HashTable above.
+    //
+    // N.B. The caller must ensure that no mutating hash table operations
+    // occur between a pair of |lookupForAdd| and |add| calls. To avoid
+    // looking up the key a second time, the caller may use the more efficient
+    // relookupOrAdd method. This method reuses part of the hashing computation
+    // to more efficiently insert the key if it has not been added. For
+    // example, a mutation-handling version of the previous example:
+    //
+    // HS::AddPtr p = h.lookupForAdd(3);
+    // if (!p) {
+    // call_that_may_mutate_h();
+    // if (!h.relookupOrAdd(p, 3, 3))
+    // return false;
+    // }
+    // assert(*p == 3);
+    //
+    // Note that relookupOrAdd(p,l,t) performs Lookup using |l| and adds the
+    // entry |t|, where the caller ensures match(l,t).
+    typedef typename Impl::AddPtr AddPtr;
+    AddPtr lookupForAdd(const Lookup& l) const { return impl.lookupForAdd(l); }
+
+    template <typename U>
+    MOZ_MUST_USE bool add(AddPtr& p, U&& u) {
+        return impl.add(p, mozilla::Forward<U>(u));
+    }
+
+    template <typename U>
+    MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, U&& u) {
+        return impl.relookupOrAdd(p, l, mozilla::Forward<U>(u));
+    }
+
+    // |all()| returns a Range containing |count()| elements:
+    //
+    // typedef HashSet<int> HS;
+    // HS h;
+    // for (HS::Range r = h.all(); !r.empty(); r.popFront())
+    // int i = r.front();
+    //
+    // Also see the definition of Range in HashTable above.
+    typedef typename Impl::Range Range;
+    Range all() const { return impl.all(); }
+
+    // Typedef for the enumeration class. An Enum may be used to examine and
+    // remove table entries:
+    //
+    // typedef HashSet<int> HS;
+    // HS s;
+    // for (HS::Enum e(s); !e.empty(); e.popFront())
+    // if (e.front() == 42)
+    // e.removeFront();
+    //
+    // Table resize may occur in Enum's destructor. Also see the definition of
+    // Enum in HashTable above.
+    typedef typename Impl::Enum Enum;
+
+    // Remove all entries. This does not shrink the table. For that consider
+    // using the finish() method.
+    void clear() { impl.clear(); }
+
+    // Remove all the entries and release all internal buffers. The set must
+    // be initialized again before any use.
+    void finish() { impl.finish(); }
+
+    // Does the table contain any entries?
+    bool empty() const { return impl.empty(); }
+
+    // Number of live elements in the map.
+    uint32_t count() const { return impl.count(); }
+
+    // Total number of allocation in the dynamic table. Note: resize will
+    // happen well before count() == capacity().
+    size_t capacity() const { return impl.capacity(); }
+
+    // Don't just call |impl.sizeOfExcludingThis()| because there's no
+    // guarantee that |impl| is the first field in HashSet.
+    size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+    size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const {
+        return mallocSizeOf(this) + impl.sizeOfExcludingThis(mallocSizeOf);
+    }
+
+    Generation generation() const {
+        return impl.generation();
+    }
+
+    /************************************************** Shorthand operations */
+
+    bool has(const Lookup& l) const {
+        return impl.lookup(l).found();
+    }
+
+    // Add |u| if it is not present already. Return false on oom.
+    template <typename U>
+    MOZ_MUST_USE bool put(U&& u) {
+        AddPtr p = lookupForAdd(u);
+        return p ? true : add(p, mozilla::Forward<U>(u));
+    }
+
+    // Like put, but assert that the given key is not already present.
+    template <typename U>
+    MOZ_MUST_USE bool putNew(U&& u) {
+        return impl.putNew(u, mozilla::Forward<U>(u));
+    }
+
+    // Like putNew(u), but hash/match against the separate lookup value |l|;
+    // the caller must ensure match(l, u).
+    template <typename U>
+    MOZ_MUST_USE bool putNew(const Lookup& l, U&& u) {
+        return impl.putNew(l, mozilla::Forward<U>(u));
+    }
+
+    // Only call this to populate an empty set after reserving space with init().
+    template <typename U>
+    void putNewInfallible(const Lookup& l, U&& u) {
+        impl.putNewInfallible(l, mozilla::Forward<U>(u));
+    }
+
+    void remove(const Lookup& l) {
+        if (Ptr p = lookup(l))
+            remove(p);
+    }
+
+    // Infallibly rekey one entry, if present.
+    // Requires template parameters T and HashPolicy::Lookup to be the same type.
+    void rekeyIfMoved(const Lookup& old_value, const T& new_value) {
+        if (old_value != new_value)
+            rekeyAs(old_value, new_value, new_value);
+    }
+
+    // Infallibly rekey one entry if present, and return whether that happened.
+    bool rekeyAs(const Lookup& old_lookup, const Lookup& new_lookup, const T& new_value) {
+        if (Ptr p = lookup(old_lookup)) {
+            impl.rekeyAndMaybeRehash(p, new_lookup, new_value);
+            return true;
+        }
+        return false;
+    }
+
+    // Infallibly replace the current key at |p| with an equivalent key.
+    // Specifically, both HashPolicy::hash and HashPolicy::match must return
+    // identical results for the new and old key when applied against all
+    // possible matching values.
+    void replaceKey(Ptr p, const T& new_value) {
+        MOZ_ASSERT(p.found());
+        MOZ_ASSERT(*p != new_value);
+        MOZ_ASSERT(HashPolicy::hash(*p) == HashPolicy::hash(new_value));
+        MOZ_ASSERT(HashPolicy::match(*p, new_value));
+        // Entries are stored const (see Impl above); this cast is safe only
+        // because the asserts above guarantee the hash/match are unchanged.
+        const_cast<T&>(*p) = new_value;
+    }
+
+    // HashSet is movable
+    HashSet(HashSet&& rhs) : impl(mozilla::Move(rhs.impl)) {}
+    void operator=(HashSet&& rhs) {
+        MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+        impl = mozilla::Move(rhs.impl);
+    }
+
+  private:
+    // HashSet is not copyable or assignable
+    HashSet(const HashSet& hs) = delete;
+    HashSet& operator=(const HashSet& hs) = delete;
+
+    friend class Impl::Enum;
+};
+
+/*****************************************************************************/
+
+// Hash Policy
+//
+// A hash policy P for a hash table with key-type Key must provide:
+// - a type |P::Lookup| to use to lookup table entries;
+// - a static member function |P::hash| with signature
+//
+// static js::HashNumber hash(Lookup)
+//
+// to use to hash the lookup type; and
+// - a static member function |P::match| with signature
+//
+// static bool match(Key, Lookup)
+//
+// to use to test equality of key and lookup values.
+//
+// Normally, Lookup = Key. In general, though, different values and types of
+// values can be used to lookup and store. If a Lookup value |l| is != to the
+// added Key value |k|, the user must ensure that |P::match(k,l)|. E.g.:
+//
+// js::HashSet<Key, P>::AddPtr p = h.lookup(l);
+// if (!p) {
+// assert(P::match(k, l)); // must hold
+// h.add(p, k);
+// }
+
+// Pointer hashing policy that strips the lowest zeroBits when calculating the
+// hash to improve key distribution.
+template <typename Key, size_t zeroBits>
+struct PointerHasher
+{
+    typedef Key Lookup;
+    static HashNumber hash(const Lookup& l) {
+        // Discard the low |zeroBits| bits, which are always zero for
+        // sufficiently-aligned pointers and would otherwise skew the hash.
+        size_t word = reinterpret_cast<size_t>(l) >> zeroBits;
+        static_assert(sizeof(HashNumber) == 4,
+                      "subsequent code assumes a four-byte hash");
+#if JS_BITS_PER_WORD == 32
+        return HashNumber(word);
+#else
+        static_assert(sizeof(word) == 8,
+                      "unexpected word size, new hashing strategy required to "
+                      "properly incorporate all bits");
+        // Fold the high 32 bits into the low 32 so every address bit
+        // contributes to the four-byte hash.
+        return HashNumber((word >> 32) ^ word);
+#endif
+    }
+    static bool match(const Key& k, const Lookup& l) {
+        return k == l;
+    }
+    static void rekey(Key& k, const Key& newKey) {
+        k = newKey;
+    }
+};
+
+// Default hash policy: just use the 'lookup' value. This of course only
+// works if the lookup value is integral. HashTable applies ScrambleHashCode to
+// the result of the 'hash' which means that it is 'ok' if the lookup value is
+// not well distributed over the HashNumber domain.
+template <class Key>
+struct DefaultHasher
+{
+    typedef Key Lookup;
+    static HashNumber hash(const Lookup& l) {
+        // Hash if can implicitly cast to hash number type.
+        return l;
+    }
+    static bool match(const Key& k, const Lookup& l) {
+        // Use builtin or overloaded operator==.
+        return k == l;
+    }
+    // Replace the stored key; used by the rekeying operations above.
+    static void rekey(Key& k, const Key& newKey) {
+        k = newKey;
+    }
+};
+
+// Specialize hashing policy for pointer types. It assumes that the type is
+// at least word-aligned. For types with smaller size use PointerHasher.
+// FloorLog2(sizeof(void*)) is 2 on 32-bit and 3 on 64-bit targets, i.e. the
+// number of always-zero low bits a word-aligned pointer carries.
+template <class T>
+struct DefaultHasher<T*> : PointerHasher<T*, mozilla::tl::FloorLog2<sizeof(void*)>::value>
+{};
+
+// Specialize hashing policy for mozilla::UniquePtr to proxy the UniquePtr's
+// raw pointer to PointerHasher.
+template <class T, class D>
+struct DefaultHasher<mozilla::UniquePtr<T, D>>
+{
+    using Lookup = mozilla::UniquePtr<T, D>;
+    using PtrHasher = PointerHasher<T*, mozilla::tl::FloorLog2<sizeof(void*)>::value>;
+
+    static HashNumber hash(const Lookup& l) {
+        return PtrHasher::hash(l.get());
+    }
+    static bool match(const mozilla::UniquePtr<T, D>& k, const Lookup& l) {
+        return PtrHasher::match(k.get(), l.get());
+    }
+    // Unlike the other policies, rekey must move: UniquePtr is not copyable.
+    static void rekey(mozilla::UniquePtr<T, D>& k, mozilla::UniquePtr<T, D>&& newKey) {
+        k = mozilla::Move(newKey);
+    }
+};
+
+// For doubles, we can xor the two uint32s.
+template <>
+struct DefaultHasher<double>
+{
+    typedef double Lookup;
+    static HashNumber hash(double d) {
+        static_assert(sizeof(HashNumber) == 4,
+                      "subsequent code assumes a four-byte hash");
+        // Hash the bit pattern, not the numeric value, so e.g. -0.0 and 0.0
+        // (different bits) hash differently; match below is bitwise too.
+        uint64_t u = mozilla::BitwiseCast<uint64_t>(d);
+        return HashNumber(u ^ (u >> 32));
+    }
+    static bool match(double lhs, double rhs) {
+        return mozilla::BitwiseCast<uint64_t>(lhs) == mozilla::BitwiseCast<uint64_t>(rhs);
+    }
+};
+
+// As for double above: hash and compare the raw 32-bit representation.
+template <>
+struct DefaultHasher<float>
+{
+    typedef float Lookup;
+    static HashNumber hash(float f) {
+        static_assert(sizeof(HashNumber) == 4,
+                      "subsequent code assumes a four-byte hash");
+        return HashNumber(mozilla::BitwiseCast<uint32_t>(f));
+    }
+    static bool match(float lhs, float rhs) {
+        return mozilla::BitwiseCast<uint32_t>(lhs) == mozilla::BitwiseCast<uint32_t>(rhs);
+    }
+};
+
+// A hash policy that compares C strings by content (strcmp), not by pointer
+// identity. Note: no rekey() is defined here, so tables using this policy
+// cannot use the rekeying operations.
+struct CStringHasher
+{
+    typedef const char* Lookup;
+    static js::HashNumber hash(Lookup l) {
+        return mozilla::HashString(l);
+    }
+    static bool match(const char* key, Lookup lookup) {
+        return strcmp(key, lookup) == 0;
+    }
+};
+
+// Fallible hashing interface.
+//
+// Most of the time generating a hash code is infallible so this class provides
+// default methods that always succeed. Specialize this class for your own hash
+// policy to provide fallible hashing.
+//
+// This is used by MovableCellHasher to handle the fact that generating a unique
+// ID for cell pointer may fail due to OOM.
+template <typename HashPolicy>
+struct FallibleHashMethods
+{
+    // Return true if a hashcode is already available for its argument. Once
+    // this returns true for a specific argument it must continue to do so.
+    template <typename Lookup> static bool hasHash(Lookup&& l) { return true; }
+
+    // Fallible method to ensure a hashcode exists for its argument and create
+    // one if not. Returns false on error, e.g. out of memory.
+    template <typename Lookup> static bool ensureHash(Lookup&& l) { return true; }
+};
+
+// Convenience wrapper: dispatch hasHash through HashPolicy::Base, the
+// user-supplied policy underneath the MapHashPolicy/SetOps adapter.
+template <typename HashPolicy, typename Lookup>
+static bool
+HasHash(Lookup&& l) {
+    return FallibleHashMethods<typename HashPolicy::Base>::hasHash(mozilla::Forward<Lookup>(l));
+}
+
+// Convenience wrapper: dispatch ensureHash through HashPolicy::Base.
+template <typename HashPolicy, typename Lookup>
+static bool
+EnsureHash(Lookup&& l) {
+    return FallibleHashMethods<typename HashPolicy::Base>::ensureHash(mozilla::Forward<Lookup>(l));
+}
+
+/*****************************************************************************/
+
+// Both HashMap and HashSet are implemented by a single HashTable that is even
+// more heavily parameterized than the other two. This leaves HashTable gnarly
+// and extremely coupled to HashMap and HashSet; thus code should not use
+// HashTable directly.
+
+// The key/value pair type stored in each live HashMap slot. Move-only: the
+// underlying HashTable relocates entries by move during resize/rehash.
+template <class Key, class Value>
+class HashMapEntry
+{
+    Key key_;
+    Value value_;
+
+    template <class, class, class> friend class detail::HashTable;
+    template <class> friend class detail::HashTableEntry;
+    template <class, class, class, class> friend class HashMap;
+
+  public:
+    template<typename KeyInput, typename ValueInput>
+    HashMapEntry(KeyInput&& k, ValueInput&& v)
+      : key_(mozilla::Forward<KeyInput>(k)),
+        value_(mozilla::Forward<ValueInput>(v))
+    {}
+
+    HashMapEntry(HashMapEntry&& rhs)
+      : key_(mozilla::Move(rhs.key_)),
+        value_(mozilla::Move(rhs.value_))
+    {}
+
+    void operator=(HashMapEntry&& rhs) {
+        key_ = mozilla::Move(rhs.key_);
+        value_ = mozilla::Move(rhs.value_);
+    }
+
+    typedef Key KeyType;
+    typedef Value ValueType;
+
+    const Key& key() const { return key_; }
+    // Mutable key access exists for the rekeying machinery (see
+    // HashMap::MapHashPolicy::setKey); ordinary callers should not use it.
+    Key& mutableKey() { return key_; }
+    const Value& value() const { return value_; }
+    Value& value() { return value_; }
+
+  private:
+    HashMapEntry(const HashMapEntry&) = delete;
+    void operator=(const HashMapEntry&) = delete;
+};
+
+} // namespace js
+
+namespace mozilla {
+
+// A table entry is POD exactly when its payload is; HashTableEntry itself
+// manages construction/destruction manually (see the NB in HashTableEntry).
+template <typename T>
+struct IsPod<js::detail::HashTableEntry<T> > : IsPod<T> {};
+
+// A map entry is POD iff both its key and value types are.
+template <typename K, typename V>
+struct IsPod<js::HashMapEntry<K, V> >
+  : IntegralConstant<bool, IsPod<K>::value && IsPod<V>::value>
+{};
+
+} // namespace mozilla
+
+namespace js {
+
+namespace detail {
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable;
+
+template <class T>
+class HashTableEntry
+{
+    template <class, class, class> friend class HashTable;
+    typedef typename mozilla::RemoveConst<T>::Type NonConstT;
+
+    // keyHash doubles as the slot-state discriminator: sFreeKey (0) means
+    // empty, sRemovedKey (1) means a tombstone, and any value > 1 is a live
+    // entry's hash (so real hashes must never be 0 or 1). For live entries
+    // the low bit is additionally borrowed as the collision flag, which is
+    // why matchHash/getKeyHash mask it off.
+    HashNumber keyHash;
+    // Raw, suitably-aligned storage; T is constructed/destroyed in place via
+    // setLive/destroy rather than by this class's own ctor/dtor.
+    mozilla::AlignedStorage2<NonConstT> mem;
+
+    static const HashNumber sFreeKey = 0;
+    static const HashNumber sRemovedKey = 1;
+    static const HashNumber sCollisionBit = 1;
+
+    static bool isLiveHash(HashNumber hash)
+    {
+        return hash > sRemovedKey;
+    }
+
+    // Entries live only inside a HashTable's allocation and are never
+    // constructed, copied, or destroyed as ordinary objects.
+    HashTableEntry(const HashTableEntry&) = delete;
+    void operator=(const HashTableEntry&) = delete;
+    ~HashTableEntry() = delete;
+
+  public:
+    // NB: HashTableEntry is treated as a POD: no constructor or destructor calls.
+
+    void destroyIfLive() {
+        if (isLive())
+            mem.addr()->~T();
+    }
+
+    void destroy() {
+        MOZ_ASSERT(isLive());
+        mem.addr()->~T();
+    }
+
+    // Swap payloads and hashes with |other|. |this| must be live; if |other|
+    // is not, the payload is moved across and this entry is destroyed.
+    void swap(HashTableEntry* other) {
+        if (this == other)
+            return;
+        MOZ_ASSERT(isLive());
+        if (other->isLive()) {
+            mozilla::Swap(*mem.addr(), *other->mem.addr());
+        } else {
+            *other->mem.addr() = mozilla::Move(*mem.addr());
+            destroy();
+        }
+        mozilla::Swap(keyHash, other->keyHash);
+    }
+
+    T& get() { MOZ_ASSERT(isLive()); return *mem.addr(); }
+    NonConstT& getMutable() { MOZ_ASSERT(isLive()); return *mem.addr(); }
+
+    bool isFree() const { return keyHash == sFreeKey; }
+    void clearLive() { MOZ_ASSERT(isLive()); keyHash = sFreeKey; mem.addr()->~T(); }
+    void clear() { if (isLive()) mem.addr()->~T(); keyHash = sFreeKey; }
+    bool isRemoved() const { return keyHash == sRemovedKey; }
+    // Destroy the payload but leave a tombstone so probe chains stay intact.
+    void removeLive() { MOZ_ASSERT(isLive()); keyHash = sRemovedKey; mem.addr()->~T(); }
+    bool isLive() const { return isLiveHash(keyHash); }
+    void setCollision() { MOZ_ASSERT(isLive()); keyHash |= sCollisionBit; }
+    void unsetCollision() { keyHash &= ~sCollisionBit; }
+    bool hasCollision() const { return keyHash & sCollisionBit; }
+    bool matchHash(HashNumber hn) { return (keyHash & ~sCollisionBit) == hn; }
+    HashNumber getKeyHash() const { return keyHash & ~sCollisionBit; }
+
+    // Construct a T in place with hash |hn|; the slot must not already be
+    // live, and |hn| must itself be a live hash (> sRemovedKey).
+    template <typename... Args>
+    void setLive(HashNumber hn, Args&&... args)
+    {
+        MOZ_ASSERT(!isLive());
+        keyHash = hn;
+        new(mem.addr()) T(mozilla::Forward<Args>(args)...);
+        MOZ_ASSERT(isLive());
+    }
+};
+
+template <class T, class HashPolicy, class AllocPolicy>
+class HashTable : private AllocPolicy
+{
+ friend class mozilla::ReentrancyGuard;
+
+ typedef typename mozilla::RemoveConst<T>::Type NonConstT;
+ typedef typename HashPolicy::KeyType Key;
+ typedef typename HashPolicy::Lookup Lookup;
+
+ public:
+ typedef HashTableEntry<T> Entry;
+
+ // A nullable pointer to a hash table element. A Ptr |p| can be tested
+ // either explicitly |if (p.found()) p->...| or using boolean conversion
+ // |if (p) p->...|. Ptr objects must not be used after any mutating hash
+ // table operations unless |generation()| is tested.
+ class Ptr
+ {
+ friend class HashTable;
+
+ Entry* entry_;
+#ifdef JS_DEBUG
+ const HashTable* table_;
+ Generation generation;
+#endif
+
+ protected:
+ Ptr(Entry& entry, const HashTable& tableArg)
+ : entry_(&entry)
+#ifdef JS_DEBUG
+ , table_(&tableArg)
+ , generation(tableArg.generation())
+#endif
+ {}
+
+ public:
+ Ptr()
+ : entry_(nullptr)
+#ifdef JS_DEBUG
+ , table_(nullptr)
+ , generation(0)
+#endif
+ {}
+
+ bool isValid() const {
+ return !entry_;
+ }
+
+ bool found() const {
+ if (isValid())
+ return false;
+#ifdef JS_DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+#endif
+ return entry_->isLive();
+ }
+
+ explicit operator bool() const {
+ return found();
+ }
+
+ bool operator==(const Ptr& rhs) const {
+ MOZ_ASSERT(found() && rhs.found());
+ return entry_ == rhs.entry_;
+ }
+
+ bool operator!=(const Ptr& rhs) const {
+#ifdef JS_DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+#endif
+ return !(*this == rhs);
+ }
+
+ T& operator*() const {
+#ifdef JS_DEBUG
+ MOZ_ASSERT(found());
+ MOZ_ASSERT(generation == table_->generation());
+#endif
+ return entry_->get();
+ }
+
+ T* operator->() const {
+#ifdef JS_DEBUG
+ MOZ_ASSERT(found());
+ MOZ_ASSERT(generation == table_->generation());
+#endif
+ return &entry_->get();
+ }
+ };
+
+ // A Ptr that can be used to add a key after a failed lookup.
+ class AddPtr : public Ptr
+ {
+ friend class HashTable;
+ HashNumber keyHash;
+#ifdef JS_DEBUG
+ uint64_t mutationCount;
+#endif
+
+ AddPtr(Entry& entry, const HashTable& tableArg, HashNumber hn)
+ : Ptr(entry, tableArg)
+ , keyHash(hn)
+#ifdef JS_DEBUG
+ , mutationCount(tableArg.mutationCount)
+#endif
+ {}
+
+ public:
+ AddPtr() : keyHash(0) {}
+ };
+
+ // A collection of hash table entries. The collection is enumerated by
+ // calling |front()| followed by |popFront()| as long as |!empty()|. As
+ // with Ptr/AddPtr, Range objects must not be used after any mutating hash
+ // table operation unless the |generation()| is tested.
+ class Range
+ {
+ protected:
+ friend class HashTable;
+
+ Range(const HashTable& tableArg, Entry* c, Entry* e)
+ : cur(c)
+ , end(e)
+#ifdef JS_DEBUG
+ , table_(&tableArg)
+ , mutationCount(tableArg.mutationCount)
+ , generation(tableArg.generation())
+ , validEntry(true)
+#endif
+ {
+ while (cur < end && !cur->isLive())
+ ++cur;
+ }
+
+ Entry* cur;
+ Entry* end;
+#ifdef JS_DEBUG
+ const HashTable* table_;
+ uint64_t mutationCount;
+ Generation generation;
+ bool validEntry;
+#endif
+
+ public:
+ Range()
+ : cur(nullptr)
+ , end(nullptr)
+#ifdef JS_DEBUG
+ , table_(nullptr)
+ , mutationCount(0)
+ , generation(0)
+ , validEntry(false)
+#endif
+ {}
+
+ bool empty() const {
+#ifdef JS_DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+ return cur == end;
+ }
+
+ T& front() const {
+ MOZ_ASSERT(!empty());
+#ifdef JS_DEBUG
+ MOZ_ASSERT(validEntry);
+ MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+ return cur->get();
+ }
+
+ void popFront() {
+ MOZ_ASSERT(!empty());
+#ifdef JS_DEBUG
+ MOZ_ASSERT(generation == table_->generation());
+ MOZ_ASSERT(mutationCount == table_->mutationCount);
+#endif
+ while (++cur < end && !cur->isLive())
+ continue;
+#ifdef JS_DEBUG
+ validEntry = true;
+#endif
+ }
+ };
+
+ // A Range whose lifetime delimits a mutating enumeration of a hash table.
+ // Since rehashing when elements were removed during enumeration would be
+ // bad, it is postponed until the Enum is destructed. Since the Enum's
+ // destructor touches the hash table, the user must ensure that the hash
+ // table is still alive when the destructor runs.
+ class Enum : public Range
+ {
+ friend class HashTable;
+
+ HashTable& table_;
+ bool rekeyed;
+ bool removed;
+
+ /* Not copyable. */
+ Enum(const Enum&) = delete;
+ void operator=(const Enum&) = delete;
+
+ public:
+ template<class Map> explicit
+ Enum(Map& map) : Range(map.all()), table_(map.impl), rekeyed(false), removed(false) {}
+
+ // Removes the |front()| element from the table, leaving |front()|
+ // invalid until the next call to |popFront()|. For example:
+ //
+ // HashSet<int> s;
+ // for (HashSet<int>::Enum e(s); !e.empty(); e.popFront())
+ // if (e.front() == 42)
+ // e.removeFront();
+ void removeFront() {
+ table_.remove(*this->cur);
+ removed = true;
+#ifdef JS_DEBUG
+ this->validEntry = false;
+ this->mutationCount = table_.mutationCount;
+#endif
+ }
+
+ NonConstT& mutableFront() {
+ MOZ_ASSERT(!this->empty());
+#ifdef JS_DEBUG
+ MOZ_ASSERT(this->validEntry);
+ MOZ_ASSERT(this->generation == this->Range::table_->generation());
+ MOZ_ASSERT(this->mutationCount == this->Range::table_->mutationCount);
+#endif
+ return this->cur->getMutable();
+ }
+
+ // Removes the |front()| element and re-inserts it into the table with
+ // a new key at the new Lookup position. |front()| is invalid after
+ // this operation until the next call to |popFront()|.
+ void rekeyFront(const Lookup& l, const Key& k) {
+ MOZ_ASSERT(&k != &HashPolicy::getKey(this->cur->get()));
+ Ptr p(*this->cur, table_);
+ table_.rekeyWithoutRehash(p, l, k);
+ rekeyed = true;
+#ifdef JS_DEBUG
+ this->validEntry = false;
+ this->mutationCount = table_.mutationCount;
+#endif
+ }
+
+ void rekeyFront(const Key& k) {
+ rekeyFront(k, k);
+ }
+
+ // Potentially rehashes the table.
+ ~Enum() {
+ if (rekeyed) {
+ table_.gen++;
+ table_.checkOverRemoved();
+ }
+
+ if (removed)
+ table_.compactIfUnderloaded();
+ }
+ };
+
+ // HashTable is movable
+ HashTable(HashTable&& rhs)
+ : AllocPolicy(rhs)
+ {
+ mozilla::PodAssign(this, &rhs);
+ rhs.table = nullptr;
+ }
+ void operator=(HashTable&& rhs) {
+ MOZ_ASSERT(this != &rhs, "self-move assignment is prohibited");
+ if (table)
+ destroyTable(*this, table, capacity());
+ mozilla::PodAssign(this, &rhs);
+ rhs.table = nullptr;
+ }
+
+ private:
+ // HashTable is not copyable or assignable
+ HashTable(const HashTable&) = delete;
+ void operator=(const HashTable&) = delete;
+
+ private:
+ static const size_t CAP_BITS = 30;
+
+ public:
+ uint64_t gen:56; // entry storage generation number
+ uint64_t hashShift:8; // multiplicative hash shift
+ Entry* table; // entry storage
+ uint32_t entryCount; // number of entries in table
+ uint32_t removedCount; // removed entry sentinels in table
+
+#ifdef JS_DEBUG
+ uint64_t mutationCount;
+ mutable bool mEntered;
+ // Note that some updates to these stats are not thread-safe. See the
+ // comment on the three-argument overloading of HashTable::lookup().
+ mutable struct Stats
+ {
+ uint32_t searches; // total number of table searches
+ uint32_t steps; // hash chain links traversed
+ uint32_t hits; // searches that found key
+ uint32_t misses; // searches that didn't find key
+ uint32_t addOverRemoved; // adds that recycled a removed entry
+ uint32_t removes; // calls to remove
+ uint32_t removeFrees; // calls to remove that freed the entry
+ uint32_t grows; // table expansions
+ uint32_t shrinks; // table contractions
+ uint32_t compresses; // table compressions
+ uint32_t rehashes; // tombstone decontaminations
+ } stats;
+# define METER(x) x
+#else
+# define METER(x)
+#endif
+
+ // The default initial capacity is 32 (enough to hold 16 elements), but it
+ // can be as low as 4.
+ static const unsigned sMinCapacityLog2 = 2;
+ static const unsigned sMinCapacity = 1 << sMinCapacityLog2;
+ static const unsigned sMaxInit = JS_BIT(CAP_BITS - 1);
+ static const unsigned sMaxCapacity = JS_BIT(CAP_BITS);
+ static const unsigned sHashBits = mozilla::tl::BitSize<HashNumber>::value;
+
+ // Hash-table alpha is conceptually a fraction, but to avoid floating-point
+ // math we implement it as a ratio of integers.
+ static const uint8_t sAlphaDenominator = 4;
+ static const uint8_t sMinAlphaNumerator = 1; // min alpha: 1/4
+ static const uint8_t sMaxAlphaNumerator = 3; // max alpha: 3/4
+
+ static const HashNumber sFreeKey = Entry::sFreeKey;
+ static const HashNumber sRemovedKey = Entry::sRemovedKey;
+ static const HashNumber sCollisionBit = Entry::sCollisionBit;
+
+ void setTableSizeLog2(unsigned sizeLog2)
+ {
+ hashShift = sHashBits - sizeLog2;
+ }
+
+ static bool isLiveHash(HashNumber hash)
+ {
+ return Entry::isLiveHash(hash);
+ }
+
+ static HashNumber prepareHash(const Lookup& l)
+ {
+ HashNumber keyHash = ScrambleHashCode(HashPolicy::hash(l));
+
+ // Avoid reserved hash codes.
+ if (!isLiveHash(keyHash))
+ keyHash -= (sRemovedKey + 1);
+ return keyHash & ~sCollisionBit;
+ }
+
+ enum FailureBehavior { DontReportFailure = false, ReportFailure = true };
+
+ static Entry* createTable(AllocPolicy& alloc, uint32_t capacity,
+ FailureBehavior reportFailure = ReportFailure)
+ {
+ static_assert(sFreeKey == 0,
+ "newly-calloc'd tables have to be considered empty");
+ if (reportFailure)
+ return alloc.template pod_calloc<Entry>(capacity);
+
+ return alloc.template maybe_pod_calloc<Entry>(capacity);
+ }
+
+ static Entry* maybeCreateTable(AllocPolicy& alloc, uint32_t capacity)
+ {
+ static_assert(sFreeKey == 0,
+ "newly-calloc'd tables have to be considered empty");
+ return alloc.template maybe_pod_calloc<Entry>(capacity);
+ }
+
+ static void destroyTable(AllocPolicy& alloc, Entry* oldTable, uint32_t capacity)
+ {
+ Entry* end = oldTable + capacity;
+ for (Entry* e = oldTable; e < end; ++e)
+ e->destroyIfLive();
+ alloc.free_(oldTable);
+ }
+
+ public:
+ explicit HashTable(AllocPolicy ap)
+ : AllocPolicy(ap)
+ , gen(0)
+ , hashShift(sHashBits)
+ , table(nullptr)
+ , entryCount(0)
+ , removedCount(0)
+#ifdef JS_DEBUG
+ , mutationCount(0)
+ , mEntered(false)
+#endif
+ {}
+
+ MOZ_MUST_USE bool init(uint32_t length)
+ {
+ MOZ_ASSERT(!initialized());
+
+ // Reject all lengths whose initial computed capacity would exceed
+ // sMaxCapacity. Round that maximum length down to the nearest power
+ // of two for speedier code.
+ if (MOZ_UNLIKELY(length > sMaxInit)) {
+ this->reportAllocOverflow();
+ return false;
+ }
+
+ static_assert((sMaxInit * sAlphaDenominator) / sAlphaDenominator == sMaxInit,
+ "multiplication in numerator below could overflow");
+ static_assert(sMaxInit * sAlphaDenominator <= UINT32_MAX - sMaxAlphaNumerator,
+ "numerator calculation below could potentially overflow");
+
+ // Compute the smallest capacity allowing |length| elements to be
+ // inserted without rehashing: ceil(length / max-alpha). (Ceiling
+ // integral division: <http://stackoverflow.com/a/2745086>.)
+ uint32_t newCapacity =
+ (length * sAlphaDenominator + sMaxAlphaNumerator - 1) / sMaxAlphaNumerator;
+ if (newCapacity < sMinCapacity)
+ newCapacity = sMinCapacity;
+
+ // FIXME: use JS_CEILING_LOG2 when PGO stops crashing (bug 543034).
+ uint32_t roundUp = sMinCapacity, roundUpLog2 = sMinCapacityLog2;
+ while (roundUp < newCapacity) {
+ roundUp <<= 1;
+ ++roundUpLog2;
+ }
+
+ newCapacity = roundUp;
+ MOZ_ASSERT(newCapacity >= length);
+ MOZ_ASSERT(newCapacity <= sMaxCapacity);
+
+ table = createTable(*this, newCapacity);
+ if (!table)
+ return false;
+
+ setTableSizeLog2(roundUpLog2);
+ METER(memset(&stats, 0, sizeof(stats)));
+ return true;
+ }
+
+ bool initialized() const
+ {
+ return !!table;
+ }
+
+ ~HashTable()
+ {
+ if (table)
+ destroyTable(*this, table, capacity());
+ }
+
+ private:
+ HashNumber hash1(HashNumber hash0) const
+ {
+ return hash0 >> hashShift;
+ }
+
+ struct DoubleHash
+ {
+ HashNumber h2;
+ HashNumber sizeMask;
+ };
+
+ DoubleHash hash2(HashNumber curKeyHash) const
+ {
+ unsigned sizeLog2 = sHashBits - hashShift;
+ DoubleHash dh = {
+ ((curKeyHash << sizeLog2) >> hashShift) | 1,
+ (HashNumber(1) << sizeLog2) - 1
+ };
+ return dh;
+ }
+
+ static HashNumber applyDoubleHash(HashNumber h1, const DoubleHash& dh)
+ {
+ return (h1 - dh.h2) & dh.sizeMask;
+ }
+
+ bool overloaded()
+ {
+ static_assert(sMaxCapacity <= UINT32_MAX / sMaxAlphaNumerator,
+ "multiplication below could overflow");
+ return entryCount + removedCount >=
+ capacity() * sMaxAlphaNumerator / sAlphaDenominator;
+ }
+
+ // Would the table be underloaded if it had the given capacity and entryCount?
+ static bool wouldBeUnderloaded(uint32_t capacity, uint32_t entryCount)
+ {
+ static_assert(sMaxCapacity <= UINT32_MAX / sMinAlphaNumerator,
+ "multiplication below could overflow");
+ return capacity > sMinCapacity &&
+ entryCount <= capacity * sMinAlphaNumerator / sAlphaDenominator;
+ }
+
+ bool underloaded()
+ {
+ return wouldBeUnderloaded(capacity(), entryCount);
+ }
+
+ static bool match(Entry& e, const Lookup& l)
+ {
+ return HashPolicy::match(HashPolicy::getKey(e.get()), l);
+ }
+
+ // Warning: in order for readonlyThreadsafeLookup() to be safe this
+ // function must not modify the table in any way when |collisionBit| is 0.
+ // (The use of the METER() macro to increment stats violates this
+ // restriction but we will live with that for now because it's enabled so
+ // rarely.)
+ Entry& lookup(const Lookup& l, HashNumber keyHash, unsigned collisionBit) const
+ {
+ MOZ_ASSERT(isLiveHash(keyHash));
+ MOZ_ASSERT(!(keyHash & sCollisionBit));
+ MOZ_ASSERT(collisionBit == 0 || collisionBit == sCollisionBit);
+ MOZ_ASSERT(table);
+ METER(stats.searches++);
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(keyHash);
+ Entry* entry = &table[h1];
+
+ // Miss: return space for a new entry.
+ if (entry->isFree()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+
+ // Hit: return entry.
+ if (entry->matchHash(keyHash) && match(*entry, l)) {
+ METER(stats.hits++);
+ return *entry;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(keyHash);
+
+ // Save the first removed entry pointer so we can recycle later.
+ Entry* firstRemoved = nullptr;
+
+ while (true) {
+ if (MOZ_UNLIKELY(entry->isRemoved())) {
+ if (!firstRemoved)
+ firstRemoved = entry;
+ } else {
+ if (collisionBit == sCollisionBit)
+ entry->setCollision();
+ }
+
+ METER(stats.steps++);
+ h1 = applyDoubleHash(h1, dh);
+
+ entry = &table[h1];
+ if (entry->isFree()) {
+ METER(stats.misses++);
+ return firstRemoved ? *firstRemoved : *entry;
+ }
+
+ if (entry->matchHash(keyHash) && match(*entry, l)) {
+ METER(stats.hits++);
+ return *entry;
+ }
+ }
+ }
+
+ // This is a copy of lookup hardcoded to the assumptions:
+ // 1. the lookup is a lookupForAdd
+    //   2. the key, whose |keyHash| has been passed, is not in the table,
+ // 3. no entries have been removed from the table.
+ // This specialized search avoids the need for recovering lookup values
+ // from entries, which allows more flexible Lookup/Key types.
+ Entry& findFreeEntry(HashNumber keyHash)
+ {
+ MOZ_ASSERT(!(keyHash & sCollisionBit));
+ MOZ_ASSERT(table);
+ METER(stats.searches++);
+
+ // We assume 'keyHash' has already been distributed.
+
+ // Compute the primary hash address.
+ HashNumber h1 = hash1(keyHash);
+ Entry* entry = &table[h1];
+
+ // Miss: return space for a new entry.
+ if (!entry->isLive()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+
+ // Collision: double hash.
+ DoubleHash dh = hash2(keyHash);
+
+ while (true) {
+ MOZ_ASSERT(!entry->isRemoved());
+ entry->setCollision();
+
+ METER(stats.steps++);
+ h1 = applyDoubleHash(h1, dh);
+
+ entry = &table[h1];
+ if (!entry->isLive()) {
+ METER(stats.misses++);
+ return *entry;
+ }
+ }
+ }
+
+ enum RebuildStatus { NotOverloaded, Rehashed, RehashFailed };
+
+ RebuildStatus changeTableSize(int deltaLog2, FailureBehavior reportFailure = ReportFailure)
+ {
+ // Look, but don't touch, until we succeed in getting new entry store.
+ Entry* oldTable = table;
+ uint32_t oldCap = capacity();
+ uint32_t newLog2 = sHashBits - hashShift + deltaLog2;
+ uint32_t newCapacity = JS_BIT(newLog2);
+ if (MOZ_UNLIKELY(newCapacity > sMaxCapacity)) {
+ if (reportFailure)
+ this->reportAllocOverflow();
+ return RehashFailed;
+ }
+
+ Entry* newTable = createTable(*this, newCapacity, reportFailure);
+ if (!newTable)
+ return RehashFailed;
+
+ // We can't fail from here on, so update table parameters.
+ setTableSizeLog2(newLog2);
+ removedCount = 0;
+ gen++;
+ table = newTable;
+
+ // Copy only live entries, leaving removed ones behind.
+ Entry* end = oldTable + oldCap;
+ for (Entry* src = oldTable; src < end; ++src) {
+ if (src->isLive()) {
+ HashNumber hn = src->getKeyHash();
+ findFreeEntry(hn).setLive(
+ hn, mozilla::Move(const_cast<typename Entry::NonConstT&>(src->get())));
+ src->destroy();
+ }
+ }
+
+ // All entries have been destroyed, no need to destroyTable.
+ this->free_(oldTable);
+ return Rehashed;
+ }
+
+ bool shouldCompressTable()
+ {
+ // Compress if a quarter or more of all entries are removed.
+ return removedCount >= (capacity() >> 2);
+ }
+
+ RebuildStatus checkOverloaded(FailureBehavior reportFailure = ReportFailure)
+ {
+ if (!overloaded())
+ return NotOverloaded;
+
+ int deltaLog2;
+ if (shouldCompressTable()) {
+ METER(stats.compresses++);
+ deltaLog2 = 0;
+ } else {
+ METER(stats.grows++);
+ deltaLog2 = 1;
+ }
+
+ return changeTableSize(deltaLog2, reportFailure);
+ }
+
+ // Infallibly rehash the table if we are overloaded with removals.
+ void checkOverRemoved()
+ {
+ if (overloaded()) {
+ if (checkOverloaded(DontReportFailure) == RehashFailed)
+ rehashTableInPlace();
+ }
+ }
+
+ void remove(Entry& e)
+ {
+ MOZ_ASSERT(table);
+ METER(stats.removes++);
+
+ if (e.hasCollision()) {
+ e.removeLive();
+ removedCount++;
+ } else {
+ METER(stats.removeFrees++);
+ e.clearLive();
+ }
+ entryCount--;
+#ifdef JS_DEBUG
+ mutationCount++;
+#endif
+ }
+
+ void checkUnderloaded()
+ {
+ if (underloaded()) {
+ METER(stats.shrinks++);
+ (void) changeTableSize(-1, DontReportFailure);
+ }
+ }
+
+ // Resize the table down to the largest capacity which doesn't underload the
+ // table. Since we call checkUnderloaded() on every remove, you only need
+ // to call this after a bulk removal of items done without calling remove().
+ void compactIfUnderloaded()
+ {
+ int32_t resizeLog2 = 0;
+ uint32_t newCapacity = capacity();
+ while (wouldBeUnderloaded(newCapacity, entryCount)) {
+ newCapacity = newCapacity >> 1;
+ resizeLog2--;
+ }
+
+ if (resizeLog2 != 0)
+ (void) changeTableSize(resizeLog2, DontReportFailure);
+ }
+
+ // This is identical to changeTableSize(currentSize), but without requiring
+ // a second table. We do this by recycling the collision bits to tell us if
+ // the element is already inserted or still waiting to be inserted. Since
+ // already-inserted elements win any conflicts, we get the same table as we
+ // would have gotten through random insertion order.
+ void rehashTableInPlace()
+ {
+ METER(stats.rehashes++);
+ removedCount = 0;
+ for (size_t i = 0; i < capacity(); ++i)
+ table[i].unsetCollision();
+
+ for (size_t i = 0; i < capacity();) {
+ Entry* src = &table[i];
+
+ if (!src->isLive() || src->hasCollision()) {
+ ++i;
+ continue;
+ }
+
+ HashNumber keyHash = src->getKeyHash();
+ HashNumber h1 = hash1(keyHash);
+ DoubleHash dh = hash2(keyHash);
+ Entry* tgt = &table[h1];
+ while (true) {
+ if (!tgt->hasCollision()) {
+ src->swap(tgt);
+ tgt->setCollision();
+ break;
+ }
+
+ h1 = applyDoubleHash(h1, dh);
+ tgt = &table[h1];
+ }
+ }
+
+ // TODO: this algorithm leaves collision bits on *all* elements, even if
+ // they are on no collision path. We have the option of setting the
+ // collision bits correctly on a subsequent pass or skipping the rehash
+ // unless we are totally filled with tombstones: benchmark to find out
+ // which approach is best.
+ }
+
+    // Note: |l| may be a reference to a piece of |args|, so this function
+    // must take care not to use |l| after moving |args|.
+ //
+ // Prefer to use putNewInfallible; this function does not check
+ // invariants.
+ template <typename... Args>
+ void putNewInfallibleInternal(const Lookup& l, Args&&... args)
+ {
+ MOZ_ASSERT(table);
+
+ HashNumber keyHash = prepareHash(l);
+ Entry* entry = &findFreeEntry(keyHash);
+ MOZ_ASSERT(entry);
+
+ if (entry->isRemoved()) {
+ METER(stats.addOverRemoved++);
+ removedCount--;
+ keyHash |= sCollisionBit;
+ }
+
+ entry->setLive(keyHash, mozilla::Forward<Args>(args)...);
+ entryCount++;
+#ifdef JS_DEBUG
+ mutationCount++;
+#endif
+ }
+
+ public:
+ void clear()
+ {
+ if (mozilla::IsPod<Entry>::value) {
+ memset(table, 0, sizeof(*table) * capacity());
+ } else {
+ uint32_t tableCapacity = capacity();
+ Entry* end = table + tableCapacity;
+ for (Entry* e = table; e < end; ++e)
+ e->clear();
+ }
+ removedCount = 0;
+ entryCount = 0;
+#ifdef JS_DEBUG
+ mutationCount++;
+#endif
+ }
+
+ void finish()
+ {
+#ifdef JS_DEBUG
+ MOZ_ASSERT(!mEntered);
+#endif
+ if (!table)
+ return;
+
+ destroyTable(*this, table, capacity());
+ table = nullptr;
+ gen++;
+ entryCount = 0;
+ removedCount = 0;
+#ifdef JS_DEBUG
+ mutationCount++;
+#endif
+ }
+
+ Range all() const
+ {
+ MOZ_ASSERT(table);
+ return Range(*this, table, table + capacity());
+ }
+
+ bool empty() const
+ {
+ MOZ_ASSERT(table);
+ return !entryCount;
+ }
+
+ uint32_t count() const
+ {
+ MOZ_ASSERT(table);
+ return entryCount;
+ }
+
+ uint32_t capacity() const
+ {
+ MOZ_ASSERT(table);
+ return JS_BIT(sHashBits - hashShift);
+ }
+
+ Generation generation() const
+ {
+ MOZ_ASSERT(table);
+ return Generation(gen);
+ }
+
+ size_t sizeOfExcludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+ {
+ return mallocSizeOf(table);
+ }
+
+ size_t sizeOfIncludingThis(mozilla::MallocSizeOf mallocSizeOf) const
+ {
+ return mallocSizeOf(this) + sizeOfExcludingThis(mallocSizeOf);
+ }
+
+ Ptr lookup(const Lookup& l) const
+ {
+ mozilla::ReentrancyGuard g(*this);
+ if (!HasHash<HashPolicy>(l))
+ return Ptr();
+ HashNumber keyHash = prepareHash(l);
+ return Ptr(lookup(l, keyHash, 0), *this);
+ }
+
+ Ptr readonlyThreadsafeLookup(const Lookup& l) const
+ {
+ if (!HasHash<HashPolicy>(l))
+ return Ptr();
+ HashNumber keyHash = prepareHash(l);
+ return Ptr(lookup(l, keyHash, 0), *this);
+ }
+
+ AddPtr lookupForAdd(const Lookup& l) const
+ {
+ mozilla::ReentrancyGuard g(*this);
+ if (!EnsureHash<HashPolicy>(l))
+ return AddPtr();
+ HashNumber keyHash = prepareHash(l);
+ Entry& entry = lookup(l, keyHash, sCollisionBit);
+ AddPtr p(entry, *this, keyHash);
+ return p;
+ }
+
+ template <typename... Args>
+ MOZ_MUST_USE bool add(AddPtr& p, Args&&... args)
+ {
+ mozilla::ReentrancyGuard g(*this);
+ MOZ_ASSERT(table);
+ MOZ_ASSERT(!p.found());
+ MOZ_ASSERT(!(p.keyHash & sCollisionBit));
+
+ // Check for error from ensureHash() here.
+ if (p.isValid())
+ return false;
+
+ // Changing an entry from removed to live does not affect whether we
+ // are overloaded and can be handled separately.
+ if (p.entry_->isRemoved()) {
+ if (!this->checkSimulatedOOM())
+ return false;
+ METER(stats.addOverRemoved++);
+ removedCount--;
+ p.keyHash |= sCollisionBit;
+ } else {
+ // Preserve the validity of |p.entry_|.
+ RebuildStatus status = checkOverloaded();
+ if (status == RehashFailed)
+ return false;
+ if (status == NotOverloaded && !this->checkSimulatedOOM())
+ return false;
+ if (status == Rehashed)
+ p.entry_ = &findFreeEntry(p.keyHash);
+ }
+
+ p.entry_->setLive(p.keyHash, mozilla::Forward<Args>(args)...);
+ entryCount++;
+#ifdef JS_DEBUG
+ mutationCount++;
+ p.generation = generation();
+ p.mutationCount = mutationCount;
+#endif
+ return true;
+ }
+
+ // Note: |l| may be a reference to a piece of |u|, so this function
+ // must take care not to use |l| after moving |u|.
+ template <typename... Args>
+ void putNewInfallible(const Lookup& l, Args&&... args)
+ {
+ MOZ_ASSERT(!lookup(l).found());
+ mozilla::ReentrancyGuard g(*this);
+ putNewInfallibleInternal(l, mozilla::Forward<Args>(args)...);
+ }
+
+    // Note: |l| may alias arguments in |args|, so this function must take
+ // care not to use |l| after moving |args|.
+ template <typename... Args>
+ MOZ_MUST_USE bool putNew(const Lookup& l, Args&&... args)
+ {
+ if (!this->checkSimulatedOOM())
+ return false;
+
+ if (!EnsureHash<HashPolicy>(l))
+ return false;
+
+ if (checkOverloaded() == RehashFailed)
+ return false;
+
+ putNewInfallible(l, mozilla::Forward<Args>(args)...);
+ return true;
+ }
+
+    // Note: |l| may be a reference to a piece of |args|, so this function
+    // must take care not to use |l| after moving |args|.
+ template <typename... Args>
+ MOZ_MUST_USE bool relookupOrAdd(AddPtr& p, const Lookup& l, Args&&... args)
+ {
+ // Check for error from ensureHash() here.
+ if (p.isValid())
+ return false;
+
+#ifdef JS_DEBUG
+ p.generation = generation();
+ p.mutationCount = mutationCount;
+#endif
+ {
+ mozilla::ReentrancyGuard g(*this);
+ MOZ_ASSERT(prepareHash(l) == p.keyHash); // l has not been destroyed
+ p.entry_ = &lookup(l, p.keyHash, sCollisionBit);
+ }
+ return p.found() || add(p, mozilla::Forward<Args>(args)...);
+ }
+
+ void remove(Ptr p)
+ {
+ MOZ_ASSERT(table);
+ mozilla::ReentrancyGuard g(*this);
+ MOZ_ASSERT(p.found());
+ remove(*p.entry_);
+ checkUnderloaded();
+ }
+
+ void rekeyWithoutRehash(Ptr p, const Lookup& l, const Key& k)
+ {
+ MOZ_ASSERT(table);
+ mozilla::ReentrancyGuard g(*this);
+ MOZ_ASSERT(p.found());
+ typename HashTableEntry<T>::NonConstT t(mozilla::Move(*p));
+ HashPolicy::setKey(t, const_cast<Key&>(k));
+ remove(*p.entry_);
+ putNewInfallibleInternal(l, mozilla::Move(t));
+ }
+
+ void rekeyAndMaybeRehash(Ptr p, const Lookup& l, const Key& k)
+ {
+ rekeyWithoutRehash(p, l, k);
+ checkOverRemoved();
+ }
+
+#undef METER
+};
+
+} // namespace detail
+} // namespace js
+
+#endif /* js_HashTable_h */
diff --git a/js/public/HeapAPI.h b/js/public/HeapAPI.h
new file mode 100644
index 0000000000..e37d13e932
--- /dev/null
+++ b/js/public/HeapAPI.h
@@ -0,0 +1,406 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_HeapAPI_h
+#define js_HeapAPI_h
+
+#include <limits.h>
+
+#include "jspubtd.h"
+
+#include "js/TraceKind.h"
+#include "js/Utility.h"
+
+/* These values are private to the JS engine. */
+namespace js {
+
+JS_FRIEND_API(bool)
+CurrentThreadCanAccessZone(JS::Zone* zone);
+
+namespace gc {
+
+struct Cell;
+
+const size_t ArenaShift = 12;
+const size_t ArenaSize = size_t(1) << ArenaShift;
+const size_t ArenaMask = ArenaSize - 1;
+
+#ifdef JS_GC_SMALL_CHUNK_SIZE
+const size_t ChunkShift = 18;
+#else
+const size_t ChunkShift = 20;
+#endif
+const size_t ChunkSize = size_t(1) << ChunkShift;
+const size_t ChunkMask = ChunkSize - 1;
+
+const size_t CellShift = 3;
+const size_t CellSize = size_t(1) << CellShift;
+const size_t CellMask = CellSize - 1;
+
+/* These are magic constants derived from actual offsets in gc/Heap.h. */
+#ifdef JS_GC_SMALL_CHUNK_SIZE
+const size_t ChunkMarkBitmapOffset = 258104;
+const size_t ChunkMarkBitmapBits = 31744;
+#else
+const size_t ChunkMarkBitmapOffset = 1032352;
+const size_t ChunkMarkBitmapBits = 129024;
+#endif
+const size_t ChunkRuntimeOffset = ChunkSize - sizeof(void*);
+const size_t ChunkTrailerSize = 2 * sizeof(uintptr_t) + sizeof(uint64_t);
+const size_t ChunkLocationOffset = ChunkSize - ChunkTrailerSize;
+const size_t ArenaZoneOffset = sizeof(size_t);
+const size_t ArenaHeaderSize = sizeof(size_t) + 2 * sizeof(uintptr_t) +
+ sizeof(size_t) + sizeof(uintptr_t);
+
+/*
+ * Live objects are marked black. How many other additional colors are available
+ * depends on the size of the GCThing. Objects marked gray are eligible for
+ * cycle collection.
+ */
+static const uint32_t BLACK = 0;
+static const uint32_t GRAY = 1;
+
+/*
+ * The "location" field in the Chunk trailer is a enum indicating various roles
+ * of the chunk.
+ */
+enum class ChunkLocation : uint32_t
+{
+ Invalid = 0,
+ Nursery = 1,
+ TenuredHeap = 2
+};
+
+#ifdef JS_DEBUG
+/* When downcasting, ensure we are actually the right type. */
+extern JS_FRIEND_API(void)
+AssertGCThingHasType(js::gc::Cell* cell, JS::TraceKind kind);
+#else
+inline void
+AssertGCThingHasType(js::gc::Cell* cell, JS::TraceKind kind) {}
+#endif
+
+MOZ_ALWAYS_INLINE bool IsInsideNursery(const js::gc::Cell* cell);
+
+} /* namespace gc */
+} /* namespace js */
+
+namespace JS {
+struct Zone;
+
+/* Default size for the generational nursery in bytes. */
+const uint32_t DefaultNurseryBytes = 16 * js::gc::ChunkSize;
+
+/* Default maximum heap size in bytes to pass to JS_NewRuntime(). */
+const uint32_t DefaultHeapMaxBytes = 32 * 1024 * 1024;
+
+namespace shadow {
+
+struct Zone
+{
+ protected:
+ JSRuntime* const runtime_;
+ JSTracer* const barrierTracer_; // A pointer to the JSRuntime's |gcMarker|.
+
+ public:
+ // Stack GC roots for Rooted GC pointers.
+ js::RootedListHeads stackRoots_;
+ template <typename T> friend class JS::Rooted;
+
+ bool needsIncrementalBarrier_;
+
+ Zone(JSRuntime* runtime, JSTracer* barrierTracerArg)
+ : runtime_(runtime),
+ barrierTracer_(barrierTracerArg),
+ needsIncrementalBarrier_(false)
+ {
+ for (auto& stackRootPtr : stackRoots_)
+ stackRootPtr = nullptr;
+ }
+
+ bool needsIncrementalBarrier() const {
+ return needsIncrementalBarrier_;
+ }
+
+ JSTracer* barrierTracer() {
+ MOZ_ASSERT(needsIncrementalBarrier_);
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime_));
+ return barrierTracer_;
+ }
+
+ JSRuntime* runtimeFromMainThread() const {
+ MOZ_ASSERT(js::CurrentThreadCanAccessRuntime(runtime_));
+ return runtime_;
+ }
+
+ // Note: Unrestricted access to the zone's runtime from an arbitrary
+ // thread can easily lead to races. Use this method very carefully.
+ JSRuntime* runtimeFromAnyThread() const {
+ return runtime_;
+ }
+
+ static MOZ_ALWAYS_INLINE JS::shadow::Zone* asShadowZone(JS::Zone* zone) {
+ return reinterpret_cast<JS::shadow::Zone*>(zone);
+ }
+};
+
+} /* namespace shadow */
+
+/**
+ * A GC pointer, tagged with the trace kind.
+ *
+ * In general, a GC pointer should be stored with an exact type. This class
+ * is for use when that is not possible because a single pointer must point
+ * to several kinds of GC thing.
+ */
+class JS_FRIEND_API(GCCellPtr)
+{
+ public:
+ // Construction from a void* and trace kind.
+ GCCellPtr(void* gcthing, JS::TraceKind traceKind) : ptr(checkedCast(gcthing, traceKind)) {}
+
+ // Automatically construct a null GCCellPtr from nullptr.
+ MOZ_IMPLICIT GCCellPtr(decltype(nullptr)) : ptr(checkedCast(nullptr, JS::TraceKind::Null)) {}
+
+ // Construction from an explicit type.
+ template <typename T>
+ explicit GCCellPtr(T* p) : ptr(checkedCast(p, JS::MapTypeToTraceKind<T>::kind)) { }
+ explicit GCCellPtr(JSFunction* p) : ptr(checkedCast(p, JS::TraceKind::Object)) { }
+ explicit GCCellPtr(JSFlatString* str) : ptr(checkedCast(str, JS::TraceKind::String)) { }
+ explicit GCCellPtr(const Value& v);
+
+ JS::TraceKind kind() const {
+ JS::TraceKind traceKind = JS::TraceKind(ptr & OutOfLineTraceKindMask);
+ if (uintptr_t(traceKind) != OutOfLineTraceKindMask)
+ return traceKind;
+ return outOfLineKind();
+ }
+
+ // Allow GCCellPtr to be used in a boolean context.
+ explicit operator bool() const {
+ MOZ_ASSERT(bool(asCell()) == (kind() != JS::TraceKind::Null));
+ return asCell();
+ }
+
+ // Simplify checks to the kind.
+ template <typename T>
+ bool is() const { return kind() == JS::MapTypeToTraceKind<T>::kind; }
+
+ // Conversions to more specific types must match the kind. Access to
+ // further refined types is not allowed directly from a GCCellPtr.
+ template <typename T>
+ T& as() const {
+ MOZ_ASSERT(kind() == JS::MapTypeToTraceKind<T>::kind);
+ // We can't use static_cast here, because the fact that JSObject
+ // inherits from js::gc::Cell is not part of the public API.
+ return *reinterpret_cast<T*>(asCell());
+ }
+
+ // Return a pointer to the cell this |GCCellPtr| refers to, or |nullptr|.
+ // (It would be more symmetrical with |to| for this to return a |Cell&|, but
+ // the result can be |nullptr|, and null references are undefined behavior.)
+ js::gc::Cell* asCell() const {
+ return reinterpret_cast<js::gc::Cell*>(ptr & ~OutOfLineTraceKindMask);
+ }
+
+ // The CC's trace logger needs an identity that is XPIDL serializable.
+ uint64_t unsafeAsInteger() const {
+ return static_cast<uint64_t>(unsafeAsUIntPtr());
+ }
+ // Inline mark bitmap access requires direct pointer arithmetic.
+ uintptr_t unsafeAsUIntPtr() const {
+ MOZ_ASSERT(asCell());
+ MOZ_ASSERT(!js::gc::IsInsideNursery(asCell()));
+ return reinterpret_cast<uintptr_t>(asCell());
+ }
+
+ bool mayBeOwnedByOtherRuntime() const;
+
+ private:
+ static uintptr_t checkedCast(void* p, JS::TraceKind traceKind) {
+ js::gc::Cell* cell = static_cast<js::gc::Cell*>(p);
+ MOZ_ASSERT((uintptr_t(p) & OutOfLineTraceKindMask) == 0);
+ AssertGCThingHasType(cell, traceKind);
+ // Note: the OutOfLineTraceKindMask bits are set on all out-of-line kinds
+ // so that we can mask instead of branching.
+ MOZ_ASSERT_IF(uintptr_t(traceKind) >= OutOfLineTraceKindMask,
+ (uintptr_t(traceKind) & OutOfLineTraceKindMask) == OutOfLineTraceKindMask);
+ return uintptr_t(p) | (uintptr_t(traceKind) & OutOfLineTraceKindMask);
+ }
+
+ JS::TraceKind outOfLineKind() const;
+
+ uintptr_t ptr;
+};
+
+inline bool
+operator==(const GCCellPtr& ptr1, const GCCellPtr& ptr2)
+{
+ return ptr1.asCell() == ptr2.asCell();
+}
+
+inline bool
+operator!=(const GCCellPtr& ptr1, const GCCellPtr& ptr2)
+{
+ return !(ptr1 == ptr2);
+}
+
+// Unwraps the given GCCellPtr and calls the given functor with a template
+// argument of the actual type of the pointer.
+template <typename F, typename... Args>
+auto
+DispatchTyped(F f, GCCellPtr thing, Args&&... args)
+ -> decltype(f(static_cast<JSObject*>(nullptr), mozilla::Forward<Args>(args)...))
+{
+ switch (thing.kind()) {
+#define JS_EXPAND_DEF(name, type, _) \
+ case JS::TraceKind::name: \
+ return f(&thing.as<type>(), mozilla::Forward<Args>(args)...);
+ JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
+#undef JS_EXPAND_DEF
+ default:
+ MOZ_CRASH("Invalid trace kind in DispatchTyped for GCCellPtr.");
+ }
+}
+
+} /* namespace JS */
+
+namespace js {
+namespace gc {
+namespace detail {
+
+static MOZ_ALWAYS_INLINE uintptr_t*
+GetGCThingMarkBitmap(const uintptr_t addr)
+{
+ MOZ_ASSERT(addr);
+ const uintptr_t bmap_addr = (addr & ~ChunkMask) | ChunkMarkBitmapOffset;
+ return reinterpret_cast<uintptr_t*>(bmap_addr);
+}
+
+static MOZ_ALWAYS_INLINE void
+GetGCThingMarkWordAndMask(const uintptr_t addr, uint32_t color,
+ uintptr_t** wordp, uintptr_t* maskp)
+{
+ MOZ_ASSERT(addr);
+ const size_t bit = (addr & js::gc::ChunkMask) / js::gc::CellSize + color;
+ MOZ_ASSERT(bit < js::gc::ChunkMarkBitmapBits);
+ uintptr_t* bitmap = GetGCThingMarkBitmap(addr);
+ const uintptr_t nbits = sizeof(*bitmap) * CHAR_BIT;
+ *maskp = uintptr_t(1) << (bit % nbits);
+ *wordp = &bitmap[bit / nbits];
+}
+
+static MOZ_ALWAYS_INLINE JS::Zone*
+GetGCThingZone(const uintptr_t addr)
+{
+ MOZ_ASSERT(addr);
+ const uintptr_t zone_addr = (addr & ~ArenaMask) | ArenaZoneOffset;
+ return *reinterpret_cast<JS::Zone**>(zone_addr);
+
+}
+
+static MOZ_ALWAYS_INLINE JS::shadow::Runtime*
+GetCellRuntime(const Cell* cell)
+{
+ MOZ_ASSERT(cell);
+ const uintptr_t addr = uintptr_t(cell);
+ const uintptr_t rt_addr = (addr & ~ChunkMask) | ChunkRuntimeOffset;
+ return *reinterpret_cast<JS::shadow::Runtime**>(rt_addr);
+}
+
+static MOZ_ALWAYS_INLINE bool
+CellIsMarkedGray(const Cell* cell)
+{
+ MOZ_ASSERT(cell);
+ if (js::gc::IsInsideNursery(cell))
+ return false;
+
+ uintptr_t* word, mask;
+ js::gc::detail::GetGCThingMarkWordAndMask(uintptr_t(cell), js::gc::GRAY, &word, &mask);
+ return *word & mask;
+}
+
+extern JS_PUBLIC_API(bool)
+CellIsMarkedGrayIfKnown(const Cell* cell);
+
+} /* namespace detail */
+
+MOZ_ALWAYS_INLINE bool
+IsInsideNursery(const js::gc::Cell* cell)
+{
+ if (!cell)
+ return false;
+ uintptr_t addr = uintptr_t(cell);
+ addr &= ~js::gc::ChunkMask;
+ addr |= js::gc::ChunkLocationOffset;
+ auto location = *reinterpret_cast<ChunkLocation*>(addr);
+ MOZ_ASSERT(location == ChunkLocation::Nursery || location == ChunkLocation::TenuredHeap);
+ return location == ChunkLocation::Nursery;
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+namespace JS {
+
+static MOZ_ALWAYS_INLINE Zone*
+GetTenuredGCThingZone(GCCellPtr thing)
+{
+ MOZ_ASSERT(!js::gc::IsInsideNursery(thing.asCell()));
+ return js::gc::detail::GetGCThingZone(thing.unsafeAsUIntPtr());
+}
+
+static MOZ_ALWAYS_INLINE Zone*
+GetStringZone(JSString* str)
+{
+ return js::gc::detail::GetGCThingZone(uintptr_t(str));
+}
+
+extern JS_PUBLIC_API(Zone*)
+GetObjectZone(JSObject* obj);
+
+static MOZ_ALWAYS_INLINE bool
+GCThingIsMarkedGray(GCCellPtr thing)
+{
+ if (thing.mayBeOwnedByOtherRuntime())
+ return false;
+ return js::gc::detail::CellIsMarkedGrayIfKnown(thing.asCell());
+}
+
+extern JS_PUBLIC_API(JS::TraceKind)
+GCThingTraceKind(void* thing);
+
+} /* namespace JS */
+
+namespace js {
+namespace gc {
+
+static MOZ_ALWAYS_INLINE bool
+IsIncrementalBarrierNeededOnTenuredGCThing(JS::shadow::Runtime* rt, const JS::GCCellPtr thing)
+{
+ MOZ_ASSERT(thing);
+ MOZ_ASSERT(!js::gc::IsInsideNursery(thing.asCell()));
+
+ // TODO: I'd like to assert !isHeapBusy() here but this gets called while we
+ // are tracing the heap, e.g. during memory reporting (see bug 1313318).
+ MOZ_ASSERT(!rt->isHeapCollecting());
+
+ JS::Zone* zone = JS::GetTenuredGCThingZone(thing);
+ return JS::shadow::Zone::asShadowZone(zone)->needsIncrementalBarrier();
+}
+
+/**
+ * Create an object providing access to the garbage collector's internal notion
+ * of the current state of memory (both GC heap memory and GCthing-controlled
+ * malloc memory.
+ */
+extern JS_PUBLIC_API(JSObject*)
+NewMemoryInfoObject(JSContext* cx);
+
+} /* namespace gc */
+} /* namespace js */
+
+#endif /* js_HeapAPI_h */
diff --git a/js/public/Id.h b/js/public/Id.h
new file mode 100644
index 0000000000..d474e784fb
--- /dev/null
+++ b/js/public/Id.h
@@ -0,0 +1,207 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Id_h
+#define js_Id_h
+
+// A jsid is an identifier for a property or method of an object which is
+// either a 31-bit unsigned integer, interned string or symbol.
+//
+// Also, there is an additional jsid value, JSID_VOID, which does not occur in
+// JS scripts but may be used to indicate the absence of a valid jsid. A void
+// jsid is not a valid id and only arises as an exceptional API return value,
+// such as in JS_NextProperty. Embeddings must not pass JSID_VOID into JSAPI
+// entry points expecting a jsid and do not need to handle JSID_VOID in hooks
+// receiving a jsid except when explicitly noted in the API contract.
+//
+// A jsid is not implicitly convertible to or from a Value; JS_ValueToId or
+// JS_IdToValue must be used instead.
+
+#include "jstypes.h"
+
+#include "js/HeapAPI.h"
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+
/**
 * A jsid is a single tagged word: the low JSID_TYPE_MASK bits hold the type
 * tag and the remaining bits hold the payload (a pointer, a 31-bit integer,
 * or a sentinel value). See the JSID_TYPE_* constants below.
 */
struct jsid
{
    size_t asBits;  // raw tagged-word representation
    bool operator==(const jsid& rhs) const { return asBits == rhs.asBits; }
    bool operator!=(const jsid& rhs) const { return asBits != rhs.asBits; }
} JS_HAZ_GC_POINTER;

// Accessor for the raw bits; all tag tests and payload extraction go through
// this macro.
#define JSID_BITS(id) (id.asBits)
+
+#define JSID_TYPE_STRING 0x0
+#define JSID_TYPE_INT 0x1
+#define JSID_TYPE_VOID 0x2
+#define JSID_TYPE_SYMBOL 0x4
+#define JSID_TYPE_MASK 0x7
+
+// Avoid using canonical 'id' for jsid parameters since this is a magic word in
+// Objective-C++ which, apparently, wants to be able to #include jsapi.h.
+#define id iden
+
+static MOZ_ALWAYS_INLINE bool
+JSID_IS_STRING(jsid id)
+{
+ return (JSID_BITS(id) & JSID_TYPE_MASK) == 0;
+}
+
+static MOZ_ALWAYS_INLINE JSString*
+JSID_TO_STRING(jsid id)
+{
+ MOZ_ASSERT(JSID_IS_STRING(id));
+ return (JSString*)JSID_BITS(id);
+}
+
+/**
+ * Only JSStrings that have been interned via the JSAPI can be turned into
+ * jsids by API clients.
+ *
+ * N.B. if a jsid is backed by a string which has not been interned, that
+ * string must be appropriately rooted to avoid being collected by the GC.
+ */
+JS_PUBLIC_API(jsid)
+INTERNED_STRING_TO_JSID(JSContext* cx, JSString* str);
+
+static MOZ_ALWAYS_INLINE bool
+JSID_IS_INT(jsid id)
+{
+ return !!(JSID_BITS(id) & JSID_TYPE_INT);
+}
+
+static MOZ_ALWAYS_INLINE int32_t
+JSID_TO_INT(jsid id)
+{
+ MOZ_ASSERT(JSID_IS_INT(id));
+ return ((uint32_t)JSID_BITS(id)) >> 1;
+}
+
+#define JSID_INT_MIN 0
+#define JSID_INT_MAX INT32_MAX
+
static MOZ_ALWAYS_INLINE bool
INT_FITS_IN_JSID(int32_t i)
{
    // Any non-negative int32 fits: the payload occupies the 31 bits above the
    // single tag bit (see INT_TO_JSID / JSID_TO_INT).
    return i >= 0;
}
+
+static MOZ_ALWAYS_INLINE jsid
+INT_TO_JSID(int32_t i)
+{
+ jsid id;
+ MOZ_ASSERT(INT_FITS_IN_JSID(i));
+ JSID_BITS(id) = ((i << 1) | JSID_TYPE_INT);
+ return id;
+}
+
static MOZ_ALWAYS_INLINE bool
JSID_IS_SYMBOL(jsid id)
{
    // A symbol jsid has the symbol tag and a non-zero payload. Bits exactly
    // equal to the bare tag (null payload) encode JSID_EMPTY (see
    // JSID_IS_EMPTY), which must not be reported as a symbol id.
    return (JSID_BITS(id) & JSID_TYPE_MASK) == JSID_TYPE_SYMBOL &&
           JSID_BITS(id) != JSID_TYPE_SYMBOL;
}
+
+static MOZ_ALWAYS_INLINE JS::Symbol*
+JSID_TO_SYMBOL(jsid id)
+{
+ MOZ_ASSERT(JSID_IS_SYMBOL(id));
+ return (JS::Symbol*)(JSID_BITS(id) & ~(size_t)JSID_TYPE_MASK);
+}
+
// Build a symbol jsid from a non-null, tenured, tag-bit-aligned Symbol*.
static MOZ_ALWAYS_INLINE jsid
SYMBOL_TO_JSID(JS::Symbol* sym)
{
    jsid id;
    MOZ_ASSERT(sym != nullptr);
    // The pointer's low bits must be free so the tag can live there.
    MOZ_ASSERT((size_t(sym) & JSID_TYPE_MASK) == 0);
    // Symbols stored in jsids must be tenured: BarrierMethods<jsid> performs
    // no post-barrier, so a nursery pointer here would be unsafe.
    MOZ_ASSERT(!js::gc::IsInsideNursery(reinterpret_cast<js::gc::Cell*>(sym)));
    JSID_BITS(id) = (size_t(sym) | JSID_TYPE_SYMBOL);
    return id;
}
+
static MOZ_ALWAYS_INLINE bool
JSID_IS_GCTHING(jsid id)
{
    // Strings and symbols are the only GC-allocated payloads a jsid can hold.
    return JSID_IS_STRING(id) || JSID_IS_SYMBOL(id);
}
+
+static MOZ_ALWAYS_INLINE JS::GCCellPtr
+JSID_TO_GCTHING(jsid id)
+{
+ void* thing = (void*)(JSID_BITS(id) & ~(size_t)JSID_TYPE_MASK);
+ if (JSID_IS_STRING(id))
+ return JS::GCCellPtr(thing, JS::TraceKind::String);
+ MOZ_ASSERT(JSID_IS_SYMBOL(id));
+ return JS::GCCellPtr(thing, JS::TraceKind::Symbol);
+}
+
static MOZ_ALWAYS_INLINE bool
JSID_IS_VOID(const jsid id)
{
    // The void tag only ever appears with a zero payload; assert that no
    // other bits are set whenever the tag matches.
    MOZ_ASSERT_IF(((size_t)JSID_BITS(id) & JSID_TYPE_MASK) == JSID_TYPE_VOID,
                  JSID_BITS(id) == JSID_TYPE_VOID);
    return (size_t)JSID_BITS(id) == JSID_TYPE_VOID;
}
+
static MOZ_ALWAYS_INLINE bool
JSID_IS_EMPTY(const jsid id)
{
    // JSID_EMPTY is encoded as the bare symbol tag with a null payload; real
    // symbol jsids always have a non-zero pointer (see JSID_IS_SYMBOL).
    return (size_t)JSID_BITS(id) == JSID_TYPE_SYMBOL;
}
+
+extern JS_PUBLIC_DATA(const jsid) JSID_VOID;
+extern JS_PUBLIC_DATA(const jsid) JSID_EMPTY;
+
+extern JS_PUBLIC_DATA(const JS::HandleId) JSID_VOIDHANDLE;
+extern JS_PUBLIC_DATA(const JS::HandleId) JSID_EMPTYHANDLE;
+
+namespace JS {
+
/**
 * GC policy for jsid: ids default to JSID_VOID and are traced through the
 * manually-barriered edge tracer.
 */
template <>
struct GCPolicy<jsid>
{
    static jsid initial() { return JSID_VOID; }
    static void trace(JSTracer* trc, jsid* idp, const char* name) {
        js::UnsafeTraceManuallyBarrieredEdge(trc, idp, name);
    }
};
+
+} // namespace JS
+
+namespace js {
+
template <>
struct BarrierMethods<jsid>
{
    // No post-barrier: SYMBOL_TO_JSID asserts its symbol is not in the
    // nursery. NOTE(review): string ids are presumably also always tenured —
    // confirm before relying on this.
    static void postBarrier(jsid* idp, jsid prev, jsid next) {}
    // Mark a read of the id's GC thing so gray-marking invariants hold.
    static void exposeToJS(jsid id) {
        if (JSID_IS_GCTHING(id))
            js::gc::ExposeGCThingToActiveJS(JSID_TO_GCTHING(id));
    }
};
+
+// If the jsid is a GC pointer type, convert to that type and call |f| with
+// the pointer. If the jsid is not a GC type, calls F::defaultValue.
template <typename F, typename... Args>
auto
DispatchTyped(F f, const jsid& id, Args&&... args)
    -> decltype(f(static_cast<JSString*>(nullptr), mozilla::Forward<Args>(args)...))
{
    if (JSID_IS_STRING(id))
        return f(JSID_TO_STRING(id), mozilla::Forward<Args>(args)...);
    if (JSID_IS_SYMBOL(id))
        return f(JSID_TO_SYMBOL(id), mozilla::Forward<Args>(args)...);
    // Only void/empty/int ids reach here; none of those holds a GC thing.
    MOZ_ASSERT(!JSID_IS_GCTHING(id));
    return F::defaultValue(id);
}
+
+#undef id
+
+} // namespace js
+
+#endif /* js_Id_h */
diff --git a/js/public/Initialization.h b/js/public/Initialization.h
new file mode 100644
index 0000000000..8a1cf9101a
--- /dev/null
+++ b/js/public/Initialization.h
@@ -0,0 +1,125 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* SpiderMonkey initialization and shutdown APIs. */
+
+#ifndef js_Initialization_h
+#define js_Initialization_h
+
+#include "jstypes.h"
+
+namespace JS {
+namespace detail {
+
+enum class InitState { Uninitialized = 0, Running, ShutDown };
+
+/**
+ * SpiderMonkey's initialization status is tracked here, and it controls things
+ * that should happen only once across all runtimes. It's an API requirement
+ * that JS_Init (and JS_ShutDown, if called) be called in a thread-aware
+ * manner, so this (internal -- embedders, don't use!) variable doesn't need to
+ * be atomic.
+ */
+extern JS_PUBLIC_DATA(InitState)
+libraryInitState;
+
+extern JS_PUBLIC_API(const char*)
+InitWithFailureDiagnostic(bool isDebugBuild);
+
+} // namespace detail
+} // namespace JS
+
+// These are equivalent to ICU's |UMemAllocFn|, |UMemReallocFn|, and
+// |UMemFreeFn| types. The first argument (called |context| in the ICU docs)
+// will always be nullptr and should be ignored.
+typedef void* (*JS_ICUAllocFn)(const void*, size_t size);
+typedef void* (*JS_ICUReallocFn)(const void*, void* p, size_t size);
+typedef void (*JS_ICUFreeFn)(const void*, void* p);
+
+/**
+ * This function can be used to track memory used by ICU. If it is called, it
+ * *must* be called before JS_Init. Don't use it unless you know what you're
+ * doing!
+ */
+extern JS_PUBLIC_API(bool)
+JS_SetICUMemoryFunctions(JS_ICUAllocFn allocFn,
+ JS_ICUReallocFn reallocFn,
+ JS_ICUFreeFn freeFn);
+
+/**
+ * Initialize SpiderMonkey, returning true only if initialization succeeded.
+ * Once this method has succeeded, it is safe to call JS_NewRuntime and other
+ * JSAPI methods.
+ *
+ * This method must be called before any other JSAPI method is used on any
+ * thread. Once it has been used, it is safe to call any JSAPI method, and it
+ * remains safe to do so until JS_ShutDown is correctly called.
+ *
+ * It is currently not possible to initialize SpiderMonkey multiple times (that
+ * is, calling JS_Init/JSAPI methods/JS_ShutDown in that order, then doing so
+ * again). This restriction may eventually be lifted.
+ */
+inline bool
+JS_Init(void)
+{
+#ifdef DEBUG
+ return !JS::detail::InitWithFailureDiagnostic(true);
+#else
+ return !JS::detail::InitWithFailureDiagnostic(false);
+#endif
+}
+
+/**
+ * A variant of JS_Init. On success it returns nullptr. On failure it returns a
+ * pointer to a string literal that describes how initialization failed, which
+ * can be useful for debugging purposes.
+ */
+inline const char*
+JS_InitWithFailureDiagnostic(void)
+{
+#ifdef DEBUG
+ return JS::detail::InitWithFailureDiagnostic(true);
+#else
+ return JS::detail::InitWithFailureDiagnostic(false);
+#endif
+}
+
+/*
+ * Returns true if SpiderMonkey has been initialized successfully, even if it has
+ * possibly been shut down.
+ *
+ * Note that it is the responsibility of the embedder to call JS_Init() and
+ * JS_ShutDown() at the correct times, and therefore this API should ideally not
+ * be necessary to use. This is only intended to be used in cases where the
+ * embedder isn't in full control of deciding whether to initialize SpiderMonkey
+ * or hand off the task to another consumer.
+ */
inline bool
JS_IsInitialized(void)
{
    // True once JS_Init has run — and it remains true after JS_ShutDown:
    // this reports "ever initialized", not "currently usable".
    return JS::detail::libraryInitState != JS::detail::InitState::Uninitialized;
}
+
+/**
+ * Destroy free-standing resources allocated by SpiderMonkey, not associated
+ * with any runtime, context, or other structure.
+ *
+ * This method should be called after all other JSAPI data has been properly
+ * cleaned up: every new runtime must have been destroyed, every new context
+ * must have been destroyed, and so on. Calling this method before all other
+ * resources have been destroyed has undefined behavior.
+ *
+ * Failure to call this method, at present, has no adverse effects other than
+ * leaking memory. This may not always be the case; it's recommended that all
+ * embedders call this method when all other JSAPI operations have completed.
+ *
+ * It is currently not possible to initialize SpiderMonkey multiple times (that
+ * is, calling JS_Init/JSAPI methods/JS_ShutDown in that order, then doing so
+ * again). This restriction may eventually be lifted.
+ */
+extern JS_PUBLIC_API(void)
+JS_ShutDown(void);
+
+#endif /* js_Initialization_h */
diff --git a/js/public/LegacyIntTypes.h b/js/public/LegacyIntTypes.h
new file mode 100644
index 0000000000..2c8498c89e
--- /dev/null
+++ b/js/public/LegacyIntTypes.h
@@ -0,0 +1,59 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * This section typedefs the old 'native' types to the new <stdint.h> types.
+ * These redefinitions are provided solely to allow JSAPI users to more easily
+ * transition to <stdint.h> types. They are not to be used in the JSAPI, and
+ * new JSAPI user code should not use them. This mapping file may eventually
+ * be removed from SpiderMonkey, so don't depend on it in the long run.
+ */
+
+/*
+ * BEWARE: Comity with other implementers of these types is not guaranteed.
+ * Indeed, if you use this header and third-party code defining these
+ * types, *expect* to encounter either compile errors or link errors,
+ * depending how these types are used and on the order of inclusion.
+ * It is safest to use only the <stdint.h> types.
+ */
+#ifndef js_LegacyIntTypes_h
+#define js_LegacyIntTypes_h
+
+#include <stdint.h>
+
+#include "js-config.h"
+
// Unsigned fixed-width aliases for legacy JSAPI users.
typedef uint8_t uint8;
typedef uint16_t uint16;
typedef uint32_t uint32;
typedef uint64_t uint64;

/*
 * On AIX 4.3, sys/inttypes.h (which is included by sys/types.h, a very
 * common header file) defines the types int8, int16, int32, and int64.
 * So we don't define these four types here to avoid conflicts in case
 * the code also includes sys/types.h.
 */
#if defined(AIX) && defined(HAVE_SYS_INTTYPES_H)
#include <sys/inttypes.h>
#else
typedef int8_t int8;
typedef int16_t int16;
typedef int32_t int32;
typedef int64_t int64;
#endif /* AIX && HAVE_SYS_INTTYPES_H */

// JS-prefixed variants of the same legacy aliases.
typedef uint8_t JSUint8;
typedef uint16_t JSUint16;
typedef uint32_t JSUint32;
typedef uint64_t JSUint64;

typedef int8_t JSInt8;
typedef int16_t JSInt16;
typedef int32_t JSInt32;
typedef int64_t JSInt64;
+
+#endif /* js_LegacyIntTypes_h */
diff --git a/js/public/MemoryMetrics.h b/js/public/MemoryMetrics.h
new file mode 100644
index 0000000000..9b5caa24b7
--- /dev/null
+++ b/js/public/MemoryMetrics.h
@@ -0,0 +1,971 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_MemoryMetrics_h
+#define js_MemoryMetrics_h
+
+// These declarations are highly likely to change in the future. Depend on them
+// at your own risk.
+
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/PodOperations.h"
+#include "mozilla/TypeTraits.h"
+
+#include <string.h>
+
+#include "jsalloc.h"
+#include "jspubtd.h"
+
+#include "js/HashTable.h"
+#include "js/TracingAPI.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+class nsISupports; // Needed for ObjectPrivateVisitor.
+
+namespace JS {
+
/** Coarse per-tab memory totals, bucketed into the four categories below. */
struct TabSizes
{
    enum Kind {
        Objects,
        Strings,
        Private,
        Other
    };

    // Zero every counter. PodZero is safe: the struct holds only size_t
    // fields.
    TabSizes() { mozilla::PodZero(this); }

    // Accumulate |n| bytes into the counter selected by |kind|.
    void add(Kind kind, size_t n) {
        switch (kind) {
            case Objects: objects += n; break;
            case Strings: strings += n; break;
            case Private: private_ += n; break;
            case Other:   other += n; break;
            default:      MOZ_CRASH("bad TabSizes kind");
        }
    }

    size_t objects;
    size_t strings;
    size_t private_;  // trailing underscore: |private| is a C++ keyword
    size_t other;
};
+
+/** These are the measurements used by Servo. */
+struct ServoSizes
+{
+ enum Kind {
+ GCHeapUsed,
+ GCHeapUnused,
+ GCHeapAdmin,
+ GCHeapDecommitted,
+ MallocHeap,
+ NonHeap,
+ Ignore
+ };
+
+ ServoSizes() { mozilla::PodZero(this); }
+
+ void add(Kind kind, size_t n) {
+ switch (kind) {
+ case GCHeapUsed: gcHeapUsed += n; break;
+ case GCHeapUnused: gcHeapUnused += n; break;
+ case GCHeapAdmin: gcHeapAdmin += n; break;
+ case GCHeapDecommitted: gcHeapDecommitted += n; break;
+ case MallocHeap: mallocHeap += n; break;
+ case NonHeap: nonHeap += n; break;
+ case Ignore: /* do nothing */ break;
+ default: MOZ_CRASH("bad ServoSizes kind");
+ }
+ }
+
+ size_t gcHeapUsed;
+ size_t gcHeapUnused;
+ size_t gcHeapAdmin;
+ size_t gcHeapDecommitted;
+ size_t mallocHeap;
+ size_t nonHeap;
+};
+
+} // namespace JS
+
+namespace js {
+
+/**
+ * In memory reporting, we have concept of "sundries", line items which are too
+ * small to be worth reporting individually. Under some circumstances, a memory
+ * reporter gets tossed into the sundries bucket if it's smaller than
+ * MemoryReportingSundriesThreshold() bytes.
+ *
+ * We need to define this value here, rather than in the code which actually
+ * generates the memory reports, because NotableStringInfo uses this value.
+ */
+JS_FRIEND_API(size_t) MemoryReportingSundriesThreshold();
+
+/**
+ * This hash policy avoids flattening ropes (which perturbs the site being
+ * measured and requires a JSContext) at the expense of doing a FULL ROPE COPY
+ * on every hash and match! Beware.
+ */
struct InefficientNonFlatteningStringHashPolicy
{
    typedef JSString* Lookup;
    // Both are defined out of line; per the comment above, they hash/compare
    // string contents and copy ropes rather than flattening them.
    static HashNumber hash(const Lookup& l);
    static bool match(const JSString* const& k, const Lookup& l);
};
+
// Hash policy keyed on C strings. NOTE(review): implementations live
// elsewhere; presumably they hash/compare string contents rather than
// pointer identity (it is used for filename keys below) — confirm.
struct CStringHashPolicy
{
    typedef const char* Lookup;
    static HashNumber hash(const Lookup& l);
    static bool match(const char* const& k, const Lookup& l);
};
+
+// This file features many classes with numerous size_t fields, and each such
+// class has one or more methods that need to operate on all of these fields.
+// Writing these individually is error-prone -- it's easy to add a new field
+// without updating all the required methods. So we define a single macro list
+// in each class to name the fields (and notable characteristics of them), and
+// then use the following macros to transform those lists into the required
+// methods.
+//
+// - The |tabKind| value is used when measuring TabSizes.
+//
+// - The |servoKind| value is used when measuring ServoSizes and also for
+// the various sizeOfLiveGCThings() methods.
+//
+// In some classes, one or more of the macro arguments aren't used. We use '_'
+// for those.
+//
// Declare a size_t field.
#define DECL_SIZE(tabKind, servoKind, mSize) size_t mSize;
// Mem-initializer zeroing a field (note the trailing comma).
#define ZERO_SIZE(tabKind, servoKind, mSize) mSize(0),
// Mem-initializer copying a field from |other| (trailing comma).
#define COPY_OTHER_SIZE(tabKind, servoKind, mSize) mSize(other.mSize),
// Accumulate |other|'s field into ours.
#define ADD_OTHER_SIZE(tabKind, servoKind, mSize) mSize += other.mSize;
// Subtract |other|'s field from ours; underflow indicates a bug.
#define SUB_OTHER_SIZE(tabKind, servoKind, mSize) \
    MOZ_ASSERT(mSize >= other.mSize); \
    mSize -= other.mSize;
// Add the field into a local accumulator |n|.
#define ADD_SIZE_TO_N(tabKind, servoKind, mSize) n += mSize;
// Add the field into |n| only if its servo kind is GCHeapUsed, i.e. the
// field measures live GC things.
#define ADD_SIZE_TO_N_IF_LIVE_GC_THING(tabKind, servoKind, mSize) \
    /* Avoid self-comparison warnings by comparing enums indirectly. */ \
    n += (mozilla::IsSame<int[ServoSizes::servoKind], int[ServoSizes::GCHeapUsed]>::value) \
         ? mSize \
         : 0;
// Report the field into TabSizes / ServoSizes under its declared kind.
#define ADD_TO_TAB_SIZES(tabKind, servoKind, mSize) sizes->add(JS::TabSizes::tabKind, mSize);
#define ADD_TO_SERVO_SIZES(tabKind, servoKind, mSize) sizes->add(JS::ServoSizes::servoKind, mSize);
+
+} // namespace js
+
+namespace JS {
+
/** Memory used by objects (and their out-of-line data) of a single class. */
struct ClassInfo
{
#define FOR_EACH_SIZE(macro) \
    macro(Objects, GCHeapUsed, objectsGCHeap) \
    macro(Objects, MallocHeap, objectsMallocHeapSlots) \
    macro(Objects, MallocHeap, objectsMallocHeapElementsNormal) \
    macro(Objects, MallocHeap, objectsMallocHeapElementsAsmJS) \
    macro(Objects, MallocHeap, objectsMallocHeapMisc) \
    macro(Objects, NonHeap, objectsNonHeapElementsNormal) \
    macro(Objects, NonHeap, objectsNonHeapElementsShared) \
    macro(Objects, NonHeap, objectsNonHeapElementsWasm) \
    macro(Objects, NonHeap, objectsNonHeapCodeWasm)

    ClassInfo()
      : FOR_EACH_SIZE(ZERO_SIZE)
        wasmGuardPages(0)
    {}

    // NOTE(review): add/subtract and the addTo* methods below only fold in
    // the FOR_EACH_SIZE fields; wasmGuardPages is deliberately left out of
    // every aggregate (guard pages are reserved address space, not committed
    // memory) — confirm that callers handle it separately.
    void add(const ClassInfo& other) {
        FOR_EACH_SIZE(ADD_OTHER_SIZE)
    }

    void subtract(const ClassInfo& other) {
        FOR_EACH_SIZE(SUB_OTHER_SIZE)
    }

    // Total of all FOR_EACH_SIZE fields (excludes wasmGuardPages).
    size_t sizeOfAllThings() const {
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N)
        return n;
    }

    // A class is "notable" (reported individually) above this total size.
    bool isNotable() const {
        static const size_t NotabilityThreshold = 16 * 1024;
        return sizeOfAllThings() >= NotabilityThreshold;
    }

    // Only the GCHeapUsed fields, i.e. bytes in live GC things.
    size_t sizeOfLiveGCThings() const {
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N_IF_LIVE_GC_THING)
        return n;
    }

    void addToTabSizes(TabSizes* sizes) const {
        FOR_EACH_SIZE(ADD_TO_TAB_SIZES)
    }

    void addToServoSizes(ServoSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
    }

    FOR_EACH_SIZE(DECL_SIZE)
    size_t wasmGuardPages;

#undef FOR_EACH_SIZE
};
+
/** Memory used by shapes and base shapes (property-layout metadata). */
struct ShapeInfo
{
#define FOR_EACH_SIZE(macro) \
    macro(Other, GCHeapUsed, shapesGCHeapTree) \
    macro(Other, GCHeapUsed, shapesGCHeapDict) \
    macro(Other, GCHeapUsed, shapesGCHeapBase) \
    macro(Other, MallocHeap, shapesMallocHeapTreeTables) \
    macro(Other, MallocHeap, shapesMallocHeapDictTables) \
    macro(Other, MallocHeap, shapesMallocHeapTreeKids)

    ShapeInfo()
      : FOR_EACH_SIZE(ZERO_SIZE)
        dummy()
    {}

    void add(const ShapeInfo& other) {
        FOR_EACH_SIZE(ADD_OTHER_SIZE)
    }

    void subtract(const ShapeInfo& other) {
        FOR_EACH_SIZE(SUB_OTHER_SIZE)
    }

    size_t sizeOfAllThings() const {
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N)
        return n;
    }

    // Only the GCHeapUsed fields, i.e. bytes in live GC things.
    size_t sizeOfLiveGCThings() const {
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N_IF_LIVE_GC_THING)
        return n;
    }

    void addToTabSizes(TabSizes* sizes) const {
        FOR_EACH_SIZE(ADD_TO_TAB_SIZES)
    }

    void addToServoSizes(ServoSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
    }

    FOR_EACH_SIZE(DECL_SIZE)
    int dummy; // present just to absorb the trailing comma from FOR_EACH_SIZE(ZERO_SIZE)

#undef FOR_EACH_SIZE
};
+
+/**
+ * Holds data about a notable class (one whose combined object and shape
+ * instances use more than a certain amount of memory) so we can report it
+ * individually.
+ *
+ * The only difference between this class and ClassInfo is that this class
+ * holds a copy of the filename.
+ */
struct NotableClassInfo : public ClassInfo
{
    NotableClassInfo();
    NotableClassInfo(const char* className, const ClassInfo& info);
    NotableClassInfo(NotableClassInfo&& info);
    NotableClassInfo& operator=(NotableClassInfo&& info);

    ~NotableClassInfo() {
        js_free(className_);
    }

    char* className_;  // owned copy of the class name; freed above

  private:
    // Move-only: className_ is singly owned, so copying is forbidden.
    NotableClassInfo(const NotableClassInfo& info) = delete;
};
+
+/** Data for tracking JIT-code memory usage. */
+struct CodeSizes
+{
+#define FOR_EACH_SIZE(macro) \
+ macro(_, NonHeap, ion) \
+ macro(_, NonHeap, baseline) \
+ macro(_, NonHeap, regexp) \
+ macro(_, NonHeap, other) \
+ macro(_, NonHeap, unused)
+
+ CodeSizes()
+ : FOR_EACH_SIZE(ZERO_SIZE)
+ dummy()
+ {}
+
+ void addToServoSizes(ServoSizes *sizes) const {
+ FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
+ }
+
+ FOR_EACH_SIZE(DECL_SIZE)
+ int dummy; // present just to absorb the trailing comma from FOR_EACH_SIZE(ZERO_SIZE)
+
+#undef FOR_EACH_SIZE
+};
+
+/** Data for tracking GC memory usage. */
+struct GCSizes
+{
+ // |nurseryDecommitted| is marked as NonHeap rather than GCHeapDecommitted
+ // because we don't consider the nursery to be part of the GC heap.
+#define FOR_EACH_SIZE(macro) \
+ macro(_, MallocHeap, marker) \
+ macro(_, NonHeap, nurseryCommitted) \
+ macro(_, MallocHeap, nurseryMallocedBuffers) \
+ macro(_, MallocHeap, storeBufferVals) \
+ macro(_, MallocHeap, storeBufferCells) \
+ macro(_, MallocHeap, storeBufferSlots) \
+ macro(_, MallocHeap, storeBufferWholeCells) \
+ macro(_, MallocHeap, storeBufferGenerics)
+
+ GCSizes()
+ : FOR_EACH_SIZE(ZERO_SIZE)
+ dummy()
+ {}
+
+ void addToServoSizes(ServoSizes *sizes) const {
+ FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
+ }
+
+ FOR_EACH_SIZE(DECL_SIZE)
+ int dummy; // present just to absorb the trailing comma from FOR_EACH_SIZE(ZERO_SIZE)
+
+#undef FOR_EACH_SIZE
+};
+
+/**
+ * This class holds information about the memory taken up by identical copies of
+ * a particular string. Multiple JSStrings may have their sizes aggregated
+ * together into one StringInfo object. Note that two strings with identical
+ * chars will not be aggregated together if one is a short string and the other
+ * is not.
+ */
struct StringInfo
{
#define FOR_EACH_SIZE(macro) \
    macro(Strings, GCHeapUsed, gcHeapLatin1) \
    macro(Strings, GCHeapUsed, gcHeapTwoByte) \
    macro(Strings, MallocHeap, mallocHeapLatin1) \
    macro(Strings, MallocHeap, mallocHeapTwoByte)

    StringInfo()
      : FOR_EACH_SIZE(ZERO_SIZE)
        numCopies(0)
    {}

    // Fold in one more copy of the string. Note: numCopies is bumped by one
    // per call, not by other.numCopies — each add() accounts for exactly one
    // additional JSString with these measurements.
    void add(const StringInfo& other) {
        FOR_EACH_SIZE(ADD_OTHER_SIZE);
        numCopies++;
    }

    // Inverse of add(): removes one copy's worth of measurements.
    void subtract(const StringInfo& other) {
        FOR_EACH_SIZE(SUB_OTHER_SIZE);
        numCopies--;
    }

    // A string is "notable" (reported individually) above this total size.
    bool isNotable() const {
        static const size_t NotabilityThreshold = 16 * 1024;
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N)
        return n >= NotabilityThreshold;
    }

    // Only the GCHeapUsed fields, i.e. bytes in live GC things.
    size_t sizeOfLiveGCThings() const {
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N_IF_LIVE_GC_THING)
        return n;
    }

    void addToTabSizes(TabSizes* sizes) const {
        FOR_EACH_SIZE(ADD_TO_TAB_SIZES)
    }

    void addToServoSizes(ServoSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
    }

    FOR_EACH_SIZE(DECL_SIZE)
    uint32_t numCopies; // How many copies of the string have we seen?

#undef FOR_EACH_SIZE
};
+
+/**
+ * Holds data about a notable string (one which, counting all duplicates, uses
+ * more than a certain amount of memory) so we can report it individually.
+ *
+ * The only difference between this class and StringInfo is that
+ * NotableStringInfo holds a copy of some or all of the string's chars.
+ */
struct NotableStringInfo : public StringInfo
{
    // Upper bound on how many of the string's chars are copied into |buffer|.
    static const size_t MAX_SAVED_CHARS = 1024;

    NotableStringInfo();
    NotableStringInfo(JSString* str, const StringInfo& info);
    NotableStringInfo(NotableStringInfo&& info);
    NotableStringInfo& operator=(NotableStringInfo&& info);

    ~NotableStringInfo() {
        js_free(buffer);
    }

    char* buffer;   // owned copy of (a prefix of) the string's chars
    size_t length;  // length of the original string

  private:
    // Move-only: buffer is singly owned, so copying is forbidden.
    NotableStringInfo(const NotableStringInfo& info) = delete;
};
+
+/**
+ * This class holds information about the memory taken up by script sources
+ * from a particular file.
+ */
struct ScriptSourceInfo
{
#define FOR_EACH_SIZE(macro) \
    macro(_, MallocHeap, misc)

    ScriptSourceInfo()
      : FOR_EACH_SIZE(ZERO_SIZE)
        numScripts(0)
    {}

    // Fold in one more ScriptSource from the same file; numScripts is bumped
    // by one per call (other.numScripts is not consulted).
    void add(const ScriptSourceInfo& other) {
        FOR_EACH_SIZE(ADD_OTHER_SIZE)
        numScripts++;
    }

    void subtract(const ScriptSourceInfo& other) {
        FOR_EACH_SIZE(SUB_OTHER_SIZE)
        numScripts--;
    }

    void addToServoSizes(ServoSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
    }

    // A file is "notable" (reported individually) above this total size.
    bool isNotable() const {
        static const size_t NotabilityThreshold = 16 * 1024;
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N)
        return n >= NotabilityThreshold;
    }

    FOR_EACH_SIZE(DECL_SIZE)
    uint32_t numScripts; // How many ScriptSources come from this file? (It
                         // can be more than one in XML files that have
                         // multiple scripts in CDATA sections.)
#undef FOR_EACH_SIZE
};
+
+/**
+ * Holds data about a notable script source file (one whose combined
+ * script sources use more than a certain amount of memory) so we can report it
+ * individually.
+ *
+ * The only difference between this class and ScriptSourceInfo is that this
+ * class holds a copy of the filename.
+ */
struct NotableScriptSourceInfo : public ScriptSourceInfo
{
    NotableScriptSourceInfo();
    NotableScriptSourceInfo(const char* filename, const ScriptSourceInfo& info);
    NotableScriptSourceInfo(NotableScriptSourceInfo&& info);
    NotableScriptSourceInfo& operator=(NotableScriptSourceInfo&& info);

    ~NotableScriptSourceInfo() {
        js_free(filename_);
    }

    char* filename_;  // owned copy of the filename; freed above

  private:
    // Move-only: filename_ is singly owned, so copying is forbidden.
    NotableScriptSourceInfo(const NotableScriptSourceInfo& info) = delete;
};
+
+/**
+ * These measurements relate directly to the JSRuntime, and not to zones and
+ * compartments within it.
+ */
/**
 * These measurements relate directly to the JSRuntime, and not to zones and
 * compartments within it.
 */
struct RuntimeSizes
{
#define FOR_EACH_SIZE(macro) \
    macro(_, MallocHeap, object) \
    macro(_, MallocHeap, atomsTable) \
    macro(_, MallocHeap, contexts) \
    macro(_, MallocHeap, temporary) \
    macro(_, MallocHeap, interpreterStack) \
    macro(_, MallocHeap, mathCache) \
    macro(_, MallocHeap, sharedImmutableStringsCache) \
    macro(_, MallocHeap, sharedIntlData) \
    macro(_, MallocHeap, uncompressedSourceCache) \
    macro(_, MallocHeap, scriptData)

    RuntimeSizes()
      : FOR_EACH_SIZE(ZERO_SIZE)
        scriptSourceInfo(),
        code(),
        gc(),
        notableScriptSources()
    {
        // Infallible by design: measurement cannot proceed without the
        // bookkeeping table, so an allocation failure here is fatal.
        allScriptSources = js_new<ScriptSourcesHashMap>();
        if (!allScriptSources || !allScriptSources->init())
            MOZ_CRASH("oom");
    }

    ~RuntimeSizes() {
        // |allScriptSources| is usually deleted and set to nullptr before this
        // destructor runs. But there are failure cases due to OOMs that may
        // prevent that, so it doesn't hurt to try again here.
        js_delete(allScriptSources);
    }

    void addToServoSizes(ServoSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
        scriptSourceInfo.addToServoSizes(sizes);
        code.addToServoSizes(sizes);
        gc.addToServoSizes(sizes);
    }

    // The script source measurements in |scriptSourceInfo| are initially for
    // all script sources. At the end, if the measurement granularity is
    // FineGrained, we subtract the measurements of the notable script sources
    // and move them into |notableScriptSources|.
    FOR_EACH_SIZE(DECL_SIZE)
    ScriptSourceInfo scriptSourceInfo;
    CodeSizes code;
    GCSizes gc;

    typedef js::HashMap<const char*, ScriptSourceInfo,
                        js::CStringHashPolicy,
                        js::SystemAllocPolicy> ScriptSourcesHashMap;

    // |allScriptSources| is only used transiently. During the reporting phase
    // it is filled with info about every script source in the runtime. It's
    // then used to fill in |notableScriptSources| (which actually gets
    // reported), and immediately discarded afterwards.
    ScriptSourcesHashMap* allScriptSources;
    js::Vector<NotableScriptSourceInfo, 0, js::SystemAllocPolicy> notableScriptSources;

#undef FOR_EACH_SIZE
};
+
/** Bytes wasted in unused (free) GC-thing cells, broken down by trace kind. */
struct UnusedGCThingSizes
{
#define FOR_EACH_SIZE(macro) \
    macro(Other, GCHeapUnused, object) \
    macro(Other, GCHeapUnused, script) \
    macro(Other, GCHeapUnused, lazyScript) \
    macro(Other, GCHeapUnused, shape) \
    macro(Other, GCHeapUnused, baseShape) \
    macro(Other, GCHeapUnused, objectGroup) \
    macro(Other, GCHeapUnused, string) \
    macro(Other, GCHeapUnused, symbol) \
    macro(Other, GCHeapUnused, jitcode) \
    macro(Other, GCHeapUnused, scope)

    UnusedGCThingSizes()
      : FOR_EACH_SIZE(ZERO_SIZE)
        dummy()
    {}

    // "Move" constructor: copies the counters; |other| is not zeroed (the
    // fields are plain size_t counters with no owned resources).
    UnusedGCThingSizes(UnusedGCThingSizes&& other)
      : FOR_EACH_SIZE(COPY_OTHER_SIZE)
        dummy()
    {}

    // Add |n| to the counter for |kind|. |n| is signed (intptr_t), so
    // callers can presumably pass negative deltas as well — no caller is
    // visible here to confirm.
    void addToKind(JS::TraceKind kind, intptr_t n) {
        switch (kind) {
          case JS::TraceKind::Object: object += n; break;
          case JS::TraceKind::String: string += n; break;
          case JS::TraceKind::Symbol: symbol += n; break;
          case JS::TraceKind::Script: script += n; break;
          case JS::TraceKind::Shape: shape += n; break;
          case JS::TraceKind::BaseShape: baseShape += n; break;
          case JS::TraceKind::JitCode: jitcode += n; break;
          case JS::TraceKind::LazyScript: lazyScript += n; break;
          case JS::TraceKind::ObjectGroup: objectGroup += n; break;
          case JS::TraceKind::Scope: scope += n; break;
          default:
            MOZ_CRASH("Bad trace kind for UnusedGCThingSizes");
        }
    }

    void addSizes(const UnusedGCThingSizes& other) {
        FOR_EACH_SIZE(ADD_OTHER_SIZE)
    }

    size_t totalSize() const {
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N)
        return n;
    }

    void addToTabSizes(JS::TabSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_TAB_SIZES)
    }

    void addToServoSizes(JS::ServoSizes *sizes) const {
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
    }

    FOR_EACH_SIZE(DECL_SIZE)
    int dummy; // present just to absorb the trailing comma from FOR_EACH_SIZE(ZERO_SIZE)

#undef FOR_EACH_SIZE
};
+
/** Per-zone memory measurements, plus a grand-total aggregate (isTotals). */
struct ZoneStats
{
#define FOR_EACH_SIZE(macro) \
    macro(Other, GCHeapUsed, symbolsGCHeap) \
    macro(Other, GCHeapAdmin, gcHeapArenaAdmin) \
    macro(Other, GCHeapUsed, lazyScriptsGCHeap) \
    macro(Other, MallocHeap, lazyScriptsMallocHeap) \
    macro(Other, GCHeapUsed, jitCodesGCHeap) \
    macro(Other, GCHeapUsed, objectGroupsGCHeap) \
    macro(Other, MallocHeap, objectGroupsMallocHeap) \
    macro(Other, GCHeapUsed, scopesGCHeap) \
    macro(Other, MallocHeap, scopesMallocHeap) \
    macro(Other, MallocHeap, typePool) \
    macro(Other, MallocHeap, baselineStubsOptimized) \
    macro(Other, MallocHeap, uniqueIdMap) \
    macro(Other, MallocHeap, shapeTables)

    ZoneStats()
      : FOR_EACH_SIZE(ZERO_SIZE)
        unusedGCThings(),
        stringInfo(),
        shapeInfo(),
        extra(),
        allStrings(nullptr),
        notableStrings(),
        isTotals(true)
    {}

    // Move constructor: steals |allStrings| (ownership transfer) and asserts
    // that only per-zone entries — never the running totals — are moved.
    ZoneStats(ZoneStats&& other)
      : FOR_EACH_SIZE(COPY_OTHER_SIZE)
        unusedGCThings(mozilla::Move(other.unusedGCThings)),
        stringInfo(mozilla::Move(other.stringInfo)),
        shapeInfo(mozilla::Move(other.shapeInfo)),
        extra(other.extra),
        allStrings(other.allStrings),
        notableStrings(mozilla::Move(other.notableStrings)),
        isTotals(other.isTotals)
    {
        other.allStrings = nullptr;
        MOZ_ASSERT(!other.isTotals);
    }

    ~ZoneStats() {
        // |allStrings| is usually deleted and set to nullptr before this
        // destructor runs. But there are failure cases due to OOMs that may
        // prevent that, so it doesn't hurt to try again here.
        js_delete(allStrings);
    }

    // Allocates and initializes |allStrings|; defined out of line.
    bool initStrings(JSRuntime* rt);

    // Fold another zone's numbers into this aggregate (totals object only).
    void addSizes(const ZoneStats& other) {
        MOZ_ASSERT(isTotals);
        FOR_EACH_SIZE(ADD_OTHER_SIZE)
        unusedGCThings.addSizes(other.unusedGCThings);
        stringInfo.add(other.stringInfo);
        shapeInfo.add(other.shapeInfo);
    }

    // Only the GCHeapUsed fields, i.e. bytes in live GC things.
    size_t sizeOfLiveGCThings() const {
        MOZ_ASSERT(isTotals);
        size_t n = 0;
        FOR_EACH_SIZE(ADD_SIZE_TO_N_IF_LIVE_GC_THING)
        n += stringInfo.sizeOfLiveGCThings();
        n += shapeInfo.sizeOfLiveGCThings();
        return n;
    }

    void addToTabSizes(JS::TabSizes* sizes) const {
        MOZ_ASSERT(isTotals);
        FOR_EACH_SIZE(ADD_TO_TAB_SIZES)
        unusedGCThings.addToTabSizes(sizes);
        stringInfo.addToTabSizes(sizes);
        shapeInfo.addToTabSizes(sizes);
    }

    void addToServoSizes(JS::ServoSizes *sizes) const {
        MOZ_ASSERT(isTotals);
        FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
        unusedGCThings.addToServoSizes(sizes);
        stringInfo.addToServoSizes(sizes);
        shapeInfo.addToServoSizes(sizes);
    }

    // These string measurements are initially for all strings. At the end,
    // if the measurement granularity is FineGrained, we subtract the
    // measurements of the notable script sources and move them into
    // |notableStrings|.
    FOR_EACH_SIZE(DECL_SIZE)
    UnusedGCThingSizes unusedGCThings;
    StringInfo stringInfo;
    ShapeInfo shapeInfo;
    void* extra;    // This field can be used by embedders.

    typedef js::HashMap<JSString*, StringInfo,
                        js::InefficientNonFlatteningStringHashPolicy,
                        js::SystemAllocPolicy> StringsHashMap;

    // |allStrings| is only used transiently. During the zone traversal it is
    // filled with info about every string in the zone. It's then used to fill
    // in |notableStrings| (which actually gets reported), and immediately
    // discarded afterwards.
    StringsHashMap* allStrings;
    js::Vector<NotableStringInfo, 0, js::SystemAllocPolicy> notableStrings;
    bool isTotals;  // true only for the aggregate-of-all-zones instance

#undef FOR_EACH_SIZE
};
+
+struct CompartmentStats
+{
+ // We assume that |objectsPrivate| is on the malloc heap, but it's not
+ // actually guaranteed. But for Servo, at least, it's a moot point because
+ // it doesn't provide an ObjectPrivateVisitor so the value will always be
+ // zero.
+#define FOR_EACH_SIZE(macro) \
+ macro(Private, MallocHeap, objectsPrivate) \
+ macro(Other, GCHeapUsed, scriptsGCHeap) \
+ macro(Other, MallocHeap, scriptsMallocHeapData) \
+ macro(Other, MallocHeap, baselineData) \
+ macro(Other, MallocHeap, baselineStubsFallback) \
+ macro(Other, MallocHeap, ionData) \
+ macro(Other, MallocHeap, typeInferenceTypeScripts) \
+ macro(Other, MallocHeap, typeInferenceAllocationSiteTables) \
+ macro(Other, MallocHeap, typeInferenceArrayTypeTables) \
+ macro(Other, MallocHeap, typeInferenceObjectTypeTables) \
+ macro(Other, MallocHeap, compartmentObject) \
+ macro(Other, MallocHeap, compartmentTables) \
+ macro(Other, MallocHeap, innerViewsTable) \
+ macro(Other, MallocHeap, lazyArrayBuffersTable) \
+ macro(Other, MallocHeap, objectMetadataTable) \
+ macro(Other, MallocHeap, crossCompartmentWrappersTable) \
+ macro(Other, MallocHeap, regexpCompartment) \
+ macro(Other, MallocHeap, savedStacksSet) \
+ macro(Other, MallocHeap, varNamesSet) \
+ macro(Other, MallocHeap, nonSyntacticLexicalScopesTable) \
+ macro(Other, MallocHeap, jitCompartment) \
+ macro(Other, MallocHeap, privateData)
+
+ CompartmentStats()
+ : FOR_EACH_SIZE(ZERO_SIZE)
+ classInfo(),
+ extra(),
+ allClasses(nullptr),
+ notableClasses(),
+ isTotals(true)
+ {}
+
+ CompartmentStats(CompartmentStats&& other)
+ : FOR_EACH_SIZE(COPY_OTHER_SIZE)
+ classInfo(mozilla::Move(other.classInfo)),
+ extra(other.extra),
+ allClasses(other.allClasses),
+ notableClasses(mozilla::Move(other.notableClasses)),
+ isTotals(other.isTotals)
+ {
+ other.allClasses = nullptr;
+ MOZ_ASSERT(!other.isTotals);
+ }
+
+ CompartmentStats(const CompartmentStats&) = delete; // disallow copying
+
+ ~CompartmentStats() {
+ // |allClasses| is usually deleted and set to nullptr before this
+ // destructor runs. But there are failure cases due to OOMs that may
+ // prevent that, so it doesn't hurt to try again here.
+ js_delete(allClasses);
+ }
+
+ bool initClasses(JSRuntime* rt);
+
+ void addSizes(const CompartmentStats& other) {
+ MOZ_ASSERT(isTotals);
+ FOR_EACH_SIZE(ADD_OTHER_SIZE)
+ classInfo.add(other.classInfo);
+ }
+
+ size_t sizeOfLiveGCThings() const {
+ MOZ_ASSERT(isTotals);
+ size_t n = 0;
+ FOR_EACH_SIZE(ADD_SIZE_TO_N_IF_LIVE_GC_THING)
+ n += classInfo.sizeOfLiveGCThings();
+ return n;
+ }
+
+ void addToTabSizes(TabSizes* sizes) const {
+ MOZ_ASSERT(isTotals);
+ FOR_EACH_SIZE(ADD_TO_TAB_SIZES);
+ classInfo.addToTabSizes(sizes);
+ }
+
+ void addToServoSizes(ServoSizes *sizes) const {
+ MOZ_ASSERT(isTotals);
+ FOR_EACH_SIZE(ADD_TO_SERVO_SIZES);
+ classInfo.addToServoSizes(sizes);
+ }
+
+ // The class measurements in |classInfo| are initially for all classes. At
+ // the end, if the measurement granularity is FineGrained, we subtract the
+ // measurements of the notable classes and move them into |notableClasses|.
+ FOR_EACH_SIZE(DECL_SIZE)
+ ClassInfo classInfo;
+ void* extra; // This field can be used by embedders.
+
+ typedef js::HashMap<const char*, ClassInfo,
+ js::CStringHashPolicy,
+ js::SystemAllocPolicy> ClassesHashMap;
+
+ // These are similar to |allStrings| and |notableStrings| in ZoneStats.
+ ClassesHashMap* allClasses;
+ js::Vector<NotableClassInfo, 0, js::SystemAllocPolicy> notableClasses;
+ bool isTotals;
+
+#undef FOR_EACH_SIZE
+};
+
+typedef js::Vector<CompartmentStats, 0, js::SystemAllocPolicy> CompartmentStatsVector;
+typedef js::Vector<ZoneStats, 0, js::SystemAllocPolicy> ZoneStatsVector;
+
+struct RuntimeStats
+{
+ // |gcHeapChunkTotal| is ignored because it's the sum of all the other
+ // values. |gcHeapGCThings| is ignored because it's the sum of some of the
+ // values from the zones and compartments. Both of those values are not
+ // reported directly, but are just present for sanity-checking other
+ // values.
+#define FOR_EACH_SIZE(macro) \
+ macro(_, Ignore, gcHeapChunkTotal) \
+ macro(_, GCHeapDecommitted, gcHeapDecommittedArenas) \
+ macro(_, GCHeapUnused, gcHeapUnusedChunks) \
+ macro(_, GCHeapUnused, gcHeapUnusedArenas) \
+ macro(_, GCHeapAdmin, gcHeapChunkAdmin) \
+ macro(_, Ignore, gcHeapGCThings)
+
+ explicit RuntimeStats(mozilla::MallocSizeOf mallocSizeOf)
+ : FOR_EACH_SIZE(ZERO_SIZE)
+ runtime(),
+ cTotals(),
+ zTotals(),
+ compartmentStatsVector(),
+ zoneStatsVector(),
+ currZoneStats(nullptr),
+ mallocSizeOf_(mallocSizeOf)
+ {}
+
+ // Here's a useful breakdown of the GC heap.
+ //
+ // - rtStats.gcHeapChunkTotal
+ // - decommitted bytes
+ // - rtStats.gcHeapDecommittedArenas (decommitted arenas in non-empty chunks)
+ // - unused bytes
+ // - rtStats.gcHeapUnusedChunks (empty chunks)
+ // - rtStats.gcHeapUnusedArenas (empty arenas within non-empty chunks)
+ // - rtStats.zTotals.unusedGCThings.totalSize() (empty GC thing slots within non-empty arenas)
+ // - used bytes
+ // - rtStats.gcHeapChunkAdmin
+ // - rtStats.zTotals.gcHeapArenaAdmin
+ // - rtStats.gcHeapGCThings (in-use GC things)
+ // == rtStats.zTotals.sizeOfLiveGCThings() + rtStats.cTotals.sizeOfLiveGCThings()
+ //
+ // It's possible that some arenas in empty chunks may be decommitted, but
+ // we don't count those under rtStats.gcHeapDecommittedArenas because (a)
+ // it's rare, and (b) this means that rtStats.gcHeapUnusedChunks is a
+ // multiple of the chunk size, which is good.
+
+ void addToServoSizes(ServoSizes *sizes) const {
+ FOR_EACH_SIZE(ADD_TO_SERVO_SIZES)
+ runtime.addToServoSizes(sizes);
+ }
+
+ FOR_EACH_SIZE(DECL_SIZE)
+
+ RuntimeSizes runtime;
+
+ CompartmentStats cTotals; // The sum of this runtime's compartments' measurements.
+ ZoneStats zTotals; // The sum of this runtime's zones' measurements.
+
+ CompartmentStatsVector compartmentStatsVector;
+ ZoneStatsVector zoneStatsVector;
+
+ ZoneStats* currZoneStats;
+
+ mozilla::MallocSizeOf mallocSizeOf_;
+
+ virtual void initExtraCompartmentStats(JSCompartment* c, CompartmentStats* cstats) = 0;
+ virtual void initExtraZoneStats(JS::Zone* zone, ZoneStats* zstats) = 0;
+
+#undef FOR_EACH_SIZE
+};
+
+class ObjectPrivateVisitor
+{
+ public:
+ // Within CollectRuntimeStats, this method is called for each JS object
+ // that has an nsISupports pointer.
+ virtual size_t sizeOfIncludingThis(nsISupports* aSupports) = 0;
+
+ // A callback that gets a JSObject's nsISupports pointer, if it has one.
+ // Note: this function does *not* addref |iface|.
+ typedef bool(*GetISupportsFun)(JSObject* obj, nsISupports** iface);
+ GetISupportsFun getISupports_;
+
+ explicit ObjectPrivateVisitor(GetISupportsFun getISupports)
+ : getISupports_(getISupports)
+ {}
+};
+
+extern JS_PUBLIC_API(bool)
+CollectRuntimeStats(JSContext* cx, RuntimeStats* rtStats, ObjectPrivateVisitor* opv, bool anonymize);
+
+extern JS_PUBLIC_API(size_t)
+SystemCompartmentCount(JSContext* cx);
+
+extern JS_PUBLIC_API(size_t)
+UserCompartmentCount(JSContext* cx);
+
+extern JS_PUBLIC_API(size_t)
+PeakSizeOfTemporary(const JSContext* cx);
+
+extern JS_PUBLIC_API(bool)
+AddSizeOfTab(JSContext* cx, JS::HandleObject obj, mozilla::MallocSizeOf mallocSizeOf,
+ ObjectPrivateVisitor* opv, TabSizes* sizes);
+
+extern JS_PUBLIC_API(bool)
+AddServoSizeOf(JSContext* cx, mozilla::MallocSizeOf mallocSizeOf,
+ ObjectPrivateVisitor *opv, ServoSizes *sizes);
+
+} // namespace JS
+
+#undef DECL_SIZE
+#undef ZERO_SIZE
+#undef COPY_OTHER_SIZE
+#undef ADD_OTHER_SIZE
+#undef SUB_OTHER_SIZE
+#undef ADD_SIZE_TO_N
+#undef ADD_SIZE_TO_N_IF_LIVE_GC_THING
+#undef ADD_TO_TAB_SIZES
+
+#endif /* js_MemoryMetrics_h */
diff --git a/js/public/Principals.h b/js/public/Principals.h
new file mode 100644
index 0000000000..774f65941e
--- /dev/null
+++ b/js/public/Principals.h
@@ -0,0 +1,132 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JSPrincipals and related interfaces. */
+
+#ifndef js_Principals_h
+#define js_Principals_h
+
+#include "mozilla/Atomics.h"
+
+#include <stdint.h>
+
+#include "jspubtd.h"
+
+#include "js/StructuredClone.h"
+
+namespace js {
+ struct JS_PUBLIC_API(PerformanceGroup);
+} // namespace js
+
+struct JSPrincipals {
+ /* Don't call "destroy"; use reference counting macros below. */
+ mozilla::Atomic<int32_t> refcount;
+
+#ifdef JS_DEBUG
+ /* A helper to facilitate principals debugging. */
+ uint32_t debugToken;
+#endif
+
+ JSPrincipals() : refcount(0) {}
+
+ void setDebugToken(uint32_t token) {
+# ifdef JS_DEBUG
+ debugToken = token;
+# endif
+ }
+
+ /*
+ * Write the principals with the given |writer|. Return false on failure,
+ * true on success.
+ */
+ virtual bool write(JSContext* cx, JSStructuredCloneWriter* writer) = 0;
+
+ /*
+ * This is not defined by the JS engine but should be provided by the
+ * embedding.
+ */
+ JS_PUBLIC_API(void) dump();
+};
+
+extern JS_PUBLIC_API(void)
+JS_HoldPrincipals(JSPrincipals* principals);
+
+extern JS_PUBLIC_API(void)
+JS_DropPrincipals(JSContext* cx, JSPrincipals* principals);
+
+// Return whether the first principal subsumes the second. The exact meaning of
+// 'subsumes' is left up to the browser. Subsumption is checked inside the JS
+// engine when determining, e.g., which stack frames to display in a backtrace.
+typedef bool
+(* JSSubsumesOp)(JSPrincipals* first, JSPrincipals* second);
+
+/*
+ * Used to check if a CSP instance wants to disable eval() and friends.
+ * See js_CheckCSPPermitsJSAction() in jsobj.
+ */
+typedef bool
+(* JSCSPEvalChecker)(JSContext* cx);
+
+struct JSSecurityCallbacks {
+ JSCSPEvalChecker contentSecurityPolicyAllows;
+ JSSubsumesOp subsumes;
+};
+
+extern JS_PUBLIC_API(void)
+JS_SetSecurityCallbacks(JSContext* cx, const JSSecurityCallbacks* callbacks);
+
+extern JS_PUBLIC_API(const JSSecurityCallbacks*)
+JS_GetSecurityCallbacks(JSContext* cx);
+
+/*
+ * Code running with "trusted" principals will be given a deeper stack
+ * allocation than ordinary scripts. This allows trusted script to run after
+ * untrusted script has exhausted the stack. This function sets the
+ * runtime-wide trusted principal.
+ *
+ * This principal is not held (via JS_HoldPrincipals/JS_DropPrincipals).
+ * Instead, the caller must ensure that the given principals stays valid for as
+ * long as 'cx' may point to it. If the principals would be destroyed before
+ * 'cx', JS_SetTrustedPrincipals must be called again, passing nullptr for
+ * 'prin'.
+ */
+extern JS_PUBLIC_API(void)
+JS_SetTrustedPrincipals(JSContext* cx, JSPrincipals* prin);
+
+typedef void
+(* JSDestroyPrincipalsOp)(JSPrincipals* principals);
+
+/*
+ * Initialize the callback that is called to destroy JSPrincipals instance
+ * when its reference counter drops to zero. The initialization can be done
+ * only once per JS runtime.
+ */
+extern JS_PUBLIC_API(void)
+JS_InitDestroyPrincipalsCallback(JSContext* cx, JSDestroyPrincipalsOp destroyPrincipals);
+
+/*
+ * Read a JSPrincipals instance from the given |reader| and initialize the out
+ * parameter |outPrincipals| to the JSPrincipals instance read.
+ *
+ * Return false on failure, true on success. The |outPrincipals| parameter
+ * should not be modified if false is returned.
+ *
+ * The caller is not responsible for calling JS_HoldPrincipals on the resulting
+ * JSPrincipals instance, the JSReadPrincipalsOp must increment the refcount of
+ * the resulting JSPrincipals on behalf of the caller.
+ */
+using JSReadPrincipalsOp = bool (*)(JSContext* cx, JSStructuredCloneReader* reader,
+ JSPrincipals** outPrincipals);
+
+/*
+ * Initialize the callback that is called to read JSPrincipals instances from a
+ * buffer. The initialization can be done only once per JS runtime.
+ */
+extern JS_PUBLIC_API(void)
+JS_InitReadPrincipalsCallback(JSContext* cx, JSReadPrincipalsOp read);
+
+
+#endif /* js_Principals_h */
diff --git a/js/public/ProfilingFrameIterator.h b/js/public/ProfilingFrameIterator.h
new file mode 100644
index 0000000000..52621adb46
--- /dev/null
+++ b/js/public/ProfilingFrameIterator.h
@@ -0,0 +1,206 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_ProfilingFrameIterator_h
+#define js_ProfilingFrameIterator_h
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Maybe.h"
+
+#include "jsbytecode.h"
+#include "js/GCAPI.h"
+#include "js/TypeDecls.h"
+#include "js/Utility.h"
+
+struct JSContext;
+struct JSRuntime;
+class JSScript;
+
+namespace js {
+ class Activation;
+ namespace jit {
+ class JitActivation;
+ class JitProfilingFrameIterator;
+ class JitcodeGlobalEntry;
+ } // namespace jit
+ namespace wasm {
+ class ProfilingFrameIterator;
+ } // namespace wasm
+} // namespace js
+
+namespace JS {
+
+struct ForEachTrackedOptimizationAttemptOp;
+struct ForEachTrackedOptimizationTypeInfoOp;
+
+// This iterator can be used to walk the stack of a thread suspended at an
+// arbitrary pc. To provide accurate results, profiling must have been enabled
+// (via EnableRuntimeProfilingStack) before executing the callstack being
+// unwound.
+//
+// Note that the caller must not do anything that could cause GC to happen while
+// the iterator is alive, since this could invalidate Ion code and cause its
+// contents to become out of date.
+class JS_PUBLIC_API(ProfilingFrameIterator)
+{
+ JSRuntime* rt_;
+ uint32_t sampleBufferGen_;
+ js::Activation* activation_;
+
+ // When moving past a JitActivation, we need to save the prevJitTop
+ // from it to use as the exit-frame pointer when the next caller jit
+ // activation (if any) comes around.
+ void* savedPrevJitTop_;
+
+ JS::AutoCheckCannotGC nogc_;
+
+ static const unsigned StorageSpace = 8 * sizeof(void*);
+ mozilla::AlignedStorage<StorageSpace> storage_;
+ js::wasm::ProfilingFrameIterator& wasmIter() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isWasm());
+ return *reinterpret_cast<js::wasm::ProfilingFrameIterator*>(storage_.addr());
+ }
+ const js::wasm::ProfilingFrameIterator& wasmIter() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isWasm());
+ return *reinterpret_cast<const js::wasm::ProfilingFrameIterator*>(storage_.addr());
+ }
+
+ js::jit::JitProfilingFrameIterator& jitIter() {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isJit());
+ return *reinterpret_cast<js::jit::JitProfilingFrameIterator*>(storage_.addr());
+ }
+
+ const js::jit::JitProfilingFrameIterator& jitIter() const {
+ MOZ_ASSERT(!done());
+ MOZ_ASSERT(isJit());
+ return *reinterpret_cast<const js::jit::JitProfilingFrameIterator*>(storage_.addr());
+ }
+
+ void settle();
+
+ bool hasSampleBufferGen() const {
+ return sampleBufferGen_ != UINT32_MAX;
+ }
+
+ public:
+ struct RegisterState
+ {
+ RegisterState() : pc(nullptr), sp(nullptr), lr(nullptr) {}
+ void* pc;
+ void* sp;
+ void* lr;
+ };
+
+ ProfilingFrameIterator(JSContext* cx, const RegisterState& state,
+ uint32_t sampleBufferGen = UINT32_MAX);
+ ~ProfilingFrameIterator();
+ void operator++();
+ bool done() const { return !activation_; }
+
+ // Assuming the stack grows down (we do), the return value:
+ // - always points into the stack
+ // - is weakly monotonically increasing (may be equal for successive frames)
+    // - will compare greater than newer native and pseudo-stack frame addresses
+    //   and less than older native and pseudo-stack frame addresses
+ void* stackAddress() const;
+
+ enum FrameKind
+ {
+ Frame_Baseline,
+ Frame_Ion,
+ Frame_Wasm
+ };
+
+ struct Frame
+ {
+ FrameKind kind;
+ void* stackAddress;
+ void* returnAddress;
+ void* activation;
+ UniqueChars label;
+ };
+
+ bool isWasm() const;
+ bool isJit() const;
+
+ uint32_t extractStack(Frame* frames, uint32_t offset, uint32_t end) const;
+
+ mozilla::Maybe<Frame> getPhysicalFrameWithoutLabel() const;
+
+ private:
+ mozilla::Maybe<Frame> getPhysicalFrameAndEntry(js::jit::JitcodeGlobalEntry* entry) const;
+
+ void iteratorConstruct(const RegisterState& state);
+ void iteratorConstruct();
+ void iteratorDestroy();
+ bool iteratorDone();
+};
+
+JS_FRIEND_API(bool)
+IsProfilingEnabledForContext(JSContext* cx);
+
+/**
+ * After each sample run, this method should be called with the latest sample
+ * buffer generation, and the lapCount. It will update corresponding fields on
+ * JSRuntime.
+ *
+ * See fields |profilerSampleBufferGen|, |profilerSampleBufferLapCount| on
+ * JSRuntime for documentation about what these values are used for.
+ */
+JS_FRIEND_API(void)
+UpdateJSContextProfilerSampleBufferGen(JSContext* cx, uint32_t generation,
+ uint32_t lapCount);
+
+struct ForEachProfiledFrameOp
+{
+ // A handle to the underlying JitcodeGlobalEntry, so as to avoid repeated
+ // lookups on JitcodeGlobalTable.
+ class MOZ_STACK_CLASS FrameHandle
+ {
+ friend JS_PUBLIC_API(void) ForEachProfiledFrame(JSContext* cx, void* addr,
+ ForEachProfiledFrameOp& op);
+
+ JSRuntime* rt_;
+ js::jit::JitcodeGlobalEntry& entry_;
+ void* addr_;
+ void* canonicalAddr_;
+ const char* label_;
+ uint32_t depth_;
+ mozilla::Maybe<uint8_t> optsIndex_;
+
+ FrameHandle(JSRuntime* rt, js::jit::JitcodeGlobalEntry& entry, void* addr,
+ const char* label, uint32_t depth);
+
+ void updateHasTrackedOptimizations();
+
+ public:
+ const char* label() const { return label_; }
+ uint32_t depth() const { return depth_; }
+ bool hasTrackedOptimizations() const { return optsIndex_.isSome(); }
+ void* canonicalAddress() const { return canonicalAddr_; }
+
+ JS_PUBLIC_API(ProfilingFrameIterator::FrameKind) frameKind() const;
+ JS_PUBLIC_API(void) forEachOptimizationAttempt(ForEachTrackedOptimizationAttemptOp& op,
+ JSScript** scriptOut,
+ jsbytecode** pcOut) const;
+
+ JS_PUBLIC_API(void)
+ forEachOptimizationTypeInfo(ForEachTrackedOptimizationTypeInfoOp& op) const;
+ };
+
+ // Called once per frame.
+ virtual void operator()(const FrameHandle& frame) = 0;
+};
+
+JS_PUBLIC_API(void)
+ForEachProfiledFrame(JSContext* cx, void* addr, ForEachProfiledFrameOp& op);
+
+} // namespace JS
+
+#endif /* js_ProfilingFrameIterator_h */
diff --git a/js/public/ProfilingStack.h b/js/public/ProfilingStack.h
new file mode 100644
index 0000000000..aeed349e84
--- /dev/null
+++ b/js/public/ProfilingStack.h
@@ -0,0 +1,208 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_ProfilingStack_h
+#define js_ProfilingStack_h
+
+#include "jsbytecode.h"
+#include "jstypes.h"
+#include "js/TypeDecls.h"
+
+#include "js/Utility.h"
+
+struct JSRuntime;
+class JSTracer;
+
+namespace js {
+
+// A call stack can be specified to the JS engine such that all JS entry/exits
+// to functions push/pop an entry to/from the specified stack.
+//
+// For more detailed information, see vm/SPSProfiler.h.
+//
+class ProfileEntry
+{
+ // All fields are marked volatile to prevent the compiler from re-ordering
+ // instructions. Namely this sequence:
+ //
+ // entry[size] = ...;
+ // size++;
+ //
+ // If the size modification were somehow reordered before the stores, then
+ // if a sample were taken it would be examining bogus information.
+ //
+ // A ProfileEntry represents both a C++ profile entry and a JS one.
+
+ // Descriptive string of this entry.
+ const char * volatile string;
+
+ // Stack pointer for non-JS entries, the script pointer otherwise.
+ void * volatile spOrScript;
+
+ // Line number for non-JS entries, the bytecode offset otherwise.
+ int32_t volatile lineOrPcOffset;
+
+ // General purpose storage describing this frame.
+ uint32_t volatile flags_;
+
+ public:
+ // These traits are bit masks. Make sure they're powers of 2.
+ enum Flags : uint32_t {
+ // Indicate whether a profile entry represents a CPP frame. If not set,
+ // a JS frame is assumed by default. You're not allowed to publicly
+ // change the frame type. Instead, initialize the ProfileEntry as either
+ // a JS or CPP frame with `initJsFrame` or `initCppFrame` respectively.
+ IS_CPP_ENTRY = 0x01,
+
+ // Indicate that copying the frame label is not necessary when taking a
+ // sample of the pseudostack.
+ FRAME_LABEL_COPY = 0x02,
+
+ // This ProfileEntry is a dummy entry indicating the start of a run
+ // of JS pseudostack entries.
+ BEGIN_PSEUDO_JS = 0x04,
+
+ // This flag is used to indicate that an interpreter JS entry has OSR-ed
+ // into baseline.
+ OSR = 0x08,
+
+ // Union of all flags.
+ ALL = IS_CPP_ENTRY|FRAME_LABEL_COPY|BEGIN_PSEUDO_JS|OSR,
+
+ // Mask for removing all flags except the category information.
+ CATEGORY_MASK = ~ALL
+ };
+
+ // Keep these in sync with devtools/client/performance/modules/categories.js
+ enum class Category : uint32_t {
+ OTHER = 0x10,
+ CSS = 0x20,
+ JS = 0x40,
+ GC = 0x80,
+ CC = 0x100,
+ NETWORK = 0x200,
+ GRAPHICS = 0x400,
+ STORAGE = 0x800,
+ EVENTS = 0x1000,
+
+ FIRST = OTHER,
+ LAST = EVENTS
+ };
+
+ static_assert((static_cast<int>(Category::FIRST) & Flags::ALL) == 0,
+ "The category bitflags should not intersect with the other flags!");
+
+ // All of these methods are marked with the 'volatile' keyword because SPS's
+ // representation of the stack is stored such that all ProfileEntry
+ // instances are volatile. These methods would not be available unless they
+ // were marked as volatile as well.
+
+ bool isCpp() const volatile { return hasFlag(IS_CPP_ENTRY); }
+ bool isJs() const volatile { return !isCpp(); }
+
+ bool isCopyLabel() const volatile { return hasFlag(FRAME_LABEL_COPY); }
+
+ void setLabel(const char* aString) volatile { string = aString; }
+ const char* label() const volatile { return string; }
+
+ void initJsFrame(JSScript* aScript, jsbytecode* aPc) volatile {
+ flags_ = 0;
+ spOrScript = aScript;
+ setPC(aPc);
+ }
+ void initCppFrame(void* aSp, uint32_t aLine) volatile {
+ flags_ = IS_CPP_ENTRY;
+ spOrScript = aSp;
+ lineOrPcOffset = static_cast<int32_t>(aLine);
+ }
+
+ void setFlag(uint32_t flag) volatile {
+ MOZ_ASSERT(flag != IS_CPP_ENTRY);
+ flags_ |= flag;
+ }
+ void unsetFlag(uint32_t flag) volatile {
+ MOZ_ASSERT(flag != IS_CPP_ENTRY);
+ flags_ &= ~flag;
+ }
+ bool hasFlag(uint32_t flag) const volatile {
+ return bool(flags_ & flag);
+ }
+
+ uint32_t flags() const volatile {
+ return flags_;
+ }
+
+ uint32_t category() const volatile {
+ return flags_ & CATEGORY_MASK;
+ }
+ void setCategory(Category c) volatile {
+ MOZ_ASSERT(c >= Category::FIRST);
+ MOZ_ASSERT(c <= Category::LAST);
+ flags_ &= ~CATEGORY_MASK;
+ setFlag(static_cast<uint32_t>(c));
+ }
+
+ void setOSR() volatile {
+ MOZ_ASSERT(isJs());
+ setFlag(OSR);
+ }
+ void unsetOSR() volatile {
+ MOZ_ASSERT(isJs());
+ unsetFlag(OSR);
+ }
+ bool isOSR() const volatile {
+ return hasFlag(OSR);
+ }
+
+ void* stackAddress() const volatile {
+ MOZ_ASSERT(!isJs());
+ return spOrScript;
+ }
+ JS_PUBLIC_API(JSScript*) script() const volatile;
+ uint32_t line() const volatile {
+ MOZ_ASSERT(!isJs());
+ return static_cast<uint32_t>(lineOrPcOffset);
+ }
+
+ // Note that the pointer returned might be invalid.
+ JSScript* rawScript() const volatile {
+ MOZ_ASSERT(isJs());
+ return (JSScript*)spOrScript;
+ }
+
+ // We can't know the layout of JSScript, so look in vm/SPSProfiler.cpp.
+ JS_FRIEND_API(jsbytecode*) pc() const volatile;
+ JS_FRIEND_API(void) setPC(jsbytecode* pc) volatile;
+
+ void trace(JSTracer* trc);
+
+ // The offset of a pc into a script's code can actually be 0, so to
+ // signify a nullptr pc, use a -1 index. This is checked against in
+ // pc() and setPC() to set/get the right pc.
+ static const int32_t NullPCOffset = -1;
+
+ static size_t offsetOfLabel() { return offsetof(ProfileEntry, string); }
+ static size_t offsetOfSpOrScript() { return offsetof(ProfileEntry, spOrScript); }
+ static size_t offsetOfLineOrPcOffset() { return offsetof(ProfileEntry, lineOrPcOffset); }
+ static size_t offsetOfFlags() { return offsetof(ProfileEntry, flags_); }
+};
+
+JS_FRIEND_API(void)
+SetContextProfilingStack(JSContext* cx, ProfileEntry* stack, uint32_t* size,
+ uint32_t max);
+
+JS_FRIEND_API(void)
+EnableContextProfilingStack(JSContext* cx, bool enabled);
+
+JS_FRIEND_API(void)
+RegisterContextProfilingEventMarker(JSContext* cx, void (*fn)(const char*));
+
+JS_FRIEND_API(jsbytecode*)
+ProfilingGetPC(JSContext* cx, JSScript* script, void* ip);
+
+} // namespace js
+
+#endif /* js_ProfilingStack_h */
diff --git a/js/public/Proxy.h b/js/public/Proxy.h
new file mode 100644
index 0000000000..3e95538db6
--- /dev/null
+++ b/js/public/Proxy.h
@@ -0,0 +1,632 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Proxy_h
+#define js_Proxy_h
+
+#include "mozilla/Maybe.h"
+
+#include "jsfriendapi.h"
+
+#include "js/CallNonGenericMethod.h"
+#include "js/Class.h"
+
+namespace js {
+
+using JS::AutoIdVector;
+using JS::CallArgs;
+using JS::Handle;
+using JS::HandleId;
+using JS::HandleObject;
+using JS::HandleValue;
+using JS::IsAcceptableThis;
+using JS::MutableHandle;
+using JS::MutableHandleObject;
+using JS::MutableHandleValue;
+using JS::NativeImpl;
+using JS::ObjectOpResult;
+using JS::PrivateValue;
+using JS::PropertyDescriptor;
+using JS::Value;
+
+class RegExpGuard;
+class JS_FRIEND_API(Wrapper);
+
+/*
+ * A proxy is a JSObject with highly customizable behavior. ES6 specifies a
+ * single kind of proxy, but the customization mechanisms we use to implement
+ * ES6 Proxy objects are also useful wherever an object with weird behavior is
+ * wanted. Proxies are used to implement:
+ *
+ * - the scope objects used by the Debugger's frame.eval() method
+ * (see js::GetDebugScopeForFunction)
+ *
+ * - the khuey hack, whereby a whole compartment can be blown away
+ * even if other compartments hold references to objects in it
+ * (see js::NukeCrossCompartmentWrappers)
+ *
+ * - XPConnect security wrappers, which protect chrome from malicious content
+ * (js/xpconnect/wrappers)
+ *
+ * - DOM objects with special property behavior, like named getters
+ * (dom/bindings/Codegen.py generates these proxies from WebIDL)
+ *
+ * - semi-transparent use of objects that live in other processes
+ * (CPOWs, implemented in js/ipc)
+ *
+ * ### Proxies and internal methods
+ *
+ * ES2016 specifies 13 internal methods. The runtime semantics of just
+ * about everything a script can do to an object is specified in terms
+ * of these internal methods. For example:
+ *
+ * JS code ES6 internal method that gets called
+ * --------------------------- --------------------------------
+ * obj.prop obj.[[Get]](obj, "prop")
+ * "prop" in obj obj.[[HasProperty]]("prop")
+ * new obj() obj.[[Construct]](<empty argument List>)
+ *
+ * With regard to the implementation of these internal methods, there are three
+ * very different kinds of object in SpiderMonkey.
+ *
+ * 1. Native objects' internal methods are implemented in vm/NativeObject.cpp,
+ * with duplicate (but functionally identical) implementations scattered
+ * through the ICs and JITs.
+ *
+ * 2. Certain non-native objects have internal methods that are implemented as
+ * magical js::ObjectOps hooks. We're trying to get rid of these.
+ *
+ * 3. All other objects are proxies. A proxy's internal methods are
+ * implemented in C++, as the virtual methods of a C++ object stored on the
+ * proxy, known as its handler.
+ *
+ * This means that just about anything you do to a proxy will end up going
+ * through a C++ virtual method call. Possibly several. There's no reason the
+ * JITs and ICs can't specialize for particular proxies, based on the handler;
+ * but currently we don't do much of this, so the virtual method overhead
+ * typically is actually incurred.
+ *
+ * ### The proxy handler hierarchy
+ *
+ * A major use case for proxies is to forward each internal method call to
+ * another object, known as its target. The target can be an arbitrary JS
+ * object. Not every proxy has the notion of a target, however.
+ *
+ * To minimize code duplication, a set of abstract proxy handler classes is
+ * provided, from which other handlers may inherit. These abstract classes are
+ * organized in the following hierarchy:
+ *
+ * BaseProxyHandler
+ * |
+ * Wrapper // has a target, can be unwrapped to reveal
+ * | // target (see js::CheckedUnwrap)
+ * |
+ * CrossCompartmentWrapper // target is in another compartment;
+ * // implements membrane between compartments
+ *
+ * Example: Some DOM objects (including all the arraylike DOM objects) are
+ * implemented as proxies. Since these objects don't need to forward operations
+ * to any underlying JS object, DOMJSProxyHandler directly subclasses
+ * BaseProxyHandler.
+ *
+ * Gecko's security wrappers are examples of cross-compartment wrappers.
+ *
+ * ### Proxy prototype chains
+ *
+ * In addition to the normal methods, there are two models for proxy prototype
+ * chains.
+ *
+ * 1. Proxies can use the standard prototype mechanism used throughout the
+ * engine. To do so, simply pass a prototype to NewProxyObject() at
+ * creation time. All prototype accesses will then "just work" to treat the
+ * proxy as a "normal" object.
+ *
+ * 2. A proxy can implement more complicated prototype semantics (if, for
+ * example, it wants to delegate the prototype lookup to a wrapped object)
+ * by passing Proxy::LazyProto as the prototype at create time. This
+ * guarantees that the getPrototype() handler method will be called every
+ * time the object's prototype chain is accessed.
+ *
+ * This system is implemented with two methods: {get,set}Prototype. The
+ * default implementation of setPrototype throws a TypeError. Since it is
+ * not possible to create an object without a sense of prototype chain,
+ * handlers must implement getPrototype if opting in to the dynamic
+ * prototype system.
+ */
+
+/*
+ * BaseProxyHandler is the most generic kind of proxy handler. It does not make
+ * any assumptions about the target. Consequently, it does not provide any
+ * default implementation for most methods. As a convenience, a few high-level
+ * methods, like get() and set(), are given default implementations that work by
+ * calling the low-level methods, like getOwnPropertyDescriptor().
+ *
+ * Important: If you add a method here, you should probably also add a
+ * Proxy::foo entry point with an AutoEnterPolicy. If you don't, you need an
+ * explicit override for the method in SecurityWrapper. See bug 945826 comment 0.
+ */
+class JS_FRIEND_API(BaseProxyHandler)
+{
+ /*
+ * Sometimes it's desirable to designate groups of proxy handlers as "similar".
+ * For this, we use the notion of a "family": A consumer-provided opaque pointer
+ * that designates the larger group to which this proxy belongs.
+ *
+ * If it will never be important to differentiate this proxy from others as
+ * part of a distinct group, nullptr may be used instead.
+ */
+ const void* mFamily;
+
+ /*
+ * Proxy handlers can use mHasPrototype to request the following special
+ * treatment from the JS engine:
+ *
+ * - When mHasPrototype is true, the engine never calls these methods:
+ * getPropertyDescriptor, has, set, enumerate, iterate. Instead, for
+ * these operations, it calls the "own" methods like
+ * getOwnPropertyDescriptor, hasOwn, defineProperty,
+ * getOwnEnumerablePropertyKeys, etc., and consults the prototype chain
+ * if needed.
+ *
+ * - When mHasPrototype is true, the engine calls handler->get() only if
+ * handler->hasOwn() says an own property exists on the proxy. If not,
+ * it consults the prototype chain.
+ *
+ * This is useful because it frees the ProxyHandler from having to implement
+ * any behavior having to do with the prototype chain.
+ */
+ bool mHasPrototype;
+
+ /*
+ * All proxies indicate whether they have any sort of interesting security
+ * policy that might prevent the caller from doing something it wants to
+ * the object. In the case of wrappers, this distinction is used to
+ * determine whether the caller may strip off the wrapper if it so desires.
+ */
+ bool mHasSecurityPolicy;
+
+ public:
+ explicit constexpr BaseProxyHandler(const void* aFamily, bool aHasPrototype = false,
+ bool aHasSecurityPolicy = false)
+ : mFamily(aFamily),
+ mHasPrototype(aHasPrototype),
+ mHasSecurityPolicy(aHasSecurityPolicy)
+ { }
+
+ bool hasPrototype() const {
+ return mHasPrototype;
+ }
+
+ bool hasSecurityPolicy() const {
+ return mHasSecurityPolicy;
+ }
+
+ inline const void* family() const {
+ return mFamily;
+ }
+ static size_t offsetOfFamily() {
+ return offsetof(BaseProxyHandler, mFamily);
+ }
+
+ virtual bool finalizeInBackground(const Value& priv) const {
+ /*
+ * Called on creation of a proxy to determine whether its finalize
+         * method can be run on the background thread.
+ */
+ return true;
+ }
+
+ virtual bool canNurseryAllocate() const {
+ /*
+ * Nursery allocation is allowed if and only if it is safe to not
+ * run |finalize| when the ProxyObject dies.
+ */
+ return false;
+ }
+
+ /* Policy enforcement methods.
+ *
+ * enter() allows the policy to specify whether the caller may perform |act|
+ * on the proxy's |id| property. In the case when |act| is CALL, |id| is
+ * generally JSID_VOID.
+ *
+ * The |act| parameter to enter() specifies the action being performed.
+ * If |bp| is false, the method suggests that the caller throw (though it
+ * may still decide to squelch the error).
+ *
+ * We make these OR-able so that assertEnteredPolicy can pass a union of them.
+     * For example, get{,Own}PropertyDescriptor is invoked by calls to ::get() and
+ * ::set(), in addition to being invoked on its own, so there are several
+ * valid Actions that could have been entered.
+ */
+ typedef uint32_t Action;
+ enum {
+ NONE = 0x00,
+ GET = 0x01,
+ SET = 0x02,
+ CALL = 0x04,
+ ENUMERATE = 0x08,
+ GET_PROPERTY_DESCRIPTOR = 0x10
+ };
+
+ virtual bool enter(JSContext* cx, HandleObject wrapper, HandleId id, Action act,
+ bool* bp) const;
+
+ /* Standard internal methods. */
+ virtual bool getOwnPropertyDescriptor(JSContext* cx, HandleObject proxy, HandleId id,
+ MutableHandle<PropertyDescriptor> desc) const = 0;
+ virtual bool defineProperty(JSContext* cx, HandleObject proxy, HandleId id,
+ Handle<PropertyDescriptor> desc,
+ ObjectOpResult& result) const = 0;
+ virtual bool ownPropertyKeys(JSContext* cx, HandleObject proxy,
+ AutoIdVector& props) const = 0;
+ virtual bool delete_(JSContext* cx, HandleObject proxy, HandleId id,
+ ObjectOpResult& result) const = 0;
+
+ /*
+ * These methods are standard, but the engine does not normally call them.
+ * They're opt-in. See "Proxy prototype chains" above.
+ *
+ * getPrototype() crashes if called. setPrototype() throws a TypeError.
+ */
+ virtual bool getPrototype(JSContext* cx, HandleObject proxy, MutableHandleObject protop) const;
+ virtual bool setPrototype(JSContext* cx, HandleObject proxy, HandleObject proto,
+ ObjectOpResult& result) const;
+
+ /* Non-standard but conceptual kin to {g,s}etPrototype, so these live here. */
+ virtual bool getPrototypeIfOrdinary(JSContext* cx, HandleObject proxy, bool* isOrdinary,
+ MutableHandleObject protop) const = 0;
+ virtual bool setImmutablePrototype(JSContext* cx, HandleObject proxy, bool* succeeded) const;
+
+ virtual bool preventExtensions(JSContext* cx, HandleObject proxy,
+ ObjectOpResult& result) const = 0;
+ virtual bool isExtensible(JSContext* cx, HandleObject proxy, bool* extensible) const = 0;
+
+ /*
+ * These standard internal methods are implemented, as a convenience, so
+ * that ProxyHandler subclasses don't have to provide every single method.
+ *
+ * The base-class implementations work by calling getPropertyDescriptor().
+ * They do not follow any standard. When in doubt, override them.
+ */
+ virtual bool has(JSContext* cx, HandleObject proxy, HandleId id, bool* bp) const;
+ virtual bool get(JSContext* cx, HandleObject proxy, HandleValue receiver,
+ HandleId id, MutableHandleValue vp) const;
+ virtual bool set(JSContext* cx, HandleObject proxy, HandleId id, HandleValue v,
+ HandleValue receiver, ObjectOpResult& result) const;
+
+ /*
+ * [[Call]] and [[Construct]] are standard internal methods but according
+ * to the spec, they are not present on every object.
+ *
+ * SpiderMonkey never calls a proxy's call()/construct() internal method
+ * unless isCallable()/isConstructor() returns true for that proxy.
+ *
+ * BaseProxyHandler::isCallable()/isConstructor() always return false, and
+ * BaseProxyHandler::call()/construct() crash if called. So if you're
+     * creating a kind of proxy that is never callable, you don't have to override
+ * anything, but otherwise you probably want to override all four.
+ */
+ virtual bool call(JSContext* cx, HandleObject proxy, const CallArgs& args) const;
+ virtual bool construct(JSContext* cx, HandleObject proxy, const CallArgs& args) const;
+
+ /* SpiderMonkey extensions. */
+ virtual bool enumerate(JSContext* cx, HandleObject proxy, MutableHandleObject objp) const;
+ virtual bool getPropertyDescriptor(JSContext* cx, HandleObject proxy, HandleId id,
+ MutableHandle<PropertyDescriptor> desc) const;
+ virtual bool hasOwn(JSContext* cx, HandleObject proxy, HandleId id, bool* bp) const;
+ virtual bool getOwnEnumerablePropertyKeys(JSContext* cx, HandleObject proxy,
+ AutoIdVector& props) const;
+ virtual bool nativeCall(JSContext* cx, IsAcceptableThis test, NativeImpl impl,
+ const CallArgs& args) const;
+ virtual bool hasInstance(JSContext* cx, HandleObject proxy, MutableHandleValue v, bool* bp) const;
+ virtual bool getBuiltinClass(JSContext* cx, HandleObject proxy,
+ ESClass* cls) const;
+ virtual bool isArray(JSContext* cx, HandleObject proxy, JS::IsArrayAnswer* answer) const;
+ virtual const char* className(JSContext* cx, HandleObject proxy) const;
+ virtual JSString* fun_toString(JSContext* cx, HandleObject proxy, unsigned indent) const;
+ virtual bool regexp_toShared(JSContext* cx, HandleObject proxy, RegExpGuard* g) const;
+ virtual bool boxedValue_unbox(JSContext* cx, HandleObject proxy, MutableHandleValue vp) const;
+ virtual void trace(JSTracer* trc, JSObject* proxy) const;
+ virtual void finalize(JSFreeOp* fop, JSObject* proxy) const;
+ virtual void objectMoved(JSObject* proxy, const JSObject* old) const;
+
+ // Allow proxies, wrappers in particular, to specify callability at runtime.
+ // Note: These do not take const JSObject*, but they do in spirit.
+ // We are not prepared to do this, as there's little const correctness
+ // in the external APIs that handle proxies.
+ virtual bool isCallable(JSObject* obj) const;
+ virtual bool isConstructor(JSObject* obj) const;
+
+ // These two hooks must be overridden, or not overridden, in tandem -- no
+ // overriding just one!
+ virtual bool watch(JSContext* cx, JS::HandleObject proxy, JS::HandleId id,
+ JS::HandleObject callable) const;
+ virtual bool unwatch(JSContext* cx, JS::HandleObject proxy, JS::HandleId id) const;
+
+ virtual bool getElements(JSContext* cx, HandleObject proxy, uint32_t begin, uint32_t end,
+ ElementAdder* adder) const;
+
+ /* See comment for weakmapKeyDelegateOp in js/Class.h. */
+ virtual JSObject* weakmapKeyDelegate(JSObject* proxy) const;
+ virtual bool isScripted() const { return false; }
+};
+
+extern JS_FRIEND_DATA(const js::Class* const) ProxyClassPtr;
+
+inline bool IsProxy(const JSObject* obj)
+{
+ return GetObjectClass(obj)->isProxy();
+}
+
+namespace detail {
+const uint32_t PROXY_EXTRA_SLOTS = 2;
+
+// Layout of the values stored by a proxy. Note that API clients require the
+// private slot to be the first slot in the proxy's values, so that the private
+// slot can be accessed in the same fashion as the first reserved slot, via
+// {Get,Set}ReservedOrProxyPrivateSlot.
+
+struct ProxyValueArray
+{
+ Value privateSlot;
+ Value extraSlots[PROXY_EXTRA_SLOTS];
+
+ ProxyValueArray()
+ : privateSlot(JS::UndefinedValue())
+ {
+ for (size_t i = 0; i < PROXY_EXTRA_SLOTS; i++)
+ extraSlots[i] = JS::UndefinedValue();
+ }
+};
+
+// All proxies share the same data layout. Following the object's shape and
+// type, the proxy has a ProxyDataLayout structure with a pointer to an array
+// of values and the proxy's handler. This is designed both so that proxies can
+// be easily swapped with other objects (via RemapWrapper) and to mimic the
+// layout of other objects (proxies and other objects have the same size) so
+// that common code can access either type of object.
+//
+// See GetReservedOrProxyPrivateSlot below.
+struct ProxyDataLayout
+{
+ ProxyValueArray* values;
+ const BaseProxyHandler* handler;
+};
+
+const uint32_t ProxyDataOffset = 2 * sizeof(void*);
+
+inline ProxyDataLayout*
+GetProxyDataLayout(JSObject* obj)
+{
+ MOZ_ASSERT(IsProxy(obj));
+ return reinterpret_cast<ProxyDataLayout*>(reinterpret_cast<uint8_t*>(obj) + ProxyDataOffset);
+}
+
+inline const ProxyDataLayout*
+GetProxyDataLayout(const JSObject* obj)
+{
+ MOZ_ASSERT(IsProxy(obj));
+ return reinterpret_cast<const ProxyDataLayout*>(reinterpret_cast<const uint8_t*>(obj) +
+ ProxyDataOffset);
+}
+} // namespace detail
+
+inline const BaseProxyHandler*
+GetProxyHandler(const JSObject* obj)
+{
+ return detail::GetProxyDataLayout(obj)->handler;
+}
+
+inline const Value&
+GetProxyPrivate(const JSObject* obj)
+{
+ return detail::GetProxyDataLayout(obj)->values->privateSlot;
+}
+
+inline JSObject*
+GetProxyTargetObject(JSObject* obj)
+{
+ return GetProxyPrivate(obj).toObjectOrNull();
+}
+
+inline const Value&
+GetProxyExtra(const JSObject* obj, size_t n)
+{
+ MOZ_ASSERT(n < detail::PROXY_EXTRA_SLOTS);
+ return detail::GetProxyDataLayout(obj)->values->extraSlots[n];
+}
+
+inline void
+SetProxyHandler(JSObject* obj, const BaseProxyHandler* handler)
+{
+ detail::GetProxyDataLayout(obj)->handler = handler;
+}
+
+JS_FRIEND_API(void)
+SetValueInProxy(Value* slot, const Value& value);
+
+inline void
+SetProxyExtra(JSObject* obj, size_t n, const Value& extra)
+{
+ MOZ_ASSERT(n < detail::PROXY_EXTRA_SLOTS);
+ Value* vp = &detail::GetProxyDataLayout(obj)->values->extraSlots[n];
+
+ // Trigger a barrier before writing the slot.
+ if (vp->isMarkable() || extra.isMarkable())
+ SetValueInProxy(vp, extra);
+ else
+ *vp = extra;
+}
+
+inline bool
+IsScriptedProxy(const JSObject* obj)
+{
+ return IsProxy(obj) && GetProxyHandler(obj)->isScripted();
+}
+
+inline const Value&
+GetReservedOrProxyPrivateSlot(const JSObject* obj, size_t slot)
+{
+ MOZ_ASSERT(slot == 0);
+ MOZ_ASSERT(slot < JSCLASS_RESERVED_SLOTS(GetObjectClass(obj)) || IsProxy(obj));
+ return reinterpret_cast<const shadow::Object*>(obj)->slotRef(slot);
+}
+
+inline void
+SetReservedOrProxyPrivateSlot(JSObject* obj, size_t slot, const Value& value)
+{
+ MOZ_ASSERT(slot == 0);
+ MOZ_ASSERT(slot < JSCLASS_RESERVED_SLOTS(GetObjectClass(obj)) || IsProxy(obj));
+ shadow::Object* sobj = reinterpret_cast<shadow::Object*>(obj);
+ if (sobj->slotRef(slot).isMarkable() || value.isMarkable())
+ SetReservedOrProxyPrivateSlotWithBarrier(obj, slot, value);
+ else
+ sobj->slotRef(slot) = value;
+}
+
+class MOZ_STACK_CLASS ProxyOptions {
+ protected:
+ /* protected constructor for subclass */
+ explicit ProxyOptions(bool singletonArg, bool lazyProtoArg = false)
+ : singleton_(singletonArg),
+ lazyProto_(lazyProtoArg),
+ clasp_(ProxyClassPtr)
+ {}
+
+ public:
+ ProxyOptions() : singleton_(false),
+ lazyProto_(false),
+ clasp_(ProxyClassPtr)
+ {}
+
+ bool singleton() const { return singleton_; }
+ ProxyOptions& setSingleton(bool flag) {
+ singleton_ = flag;
+ return *this;
+ }
+
+ bool lazyProto() const { return lazyProto_; }
+ ProxyOptions& setLazyProto(bool flag) {
+ lazyProto_ = flag;
+ return *this;
+ }
+
+ const Class* clasp() const {
+ return clasp_;
+ }
+ ProxyOptions& setClass(const Class* claspArg) {
+ clasp_ = claspArg;
+ return *this;
+ }
+
+ private:
+ bool singleton_;
+ bool lazyProto_;
+ const Class* clasp_;
+};
+
+JS_FRIEND_API(JSObject*)
+NewProxyObject(JSContext* cx, const BaseProxyHandler* handler, HandleValue priv,
+ JSObject* proto, const ProxyOptions& options = ProxyOptions());
+
+JSObject*
+RenewProxyObject(JSContext* cx, JSObject* obj, BaseProxyHandler* handler, const Value& priv);
+
+class JS_FRIEND_API(AutoEnterPolicy)
+{
+ public:
+ typedef BaseProxyHandler::Action Action;
+ AutoEnterPolicy(JSContext* cx, const BaseProxyHandler* handler,
+ HandleObject wrapper, HandleId id, Action act, bool mayThrow)
+#ifdef JS_DEBUG
+ : context(nullptr)
+#endif
+ {
+ allow = handler->hasSecurityPolicy() ? handler->enter(cx, wrapper, id, act, &rv)
+ : true;
+ recordEnter(cx, wrapper, id, act);
+ // We want to throw an exception if all of the following are true:
+ // * The policy disallowed access.
+ // * The policy set rv to false, indicating that we should throw.
+ // * The caller did not instruct us to ignore exceptions.
+ // * The policy did not throw itself.
+ if (!allow && !rv && mayThrow)
+ reportErrorIfExceptionIsNotPending(cx, id);
+ }
+
+ virtual ~AutoEnterPolicy() { recordLeave(); }
+ inline bool allowed() { return allow; }
+ inline bool returnValue() { MOZ_ASSERT(!allowed()); return rv; }
+
+ protected:
+ // no-op constructor for subclass
+ AutoEnterPolicy()
+#ifdef JS_DEBUG
+ : context(nullptr)
+ , enteredAction(BaseProxyHandler::NONE)
+#endif
+ {}
+ void reportErrorIfExceptionIsNotPending(JSContext* cx, jsid id);
+ bool allow;
+ bool rv;
+
+#ifdef JS_DEBUG
+ JSContext* context;
+ mozilla::Maybe<HandleObject> enteredProxy;
+ mozilla::Maybe<HandleId> enteredId;
+ Action enteredAction;
+
+ // NB: We explicitly don't track the entered action here, because sometimes
+ // set() methods do an implicit get() during their implementation, leading
+ // to spurious assertions.
+ AutoEnterPolicy* prev;
+ void recordEnter(JSContext* cx, HandleObject proxy, HandleId id, Action act);
+ void recordLeave();
+
+ friend JS_FRIEND_API(void) assertEnteredPolicy(JSContext* cx, JSObject* proxy, jsid id, Action act);
+#else
+ inline void recordEnter(JSContext* cx, JSObject* proxy, jsid id, Action act) {}
+ inline void recordLeave() {}
+#endif
+
+};
+
+#ifdef JS_DEBUG
+class JS_FRIEND_API(AutoWaivePolicy) : public AutoEnterPolicy {
+public:
+ AutoWaivePolicy(JSContext* cx, HandleObject proxy, HandleId id,
+ BaseProxyHandler::Action act)
+ {
+ allow = true;
+ recordEnter(cx, proxy, id, act);
+ }
+};
+#else
+class JS_FRIEND_API(AutoWaivePolicy) {
+ public:
+ AutoWaivePolicy(JSContext* cx, HandleObject proxy, HandleId id,
+ BaseProxyHandler::Action act)
+ {}
+};
+#endif
+
+#ifdef JS_DEBUG
+extern JS_FRIEND_API(void)
+assertEnteredPolicy(JSContext* cx, JSObject* obj, jsid id,
+ BaseProxyHandler::Action act);
+#else
+inline void assertEnteredPolicy(JSContext* cx, JSObject* obj, jsid id,
+ BaseProxyHandler::Action act)
+{}
+#endif
+
+extern JS_FRIEND_API(JSObject*)
+InitProxyClass(JSContext* cx, JS::HandleObject obj);
+
+} /* namespace js */
+
+#endif /* js_Proxy_h */
diff --git a/js/public/Realm.h b/js/public/Realm.h
new file mode 100644
index 0000000000..13a22c7072
--- /dev/null
+++ b/js/public/Realm.h
@@ -0,0 +1,42 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Ways to get various per-Realm objects. All the getters declared in this
+ * header operate on the Realm corresponding to the current compartment on the
+ * JSContext.
+ */
+
+#ifndef js_Realm_h
+#define js_Realm_h
+
+#include "jstypes.h"
+
+struct JSContext;
+class JSObject;
+
+namespace JS {
+
+extern JS_PUBLIC_API(JSObject*)
+GetRealmObjectPrototype(JSContext* cx);
+
+extern JS_PUBLIC_API(JSObject*)
+GetRealmFunctionPrototype(JSContext* cx);
+
+extern JS_PUBLIC_API(JSObject*)
+GetRealmArrayPrototype(JSContext* cx);
+
+extern JS_PUBLIC_API(JSObject*)
+GetRealmErrorPrototype(JSContext* cx);
+
+extern JS_PUBLIC_API(JSObject*)
+GetRealmIteratorPrototype(JSContext* cx);
+
+} // namespace JS
+
+#endif // js_Realm_h
+
+
diff --git a/js/public/RequiredDefines.h b/js/public/RequiredDefines.h
new file mode 100644
index 0000000000..308fd7d625
--- /dev/null
+++ b/js/public/RequiredDefines.h
@@ -0,0 +1,34 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/*
+ * Various #defines required to build SpiderMonkey. Embedders should add this
+ * file to the start of the command line via -include or a similar mechanism,
+ * or SpiderMonkey public headers may not work correctly.
+ */
+
+#ifndef js_RequiredDefines_h
+#define js_RequiredDefines_h
+
+/*
+ * The C99 standard, which defines the limit macros (UINT32_MAX for example), says:
+ *
+ * C++ implementations should define these macros only when
+ * __STDC_LIMIT_MACROS is defined before <stdint.h> is included.
+ *
+ * The same also occurs with __STDC_CONSTANT_MACROS for the constant macros
+ * (INT8_C for example) used to specify a literal constant of the proper type,
+ * and with __STDC_FORMAT_MACROS for the format macros (PRId32 for example) used
+ * with the fprintf function family.
+ */
+#define __STDC_LIMIT_MACROS
+#define __STDC_CONSTANT_MACROS
+#define __STDC_FORMAT_MACROS
+
+/* Also define a char16_t type if not provided by the compiler. */
+#include "mozilla/Char16.h"
+
+#endif /* js_RequiredDefines_h */
diff --git a/js/public/RootingAPI.h b/js/public/RootingAPI.h
new file mode 100644
index 0000000000..9f6ed89433
--- /dev/null
+++ b/js/public/RootingAPI.h
@@ -0,0 +1,1330 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_RootingAPI_h
+#define js_RootingAPI_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/GuardObjects.h"
+#include "mozilla/LinkedList.h"
+#include "mozilla/Move.h"
+#include "mozilla/TypeTraits.h"
+
+#include <type_traits>
+
+#include "jspubtd.h"
+
+#include "js/GCAnnotations.h"
+#include "js/GCAPI.h"
+#include "js/GCPolicyAPI.h"
+#include "js/HeapAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Utility.h"
+
+/*
+ * Moving GC Stack Rooting
+ *
+ * A moving GC may change the physical location of GC allocated things, even
+ * when they are rooted, updating all pointers to the thing to refer to its new
+ * location. The GC must therefore know about all live pointers to a thing,
+ * not just one of them, in order to behave correctly.
+ *
+ * The |Rooted| and |Handle| classes below are used to root stack locations
+ * whose value may be held live across a call that can trigger GC. For a
+ * code fragment such as:
+ *
+ * JSObject* obj = NewObject(cx);
+ * DoSomething(cx);
+ * ... = obj->lastProperty();
+ *
+ * If |DoSomething()| can trigger a GC, the stack location of |obj| must be
+ * rooted to ensure that the GC does not move the JSObject referred to by
+ * |obj| without updating |obj|'s location itself. This rooting must happen
+ * regardless of whether there are other roots which ensure that the object
+ * itself will not be collected.
+ *
+ * If |DoSomething()| cannot trigger a GC, and the same holds for all other
+ * calls made between |obj|'s definitions and its last uses, then no rooting
+ * is required.
+ *
+ * SpiderMonkey can trigger a GC at almost any time and in ways that are not
+ * always clear. For example, the following innocuous-looking actions can
+ * cause a GC: allocation of any new GC thing; JSObject::hasProperty;
+ * JS_ReportError and friends; and ToNumber, among many others. The following
+ * dangerous-looking actions cannot trigger a GC: js_malloc, cx->malloc_,
+ * rt->malloc_, and friends and JS_ReportOutOfMemory.
+ *
+ * The following family of three classes will exactly root a stack location.
+ * Incorrect usage of these classes will result in a compile error in almost
+ * all cases. Therefore, it is very hard to be incorrectly rooted if you use
+ * these classes exclusively. These classes are all templated on the type T of
+ * the value being rooted.
+ *
+ * - Rooted<T> declares a variable of type T, whose value is always rooted.
+ * Rooted<T> may be automatically coerced to a Handle<T>, below. Rooted<T>
+ * should be used whenever a local variable's value may be held live across a
+ * call which can trigger a GC.
+ *
+ * - Handle<T> is a const reference to a Rooted<T>. Functions which take GC
+ * things or values as arguments and need to root those arguments should
+ * generally use handles for those arguments and avoid any explicit rooting.
+ * This has two benefits. First, when several such functions call each other
+ * then redundant rooting of multiple copies of the GC thing can be avoided.
+ * Second, if the caller does not pass a rooted value a compile error will be
+ * generated, which is quicker and easier to fix than when relying on a
+ * separate rooting analysis.
+ *
+ * - MutableHandle<T> is a non-const reference to Rooted<T>. It is used in the
+ * same way as Handle<T> and includes a |set(const T& v)| method to allow
+ * updating the value of the referenced Rooted<T>. A MutableHandle<T> can be
+ * created with an implicit cast from a Rooted<T>*.
+ *
+ * In some cases the small performance overhead of exact rooting (measured to
+ * be a few nanoseconds on desktop) is too much. In these cases, try the
+ * following:
+ *
+ * - Move all Rooted<T> above inner loops: this allows you to re-use the root
+ * on each iteration of the loop.
+ *
+ * - Pass Handle<T> through your hot call stack to avoid re-rooting costs at
+ * every invocation.
+ *
+ * The following diagram explains the list of supported, implicit type
+ * conversions between classes of this family:
+ *
+ * Rooted<T> ----> Handle<T>
+ * | ^
+ * | |
+ * | |
+ * +---> MutableHandle<T>
+ * (via &)
+ *
+ * All of these types have an implicit conversion to raw pointers.
+ */
+
+namespace js {
+
+template <typename T>
+struct BarrierMethods {
+};
+
+template <typename T>
+class RootedBase {};
+
+template <typename T>
+class HandleBase {};
+
+template <typename T>
+class MutableHandleBase {};
+
+template <typename T>
+class HeapBase {};
+
+// Cannot use FOR_EACH_HEAP_ABLE_GC_POINTER_TYPE, as this would import too many macros into scope
+template <typename T> struct IsHeapConstructibleType { static constexpr bool value = false; };
+#define DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE(T) \
+ template <> struct IsHeapConstructibleType<T> { static constexpr bool value = true; };
+FOR_EACH_PUBLIC_GC_POINTER_TYPE(DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE)
+FOR_EACH_PUBLIC_TAGGED_GC_POINTER_TYPE(DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE)
+#undef DECLARE_IS_HEAP_CONSTRUCTIBLE_TYPE
+
+template <typename T>
+class PersistentRootedBase {};
+
+static void* const ConstNullValue = nullptr;
+
+namespace gc {
+struct Cell;
+template<typename T>
+struct PersistentRootedMarker;
+} /* namespace gc */
+
+#define DECLARE_POINTER_COMPARISON_OPS(T) \
+ bool operator==(const T& other) const { return get() == other; } \
+ bool operator!=(const T& other) const { return get() != other; }
+
+// Important: Return a reference so passing a Rooted<T>, etc. to
+// something that takes a |const T&| is not a GC hazard.
+#define DECLARE_POINTER_CONSTREF_OPS(T) \
+ operator const T&() const { return get(); } \
+ const T& operator->() const { return get(); }
+
+// Assignment operators on a base class are hidden by the implicitly defined
+// operator= on the derived class. Thus, define the operator= directly on the
+// class as we would need to manually pass it through anyway.
+#define DECLARE_POINTER_ASSIGN_OPS(Wrapper, T) \
+ Wrapper<T>& operator=(const T& p) { \
+ set(p); \
+ return *this; \
+ } \
+ Wrapper<T>& operator=(T&& p) { \
+ set(mozilla::Move(p)); \
+ return *this; \
+ } \
+ Wrapper<T>& operator=(const Wrapper<T>& other) { \
+ set(other.get()); \
+ return *this; \
+ } \
+
+#define DELETE_ASSIGNMENT_OPS(Wrapper, T) \
+ template <typename S> Wrapper<T>& operator=(S) = delete; \
+ Wrapper<T>& operator=(const Wrapper<T>&) = delete;
+
+#define DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr) \
+ const T* address() const { return &(ptr); } \
+ const T& get() const { return (ptr); } \
+
+#define DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr) \
+ T* address() { return &(ptr); } \
+ T& get() { return (ptr); } \
+
+} /* namespace js */
+
+namespace JS {
+
+template <typename T> class Rooted;
+template <typename T> class PersistentRooted;
+
+/* This is exposing internal state of the GC for inlining purposes. */
+JS_FRIEND_API(bool) isGCEnabled();
+
+JS_FRIEND_API(void) HeapObjectPostBarrier(JSObject** objp, JSObject* prev, JSObject* next);
+
+#ifdef JS_DEBUG
+/**
+ * For generational GC, assert that an object is in the tenured generation as
+ * opposed to being in the nursery.
+ */
+extern JS_FRIEND_API(void)
+AssertGCThingMustBeTenured(JSObject* obj);
+extern JS_FRIEND_API(void)
+AssertGCThingIsNotAnObjectSubclass(js::gc::Cell* cell);
+#else
+inline void
+AssertGCThingMustBeTenured(JSObject* obj) {}
+inline void
+AssertGCThingIsNotAnObjectSubclass(js::gc::Cell* cell) {}
+#endif
+
+/**
+ * The Heap<T> class is a heap-stored reference to a JS GC thing. All members of
+ * heap classes that refer to GC things should use Heap<T> (or possibly
+ * TenuredHeap<T>, described below).
+ *
+ * Heap<T> is an abstraction that hides some of the complexity required to
+ * maintain GC invariants for the contained reference. It uses operator
+ * overloading to provide a normal pointer interface, but notifies the GC every
+ * time the value it contains is updated. This is necessary for generational GC,
+ * which keeps track of all pointers into the nursery.
+ *
+ * Heap<T> instances must be traced when their containing object is traced to
+ * keep the pointed-to GC thing alive.
+ *
+ * Heap<T> objects should only be used on the heap. GC references stored on the
+ * C/C++ stack must use Rooted/Handle/MutableHandle instead.
+ *
+ * Type T must be a public GC pointer type.
+ */
+template <typename T>
+class MOZ_NON_MEMMOVABLE Heap : public js::HeapBase<T>
+{
+ // Please note: this can actually also be used by nsXBLMaybeCompiled<T>, for legacy reasons.
+ static_assert(js::IsHeapConstructibleType<T>::value,
+ "Type T must be a public GC pointer type");
+ public:
+ Heap() {
+ static_assert(sizeof(T) == sizeof(Heap<T>),
+ "Heap<T> must be binary compatible with T.");
+ init(GCPolicy<T>::initial());
+ }
+ explicit Heap(const T& p) { init(p); }
+
+ /*
+ * For Heap, move semantics are equivalent to copy semantics. In C++, a
+ * copy constructor taking const-ref is the way to get a single function
+ * that will be used for both lvalue and rvalue copies, so we can simply
+ * omit the rvalue variant.
+ */
+ explicit Heap(const Heap<T>& p) { init(p.ptr); }
+
+ ~Heap() {
+ post(ptr, GCPolicy<T>::initial());
+ }
+
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_POINTER_ASSIGN_OPS(Heap, T);
+
+ const T* address() const { return &ptr; }
+
+ void exposeToActiveJS() const {
+ js::BarrierMethods<T>::exposeToJS(ptr);
+ }
+ const T& get() const {
+ exposeToActiveJS();
+ return ptr;
+ }
+ const T& unbarrieredGet() const {
+ return ptr;
+ }
+
+ T* unsafeGet() { return &ptr; }
+
+ explicit operator bool() const {
+ return bool(js::BarrierMethods<T>::asGCThingOrNull(ptr));
+ }
+ explicit operator bool() {
+ return bool(js::BarrierMethods<T>::asGCThingOrNull(ptr));
+ }
+
+ private:
+ void init(const T& newPtr) {
+ ptr = newPtr;
+ post(GCPolicy<T>::initial(), ptr);
+ }
+
+ void set(const T& newPtr) {
+ T tmp = ptr;
+ ptr = newPtr;
+ post(tmp, ptr);
+ }
+
+ void post(const T& prev, const T& next) {
+ js::BarrierMethods<T>::postBarrier(&ptr, prev, next);
+ }
+
+ T ptr;
+};
+
+static MOZ_ALWAYS_INLINE bool
+ObjectIsTenured(JSObject* obj)
+{
+ return !js::gc::IsInsideNursery(reinterpret_cast<js::gc::Cell*>(obj));
+}
+
+static MOZ_ALWAYS_INLINE bool
+ObjectIsTenured(const Heap<JSObject*>& obj)
+{
+ return ObjectIsTenured(obj.unbarrieredGet());
+}
+
+static MOZ_ALWAYS_INLINE bool
+ObjectIsMarkedGray(JSObject* obj)
+{
+ auto cell = reinterpret_cast<js::gc::Cell*>(obj);
+ return js::gc::detail::CellIsMarkedGrayIfKnown(cell);
+}
+
+static MOZ_ALWAYS_INLINE bool
+ObjectIsMarkedGray(const JS::Heap<JSObject*>& obj)
+{
+ return ObjectIsMarkedGray(obj.unbarrieredGet());
+}
+
+static MOZ_ALWAYS_INLINE bool
+ScriptIsMarkedGray(JSScript* script)
+{
+ auto cell = reinterpret_cast<js::gc::Cell*>(script);
+ return js::gc::detail::CellIsMarkedGrayIfKnown(cell);
+}
+
+static MOZ_ALWAYS_INLINE bool
+ScriptIsMarkedGray(const Heap<JSScript*>& script)
+{
+ return ScriptIsMarkedGray(script.unbarrieredGet());
+}
+
+/**
+ * The TenuredHeap<T> class is similar to the Heap<T> class above in that it
+ * encapsulates the GC concerns of an on-heap reference to a JS object. However,
+ * it has two important differences:
+ *
+ * 1) Pointers which are statically known to only reference "tenured" objects
+ * can avoid the extra overhead of SpiderMonkey's write barriers.
+ *
+ * 2) Objects in the "tenured" heap have stronger alignment restrictions than
+ * those in the "nursery", so it is possible to store flags in the lower
+ * bits of pointers known to be tenured. TenuredHeap wraps a normal tagged
+ * pointer with a nice API for accessing the flag bits and adds various
+ * assertions to ensure that it is not mis-used.
+ *
+ * GC things are said to be "tenured" when they are located in the long-lived
+ * heap: e.g. they have gained tenure as an object by surviving past at least
+ * one GC. For performance, SpiderMonkey allocates some things which are known
+ * to normally be long lived directly into the tenured generation; for example,
+ * global objects. Additionally, SpiderMonkey does not visit individual objects
+ * when deleting non-tenured objects, so object with finalizers are also always
+ * tenured; for instance, this includes most DOM objects.
+ *
+ * The considerations to keep in mind when using a TenuredHeap<T> vs a normal
+ * Heap<T> are:
+ *
+ * - It is invalid for a TenuredHeap<T> to refer to a non-tenured thing.
+ * - It is however valid for a Heap<T> to refer to a tenured thing.
+ * - It is not possible to store flag bits in a Heap<T>.
+ */
+template <typename T>
+class TenuredHeap : public js::HeapBase<T>
+{
+ public:
+ TenuredHeap() : bits(0) {
+ static_assert(sizeof(T) == sizeof(TenuredHeap<T>),
+ "TenuredHeap<T> must be binary compatible with T.");
+ }
+ explicit TenuredHeap(T p) : bits(0) { setPtr(p); }
+ explicit TenuredHeap(const TenuredHeap<T>& p) : bits(0) { setPtr(p.getPtr()); }
+
+ bool operator==(const TenuredHeap<T>& other) { return bits == other.bits; }
+ bool operator!=(const TenuredHeap<T>& other) { return bits != other.bits; }
+
+ void setPtr(T newPtr) {
+ MOZ_ASSERT((reinterpret_cast<uintptr_t>(newPtr) & flagsMask) == 0);
+ if (newPtr)
+ AssertGCThingMustBeTenured(newPtr);
+ bits = (bits & flagsMask) | reinterpret_cast<uintptr_t>(newPtr);
+ }
+
+ void setFlags(uintptr_t flagsToSet) {
+ MOZ_ASSERT((flagsToSet & ~flagsMask) == 0);
+ bits |= flagsToSet;
+ }
+
+ void unsetFlags(uintptr_t flagsToUnset) {
+ MOZ_ASSERT((flagsToUnset & ~flagsMask) == 0);
+ bits &= ~flagsToUnset;
+ }
+
+ bool hasFlag(uintptr_t flag) const {
+ MOZ_ASSERT((flag & ~flagsMask) == 0);
+ return (bits & flag) != 0;
+ }
+
+ T unbarrieredGetPtr() const { return reinterpret_cast<T>(bits & ~flagsMask); }
+ uintptr_t getFlags() const { return bits & flagsMask; }
+
+ void exposeToActiveJS() const {
+ js::BarrierMethods<T>::exposeToJS(unbarrieredGetPtr());
+ }
+ T getPtr() const {
+ exposeToActiveJS();
+ return unbarrieredGetPtr();
+ }
+
+ operator T() const { return getPtr(); }
+ T operator->() const { return getPtr(); }
+
+ explicit operator bool() const {
+ return bool(js::BarrierMethods<T>::asGCThingOrNull(unbarrieredGetPtr()));
+ }
+ explicit operator bool() {
+ return bool(js::BarrierMethods<T>::asGCThingOrNull(unbarrieredGetPtr()));
+ }
+
+ TenuredHeap<T>& operator=(T p) {
+ setPtr(p);
+ return *this;
+ }
+
+ TenuredHeap<T>& operator=(const TenuredHeap<T>& other) {
+ bits = other.bits;
+ return *this;
+ }
+
+ private:
+ enum {
+ maskBits = 3,
+ flagsMask = (1 << maskBits) - 1,
+ };
+
+ uintptr_t bits;
+};
+
+/**
+ * Reference to a T that has been rooted elsewhere. This is most useful
+ * as a parameter type, which guarantees that the T lvalue is properly
+ * rooted. See "Move GC Stack Rooting" above.
+ *
+ * If you want to add additional methods to Handle for a specific
+ * specialization, define a HandleBase<T> specialization containing them.
+ */
+template <typename T>
+class MOZ_NONHEAP_CLASS Handle : public js::HandleBase<T>
+{
+ friend class JS::MutableHandle<T>;
+
+ public:
+ /* Creates a handle from a handle of a type convertible to T. */
+ template <typename S>
+ MOZ_IMPLICIT Handle(Handle<S> handle,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0)
+ {
+ static_assert(sizeof(Handle<T>) == sizeof(T*),
+ "Handle must be binary compatible with T*.");
+ ptr = reinterpret_cast<const T*>(handle.address());
+ }
+
+ MOZ_IMPLICIT Handle(decltype(nullptr)) {
+ static_assert(mozilla::IsPointer<T>::value,
+ "nullptr_t overload not valid for non-pointer types");
+ ptr = reinterpret_cast<const T*>(&js::ConstNullValue);
+ }
+
+ MOZ_IMPLICIT Handle(MutableHandle<T> handle) {
+ ptr = handle.address();
+ }
+
+ /*
+ * Take care when calling this method!
+ *
+ * This creates a Handle from the raw location of a T.
+ *
+ * It should be called only if the following conditions hold:
+ *
+ * 1) the location of the T is guaranteed to be marked (for some reason
+ * other than being a Rooted), e.g., if it is guaranteed to be reachable
+ * from an implicit root.
+ *
+ * 2) the contents of the location are immutable, or at least cannot change
+ * for the lifetime of the handle, as its users may not expect its value
+ * to change underneath them.
+ */
+ static constexpr Handle fromMarkedLocation(const T* p) {
+ return Handle(p, DeliberatelyChoosingThisOverload,
+ ImUsingThisOnlyInFromFromMarkedLocation);
+ }
+
+ /*
+ * Construct a handle from an explicitly rooted location. This is the
+ * normal way to create a handle, and normally happens implicitly.
+ */
+ template <typename S>
+ inline
+ MOZ_IMPLICIT Handle(const Rooted<S>& root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
+
+ template <typename S>
+ inline
+ MOZ_IMPLICIT Handle(const PersistentRooted<S>& root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
+
+ /* Construct a read only handle from a mutable handle. */
+ template <typename S>
+ inline
+ MOZ_IMPLICIT Handle(MutableHandle<S>& root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy = 0);
+
+ DECLARE_POINTER_COMPARISON_OPS(T);
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr);
+
+ private:
+ Handle() {}
+ DELETE_ASSIGNMENT_OPS(Handle, T);
+
+ enum Disambiguator { DeliberatelyChoosingThisOverload = 42 };
+ enum CallerIdentity { ImUsingThisOnlyInFromFromMarkedLocation = 17 };
+ constexpr Handle(const T* p, Disambiguator, CallerIdentity) : ptr(p) {}
+
+ const T* ptr;
+};
+
+/**
+ * Similar to a handle, but the underlying storage can be changed. This is
+ * useful for outparams.
+ *
+ * If you want to add additional methods to MutableHandle for a specific
+ * specialization, define a MutableHandleBase<T> specialization containing
+ * them.
+ */
+template <typename T>
+class MOZ_STACK_CLASS MutableHandle : public js::MutableHandleBase<T>
+{
+ public:
+ inline MOZ_IMPLICIT MutableHandle(Rooted<T>* root);
+ inline MOZ_IMPLICIT MutableHandle(PersistentRooted<T>* root);
+
+ private:
+ // Disallow nullptr for overloading purposes.
+ MutableHandle(decltype(nullptr)) = delete;
+
+ public:
+ void set(const T& v) {
+ *ptr = v;
+ }
+ void set(T&& v) {
+ *ptr = mozilla::Move(v);
+ }
+
+ /*
+ * This may be called only if the location of the T is guaranteed
+ * to be marked (for some reason other than being a Rooted),
+ * e.g., if it is guaranteed to be reachable from an implicit root.
+ *
+ * Create a MutableHandle from a raw location of a T.
+ */
+ static MutableHandle fromMarkedLocation(T* p) {
+ MutableHandle h;
+ h.ptr = p;
+ return h;
+ }
+
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr);
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(*ptr);
+
+ private:
+ MutableHandle() {}
+ DELETE_ASSIGNMENT_OPS(MutableHandle, T);
+
+ T* ptr;
+};
+
+} /* namespace JS */
+
+namespace js {
+
+template <typename T>
+struct BarrierMethods<T*>
+{
+ static T* initial() { return nullptr; }
+ static gc::Cell* asGCThingOrNull(T* v) {
+ if (!v)
+ return nullptr;
+ MOZ_ASSERT(uintptr_t(v) > 32);
+ return reinterpret_cast<gc::Cell*>(v);
+ }
+ static void postBarrier(T** vp, T* prev, T* next) {
+ if (next)
+ JS::AssertGCThingIsNotAnObjectSubclass(reinterpret_cast<js::gc::Cell*>(next));
+ }
+ static void exposeToJS(T* t) {
+ if (t)
+ js::gc::ExposeGCThingToActiveJS(JS::GCCellPtr(t));
+ }
+};
+
+template <>
+struct BarrierMethods<JSObject*>
+{
+ static JSObject* initial() { return nullptr; }
+ static gc::Cell* asGCThingOrNull(JSObject* v) {
+ if (!v)
+ return nullptr;
+ MOZ_ASSERT(uintptr_t(v) > 32);
+ return reinterpret_cast<gc::Cell*>(v);
+ }
+ static void postBarrier(JSObject** vp, JSObject* prev, JSObject* next) {
+ JS::HeapObjectPostBarrier(vp, prev, next);
+ }
+ static void exposeToJS(JSObject* obj) {
+ if (obj)
+ JS::ExposeObjectToActiveJS(obj);
+ }
+};
+
+template <>
+struct BarrierMethods<JSFunction*>
+{
+ static JSFunction* initial() { return nullptr; }
+ static gc::Cell* asGCThingOrNull(JSFunction* v) {
+ if (!v)
+ return nullptr;
+ MOZ_ASSERT(uintptr_t(v) > 32);
+ return reinterpret_cast<gc::Cell*>(v);
+ }
+ static void postBarrier(JSFunction** vp, JSFunction* prev, JSFunction* next) {
+ JS::HeapObjectPostBarrier(reinterpret_cast<JSObject**>(vp),
+ reinterpret_cast<JSObject*>(prev),
+ reinterpret_cast<JSObject*>(next));
+ }
+ static void exposeToJS(JSFunction* fun) {
+ if (fun)
+ JS::ExposeObjectToActiveJS(reinterpret_cast<JSObject*>(fun));
+ }
+};
+
+// Provide hash codes for Cell kinds that may be relocated and, thus, not have
+// a stable address to use as the base for a hash code. Instead of the address,
+// this hasher uses Cell::getUniqueId to provide exact matches and as a base
+// for generating hash codes.
+//
+// Note: this hasher, like PointerHasher can "hash" a nullptr. While a nullptr
+// would not likely be a useful key, there are some cases where being able to
+// hash a nullptr is useful, either on purpose or because of bugs:
+// (1) existence checks where the key may happen to be null and (2) some
+// aggregate Lookup kinds embed a JSObject* that is frequently null and do not
+// null test before dispatching to the hasher.
+template <typename T>
+struct JS_PUBLIC_API(MovableCellHasher)
+{
+ using Key = T;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l);
+ static bool ensureHash(const Lookup& l);
+ static HashNumber hash(const Lookup& l);
+ static bool match(const Key& k, const Lookup& l);
+ static void rekey(Key& k, const Key& newKey) { k = newKey; }
+};
+
+template <typename T>
+struct JS_PUBLIC_API(MovableCellHasher<JS::Heap<T>>)
+{
+ using Key = JS::Heap<T>;
+ using Lookup = T;
+
+ static bool hasHash(const Lookup& l) { return MovableCellHasher<T>::hasHash(l); }
+ static bool ensureHash(const Lookup& l) { return MovableCellHasher<T>::ensureHash(l); }
+ static HashNumber hash(const Lookup& l) { return MovableCellHasher<T>::hash(l); }
+ static bool match(const Key& k, const Lookup& l) {
+ return MovableCellHasher<T>::match(k.unbarrieredGet(), l);
+ }
+ static void rekey(Key& k, const Key& newKey) { k.unsafeSet(newKey); }
+};
+
+template <typename T>
+struct FallibleHashMethods<MovableCellHasher<T>>
+{
+ template <typename Lookup> static bool hasHash(Lookup&& l) {
+ return MovableCellHasher<T>::hasHash(mozilla::Forward<Lookup>(l));
+ }
+ template <typename Lookup> static bool ensureHash(Lookup&& l) {
+ return MovableCellHasher<T>::ensureHash(mozilla::Forward<Lookup>(l));
+ }
+};
+
+} /* namespace js */
+
+namespace js {
+
+// The alignment must be set because the Rooted and PersistentRooted ptr fields
+// may be accessed through reinterpret_cast<Rooted<ConcreteTraceable>*>, and
+// the compiler may choose a different alignment for the ptr field when it
+// knows the actual type stored in DispatchWrapper<T>.
+//
+// It would make more sense to align only those specific fields of type
+// DispatchWrapper, rather than DispatchWrapper itself, but that causes MSVC to
+// fail when Rooted is used in an IsConvertible test.
+template <typename T>
+class alignas(8) DispatchWrapper
+{
+ static_assert(JS::MapTypeToRootKind<T>::kind == JS::RootKind::Traceable,
+ "DispatchWrapper is intended only for usage with a Traceable");
+
+ using TraceFn = void (*)(JSTracer*, T*, const char*);
+ TraceFn tracer;
+ alignas(gc::CellSize) T storage;
+
+ public:
+ template <typename U>
+ MOZ_IMPLICIT DispatchWrapper(U&& initial)
+ : tracer(&JS::GCPolicy<T>::trace),
+ storage(mozilla::Forward<U>(initial))
+ { }
+
+ // Mimic a pointer type, so that we can drop into Rooted.
+ T* operator &() { return &storage; }
+ const T* operator &() const { return &storage; }
+ operator T&() { return storage; }
+ operator const T&() const { return storage; }
+
+ // Trace the contained storage (of unknown type) using the trace function
+ // we set aside when we did know the type.
+ static void TraceWrapped(JSTracer* trc, T* thingp, const char* name) {
+ auto wrapper = reinterpret_cast<DispatchWrapper*>(
+ uintptr_t(thingp) - offsetof(DispatchWrapper, storage));
+ wrapper->tracer(trc, &wrapper->storage, name);
+ }
+};
+
+} /* namespace js */
+
+namespace JS {
+
+/**
+ * Local variable of type T whose value is always rooted. This is typically
+ * used for local variables, or for non-rooted values being passed to a
+ * function that requires a handle, e.g. Foo(Root<T>(cx, x)).
+ *
+ * If you want to add additional methods to Rooted for a specific
+ * specialization, define a RootedBase<T> specialization containing them.
+ */
+template <typename T>
+class MOZ_RAII Rooted : public js::RootedBase<T>
+{
+ inline void registerWithRootLists(js::RootedListHeads& roots) {
+ this->stack = &roots[JS::MapTypeToRootKind<T>::kind];
+ this->prev = *stack;
+ *stack = reinterpret_cast<Rooted<void*>*>(this);
+ }
+
+ inline js::RootedListHeads& rootLists(JS::RootingContext* cx) {
+ return rootLists(static_cast<js::ContextFriendFields*>(cx));
+ }
+ inline js::RootedListHeads& rootLists(js::ContextFriendFields* cx) {
+ if (JS::Zone* zone = cx->zone_)
+ return JS::shadow::Zone::asShadowZone(zone)->stackRoots_;
+ MOZ_ASSERT(cx->isJSContext);
+ return cx->roots.stackRoots_;
+ }
+ inline js::RootedListHeads& rootLists(JSContext* cx) {
+ return rootLists(js::ContextFriendFields::get(cx));
+ }
+
+ public:
+ template <typename RootingContext>
+ explicit Rooted(const RootingContext& cx)
+ : ptr(GCPolicy<T>::initial())
+ {
+ registerWithRootLists(rootLists(cx));
+ }
+
+ template <typename RootingContext, typename S>
+ Rooted(const RootingContext& cx, S&& initial)
+ : ptr(mozilla::Forward<S>(initial))
+ {
+ registerWithRootLists(rootLists(cx));
+ }
+
+ ~Rooted() {
+ MOZ_ASSERT(*stack == reinterpret_cast<Rooted<void*>*>(this));
+ *stack = prev;
+ }
+
+ Rooted<T>* previous() { return reinterpret_cast<Rooted<T>*>(prev); }
+
+ /*
+ * This method is public for Rooted so that Codegen.py can use a Rooted
+ * interchangeably with a MutableHandleValue.
+ */
+ void set(const T& value) {
+ ptr = value;
+ }
+ void set(T&& value) {
+ ptr = mozilla::Move(value);
+ }
+
+ DECLARE_POINTER_COMPARISON_OPS(T);
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_POINTER_ASSIGN_OPS(Rooted, T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr);
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr);
+
+ private:
+ /*
+ * These need to be templated on void* to avoid aliasing issues between, for
+ * example, Rooted<JSObject> and Rooted<JSFunction>, which use the same
+ * stack head pointer for different classes.
+ */
+ Rooted<void*>** stack;
+ Rooted<void*>* prev;
+
+ /*
+ * For pointer types, the TraceKind for tracing is based on the list it is
+ * in (selected via MapTypeToRootKind), so no additional storage is
+ * required here. Non-pointer types, however, share the same list, so the
+ * function to call for tracing is stored adjacent to the struct. Since C++
+ * cannot templatize on storage class, this is implemented via the wrapper
+ * class DispatchWrapper.
+ */
+ using MaybeWrapped = typename mozilla::Conditional<
+ MapTypeToRootKind<T>::kind == JS::RootKind::Traceable,
+ js::DispatchWrapper<T>,
+ T>::Type;
+ MaybeWrapped ptr;
+
+ Rooted(const Rooted&) = delete;
+} JS_HAZ_ROOTED;
+
+} /* namespace JS */
+
+namespace js {
+
+/**
+ * Augment the generic Rooted<T> interface when T = JSObject* with
+ * class-querying and downcasting operations.
+ *
+ * Given a Rooted<JSObject*> obj, one can view
+ * Handle<StringObject*> h = obj.as<StringObject*>();
+ * as an optimization of
+ * Rooted<StringObject*> rooted(cx, &obj->as<StringObject*>());
+ * Handle<StringObject*> h = rooted;
+ */
+template <>
+class RootedBase<JSObject*>
+{
+ public:
+ template <class U>
+ JS::Handle<U*> as() const;
+};
+
+/**
+ * Augment the generic Handle<T> interface when T = JSObject* with
+ * downcasting operations.
+ *
+ * Given a Handle<JSObject*> obj, one can view
+ * Handle<StringObject*> h = obj.as<StringObject*>();
+ * as an optimization of
+ * Rooted<StringObject*> rooted(cx, &obj->as<StringObject*>());
+ * Handle<StringObject*> h = rooted;
+ */
+template <>
+class HandleBase<JSObject*>
+{
+ public:
+ template <class U>
+ JS::Handle<U*> as() const;
+};
+
+/** Interface substitute for Rooted<T> which does not root the variable's memory. */
+template <typename T>
+class MOZ_RAII FakeRooted : public RootedBase<T>
+{
+ public:
+ template <typename CX>
+ explicit FakeRooted(CX* cx) : ptr(JS::GCPolicy<T>::initial()) {}
+
+ template <typename CX>
+ FakeRooted(CX* cx, T initial) : ptr(initial) {}
+
+ DECLARE_POINTER_COMPARISON_OPS(T);
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_POINTER_ASSIGN_OPS(FakeRooted, T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr);
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(ptr);
+
+ private:
+ T ptr;
+
+ void set(const T& value) {
+ ptr = value;
+ }
+
+ FakeRooted(const FakeRooted&) = delete;
+};
+
+/** Interface substitute for MutableHandle<T> which is not required to point to rooted memory. */
+template <typename T>
+class FakeMutableHandle : public js::MutableHandleBase<T>
+{
+ public:
+ MOZ_IMPLICIT FakeMutableHandle(T* t) {
+ ptr = t;
+ }
+
+ MOZ_IMPLICIT FakeMutableHandle(FakeRooted<T>* root) {
+ ptr = root->address();
+ }
+
+ void set(const T& v) {
+ *ptr = v;
+ }
+
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(*ptr);
+ DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS(*ptr);
+
+ private:
+ FakeMutableHandle() {}
+ DELETE_ASSIGNMENT_OPS(FakeMutableHandle, T);
+
+ T* ptr;
+};
+
+/**
+ * Types for a variable that either should or shouldn't be rooted, depending on
+ * the template parameter allowGC. Used for implementing functions that can
+ * operate on either rooted or unrooted data.
+ *
+ * The toHandle() and toMutableHandle() functions are for calling functions
+ * which require handle types and are only called in the CanGC case. These
+ * allow the calling code to type check.
+ */
+enum AllowGC {
+ NoGC = 0,
+ CanGC = 1
+};
+template <typename T, AllowGC allowGC>
+class MaybeRooted
+{
+};
+
+template <typename T> class MaybeRooted<T, CanGC>
+{
+ public:
+ typedef JS::Handle<T> HandleType;
+ typedef JS::Rooted<T> RootType;
+ typedef JS::MutableHandle<T> MutableHandleType;
+
+ static inline JS::Handle<T> toHandle(HandleType v) {
+ return v;
+ }
+
+ static inline JS::MutableHandle<T> toMutableHandle(MutableHandleType v) {
+ return v;
+ }
+
+ template <typename T2>
+ static inline JS::Handle<T2*> downcastHandle(HandleType v) {
+ return v.template as<T2>();
+ }
+};
+
+template <typename T> class MaybeRooted<T, NoGC>
+{
+ public:
+ typedef const T& HandleType;
+ typedef FakeRooted<T> RootType;
+ typedef FakeMutableHandle<T> MutableHandleType;
+
+ static JS::Handle<T> toHandle(HandleType v) {
+ MOZ_CRASH("Bad conversion");
+ }
+
+ static JS::MutableHandle<T> toMutableHandle(MutableHandleType v) {
+ MOZ_CRASH("Bad conversion");
+ }
+
+ template <typename T2>
+ static inline T2* downcastHandle(HandleType v) {
+ return &v->template as<T2>();
+ }
+};
+
+} /* namespace js */
+
+namespace JS {
+
+template <typename T> template <typename S>
+inline
+Handle<T>::Handle(const Rooted<S>& root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
+{
+ ptr = reinterpret_cast<const T*>(root.address());
+}
+
+template <typename T> template <typename S>
+inline
+Handle<T>::Handle(const PersistentRooted<S>& root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
+{
+ ptr = reinterpret_cast<const T*>(root.address());
+}
+
+template <typename T> template <typename S>
+inline
+Handle<T>::Handle(MutableHandle<S>& root,
+ typename mozilla::EnableIf<mozilla::IsConvertible<S, T>::value, int>::Type dummy)
+{
+ ptr = reinterpret_cast<const T*>(root.address());
+}
+
+template <typename T>
+inline
+MutableHandle<T>::MutableHandle(Rooted<T>* root)
+{
+ static_assert(sizeof(MutableHandle<T>) == sizeof(T*),
+ "MutableHandle must be binary compatible with T*.");
+ ptr = root->address();
+}
+
+template <typename T>
+inline
+MutableHandle<T>::MutableHandle(PersistentRooted<T>* root)
+{
+ static_assert(sizeof(MutableHandle<T>) == sizeof(T*),
+ "MutableHandle must be binary compatible with T*.");
+ ptr = root->address();
+}
+
+/**
+ * A copyable, assignable global GC root type with arbitrary lifetime, an
+ * infallible constructor, and automatic unrooting on destruction.
+ *
+ * These roots can be used in heap-allocated data structures, so they are not
+ * associated with any particular JSContext or stack. They are registered with
+ * the JSRuntime itself, without locking, so they require a full JSContext to be
+ * initialized, not one of its more restricted superclasses. Initialization may
+ * take place on construction, or in two phases if the no-argument constructor
+ * is called followed by init().
+ *
+ * Note that you must not use a PersistentRooted in an object owned by a JS
+ * object:
+ *
+ * Whenever one object whose lifetime is decided by the GC refers to another
+ * such object, that edge must be traced only if the owning JS object is traced.
+ * This applies not only to JS objects (which obviously are managed by the GC)
+ * but also to C++ objects owned by JS objects.
+ *
+ * If you put a PersistentRooted in such a C++ object, that is almost certainly
+ * a leak. When a GC begins, the referent of the PersistentRooted is treated as
+ * live, unconditionally (because a PersistentRooted is a *root*), even if the
+ * JS object that owns it is unreachable. If there is any path from that
+ * referent back to the JS object, then the C++ object containing the
+ * PersistentRooted will not be destructed, and the whole blob of objects will
+ * not be freed, even if there are no references to them from the outside.
+ *
+ * In the context of Firefox, this is a severe restriction: almost everything in
+ * Firefox is owned by some JS object or another, so using PersistentRooted in
+ * such objects would introduce leaks. For these kinds of edges, Heap<T> or
+ * TenuredHeap<T> would be better types. It's up to the implementor of the type
+ * containing Heap<T> or TenuredHeap<T> members to make sure their referents get
+ * marked when the object itself is marked.
+ */
+template<typename T>
+class PersistentRooted : public js::PersistentRootedBase<T>,
+ private mozilla::LinkedListElement<PersistentRooted<T>>
+{
+ using ListBase = mozilla::LinkedListElement<PersistentRooted<T>>;
+
+ friend class mozilla::LinkedList<PersistentRooted>;
+ friend class mozilla::LinkedListElement<PersistentRooted>;
+
+ void registerWithRootLists(js::RootLists& roots) {
+ MOZ_ASSERT(!initialized());
+ JS::RootKind kind = JS::MapTypeToRootKind<T>::kind;
+ roots.heapRoots_[kind].insertBack(reinterpret_cast<JS::PersistentRooted<void*>*>(this));
+ }
+
+ js::RootLists& rootLists(JSContext* cx) {
+ return rootLists(JS::RootingContext::get(cx));
+ }
+ js::RootLists& rootLists(JS::RootingContext* cx) {
+ MOZ_ASSERT(cx->isJSContext);
+ return cx->roots;
+ }
+
+ // Disallow ExclusiveContext*.
+ js::RootLists& rootLists(js::ContextFriendFields* cx) = delete;
+
+ public:
+ PersistentRooted() : ptr(GCPolicy<T>::initial()) {}
+
+ template <typename RootingContext>
+ explicit PersistentRooted(const RootingContext& cx)
+ : ptr(GCPolicy<T>::initial())
+ {
+ registerWithRootLists(rootLists(cx));
+ }
+
+ template <typename RootingContext, typename U>
+ PersistentRooted(const RootingContext& cx, U&& initial)
+ : ptr(mozilla::Forward<U>(initial))
+ {
+ registerWithRootLists(rootLists(cx));
+ }
+
+ PersistentRooted(const PersistentRooted& rhs)
+ : mozilla::LinkedListElement<PersistentRooted<T>>(),
+ ptr(rhs.ptr)
+ {
+ /*
+ * Copy construction takes advantage of the fact that the original
+ * is already inserted, and simply adds itself to whatever list the
+ * original was on - no JSRuntime pointer needed.
+ *
+ * This requires mutating rhs's links, but those should be 'mutable'
+ * anyway. C++ doesn't let us declare mutable base classes.
+ */
+ const_cast<PersistentRooted&>(rhs).setNext(this);
+ }
+
+ bool initialized() {
+ return ListBase::isInList();
+ }
+
+ template <typename RootingContext>
+ void init(const RootingContext& cx) {
+ init(cx, GCPolicy<T>::initial());
+ }
+
+ template <typename RootingContext, typename U>
+ void init(const RootingContext& cx, U&& initial) {
+ ptr = mozilla::Forward<U>(initial);
+ registerWithRootLists(rootLists(cx));
+ }
+
+ void reset() {
+ if (initialized()) {
+ set(GCPolicy<T>::initial());
+ ListBase::remove();
+ }
+ }
+
+ DECLARE_POINTER_COMPARISON_OPS(T);
+ DECLARE_POINTER_CONSTREF_OPS(T);
+ DECLARE_POINTER_ASSIGN_OPS(PersistentRooted, T);
+ DECLARE_NONPOINTER_ACCESSOR_METHODS(ptr);
+
+ // These are the same as DECLARE_NONPOINTER_MUTABLE_ACCESSOR_METHODS, except
+ // they check that |this| is initialized in case the caller later stores
+ // something in |ptr|.
+ T* address() {
+ MOZ_ASSERT(initialized());
+ return &ptr;
+ }
+ T& get() {
+ MOZ_ASSERT(initialized());
+ return ptr;
+ }
+
+ private:
+ template <typename U>
+ void set(U&& value) {
+ MOZ_ASSERT(initialized());
+ ptr = mozilla::Forward<U>(value);
+ }
+
+ // See the comment above Rooted::ptr.
+ using MaybeWrapped = typename mozilla::Conditional<
+ MapTypeToRootKind<T>::kind == JS::RootKind::Traceable,
+ js::DispatchWrapper<T>,
+ T>::Type;
+ MaybeWrapped ptr;
+} JS_HAZ_ROOTED;
+
+class JS_PUBLIC_API(ObjectPtr)
+{
+ Heap<JSObject*> value;
+
+ public:
+ ObjectPtr() : value(nullptr) {}
+
+ explicit ObjectPtr(JSObject* obj) : value(obj) {}
+
+ ObjectPtr(const ObjectPtr& other) : value(other.value) {}
+
+ ObjectPtr(ObjectPtr&& other)
+ : value(other.value)
+ {
+ other.value = nullptr;
+ }
+
+ /* Always call finalize before the destructor. */
+ ~ObjectPtr() { MOZ_ASSERT(!value); }
+
+ void finalize(JSRuntime* rt);
+ void finalize(JSContext* cx);
+
+ void init(JSObject* obj) { value = obj; }
+
+ JSObject* get() const { return value; }
+ JSObject* unbarrieredGet() const { return value.unbarrieredGet(); }
+
+ void writeBarrierPre(JSContext* cx) {
+ IncrementalObjectBarrier(value);
+ }
+
+ void updateWeakPointerAfterGC();
+
+ ObjectPtr& operator=(JSObject* obj) {
+ IncrementalObjectBarrier(value);
+ value = obj;
+ return *this;
+ }
+
+ void trace(JSTracer* trc, const char* name);
+
+ JSObject& operator*() const { return *value; }
+ JSObject* operator->() const { return value; }
+ operator JSObject*() const { return value; }
+
+ explicit operator bool() const { return value.unbarrieredGet(); }
+ explicit operator bool() { return value.unbarrieredGet(); }
+};
+
+} /* namespace JS */
+
+namespace js {
+
+template <typename Outer, typename T, typename D>
+class UniquePtrOperations
+{
+ const UniquePtr<T, D>& uniquePtr() const { return static_cast<const Outer*>(this)->get(); }
+
+ public:
+ explicit operator bool() const { return !!uniquePtr(); }
+ T* get() const { return uniquePtr().get(); }
+ T* operator->() const { return get(); }
+ T& operator*() const { return *uniquePtr(); }
+};
+
+template <typename Outer, typename T, typename D>
+class MutableUniquePtrOperations : public UniquePtrOperations<Outer, T, D>
+{
+ UniquePtr<T, D>& uniquePtr() { return static_cast<Outer*>(this)->get(); }
+
+ public:
+ MOZ_MUST_USE typename UniquePtr<T, D>::Pointer release() { return uniquePtr().release(); }
+ void reset(T* ptr = T()) { uniquePtr().reset(ptr); }
+};
+
+template <typename T, typename D>
+class RootedBase<UniquePtr<T, D>>
+ : public MutableUniquePtrOperations<JS::Rooted<UniquePtr<T, D>>, T, D>
+{ };
+
+template <typename T, typename D>
+class MutableHandleBase<UniquePtr<T, D>>
+ : public MutableUniquePtrOperations<JS::MutableHandle<UniquePtr<T, D>>, T, D>
+{ };
+
+template <typename T, typename D>
+class HandleBase<UniquePtr<T, D>>
+ : public UniquePtrOperations<JS::Handle<UniquePtr<T, D>>, T, D>
+{ };
+
+template <typename T, typename D>
+class PersistentRootedBase<UniquePtr<T, D>>
+ : public MutableUniquePtrOperations<JS::PersistentRooted<UniquePtr<T, D>>, T, D>
+{ };
+
+namespace gc {
+
+template <typename T, typename TraceCallbacks>
+void
+CallTraceCallbackOnNonHeap(T* v, const TraceCallbacks& aCallbacks, const char* aName, void* aClosure)
+{
+ static_assert(sizeof(T) == sizeof(JS::Heap<T>), "T and Heap<T> must be compatible.");
+ MOZ_ASSERT(v);
+ mozilla::DebugOnly<Cell*> cell = BarrierMethods<T>::asGCThingOrNull(*v);
+ MOZ_ASSERT(cell);
+ MOZ_ASSERT(!IsInsideNursery(cell));
+ JS::Heap<T>* asHeapT = reinterpret_cast<JS::Heap<T>*>(v);
+ aCallbacks.Trace(asHeapT, aName, aClosure);
+}
+
+} /* namespace gc */
+} /* namespace js */
+
+// mozilla::Swap uses a stack temporary, which prevents classes like Heap<T>
+// from being declared MOZ_HEAP_CLASS.
+namespace mozilla {
+
+template <typename T>
+inline void
+Swap(JS::Heap<T>& aX, JS::Heap<T>& aY)
+{
+ T tmp = aX;
+ aX = aY;
+ aY = tmp;
+}
+
+template <typename T>
+inline void
+Swap(JS::TenuredHeap<T>& aX, JS::TenuredHeap<T>& aY)
+{
+ T tmp = aX;
+ aX = aY;
+ aY = tmp;
+}
+
+} /* namespace mozilla */
+
+#undef DELETE_ASSIGNMENT_OPS
+
+#endif /* js_RootingAPI_h */
diff --git a/js/public/SliceBudget.h b/js/public/SliceBudget.h
new file mode 100644
index 0000000000..78982df058
--- /dev/null
+++ b/js/public/SliceBudget.h
@@ -0,0 +1,91 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_SliceBudget_h
+#define js_SliceBudget_h
+
+#include <stdint.h>
+
+namespace js {
+
+struct JS_PUBLIC_API(TimeBudget)
+{
+ int64_t budget;
+
+ explicit TimeBudget(int64_t milliseconds) { budget = milliseconds; }
+};
+
+struct JS_PUBLIC_API(WorkBudget)
+{
+ int64_t budget;
+
+ explicit WorkBudget(int64_t work) { budget = work; }
+};
+
+/*
+ * This class records how much work has been done in a given collection slice,
+ * so that we can return before pausing for too long. Some slices are allowed
+ * to run for unlimited time, and others are bounded. To reduce the number of
+ * gettimeofday calls, we only check the time every 1000 operations.
+ */
+class JS_PUBLIC_API(SliceBudget)
+{
+ static const int64_t unlimitedDeadline = INT64_MAX;
+ static const intptr_t unlimitedStartCounter = INTPTR_MAX;
+
+ bool checkOverBudget();
+
+ SliceBudget();
+
+ public:
+ // Memory of the originally requested budget. If isUnlimited, neither of
+ // these are in use. If deadline==0, then workBudget is valid. Otherwise
+ // timeBudget is valid.
+ TimeBudget timeBudget;
+ WorkBudget workBudget;
+
+ int64_t deadline; /* in microseconds */
+ intptr_t counter;
+
+ static const intptr_t CounterReset = 1000;
+
+ static const int64_t UnlimitedTimeBudget = -1;
+ static const int64_t UnlimitedWorkBudget = -1;
+
+ /* Use to create an unlimited budget. */
+ static SliceBudget unlimited() { return SliceBudget(); }
+
+ /* Instantiate as SliceBudget(TimeBudget(n)). */
+ explicit SliceBudget(TimeBudget time);
+
+ /* Instantiate as SliceBudget(WorkBudget(n)). */
+ explicit SliceBudget(WorkBudget work);
+
+ void makeUnlimited() {
+ deadline = unlimitedDeadline;
+ counter = unlimitedStartCounter;
+ }
+
+ void step(intptr_t amt = 1) {
+ counter -= amt;
+ }
+
+ bool isOverBudget() {
+ if (counter > 0)
+ return false;
+ return checkOverBudget();
+ }
+
+ bool isWorkBudget() const { return deadline == 0; }
+ bool isTimeBudget() const { return deadline > 0 && !isUnlimited(); }
+ bool isUnlimited() const { return deadline == unlimitedDeadline; }
+
+ int describe(char* buffer, size_t maxlen) const;
+};
+
+} // namespace js
+
+#endif /* js_SliceBudget_h */
diff --git a/js/public/StructuredClone.h b/js/public/StructuredClone.h
new file mode 100644
index 0000000000..c48975cb95
--- /dev/null
+++ b/js/public/StructuredClone.h
@@ -0,0 +1,359 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_StructuredClone_h
+#define js_StructuredClone_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/BufferList.h"
+
+#include <stdint.h>
+
+#include "jstypes.h"
+
+#include "js/RootingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/Value.h"
+
+struct JSRuntime;
+struct JSStructuredCloneReader;
+struct JSStructuredCloneWriter;
+
+// API for the HTML5 internal structured cloning algorithm.
+
+namespace JS {
+
+enum class StructuredCloneScope : uint32_t {
+ SameProcessSameThread,
+ SameProcessDifferentThread,
+ DifferentProcess
+};
+
+enum TransferableOwnership {
+ /** Transferable data has not been filled in yet */
+ SCTAG_TMO_UNFILLED = 0,
+
+ /** Structured clone buffer does not yet own the data */
+ SCTAG_TMO_UNOWNED = 1,
+
+ /** All values at least this large are owned by the clone buffer */
+ SCTAG_TMO_FIRST_OWNED = 2,
+
+ /** Data is a pointer that can be freed */
+ SCTAG_TMO_ALLOC_DATA = 2,
+
+ /** Data is a memory mapped pointer */
+ SCTAG_TMO_MAPPED_DATA = 3,
+
+ /**
+ * Data is embedding-specific. The engine can free it by calling the
+ * freeTransfer op. The embedding can also use SCTAG_TMO_USER_MIN and
+ * greater, up to 32 bits, to distinguish specific ownership variants.
+ */
+ SCTAG_TMO_CUSTOM = 4,
+
+ SCTAG_TMO_USER_MIN
+};
+
+class CloneDataPolicy
+{
+ bool sharedArrayBuffer_;
+
+ public:
+ // The default is to allow all policy-controlled aspects.
+
+ CloneDataPolicy() :
+ sharedArrayBuffer_(true)
+ {}
+
+ // In the JS engine, SharedArrayBuffers can only be cloned intra-process
+ // because the shared memory areas are allocated in process-private memory.
+ // Clients should therefore deny SharedArrayBuffers when cloning data that
+ // are to be transmitted inter-process.
+ //
+ // Clients should also deny SharedArrayBuffers when cloning data that are to
+ // be transmitted intra-process if policy needs dictate such denial.
+
+ CloneDataPolicy& denySharedArrayBuffer() {
+ sharedArrayBuffer_ = false;
+ return *this;
+ }
+
+ bool isSharedArrayBufferAllowed() const {
+ return sharedArrayBuffer_;
+ }
+};
+
+} /* namespace JS */
+
+/**
+ * Read structured data from the reader r. This hook is used to read a value
+ * previously serialized by a call to the WriteStructuredCloneOp hook.
+ *
+ * tag and data are the pair of uint32_t values from the header. The callback
+ * may use the JS_Read* APIs to read any other relevant parts of the object
+ * from the reader r. closure is any value passed to the JS_ReadStructuredClone
+ * function. Return the new object on success, nullptr on error/exception.
+ */
+typedef JSObject* (*ReadStructuredCloneOp)(JSContext* cx, JSStructuredCloneReader* r,
+ uint32_t tag, uint32_t data, void* closure);
+
+/**
+ * Structured data serialization hook. The engine can write primitive values,
+ * Objects, Arrays, Dates, RegExps, TypedArrays, ArrayBuffers, Sets, Maps,
+ * and SharedTypedArrays. Any other type of object requires application support.
+ * This callback must first use the JS_WriteUint32Pair API to write an object
+ * header, passing a value greater than JS_SCTAG_USER to the tag parameter.
+ * Then it can use the JS_Write* APIs to write any other relevant parts of
+ * the value v to the writer w. closure is any value passed to the
+ * JS_WriteStructuredClone function.
+ *
+ * Return true on success, false on error/exception.
+ */
+typedef bool (*WriteStructuredCloneOp)(JSContext* cx, JSStructuredCloneWriter* w,
+ JS::HandleObject obj, void* closure);
+
+/**
+ * This is called when JS_WriteStructuredClone is given an invalid transferable.
+ * To follow HTML5, the application must throw a DATA_CLONE_ERR DOMException
+ * with error set to one of the JS_SCERR_* values.
+ */
+typedef void (*StructuredCloneErrorOp)(JSContext* cx, uint32_t errorid);
+
+/**
+ * This is called when JS_ReadStructuredClone receives a transferable object
+ * not known to the engine. If this hook does not exist or returns false, the
+ * JS engine calls the reportError op if set, otherwise it throws a
+ * DATA_CLONE_ERR DOM Exception. This method is called before any other
+ * callback and must return a non-null object in returnObject on success.
+ */
+typedef bool (*ReadTransferStructuredCloneOp)(JSContext* cx, JSStructuredCloneReader* r,
+ uint32_t tag, void* content, uint64_t extraData,
+ void* closure,
+ JS::MutableHandleObject returnObject);
+
+/**
+ * Called when JS_WriteStructuredClone receives a transferable object not
+ * handled by the engine. If this hook does not exist or returns false, the JS
+ * engine will call the reportError hook or fall back to throwing a
+ * DATA_CLONE_ERR DOM Exception. This method is called before any other
+ * callback.
+ *
+ * tag: indicates what type of transferable this is. Must be greater than
+ * 0xFFFF0201 (value of the internal SCTAG_TRANSFER_MAP_PENDING_ENTRY)
+ *
+ * ownership: see TransferableOwnership, above. Used to communicate any needed
+ * ownership info to the FreeTransferStructuredCloneOp.
+ *
+ * content, extraData: what the ReadTransferStructuredCloneOp will receive
+ */
+typedef bool (*TransferStructuredCloneOp)(JSContext* cx,
+ JS::Handle<JSObject*> obj,
+ void* closure,
+ // Output:
+ uint32_t* tag,
+ JS::TransferableOwnership* ownership,
+ void** content,
+ uint64_t* extraData);
+
+/**
+ * Called when freeing an unknown transferable object. Note that it
+ * should never trigger a garbage collection (and will assert in a
+ * debug build if it does.)
+ */
+typedef void (*FreeTransferStructuredCloneOp)(uint32_t tag, JS::TransferableOwnership ownership,
+ void* content, uint64_t extraData, void* closure);
+
+// The maximum supported structured-clone serialization format version.
+// Increment this when anything at all changes in the serialization format.
+// (Note that this does not need to be bumped for Transferable-only changes,
+// since they are never saved to persistent storage.)
+#define JS_STRUCTURED_CLONE_VERSION 8
+
+struct JSStructuredCloneCallbacks {
+ ReadStructuredCloneOp read;
+ WriteStructuredCloneOp write;
+ StructuredCloneErrorOp reportError;
+ ReadTransferStructuredCloneOp readTransfer;
+ TransferStructuredCloneOp writeTransfer;
+ FreeTransferStructuredCloneOp freeTransfer;
+};
+
+enum OwnTransferablePolicy {
+ OwnsTransferablesIfAny,
+ IgnoreTransferablesIfAny,
+ NoTransferables
+};
+
+class MOZ_NON_MEMMOVABLE JS_PUBLIC_API(JSStructuredCloneData) :
+ public mozilla::BufferList<js::SystemAllocPolicy>
+{
+ typedef js::SystemAllocPolicy AllocPolicy;
+ typedef mozilla::BufferList<js::SystemAllocPolicy> BufferList;
+
+ static const size_t kInitialSize = 0;
+ static const size_t kInitialCapacity = 4096;
+ static const size_t kStandardCapacity = 4096;
+
+ const JSStructuredCloneCallbacks* callbacks_;
+ void* closure_;
+ OwnTransferablePolicy ownTransferables_;
+
+ void setOptionalCallbacks(const JSStructuredCloneCallbacks* callbacks,
+ void* closure,
+ OwnTransferablePolicy policy) {
+ callbacks_ = callbacks;
+ closure_ = closure;
+ ownTransferables_ = policy;
+ }
+
+ friend struct JSStructuredCloneWriter;
+ friend class JS_PUBLIC_API(JSAutoStructuredCloneBuffer);
+
+public:
+ explicit JSStructuredCloneData(AllocPolicy aAP = AllocPolicy())
+ : BufferList(kInitialSize, kInitialCapacity, kStandardCapacity, aAP)
+ , callbacks_(nullptr)
+ , closure_(nullptr)
+ , ownTransferables_(OwnTransferablePolicy::NoTransferables)
+ {}
+ MOZ_IMPLICIT JSStructuredCloneData(BufferList&& buffers)
+ : BufferList(Move(buffers))
+ , callbacks_(nullptr)
+ , closure_(nullptr)
+ , ownTransferables_(OwnTransferablePolicy::NoTransferables)
+ {}
+ JSStructuredCloneData(JSStructuredCloneData&& other) = default;
+ JSStructuredCloneData& operator=(JSStructuredCloneData&& other) = default;
+ ~JSStructuredCloneData();
+
+ using BufferList::BufferList;
+};
+
+/** Note: if the *data contains transferable objects, it can be read only once. */
+JS_PUBLIC_API(bool)
+JS_ReadStructuredClone(JSContext* cx, JSStructuredCloneData& data, uint32_t version,
+ JS::StructuredCloneScope scope,
+ JS::MutableHandleValue vp,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure);
+
+JS_PUBLIC_API(bool)
+JS_WriteStructuredClone(JSContext* cx, JS::HandleValue v, JSStructuredCloneData* data,
+ JS::StructuredCloneScope scope,
+ JS::CloneDataPolicy cloneDataPolicy,
+ const JSStructuredCloneCallbacks* optionalCallbacks,
+ void* closure, JS::HandleValue transferable);
+
+JS_PUBLIC_API(bool)
+JS_StructuredCloneHasTransferables(JSStructuredCloneData& data, bool* hasTransferable);
+
+JS_PUBLIC_API(bool)
+JS_StructuredClone(JSContext* cx, JS::HandleValue v, JS::MutableHandleValue vp,
+ const JSStructuredCloneCallbacks* optionalCallbacks, void* closure);
+
+/** RAII sugar for JS_WriteStructuredClone. */
+class JS_PUBLIC_API(JSAutoStructuredCloneBuffer) {
+ const JS::StructuredCloneScope scope_;
+ JSStructuredCloneData data_;
+ uint32_t version_;
+
+ public:
+ JSAutoStructuredCloneBuffer(JS::StructuredCloneScope scope,
+ const JSStructuredCloneCallbacks* callbacks, void* closure)
+ : scope_(scope), version_(JS_STRUCTURED_CLONE_VERSION)
+ {
+ data_.setOptionalCallbacks(callbacks, closure, OwnTransferablePolicy::NoTransferables);
+ }
+
+ JSAutoStructuredCloneBuffer(JSAutoStructuredCloneBuffer&& other);
+ JSAutoStructuredCloneBuffer& operator=(JSAutoStructuredCloneBuffer&& other);
+
+ ~JSAutoStructuredCloneBuffer() { clear(); }
+
+ JSStructuredCloneData& data() { return data_; }
+ bool empty() const { return !data_.Size(); }
+
+ void clear(const JSStructuredCloneCallbacks* optionalCallbacks=nullptr, void* closure=nullptr);
+
+ /** Copy some memory. It will be automatically freed by the destructor. */
+ bool copy(const JSStructuredCloneData& data, uint32_t version=JS_STRUCTURED_CLONE_VERSION,
+ const JSStructuredCloneCallbacks* callbacks=nullptr, void* closure=nullptr);
+
+ /**
+ * Adopt some memory. It will be automatically freed by the destructor.
+ * data must have been allocated by the JS engine (e.g., extracted via
+ * JSAutoStructuredCloneBuffer::steal).
+ */
+ void adopt(JSStructuredCloneData&& data, uint32_t version=JS_STRUCTURED_CLONE_VERSION,
+ const JSStructuredCloneCallbacks* callbacks=nullptr, void* closure=nullptr);
+
+ /**
+ * Release the buffer and transfer ownership to the caller.
+ */
+ void steal(JSStructuredCloneData* data, uint32_t* versionp=nullptr,
+ const JSStructuredCloneCallbacks** callbacks=nullptr, void** closure=nullptr);
+
+ /**
+ * Abandon ownership of any transferable objects stored in the buffer,
+ * without freeing the buffer itself. Useful when copying the data out into
+ * an external container, though note that you will need to use adopt() to
+ * properly release that data eventually.
+ */
+ void abandon() { data_.ownTransferables_ = OwnTransferablePolicy::IgnoreTransferablesIfAny; }
+
+ bool read(JSContext* cx, JS::MutableHandleValue vp,
+ const JSStructuredCloneCallbacks* optionalCallbacks=nullptr, void* closure=nullptr);
+
+ bool write(JSContext* cx, JS::HandleValue v,
+ const JSStructuredCloneCallbacks* optionalCallbacks=nullptr, void* closure=nullptr);
+
+ bool write(JSContext* cx, JS::HandleValue v, JS::HandleValue transferable,
+ JS::CloneDataPolicy cloneDataPolicy,
+ const JSStructuredCloneCallbacks* optionalCallbacks=nullptr, void* closure=nullptr);
+
+ private:
+ // Copy and assignment are not supported.
+ JSAutoStructuredCloneBuffer(const JSAutoStructuredCloneBuffer& other) = delete;
+ JSAutoStructuredCloneBuffer& operator=(const JSAutoStructuredCloneBuffer& other) = delete;
+};
+
+// The range of tag values the application may use for its own custom object types.
+#define JS_SCTAG_USER_MIN ((uint32_t) 0xFFFF8000)
+#define JS_SCTAG_USER_MAX ((uint32_t) 0xFFFFFFFF)
+
+#define JS_SCERR_RECURSION 0
+#define JS_SCERR_TRANSFERABLE 1
+#define JS_SCERR_DUP_TRANSFERABLE 2
+#define JS_SCERR_UNSUPPORTED_TYPE 3
+
+JS_PUBLIC_API(bool)
+JS_ReadUint32Pair(JSStructuredCloneReader* r, uint32_t* p1, uint32_t* p2);
+
+JS_PUBLIC_API(bool)
+JS_ReadBytes(JSStructuredCloneReader* r, void* p, size_t len);
+
+JS_PUBLIC_API(bool)
+JS_ReadTypedArray(JSStructuredCloneReader* r, JS::MutableHandleValue vp);
+
+JS_PUBLIC_API(bool)
+JS_WriteUint32Pair(JSStructuredCloneWriter* w, uint32_t tag, uint32_t data);
+
+JS_PUBLIC_API(bool)
+JS_WriteBytes(JSStructuredCloneWriter* w, const void* p, size_t len);
+
+JS_PUBLIC_API(bool)
+JS_WriteString(JSStructuredCloneWriter* w, JS::HandleString str);
+
+JS_PUBLIC_API(bool)
+JS_WriteTypedArray(JSStructuredCloneWriter* w, JS::HandleValue v);
+
+JS_PUBLIC_API(bool)
+JS_ObjectNotWritten(JSStructuredCloneWriter* w, JS::HandleObject obj);
+
+JS_PUBLIC_API(JS::StructuredCloneScope)
+JS_GetStructuredCloneScope(JSStructuredCloneWriter* w);
+
+#endif /* js_StructuredClone_h */
diff --git a/js/public/SweepingAPI.h b/js/public/SweepingAPI.h
new file mode 100644
index 0000000000..0eb29ae413
--- /dev/null
+++ b/js/public/SweepingAPI.h
@@ -0,0 +1,65 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_SweepingAPI_h
+#define js_SweepingAPI_h
+
+#include "js/HeapAPI.h"
+
+namespace js {
+template <typename T>
+class WeakCacheBase {};
+} // namespace js
+
+namespace JS {
+template <typename T> class WeakCache;
+
+namespace shadow {
+JS_PUBLIC_API(void)
+RegisterWeakCache(JS::Zone* zone, JS::WeakCache<void*>* cachep);
+} // namespace shadow
+
+// A WeakCache stores the given Sweepable container and links itself into a
+// list of such caches that are swept during each GC.
+template <typename T>
+class WeakCache : public js::WeakCacheBase<T>,
+ private mozilla::LinkedListElement<WeakCache<T>>
+{
+ friend class mozilla::LinkedListElement<WeakCache<T>>;
+ friend class mozilla::LinkedList<WeakCache<T>>;
+
+ WeakCache() = delete;
+ WeakCache(const WeakCache&) = delete;
+
+ using SweepFn = void (*)(T*);
+ SweepFn sweeper;
+ T cache;
+
+ public:
+ using Type = T;
+
+ template <typename U>
+ WeakCache(Zone* zone, U&& initial)
+ : cache(mozilla::Forward<U>(initial))
+ {
+ sweeper = GCPolicy<T>::sweep;
+ shadow::RegisterWeakCache(zone, reinterpret_cast<WeakCache<void*>*>(this));
+ }
+ WeakCache(WeakCache&& other)
+ : sweeper(other.sweeper),
+ cache(mozilla::Move(other.cache))
+ {
+ }
+
+ const T& get() const { return cache; }
+ T& get() { return cache; }
+
+ void sweep() { sweeper(&cache); }
+};
+
+} // namespace JS
+
+#endif // js_SweepingAPI_h
diff --git a/js/public/TraceKind.h b/js/public/TraceKind.h
new file mode 100644
index 0000000000..2eda9cb2c1
--- /dev/null
+++ b/js/public/TraceKind.h
@@ -0,0 +1,212 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_TraceKind_h
+#define js_TraceKind_h
+
+#include "mozilla/UniquePtr.h"
+
+#include "js/TypeDecls.h"
+
+// Forward declarations of all the types a TraceKind can denote.
+namespace js {
+class BaseShape;
+class LazyScript;
+class ObjectGroup;
+class Shape;
+class Scope;
+namespace jit {
+class JitCode;
+} // namespace jit
+} // namespace js
+
+namespace JS {
+
+// When tracing a thing, the GC needs to know about the layout of the object it
+// is looking at. There are a fixed number of different layouts that the GC
+// knows about. The "trace kind" is a static map which tells which layout a GC
+// thing has.
+//
+// Although this map is public, the details are completely hidden. Not all of
+// the matching C++ types are exposed, and those that are, are opaque.
+//
+// See Value::gcKind() and JSTraceCallback in Tracer.h for more details.
+enum class TraceKind
+{
+ // These trace kinds have a publicly exposed, although opaque, C++ type.
+ // Note: The order here is determined by our Value packing. Other users
+ // should sort alphabetically, for consistency.
+ Object = 0x00,
+ String = 0x01,
+ Symbol = 0x02,
+ Script = 0x03,
+
+ // Shape details are exposed through JS_TraceShapeCycleCollectorChildren.
+ Shape = 0x04,
+
+ // ObjectGroup details are exposed through JS_TraceObjectGroupCycleCollectorChildren.
+ ObjectGroup = 0x05,
+
+ // The kind associated with a nullptr.
+ Null = 0x06,
+
+ // The following kinds do not have an exposed C++ idiom.
+ BaseShape = 0x0F,
+ JitCode = 0x1F,
+ LazyScript = 0x2F,
+ Scope = 0x3F
+};
+const static uintptr_t OutOfLineTraceKindMask = 0x07;
+static_assert(uintptr_t(JS::TraceKind::BaseShape) & OutOfLineTraceKindMask, "mask bits are set");
+static_assert(uintptr_t(JS::TraceKind::JitCode) & OutOfLineTraceKindMask, "mask bits are set");
+static_assert(uintptr_t(JS::TraceKind::LazyScript) & OutOfLineTraceKindMask, "mask bits are set");
+static_assert(uintptr_t(JS::TraceKind::Scope) & OutOfLineTraceKindMask, "mask bits are set");
+
+// When this header is imported inside SpiderMonkey, the class definitions are
+// available and we can query those definitions to find the correct kind
+// directly from the class hierarchy.
+template <typename T>
+struct MapTypeToTraceKind {
+ static const JS::TraceKind kind = T::TraceKind;
+};
+
+// When this header is used outside SpiderMonkey, the class definitions are not
+// available, so the following table containing all public GC types is used.
+#define JS_FOR_EACH_TRACEKIND(D) \
+ /* PrettyName TypeName AddToCCKind */ \
+ D(BaseShape, js::BaseShape, true) \
+ D(JitCode, js::jit::JitCode, true) \
+ D(LazyScript, js::LazyScript, true) \
+ D(Scope, js::Scope, true) \
+ D(Object, JSObject, true) \
+ D(ObjectGroup, js::ObjectGroup, true) \
+ D(Script, JSScript, true) \
+ D(Shape, js::Shape, true) \
+ D(String, JSString, false) \
+ D(Symbol, JS::Symbol, false)
+
+// Map from all public types to their trace kind.
+#define JS_EXPAND_DEF(name, type, _) \
+ template <> struct MapTypeToTraceKind<type> { \
+ static const JS::TraceKind kind = JS::TraceKind::name; \
+ };
+JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
+#undef JS_EXPAND_DEF
+
+// RootKind is closely related to TraceKind. Whereas TraceKind's indices are
+// laid out for convenient embedding as a pointer tag, the indices of RootKind
+// are designed for use as array keys via EnumeratedArray.
+enum class RootKind : int8_t
+{
+ // These map 1:1 with trace kinds.
+#define EXPAND_ROOT_KIND(name, _0, _1) \
+ name,
+JS_FOR_EACH_TRACEKIND(EXPAND_ROOT_KIND)
+#undef EXPAND_ROOT_KIND
+
+ // These tagged pointers are special-cased for performance.
+ Id,
+ Value,
+
+ // Everything else.
+ Traceable,
+
+ Limit
+};
+
+// Most RootKind correspond directly to a trace kind.
+template <TraceKind traceKind> struct MapTraceKindToRootKind {};
+#define JS_EXPAND_DEF(name, _0, _1) \
+ template <> struct MapTraceKindToRootKind<JS::TraceKind::name> { \
+ static const JS::RootKind kind = JS::RootKind::name; \
+ };
+JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF)
+#undef JS_EXPAND_DEF
+
+// Specify the RootKind for all types. Value and jsid map to special cases;
+// pointer types we can derive directly from the TraceKind; everything else
+// should go in the Traceable list and use GCPolicy<T>::trace for tracing.
+template <typename T>
+struct MapTypeToRootKind {
+ static const JS::RootKind kind = JS::RootKind::Traceable;
+};
+template <typename T>
+struct MapTypeToRootKind<T*> {
+ static const JS::RootKind kind =
+ JS::MapTraceKindToRootKind<JS::MapTypeToTraceKind<T>::kind>::kind;
+};
+template <typename T>
+struct MapTypeToRootKind<mozilla::UniquePtr<T>> {
+ static const JS::RootKind kind = JS::MapTypeToRootKind<T>::kind;
+};
+template <> struct MapTypeToRootKind<JS::Value> {
+ static const JS::RootKind kind = JS::RootKind::Value;
+};
+template <> struct MapTypeToRootKind<jsid> {
+ static const JS::RootKind kind = JS::RootKind::Id;
+};
+template <> struct MapTypeToRootKind<JSFunction*> : public MapTypeToRootKind<JSObject*> {};
+
+// Fortunately, few places in the system need to deal with fully abstract
+// cells. In those places that do, we generally want to move to a layout
+// templated function as soon as possible. This template wraps the upcast
+// for that dispatch.
+//
+// Given a call:
+//
+// DispatchTraceKindTyped(f, thing, traceKind, ... args)
+//
+// Downcast the |void *thing| to the specific type designated by |traceKind|,
+// and pass it to the functor |f| along with |... args|, forwarded. Pass the
+// type designated by |traceKind| as the functor's template argument. The
+// |thing| parameter is optional; without it, we simply pass through |... args|.
+
+// GCC and Clang require an explicit template declaration in front of the
+// specialization of operator() because it is a dependent template. MSVC, on
+// the other hand, gets very confused if we have a |template| token there.
+// The clang-cl front end defines _MSC_VER, but still requires the explicit
+// template declaration, so we must test for __clang__ here as well.
+#if defined(_MSC_VER) && !defined(__clang__)
+# define JS_DEPENDENT_TEMPLATE_HINT
+#else
+# define JS_DEPENDENT_TEMPLATE_HINT template
+#endif
+template <typename F, typename... Args>
+auto
+DispatchTraceKindTyped(F f, JS::TraceKind traceKind, Args&&... args)
+ -> decltype(f. JS_DEPENDENT_TEMPLATE_HINT operator()<JSObject>(mozilla::Forward<Args>(args)...))
+{
+ switch (traceKind) {
+#define JS_EXPAND_DEF(name, type, _) \
+ case JS::TraceKind::name: \
+ return f. JS_DEPENDENT_TEMPLATE_HINT operator()<type>(mozilla::Forward<Args>(args)...);
+ JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
+#undef JS_EXPAND_DEF
+ default:
+ MOZ_CRASH("Invalid trace kind in DispatchTraceKindTyped.");
+ }
+}
+#undef JS_DEPENDENT_TEMPLATE_HINT
+
+template <typename F, typename... Args>
+auto
+DispatchTraceKindTyped(F f, void* thing, JS::TraceKind traceKind, Args&&... args)
+ -> decltype(f(static_cast<JSObject*>(nullptr), mozilla::Forward<Args>(args)...))
+{
+ switch (traceKind) {
+#define JS_EXPAND_DEF(name, type, _) \
+ case JS::TraceKind::name: \
+ return f(static_cast<type*>(thing), mozilla::Forward<Args>(args)...);
+ JS_FOR_EACH_TRACEKIND(JS_EXPAND_DEF);
+#undef JS_EXPAND_DEF
+ default:
+ MOZ_CRASH("Invalid trace kind in DispatchTraceKindTyped.");
+ }
+}
+
+} // namespace JS
+
+#endif // js_TraceKind_h
diff --git a/js/public/TracingAPI.h b/js/public/TracingAPI.h
new file mode 100644
index 0000000000..37c69acad2
--- /dev/null
+++ b/js/public/TracingAPI.h
@@ -0,0 +1,403 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_TracingAPI_h
+#define js_TracingAPI_h
+
+#include "jsalloc.h"
+
+#include "js/HashTable.h"
+#include "js/HeapAPI.h"
+#include "js/TraceKind.h"
+
+class JS_PUBLIC_API(JSTracer);
+
+namespace JS {
+class JS_PUBLIC_API(CallbackTracer);
+template <typename T> class Heap;
+template <typename T> class TenuredHeap;
+
+/** Returns a static string equivalent of |kind|. */
+JS_FRIEND_API(const char*)
+GCTraceKindToAscii(JS::TraceKind kind);
+
+} // namespace JS
+
+// Policy controlling how a tracer treats weak map edges during traversal.
+enum WeakMapTraceKind {
+    /**
+     * Do not trace into weak map keys or values during traversal. Users must
+     * handle weak maps manually.
+     */
+    DoNotTraceWeakMaps,
+
+    /**
+     * Do true ephemeron marking with a weak key lookup marking phase. This is
+     * the default for GCMarker.
+     */
+    ExpandWeakMaps,
+
+    /**
+     * Trace through to all values, irrespective of whether the keys are live
+     * or not. Used for non-marking tracers. This is the default weak map
+     * policy for the JSTracer and CallbackTracer constructors below.
+     */
+    TraceWeakMapValues,
+
+    /**
+     * Trace through to all keys and values, irrespective of whether the keys
+     * are live or not. Used for non-marking tracers.
+     */
+    TraceWeakMapKeysValues
+};
+
+// Base class for all GC graph traversals. Carries the runtime, the weak map
+// policy, and a tag identifying the concrete tracer kind.
+class JS_PUBLIC_API(JSTracer)
+{
+  public:
+    // Return the runtime set on the tracer.
+    JSRuntime* runtime() const { return runtime_; }
+
+    // Return the weak map tracing behavior currently set on this tracer.
+    WeakMapTraceKind weakMapAction() const { return weakMapAction_; }
+
+    enum class TracerKindTag {
+        // Marking path: a tracer used only for marking liveness of cells, not
+        // for moving them. The kind will transition to WeakMarking after
+        // everything reachable by regular edges has been marked.
+        Marking,
+
+        // Same as Marking, except we have now moved on to the "weak marking
+        // phase", in which every marked obj/script is immediately looked up to
+        // see if it is a weak map key (and therefore might require marking its
+        // weak map value).
+        WeakMarking,
+
+        // A tracer that traverses the graph for the purposes of moving objects
+        // from the nursery to the tenured area.
+        Tenuring,
+
+        // General-purpose traversal that invokes a callback on each cell.
+        // Traversing children is the responsibility of the callback.
+        Callback
+    };
+    bool isMarkingTracer() const { return tag_ == TracerKindTag::Marking || tag_ == TracerKindTag::WeakMarking; }
+    bool isWeakMarkingTracer() const { return tag_ == TracerKindTag::WeakMarking; }
+    bool isTenuringTracer() const { return tag_ == TracerKindTag::Tenuring; }
+    bool isCallbackTracer() const { return tag_ == TracerKindTag::Callback; }
+    inline JS::CallbackTracer* asCallbackTracer();
+#ifdef DEBUG
+    // Whether debug builds should validate edges as they are traced.
+    // Const-qualified: this accessor does not mutate the tracer.
+    bool checkEdges() const { return checkEdges_; }
+#endif
+
+  protected:
+    JSTracer(JSRuntime* rt, TracerKindTag tag,
+             WeakMapTraceKind weakTraceKind = TraceWeakMapValues)
+      : runtime_(rt)
+      , weakMapAction_(weakTraceKind)
+#ifdef DEBUG
+      , checkEdges_(true)
+#endif
+      , tag_(tag)
+    {}
+
+#ifdef DEBUG
+    // Set whether to check edges are valid in debug builds.
+    void setCheckEdges(bool check) {
+        checkEdges_ = check;
+    }
+#endif
+
+  private:
+    JSRuntime* runtime_;
+    WeakMapTraceKind weakMapAction_;
+#ifdef DEBUG
+    // Defaults to true; toggled via setCheckEdges().
+    bool checkEdges_;
+#endif
+
+  protected:
+    TracerKindTag tag_;
+};
+
+namespace JS {
+
+class AutoTracingName;
+class AutoTracingIndex;
+class AutoTracingCallback;
+
+// A JSTracer that dispatches each traced edge to a virtual callback, either a
+// typed on*Edge hook or the fully generic onChild.
+class JS_PUBLIC_API(CallbackTracer) : public JSTracer
+{
+  public:
+    CallbackTracer(JSRuntime* rt, WeakMapTraceKind weakTraceKind = TraceWeakMapValues)
+      : JSTracer(rt, JSTracer::TracerKindTag::Callback, weakTraceKind),
+        contextName_(nullptr), contextIndex_(InvalidIndex), contextFunctor_(nullptr)
+    {}
+    CallbackTracer(JSContext* cx, WeakMapTraceKind weakTraceKind = TraceWeakMapValues);
+
+    // Override these methods to receive notification when an edge is visited
+    // with the type contained in the callback. The default implementation
+    // dispatches to the fully-generic onChild implementation, so for cases that
+    // do not care about boxing overhead and do not need the actual edges,
+    // just override the generic onChild.
+    virtual void onObjectEdge(JSObject** objp) { onChild(JS::GCCellPtr(*objp)); }
+    virtual void onStringEdge(JSString** strp) { onChild(JS::GCCellPtr(*strp)); }
+    virtual void onSymbolEdge(JS::Symbol** symp) { onChild(JS::GCCellPtr(*symp)); }
+    virtual void onScriptEdge(JSScript** scriptp) { onChild(JS::GCCellPtr(*scriptp)); }
+    virtual void onShapeEdge(js::Shape** shapep) {
+        onChild(JS::GCCellPtr(*shapep, JS::TraceKind::Shape));
+    }
+    virtual void onObjectGroupEdge(js::ObjectGroup** groupp) {
+        onChild(JS::GCCellPtr(*groupp, JS::TraceKind::ObjectGroup));
+    }
+    virtual void onBaseShapeEdge(js::BaseShape** basep) {
+        onChild(JS::GCCellPtr(*basep, JS::TraceKind::BaseShape));
+    }
+    virtual void onJitCodeEdge(js::jit::JitCode** codep) {
+        onChild(JS::GCCellPtr(*codep, JS::TraceKind::JitCode));
+    }
+    virtual void onLazyScriptEdge(js::LazyScript** lazyp) {
+        onChild(JS::GCCellPtr(*lazyp, JS::TraceKind::LazyScript));
+    }
+    virtual void onScopeEdge(js::Scope** scopep) {
+        onChild(JS::GCCellPtr(*scopep, JS::TraceKind::Scope));
+    }
+
+    // Override this method to receive notification when a node in the GC
+    // heap graph is visited.
+    virtual void onChild(const JS::GCCellPtr& thing) = 0;
+
+    // Access to the tracing context:
+    // When tracing with a JS::CallbackTracer, we invoke the callback with the
+    // edge location and the type of target. This is useful for operating on
+    // the edge in the abstract or on the target thing, satisfying most common
+    // use cases.  However, some tracers need additional detail about the
+    // specific edge that is being traced in order to be useful. Unfortunately,
+    // the raw pointer to the edge that we provide is not enough information to
+    // infer much of anything useful about that edge.
+    //
+    // In order to better support use cases that care in particular about edges
+    // -- as opposed to the target thing -- tracing implementations are
+    // responsible for providing extra context information about each edge they
+    // trace, as it is traced. This contains, at a minimum, an edge name and,
+    // when tracing an array, the index. Further specialization can be achieved
+    // (with some complexity), by associating a functor with the tracer so
+    // that, when requested, the user can generate totally custom edge
+    // descriptions.
+
+    // Returns the current edge's name. It is only valid to call this when
+    // inside the trace callback, however, the edge name will always be set.
+    const char* contextName() const { MOZ_ASSERT(contextName_); return contextName_; }
+
+    // Returns the current edge's index, if marked as part of an array of edges.
+    // This must be called only inside the trace callback. When not tracing an
+    // array, the value will be InvalidIndex.
+    const static size_t InvalidIndex = size_t(-1);
+    size_t contextIndex() const { return contextIndex_; }
+
+    // Build a description of this edge in the heap graph. This call may invoke
+    // the context functor, if set, which may inspect arbitrary areas of the
+    // heap. On the other hand, the description provided by this method may be
+    // substantially more accurate and useful than those provided by only the
+    // contextName and contextIndex.
+    void getTracingEdgeName(char* buffer, size_t bufferSize);
+
+    // The trace implementation may associate a callback with one or more edges
+    // using AutoTracingDetails. This functor is called by getTracingEdgeName
+    // and is responsible for providing a textual representation of the
+    // currently being traced edge. The callback has access to the full heap,
+    // including the currently set tracing context.
+    class ContextFunctor {
+      public:
+        virtual void operator()(CallbackTracer* trc, char* buf, size_t bufsize) = 0;
+    };
+
+#ifdef DEBUG
+    enum class TracerKind { DoNotCare, Moving, GrayBuffering, VerifyTraceProtoAndIface };
+    virtual TracerKind getTracerKind() const { return TracerKind::DoNotCare; }
+#endif
+
+    // In C++, overriding a method hides all methods in the base class with
+    // that name, not just methods with that signature. Thus, the typed edge
+    // methods have to have distinct names to allow us to override them
+    // individually, which is frequently useful if, for example, we want to
+    // process only one type of edge.
+    void dispatchToOnEdge(JSObject** objp) { onObjectEdge(objp); }
+    void dispatchToOnEdge(JSString** strp) { onStringEdge(strp); }
+    void dispatchToOnEdge(JS::Symbol** symp) { onSymbolEdge(symp); }
+    void dispatchToOnEdge(JSScript** scriptp) { onScriptEdge(scriptp); }
+    void dispatchToOnEdge(js::Shape** shapep) { onShapeEdge(shapep); }
+    void dispatchToOnEdge(js::ObjectGroup** groupp) { onObjectGroupEdge(groupp); }
+    void dispatchToOnEdge(js::BaseShape** basep) { onBaseShapeEdge(basep); }
+    void dispatchToOnEdge(js::jit::JitCode** codep) { onJitCodeEdge(codep); }
+    void dispatchToOnEdge(js::LazyScript** lazyp) { onLazyScriptEdge(lazyp); }
+    void dispatchToOnEdge(js::Scope** scopep) { onScopeEdge(scopep); }
+
+  private:
+    // Set/restored by the AutoTracing* RAII guards below, via friendship.
+    friend class AutoTracingName;
+    const char* contextName_;
+
+    friend class AutoTracingIndex;
+    size_t contextIndex_;
+
+    friend class AutoTracingDetails;
+    ContextFunctor* contextFunctor_;
+};
+
+// RAII guard that installs an edge name in the tracer's context for the
+// current edge, restoring the previously set name when it goes out of scope.
+class MOZ_RAII AutoTracingName
+{
+    CallbackTracer* tracer_;
+    const char* previousName_;
+
+  public:
+    AutoTracingName(CallbackTracer* trc, const char* name)
+      : tracer_(trc),
+        previousName_(trc->contextName_)
+    {
+        MOZ_ASSERT(name);
+        tracer_->contextName_ = name;
+    }
+    ~AutoTracingName() {
+        MOZ_ASSERT(tracer_->contextName_);
+        tracer_->contextName_ = previousName_;
+    }
+};
+
+// RAII guard that publishes an array index in the tracer's context while a
+// range of edges is traced. A no-op for non-callback tracers.
+class MOZ_RAII AutoTracingIndex
+{
+    CallbackTracer* tracer_;
+
+  public:
+    explicit AutoTracingIndex(JSTracer* trc, size_t initial = 0)
+      : tracer_(trc->isCallbackTracer() ? trc->asCallbackTracer() : nullptr)
+    {
+        if (!tracer_)
+            return;
+        MOZ_ASSERT(tracer_->contextIndex_ == CallbackTracer::InvalidIndex);
+        tracer_->contextIndex_ = initial;
+    }
+    ~AutoTracingIndex() {
+        if (!tracer_)
+            return;
+        MOZ_ASSERT(tracer_->contextIndex_ != CallbackTracer::InvalidIndex);
+        tracer_->contextIndex_ = CallbackTracer::InvalidIndex;
+    }
+
+    // Advance to the next element of the array being traced.
+    void operator++() {
+        if (!tracer_)
+            return;
+        MOZ_ASSERT(tracer_->contextIndex_ != CallbackTracer::InvalidIndex);
+        ++tracer_->contextIndex_;
+    }
+};
+
+// Set a context callback for the trace callback to use, if it needs a detailed
+// edge description.
+class MOZ_RAII AutoTracingDetails
+{
+    // Null when |trc| is not a CallbackTracer; the guard is then a no-op.
+    CallbackTracer* trc_;
+
+  public:
+    AutoTracingDetails(JSTracer* trc, CallbackTracer::ContextFunctor& func) : trc_(nullptr) {
+        if (trc->isCallbackTracer()) {
+            trc_ = trc->asCallbackTracer();
+            // Nesting is not supported: only one functor may be active.
+            MOZ_ASSERT(trc_->contextFunctor_ == nullptr);
+            trc_->contextFunctor_ = &func;
+        }
+    }
+    ~AutoTracingDetails() {
+        if (trc_) {
+            MOZ_ASSERT(trc_->contextFunctor_);
+            trc_->contextFunctor_ = nullptr;
+        }
+    }
+};
+
+} // namespace JS
+
+// Downcast to CallbackTracer. Only valid when isCallbackTracer() returns
+// true; asserts otherwise in debug builds.
+JS::CallbackTracer*
+JSTracer::asCallbackTracer()
+{
+    MOZ_ASSERT(isCallbackTracer());
+    return static_cast<JS::CallbackTracer*>(this);
+}
+
+namespace JS {
+
+// The JS::TraceEdge family of functions traces the given GC thing reference.
+// This performs the tracing action configured on the given JSTracer: typically
+// calling the JSTracer::callback or marking the thing as live.
+//
+// The argument to JS::TraceEdge is an in-out param: when the function returns,
+// the garbage collector might have moved the GC thing. In this case, the
+// reference passed to JS::TraceEdge will be updated to the thing's new
+// location. Callers of this method are responsible for updating any state that
+// is dependent on the object's address. For example, if the object's address
+// is used as a key in a hashtable, then the object must be removed and
+// re-inserted with the correct hash.
+//
+// Note that while |edgep| must never be null, it is fine for |*edgep| to be
+// nullptr.
+template <typename T>
+extern JS_PUBLIC_API(void)
+TraceEdge(JSTracer* trc, JS::Heap<T>* edgep, const char* name);
+
+// Overload for edges stored in a TenuredHeap<JSObject*>.
+extern JS_PUBLIC_API(void)
+TraceEdge(JSTracer* trc, JS::TenuredHeap<JSObject*>* edgep, const char* name);
+
+// Edges that are always traced as part of root marking do not require
+// incremental barriers. This function allows for marking non-barriered
+// pointers, but asserts that this happens during root marking.
+//
+// Note that while |edgep| must never be null, it is fine for |*edgep| to be
+// nullptr.
+template <typename T>
+extern JS_PUBLIC_API(void)
+UnsafeTraceRoot(JSTracer* trc, T* edgep, const char* name);
+
+// Trace the direct children of |thing|, without tracing |thing| itself.
+extern JS_PUBLIC_API(void)
+TraceChildren(JSTracer* trc, GCCellPtr thing);
+
+// Sets of zones and compartments; CompartmentSet is consumed by
+// TraceIncomingCCWs below.
+using ZoneSet = js::HashSet<Zone*, js::DefaultHasher<Zone*>, js::SystemAllocPolicy>;
+using CompartmentSet = js::HashSet<JSCompartment*, js::DefaultHasher<JSCompartment*>,
+                                   js::SystemAllocPolicy>;
+
+/**
+ * Trace every value within |compartments| that is wrapped by a
+ * cross-compartment wrapper from a compartment that is not an element of
+ * |compartments|.
+ */
+extern JS_PUBLIC_API(void)
+TraceIncomingCCWs(JSTracer* trc, const JS::CompartmentSet& compartments);
+
+} // namespace JS
+
+// Write a human-readable description of |thing| (of trace kind |kind|) into
+// |buf|, using at most |bufsize| bytes. |includeDetails| selects a more
+// verbose description.
+extern JS_PUBLIC_API(void)
+JS_GetTraceThingInfo(char* buf, size_t bufsize, JSTracer* trc,
+                     void* thing, JS::TraceKind kind, bool includeDetails);
+
+namespace js {
+
+// Trace an edge that is not a GC root and is not wrapped in a barriered
+// wrapper for some reason. Prefer JS::TraceEdge for barriered edges.
+//
+// This method does not check if |*edgep| is non-null before tracing through
+// it, so callers must check any nullable pointer before calling this method.
+template <typename T>
+extern JS_PUBLIC_API(void)
+UnsafeTraceManuallyBarrieredEdge(JSTracer* trc, T* edgep, const char* name);
+
+namespace gc {
+
+// Return true if the given edge is not live and is about to be swept.
+template <typename T>
+extern JS_PUBLIC_API(bool)
+EdgeNeedsSweep(JS::Heap<T>* edgep);
+
+// Not part of the public API, but declared here so we can use it in GCPolicy
+// which is. Takes an unbarriered pointer to the thing being queried.
+template <typename T>
+bool
+IsAboutToBeFinalizedUnbarriered(T* thingp);
+
+} // namespace gc
+} // namespace js
+
+#endif /* js_TracingAPI_h */
diff --git a/js/public/TrackedOptimizationInfo.h b/js/public/TrackedOptimizationInfo.h
new file mode 100644
index 0000000000..b697765c9c
--- /dev/null
+++ b/js/public/TrackedOptimizationInfo.h
@@ -0,0 +1,285 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_TrackedOptimizationInfo_h
+#define js_TrackedOptimizationInfo_h
+
+#include "mozilla/Maybe.h"
+
+namespace JS {
+
+// X-macro table of Ion optimization strategies, grouped by operation
+// (GetProp, SetProp, GetElem, SetElem, BinaryArith, inline caches, inlining).
+// Expanded into the TrackedStrategy enum below.
+#define TRACKED_STRATEGY_LIST(_) \
+    _(GetProp_ArgumentsLength) \
+    _(GetProp_ArgumentsCallee) \
+    _(GetProp_InferredConstant) \
+    _(GetProp_Constant) \
+    _(GetProp_NotDefined) \
+    _(GetProp_StaticName) \
+    _(GetProp_SimdGetter) \
+    _(GetProp_TypedObject) \
+    _(GetProp_DefiniteSlot) \
+    _(GetProp_Unboxed) \
+    _(GetProp_CommonGetter) \
+    _(GetProp_InlineAccess) \
+    _(GetProp_Innerize) \
+    _(GetProp_InlineCache) \
+    _(GetProp_SharedCache) \
+    _(GetProp_ModuleNamespace) \
+    \
+    _(SetProp_CommonSetter) \
+    _(SetProp_TypedObject) \
+    _(SetProp_DefiniteSlot) \
+    _(SetProp_Unboxed) \
+    _(SetProp_InlineAccess) \
+    _(SetProp_InlineCache) \
+    \
+    _(GetElem_TypedObject) \
+    _(GetElem_Dense) \
+    _(GetElem_TypedStatic) \
+    _(GetElem_TypedArray) \
+    _(GetElem_String) \
+    _(GetElem_Arguments) \
+    _(GetElem_ArgumentsInlined) \
+    _(GetElem_InlineCache) \
+    \
+    _(SetElem_TypedObject) \
+    _(SetElem_TypedStatic) \
+    _(SetElem_TypedArray) \
+    _(SetElem_Dense) \
+    _(SetElem_Arguments) \
+    _(SetElem_InlineCache) \
+    \
+    _(BinaryArith_Concat) \
+    _(BinaryArith_SpecializedTypes) \
+    _(BinaryArith_SpecializedOnBaselineTypes) \
+    _(BinaryArith_SharedCache) \
+    _(BinaryArith_Call) \
+    \
+    _(InlineCache_OptimizedStub) \
+    \
+    _(Call_Inline)
+
+
+// Ordering is important below. All outcomes before GenericSuccess will be
+// considered failures, and all outcomes after GenericSuccess will be
+// considered successes. Expanded into the TrackedOutcome enum below;
+// presumably kept in sync with TrackedOutcomeString — verify in its
+// implementation.
+#define TRACKED_OUTCOME_LIST(_) \
+    _(GenericFailure) \
+    _(Disabled) \
+    _(NoTypeInfo) \
+    _(NoAnalysisInfo) \
+    _(NoShapeInfo) \
+    _(UnknownObject) \
+    _(UnknownProperties) \
+    _(Singleton) \
+    _(NotSingleton) \
+    _(NotFixedSlot) \
+    _(InconsistentFixedSlot) \
+    _(NotObject) \
+    _(NotStruct) \
+    _(NotUnboxed) \
+    _(NotUndefined) \
+    _(UnboxedConvertedToNative) \
+    _(StructNoField) \
+    _(InconsistentFieldType) \
+    _(InconsistentFieldOffset) \
+    _(NeedsTypeBarrier) \
+    _(InDictionaryMode) \
+    _(NoProtoFound) \
+    _(MultiProtoPaths) \
+    _(NonWritableProperty) \
+    _(ProtoIndexedProps) \
+    _(ArrayBadFlags) \
+    _(ArrayDoubleConversion) \
+    _(ArrayRange) \
+    _(ArraySeenNegativeIndex) \
+    _(TypedObjectHasDetachedBuffer) \
+    _(TypedObjectArrayRange) \
+    _(AccessNotDense) \
+    _(AccessNotSimdObject) \
+    _(AccessNotTypedObject) \
+    _(AccessNotTypedArray) \
+    _(AccessNotString) \
+    _(OperandNotString) \
+    _(OperandNotNumber) \
+    _(OperandNotStringOrNumber) \
+    _(OperandNotSimpleArith) \
+    _(StaticTypedArrayUint32) \
+    _(StaticTypedArrayCantComputeMask) \
+    _(OutOfBounds) \
+    _(GetElemStringNotCached) \
+    _(NonNativeReceiver) \
+    _(IndexType) \
+    _(SetElemNonDenseNonTANotCached) \
+    _(NoSimdJitSupport) \
+    _(SimdTypeNotOptimized) \
+    _(UnknownSimdProperty) \
+    _(NotModuleNamespace) \
+    _(UnknownProperty) \
+    \
+    _(ICOptStub_GenericSuccess) \
+    \
+    _(ICGetPropStub_ReadSlot) \
+    _(ICGetPropStub_CallGetter) \
+    _(ICGetPropStub_ArrayLength) \
+    _(ICGetPropStub_UnboxedRead) \
+    _(ICGetPropStub_UnboxedReadExpando) \
+    _(ICGetPropStub_UnboxedArrayLength) \
+    _(ICGetPropStub_TypedArrayLength) \
+    _(ICGetPropStub_DOMProxyShadowed) \
+    _(ICGetPropStub_DOMProxyUnshadowed) \
+    _(ICGetPropStub_GenericProxy) \
+    _(ICGetPropStub_ArgumentsLength) \
+    \
+    _(ICSetPropStub_Slot) \
+    _(ICSetPropStub_GenericProxy) \
+    _(ICSetPropStub_DOMProxyShadowed) \
+    _(ICSetPropStub_DOMProxyUnshadowed) \
+    _(ICSetPropStub_CallSetter) \
+    _(ICSetPropStub_AddSlot) \
+    _(ICSetPropStub_SetUnboxed) \
+    \
+    _(ICGetElemStub_ReadSlot) \
+    _(ICGetElemStub_CallGetter) \
+    _(ICGetElemStub_ReadUnboxed) \
+    _(ICGetElemStub_Dense) \
+    _(ICGetElemStub_DenseHole) \
+    _(ICGetElemStub_TypedArray) \
+    _(ICGetElemStub_ArgsElementMapped) \
+    _(ICGetElemStub_ArgsElementUnmapped) \
+    \
+    _(ICSetElemStub_Dense) \
+    _(ICSetElemStub_TypedArray) \
+    \
+    _(ICNameStub_ReadSlot) \
+    _(ICNameStub_CallGetter) \
+    _(ICNameStub_TypeOfNoProperty) \
+    \
+    _(CantInlineGeneric) \
+    _(CantInlineNoTarget) \
+    _(CantInlineNotInterpreted) \
+    _(CantInlineNoBaseline) \
+    _(CantInlineLazy) \
+    _(CantInlineNotConstructor) \
+    _(CantInlineClassConstructor) \
+    _(CantInlineDisabledIon) \
+    _(CantInlineTooManyArgs) \
+    _(CantInlineNeedsArgsObj) \
+    _(CantInlineDebuggee) \
+    _(CantInlineUnknownProps) \
+    _(CantInlineExceededDepth) \
+    _(CantInlineExceededTotalBytecodeLength) \
+    _(CantInlineBigCaller) \
+    _(CantInlineBigCallee) \
+    _(CantInlineBigCalleeInlinedBytecodeLength) \
+    _(CantInlineNotHot) \
+    _(CantInlineNotInDispatch) \
+    _(CantInlineUnreachable) \
+    _(CantInlineNativeBadForm) \
+    _(CantInlineNativeBadType) \
+    _(CantInlineNativeNoTemplateObj) \
+    _(CantInlineBound) \
+    _(CantInlineNativeNoSpecialization) \
+    _(HasCommonInliningPath) \
+    \
+    _(GenericSuccess) \
+    _(Inlined) \
+    _(DOM) \
+    _(Monomorphic) \
+    _(Polymorphic)
+
+// X-macro table of sites at which type information is observed; expanded
+// into the TrackedTypeSite enum below.
+#define TRACKED_TYPESITE_LIST(_) \
+    _(Receiver) \
+    _(Operand) \
+    _(Index) \
+    _(Value) \
+    _(Call_Target) \
+    _(Call_This) \
+    _(Call_Arg) \
+    _(Call_Return)
+
+// One enumerator per entry of TRACKED_STRATEGY_LIST, plus a trailing Count.
+enum class TrackedStrategy : uint32_t {
+#define STRATEGY_OP(name) name,
+    TRACKED_STRATEGY_LIST(STRATEGY_OP)
+// Fixed typo: was "#undef STRATEGY_OPT", which undef'd a nonexistent name
+// and leaked the STRATEGY_OP helper macro into every includer.
+#undef STRATEGY_OP
+
+    Count
+};
+
+// One enumerator per entry of TRACKED_OUTCOME_LIST, plus a trailing Count.
+// The list's ordering relative to GenericSuccess encodes failure vs success.
+enum class TrackedOutcome : uint32_t {
+#define OUTCOME_OP(name) name,
+    TRACKED_OUTCOME_LIST(OUTCOME_OP)
+#undef OUTCOME_OP
+
+    Count
+};
+
+// One enumerator per entry of TRACKED_TYPESITE_LIST, plus a trailing Count.
+enum class TrackedTypeSite : uint32_t {
+#define TYPESITE_OP(name) name,
+    TRACKED_TYPESITE_LIST(TYPESITE_OP)
+#undef TYPESITE_OP
+
+    Count
+};
+
+// Return a human-readable string naming |strategy|.
+JS_PUBLIC_API(const char*)
+TrackedStrategyString(TrackedStrategy strategy);
+
+// Return a human-readable string naming |outcome|.
+JS_PUBLIC_API(const char*)
+TrackedOutcomeString(TrackedOutcome outcome);
+
+// Return a human-readable string naming |site|.
+JS_PUBLIC_API(const char*)
+TrackedTypeSiteString(TrackedTypeSite site);
+
+// Visitor interface used when enumerating tracked optimization attempts;
+// operator() receives the strategy tried and its outcome.
+struct ForEachTrackedOptimizationAttemptOp
+{
+    virtual void operator()(TrackedStrategy strategy, TrackedOutcome outcome) = 0;
+};
+
+// Visitor interface used when enumerating tracked optimization type info.
+struct ForEachTrackedOptimizationTypeInfoOp
+{
+    // Called 0+ times per entry, once for each type in the type set that Ion
+    // saw during MIR construction. readType is always called _before_
+    // operator() on the same entry.
+    //
+    // The keyedBy parameter describes how the type is keyed:
+    //   - "primitive"   for primitive types
+    //   - "constructor" for object types tied to a scripted constructor
+    //                   function.
+    //   - "alloc site"  for object types tied to an allocation site.
+    //   - "prototype"   for object types tied neither to a constructor nor
+    //                   to an allocation site, but to a prototype.
+    //   - "singleton"   for object types which only have a single value.
+    //   - "function"    for object types referring to scripted functions.
+    //   - "native"      for object types referring to native functions.
+    //
+    // The name parameter is the string representation of the type. If the
+    // type is keyed by "constructor", or if the type itself refers to a
+    // scripted function, the name is the function's displayAtom. If the type
+    // is keyed by "native", this is nullptr.
+    //
+    // The location parameter is the filename if the type is keyed by
+    // "constructor", "alloc site", or if the type itself refers to a scripted
+    // function. If the type is keyed by "native", it is the offset of the
+    // native function, suitable for use with addr2line on Linux or atos on OS
+    // X. Otherwise it is nullptr.
+    //
+    // The lineno parameter is the line number if the type is keyed by
+    // "constructor", "alloc site", or if the type itself refers to a scripted
+    // function. Otherwise it is Nothing().
+    //
+    // The location parameter is the only one that may need escaping if being
+    // quoted.
+    virtual void readType(const char* keyedBy, const char* name,
+                          const char* location, mozilla::Maybe<unsigned> lineno) = 0;
+
+    // Called once per entry.
+    virtual void operator()(TrackedTypeSite site, const char* mirType) = 0;
+};
+
+} // namespace JS
+
+#endif // js_TrackedOptimizationInfo_h
diff --git a/js/public/TypeDecls.h b/js/public/TypeDecls.h
new file mode 100644
index 0000000000..acb93f9737
--- /dev/null
+++ b/js/public/TypeDecls.h
@@ -0,0 +1,79 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+// This file contains public type declarations that are used *frequently*. If
+// it doesn't occur at least 10 times in Gecko, it probably shouldn't be in
+// here.
+//
+// It includes only:
+// - forward declarations of structs and classes;
+// - typedefs;
+// - enums (maybe).
+// It does *not* contain any struct or class definitions.
+
+#ifndef js_TypeDecls_h
+#define js_TypeDecls_h
+
+#include <stddef.h>
+#include <stdint.h>
+
+#include "js-config.h"
+
+struct JSContext;
+class JSFunction;
+class JSObject;
+class JSScript;
+class JSString;
+class JSAddonId;
+
+struct jsid;
+
+namespace JS {
+
+typedef unsigned char Latin1Char;
+
+class Symbol;
+class Value;
+template <typename T> class Handle;
+template <typename T> class MutableHandle;
+template <typename T> class Rooted;
+template <typename T> class PersistentRooted;
+
+// Handle<T> aliases for the most frequently used referent types.
+typedef Handle<JSFunction*> HandleFunction;
+typedef Handle<jsid> HandleId;
+typedef Handle<JSObject*> HandleObject;
+typedef Handle<JSScript*> HandleScript;
+typedef Handle<JSString*> HandleString;
+typedef Handle<JS::Symbol*> HandleSymbol;
+typedef Handle<Value> HandleValue;
+
+// MutableHandle<T> aliases, mirroring the Handle aliases above.
+typedef MutableHandle<JSFunction*> MutableHandleFunction;
+typedef MutableHandle<jsid> MutableHandleId;
+typedef MutableHandle<JSObject*> MutableHandleObject;
+typedef MutableHandle<JSScript*> MutableHandleScript;
+typedef MutableHandle<JSString*> MutableHandleString;
+typedef MutableHandle<JS::Symbol*> MutableHandleSymbol;
+typedef MutableHandle<Value> MutableHandleValue;
+
+// Rooted<T> aliases, mirroring the Handle aliases above.
+typedef Rooted<JSObject*> RootedObject;
+typedef Rooted<JSFunction*> RootedFunction;
+typedef Rooted<JSScript*> RootedScript;
+typedef Rooted<JSString*> RootedString;
+typedef Rooted<JS::Symbol*> RootedSymbol;
+typedef Rooted<jsid> RootedId;
+typedef Rooted<JS::Value> RootedValue;
+
+// PersistentRooted<T> aliases, mirroring the Handle aliases above.
+typedef PersistentRooted<JSFunction*> PersistentRootedFunction;
+typedef PersistentRooted<jsid> PersistentRootedId;
+typedef PersistentRooted<JSObject*> PersistentRootedObject;
+typedef PersistentRooted<JSScript*> PersistentRootedScript;
+typedef PersistentRooted<JSString*> PersistentRootedString;
+typedef PersistentRooted<JS::Symbol*> PersistentRootedSymbol;
+typedef PersistentRooted<Value> PersistentRootedValue;
+
+} // namespace JS
+
+#endif /* js_TypeDecls_h */
diff --git a/js/public/UbiNode.h b/js/public/UbiNode.h
new file mode 100644
index 0000000000..7332f198f9
--- /dev/null
+++ b/js/public/UbiNode.h
@@ -0,0 +1,1146 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UbiNode_h
+#define js_UbiNode_h
+
+#include "mozilla/Alignment.h"
+#include "mozilla/Assertions.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/MemoryReporting.h"
+#include "mozilla/Move.h"
+#include "mozilla/RangedPtr.h"
+#include "mozilla/TypeTraits.h"
+#include "mozilla/Variant.h"
+
+#include "jspubtd.h"
+
+#include "js/GCAPI.h"
+#include "js/HashTable.h"
+#include "js/RootingAPI.h"
+#include "js/TracingAPI.h"
+#include "js/TypeDecls.h"
+#include "js/UniquePtr.h"
+#include "js/Value.h"
+#include "js/Vector.h"
+
+// JS::ubi::Node
+//
+// JS::ubi::Node is a pointer-like type designed for internal use by heap
+// analysis tools. A ubi::Node can refer to:
+//
+// - a JS value, like a string, object, or symbol;
+// - an internal SpiderMonkey structure, like a shape or a scope chain object;
+// - an instance of some embedding-provided type: in Firefox, an XPCOM
+// object, or an internal DOM node class instance
+//
+// A ubi::Node instance provides metadata about its referent, and can
+// enumerate its referent's outgoing edges, so you can implement heap analysis
+// algorithms that walk the graph - finding paths between objects, or
+// computing heap dominator trees, say - using ubi::Node, while remaining
+// ignorant of the details of the types you're operating on.
+//
+// Of course, when it comes to presenting the results in a developer-facing
+// tool, you'll need to stop being ignorant of those details, because you have
+// to discuss the ubi::Nodes' referents with the developer. Here, ubi::Node
+// can hand you dynamically checked, properly typed pointers to the original
+// objects via the as<T> method, or generate descriptions of the referent
+// itself.
+//
+// ubi::Node instances are lightweight (two-word) value types. Instances:
+// - compare equal if and only if they refer to the same object;
+// - have hash values that respect their equality relation; and
+// - have serializations that are only equal if the ubi::Nodes are equal.
+//
+// A ubi::Node is only valid for as long as its referent is alive; if its
+// referent goes away, the ubi::Node becomes a dangling pointer. A ubi::Node
+// that refers to a GC-managed object is not automatically a GC root; if the
+// GC frees or relocates its referent, the ubi::Node becomes invalid. A
+// ubi::Node that refers to a reference-counted object does not bump the
+// reference count.
+//
+// ubi::Node values require no supporting data structures, making them
+// feasible for use in memory-constrained devices --- ideally, the memory
+// requirements of the algorithm which uses them will be the limiting factor,
+// not the demands of ubi::Node itself.
+//
+// One can construct a ubi::Node value given a pointer to a type that ubi::Node
+// supports. In the other direction, one can convert a ubi::Node back to a
+// pointer; these downcasts are checked dynamically. In particular, one can
+// convert a 'JSContext*' to a ubi::Node, yielding a node with an outgoing edge
+// for every root registered with the runtime; starting from this, one can walk
+// the entire heap. (Of course, one could also start traversal at any other kind
+// of type to which one has a pointer.)
+//
+//
+// Extending ubi::Node To Handle Your Embedding's Types
+//
+// To add support for a new ubi::Node referent type R, you must define a
+// specialization of the ubi::Concrete template, ubi::Concrete<R>, which
+// inherits from ubi::Base. ubi::Node itself uses the specialization for
+// compile-time information (i.e. the checked conversions between R * and
+// ubi::Node), and the inheritance for run-time dispatching.
+//
+//
+// ubi::Node Exposes Implementation Details
+//
+// In many cases, a JavaScript developer's view of their data differs
+// substantially from its actual implementation. For example, while the
+// ECMAScript specification describes objects as maps from property names to
+// sets of attributes (like ECMAScript's [[Value]]), in practice many objects
+// have only a pointer to a shape, shared with other similar objects, and
+// indexed slots that contain the [[Value]] attributes. As another example, a
+// string produced by concatenating two other strings may sometimes be
+// represented by a "rope", a structure that points to the two original
+// strings.
+//
+// We intend to use ubi::Node to write tools that report memory usage, so it's
+// important that ubi::Node accurately portray how much memory nodes consume.
+// Thus, for example, when data that apparently belongs to multiple nodes is
+// in fact shared in a common structure, ubi::Node's graph uses a separate
+// node for that shared structure, and presents edges to it from the data's
+// apparent owners. For example, ubi::Node exposes SpiderMonkey objects'
+// shapes and base shapes, and exposes rope string and substring structure,
+// because these optimizations become visible when a tool reports how much
+// memory a structure consumes.
+//
+// However, fine granularity is not a goal. When a particular object is the
+// exclusive owner of a separate block of memory, ubi::Node may present the
+// object and its block as a single node, and add their sizes together when
+// reporting the node's size, as there is no meaningful loss of data in this
+// case. Thus, for example, a ubi::Node referring to a JavaScript object, when
+// asked for the object's size in bytes, includes the object's slot and
+// element arrays' sizes in the total. There is no separate ubi::Node value
+// representing the slot and element arrays, since they are owned exclusively
+// by the object.
+//
+//
+// Presenting Analysis Results To JavaScript Developers
+//
+// If an analysis provides its results in terms of ubi::Node values, a user
+// interface presenting those results will generally need to clean them up
+// before they can be understood by JavaScript developers. For example,
+// JavaScript developers should not need to understand shapes, only JavaScript
+// objects. Similarly, they should not need to understand the distinction
+// between DOM nodes and the JavaScript shadow objects that represent them.
+//
+//
+// Rooting Restrictions
+//
+// At present there is no way to root ubi::Node instances, so instances can't be
+// live across any operation that might GC. Analyses using ubi::Node must either
+// run to completion and convert their results to some other rootable type, or
+// save their intermediate state in some rooted structure if they must GC before
+// they complete. (For algorithms like path-finding and dominator tree
+// computation, we implement the algorithm avoiding any operation that could
+// cause a GC --- and use AutoCheckCannotGC to verify this.)
+//
+// If this restriction prevents us from implementing interesting tools, we may
+// teach the GC how to root ubi::Nodes, fix up hash tables that use them as
+// keys, etc.
+//
+//
+// Hostile Graph Structure
+//
+// Analyses consuming ubi::Node graphs must be robust when presented with graphs
+// that are deliberately constructed to exploit their weaknesses. When operating
+// on live graphs, web content has control over the object graph, and less
+// direct control over shape and string structure, and analyses should be
+// prepared to handle extreme cases gracefully. For example, if an analysis were
+// to use the C++ stack in a depth-first traversal, carefully constructed
+// content could cause the analysis to overflow the stack.
+//
+// When ubi::Nodes refer to nodes deserialized from a heap snapshot, analyses
+// must be even more careful: since snapshots often come from potentially
+// compromised e10s content processes, even properties normally guaranteed by
+// the platform (the proper linking of DOM nodes, for example) might be
+// corrupted. While it is the deserializer's responsibility to check the basic
+// structure of the snapshot file, the analyses should be prepared for ubi::Node
+// graphs constructed from snapshots to be even more bizarre.
+
+class JSAtom;
+
+namespace JS {
+namespace ubi {
+
+class Edge;
+class EdgeRange;
+class StackFrame;
+
+} // namespace ubi
+} // namespace JS
+
+namespace JS {
+namespace ubi {
+
+using mozilla::Forward;
+using mozilla::Maybe;
+using mozilla::Move;
+using mozilla::RangedPtr;
+using mozilla::Variant;
+
+template <typename T>
+using Vector = mozilla::Vector<T, 0, js::SystemAllocPolicy>;
+
+/*** ubi::StackFrame ******************************************************************************/
+
+// Concrete JS::ubi::StackFrame instances backed by a live SavedFrame object
+// store their strings as JSAtom*, while deserialized stack frames from offline
+// heap snapshots store their strings as const char16_t*. In order to provide
+// zero-cost accessors to these strings in a single interface that works with
+// both cases, we use this variant type.
+class JS_PUBLIC_API(AtomOrTwoByteChars) : public Variant<JSAtom*, const char16_t*> {
+ using Base = Variant<JSAtom*, const char16_t*>;
+
+ public:
+ template<typename T>
+ MOZ_IMPLICIT AtomOrTwoByteChars(T&& rhs) : Base(Forward<T>(rhs)) { }
+
+ template<typename T>
+ AtomOrTwoByteChars& operator=(T&& rhs) {
+ MOZ_ASSERT(this != &rhs, "self-move disallowed");
+ this->~AtomOrTwoByteChars();
+ new (this) AtomOrTwoByteChars(Forward<T>(rhs));
+ return *this;
+ }
+
+ // Return the length of the given AtomOrTwoByteChars string.
+ size_t length();
+
+ // Copy the given AtomOrTwoByteChars string into the destination buffer,
+ // inflating if necessary. Does NOT null terminate. Returns the number of
+ // characters written to destination.
+ size_t copyToBuffer(RangedPtr<char16_t> destination, size_t length);
+};
+
+// The base class implemented by each ConcreteStackFrame<T> type. Subclasses
+// must not add data members to this class.
class BaseStackFrame {
    friend class StackFrame;

    // Not copyable: instances live inside StackFrame's inline storage, which
    // duplicates them with memcpy instead (see StackFrame's copy constructor).
    BaseStackFrame(const StackFrame&) = delete;
    BaseStackFrame& operator=(const StackFrame&) = delete;

  protected:
    // Pointer to the backing frame data; concrete subclasses know its real
    // type and should expose a properly typed accessor.
    void* ptr;
    explicit BaseStackFrame(void* ptr) : ptr(ptr) { }

  public:
    // This is a value type that should not have a virtual destructor. Don't add
    // destructors in subclasses!

    // Get a unique identifier for this StackFrame. The identifier is not valid
    // across garbage collections.
    virtual uint64_t identifier() const { return uint64_t(uintptr_t(ptr)); }

    // Get this frame's parent frame.
    virtual StackFrame parent() const = 0;

    // Get this frame's line number.
    virtual uint32_t line() const = 0;

    // Get this frame's column number.
    virtual uint32_t column() const = 0;

    // Get this frame's source name. Never null.
    virtual AtomOrTwoByteChars source() const = 0;

    // Return this frame's function name if named, otherwise the inferred
    // display name. Can be null.
    virtual AtomOrTwoByteChars functionDisplayName() const = 0;

    // Returns true if this frame's function is system JavaScript running with
    // trusted principals, false otherwise.
    virtual bool isSystem() const = 0;

    // Return true if this frame's function is a self-hosted JavaScript builtin,
    // false otherwise.
    virtual bool isSelfHosted(JSContext* cx) const = 0;

    // Construct a SavedFrame stack for the stack starting with this frame and
    // containing all of its parents. The SavedFrame objects will be placed into
    // cx's current compartment.
    //
    // Note that the process of
    //
    //     SavedFrame
    //         |
    //         V
    //     JS::ubi::StackFrame
    //         |
    //         V
    //     offline heap snapshot
    //         |
    //         V
    //     JS::ubi::StackFrame
    //         |
    //         V
    //     SavedFrame
    //
    // is lossy because we cannot serialize and deserialize the SavedFrame's
    // principals in the offline heap snapshot, so JS::ubi::StackFrame
    // simplifies the principals check into the boolean isSystem() state. This
    // is fine because we only expose JS::ubi::Stack to devtools and chrome
    // code, and not to the web platform.
    virtual MOZ_MUST_USE bool constructSavedFrameStack(JSContext* cx,
                                                       MutableHandleObject outSavedFrameStack)
        const = 0;

    // Trace the concrete implementation of JS::ubi::StackFrame.
    virtual void trace(JSTracer* trc) = 0;
};
+
+// A traits template with a specialization for each backing type that implements
+// the ubi::BaseStackFrame interface. Each specialization must be a subclass
+// of ubi::BaseStackFrame.
+template<typename T> class ConcreteStackFrame;
+
+// A JS::ubi::StackFrame represents a frame in a recorded stack. It can be
+// backed either by a live SavedFrame object or by a structure deserialized from
+// an offline heap snapshot.
+//
+// It is a value type that may be memcpy'd hither and thither without worrying
+// about constructors or destructors, similar to POD types.
+//
+// Its lifetime is the same as the lifetime of the graph that is being analyzed
+// by the JS::ubi::Node that the JS::ubi::StackFrame came from. That is, if the
+// graph being analyzed is the live heap graph, the JS::ubi::StackFrame is only
+// valid within the scope of an AutoCheckCannotGC; if the graph being analyzed
+// is an offline heap snapshot, the JS::ubi::StackFrame is valid as long as the
+// offline heap snapshot is alive.
class StackFrame {
    // Storage in which we allocate BaseStackFrame subclasses.
    mozilla::AlignedStorage2<BaseStackFrame> storage;

    BaseStackFrame* base() { return storage.addr(); }
    const BaseStackFrame* base() const { return storage.addr(); }

    // Placement-construct the ConcreteStackFrame<T> for |ptr| in our inline
    // storage. The static_asserts enforce the subclassing and same-size layout
    // contract that makes the memcpy-based copying below safe.
    template<typename T>
    void construct(T* ptr) {
        static_assert(mozilla::IsBaseOf<BaseStackFrame, ConcreteStackFrame<T>>::value,
                      "ConcreteStackFrame<T> must inherit from BaseStackFrame");
        static_assert(sizeof(ConcreteStackFrame<T>) == sizeof(*base()),
                      "ubi::ConcreteStackFrame<T> specializations must be the same size as "
                      "ubi::BaseStackFrame");
        ConcreteStackFrame<T>::construct(base(), ptr);
    }
    struct ConstructFunctor;

  public:
    // Default-construct the null StackFrame.
    StackFrame() { construct<void>(nullptr); }

    template<typename T>
    MOZ_IMPLICIT StackFrame(T* ptr) {
        construct(ptr);
    }

    template<typename T>
    StackFrame& operator=(T* ptr) {
        construct(ptr);
        return *this;
    }

    // Constructors accepting SpiderMonkey's generic-pointer-ish types.

    template<typename T>
    explicit StackFrame(const JS::Handle<T*>& handle) {
        construct(handle.get());
    }

    template<typename T>
    StackFrame& operator=(const JS::Handle<T*>& handle) {
        construct(handle.get());
        return *this;
    }

    template<typename T>
    explicit StackFrame(const JS::Rooted<T*>& root) {
        construct(root.get());
    }

    template<typename T>
    StackFrame& operator=(const JS::Rooted<T*>& root) {
        construct(root.get());
        return *this;
    }

    // Because StackFrame is just a vtable pointer and an instance pointer, we
    // can memcpy everything around instead of making concrete classes define
    // virtual constructors. See the comment above Node's copy constructor for
    // more details; that comment applies here as well.
    StackFrame(const StackFrame& rhs) {
        memcpy(storage.u.mBytes, rhs.storage.u.mBytes, sizeof(storage.u));
    }

    StackFrame& operator=(const StackFrame& rhs) {
        memcpy(storage.u.mBytes, rhs.storage.u.mBytes, sizeof(storage.u));
        return *this;
    }

    // Frames are equal iff they wrap the same underlying pointer; the vtable
    // is deliberately ignored (see the caveat on Base::operator==).
    bool operator==(const StackFrame& rhs) const { return base()->ptr == rhs.base()->ptr; }
    bool operator!=(const StackFrame& rhs) const { return !(*this == rhs); }

    // True iff this is not the null StackFrame.
    explicit operator bool() const {
        return base()->ptr != nullptr;
    }

    // Copy this StackFrame's source name into the given |destination|
    // buffer. Copy no more than |length| characters. The result is *not* null
    // terminated. Returns how many characters were written into the buffer.
    size_t source(RangedPtr<char16_t> destination, size_t length) const;

    // Copy this StackFrame's function display name into the given |destination|
    // buffer. Copy no more than |length| characters. The result is *not* null
    // terminated. Returns how many characters were written into the buffer.
    size_t functionDisplayName(RangedPtr<char16_t> destination, size_t length) const;

    // Get the size of the respective strings. 0 is returned for null strings.
    size_t sourceLength();
    size_t functionDisplayNameLength();

    // Methods that forward to virtual calls through BaseStackFrame.

    void trace(JSTracer* trc) { base()->trace(trc); }
    uint64_t identifier() const {
        auto id = base()->identifier();
        // Asserted so the identifier can be exposed as a JS number without
        // losing precision.
        MOZ_ASSERT(JS::Value::isNumberRepresentable(id));
        return id;
    }
    uint32_t line() const { return base()->line(); }
    uint32_t column() const { return base()->column(); }
    AtomOrTwoByteChars source() const { return base()->source(); }
    AtomOrTwoByteChars functionDisplayName() const { return base()->functionDisplayName(); }
    StackFrame parent() const { return base()->parent(); }
    bool isSystem() const { return base()->isSystem(); }
    bool isSelfHosted(JSContext* cx) const { return base()->isSelfHosted(cx); }
    MOZ_MUST_USE bool constructSavedFrameStack(JSContext* cx,
                                               MutableHandleObject outSavedFrameStack) const {
        return base()->constructSavedFrameStack(cx, outSavedFrameStack);
    }

    // Hash policy keyed on the frame identifier, for use with js::HashMap/Set.
    struct HashPolicy {
        using Lookup = JS::ubi::StackFrame;

        static js::HashNumber hash(const Lookup& lookup) {
            return lookup.identifier();
        }

        static bool match(const StackFrame& key, const Lookup& lookup) {
            return key == lookup;
        }

        static void rekey(StackFrame& k, const StackFrame& newKey) {
            k = newKey;
        }
    };
};
+
+// The ubi::StackFrame null pointer. Any attempt to operate on a null
+// ubi::StackFrame crashes.
template<>
class ConcreteStackFrame<void> : public BaseStackFrame {
    explicit ConcreteStackFrame(void* ptr) : BaseStackFrame(ptr) { }

  public:
    static void construct(void* storage, void*) { new (storage) ConcreteStackFrame(nullptr); }

    // The null frame's identifier is 0, it traces nothing, and constructing a
    // SavedFrame stack from it yields a null stack (successfully).
    uint64_t identifier() const override { return 0; }
    void trace(JSTracer* trc) override { }
    MOZ_MUST_USE bool constructSavedFrameStack(JSContext* cx, MutableHandleObject out)
        const override
    {
        out.set(nullptr);
        return true;
    }

    // Every other accessor is invalid on the null frame and crashes.
    uint32_t line() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
    uint32_t column() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
    AtomOrTwoByteChars source() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
    AtomOrTwoByteChars functionDisplayName() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
    StackFrame parent() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
    bool isSystem() const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
    bool isSelfHosted(JSContext* cx) const override { MOZ_CRASH("null JS::ubi::StackFrame"); }
};
+
+MOZ_MUST_USE JS_PUBLIC_API(bool)
+ConstructSavedFrameStackSlow(JSContext* cx,
+ JS::ubi::StackFrame& frame,
+ MutableHandleObject outSavedFrameStack);
+
+
+/*** ubi::Node ************************************************************************************/
+
+// A concrete node specialization can claim its referent is a member of a
+// particular "coarse type" which is less specific than the actual
+// implementation type but generally more palatable for web developers. For
+// example, JitCode can be considered to have a coarse type of "Script". This is
+// used by some analyses for putting nodes into different buckets. The default,
+// if a concrete specialization does not provide its own mapping to a CoarseType
+// variant, is "Other".
+//
+// NB: the values associated with a particular enum variant must not change or
+// be reused for new variants. Doing so will cause inspecting ubi::Nodes backed
+// by an offline heap snapshot from an older SpiderMonkey/Firefox version to
+// break. Consider this enum append only.
enum class CoarseType: uint32_t {
    Other = 0,
    Object = 1,
    Script = 2,
    String = 3,

    // Range markers used by Uint32IsValidCoarseType; update LAST when
    // appending new variants (append only — see comment above).
    FIRST = Other,
    LAST = String
};
+
+inline uint32_t
+CoarseTypeToUint32(CoarseType type)
+{
+ return static_cast<uint32_t>(type);
+}
+
+inline bool
+Uint32IsValidCoarseType(uint32_t n)
+{
+ auto first = static_cast<uint32_t>(CoarseType::FIRST);
+ auto last = static_cast<uint32_t>(CoarseType::LAST);
+ MOZ_ASSERT(first < last);
+ return first <= n && n <= last;
+}
+
+inline CoarseType
+Uint32ToCoarseType(uint32_t n)
+{
+ MOZ_ASSERT(Uint32IsValidCoarseType(n));
+ return static_cast<CoarseType>(n);
+}
+
+// The base class implemented by each ubi::Node referent type. Subclasses must
+// not add data members to this class.
class JS_PUBLIC_API(Base) {
    friend class Node;

    // For performance's sake, we'd prefer to avoid a virtual destructor; and
    // an empty constructor seems consistent with the 'lightweight value type'
    // visible behavior we're trying to achieve. But if the destructor isn't
    // virtual, and a subclass overrides it, the subclass's destructor will be
    // ignored. Is there a way to make the compiler catch that error?

  protected:
    // Space for the actual pointer. Concrete subclasses should define a
    // properly typed 'get' member function to access this.
    void* ptr;

    explicit Base(void* ptr) : ptr(ptr) { }

  public:
    bool operator==(const Base& rhs) const {
        // Some compilers will indeed place objects of different types at
        // the same address, so technically, we should include the vtable
        // in this comparison. But it seems unlikely to cause problems in
        // practice.
        return ptr == rhs.ptr;
    }
    bool operator!=(const Base& rhs) const { return !(*this == rhs); }

    // An identifier for this node, guaranteed to be stable and unique for as
    // long as this ubi::Node's referent is alive and at the same address.
    //
    // This is probably suitable for use in serializations, as it is an integral
    // type. It may also help save memory when constructing HashSets of
    // ubi::Nodes: since a uint64_t will always be smaller-or-equal-to the size
    // of a ubi::Node, a HashSet<ubi::Node::Id> may use less space per element
    // than a HashSet<ubi::Node>.
    //
    // (Note that 'unique' only means 'up to equality on ubi::Node'; see the
    // caveats about multiple objects allocated at the same address for
    // 'ubi::Node::operator=='.)
    using Id = uint64_t;
    virtual Id identifier() const { return Id(uintptr_t(ptr)); }

    // Returns true if this node is pointing to something on the live heap, as
    // opposed to something from a deserialized core dump. Returns false,
    // otherwise.
    virtual bool isLive() const { return true; };

    // Return the coarse-grained type-of-thing that this node represents.
    virtual CoarseType coarseType() const { return CoarseType::Other; }

    // Return a human-readable name for the referent's type. The result should
    // be statically allocated. (You can use u"strings" for this.)
    //
    // This must always return Concrete<T>::concreteTypeName; we use that
    // pointer as a tag for this particular referent type.
    virtual const char16_t* typeName() const = 0;

    // Return the size of this node, in bytes. Include any structures that this
    // node owns exclusively that are not exposed as their own ubi::Nodes.
    // |mallocSizeOf| should be a malloc block sizing function; see
    // |mfbt/MemoryReporting.h|.
    //
    // Because we can use |JS::ubi::Node|s backed by a snapshot that was taken
    // on a 64-bit platform when we are currently on a 32-bit platform, we
    // cannot rely on |size_t| for node sizes. Instead, |Size| is uint64_t on
    // all platforms.
    using Size = uint64_t;
    virtual Size size(mozilla::MallocSizeOf mallocSizeof) const { return 1; }

    // Return an EdgeRange that initially contains all the referent's outgoing
    // edges. The caller takes ownership of the EdgeRange.
    //
    // If wantNames is true, compute names for edges. Doing so can be expensive
    // in time and memory.
    virtual js::UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames) const = 0;

    // Return the Zone to which this node's referent belongs, or nullptr if the
    // referent is not of a type allocated in SpiderMonkey Zones.
    virtual JS::Zone* zone() const { return nullptr; }

    // Return the compartment for this node. Some ubi::Node referents are not
    // associated with JSCompartments, such as JSStrings (which are associated
    // with Zones). When the referent is not associated with a compartment,
    // nullptr is returned.
    virtual JSCompartment* compartment() const { return nullptr; }

    // Return whether this node's referent's allocation stack was captured.
    virtual bool hasAllocationStack() const { return false; }

    // Get the stack recorded at the time this node's referent was
    // allocated. This must only be called when hasAllocationStack() is true.
    virtual StackFrame allocationStack() const {
        MOZ_CRASH("Concrete classes that have an allocation stack must override both "
                  "hasAllocationStack and allocationStack.");
    }

    // Methods for JSObject Referents
    //
    // These methods are only semantically valid if the referent is either a
    // JSObject in the live heap, or represents a previously existing JSObject
    // from some deserialized heap snapshot.

    // Return the object's [[Class]]'s name.
    virtual const char* jsObjectClassName() const { return nullptr; }

    // If this object was constructed with `new` and we have the data available,
    // place the constructor function's display name in the out parameter.
    // Otherwise, place nullptr in the out parameter. Caller maintains ownership
    // of the out parameter. True is returned on success, false is returned on
    // OOM.
    virtual MOZ_MUST_USE bool jsObjectConstructorName(JSContext* cx, UniqueTwoByteChars& outName)
        const
    {
        outName.reset(nullptr);
        return true;
    }

    // Methods for CoarseType::Script referents

    // Return the script's source's filename if available. If unavailable,
    // return nullptr.
    virtual const char* scriptFilename() const { return nullptr; }

  private:
    // Not copyable: concrete instances live inside Node's inline storage,
    // which copies them with memcpy (see Node's copy constructor).
    Base(const Base& rhs) = delete;
    Base& operator=(const Base& rhs) = delete;
};
+
+// A traits template with a specialization for each referent type that
+// ubi::Node supports. The specialization must be the concrete subclass of Base
+// that represents a pointer to the referent type. It must include these
+// members:
+//
+// // The specific char16_t array returned by Concrete<T>::typeName().
+// static const char16_t concreteTypeName[];
+//
+// // Construct an instance of this concrete class in |storage| referring
+// // to |referent|. Implementations typically use a placement 'new'.
+// //
+// // In some cases, |referent| will contain dynamic type information that
+// // identifies it as some more specific subclass of |Referent|. For
+// // example, when |Referent| is |JSObject|, then |referent->getClass()|
+// // could tell us that it's actually a JSFunction. Similarly, if
+// // |Referent| is |nsISupports|, we would like a ubi::Node that knows its
+// // final implementation type.
+// //
+// // So we delegate the actual construction to this specialization, which
+// // knows Referent's details.
+// static void construct(void* storage, Referent* referent);
+template<typename Referent>
+class Concrete;
+
+// A container for a Base instance; all members simply forward to the contained
+// instance. This container allows us to pass ubi::Node instances by value.
+class Node {
+ // Storage in which we allocate Base subclasses.
+ mozilla::AlignedStorage2<Base> storage;
+ Base* base() { return storage.addr(); }
+ const Base* base() const { return storage.addr(); }
+
+ template<typename T>
+ void construct(T* ptr) {
+ static_assert(sizeof(Concrete<T>) == sizeof(*base()),
+ "ubi::Base specializations must be the same size as ubi::Base");
+ static_assert(mozilla::IsBaseOf<Base, Concrete<T>>::value,
+ "ubi::Concrete<T> must inherit from ubi::Base");
+ Concrete<T>::construct(base(), ptr);
+ }
+ struct ConstructFunctor;
+
+ public:
+ Node() { construct<void>(nullptr); }
+
+ template<typename T>
+ MOZ_IMPLICIT Node(T* ptr) {
+ construct(ptr);
+ }
+ template<typename T>
+ Node& operator=(T* ptr) {
+ construct(ptr);
+ return *this;
+ }
+
+ // We can construct and assign from rooted forms of pointers.
+ template<typename T>
+ MOZ_IMPLICIT Node(const Rooted<T*>& root) {
+ construct(root.get());
+ }
+ template<typename T>
+ Node& operator=(const Rooted<T*>& root) {
+ construct(root.get());
+ return *this;
+ }
+
+ // Constructors accepting SpiderMonkey's other generic-pointer-ish types.
+ // Note that we *do* want an implicit constructor here: JS::Value and
+ // JS::ubi::Node are both essentially tagged references to other sorts of
+ // objects, so letting conversions happen automatically is appropriate.
+ MOZ_IMPLICIT Node(JS::HandleValue value);
+ explicit Node(const JS::GCCellPtr& thing);
+
+ // copy construction and copy assignment just use memcpy, since we know
+ // instances contain nothing but a vtable pointer and a data pointer.
+ //
+ // To be completely correct, concrete classes could provide a virtual
+ // 'construct' member function, which we could invoke on rhs to construct an
+ // instance in our storage. But this is good enough; there's no need to jump
+ // through vtables for copying and assignment that are just going to move
+ // two words around. The compiler knows how to optimize memcpy.
+ Node(const Node& rhs) {
+ memcpy(storage.u.mBytes, rhs.storage.u.mBytes, sizeof(storage.u));
+ }
+
+ Node& operator=(const Node& rhs) {
+ memcpy(storage.u.mBytes, rhs.storage.u.mBytes, sizeof(storage.u));
+ return *this;
+ }
+
+ bool operator==(const Node& rhs) const { return *base() == *rhs.base(); }
+ bool operator!=(const Node& rhs) const { return *base() != *rhs.base(); }
+
+ explicit operator bool() const {
+ return base()->ptr != nullptr;
+ }
+
+ bool isLive() const { return base()->isLive(); }
+
+ // Get the canonical type name for the given type T.
+ template<typename T>
+ static const char16_t* canonicalTypeName() { return Concrete<T>::concreteTypeName; }
+
+ template<typename T>
+ bool is() const {
+ return base()->typeName() == canonicalTypeName<T>();
+ }
+
+ template<typename T>
+ T* as() const {
+ MOZ_ASSERT(isLive());
+ MOZ_ASSERT(is<T>());
+ return static_cast<T*>(base()->ptr);
+ }
+
+ template<typename T>
+ T* asOrNull() const {
+ MOZ_ASSERT(isLive());
+ return is<T>() ? static_cast<T*>(base()->ptr) : nullptr;
+ }
+
+    // If this node refers to something that can be represented as a JavaScript
+    // value that is safe to expose to JavaScript code, return that value.
+    // Otherwise return UndefinedValue(). JSStrings, JS::Symbols, and some (but
+    // not all!) JSObjects can be exposed.
+    JS::Value exposeToJS() const;
+
+    // Accessors that simply forward to the referent-specific Base
+    // implementation selected at construction time.
+    CoarseType coarseType() const { return base()->coarseType(); }
+    const char16_t* typeName() const { return base()->typeName(); }
+    JS::Zone* zone() const { return base()->zone(); }
+    JSCompartment* compartment() const { return base()->compartment(); }
+    const char* jsObjectClassName() const { return base()->jsObjectClassName(); }
+    MOZ_MUST_USE bool jsObjectConstructorName(JSContext* cx, UniqueTwoByteChars& outName) const {
+        return base()->jsObjectConstructorName(cx, outName);
+    }
+
+    const char* scriptFilename() const { return base()->scriptFilename(); }
+
+    using Size = Base::Size;
+    // The referent's size as reported by its concrete class; asserted to be
+    // non-zero.
+    Size size(mozilla::MallocSizeOf mallocSizeof) const {
+        auto size = base()->size(mallocSizeof);
+        MOZ_ASSERT(size > 0,
+                   "C++ does not have zero-sized types! Choose 1 if you just need a "
+                   "conservative default.");
+        return size;
+    }
+
+    // A range over the referent's outgoing edges. A null result indicates
+    // failure (see BreadthFirst::traverse, which treats it as an error).
+    js::UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames = true) const {
+        return base()->edges(cx, wantNames);
+    }
+
+    bool hasAllocationStack() const { return base()->hasAllocationStack(); }
+    StackFrame allocationStack() const {
+        return base()->allocationStack();
+    }
+
+    using Id = Base::Id;
+    // A stable identifier for the referent; asserted to be representable as a
+    // JS number so it can be exposed to script.
+    Id identifier() const {
+        auto id = base()->identifier();
+        MOZ_ASSERT(JS::Value::isNumberRepresentable(id));
+        return id;
+    }
+
+    // A hash policy for ubi::Nodes.
+    // This simply uses the stock PointerHasher on the ubi::Node's pointer.
+    // We specialize DefaultHasher below to make this the default.
+    class HashPolicy {
+        typedef js::PointerHasher<void*, mozilla::tl::FloorLog2<sizeof(void*)>::value> PtrHash;
+
+      public:
+        typedef Node Lookup;
+
+        // Hash on the referent pointer only; equality goes through Node's
+        // own operator==.
+        static js::HashNumber hash(const Lookup& l) { return PtrHash::hash(l.base()->ptr); }
+        static bool match(const Node& k, const Lookup& l) { return k == l; }
+        static void rekey(Node& k, const Node& newKey) { k = newKey; }
+    };
+};
+
+// A hash set of ubi::Nodes, keyed on the referent pointer (Node::HashPolicy).
+using NodeSet = js::HashSet<Node, js::DefaultHasher<Node>, js::SystemAllocPolicy>;
+using NodeSetPtr = mozilla::UniquePtr<NodeSet, JS::DeletePolicy<NodeSet>>;
+
+/*** Edge and EdgeRange ***************************************************************************/
+
+using EdgeName = UniqueTwoByteChars;
+
+// An outgoing edge to a referent node.
+class Edge {
+  public:
+    // Construct an empty edge: no name, default (null) referent.
+    Edge() : name(nullptr), referent() { }
+
+    // Construct an initialized Edge, taking ownership of |name|.
+    Edge(char16_t* name, const Node& referent)
+      : name(name)
+      , referent(referent)
+    { }
+
+    // Move construction and assignment.
+    Edge(Edge&& rhs)
+      : name(mozilla::Move(rhs.name))
+      , referent(rhs.referent)
+    { }
+
+    Edge& operator=(Edge&& rhs) {
+        MOZ_ASSERT(&rhs != this);
+        // Destroy-then-placement-new: reuse the move constructor rather than
+        // duplicating member-by-member assignment.
+        this->~Edge();
+        new (this) Edge(mozilla::Move(rhs));
+        return *this;
+    }
+
+    // Move-only: |name| is owned storage, so copying is disallowed.
+    Edge(const Edge&) = delete;
+    Edge& operator=(const Edge&) = delete;
+
+    // This edge's name. This may be nullptr, if Node::edges was called with
+    // false as the wantNames parameter.
+    //
+    // The storage is owned by this Edge, and will be freed when this Edge is
+    // destructed. You may take ownership of the name by `mozilla::Move`ing it
+    // out of the edge; it is just a UniquePtr.
+    //
+    // (In real life we'll want a better representation for names, to avoid
+    // creating tons of strings when the names follow a pattern; and we'll need
+    // to think about lifetimes carefully to ensure traversal stays cheap.)
+    EdgeName name;
+
+    // This edge's referent.
+    Node referent;
+};
+
+// EdgeRange is an abstract base class for iterating over a node's outgoing
+// edges. (This is modeled after js::HashTable<K,V>::Range.)
+//
+// Concrete instances of this class need not be as lightweight as Node itself,
+// since they're usually only instantiated while iterating over a particular
+// object's edges. For example, a dumb implementation for JS Cells might use
+// JS::TraceChildren to get the outgoing edges, and then store them in an
+// array internal to the EdgeRange.
+class EdgeRange {
+  protected:
+    // The current front edge of this range, or nullptr if this range is empty.
+    Edge* front_;
+
+    EdgeRange() : front_(nullptr) { }
+
+  public:
+    virtual ~EdgeRange() { }
+
+    // True if there are no more edges in this range.
+    bool empty() const { return !front_; }
+
+    // The front edge of this range. This is owned by the EdgeRange, and is
+    // only guaranteed to live until the next call to popFront, or until
+    // the EdgeRange is destructed.
+    const Edge& front() const { return *front_; }
+    Edge& front() { return *front_; }
+
+    // Remove the front edge from this range. This should only be called if
+    // !empty().
+    virtual void popFront() = 0;
+
+  private:
+    // Ranges hold iteration state and are therefore not copyable.
+    EdgeRange(const EdgeRange&) = delete;
+    EdgeRange& operator=(const EdgeRange&) = delete;
+};
+
+
+// A vector of edges, with inline capacity for the common small cases.
+typedef mozilla::Vector<Edge, 8, js::SystemAllocPolicy> EdgeVector;
+
+// An EdgeRange concrete class that holds a pre-existing vector of
+// Edges. A PreComputedEdgeRange does not take ownership of its
+// EdgeVector; it is up to the PreComputedEdgeRange's consumer to manage
+// that lifetime.
+class PreComputedEdgeRange : public EdgeRange {
+    EdgeVector& edges;
+    size_t i;
+
+    // Point front_ at the current edge, or nullptr once we run off the end.
+    void settle() {
+        front_ = i < edges.length() ? &edges[i] : nullptr;
+    }
+
+  public:
+    explicit PreComputedEdgeRange(EdgeVector& edges)
+      : edges(edges),
+        i(0)
+    {
+        settle();
+    }
+
+    void popFront() override {
+        MOZ_ASSERT(!empty());
+        i++;
+        settle();
+    }
+};
+
+/*** RootList *************************************************************************************/
+
+// RootList is a class that can be pointed to by a |ubi::Node|, creating a
+// fictional root-of-roots which has edges to every GC root in the JS
+// runtime. Having a single root |ubi::Node| is useful for algorithms written
+// with the assumption that there aren't multiple roots (such as computing
+// dominator trees) and you want a single point of entry. It also ensures that
+// the roots themselves get visited by |ubi::BreadthFirst| (they would otherwise
+// only be used as starting points).
+//
+// RootList::init itself causes a minor collection, but once the list of roots
+// has been created, GC must not occur, as the referent ubi::Nodes are not
+// stable across GC. The init calls emplace on |noGC|'s AutoCheckCannotGC, whose
+// lifetime must extend at least as long as the RootList itself.
+//
+// Example usage:
+//
+// {
+// mozilla::Maybe<JS::AutoCheckCannotGC> maybeNoGC;
+// JS::ubi::RootList rootList(cx, maybeNoGC);
+// if (!rootList.init())
+// return false;
+//
+// // The AutoCheckCannotGC is guaranteed to exist if init returned true.
+// MOZ_ASSERT(maybeNoGC.isSome());
+//
+// JS::ubi::Node root(&rootList);
+//
+// ...
+// }
+class MOZ_STACK_CLASS JS_PUBLIC_API(RootList) {
+    // Holds the caller-provided AutoCheckCannotGC slot; its isSome() state
+    // doubles as the "successfully initialized" flag (see initialized()).
+    Maybe<AutoCheckCannotGC>& noGC;
+
+  public:
+    JSContext* cx;
+    EdgeVector edges;
+    bool wantNames;
+
+    RootList(JSContext* cx, Maybe<AutoCheckCannotGC>& noGC, bool wantNames = false);
+
+    // Find all GC roots.
+    MOZ_MUST_USE bool init();
+    // Find only GC roots in the provided set of |JSCompartment|s.
+    MOZ_MUST_USE bool init(CompartmentSet& debuggees);
+    // Find only GC roots in the given Debugger object's set of debuggee
+    // compartments.
+    MOZ_MUST_USE bool init(HandleObject debuggees);
+
+    // Returns true if the RootList has been initialized successfully, false
+    // otherwise.
+    bool initialized() { return noGC.isSome(); }
+
+    // Explicitly add the given Node as a root in this RootList. If wantNames is
+    // true, you must pass an edgeName. The RootList does not take ownership of
+    // edgeName.
+    MOZ_MUST_USE bool addRoot(Node node, const char16_t* edgeName = nullptr);
+};
+
+
+/*** Concrete classes for ubi::Node referent types ************************************************/
+
+// The Concrete specialization that lets a ubi::Node refer to a RootList.
+template<>
+class JS_PUBLIC_API(Concrete<RootList>) : public Base {
+  protected:
+    explicit Concrete(RootList* ptr) : Base(ptr) { }
+    // The referent, strongly typed.
+    RootList& get() const { return *static_cast<RootList*>(ptr); }
+
+  public:
+    // Placement-construct a Concrete<RootList> in |storage|.
+    static void construct(void* storage, RootList* ptr) { new (storage) Concrete(ptr); }
+
+    js::UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames) const override;
+
+    const char16_t* typeName() const override { return concreteTypeName; }
+    static const char16_t concreteTypeName[];
+};
+
+// A reusable ubi::Concrete specialization base class for types supported by
+// JS::TraceChildren.
+template<typename Referent>
+class JS_PUBLIC_API(TracerConcrete) : public Base {
+    js::UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames) const override;
+    JS::Zone* zone() const override;
+
+  protected:
+    explicit TracerConcrete(Referent* ptr) : Base(ptr) { }
+    // The referent, strongly typed.
+    Referent& get() const { return *static_cast<Referent*>(ptr); }
+};
+
+// For JS::TraceChildren-based types that have a 'compartment' method.
+// Adds the compartment() override used by Node::compartment().
+template<typename Referent>
+class JS_PUBLIC_API(TracerConcreteWithCompartment) : public TracerConcrete<Referent> {
+    typedef TracerConcrete<Referent> TracerBase;
+    JSCompartment* compartment() const override;
+
+  protected:
+    explicit TracerConcreteWithCompartment(Referent* ptr) : TracerBase(ptr) { }
+};
+
+// Define specializations for some commonly-used public JSAPI types.
+// These can use the generic templates above.
+
+// The JS::Symbol specialization: generic tracing, plus a 'size' override.
+template<>
+class JS_PUBLIC_API(Concrete<JS::Symbol>) : TracerConcrete<JS::Symbol> {
+  protected:
+    explicit Concrete(JS::Symbol* ptr) : TracerConcrete(ptr) { }
+
+  public:
+    // Placement-construct a Concrete<JS::Symbol> in |storage|.
+    static void construct(void* storage, JS::Symbol* ptr) {
+        new (storage) Concrete(ptr);
+    }
+
+    Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+    const char16_t* typeName() const override { return concreteTypeName; }
+    static const char16_t concreteTypeName[];
+};
+
+// The JSScript specialization: script coarse type plus a scriptFilename
+// accessor.
+template<>
+class JS_PUBLIC_API(Concrete<JSScript>) : TracerConcreteWithCompartment<JSScript> {
+  protected:
+    explicit Concrete(JSScript *ptr) : TracerConcreteWithCompartment<JSScript>(ptr) { }
+
+  public:
+    // Placement-construct a Concrete<JSScript> in |storage|.
+    static void construct(void *storage, JSScript *ptr) { new (storage) Concrete(ptr); }
+
+    CoarseType coarseType() const final { return CoarseType::Script; }
+    Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+    const char* scriptFilename() const final;
+
+    const char16_t* typeName() const override { return concreteTypeName; }
+    static const char16_t concreteTypeName[];
+};
+
+// The JSObject specialization.
+template<>
+class JS_PUBLIC_API(Concrete<JSObject>) : public TracerConcreteWithCompartment<JSObject> {
+  protected:
+    explicit Concrete(JSObject* ptr) : TracerConcreteWithCompartment(ptr) { }
+
+  public:
+    // Placement-construct a Concrete<JSObject> in |storage|.
+    static void construct(void* storage, JSObject* ptr) {
+        new (storage) Concrete(ptr);
+    }
+
+    // Object-specific introspection: class name, constructor name, size, and
+    // allocation-site stack (all defined out of line).
+    const char* jsObjectClassName() const override;
+    MOZ_MUST_USE bool jsObjectConstructorName(JSContext* cx, UniqueTwoByteChars& outName)
+        const override;
+    Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+    bool hasAllocationStack() const override;
+    StackFrame allocationStack() const override;
+
+    CoarseType coarseType() const final { return CoarseType::Object; }
+
+    const char16_t* typeName() const override { return concreteTypeName; }
+    static const char16_t concreteTypeName[];
+};
+
+// For JSString, we extend the generic template with a 'size' implementation.
+// (Inherits TracerConcrete privately, per the class-key default.)
+template<>
+class JS_PUBLIC_API(Concrete<JSString>) : TracerConcrete<JSString> {
+  protected:
+    explicit Concrete(JSString *ptr) : TracerConcrete<JSString>(ptr) { }
+
+  public:
+    // Placement-construct a Concrete<JSString> in |storage|.
+    static void construct(void *storage, JSString *ptr) { new (storage) Concrete(ptr); }
+
+    Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+
+    CoarseType coarseType() const final { return CoarseType::String; }
+
+    const char16_t* typeName() const override { return concreteTypeName; }
+    static const char16_t concreteTypeName[];
+};
+
+// The ubi::Node null pointer. Any attempt to operate on a null ubi::Node asserts.
+template<>
+class JS_PUBLIC_API(Concrete<void>) : public Base {
+    // All overrides are private and defined out of line; per the comment
+    // above, using a null node is an error.
+    const char16_t* typeName() const override;
+    Size size(mozilla::MallocSizeOf mallocSizeOf) const override;
+    js::UniquePtr<EdgeRange> edges(JSContext* cx, bool wantNames) const override;
+    JS::Zone* zone() const override;
+    JSCompartment* compartment() const override;
+    CoarseType coarseType() const final;
+
+    explicit Concrete(void* ptr) : Base(ptr) { }
+
+  public:
+    // Placement-construct the null concrete in |storage|.
+    static void construct(void* storage, void* ptr) { new (storage) Concrete(ptr); }
+};
+
+
+} // namespace ubi
+} // namespace JS
+
+namespace js {
+
+// Make ubi::Node::HashPolicy the default hash policy for ubi::Node, and
+// likewise for ubi::StackFrame, so both can be used directly as hash keys.
+template<> struct DefaultHasher<JS::ubi::Node> : JS::ubi::Node::HashPolicy { };
+template<> struct DefaultHasher<JS::ubi::StackFrame> : JS::ubi::StackFrame::HashPolicy { };
+
+} // namespace js
+
+#endif // js_UbiNode_h
diff --git a/js/public/UbiNodeBreadthFirst.h b/js/public/UbiNodeBreadthFirst.h
new file mode 100644
index 0000000000..8446dbc6af
--- /dev/null
+++ b/js/public/UbiNodeBreadthFirst.h
@@ -0,0 +1,244 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UbiNodeBreadthFirst_h
+#define js_UbiNodeBreadthFirst_h
+
+#include "js/UbiNode.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+namespace JS {
+namespace ubi {
+
+// A breadth-first traversal template for graphs of ubi::Nodes.
+//
+// No GC may occur while an instance of this template is live.
+//
+// The provided Handler type should have two members:
+//
+// typename NodeData;
+//
+// The value type of |BreadthFirst<Handler>::visited|, the HashMap of
+// ubi::Nodes that have been visited so far. Since the algorithm needs a
+// hash table like this for its own use anyway, it is simple to let
+// Handler store its own metadata about each node in the same table.
+//
+// For example, if you want to find a shortest path to each node from any
+// traversal starting point, your |NodeData| type could record the first
+// edge to reach each node, and the node from which it originates. Then,
+// when the traversal is complete, you can walk backwards from any node
+// to some starting point, and the path recorded will be a shortest path.
+//
+// This type must have a default constructor. If this type owns any other
+// resources, move constructors and assignment operators are probably a
+// good idea, too.
+//
+// bool operator() (BreadthFirst& traversal,
+// Node origin, const Edge& edge,
+// Handler::NodeData* referentData, bool first);
+//
+// The visitor function, called to report that we have traversed
+// |edge| from |origin|. This is called once for each edge we traverse.
+// As this is a breadth-first search, any prior calls to the visitor function
+// were for origin nodes not further from the start nodes than |origin|.
+//
+// |traversal| is this traversal object, passed along for convenience.
+//
+// |referentData| is a pointer to the value of the entry in
+// |traversal.visited| for |edge.referent|; the visitor function can
+// store whatever metadata it likes about |edge.referent| there.
+//
+// |first| is true if this is the first time we have visited an edge
+// leading to |edge.referent|. This could be stored in NodeData, but
+// the algorithm knows whether it has just created the entry in
+// |traversal.visited|, so it passes it along for convenience.
+//
+// The visitor function may call |traversal.abandonReferent()| if it
+// doesn't want to traverse the outgoing edges of |edge.referent|. You can
+// use this to limit the traversal to a given portion of the graph: it will
+// never visit nodes reachable only through nodes that you have abandoned.
+// Note that |abandonReferent| must be called the first time the given node
+// is reached; that is, |first| must be true.
+//
+// The visitor function may call |traversal.stop()| if it doesn't want
+// to visit any more nodes at all.
+//
+// The visitor function may consult |traversal.visited| for information
+// about other nodes, but it should not add or remove entries.
+//
+// The visitor function should return true on success, or false if an
+// error occurs. A false return value terminates the traversal
+// immediately, and causes BreadthFirst<Handler>::traverse to return
+// false.
+template<typename Handler>
+struct BreadthFirst {
+
+    // Construct a breadth-first traversal object that reports the nodes it
+    // reaches to |handler|. The traversal asserts that no GC happens in its
+    // runtime during its lifetime.
+    //
+    // We do nothing with noGC, other than require it to exist, with a lifetime
+    // that encloses our own.
+    BreadthFirst(JSContext* cx, Handler& handler, const JS::AutoCheckCannotGC& noGC)
+      : wantNames(true), cx(cx), visited(), handler(handler), pending(),
+        traversalBegun(false), stopRequested(false), abandonRequested(false)
+    { }
+
+    // Initialize this traversal object. Return false on OOM.
+    bool init() { return visited.init(); }
+
+    // Add |node| as a starting point for the traversal. You may add
+    // as many starting points as you like. Return false on OOM.
+    bool addStart(Node node) { return pending.append(node); }
+
+    // Add |node| as a starting point for the traversal (see addStart) and also
+    // add it to the |visited| set. Return false on OOM.
+    bool addStartVisited(Node node) {
+        typename NodeMap::AddPtr ptr = visited.lookupForAdd(node);
+        // Only insert if the node is not already present in |visited|.
+        if (!ptr && !visited.add(ptr, node, typename Handler::NodeData()))
+            return false;
+        return addStart(node);
+    }
+
+    // True if the handler wants us to compute edge names; doing so can be
+    // expensive in time and memory. True by default.
+    bool wantNames;
+
+    // Traverse the graph in breadth-first order, starting at the given
+    // start nodes, applying |handler::operator()| for each edge traversed
+    // as described above.
+    //
+    // This should be called only once per instance of this class.
+    //
+    // Return false on OOM or error return from |handler::operator()|.
+    bool traverse()
+    {
+        MOZ_ASSERT(!traversalBegun);
+        traversalBegun = true;
+
+        // While there are pending nodes, visit them.
+        while (!pending.empty()) {
+            Node origin = pending.front();
+            pending.popFront();
+
+            // Get a range containing all origin's outgoing edges.
+            auto range = origin.edges(cx, wantNames);
+            if (!range)
+                return false;
+
+            // Traverse each edge.
+            for (; !range->empty(); range->popFront()) {
+                MOZ_ASSERT(!stopRequested);
+
+                Edge& edge = range->front();
+                typename NodeMap::AddPtr a = visited.lookupForAdd(edge.referent);
+                bool first = !a;
+
+                if (first) {
+                    // This is the first time we've reached |edge.referent|.
+                    // Mark it as visited.
+                    if (!visited.add(a, edge.referent, typename Handler::NodeData()))
+                        return false;
+                }
+
+                MOZ_ASSERT(a);
+
+                // Report this edge to the visitor function.
+                if (!handler(*this, origin, edge, &a->value(), first))
+                    return false;
+
+                // A stop request terminates the whole traversal successfully.
+                if (stopRequested)
+                    return true;
+
+                // Arrange to traverse this edge's referent's outgoing edges
+                // later --- unless |handler| asked us not to.
+                if (abandonRequested) {
+                    // Skip the enqueue; reset flag for future iterations.
+                    abandonRequested = false;
+                } else if (first) {
+                    if (!pending.append(edge.referent))
+                        return false;
+                }
+            }
+        }
+
+        return true;
+    }
+
+    // Stop traversal, and return true from |traverse| without visiting any
+    // more nodes. Only |handler::operator()| should call this function; it
+    // may do so to stop the traversal early, without returning false and
+    // then making |traverse|'s caller disambiguate that result from a real
+    // error.
+    void stop() { stopRequested = true; }
+
+    // Request that the current edge's referent's outgoing edges not be
+    // traversed. This must be called the first time that referent is reached.
+    // Other edges *to* that referent will still be traversed.
+    void abandonReferent() { abandonRequested = true; }
+
+    // The context with which we were constructed.
+    JSContext* cx;
+
+    // A map associating each node N that we have reached with a
+    // Handler::NodeData, for |handler|'s use. This is public, so that
+    // |handler| can access it to see the traversal thus far.
+    using NodeMap = js::HashMap<Node, typename Handler::NodeData, js::DefaultHasher<Node>,
+                                js::SystemAllocPolicy>;
+    NodeMap visited;
+
+  private:
+    // Our handler object.
+    Handler& handler;
+
+    // A queue template. Appending and popping the front are constant time.
+    // Wasted space is never more than some recent actual population plus the
+    // current population.
+    template <typename T>
+    class Queue {
+        js::Vector<T, 0, js::SystemAllocPolicy> head, tail;
+        size_t frontIndex;
+      public:
+        Queue() : head(), tail(), frontIndex(0) { }
+        bool empty() { return frontIndex >= head.length(); }
+        T& front() {
+            MOZ_ASSERT(!empty());
+            return head[frontIndex];
+        }
+        void popFront() {
+            MOZ_ASSERT(!empty());
+            frontIndex++;
+            // Once the head run is exhausted, recycle: free it and promote
+            // the tail to be the new head.
+            if (frontIndex >= head.length()) {
+                head.clearAndFree();
+                head.swap(tail);
+                frontIndex = 0;
+            }
+        }
+        bool append(const T& elt) {
+            // Invariant: frontIndex == 0 implies tail is empty, so appending
+            // to head preserves FIFO order; otherwise append to tail, to be
+            // promoted by popFront.
+            return frontIndex == 0 ? head.append(elt) : tail.append(elt);
+        }
+    };
+
+    // A queue of nodes that we have reached, but whose outgoing edges we
+    // have not yet traversed. Nodes reachable in fewer edges are enqueued
+    // earlier.
+    Queue<Node> pending;
+
+    // True if our traverse function has been called.
+    bool traversalBegun;
+
+    // True if we've been asked to stop the traversal.
+    bool stopRequested;
+
+    // True if we've been asked to abandon the current edge's referent.
+    bool abandonRequested;
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // js_UbiNodeBreadthFirst_h
diff --git a/js/public/UbiNodeCensus.h b/js/public/UbiNodeCensus.h
new file mode 100644
index 0000000000..8c79908864
--- /dev/null
+++ b/js/public/UbiNodeCensus.h
@@ -0,0 +1,252 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UbiNodeCensus_h
+#define js_UbiNodeCensus_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Move.h"
+
+#include <algorithm>
+
+#include "jsapi.h"
+
+#include "js/UbiNode.h"
+#include "js/UbiNodeBreadthFirst.h"
+
+// A census is a ubi::Node traversal that assigns each node to one or more
+// buckets, and returns a report with the size of each bucket.
+//
+// We summarize the results of a census with counts broken down according to
+// criteria selected by the API consumer code that is requesting the census. For
+// example, the following breakdown might give an interesting overview of the
+// heap:
+//
+// - all nodes
+// - objects
+// - objects with a specific [[Class]] *
+// - strings
+// - scripts
+// - all other Node types
+// - nodes with a specific ubi::Node::typeName *
+//
+// Obviously, the parts of this tree marked with * represent many separate
+// counts, depending on how many distinct [[Class]] values and ubi::Node type
+// names we encounter.
+//
+// The supported types of breakdowns are documented in
+// js/src/doc/Debugger/Debugger.Memory.md.
+//
+// When we parse the 'breakdown' argument to takeCensus, we build a tree of
+// CountType nodes. For example, for the breakdown shown in the
+// Debugger.Memory.prototype.takeCensus documentation:
+//
+// {
+// by: "coarseType",
+// objects: { by: "objectClass" },
+// other: { by: "internalType" }
+// }
+//
+// we would build the following tree of CountType subclasses:
+//
+// ByCoarseType
+// objects: ByObjectClass
+// each class: SimpleCount
+// scripts: SimpleCount
+// strings: SimpleCount
+// other: ByUbinodeType
+// each type: SimpleCount
+//
+// The interior nodes are all breakdown types that categorize nodes according to
+// one characteristic or another; and the leaf nodes are all SimpleCount.
+//
+// Each CountType has its own concrete C++ type that holds the counts it
+// produces. SimpleCount::Count just holds totals. ByObjectClass::Count has a
+// hash table whose keys are object class names and whose values are counts of
+// some other type (in the example above, SimpleCount).
+//
+// To keep actual count nodes small, they have no vtable. Instead, each count
+// points to its CountType, which knows how to carry out all the operations we
+// need on a Count. A CountType can produce new count nodes; process nodes as we
+// visit them; build a JS object reporting the results; and destruct count
+// nodes.
+
+
+namespace JS {
+namespace ubi {
+
+struct Census;
+
+class CountBase;
+
+// Deleter for CountBase pointers. Out of line because destroying a count is
+// type-directed (counts have no vtable; see CountBase::destruct).
+struct CountDeleter {
+    JS_PUBLIC_API(void) operator()(CountBase*);
+};
+
+// Owning pointer to a count tree node.
+using CountBasePtr = js::UniquePtr<CountBase, CountDeleter>;
+
+// Abstract base class for CountType nodes.
+struct CountType {
+    explicit CountType() { }
+    virtual ~CountType() { }
+
+    // Destruct a count tree node that this type instance constructed.
+    virtual void destructCount(CountBase& count) = 0;
+
+    // Return a fresh node for the count tree that categorizes nodes according
+    // to this type. Return a nullptr on OOM.
+    virtual CountBasePtr makeCount() = 0;
+
+    // Trace |count| and all its children, for garbage collection.
+    virtual void traceCount(CountBase& count, JSTracer* trc) = 0;
+
+    // Implement the 'count' method for counts returned by this CountType
+    // instance's 'makeCount' method.
+    virtual MOZ_MUST_USE bool count(CountBase& count,
+                                    mozilla::MallocSizeOf mallocSizeOf,
+                                    const Node& node) = 0;
+
+    // Implement the 'report' method for counts returned by this CountType
+    // instance's 'makeCount' method.
+    virtual MOZ_MUST_USE bool report(JSContext* cx, CountBase& count,
+                                     MutableHandleValue report) = 0;
+};
+
+// Owning pointer to a CountType tree node.
+using CountTypePtr = js::UniquePtr<CountType>;
+
+// An abstract base class for count tree nodes.
+class CountBase {
+    // In lieu of a vtable, each CountBase points to its type, which
+    // carries not only the implementations of the CountBase methods, but also
+    // additional parameters for the type's behavior, as specified in the
+    // breakdown argument passed to takeCensus.
+    CountType& type;
+
+  protected:
+    // Protected, non-virtual destructor: counts are destroyed only via
+    // destruct(), which dispatches to the true type.
+    ~CountBase() { }
+
+  public:
+    explicit CountBase(CountType& type)
+      : type(type)
+      , total_(0)
+      , smallestNodeIdCounted_(SIZE_MAX)
+    { }
+
+    // Categorize and count |node| as appropriate for this count's type.
+    MOZ_MUST_USE bool count(mozilla::MallocSizeOf mallocSizeOf, const Node& node) {
+        total_++;
+
+        auto id = node.identifier();
+        if (id < smallestNodeIdCounted_) {
+            smallestNodeIdCounted_ = id;
+        }
+
+// |oldTotal| exists only in DEBUG builds; the MOZ_ASSERT below compiles away
+// otherwise, so release builds never reference it.
+#ifdef DEBUG
+        size_t oldTotal = total_;
+#endif
+
+        bool ret = type.count(*this, mallocSizeOf, node);
+
+        MOZ_ASSERT(total_ == oldTotal,
+                   "CountType::count should not increment total_, CountBase::count handles that");
+
+        return ret;
+    }
+
+    // Construct a JavaScript object reporting the counts recorded in this
+    // count, and store it in |report|. Return true on success, or false on
+    // failure.
+    MOZ_MUST_USE bool report(JSContext* cx, MutableHandleValue report) {
+        return type.report(cx, *this, report);
+    }
+
+    // Down-cast this CountBase to its true type, based on its 'type' member,
+    // and run its destructor.
+    void destruct() { return type.destructCount(*this); }
+
+    // Trace this count for garbage collection.
+    void trace(JSTracer* trc) { type.traceCount(*this, trc); }
+
+    // Total number of nodes this count has seen.
+    size_t total_;
+
+    // The smallest JS::ubi::Node::identifier() passed to this instance's
+    // count() method. This provides a stable way to sort sets.
+    Node::Id smallestNodeIdCounted_;
+};
+
+// An owning, rooted count tree: registers with the custom rooting machinery
+// so the count's contents are traced across GC.
+class RootedCount : JS::CustomAutoRooter {
+    CountBasePtr count;
+
+    void trace(JSTracer* trc) override { count->trace(trc); }
+
+  public:
+    RootedCount(JSContext* cx, CountBasePtr&& count)
+      : CustomAutoRooter(cx),
+        count(Move(count))
+    { }
+    // Smart-pointer-style access to the underlying count.
+    CountBase* operator->() const { return count.get(); }
+    explicit operator bool() const { return count.get(); }
+    operator CountBasePtr&() { return count; }
+};
+
+// Common data for a census traversal, shared across all CountType nodes.
+struct Census {
+    JSContext* const cx;
+    // If the targetZones set is non-empty, then only consider nodes whose zone
+    // is an element of the set. If the targetZones set is empty, then nodes in
+    // all zones are considered.
+    JS::ZoneSet targetZones;
+    Zone* atomsZone;
+
+    explicit Census(JSContext* cx) : cx(cx), atomsZone(nullptr) { }
+
+    // Out-of-line initialization; returns false on failure.
+    MOZ_MUST_USE JS_PUBLIC_API(bool) init();
+};
+
+// A BreadthFirst handler type that conducts a census, using a CountBase to
+// categorize and count each node.
+class CensusHandler {
+    Census& census;
+    CountBasePtr& rootCount;
+    mozilla::MallocSizeOf mallocSizeOf;
+
+  public:
+    CensusHandler(Census& census, CountBasePtr& rootCount, mozilla::MallocSizeOf mallocSizeOf)
+      : census(census),
+        rootCount(rootCount),
+        mallocSizeOf(mallocSizeOf)
+    { }
+
+    // Build the result object from the root count.
+    MOZ_MUST_USE bool report(JSContext* cx, MutableHandleValue report) {
+        return rootCount->report(cx, report);
+    }
+
+    // This class needs to retain no per-node data.
+    class NodeData { };
+
+    // The BreadthFirst visitor callback (defined out of line).
+    MOZ_MUST_USE JS_PUBLIC_API(bool) operator() (BreadthFirst<CensusHandler>& traversal,
+                                                 Node origin, const Edge& edge,
+                                                 NodeData* referentData, bool first);
+};
+
+// The traversal type used to run a census.
+using CensusTraversal = BreadthFirst<CensusHandler>;
+
+// Examine the census options supplied by the API consumer, and (among other
+// things) use that to build a CountType tree.
+MOZ_MUST_USE JS_PUBLIC_API(bool) ParseCensusOptions(JSContext* cx,
+                                                    Census& census, HandleObject options,
+                                                    CountTypePtr& outResult);
+
+// Parse the breakdown language (as described in
+// js/src/doc/Debugger/Debugger.Memory.md) into a CountTypePtr. On error, a
+// null pointer is returned and the error is reported to |cx|.
+JS_PUBLIC_API(CountTypePtr) ParseBreakdown(JSContext* cx, HandleValue breakdownValue);
+
+
+} // namespace ubi
+} // namespace JS
+
+#endif // js_UbiNodeCensus_h
diff --git a/js/public/UbiNodeDominatorTree.h b/js/public/UbiNodeDominatorTree.h
new file mode 100644
index 0000000000..3422b76bc1
--- /dev/null
+++ b/js/public/UbiNodeDominatorTree.h
@@ -0,0 +1,677 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UbiNodeDominatorTree_h
+#define js_UbiNodeDominatorTree_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/DebugOnly.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Move.h"
+#include "mozilla/UniquePtr.h"
+
+#include "jsalloc.h"
+
+#include "js/UbiNode.h"
+#include "js/UbiNodePostOrder.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+namespace JS {
+namespace ubi {
+
+/**
+ * In a directed graph with a root node `R`, a node `A` is said to "dominate" a
+ * node `B` iff every path from `R` to `B` contains `A`. A node `A` is said to
+ * be the "immediate dominator" of a node `B` iff it dominates `B`, is not `B`
+ * itself, and does not dominate any other nodes which also dominate `B` in
+ * turn.
+ *
+ * If we take every node from a graph `G` and create a new graph `T` with edges
+ * to each node from its immediate dominator, then `T` is a tree (each node has
+ * only one immediate dominator, or none if it is the root). This tree is called
+ * a "dominator tree".
+ *
+ * This class represents a dominator tree constructed from a `JS::ubi::Node`
+ * heap graph. The domination relationship and dominator trees are useful tools
+ * for analyzing heap graphs because they tell you:
+ *
+ * - Exactly what could be reclaimed by the GC if some node `A` became
+ * unreachable: those nodes which are dominated by `A`,
+ *
+ * - The "retained size" of a node in the heap graph, in contrast to its
+ * "shallow size". The "shallow size" is the space taken by a node itself,
+ * not counting anything it references. The "retained size" of a node is its
+ * shallow size plus the size of all the things that would be collected if
+ * the original node wasn't (directly or indirectly) referencing them. In
+ * other words, the retained size is the shallow size of a node plus the
+ * shallow sizes of every other node it dominates. For example, the root
+ * node in a binary tree might have a small shallow size that does not take
+ * up much space itself, but it dominates the rest of the binary tree and
+ * its retained size is therefore significant (assuming no external
+ * references into the tree).
+ *
+ * The simple, engineered algorithm presented in "A Simple, Fast Dominance
+ * Algorithm" by Cooper et al.[0] is used to find dominators and construct the
+ * dominator tree. This algorithm runs in O(n^2) time, but is faster in practice
+ * than alternative algorithms with better theoretical running times, such as
+ * Lengauer-Tarjan which runs in O(e * log(n)). The big caveat to that statement
+ * is that Cooper et al found it is faster in practice *on control flow graphs*
+ * and I'm not convinced that this property also holds on *heap* graphs. That
+ * said, the implementation of this algorithm is *much* simpler than
+ * Lengauer-Tarjan and has been found to be fast enough at least for the time
+ * being.
+ *
+ * [0]: http://www.cs.rice.edu/~keith/EMBED/dom.pdf
+ */
+class JS_PUBLIC_API(DominatorTree)
+{
+  private:
+    // Types.
+
+    // Map from a node to the set of nodes with edges into it.
+    using PredecessorSets = js::HashMap<Node, NodeSetPtr, js::DefaultHasher<Node>,
+                                        js::SystemAllocPolicy>;
+    // Map from a node to its index in the post-order vector.
+    using NodeToIndexMap = js::HashMap<Node, uint32_t, js::DefaultHasher<Node>,
+                                       js::SystemAllocPolicy>;
+    class DominatedSets;
+
+  public:
+    class DominatedSetRange;
+
+    /**
+     * A pointer to an immediately dominated node.
+     *
+     * Don't use this type directly; it is no safer than regular pointers. This
+     * is only for use indirectly with range-based for loops and
+     * `DominatedSetRange`.
+     *
+     * @see JS::ubi::DominatorTree::getDominatedSet
+     */
+    class DominatedNodePtr
+    {
+        friend class DominatedSetRange;
+
+        // The post-order vector of nodes that `ptr` indexes into.
+        const JS::ubi::Vector<Node>& postOrder;
+        // Pointer into a DominatedSets bucket; each element is an index into
+        // `postOrder`.
+        const uint32_t* ptr;
+
+        DominatedNodePtr(const JS::ubi::Vector<Node>& postOrder, const uint32_t* ptr)
+          : postOrder(postOrder)
+          , ptr(ptr)
+        { }
+
+      public:
+        bool operator!=(const DominatedNodePtr& rhs) const { return ptr != rhs.ptr; }
+        void operator++() { ptr++; }
+        // Dereference resolves the stored index through the post-order vector.
+        const Node& operator*() const { return postOrder[*ptr]; }
+    };
+
+    /**
+     * A range of immediately dominated `JS::ubi::Node`s for use with
+     * range-based for loops.
+     *
+     * @see JS::ubi::DominatorTree::getDominatedSet
+     */
+    class DominatedSetRange
+    {
+        friend class DominatedSets;
+
+        // The post-order vector that the indices in [beginPtr, endPtr) refer
+        // into.
+        const JS::ubi::Vector<Node>& postOrder;
+        const uint32_t* beginPtr;
+        const uint32_t* endPtr;
+
+        DominatedSetRange(JS::ubi::Vector<Node>& postOrder, const uint32_t* begin, const uint32_t* end)
+          : postOrder(postOrder)
+          , beginPtr(begin)
+          , endPtr(end)
+        {
+            MOZ_ASSERT(begin <= end);
+        }
+
+      public:
+        DominatedNodePtr begin() const {
+            MOZ_ASSERT(beginPtr <= endPtr);
+            return DominatedNodePtr(postOrder, beginPtr);
+        }
+
+        DominatedNodePtr end() const {
+            return DominatedNodePtr(postOrder, endPtr);
+        }
+
+        // The number of nodes in this range.
+        size_t length() const {
+            MOZ_ASSERT(beginPtr <= endPtr);
+            return endPtr - beginPtr;
+        }
+
+        /**
+         * Safely skip ahead `n` dominators in the range, in O(1) time.
+         *
+         * Example usage:
+         *
+         *     mozilla::Maybe<DominatedSetRange> range = myDominatorTree.getDominatedSet(myNode);
+         *     if (range.isNothing()) {
+         *         // Handle unknown nodes however you see fit...
+         *         return false;
+         *     }
+         *
+         *     // Don't care about the first ten, for whatever reason.
+         *     range->skip(10);
+         *     for (const JS::ubi::Node& dominatedNode : *range) {
+         *         // ...
+         *     }
+         */
+        void skip(size_t n) {
+            beginPtr += n;
+            // Clamp to the end of the range rather than walking past it.
+            if (beginPtr > endPtr)
+                beginPtr = endPtr;
+        }
+    };
+
+  private:
+    /**
+     * The set of all dominated sets in a dominator tree.
+     *
+     * Internally stores the sets in a contiguous array, with a side table of
+     * indices into that contiguous array to denote the start index of each
+     * individual set.
+     */
+    class DominatedSets
+    {
+        // Flattened buckets of immediately dominated node indices.
+        JS::ubi::Vector<uint32_t> dominated;
+        // indices[i] is the start of node i's bucket within `dominated`.
+        JS::ubi::Vector<uint32_t> indices;
+
+        DominatedSets(JS::ubi::Vector<uint32_t>&& dominated, JS::ubi::Vector<uint32_t>&& indices)
+          : dominated(mozilla::Move(dominated))
+          , indices(mozilla::Move(indices))
+        { }
+
+      public:
+        // DominatedSets is not copy-able.
+        DominatedSets(const DominatedSets& rhs) = delete;
+        DominatedSets& operator=(const DominatedSets& rhs) = delete;
+
+        // DominatedSets is move-able.
+        DominatedSets(DominatedSets&& rhs)
+          : dominated(mozilla::Move(rhs.dominated))
+          , indices(mozilla::Move(rhs.indices))
+        {
+            MOZ_ASSERT(this != &rhs, "self-move not allowed");
+        }
+        DominatedSets& operator=(DominatedSets&& rhs) {
+            // Destroy-then-placement-new move assignment; safe because
+            // self-move is asserted against in the move constructor.
+            this->~DominatedSets();
+            new (this) DominatedSets(mozilla::Move(rhs));
+            return *this;
+        }
+
+        /**
+         * Create the DominatedSets given the mapping of a node index to its
+         * immediate dominator. Returns `Some` on success, `Nothing` on OOM
+         * failure.
+         */
+        static mozilla::Maybe<DominatedSets> Create(const JS::ubi::Vector<uint32_t>& doms) {
+            auto length = doms.length();
+            MOZ_ASSERT(length < UINT32_MAX);
+
+            // Create a vector `dominated` holding a flattened set of buckets of
+            // immediately dominated children nodes, with a lookup table
+            // `indices` mapping from each node to the beginning of its bucket.
+            //
+            // This has three phases:
+            //
+            // 1. Iterate over the full set of nodes and count up the size of
+            //    each bucket. These bucket sizes are temporarily stored in the
+            //    `indices` vector.
+            //
+            // 2. Convert the `indices` vector to store the cumulative sum of
+            //    the sizes of all buckets before each index, resulting in a
+            //    mapping from node index to one past the end of that node's
+            //    bucket.
+            //
+            // 3. Iterate over the full set of nodes again, filling in bucket
+            //    entries from the end of the bucket's range to its
+            //    beginning. This decrements each index as a bucket entry is
+            //    filled in. After having filled in all of a bucket's entries,
+            //    the index points to the start of the bucket.
+
+            JS::ubi::Vector<uint32_t> dominated;
+            JS::ubi::Vector<uint32_t> indices;
+            if (!dominated.growBy(length) || !indices.growBy(length))
+                return mozilla::Nothing();
+
+            // 1
+            memset(indices.begin(), 0, length * sizeof(uint32_t));
+            for (uint32_t i = 0; i < length; i++)
+                indices[doms[i]]++;
+
+            // 2
+            uint32_t sumOfSizes = 0;
+            for (uint32_t i = 0; i < length; i++) {
+                sumOfSizes += indices[i];
+                MOZ_ASSERT(sumOfSizes <= length);
+                indices[i] = sumOfSizes;
+            }
+
+            // 3
+            for (uint32_t i = 0; i < length; i++) {
+                auto idxOfDom = doms[i];
+                indices[idxOfDom]--;
+                dominated[indices[idxOfDom]] = i;
+            }
+
+#ifdef DEBUG
+            // Assert that our buckets are non-overlapping and don't run off the
+            // end of the vector.
+            uint32_t lastIndex = 0;
+            for (uint32_t i = 0; i < length; i++) {
+                MOZ_ASSERT(indices[i] >= lastIndex);
+                MOZ_ASSERT(indices[i] < length);
+                lastIndex = indices[i];
+            }
+#endif
+
+            return mozilla::Some(DominatedSets(mozilla::Move(dominated), mozilla::Move(indices)));
+        }
+
+        /**
+         * Get the set of nodes immediately dominated by the node at
+         * `postOrder[nodeIndex]`.
+         */
+        DominatedSetRange dominatedSet(JS::ubi::Vector<Node>& postOrder, uint32_t nodeIndex) const {
+            MOZ_ASSERT(postOrder.length() == indices.length());
+            MOZ_ASSERT(nodeIndex < indices.length());
+            // The last bucket extends to the end of `dominated`; every other
+            // bucket ends where the next one begins.
+            auto end = nodeIndex == indices.length() - 1
+                ? dominated.end()
+                : &dominated[indices[nodeIndex + 1]];
+            return DominatedSetRange(postOrder, &dominated[indices[nodeIndex]], end);
+        }
+    };
+
+  private:
+    // Data members.
+
+    // The heap graph's nodes in post-order; the root is the last element.
+    JS::ubi::Vector<Node> postOrder;
+    // Map from each node to its index in `postOrder`.
+    NodeToIndexMap nodeToPostOrderIndex;
+    // doms[i] is the post-order index of the immediate dominator of
+    // postOrder[i]; the root is its own immediate dominator.
+    JS::ubi::Vector<uint32_t> doms;
+    // For each node, the set of nodes it immediately dominates.
+    DominatedSets dominatedSets;
+    // Retained sizes, indexed like `postOrder`. Computed lazily by the first
+    // call to getRetainedSize; Nothing until then.
+    mozilla::Maybe<JS::ubi::Vector<JS::ubi::Node::Size>> retainedSizes;
+
+  private:
+    // We use `UNDEFINED` as a sentinel value in the `doms` vector to signal
+    // that we haven't found any dominators for the node at the corresponding
+    // index in `postOrder` yet.
+    static const uint32_t UNDEFINED = UINT32_MAX;
+
+    DominatorTree(JS::ubi::Vector<Node>&& postOrder, NodeToIndexMap&& nodeToPostOrderIndex,
+                  JS::ubi::Vector<uint32_t>&& doms, DominatedSets&& dominatedSets)
+      : postOrder(mozilla::Move(postOrder))
+      , nodeToPostOrderIndex(mozilla::Move(nodeToPostOrderIndex))
+      , doms(mozilla::Move(doms))
+      , dominatedSets(mozilla::Move(dominatedSets))
+      , retainedSizes(mozilla::Nothing())
+    { }
+
+    // The "two finger" intersection walk from Cooper et al.: repeatedly move
+    // whichever finger has the smaller post-order index up to its current
+    // immediate dominator until both fingers meet at the nearest common
+    // dominator of the two starting nodes.
+    static uint32_t intersect(JS::ubi::Vector<uint32_t>& doms, uint32_t finger1, uint32_t finger2) {
+        while (finger1 != finger2) {
+            if (finger1 < finger2)
+                finger1 = doms[finger1];
+            else if (finger2 < finger1)
+                finger2 = doms[finger2];
+        }
+        return finger1;
+    }
+
+    // Do the post order traversal of the heap graph and populate our
+    // predecessor sets.
+    static MOZ_MUST_USE bool doTraversal(JSContext* cx, AutoCheckCannotGC& noGC, const Node& root,
+                                         JS::ubi::Vector<Node>& postOrder,
+                                         PredecessorSets& predecessorSets) {
+        uint32_t nodeCount = 0;
+        auto onNode = [&](const Node& node) {
+            nodeCount++;
+            // Bail out rather than overflow the uint32_t indices used
+            // throughout this class (UINT32_MAX is the UNDEFINED sentinel).
+            if (MOZ_UNLIKELY(nodeCount == UINT32_MAX))
+                return false;
+            return postOrder.append(node);
+        };
+
+        auto onEdge = [&](const Node& origin, const Edge& edge) {
+            // Lazily create the predecessor set for this edge's referent,
+            // then record `origin` as one of its predecessors.
+            auto p = predecessorSets.lookupForAdd(edge.referent);
+            if (!p) {
+                mozilla::UniquePtr<NodeSet, DeletePolicy<NodeSet>> set(js_new<NodeSet>());
+                if (!set ||
+                    !set->init() ||
+                    !predecessorSets.add(p, edge.referent, mozilla::Move(set)))
+                {
+                    return false;
+                }
+            }
+            MOZ_ASSERT(p && p->value());
+            return p->value()->put(origin);
+        };
+
+        PostOrder traversal(cx, noGC);
+        return traversal.init() &&
+               traversal.addStart(root) &&
+               traversal.traverse(onNode, onEdge);
+    }
+
+    // Populates the given `map` with an entry for each node to its index in
+    // `postOrder`.
+    static MOZ_MUST_USE bool mapNodesToTheirIndices(JS::ubi::Vector<Node>& postOrder,
+                                                    NodeToIndexMap& map) {
+        MOZ_ASSERT(!map.initialized());
+        MOZ_ASSERT(postOrder.length() < UINT32_MAX);
+        uint32_t length = postOrder.length();
+        // Pre-sizing the table means the putNew calls below cannot fail.
+        if (!map.init(length))
+            return false;
+        for (uint32_t i = 0; i < length; i++)
+            map.putNewInfallible(postOrder[i], i);
+        return true;
+    }
+
+    // Convert the Node -> NodeSet predecessorSets to a index -> Vector<index>
+    // form.
+    static MOZ_MUST_USE bool convertPredecessorSetsToVectors(
+        const Node& root,
+        JS::ubi::Vector<Node>& postOrder,
+        PredecessorSets& predecessorSets,
+        NodeToIndexMap& nodeToPostOrderIndex,
+        JS::ubi::Vector<JS::ubi::Vector<uint32_t>>& predecessorVectors)
+    {
+        MOZ_ASSERT(postOrder.length() < UINT32_MAX);
+        uint32_t length = postOrder.length();
+
+        MOZ_ASSERT(predecessorVectors.length() == 0);
+        if (!predecessorVectors.growBy(length))
+            return false;
+
+        // `length - 1` deliberately excludes the root, which has no
+        // predecessor set.
+        for (uint32_t i = 0; i < length - 1; i++) {
+            auto& node = postOrder[i];
+            MOZ_ASSERT(node != root,
+                       "Only the last node should be root, since this was a post order traversal.");
+
+            auto ptr = predecessorSets.lookup(node);
+            MOZ_ASSERT(ptr,
+                       "Because this isn't the root, it had better have predecessors, or else how "
+                       "did we even find it.");
+
+            auto& predecessors = ptr->value();
+            if (!predecessorVectors[i].reserve(predecessors->count()))
+                return false;
+            for (auto range = predecessors->all(); !range.empty(); range.popFront()) {
+                auto ptr = nodeToPostOrderIndex.lookup(range.front());
+                MOZ_ASSERT(ptr);
+                predecessorVectors[i].infallibleAppend(ptr->value());
+            }
+        }
+        // The sets are no longer needed; release them eagerly.
+        predecessorSets.finish();
+        return true;
+    }
+
+    // Initialize `doms` such that the immediate dominator of the `root` is the
+    // `root` itself and all others are `UNDEFINED`.
+    static MOZ_MUST_USE bool initializeDominators(JS::ubi::Vector<uint32_t>& doms,
+                                                  uint32_t length) {
+        MOZ_ASSERT(doms.length() == 0);
+        if (!doms.growByUninitialized(length))
+            return false;
+        // The root is the last node in post-order and dominates itself.
+        doms[length - 1] = length - 1;
+        for (uint32_t i = 0; i < length - 1; i++)
+            doms[i] = UNDEFINED;
+        return true;
+    }
+
+    // Check the internal invariants tying the parallel data structures
+    // together. Debug-only; compiles to nothing in release builds.
+    void assertSanity() const {
+        MOZ_ASSERT(postOrder.length() == doms.length());
+        MOZ_ASSERT(postOrder.length() == nodeToPostOrderIndex.count());
+        MOZ_ASSERT_IF(retainedSizes.isSome(), postOrder.length() == retainedSizes->length());
+    }
+
+    // Fill in `retainedSizes` for every node. Returns false on OOM, leaving
+    // `retainedSizes` as Nothing so a later call can retry.
+    MOZ_MUST_USE bool computeRetainedSizes(mozilla::MallocSizeOf mallocSizeOf) {
+        MOZ_ASSERT(retainedSizes.isNothing());
+        auto length = postOrder.length();
+
+        retainedSizes.emplace();
+        if (!retainedSizes->growBy(length)) {
+            retainedSizes = mozilla::Nothing();
+            return false;
+        }
+
+        // Iterate in forward order so that we know all of a node's children in
+        // the dominator tree have already had their retained size
+        // computed. Then we can simply say that the retained size of a node is
+        // its shallow size (JS::ubi::Node::size) plus the retained sizes of its
+        // immediate children in the tree.
+
+        for (uint32_t i = 0; i < length; i++) {
+            auto size = postOrder[i].size(mallocSizeOf);
+
+            for (const auto& dominated : dominatedSets.dominatedSet(postOrder, i)) {
+                // The root node dominates itself, but shouldn't contribute to
+                // its own retained size.
+                if (dominated == postOrder[length - 1]) {
+                    MOZ_ASSERT(i == length - 1);
+                    continue;
+                }
+
+                auto ptr = nodeToPostOrderIndex.lookup(dominated);
+                MOZ_ASSERT(ptr);
+                auto idxOfDominated = ptr->value();
+                MOZ_ASSERT(idxOfDominated < i);
+                size += retainedSizes.ref()[idxOfDominated];
+            }
+
+            retainedSizes.ref()[i] = size;
+        }
+
+        return true;
+    }
+
+  public:
+    // DominatorTree is not copy-able.
+    DominatorTree(const DominatorTree&) = delete;
+    DominatorTree& operator=(const DominatorTree&) = delete;
+
+    // DominatorTree is move-able.
+    DominatorTree(DominatorTree&& rhs)
+      : postOrder(mozilla::Move(rhs.postOrder))
+      , nodeToPostOrderIndex(mozilla::Move(rhs.nodeToPostOrderIndex))
+      , doms(mozilla::Move(rhs.doms))
+      , dominatedSets(mozilla::Move(rhs.dominatedSets))
+      , retainedSizes(mozilla::Move(rhs.retainedSizes))
+    {
+        MOZ_ASSERT(this != &rhs, "self-move is not allowed");
+    }
+    DominatorTree& operator=(DominatorTree&& rhs) {
+        this->~DominatorTree();
+        new (this) DominatorTree(mozilla::Move(rhs));
+        return *this;
+    }
+
+    /**
+     * Construct a `DominatorTree` of the heap graph visible from `root`. The
+     * `root` is also used as the root of the resulting dominator tree.
+     *
+     * The resulting `DominatorTree` instance must not outlive the
+     * `JS::ubi::Node` graph it was constructed from.
+     *
+     *   - For `JS::ubi::Node` graphs backed by the live heap graph, this means
+     *     that the `DominatorTree`'s lifetime _must_ be contained within the
+     *     scope of the provided `AutoCheckCannotGC` reference because a GC will
+     *     invalidate the nodes.
+     *
+     *   - For `JS::ubi::Node` graphs backed by some other offline structure
+     *     provided by the embedder, the resulting `DominatorTree`'s lifetime is
+     *     bounded by that offline structure's lifetime.
+     *
+     * In practice, this means that within SpiderMonkey we must treat
+     * `DominatorTree` as if it were backed by the live heap graph and trust
+     * that embedders with knowledge of the graph's implementation will do the
+     * Right Thing.
+     *
+     * Returns `mozilla::Nothing()` on OOM failure. It is the caller's
+     * responsibility to handle and report the OOM.
+     */
+    static mozilla::Maybe<DominatorTree>
+    Create(JSContext* cx, AutoCheckCannotGC& noGC, const Node& root) {
+        JS::ubi::Vector<Node> postOrder;
+        PredecessorSets predecessorSets;
+        if (!predecessorSets.init() || !doTraversal(cx, noGC, root, postOrder, predecessorSets))
+            return mozilla::Nothing();
+
+        MOZ_ASSERT(postOrder.length() < UINT32_MAX);
+        uint32_t length = postOrder.length();
+        MOZ_ASSERT(postOrder[length - 1] == root);
+
+        // From here on out we wish to avoid hash table lookups, and we use
+        // indices into `postOrder` instead of actual nodes wherever
+        // possible. This greatly improves the performance of this
+        // implementation, but we have to pay a little bit of upfront cost to
+        // convert our data structures to play along first.
+
+        NodeToIndexMap nodeToPostOrderIndex;
+        if (!mapNodesToTheirIndices(postOrder, nodeToPostOrderIndex))
+            return mozilla::Nothing();
+
+        JS::ubi::Vector<JS::ubi::Vector<uint32_t>> predecessorVectors;
+        if (!convertPredecessorSetsToVectors(root, postOrder, predecessorSets, nodeToPostOrderIndex,
+                                             predecessorVectors))
+            return mozilla::Nothing();
+
+        JS::ubi::Vector<uint32_t> doms;
+        if (!initializeDominators(doms, length))
+            return mozilla::Nothing();
+
+        // The iterative dataflow of Cooper et al.: refine `doms` until it
+        // reaches a fixed point.
+        bool changed = true;
+        while (changed) {
+            changed = false;
+
+            // Iterate over the non-root nodes in reverse post order.
+            for (uint32_t indexPlusOne = length - 1; indexPlusOne > 0; indexPlusOne--) {
+                MOZ_ASSERT(postOrder[indexPlusOne - 1] != root);
+
+                // Take the intersection of every predecessor's dominator set;
+                // that is the current best guess at the immediate dominator for
+                // this node.
+
+                uint32_t newIDomIdx = UNDEFINED;
+
+                // First, find a predecessor that already has a dominator
+                // assigned, to seed the intersection.
+                auto& predecessors = predecessorVectors[indexPlusOne - 1];
+                auto range = predecessors.all();
+                for ( ; !range.empty(); range.popFront()) {
+                    auto idx = range.front();
+                    if (doms[idx] != UNDEFINED) {
+                        newIDomIdx = idx;
+                        break;
+                    }
+                }
+
+                MOZ_ASSERT(newIDomIdx != UNDEFINED,
+                           "Because the root is initialized to dominate itself and is the first "
+                           "node in every path, there must exist a predecessor to this node that "
+                           "also has a dominator.");
+
+                // Then fold the remaining processed predecessors into the
+                // running intersection. Note: `range` continues from where the
+                // seed-finding loop above stopped.
+                for ( ; !range.empty(); range.popFront()) {
+                    auto idx = range.front();
+                    if (doms[idx] != UNDEFINED)
+                        newIDomIdx = intersect(doms, newIDomIdx, idx);
+                }
+
+                // If the immediate dominator changed, we will have to do
+                // another pass of the outer while loop to continue the forward
+                // dataflow.
+                if (newIDomIdx != doms[indexPlusOne - 1]) {
+                    doms[indexPlusOne - 1] = newIDomIdx;
+                    changed = true;
+                }
+            }
+        }
+
+        auto maybeDominatedSets = DominatedSets::Create(doms);
+        if (maybeDominatedSets.isNothing())
+            return mozilla::Nothing();
+
+        return mozilla::Some(DominatorTree(mozilla::Move(postOrder),
+                                           mozilla::Move(nodeToPostOrderIndex),
+                                           mozilla::Move(doms),
+                                           mozilla::Move(*maybeDominatedSets)));
+    }
+
+    /**
+     * Get the root node for this dominator tree.
+     */
+    const Node& root() const {
+        return postOrder[postOrder.length() - 1];
+    }
+
+    /**
+     * Return the immediate dominator of the given `node`. If `node` was not
+     * reachable from the `root` that this dominator tree was constructed from,
+     * then return the null `JS::ubi::Node`.
+     *
+     * Note that the root is its own immediate dominator, so asking for the
+     * root's immediate dominator returns the root itself.
+     */
+    Node getImmediateDominator(const Node& node) const {
+        assertSanity();
+        auto ptr = nodeToPostOrderIndex.lookup(node);
+        if (!ptr)
+            return Node();
+
+        auto idx = ptr->value();
+        MOZ_ASSERT(idx < postOrder.length());
+        return postOrder[doms[idx]];
+    }
+
+    /**
+     * Get the set of nodes immediately dominated by the given `node`. If `node`
+     * is not a member of this dominator tree, return `Nothing`.
+     *
+     * Example usage:
+     *
+     *     mozilla::Maybe<DominatedSetRange> range = myDominatorTree.getDominatedSet(myNode);
+     *     if (range.isNothing()) {
+     *         // Handle unknown node however you see fit...
+     *         return false;
+     *     }
+     *
+     *     for (const JS::ubi::Node& dominatedNode : *range) {
+     *         // Do something with each immediately dominated node...
+     *     }
+     */
+    mozilla::Maybe<DominatedSetRange> getDominatedSet(const Node& node) {
+        assertSanity();
+        auto ptr = nodeToPostOrderIndex.lookup(node);
+        if (!ptr)
+            return mozilla::Nothing();
+
+        auto idx = ptr->value();
+        MOZ_ASSERT(idx < postOrder.length());
+        return mozilla::Some(dominatedSets.dominatedSet(postOrder, idx));
+    }
+
+    /**
+     * Get the retained size of the given `node`. The size is placed in
+     * `outSize`, or 0 if `node` is not a member of the dominator tree. Returns
+     * false on OOM failure, leaving `outSize` unchanged.
+     *
+     * The first successful call computes retained sizes for every node in the
+     * tree; subsequent calls reuse that cached table.
+     */
+    MOZ_MUST_USE bool getRetainedSize(const Node& node, mozilla::MallocSizeOf mallocSizeOf,
+                                      Node::Size& outSize) {
+        assertSanity();
+        auto ptr = nodeToPostOrderIndex.lookup(node);
+        if (!ptr) {
+            outSize = 0;
+            return true;
+        }
+
+        if (retainedSizes.isNothing() && !computeRetainedSizes(mallocSizeOf))
+            return false;
+
+        auto idx = ptr->value();
+        MOZ_ASSERT(idx < postOrder.length());
+        outSize = retainedSizes.ref()[idx];
+        return true;
+    }
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // js_UbiNodeDominatorTree_h
diff --git a/js/public/UbiNodePostOrder.h b/js/public/UbiNodePostOrder.h
new file mode 100644
index 0000000000..a504267769
--- /dev/null
+++ b/js/public/UbiNodePostOrder.h
@@ -0,0 +1,191 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UbiNodePostOrder_h
+#define js_UbiNodePostOrder_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Move.h"
+
+#include "jsalloc.h"
+
+#include "js/UbiNode.h"
+#include "js/Utility.h"
+#include "js/Vector.h"
+
+namespace JS {
+namespace ubi {
+
+/**
+ * A post-order depth-first traversal of `ubi::Node` graphs.
+ *
+ * No GC may occur while an instance of `PostOrder` is live.
+ *
+ * The `NodeVisitor` type provided to `PostOrder::traverse` must have the
+ * following member:
+ *
+ * bool operator()(Node& node)
+ *
+ * The node visitor method. This method is called once for each `node`
+ * reachable from the start set in post-order.
+ *
+ * This visitor function should return true on success, or false if an error
+ * occurs. A false return value terminates the traversal immediately, and
+ * causes `PostOrder::traverse` to return false.
+ *
+ * The `EdgeVisitor` type provided to `PostOrder::traverse` must have the
+ * following member:
+ *
+ * bool operator()(Node& origin, Edge& edge)
+ *
+ * The edge visitor method. This method is called once for each outgoing
+ * `edge` from `origin` that is reachable from the start set.
+ *
+ * NB: UNLIKE NODES, THERE IS NO GUARANTEED ORDER IN WHICH EDGES AND THEIR
+ * ORIGINS ARE VISITED!
+ *
+ * This visitor function should return true on success, or false if an error
+ * occurs. A false return value terminates the traversal immediately, and
+ * causes `PostOrder::traverse` to return false.
+ */
+struct PostOrder {
+  private:
+    // An origin node together with the outgoing edges from it that we have
+    // not yet followed.
+    struct OriginAndEdges {
+        Node origin;
+        EdgeVector edges;
+
+        OriginAndEdges(const Node& node, EdgeVector&& edges)
+          : origin(node)
+          , edges(mozilla::Move(edges))
+        { }
+
+        // OriginAndEdges is move-only.
+        OriginAndEdges(const OriginAndEdges& rhs) = delete;
+        OriginAndEdges& operator=(const OriginAndEdges& rhs) = delete;
+
+        OriginAndEdges(OriginAndEdges&& rhs)
+          : origin(rhs.origin)
+          , edges(mozilla::Move(rhs.edges))
+        {
+            MOZ_ASSERT(&rhs != this, "self-move disallowed");
+        }
+
+        OriginAndEdges& operator=(OriginAndEdges&& rhs) {
+            // Destroy-then-placement-new move assignment.
+            this->~OriginAndEdges();
+            new (this) OriginAndEdges(mozilla::Move(rhs));
+            return *this;
+        }
+    };
+
+    using Stack = js::Vector<OriginAndEdges, 256, js::SystemAllocPolicy>;
+    using Set = js::HashSet<Node, js::DefaultHasher<Node>, js::SystemAllocPolicy>;
+
+    // Context used to enumerate each node's outgoing edges.
+    JSContext* cx;
+    // Nodes that have already been added to the traversal.
+    Set seen;
+    // Explicit DFS stack; the back entry is the node currently being explored.
+    Stack stack;
+#ifdef DEBUG
+    // Whether traverse() has been called; used to enforce single use.
+    bool traversed;
+#endif
+
+  private:
+    // Append every edge in `range` to `edges`. Returns false on OOM.
+    MOZ_MUST_USE bool fillEdgesFromRange(EdgeVector& edges, js::UniquePtr<EdgeRange>& range) {
+        MOZ_ASSERT(range);
+        for ( ; !range->empty(); range->popFront()) {
+            if (!edges.append(mozilla::Move(range->front())))
+                return false;
+        }
+        return true;
+    }
+
+    // Push `node` and all of its outgoing edges onto the traversal stack.
+    // Returns false on OOM or if the node's edges could not be enumerated.
+    MOZ_MUST_USE bool pushForTraversing(const Node& node) {
+        EdgeVector edges;
+        auto range = node.edges(cx, /* wantNames */ false);
+        return range &&
+               fillEdgesFromRange(edges, range) &&
+               stack.append(OriginAndEdges(node, mozilla::Move(edges)));
+    }
+
+
+  public:
+    // Construct a post-order traversal object.
+    //
+    // The traversal asserts that no GC happens in its runtime during its
+    // lifetime via the `AutoCheckCannotGC&` parameter. We do nothing with it,
+    // other than require it to exist with a lifetime that encloses our own.
+    PostOrder(JSContext* cx, AutoCheckCannotGC&)
+      : cx(cx)
+      , seen()
+      , stack()
+#ifdef DEBUG
+      , traversed(false)
+#endif
+    { }
+
+    // Initialize this traversal object. Return false on OOM.
+    MOZ_MUST_USE bool init() { return seen.init(); }
+
+    // Add `node` as a starting point for the traversal. You may add
+    // as many starting points as you like. Returns false on OOM.
+    MOZ_MUST_USE bool addStart(const Node& node) {
+        if (!seen.put(node))
+            return false;
+        return pushForTraversing(node);
+    }
+
+    // Traverse the graph in post-order, starting with the set of nodes passed
+    // to `addStart` and applying `onNode::operator()` for each node in the
+    // graph and `onEdge::operator()` for each edge in the graph, as described
+    // above.
+    //
+    // This should be called only once per instance of this class.
+    //
+    // Return false on OOM or error return from `onNode::operator()` or
+    // `onEdge::operator()`.
+    template<typename NodeVisitor, typename EdgeVisitor>
+    MOZ_MUST_USE bool traverse(NodeVisitor onNode, EdgeVisitor onEdge) {
+#ifdef DEBUG
+        MOZ_ASSERT(!traversed, "Can only traverse() once!");
+        traversed = true;
+#endif
+
+        while (!stack.empty()) {
+            auto& origin = stack.back().origin;
+            auto& edges = stack.back().edges;
+
+            // All of this node's edges have been followed, so all of its
+            // referents have been visited: the node itself may now be
+            // visited (this is what makes the order post-order).
+            if (edges.empty()) {
+                if (!onNode(origin))
+                    return false;
+                stack.popBack();
+                continue;
+            }
+
+            // Take the next unfollowed edge off this node's worklist.
+            Edge edge = mozilla::Move(edges.back());
+            edges.popBack();
+
+            if (!onEdge(origin, edge))
+                return false;
+
+            auto ptr = seen.lookupForAdd(edge.referent);
+            // We've already seen this node, don't follow its edges.
+            if (ptr)
+                continue;
+
+            // Mark the referent as seen and follow its edges.
+            if (!seen.add(ptr, edge.referent) ||
+                !pushForTraversing(edge.referent))
+            {
+                return false;
+            }
+        }
+
+        return true;
+    }
+};
+
+} // namespace ubi
+} // namespace JS
+
+#endif // js_UbiNodePostOrder_h
diff --git a/js/public/UbiNodeShortestPaths.h b/js/public/UbiNodeShortestPaths.h
new file mode 100644
index 0000000000..edd5aebbe5
--- /dev/null
+++ b/js/public/UbiNodeShortestPaths.h
@@ -0,0 +1,350 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UbiNodeShortestPaths_h
+#define js_UbiNodeShortestPaths_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Maybe.h"
+#include "mozilla/Move.h"
+
+#include "jsalloc.h"
+
+#include "js/UbiNodeBreadthFirst.h"
+#include "js/Vector.h"
+
+namespace JS {
+namespace ubi {
+
+/**
+ * A back edge along a path in the heap graph.
+ */
+struct JS_PUBLIC_API(BackEdge)
+{
+ private:
+ Node predecessor_;
+ EdgeName name_;
+
+ public:
+ using Ptr = mozilla::UniquePtr<BackEdge, JS::DeletePolicy<BackEdge>>;
+
+ BackEdge() : predecessor_(), name_(nullptr) { }
+
+ MOZ_MUST_USE bool init(const Node& predecessor, Edge& edge) {
+ MOZ_ASSERT(!predecessor_);
+ MOZ_ASSERT(!name_);
+
+ predecessor_ = predecessor;
+ name_ = mozilla::Move(edge.name);
+ return true;
+ }
+
+ BackEdge(const BackEdge&) = delete;
+ BackEdge& operator=(const BackEdge&) = delete;
+
+ BackEdge(BackEdge&& rhs)
+ : predecessor_(rhs.predecessor_)
+ , name_(mozilla::Move(rhs.name_))
+ {
+ MOZ_ASSERT(&rhs != this);
+ }
+
+ BackEdge& operator=(BackEdge&& rhs) {
+ this->~BackEdge();
+ new(this) BackEdge(Move(rhs));
+ return *this;
+ }
+
+ Ptr clone() const;
+
+ const EdgeName& name() const { return name_; }
+ EdgeName& name() { return name_; }
+
+ const JS::ubi::Node& predecessor() const { return predecessor_; }
+};
+
+/**
+ * A path is a series of back edges from which we discovered a target node.
+ */
+using Path = JS::ubi::Vector<BackEdge*>;
+
+/**
+ * The `JS::ubi::ShortestPaths` type represents a collection of up to N shortest
+ * retaining paths for each of a target set of nodes, starting from the same
+ * root node.
+ */
+struct JS_PUBLIC_API(ShortestPaths)
+{
+ private:
+ // Types, type aliases, and data members.
+
+ using BackEdgeVector = JS::ubi::Vector<BackEdge::Ptr>;
+ using NodeToBackEdgeVectorMap = js::HashMap<Node, BackEdgeVector, js::DefaultHasher<Node>,
+ js::SystemAllocPolicy>;
+
+ struct Handler;
+ using Traversal = BreadthFirst<Handler>;
+
+ /**
+ * A `JS::ubi::BreadthFirst` traversal handler that records back edges for
+ * how we reached each node, allowing us to reconstruct the shortest
+ * retaining paths after the traversal.
+ */
+ struct Handler
+ {
+ using NodeData = BackEdge;
+
+ ShortestPaths& shortestPaths;
+ size_t totalMaxPathsToRecord;
+ size_t totalPathsRecorded;
+
+ explicit Handler(ShortestPaths& shortestPaths)
+ : shortestPaths(shortestPaths)
+ , totalMaxPathsToRecord(shortestPaths.targets_.count() * shortestPaths.maxNumPaths_)
+ , totalPathsRecorded(0)
+ {
+ }
+
+ bool
+ operator()(Traversal& traversal, JS::ubi::Node origin, JS::ubi::Edge& edge,
+ BackEdge* back, bool first)
+ {
+ MOZ_ASSERT(back);
+ MOZ_ASSERT(origin == shortestPaths.root_ || traversal.visited.has(origin));
+ MOZ_ASSERT(totalPathsRecorded < totalMaxPathsToRecord);
+
+ if (first && !back->init(origin, edge))
+ return false;
+
+ if (!shortestPaths.targets_.has(edge.referent))
+ return true;
+
+ // If `first` is true, then we moved the edge's name into `back` in
+ // the above call to `init`. So clone that back edge to get the
+ // correct edge name. If `first` is not true, then our edge name is
+ // still in `edge`. This accounts for the asymmetry between
+ // `back->clone()` in the first branch, and the `init` call in the
+ // second branch.
+
+ if (first) {
+ BackEdgeVector paths;
+ if (!paths.reserve(shortestPaths.maxNumPaths_))
+ return false;
+ auto cloned = back->clone();
+ if (!cloned)
+ return false;
+ paths.infallibleAppend(mozilla::Move(cloned));
+ if (!shortestPaths.paths_.putNew(edge.referent, mozilla::Move(paths)))
+ return false;
+ totalPathsRecorded++;
+ } else {
+ auto ptr = shortestPaths.paths_.lookup(edge.referent);
+ MOZ_ASSERT(ptr,
+ "This isn't the first time we have seen the target node `edge.referent`. "
+ "We should have inserted it into shortestPaths.paths_ the first time we "
+ "saw it.");
+
+ if (ptr->value().length() < shortestPaths.maxNumPaths_) {
+ BackEdge::Ptr thisBackEdge(js_new<BackEdge>());
+ if (!thisBackEdge || !thisBackEdge->init(origin, edge))
+ return false;
+ ptr->value().infallibleAppend(mozilla::Move(thisBackEdge));
+ totalPathsRecorded++;
+ }
+ }
+
+ MOZ_ASSERT(totalPathsRecorded <= totalMaxPathsToRecord);
+ if (totalPathsRecorded == totalMaxPathsToRecord)
+ traversal.stop();
+
+ return true;
+ }
+
+ };
+
+ // The maximum number of paths to record for each node.
+ uint32_t maxNumPaths_;
+
+ // The root node we are starting the search from.
+ Node root_;
+
+ // The set of nodes we are searching for paths to.
+ NodeSet targets_;
+
+ // The resulting paths.
+ NodeToBackEdgeVectorMap paths_;
+
+ // Need to keep alive the traversal's back edges so we can walk them later
+ // when the traversal is over when recreating the shortest paths.
+ Traversal::NodeMap backEdges_;
+
+ private:
+ // Private methods.
+
+ ShortestPaths(uint32_t maxNumPaths, const Node& root, NodeSet&& targets)
+ : maxNumPaths_(maxNumPaths)
+ , root_(root)
+ , targets_(mozilla::Move(targets))
+ , paths_()
+ , backEdges_()
+ {
+ MOZ_ASSERT(maxNumPaths_ > 0);
+ MOZ_ASSERT(root_);
+ MOZ_ASSERT(targets_.initialized());
+ }
+
+ bool initialized() const {
+ return targets_.initialized() &&
+ paths_.initialized() &&
+ backEdges_.initialized();
+ }
+
+ public:
+ // Public methods.
+
+ ShortestPaths(ShortestPaths&& rhs)
+ : maxNumPaths_(rhs.maxNumPaths_)
+ , root_(rhs.root_)
+ , targets_(mozilla::Move(rhs.targets_))
+ , paths_(mozilla::Move(rhs.paths_))
+ , backEdges_(mozilla::Move(rhs.backEdges_))
+ {
+ MOZ_ASSERT(this != &rhs, "self-move is not allowed");
+ }
+
+ ShortestPaths& operator=(ShortestPaths&& rhs) {
+ this->~ShortestPaths();
+ new (this) ShortestPaths(mozilla::Move(rhs));
+ return *this;
+ }
+
+ ShortestPaths(const ShortestPaths&) = delete;
+ ShortestPaths& operator=(const ShortestPaths&) = delete;
+
+ /**
+ * Construct a new `JS::ubi::ShortestPaths`, finding up to `maxNumPaths`
+ * shortest retaining paths for each target node in `targets` starting from
+ * `root`.
+ *
+ * The resulting `ShortestPaths` instance must not outlive the
+ * `JS::ubi::Node` graph it was constructed from.
+ *
+ * - For `JS::ubi::Node` graphs backed by the live heap graph, this means
+ * that the `ShortestPaths`'s lifetime _must_ be contained within the
+ * scope of the provided `AutoCheckCannotGC` reference because a GC will
+ * invalidate the nodes.
+ *
+ * - For `JS::ubi::Node` graphs backed by some other offline structure
+ * provided by the embedder, the resulting `ShortestPaths`'s lifetime is
+ * bounded by that offline structure's lifetime.
+ *
+ * Returns `mozilla::Nothing()` on OOM failure. It is the caller's
+ * responsibility to handle and report the OOM.
+ */
+ static mozilla::Maybe<ShortestPaths>
+ Create(JSContext* cx, AutoCheckCannotGC& noGC, uint32_t maxNumPaths, const Node& root, NodeSet&& targets) {
+ MOZ_ASSERT(targets.count() > 0);
+ MOZ_ASSERT(maxNumPaths > 0);
+
+ size_t count = targets.count();
+ ShortestPaths paths(maxNumPaths, root, mozilla::Move(targets));
+ if (!paths.paths_.init(count))
+ return mozilla::Nothing();
+
+ Handler handler(paths);
+ Traversal traversal(cx, handler, noGC);
+ traversal.wantNames = true;
+ if (!traversal.init() || !traversal.addStart(root) || !traversal.traverse())
+ return mozilla::Nothing();
+
+ // Take ownership of the back edges we created while traversing the
+ // graph so that we can follow them from `paths_` and don't
+ // use-after-free.
+ paths.backEdges_ = mozilla::Move(traversal.visited);
+
+ MOZ_ASSERT(paths.initialized());
+ return mozilla::Some(mozilla::Move(paths));
+ }
+
+ /**
+ * Get a range that iterates over each target node we searched for retaining
+ * paths for. The returned range must not outlive the `ShortestPaths`
+ * instance.
+ */
+ NodeSet::Range eachTarget() const {
+ MOZ_ASSERT(initialized());
+ return targets_.all();
+ }
+
+ /**
+ * Invoke the provided functor/lambda/callable once for each retaining path
+ * discovered for `target`. The `func` is passed a single `JS::ubi::Path&`
+ * argument, which contains each edge along the path ordered starting from
+ * the root and ending at the target, and must not outlive the scope of the
+ * call.
+ *
+ * Note that it is possible that we did not find any paths from the root to
+ * the given target, in which case `func` will not be invoked.
+ */
+ template <class Func>
+ MOZ_MUST_USE bool forEachPath(const Node& target, Func func) {
+ MOZ_ASSERT(initialized());
+ MOZ_ASSERT(targets_.has(target));
+
+ auto ptr = paths_.lookup(target);
+
+ // We didn't find any paths to this target, so nothing to do here.
+ if (!ptr)
+ return true;
+
+ MOZ_ASSERT(ptr->value().length() <= maxNumPaths_);
+
+ Path path;
+ for (const auto& backEdge : ptr->value()) {
+ path.clear();
+
+ if (!path.append(backEdge.get()))
+ return false;
+
+ Node here = backEdge->predecessor();
+ MOZ_ASSERT(here);
+
+ while (here != root_) {
+ auto p = backEdges_.lookup(here);
+ MOZ_ASSERT(p);
+ if (!path.append(&p->value()))
+ return false;
+ here = p->value().predecessor();
+ MOZ_ASSERT(here);
+ }
+
+ path.reverse();
+
+ if (!func(path))
+ return false;
+ }
+
+ return true;
+ }
+};
+
+#ifdef DEBUG
+// A helper function to dump the first `maxNumPaths` shortest retaining paths to
+// `node` from the GC roots. Useful when GC things you expect to have been
+// reclaimed by the collector haven't been!
+//
+// Usage:
+//
+// JSObject* foo = ...;
+// JS::ubi::dumpPaths(rt, JS::ubi::Node(foo));
+JS_PUBLIC_API(void)
+dumpPaths(JSRuntime* rt, Node node, uint32_t maxNumPaths = 10);
+#endif
+
+} // namespace ubi
+} // namespace JS
+
+#endif // js_UbiNodeShortestPaths_h
diff --git a/js/public/UniquePtr.h b/js/public/UniquePtr.h
new file mode 100644
index 0000000000..0236bab425
--- /dev/null
+++ b/js/public/UniquePtr.h
@@ -0,0 +1,61 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_UniquePtr_h
+#define js_UniquePtr_h
+
+#include "mozilla/UniquePtr.h"
+
+#include "js/Utility.h"
+
+namespace js {
+
+// Replacement for mozilla::UniquePtr that defaults to js::DefaultDelete.
+template <typename T, typename D = JS::DeletePolicy<T>>
+using UniquePtr = mozilla::UniquePtr<T, D>;
+
+namespace detail {
+
+template<typename T>
+struct UniqueSelector
+{
+ typedef UniquePtr<T> SingleObject;
+};
+
+template<typename T>
+struct UniqueSelector<T[]>
+{
+ typedef UniquePtr<T[]> UnknownBound;
+};
+
+template<typename T, decltype(sizeof(int)) N>
+struct UniqueSelector<T[N]>
+{
+ typedef UniquePtr<T[N]> KnownBound;
+};
+
+} // namespace detail
+
+// Replacement for mozilla::MakeUnique that correctly calls js_new and produces
+// a js::UniquePtr.
+template<typename T, typename... Args>
+typename detail::UniqueSelector<T>::SingleObject
+MakeUnique(Args&&... aArgs)
+{
+ return UniquePtr<T>(js_new<T>(mozilla::Forward<Args>(aArgs)...));
+}
+
+template<typename T>
+typename detail::UniqueSelector<T>::UnknownBound
+MakeUnique(decltype(sizeof(int)) aN) = delete;
+
+template<typename T, typename... Args>
+typename detail::UniqueSelector<T>::KnownBound
+MakeUnique(Args&&... aArgs) = delete;
+
+} // namespace js
+
+#endif /* js_UniquePtr_h */
diff --git a/js/public/Utility.h b/js/public/Utility.h
new file mode 100644
index 0000000000..68de3004ab
--- /dev/null
+++ b/js/public/Utility.h
@@ -0,0 +1,577 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Utility_h
+#define js_Utility_h
+
+#include "mozilla/Assertions.h"
+#include "mozilla/Atomics.h"
+#include "mozilla/Attributes.h"
+#include "mozilla/Compiler.h"
+#include "mozilla/Move.h"
+#include "mozilla/Scoped.h"
+#include "mozilla/TemplateLib.h"
+#include "mozilla/UniquePtr.h"
+
+#include <stdlib.h>
+#include <string.h>
+
+#ifdef JS_OOM_DO_BACKTRACES
+#include <execinfo.h>
+#include <stdio.h>
+#endif
+
+#include "jstypes.h"
+
+/* The public JS engine namespace. */
+namespace JS {}
+
+/* The mozilla-shared reusable template/utility namespace. */
+namespace mozilla {}
+
+/* The private JS engine namespace. */
+namespace js {}
+
+#define JS_STATIC_ASSERT(cond) static_assert(cond, "JS_STATIC_ASSERT")
+#define JS_STATIC_ASSERT_IF(cond, expr) MOZ_STATIC_ASSERT_IF(cond, expr, "JS_STATIC_ASSERT_IF")
+
+extern MOZ_NORETURN MOZ_COLD JS_PUBLIC_API(void)
+JS_Assert(const char* s, const char* file, int ln);
+
+/*
+ * Custom allocator support for SpiderMonkey
+ */
+#if defined JS_USE_CUSTOM_ALLOCATOR
+# include "jscustomallocator.h"
+#else
+
+namespace js {
+namespace oom {
+
+/*
+ * To make testing OOM in certain helper threads more effective,
+ * allow restricting the OOM testing to a certain helper thread
+ * type. This allows us to fail e.g. in off-thread script parsing
+ * without causing an OOM in the main thread first.
+ */
+enum ThreadType {
+ THREAD_TYPE_NONE = 0, // 0
+ THREAD_TYPE_MAIN, // 1
+ THREAD_TYPE_ASMJS, // 2
+ THREAD_TYPE_ION, // 3
+ THREAD_TYPE_PARSE, // 4
+ THREAD_TYPE_COMPRESS, // 5
+ THREAD_TYPE_GCHELPER, // 6
+ THREAD_TYPE_GCPARALLEL, // 7
+ THREAD_TYPE_PROMISE_TASK, // 8
+ THREAD_TYPE_MAX // Used to check shell function arguments
+};
+
+/*
+ * Getter/Setter functions to encapsulate mozilla::ThreadLocal,
+ * implementation is in jsutil.cpp.
+ */
+# if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+extern bool InitThreadType(void);
+extern void SetThreadType(ThreadType);
+extern JS_FRIEND_API(uint32_t) GetThreadType(void);
+# else
+inline bool InitThreadType(void) { return true; }
+inline void SetThreadType(ThreadType t) {};
+inline uint32_t GetThreadType(void) { return 0; }
+# endif
+
+} /* namespace oom */
+} /* namespace js */
+
+# if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+
+#ifdef JS_OOM_BREAKPOINT
+static MOZ_NEVER_INLINE void js_failedAllocBreakpoint() { asm(""); }
+#define JS_OOM_CALL_BP_FUNC() js_failedAllocBreakpoint()
+#else
+#define JS_OOM_CALL_BP_FUNC() do {} while(0)
+#endif
+
+namespace js {
+namespace oom {
+
+/*
+ * Out of memory testing support. We provide various testing functions to
+ * simulate OOM conditions and so we can test that they are handled correctly.
+ */
+
+extern JS_PUBLIC_DATA(uint32_t) targetThread;
+extern JS_PUBLIC_DATA(uint64_t) maxAllocations;
+extern JS_PUBLIC_DATA(uint64_t) counter;
+extern JS_PUBLIC_DATA(bool) failAlways;
+
+extern void
+SimulateOOMAfter(uint64_t allocations, uint32_t thread, bool always);
+
+extern void
+ResetSimulatedOOM();
+
+inline bool
+IsThreadSimulatingOOM()
+{
+ return js::oom::targetThread && js::oom::targetThread == js::oom::GetThreadType();
+}
+
+inline bool
+IsSimulatedOOMAllocation()
+{
+ return IsThreadSimulatingOOM() &&
+ (counter == maxAllocations || (counter > maxAllocations && failAlways));
+}
+
+inline bool
+ShouldFailWithOOM()
+{
+ if (!IsThreadSimulatingOOM())
+ return false;
+
+ counter++;
+ if (IsSimulatedOOMAllocation()) {
+ JS_OOM_CALL_BP_FUNC();
+ return true;
+ }
+ return false;
+}
+
+inline bool
+HadSimulatedOOM() {
+ return counter >= maxAllocations;
+}
+
+} /* namespace oom */
+} /* namespace js */
+
+# define JS_OOM_POSSIBLY_FAIL() \
+ do { \
+ if (js::oom::ShouldFailWithOOM()) \
+ return nullptr; \
+ } while (0)
+
+# define JS_OOM_POSSIBLY_FAIL_BOOL() \
+ do { \
+ if (js::oom::ShouldFailWithOOM()) \
+ return false; \
+ } while (0)
+
+# else
+
+# define JS_OOM_POSSIBLY_FAIL() do {} while(0)
+# define JS_OOM_POSSIBLY_FAIL_BOOL() do {} while(0)
+namespace js {
+namespace oom {
+static inline bool IsSimulatedOOMAllocation() { return false; }
+static inline bool ShouldFailWithOOM() { return false; }
+} /* namespace oom */
+} /* namespace js */
+
+# endif /* DEBUG || JS_OOM_BREAKPOINT */
+
+namespace js {
+
+/* Disable OOM testing in sections which are not OOM safe. */
+struct MOZ_RAII JS_PUBLIC_DATA(AutoEnterOOMUnsafeRegion)
+{
+ MOZ_NORETURN MOZ_COLD void crash(const char* reason);
+ MOZ_NORETURN MOZ_COLD void crash(size_t size, const char* reason);
+
+ using AnnotateOOMAllocationSizeCallback = void(*)(size_t);
+ static AnnotateOOMAllocationSizeCallback annotateOOMSizeCallback;
+ static void setAnnotateOOMAllocationSizeCallback(AnnotateOOMAllocationSizeCallback callback) {
+ annotateOOMSizeCallback = callback;
+ }
+
+#if defined(DEBUG) || defined(JS_OOM_BREAKPOINT)
+ AutoEnterOOMUnsafeRegion()
+ : oomEnabled_(oom::IsThreadSimulatingOOM() && oom::maxAllocations != UINT64_MAX),
+ oomAfter_(0)
+ {
+ if (oomEnabled_) {
+ MOZ_ALWAYS_TRUE(owner_.compareExchange(nullptr, this));
+ oomAfter_ = int64_t(oom::maxAllocations) - int64_t(oom::counter);
+ oom::maxAllocations = UINT64_MAX;
+ }
+ }
+
+ ~AutoEnterOOMUnsafeRegion() {
+ if (oomEnabled_) {
+ MOZ_ASSERT(oom::maxAllocations == UINT64_MAX);
+ int64_t maxAllocations = int64_t(oom::counter) + oomAfter_;
+ MOZ_ASSERT(maxAllocations >= 0,
+ "alloc count + oom limit exceeds range, your oom limit is probably too large");
+ oom::maxAllocations = uint64_t(maxAllocations);
+ MOZ_ALWAYS_TRUE(owner_.compareExchange(this, nullptr));
+ }
+ }
+
+ private:
+ // Used to catch concurrent use from other threads.
+ static mozilla::Atomic<AutoEnterOOMUnsafeRegion*> owner_;
+
+ bool oomEnabled_;
+ int64_t oomAfter_;
+#endif
+};
+
+} /* namespace js */
+
+static inline void* js_malloc(size_t bytes)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return malloc(bytes);
+}
+
+static inline void* js_calloc(size_t bytes)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return calloc(bytes, 1);
+}
+
+static inline void* js_calloc(size_t nmemb, size_t size)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return calloc(nmemb, size);
+}
+
+static inline void* js_realloc(void* p, size_t bytes)
+{
+ // realloc() with zero size is not portable, as some implementations may
+ // return nullptr on success and free |p| for this. We assume nullptr
+ // indicates failure and that |p| is still valid.
+ MOZ_ASSERT(bytes != 0);
+
+ JS_OOM_POSSIBLY_FAIL();
+ return realloc(p, bytes);
+}
+
+static inline void js_free(void* p)
+{
+ free(p);
+}
+
+static inline char* js_strdup(const char* s)
+{
+ JS_OOM_POSSIBLY_FAIL();
+ return strdup(s);
+}
+#endif/* JS_USE_CUSTOM_ALLOCATOR */
+
+#include <new>
+
+/*
+ * Low-level memory management in SpiderMonkey:
+ *
+ * ** Do not use the standard malloc/free/realloc: SpiderMonkey allows these
+ * to be redefined (via JS_USE_CUSTOM_ALLOCATOR) and Gecko even #define's
+ * these symbols.
+ *
+ * ** Do not use the builtin C++ operator new and delete: these throw on
+ * error and we cannot override them not to.
+ *
+ * Allocation:
+ *
+ * - If the lifetime of the allocation is tied to the lifetime of a GC-thing
+ * (that is, finalizing the GC-thing will free the allocation), call one of
+ * the following functions:
+ *
+ * JSContext::{malloc_,realloc_,calloc_,new_}
+ * JSRuntime::{malloc_,realloc_,calloc_,new_}
+ *
+ * These functions accumulate the number of bytes allocated which is used as
+ * part of the GC-triggering heuristic.
+ *
+ * The difference between the JSContext and JSRuntime versions is that the
+ * cx version reports an out-of-memory error on OOM. (This follows from the
+ * general SpiderMonkey idiom that a JSContext-taking function reports its
+ * own errors.)
+ *
+ * - Otherwise, use js_malloc/js_realloc/js_calloc/js_new
+ *
+ * Deallocation:
+ *
+ * - Ordinarily, use js_free/js_delete.
+ *
+ * - For deallocations during GC finalization, use one of the following
+ * operations on the FreeOp provided to the finalizer:
+ *
+ * FreeOp::{free_,delete_}
+ *
+ * The advantage of these operations is that the memory is batched and freed
+ * on another thread.
+ */
+
+/*
+ * Given a class which should provide a 'new' method, add
+ * JS_DECLARE_NEW_METHODS (see js::MallocProvider for an example).
+ *
+ * Note: Do not add a ; at the end of a use of JS_DECLARE_NEW_METHODS,
+ * or the build will break.
+ */
+#define JS_DECLARE_NEW_METHODS(NEWNAME, ALLOCATOR, QUALIFIERS) \
+ template <class T, typename... Args> \
+ QUALIFIERS T * \
+ NEWNAME(Args&&... args) MOZ_HEAP_ALLOCATOR { \
+ void* memory = ALLOCATOR(sizeof(T)); \
+ return MOZ_LIKELY(memory) \
+ ? new(memory) T(mozilla::Forward<Args>(args)...) \
+ : nullptr; \
+ }
+
+/*
+ * Given a class which should provide 'make' methods, add
+ * JS_DECLARE_MAKE_METHODS (see js::MallocProvider for an example). This
+ * method is functionally the same as JS_DECLARE_NEW_METHODS: it just declares
+ * methods that return mozilla::UniquePtr instances that will singly-manage
+ * ownership of the created object.
+ *
+ * Note: Do not add a ; at the end of a use of JS_DECLARE_MAKE_METHODS,
+ * or the build will break.
+ */
+#define JS_DECLARE_MAKE_METHODS(MAKENAME, NEWNAME, QUALIFIERS)\
+ template <class T, typename... Args> \
+ QUALIFIERS mozilla::UniquePtr<T, JS::DeletePolicy<T>> \
+ MAKENAME(Args&&... args) MOZ_HEAP_ALLOCATOR { \
+ T* ptr = NEWNAME<T>(mozilla::Forward<Args>(args)...); \
+ return mozilla::UniquePtr<T, JS::DeletePolicy<T>>(ptr); \
+ }
+
+JS_DECLARE_NEW_METHODS(js_new, js_malloc, static MOZ_ALWAYS_INLINE)
+
+namespace js {
+
+/*
+ * Calculate the number of bytes needed to allocate |numElems| contiguous
+ * instances of type |T|. Return false if the calculation overflowed.
+ */
+template <typename T>
+MOZ_MUST_USE inline bool
+CalculateAllocSize(size_t numElems, size_t* bytesOut)
+{
+ *bytesOut = numElems * sizeof(T);
+ return (numElems & mozilla::tl::MulOverflowMask<sizeof(T)>::value) == 0;
+}
+
+/*
+ * Calculate the number of bytes needed to allocate a single instance of type
+ * |T| followed by |numExtra| contiguous instances of type |Extra|. Return
+ * false if the calculation overflowed.
+ */
+template <typename T, typename Extra>
+MOZ_MUST_USE inline bool
+CalculateAllocSizeWithExtra(size_t numExtra, size_t* bytesOut)
+{
+ *bytesOut = sizeof(T) + numExtra * sizeof(Extra);
+ return (numExtra & mozilla::tl::MulOverflowMask<sizeof(Extra)>::value) == 0 &&
+ *bytesOut >= sizeof(T);
+}
+
+} /* namespace js */
+
+template <class T>
+static MOZ_ALWAYS_INLINE void
+js_delete(const T* p)
+{
+ if (p) {
+ p->~T();
+ js_free(const_cast<T*>(p));
+ }
+}
+
+template<class T>
+static MOZ_ALWAYS_INLINE void
+js_delete_poison(const T* p)
+{
+ if (p) {
+ p->~T();
+ memset(const_cast<T*>(p), 0x3B, sizeof(T));
+ js_free(const_cast<T*>(p));
+ }
+}
+
+template <class T>
+static MOZ_ALWAYS_INLINE T*
+js_pod_malloc()
+{
+ return static_cast<T*>(js_malloc(sizeof(T)));
+}
+
+template <class T>
+static MOZ_ALWAYS_INLINE T*
+js_pod_calloc()
+{
+ return static_cast<T*>(js_calloc(sizeof(T)));
+}
+
+template <class T>
+static MOZ_ALWAYS_INLINE T*
+js_pod_malloc(size_t numElems)
+{
+ size_t bytes;
+ if (MOZ_UNLIKELY(!js::CalculateAllocSize<T>(numElems, &bytes)))
+ return nullptr;
+ return static_cast<T*>(js_malloc(bytes));
+}
+
+template <class T>
+static MOZ_ALWAYS_INLINE T*
+js_pod_calloc(size_t numElems)
+{
+ size_t bytes;
+ if (MOZ_UNLIKELY(!js::CalculateAllocSize<T>(numElems, &bytes)))
+ return nullptr;
+ return static_cast<T*>(js_calloc(bytes));
+}
+
+template <class T>
+static MOZ_ALWAYS_INLINE T*
+js_pod_realloc(T* prior, size_t oldSize, size_t newSize)
+{
+ MOZ_ASSERT(!(oldSize & mozilla::tl::MulOverflowMask<sizeof(T)>::value));
+ size_t bytes;
+ if (MOZ_UNLIKELY(!js::CalculateAllocSize<T>(newSize, &bytes)))
+ return nullptr;
+ return static_cast<T*>(js_realloc(prior, bytes));
+}
+
+namespace js {
+
+template<typename T>
+struct ScopedFreePtrTraits
+{
+ typedef T* type;
+ static T* empty() { return nullptr; }
+ static void release(T* ptr) { js_free(ptr); }
+};
+SCOPED_TEMPLATE(ScopedJSFreePtr, ScopedFreePtrTraits)
+
+template <typename T>
+struct ScopedDeletePtrTraits : public ScopedFreePtrTraits<T>
+{
+ static void release(T* ptr) { js_delete(ptr); }
+};
+SCOPED_TEMPLATE(ScopedJSDeletePtr, ScopedDeletePtrTraits)
+
+template <typename T>
+struct ScopedReleasePtrTraits : public ScopedFreePtrTraits<T>
+{
+ static void release(T* ptr) { if (ptr) ptr->release(); }
+};
+SCOPED_TEMPLATE(ScopedReleasePtr, ScopedReleasePtrTraits)
+
+} /* namespace js */
+
+namespace JS {
+
+template<typename T>
+struct DeletePolicy
+{
+ constexpr DeletePolicy() {}
+
+ template<typename U>
+ MOZ_IMPLICIT DeletePolicy(DeletePolicy<U> other,
+ typename mozilla::EnableIf<mozilla::IsConvertible<U*, T*>::value,
+ int>::Type dummy = 0)
+ {}
+
+ void operator()(const T* ptr) {
+ js_delete(const_cast<T*>(ptr));
+ }
+};
+
+struct FreePolicy
+{
+ void operator()(const void* ptr) {
+ js_free(const_cast<void*>(ptr));
+ }
+};
+
+typedef mozilla::UniquePtr<char[], JS::FreePolicy> UniqueChars;
+typedef mozilla::UniquePtr<char16_t[], JS::FreePolicy> UniqueTwoByteChars;
+
+} // namespace JS
+
+namespace js {
+
+/* Integral types for all hash functions. */
+typedef uint32_t HashNumber;
+const unsigned HashNumberSizeBits = 32;
+
+namespace detail {
+
+/*
+ * Given a raw hash code, h, return a number that can be used to select a hash
+ * bucket.
+ *
+ * This function aims to produce as uniform an output distribution as possible,
+ * especially in the most significant (leftmost) bits, even though the input
+ * distribution may be highly nonrandom, given the constraints that this must
+ * be deterministic and quick to compute.
+ *
+ * Since the leftmost bits of the result are best, the hash bucket index is
+ * computed by doing ScrambleHashCode(h) / (2^32/N) or the equivalent
+ * right-shift, not ScrambleHashCode(h) % N or the equivalent bit-mask.
+ *
+ * FIXME: OrderedHashTable uses a bit-mask; see bug 775896.
+ */
+inline HashNumber
+ScrambleHashCode(HashNumber h)
+{
+ /*
+ * Simply returning h would not cause any hash tables to produce wrong
+ * answers. But it can produce pathologically bad performance: The caller
+ * right-shifts the result, keeping only the highest bits. The high bits of
+ * hash codes are very often completely entropy-free. (So are the lowest
+ * bits.)
+ *
+ * So we use Fibonacci hashing, as described in Knuth, The Art of Computer
+ * Programming, 6.4. This mixes all the bits of the input hash code h.
+ *
+ * The value of goldenRatio is taken from the hex
+ * expansion of the golden ratio, which starts 1.9E3779B9....
+ * This value is especially good if values with consecutive hash codes
+ * are stored in a hash table; see Knuth for details.
+ */
+ static const HashNumber goldenRatio = 0x9E3779B9U;
+ return h * goldenRatio;
+}
+
+} /* namespace detail */
+
+} /* namespace js */
+
+/* sixgill annotation defines */
+#ifndef HAVE_STATIC_ANNOTATIONS
+# define HAVE_STATIC_ANNOTATIONS
+# ifdef XGILL_PLUGIN
+# define STATIC_PRECONDITION(COND) __attribute__((precondition(#COND)))
+# define STATIC_PRECONDITION_ASSUME(COND) __attribute__((precondition_assume(#COND)))
+# define STATIC_POSTCONDITION(COND) __attribute__((postcondition(#COND)))
+# define STATIC_POSTCONDITION_ASSUME(COND) __attribute__((postcondition_assume(#COND)))
+# define STATIC_INVARIANT(COND) __attribute__((invariant(#COND)))
+# define STATIC_INVARIANT_ASSUME(COND) __attribute__((invariant_assume(#COND)))
+# define STATIC_ASSUME(COND) \
+ JS_BEGIN_MACRO \
+ __attribute__((assume_static(#COND), unused)) \
+ int STATIC_PASTE1(assume_static_, __COUNTER__); \
+ JS_END_MACRO
+# else /* XGILL_PLUGIN */
+# define STATIC_PRECONDITION(COND) /* nothing */
+# define STATIC_PRECONDITION_ASSUME(COND) /* nothing */
+# define STATIC_POSTCONDITION(COND) /* nothing */
+# define STATIC_POSTCONDITION_ASSUME(COND) /* nothing */
+# define STATIC_INVARIANT(COND) /* nothing */
+# define STATIC_INVARIANT_ASSUME(COND) /* nothing */
+# define STATIC_ASSUME(COND) JS_BEGIN_MACRO /* nothing */ JS_END_MACRO
+# endif /* XGILL_PLUGIN */
+# define STATIC_SKIP_INFERENCE STATIC_INVARIANT(skip_inference())
+#endif /* HAVE_STATIC_ANNOTATIONS */
+
+#endif /* js_Utility_h */
diff --git a/js/public/Value.h b/js/public/Value.h
new file mode 100644
index 0000000000..00fdad5861
--- /dev/null
+++ b/js/public/Value.h
@@ -0,0 +1,1509 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+/* JS::Value implementation. */
+
+#ifndef js_Value_h
+#define js_Value_h
+
+#include "mozilla/Attributes.h"
+#include "mozilla/Casting.h"
+#include "mozilla/FloatingPoint.h"
+#include "mozilla/Likely.h"
+
+#include <limits> /* for std::numeric_limits */
+
+#include "js-config.h"
+#include "jstypes.h"
+
+#include "js/GCAPI.h"
+#include "js/RootingAPI.h"
+#include "js/Utility.h"
+
+namespace JS { class Value; }
+
+/* JS::Value can store a full int32_t. */
+#define JSVAL_INT_BITS 32
+#define JSVAL_INT_MIN ((int32_t)0x80000000)
+#define JSVAL_INT_MAX ((int32_t)0x7fffffff)
+
+#if defined(JS_PUNBOX64)
+# define JSVAL_TAG_SHIFT 47
+#endif
+
+// Use enums so that printing a JS::Value in the debugger shows nice
+// symbolic type tags.
+
+#if defined(_MSC_VER)
+# define JS_ENUM_HEADER(id, type) enum id : type
+# define JS_ENUM_FOOTER(id)
+#else
+# define JS_ENUM_HEADER(id, type) enum id
+# define JS_ENUM_FOOTER(id) __attribute__((packed))
+#endif
+
+/* Remember to propagate changes to the C defines below. */
+JS_ENUM_HEADER(JSValueType, uint8_t)
+{
+ JSVAL_TYPE_DOUBLE = 0x00,
+ JSVAL_TYPE_INT32 = 0x01,
+ JSVAL_TYPE_UNDEFINED = 0x02,
+ JSVAL_TYPE_BOOLEAN = 0x03,
+ JSVAL_TYPE_MAGIC = 0x04,
+ JSVAL_TYPE_STRING = 0x05,
+ JSVAL_TYPE_SYMBOL = 0x06,
+ JSVAL_TYPE_PRIVATE_GCTHING = 0x07,
+ JSVAL_TYPE_NULL = 0x08,
+ JSVAL_TYPE_OBJECT = 0x0c,
+
+ /* These never appear in a jsval; they are only provided as an out-of-band value. */
+ JSVAL_TYPE_UNKNOWN = 0x20,
+ JSVAL_TYPE_MISSING = 0x21
+} JS_ENUM_FOOTER(JSValueType);
+
+static_assert(sizeof(JSValueType) == 1,
+ "compiler typed enum support is apparently buggy");
+
+#if defined(JS_NUNBOX32)
+
+/* Remember to propagate changes to the C defines below. */
+JS_ENUM_HEADER(JSValueTag, uint32_t)
+{
+ JSVAL_TAG_CLEAR = 0xFFFFFF80,
+ JSVAL_TAG_INT32 = JSVAL_TAG_CLEAR | JSVAL_TYPE_INT32,
+ JSVAL_TAG_UNDEFINED = JSVAL_TAG_CLEAR | JSVAL_TYPE_UNDEFINED,
+ JSVAL_TAG_STRING = JSVAL_TAG_CLEAR | JSVAL_TYPE_STRING,
+ JSVAL_TAG_SYMBOL = JSVAL_TAG_CLEAR | JSVAL_TYPE_SYMBOL,
+ JSVAL_TAG_BOOLEAN = JSVAL_TAG_CLEAR | JSVAL_TYPE_BOOLEAN,
+ JSVAL_TAG_MAGIC = JSVAL_TAG_CLEAR | JSVAL_TYPE_MAGIC,
+ JSVAL_TAG_NULL = JSVAL_TAG_CLEAR | JSVAL_TYPE_NULL,
+ JSVAL_TAG_OBJECT = JSVAL_TAG_CLEAR | JSVAL_TYPE_OBJECT,
+ JSVAL_TAG_PRIVATE_GCTHING = JSVAL_TAG_CLEAR | JSVAL_TYPE_PRIVATE_GCTHING
+} JS_ENUM_FOOTER(JSValueTag);
+
+static_assert(sizeof(JSValueTag) == sizeof(uint32_t),
+ "compiler typed enum support is apparently buggy");
+
+#elif defined(JS_PUNBOX64)
+
+/* Remember to propagate changes to the C defines below. */
+JS_ENUM_HEADER(JSValueTag, uint32_t)
+{
+ JSVAL_TAG_MAX_DOUBLE = 0x1FFF0,
+ JSVAL_TAG_INT32 = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_INT32,
+ JSVAL_TAG_UNDEFINED = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_UNDEFINED,
+ JSVAL_TAG_STRING = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_STRING,
+ JSVAL_TAG_SYMBOL = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_SYMBOL,
+ JSVAL_TAG_BOOLEAN = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_BOOLEAN,
+ JSVAL_TAG_MAGIC = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_MAGIC,
+ JSVAL_TAG_NULL = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_NULL,
+ JSVAL_TAG_OBJECT = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_OBJECT,
+ JSVAL_TAG_PRIVATE_GCTHING = JSVAL_TAG_MAX_DOUBLE | JSVAL_TYPE_PRIVATE_GCTHING
+} JS_ENUM_FOOTER(JSValueTag);
+
+static_assert(sizeof(JSValueTag) == sizeof(uint32_t),
+ "compiler typed enum support is apparently buggy");
+
+JS_ENUM_HEADER(JSValueShiftedTag, uint64_t)
+{
+ JSVAL_SHIFTED_TAG_MAX_DOUBLE = ((((uint64_t)JSVAL_TAG_MAX_DOUBLE) << JSVAL_TAG_SHIFT) | 0xFFFFFFFF),
+ JSVAL_SHIFTED_TAG_INT32 = (((uint64_t)JSVAL_TAG_INT32) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_UNDEFINED = (((uint64_t)JSVAL_TAG_UNDEFINED) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_STRING = (((uint64_t)JSVAL_TAG_STRING) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_SYMBOL = (((uint64_t)JSVAL_TAG_SYMBOL) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_BOOLEAN = (((uint64_t)JSVAL_TAG_BOOLEAN) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_MAGIC = (((uint64_t)JSVAL_TAG_MAGIC) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_NULL = (((uint64_t)JSVAL_TAG_NULL) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_OBJECT = (((uint64_t)JSVAL_TAG_OBJECT) << JSVAL_TAG_SHIFT),
+ JSVAL_SHIFTED_TAG_PRIVATE_GCTHING = (((uint64_t)JSVAL_TAG_PRIVATE_GCTHING) << JSVAL_TAG_SHIFT)
+} JS_ENUM_FOOTER(JSValueShiftedTag);
+
+static_assert(sizeof(JSValueShiftedTag) == sizeof(uint64_t),
+ "compiler typed enum support is apparently buggy");
+
+#endif
+
+/*
+ * All our supported compilers implement C++11 |enum Foo : T| syntax, so don't
+ * expose these macros. (This macro exists *only* because gcc bug 51242
+ * <https://gcc.gnu.org/bugzilla/show_bug.cgi?id=51242> makes bit-fields of
+ * typed enums trigger a warning that can't be turned off. Don't expose it
+ * beyond this file!)
+ */
+#undef JS_ENUM_HEADER
+#undef JS_ENUM_FOOTER
+
+#if defined(JS_NUNBOX32)
+
+#define JSVAL_TYPE_TO_TAG(type) ((JSValueTag)(JSVAL_TAG_CLEAR | (type)))
+
+#define JSVAL_LOWER_INCL_TAG_OF_OBJ_OR_NULL_SET JSVAL_TAG_NULL
+#define JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET JSVAL_TAG_OBJECT
+#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET JSVAL_TAG_INT32
+#define JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET JSVAL_TAG_STRING
+
+#elif defined(JS_PUNBOX64)
+
+#define JSVAL_PAYLOAD_MASK 0x00007FFFFFFFFFFFLL
+#define JSVAL_TAG_MASK 0xFFFF800000000000LL
+#define JSVAL_TYPE_TO_TAG(type) ((JSValueTag)(JSVAL_TAG_MAX_DOUBLE | (type)))
+#define JSVAL_TYPE_TO_SHIFTED_TAG(type) (((uint64_t)JSVAL_TYPE_TO_TAG(type)) << JSVAL_TAG_SHIFT)
+
+#define JSVAL_LOWER_INCL_TAG_OF_OBJ_OR_NULL_SET JSVAL_TAG_NULL
+#define JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET JSVAL_TAG_OBJECT
+#define JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET JSVAL_TAG_INT32
+#define JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET JSVAL_TAG_STRING
+
+#define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_OBJ_OR_NULL_SET JSVAL_SHIFTED_TAG_NULL
+#define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET JSVAL_SHIFTED_TAG_OBJECT
+#define JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET JSVAL_SHIFTED_TAG_UNDEFINED
+#define JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET JSVAL_SHIFTED_TAG_STRING
+
+#endif /* JS_PUNBOX64 */
+
+typedef enum JSWhyMagic
+{
+ /** a hole in a native object's elements */
+ JS_ELEMENTS_HOLE,
+
+ /** there is not a pending iterator value */
+ JS_NO_ITER_VALUE,
+
+ /** exception value thrown when closing a generator */
+ JS_GENERATOR_CLOSING,
+
+ /** compiler sentinel value */
+ JS_NO_CONSTANT,
+
+ /** used in debug builds to catch tracing errors */
+ JS_THIS_POISON,
+
+ /** used in debug builds to catch tracing errors */
+ JS_ARG_POISON,
+
+ /** an empty subnode in the AST serializer */
+ JS_SERIALIZE_NO_NODE,
+
+ /** lazy arguments value on the stack */
+ JS_LAZY_ARGUMENTS,
+
+ /** optimized-away 'arguments' value */
+ JS_OPTIMIZED_ARGUMENTS,
+
+ /** magic value passed to natives to indicate construction */
+ JS_IS_CONSTRUCTING,
+
+ /** value of static block object slot */
+ JS_BLOCK_NEEDS_CLONE,
+
+ /** see class js::HashableValue */
+ JS_HASH_KEY_EMPTY,
+
+ /** error while running Ion code */
+ JS_ION_ERROR,
+
+ /** missing recover instruction result */
+ JS_ION_BAILOUT,
+
+ /** optimized out slot */
+ JS_OPTIMIZED_OUT,
+
+ /** uninitialized lexical bindings that produce ReferenceError on touch. */
+ JS_UNINITIALIZED_LEXICAL,
+
+ /** for local use */
+ JS_GENERIC_MAGIC,
+
+ JS_WHY_MAGIC_COUNT
+} JSWhyMagic;
+
+namespace JS {
+
+static inline constexpr JS::Value UndefinedValue();
+static inline JS::Value PoisonedObjectValue(JSObject* obj);
+
+namespace detail {
+
+constexpr int CanonicalizedNaNSignBit = 0;
+constexpr uint64_t CanonicalizedNaNSignificand = 0x8000000000000ULL;
+
+constexpr uint64_t CanonicalizedNaNBits =
+ mozilla::SpecificNaNBits<double,
+ detail::CanonicalizedNaNSignBit,
+ detail::CanonicalizedNaNSignificand>::value;
+
+} // namespace detail
+
+/**
+ * Returns a generic quiet NaN value, with all payload bits set to zero.
+ *
+ * Among other properties, this NaN's bit pattern conforms to JS::Value's
+ * bit pattern restrictions.
+ */
+static MOZ_ALWAYS_INLINE double
+GenericNaN()
+{
+ return mozilla::SpecificNaN<double>(detail::CanonicalizedNaNSignBit,
+ detail::CanonicalizedNaNSignificand);
+}
+
+/* MSVC with PGO miscompiles this function. */
+#if defined(_MSC_VER)
+# pragma optimize("g", off)
+#endif
+static inline double
+CanonicalizeNaN(double d)
+{
+ if (MOZ_UNLIKELY(mozilla::IsNaN(d)))
+ return GenericNaN();
+ return d;
+}
+#if defined(_MSC_VER)
+# pragma optimize("", on)
+#endif
+
+/**
+ * JS::Value is the interface for a single JavaScript Engine value. A few
+ * general notes on JS::Value:
+ *
+ * - JS::Value has setX() and isX() members for X in
+ *
+ * { Int32, Double, String, Symbol, Boolean, Undefined, Null, Object, Magic }
+ *
+ * JS::Value also contains toX() for each of the non-singleton types.
+ *
+ * - Magic is a singleton type whose payload contains either a JSWhyMagic "reason" for
+ * the magic value or a uint32_t value. By providing JSWhyMagic values when
+ * creating and checking for magic values, it is possible to assert, at
+ * runtime, that only magic values with the expected reason flow through a
+ * particular value. For example, if cx->exception has a magic value, the
+ * reason must be JS_GENERATOR_CLOSING.
+ *
+ * - The JS::Value operations are preferred. The JSVAL_* operations remain for
+ * compatibility; they may be removed at some point. These operations mostly
+ * provide similar functionality. But there are a few key differences. One
+ * is that JS::Value gives null a separate type.
+ * Also, to help prevent mistakenly boxing a nullable JSObject* as an object,
+ * Value::setObject takes a JSObject&. (Conversely, Value::toObject returns a
+ * JSObject&.) A convenience member Value::setObjectOrNull is provided.
+ *
+ * - JSVAL_VOID is the same as the singleton value of the Undefined type.
+ *
+ * - Note that JS::Value is 8 bytes on 32 and 64-bit architectures. Thus, on
+ * 32-bit user code should avoid copying jsval/JS::Value as much as possible,
+ * preferring to pass by const Value&.
+ */
+class MOZ_NON_PARAM alignas(8) Value
+{
+ public:
+#if defined(JS_NUNBOX32)
+ using PayloadType = uint32_t;
+#elif defined(JS_PUNBOX64)
+ using PayloadType = uint64_t;
+#endif
+
+ /*
+     * N.B. the default constructor leaves Value uninitialized. Adding a default
+ * constructor prevents Value from being stored in a union.
+ */
+ Value() = default;
+ Value(const Value& v) = default;
+
+ /**
+ * Returns false if creating a NumberValue containing the given type would
+ * be lossy, true otherwise.
+ */
+ template <typename T>
+ static bool isNumberRepresentable(const T t) {
+ return T(double(t)) == t;
+ }
+
+ /*** Mutators ***/
+
+ void setNull() {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_NULL, 0);
+ }
+
+ void setUndefined() {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_UNDEFINED, 0);
+ }
+
+ void setInt32(int32_t i) {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_INT32, uint32_t(i));
+ }
+
+ int32_t& getInt32Ref() {
+ MOZ_ASSERT(isInt32());
+ return data.s.payload.i32;
+ }
+
+ void setDouble(double d) {
+ // Don't assign to data.asDouble to fix a miscompilation with
+ // GCC 5.2.1 and 5.3.1. See bug 1312488.
+ data = layout(d);
+ MOZ_ASSERT(isDouble());
+ }
+
+ void setNaN() {
+ setDouble(GenericNaN());
+ }
+
+ double& getDoubleRef() {
+ MOZ_ASSERT(isDouble());
+ return data.asDouble;
+ }
+
+ void setString(JSString* str) {
+ MOZ_ASSERT(uintptr_t(str) > 0x1000);
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_STRING, PayloadType(str));
+ }
+
+ void setSymbol(JS::Symbol* sym) {
+ MOZ_ASSERT(uintptr_t(sym) > 0x1000);
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_SYMBOL, PayloadType(sym));
+ }
+
+ void setObject(JSObject& obj) {
+ MOZ_ASSERT(uintptr_t(&obj) > 0x1000 || uintptr_t(&obj) == 0x48);
+#if defined(JS_PUNBOX64)
+ // VisualStudio cannot contain parenthesized C++ style cast and shift
+ // inside decltype in template parameter:
+ // AssertionConditionType<decltype((uintptr_t(x) >> 1))>
+        // It throws a syntax error.
+ MOZ_ASSERT((((uintptr_t)&obj) >> JSVAL_TAG_SHIFT) == 0);
+#endif
+ setObjectNoCheck(&obj);
+ }
+
+ private:
+ void setObjectNoCheck(JSObject* obj) {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_OBJECT, PayloadType(obj));
+ }
+
+ friend inline Value PoisonedObjectValue(JSObject* obj);
+
+ public:
+ void setBoolean(bool b) {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_BOOLEAN, uint32_t(b));
+ }
+
+ void setMagic(JSWhyMagic why) {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_MAGIC, uint32_t(why));
+ }
+
+ void setMagicUint32(uint32_t payload) {
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_MAGIC, payload);
+ }
+
+ bool setNumber(uint32_t ui) {
+ if (ui > JSVAL_INT_MAX) {
+ setDouble((double)ui);
+ return false;
+ } else {
+ setInt32((int32_t)ui);
+ return true;
+ }
+ }
+
+ bool setNumber(double d) {
+ int32_t i;
+ if (mozilla::NumberIsInt32(d, &i)) {
+ setInt32(i);
+ return true;
+ }
+
+ setDouble(d);
+ return false;
+ }
+
+ void setObjectOrNull(JSObject* arg) {
+ if (arg)
+ setObject(*arg);
+ else
+ setNull();
+ }
+
+ void swap(Value& rhs) {
+ uint64_t tmp = rhs.data.asBits;
+ rhs.data.asBits = data.asBits;
+ data.asBits = tmp;
+ }
+
+ private:
+ JSValueTag toTag() const {
+#if defined(JS_NUNBOX32)
+ return data.s.tag;
+#elif defined(JS_PUNBOX64)
+ return JSValueTag(data.asBits >> JSVAL_TAG_SHIFT);
+#endif
+ }
+
+ public:
+ /*** JIT-only interfaces to interact with and create raw Values ***/
+#if defined(JS_NUNBOX32)
+ PayloadType toNunboxPayload() const {
+ return data.s.payload.i32;
+ }
+
+ JSValueTag toNunboxTag() const {
+ return data.s.tag;
+ }
+#elif defined(JS_PUNBOX64)
+ const void* bitsAsPunboxPointer() const {
+ return reinterpret_cast<void*>(data.asBits);
+ }
+#endif
+
+ /*** Value type queries ***/
+
+ /*
+ * N.B. GCC, in some but not all cases, chooses to emit signed comparison
+ * of JSValueTag even though its underlying type has been forced to be
+ * uint32_t. Thus, all comparisons should explicitly cast operands to
+ * uint32_t.
+ */
+
+ bool isUndefined() const {
+#if defined(JS_NUNBOX32)
+ return toTag() == JSVAL_TAG_UNDEFINED;
+#elif defined(JS_PUNBOX64)
+ return data.asBits == JSVAL_SHIFTED_TAG_UNDEFINED;
+#endif
+ }
+
+ bool isNull() const {
+#if defined(JS_NUNBOX32)
+ return toTag() == JSVAL_TAG_NULL;
+#elif defined(JS_PUNBOX64)
+ return data.asBits == JSVAL_SHIFTED_TAG_NULL;
+#endif
+ }
+
+ bool isNullOrUndefined() const {
+ return isNull() || isUndefined();
+ }
+
+ bool isInt32() const {
+ return toTag() == JSVAL_TAG_INT32;
+ }
+
+ bool isInt32(int32_t i32) const {
+ return data.asBits == bitsFromTagAndPayload(JSVAL_TAG_INT32, uint32_t(i32));
+ }
+
+ bool isDouble() const {
+#if defined(JS_NUNBOX32)
+ return uint32_t(toTag()) <= uint32_t(JSVAL_TAG_CLEAR);
+#elif defined(JS_PUNBOX64)
+ return (data.asBits | mozilla::DoubleTypeTraits::kSignBit) <= JSVAL_SHIFTED_TAG_MAX_DOUBLE;
+#endif
+ }
+
+ bool isNumber() const {
+#if defined(JS_NUNBOX32)
+ MOZ_ASSERT(toTag() != JSVAL_TAG_CLEAR);
+ return uint32_t(toTag()) <= uint32_t(JSVAL_UPPER_INCL_TAG_OF_NUMBER_SET);
+#elif defined(JS_PUNBOX64)
+ return data.asBits < JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_NUMBER_SET;
+#endif
+ }
+
+ bool isString() const {
+ return toTag() == JSVAL_TAG_STRING;
+ }
+
+ bool isSymbol() const {
+ return toTag() == JSVAL_TAG_SYMBOL;
+ }
+
+ bool isObject() const {
+#if defined(JS_NUNBOX32)
+ return toTag() == JSVAL_TAG_OBJECT;
+#elif defined(JS_PUNBOX64)
+ MOZ_ASSERT((data.asBits >> JSVAL_TAG_SHIFT) <= JSVAL_TAG_OBJECT);
+ return data.asBits >= JSVAL_SHIFTED_TAG_OBJECT;
+#endif
+ }
+
+ bool isPrimitive() const {
+#if defined(JS_NUNBOX32)
+ return uint32_t(toTag()) < uint32_t(JSVAL_UPPER_EXCL_TAG_OF_PRIMITIVE_SET);
+#elif defined(JS_PUNBOX64)
+ return data.asBits < JSVAL_UPPER_EXCL_SHIFTED_TAG_OF_PRIMITIVE_SET;
+#endif
+ }
+
+ bool isObjectOrNull() const {
+ MOZ_ASSERT(uint32_t(toTag()) <= uint32_t(JSVAL_TAG_OBJECT));
+#if defined(JS_NUNBOX32)
+ return uint32_t(toTag()) >= uint32_t(JSVAL_LOWER_INCL_TAG_OF_OBJ_OR_NULL_SET);
+#elif defined(JS_PUNBOX64)
+ return data.asBits >= JSVAL_LOWER_INCL_SHIFTED_TAG_OF_OBJ_OR_NULL_SET;
+#endif
+ }
+
+ bool isGCThing() const {
+#if defined(JS_NUNBOX32)
+ /* gcc sometimes generates signed < without explicit casts. */
+ return uint32_t(toTag()) >= uint32_t(JSVAL_LOWER_INCL_TAG_OF_GCTHING_SET);
+#elif defined(JS_PUNBOX64)
+ return data.asBits >= JSVAL_LOWER_INCL_SHIFTED_TAG_OF_GCTHING_SET;
+#endif
+ }
+
+ bool isBoolean() const {
+ return toTag() == JSVAL_TAG_BOOLEAN;
+ }
+
+ bool isTrue() const {
+ return data.asBits == bitsFromTagAndPayload(JSVAL_TAG_BOOLEAN, uint32_t(true));
+ }
+
+ bool isFalse() const {
+ return data.asBits == bitsFromTagAndPayload(JSVAL_TAG_BOOLEAN, uint32_t(false));
+ }
+
+ bool isMagic() const {
+ return toTag() == JSVAL_TAG_MAGIC;
+ }
+
+ bool isMagic(JSWhyMagic why) const {
+ MOZ_ASSERT_IF(isMagic(), data.s.payload.why == why);
+ return isMagic();
+ }
+
+ bool isMarkable() const {
+ return isGCThing() && !isNull();
+ }
+
+ JS::TraceKind traceKind() const {
+ MOZ_ASSERT(isMarkable());
+ static_assert((JSVAL_TAG_STRING & 0x03) == size_t(JS::TraceKind::String),
+ "Value type tags must correspond with JS::TraceKinds.");
+ static_assert((JSVAL_TAG_SYMBOL & 0x03) == size_t(JS::TraceKind::Symbol),
+ "Value type tags must correspond with JS::TraceKinds.");
+ static_assert((JSVAL_TAG_OBJECT & 0x03) == size_t(JS::TraceKind::Object),
+ "Value type tags must correspond with JS::TraceKinds.");
+ if (MOZ_UNLIKELY(isPrivateGCThing()))
+ return JS::GCThingTraceKind(toGCThing());
+ return JS::TraceKind(toTag() & 0x03);
+ }
+
+ JSWhyMagic whyMagic() const {
+ MOZ_ASSERT(isMagic());
+ return data.s.payload.why;
+ }
+
+ uint32_t magicUint32() const {
+ MOZ_ASSERT(isMagic());
+ return data.s.payload.u32;
+ }
+
+ /*** Comparison ***/
+
+ bool operator==(const Value& rhs) const {
+ return data.asBits == rhs.data.asBits;
+ }
+
+ bool operator!=(const Value& rhs) const {
+ return data.asBits != rhs.data.asBits;
+ }
+
+ friend inline bool SameType(const Value& lhs, const Value& rhs);
+
+ /*** Extract the value's typed payload ***/
+
+ int32_t toInt32() const {
+ MOZ_ASSERT(isInt32());
+#if defined(JS_NUNBOX32)
+ return data.s.payload.i32;
+#elif defined(JS_PUNBOX64)
+ return int32_t(data.asBits);
+#endif
+ }
+
+ double toDouble() const {
+ MOZ_ASSERT(isDouble());
+ return data.asDouble;
+ }
+
+ double toNumber() const {
+ MOZ_ASSERT(isNumber());
+ return isDouble() ? toDouble() : double(toInt32());
+ }
+
+ JSString* toString() const {
+ MOZ_ASSERT(isString());
+#if defined(JS_NUNBOX32)
+ return data.s.payload.str;
+#elif defined(JS_PUNBOX64)
+ return reinterpret_cast<JSString*>(data.asBits & JSVAL_PAYLOAD_MASK);
+#endif
+ }
+
+ JS::Symbol* toSymbol() const {
+ MOZ_ASSERT(isSymbol());
+#if defined(JS_NUNBOX32)
+ return data.s.payload.sym;
+#elif defined(JS_PUNBOX64)
+ return reinterpret_cast<JS::Symbol*>(data.asBits & JSVAL_PAYLOAD_MASK);
+#endif
+ }
+
+ JSObject& toObject() const {
+ MOZ_ASSERT(isObject());
+#if defined(JS_NUNBOX32)
+ return *data.s.payload.obj;
+#elif defined(JS_PUNBOX64)
+ return *toObjectOrNull();
+#endif
+ }
+
+ JSObject* toObjectOrNull() const {
+ MOZ_ASSERT(isObjectOrNull());
+#if defined(JS_NUNBOX32)
+ return data.s.payload.obj;
+#elif defined(JS_PUNBOX64)
+ uint64_t ptrBits = data.asBits & JSVAL_PAYLOAD_MASK;
+ MOZ_ASSERT((ptrBits & 0x7) == 0);
+ return reinterpret_cast<JSObject*>(ptrBits);
+#endif
+ }
+
+ js::gc::Cell* toGCThing() const {
+ MOZ_ASSERT(isGCThing());
+#if defined(JS_NUNBOX32)
+ return data.s.payload.cell;
+#elif defined(JS_PUNBOX64)
+ uint64_t ptrBits = data.asBits & JSVAL_PAYLOAD_MASK;
+ MOZ_ASSERT((ptrBits & 0x7) == 0);
+ return reinterpret_cast<js::gc::Cell*>(ptrBits);
+#endif
+ }
+
+ js::gc::Cell* toMarkablePointer() const {
+ MOZ_ASSERT(isMarkable());
+ return toGCThing();
+ }
+
+ GCCellPtr toGCCellPtr() const {
+ return GCCellPtr(toGCThing(), traceKind());
+ }
+
+ bool toBoolean() const {
+ MOZ_ASSERT(isBoolean());
+#if defined(JS_NUNBOX32)
+ return bool(data.s.payload.boo);
+#elif defined(JS_PUNBOX64)
+ return bool(data.asBits & JSVAL_PAYLOAD_MASK);
+#endif
+ }
+
+ uint32_t payloadAsRawUint32() const {
+ MOZ_ASSERT(!isDouble());
+ return data.s.payload.u32;
+ }
+
+ uint64_t asRawBits() const {
+ return data.asBits;
+ }
+
+ JSValueType extractNonDoubleType() const {
+ uint32_t type = toTag() & 0xF;
+ MOZ_ASSERT(type > JSVAL_TYPE_DOUBLE);
+ return JSValueType(type);
+ }
+
+ /*
+ * Private API
+ *
+ * Private setters/getters allow the caller to read/write arbitrary types
+ * that fit in the 64-bit payload. It is the caller's responsibility, after
+ * storing to a value with setPrivateX to read only using getPrivateX.
+     * Private values are given a type which ensures they are not marked.
+ */
+
+ void setPrivate(void* ptr) {
+ MOZ_ASSERT((uintptr_t(ptr) & 1) == 0);
+#if defined(JS_NUNBOX32)
+ data.s.tag = JSValueTag(0);
+ data.s.payload.ptr = ptr;
+#elif defined(JS_PUNBOX64)
+ data.asBits = uintptr_t(ptr) >> 1;
+#endif
+ MOZ_ASSERT(isDouble());
+ }
+
+ void* toPrivate() const {
+ MOZ_ASSERT(isDouble());
+#if defined(JS_NUNBOX32)
+ return data.s.payload.ptr;
+#elif defined(JS_PUNBOX64)
+ MOZ_ASSERT((data.asBits & 0x8000000000000000ULL) == 0);
+ return reinterpret_cast<void*>(data.asBits << 1);
+#endif
+ }
+
+ void setPrivateUint32(uint32_t ui) {
+ MOZ_ASSERT(uint32_t(int32_t(ui)) == ui);
+ setInt32(int32_t(ui));
+ }
+
+ uint32_t toPrivateUint32() const {
+ return uint32_t(toInt32());
+ }
+
+ /*
+ * Private GC Thing API
+ *
+ * Non-JSObject, JSString, and JS::Symbol cells may be put into the 64-bit
+ * payload as private GC things. Such Values are considered isMarkable()
+ * and isGCThing(), and as such, automatically marked. Their traceKind()
+ * is gotten via their cells.
+ */
+
+ void setPrivateGCThing(js::gc::Cell* cell) {
+ MOZ_ASSERT(JS::GCThingTraceKind(cell) != JS::TraceKind::String,
+ "Private GC thing Values must not be strings. Make a StringValue instead.");
+ MOZ_ASSERT(JS::GCThingTraceKind(cell) != JS::TraceKind::Symbol,
+ "Private GC thing Values must not be symbols. Make a SymbolValue instead.");
+ MOZ_ASSERT(JS::GCThingTraceKind(cell) != JS::TraceKind::Object,
+ "Private GC thing Values must not be objects. Make an ObjectValue instead.");
+
+ MOZ_ASSERT(uintptr_t(cell) > 0x1000);
+#if defined(JS_PUNBOX64)
+ // VisualStudio cannot contain parenthesized C++ style cast and shift
+ // inside decltype in template parameter:
+ // AssertionConditionType<decltype((uintptr_t(x) >> 1))>
+        // It throws a syntax error.
+ MOZ_ASSERT((((uintptr_t)cell) >> JSVAL_TAG_SHIFT) == 0);
+#endif
+ data.asBits = bitsFromTagAndPayload(JSVAL_TAG_PRIVATE_GCTHING, PayloadType(cell));
+ }
+
+ bool isPrivateGCThing() const {
+ return toTag() == JSVAL_TAG_PRIVATE_GCTHING;
+ }
+
+ const size_t* payloadWord() const {
+#if defined(JS_NUNBOX32)
+ return &data.s.payload.word;
+#elif defined(JS_PUNBOX64)
+ return &data.asWord;
+#endif
+ }
+
+ const uintptr_t* payloadUIntPtr() const {
+#if defined(JS_NUNBOX32)
+ return &data.s.payload.uintptr;
+#elif defined(JS_PUNBOX64)
+ return &data.asUIntPtr;
+#endif
+ }
+
+#if !defined(_MSC_VER) && !defined(__sparc)
+ // Value must be POD so that MSVC will pass it by value and not in memory
+ // (bug 689101); the same is true for SPARC as well (bug 737344). More
+ // precisely, we don't want Value return values compiled as out params.
+ private:
+#endif
+
+#if MOZ_LITTLE_ENDIAN
+# if defined(JS_NUNBOX32)
+ union layout {
+ uint64_t asBits;
+ struct {
+ union {
+ int32_t i32;
+ uint32_t u32;
+ uint32_t boo; // Don't use |bool| -- it must be four bytes.
+ JSString* str;
+ JS::Symbol* sym;
+ JSObject* obj;
+ js::gc::Cell* cell;
+ void* ptr;
+ JSWhyMagic why;
+ size_t word;
+ uintptr_t uintptr;
+ } payload;
+ JSValueTag tag;
+ } s;
+ double asDouble;
+ void* asPtr;
+
+ layout() = default;
+ explicit constexpr layout(uint64_t bits) : asBits(bits) {}
+ explicit constexpr layout(double d) : asDouble(d) {}
+ } data;
+# elif defined(JS_PUNBOX64)
+ union layout {
+ uint64_t asBits;
+#if !defined(_WIN64)
+ /* MSVC does not pack these correctly :-( */
+ struct {
+ uint64_t payload47 : 47;
+ JSValueTag tag : 17;
+ } debugView;
+#endif
+ struct {
+ union {
+ int32_t i32;
+ uint32_t u32;
+ JSWhyMagic why;
+ } payload;
+ } s;
+ double asDouble;
+ void* asPtr;
+ size_t asWord;
+ uintptr_t asUIntPtr;
+
+ layout() = default;
+ explicit constexpr layout(uint64_t bits) : asBits(bits) {}
+ explicit constexpr layout(double d) : asDouble(d) {}
+ } data;
+# endif /* JS_PUNBOX64 */
+#else /* MOZ_LITTLE_ENDIAN */
+# if defined(JS_NUNBOX32)
+ union layout {
+ uint64_t asBits;
+ struct {
+ JSValueTag tag;
+ union {
+ int32_t i32;
+ uint32_t u32;
+ uint32_t boo; // Don't use |bool| -- it must be four bytes.
+ JSString* str;
+ JS::Symbol* sym;
+ JSObject* obj;
+ js::gc::Cell* cell;
+ void* ptr;
+ JSWhyMagic why;
+ size_t word;
+ uintptr_t uintptr;
+ } payload;
+ } s;
+ double asDouble;
+ void* asPtr;
+
+ layout() = default;
+ explicit constexpr layout(uint64_t bits) : asBits(bits) {}
+ explicit constexpr layout(double d) : asDouble(d) {}
+ } data;
+# elif defined(JS_PUNBOX64)
+ union layout {
+ uint64_t asBits;
+ struct {
+ JSValueTag tag : 17;
+ uint64_t payload47 : 47;
+ } debugView;
+ struct {
+ uint32_t padding;
+ union {
+ int32_t i32;
+ uint32_t u32;
+ JSWhyMagic why;
+ } payload;
+ } s;
+ double asDouble;
+ void* asPtr;
+ size_t asWord;
+ uintptr_t asUIntPtr;
+
+ layout() = default;
+ explicit constexpr layout(uint64_t bits) : asBits(bits) {}
+ explicit constexpr layout(double d) : asDouble(d) {}
+ } data;
+# endif /* JS_PUNBOX64 */
+#endif /* MOZ_LITTLE_ENDIAN */
+
+ private:
+ explicit constexpr Value(uint64_t asBits) : data(asBits) {}
+ explicit constexpr Value(double d) : data(d) {}
+
+ void staticAssertions() {
+ JS_STATIC_ASSERT(sizeof(JSValueType) == 1);
+ JS_STATIC_ASSERT(sizeof(JSValueTag) == 4);
+ JS_STATIC_ASSERT(sizeof(JSWhyMagic) <= 4);
+ JS_STATIC_ASSERT(sizeof(Value) == 8);
+ }
+
+ friend constexpr Value JS::UndefinedValue();
+
+ public:
+ static constexpr uint64_t
+ bitsFromTagAndPayload(JSValueTag tag, PayloadType payload)
+ {
+#if defined(JS_NUNBOX32)
+ return (uint64_t(uint32_t(tag)) << 32) | payload;
+#elif defined(JS_PUNBOX64)
+ return (uint64_t(uint32_t(tag)) << JSVAL_TAG_SHIFT) | payload;
+#endif
+ }
+
+ static constexpr Value
+ fromTagAndPayload(JSValueTag tag, PayloadType payload)
+ {
+ return fromRawBits(bitsFromTagAndPayload(tag, payload));
+ }
+
+ static constexpr Value
+ fromRawBits(uint64_t asBits) {
+ return Value(asBits);
+ }
+
+ static constexpr Value
+ fromInt32(int32_t i) {
+ return fromTagAndPayload(JSVAL_TAG_INT32, uint32_t(i));
+ }
+
+ static constexpr Value
+ fromDouble(double d) {
+ return Value(d);
+ }
+} JS_HAZ_GC_POINTER;
+
+static_assert(sizeof(Value) == 8, "Value size must leave three tag bits, be a binary power, and is ubiquitously depended upon everywhere");
+
+inline bool
+IsOptimizedPlaceholderMagicValue(const Value& v)
+{
+ if (v.isMagic()) {
+ MOZ_ASSERT(v.whyMagic() == JS_OPTIMIZED_ARGUMENTS || v.whyMagic() == JS_OPTIMIZED_OUT);
+ return true;
+ }
+ return false;
+}
+
+static MOZ_ALWAYS_INLINE void
+ExposeValueToActiveJS(const Value& v)
+{
+ if (v.isMarkable())
+ js::gc::ExposeGCThingToActiveJS(GCCellPtr(v));
+}
+
+/************************************************************************/
+
+static inline Value
+NullValue()
+{
+ Value v;
+ v.setNull();
+ return v;
+}
+
+static inline constexpr Value
+UndefinedValue()
+{
+ return Value::fromTagAndPayload(JSVAL_TAG_UNDEFINED, 0);
+}
+
+static inline constexpr Value
+Int32Value(int32_t i32)
+{
+ return Value::fromInt32(i32);
+}
+
+static inline Value
+DoubleValue(double dbl)
+{
+ Value v;
+ v.setDouble(dbl);
+ return v;
+}
+
+static inline Value
+CanonicalizedDoubleValue(double d)
+{
+ return MOZ_UNLIKELY(mozilla::IsNaN(d))
+ ? Value::fromRawBits(detail::CanonicalizedNaNBits)
+ : Value::fromDouble(d);
+}
+
+static inline bool
+IsCanonicalized(double d)
+{
+ if (mozilla::IsInfinite(d) || mozilla::IsFinite(d))
+ return true;
+
+ uint64_t bits;
+ mozilla::BitwiseCast<uint64_t>(d, &bits);
+ return (bits & ~mozilla::DoubleTypeTraits::kSignBit) == detail::CanonicalizedNaNBits;
+}
+
+static inline Value
+DoubleNaNValue()
+{
+ Value v;
+ v.setNaN();
+ return v;
+}
+
+static inline Value
+Float32Value(float f)
+{
+ Value v;
+ v.setDouble(f);
+ return v;
+}
+
+static inline Value
+StringValue(JSString* str)
+{
+ Value v;
+ v.setString(str);
+ return v;
+}
+
+static inline Value
+SymbolValue(JS::Symbol* sym)
+{
+ Value v;
+ v.setSymbol(sym);
+ return v;
+}
+
+static inline Value
+BooleanValue(bool boo)
+{
+ Value v;
+ v.setBoolean(boo);
+ return v;
+}
+
+static inline Value
+TrueValue()
+{
+ Value v;
+ v.setBoolean(true);
+ return v;
+}
+
+static inline Value
+FalseValue()
+{
+ Value v;
+ v.setBoolean(false);
+ return v;
+}
+
+static inline Value
+ObjectValue(JSObject& obj)
+{
+ Value v;
+ v.setObject(obj);
+ return v;
+}
+
+static inline Value
+ObjectValueCrashOnTouch()
+{
+ Value v;
+ v.setObject(*reinterpret_cast<JSObject*>(0x48));
+ return v;
+}
+
+static inline Value
+MagicValue(JSWhyMagic why)
+{
+ Value v;
+ v.setMagic(why);
+ return v;
+}
+
+static inline Value
+MagicValueUint32(uint32_t payload)
+{
+ Value v;
+ v.setMagicUint32(payload);
+ return v;
+}
+
+static inline Value
+NumberValue(float f)
+{
+ Value v;
+ v.setNumber(f);
+ return v;
+}
+
+static inline Value
+NumberValue(double dbl)
+{
+ Value v;
+ v.setNumber(dbl);
+ return v;
+}
+
+static inline Value
+NumberValue(int8_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(uint8_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(int16_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(uint16_t i)
+{
+ return Int32Value(i);
+}
+
+static inline Value
+NumberValue(int32_t i)
+{
+ return Int32Value(i);
+}
+
+static inline constexpr Value
+NumberValue(uint32_t i)
+{
+ return i <= JSVAL_INT_MAX
+ ? Int32Value(int32_t(i))
+ : Value::fromDouble(double(i));
+}
+
+namespace detail {
+
+template <bool Signed>
+class MakeNumberValue
+{
+ public:
+ template<typename T>
+ static inline Value create(const T t)
+ {
+ Value v;
+ if (JSVAL_INT_MIN <= t && t <= JSVAL_INT_MAX)
+ v.setInt32(int32_t(t));
+ else
+ v.setDouble(double(t));
+ return v;
+ }
+};
+
+template <>
+class MakeNumberValue<false>
+{
+ public:
+ template<typename T>
+ static inline Value create(const T t)
+ {
+ Value v;
+ if (t <= JSVAL_INT_MAX)
+ v.setInt32(int32_t(t));
+ else
+ v.setDouble(double(t));
+ return v;
+ }
+};
+
+} // namespace detail
+
+template <typename T>
+static inline Value
+NumberValue(const T t)
+{
+ MOZ_ASSERT(Value::isNumberRepresentable(t), "value creation would be lossy");
+ return detail::MakeNumberValue<std::numeric_limits<T>::is_signed>::create(t);
+}
+
+static inline Value
+ObjectOrNullValue(JSObject* obj)
+{
+ Value v;
+ v.setObjectOrNull(obj);
+ return v;
+}
+
+static inline Value
+PrivateValue(void* ptr)
+{
+ Value v;
+ v.setPrivate(ptr);
+ return v;
+}
+
+static inline Value
+PrivateUint32Value(uint32_t ui)
+{
+ Value v;
+ v.setPrivateUint32(ui);
+ return v;
+}
+
+static inline Value
+PrivateGCThingValue(js::gc::Cell* cell)
+{
+ Value v;
+ v.setPrivateGCThing(cell);
+ return v;
+}
+
+static inline Value
+PoisonedObjectValue(JSObject* obj)
+{
+ Value v;
+ v.setObjectNoCheck(obj);
+ return v;
+}
+
+inline bool
+SameType(const Value& lhs, const Value& rhs)
+{
+#if defined(JS_NUNBOX32)
+ JSValueTag ltag = lhs.toTag(), rtag = rhs.toTag();
+ return ltag == rtag || (ltag < JSVAL_TAG_CLEAR && rtag < JSVAL_TAG_CLEAR);
+#elif defined(JS_PUNBOX64)
+ return (lhs.isDouble() && rhs.isDouble()) ||
+ (((lhs.data.asBits ^ rhs.data.asBits) & 0xFFFF800000000000ULL) == 0);
+#endif
+}
+
+} // namespace JS
+
+/************************************************************************/
+
+namespace JS {
+JS_PUBLIC_API(void) HeapValuePostBarrier(Value* valuep, const Value& prev, const Value& next);
+
+template <>
+struct GCPolicy<JS::Value>
+{
+ static Value initial() { return UndefinedValue(); }
+ static void trace(JSTracer* trc, Value* v, const char* name) {
+ js::UnsafeTraceManuallyBarrieredEdge(trc, v, name);
+ }
+ static bool isTenured(const Value& thing) {
+ return !thing.isGCThing() || !IsInsideNursery(thing.toGCThing());
+ }
+};
+
+} // namespace JS
+
+namespace js {
+
+template <>
+struct BarrierMethods<JS::Value>
+{
+ static gc::Cell* asGCThingOrNull(const JS::Value& v) {
+ return v.isMarkable() ? v.toGCThing() : nullptr;
+ }
+ static void postBarrier(JS::Value* v, const JS::Value& prev, const JS::Value& next) {
+ JS::HeapValuePostBarrier(v, prev, next);
+ }
+ static void exposeToJS(const JS::Value& v) {
+ JS::ExposeValueToActiveJS(v);
+ }
+};
+
+template <class Outer> class MutableValueOperations;
+
+/**
+ * A class designed for CRTP use in implementing the non-mutating parts of the
+ * Value interface in Value-like classes. Outer must be a class inheriting
+ * ValueOperations<Outer> with a visible get() method returning a const
+ * reference to the Value abstracted by Outer.
+ */
+template <class Outer>
+class ValueOperations
+{
+ friend class MutableValueOperations<Outer>;
+
+ const JS::Value& value() const { return static_cast<const Outer*>(this)->get(); }
+
+ public:
+ bool isUndefined() const { return value().isUndefined(); }
+ bool isNull() const { return value().isNull(); }
+ bool isBoolean() const { return value().isBoolean(); }
+ bool isTrue() const { return value().isTrue(); }
+ bool isFalse() const { return value().isFalse(); }
+ bool isNumber() const { return value().isNumber(); }
+ bool isInt32() const { return value().isInt32(); }
+ bool isInt32(int32_t i32) const { return value().isInt32(i32); }
+ bool isDouble() const { return value().isDouble(); }
+ bool isString() const { return value().isString(); }
+ bool isSymbol() const { return value().isSymbol(); }
+ bool isObject() const { return value().isObject(); }
+ bool isMagic() const { return value().isMagic(); }
+ bool isMagic(JSWhyMagic why) const { return value().isMagic(why); }
+ bool isMarkable() const { return value().isMarkable(); }
+ bool isPrimitive() const { return value().isPrimitive(); }
+ bool isGCThing() const { return value().isGCThing(); }
+
+ bool isNullOrUndefined() const { return value().isNullOrUndefined(); }
+ bool isObjectOrNull() const { return value().isObjectOrNull(); }
+
+ bool toBoolean() const { return value().toBoolean(); }
+ double toNumber() const { return value().toNumber(); }
+ int32_t toInt32() const { return value().toInt32(); }
+ double toDouble() const { return value().toDouble(); }
+ JSString* toString() const { return value().toString(); }
+ JS::Symbol* toSymbol() const { return value().toSymbol(); }
+ JSObject& toObject() const { return value().toObject(); }
+ JSObject* toObjectOrNull() const { return value().toObjectOrNull(); }
+ gc::Cell* toGCThing() const { return value().toGCThing(); }
+ JS::TraceKind traceKind() const { return value().traceKind(); }
+ void* toPrivate() const { return value().toPrivate(); }
+ uint32_t toPrivateUint32() const { return value().toPrivateUint32(); }
+
+ uint64_t asRawBits() const { return value().asRawBits(); }
+ JSValueType extractNonDoubleType() const { return value().extractNonDoubleType(); }
+
+ JSWhyMagic whyMagic() const { return value().whyMagic(); }
+ uint32_t magicUint32() const { return value().magicUint32(); }
+};
+
+/**
+ * A class designed for CRTP use in implementing all the mutating parts of the
+ * Value interface in Value-like classes. Outer must be a class inheriting
+ * MutableValueOperations<Outer> with visible get() methods returning const and
+ * non-const references to the Value abstracted by Outer.
+ */
+template <class Outer>
+class MutableValueOperations : public ValueOperations<Outer>
+{
+ JS::Value& value() { return static_cast<Outer*>(this)->get(); }
+
+ public:
+ void setNull() { value().setNull(); }
+ void setUndefined() { value().setUndefined(); }
+ void setInt32(int32_t i) { value().setInt32(i); }
+ void setDouble(double d) { value().setDouble(d); }
+ void setNaN() { setDouble(JS::GenericNaN()); }
+ void setBoolean(bool b) { value().setBoolean(b); }
+ void setMagic(JSWhyMagic why) { value().setMagic(why); }
+ bool setNumber(uint32_t ui) { return value().setNumber(ui); }
+ bool setNumber(double d) { return value().setNumber(d); }
+ void setString(JSString* str) { this->value().setString(str); }
+ void setSymbol(JS::Symbol* sym) { this->value().setSymbol(sym); }
+ void setObject(JSObject& obj) { this->value().setObject(obj); }
+ void setObjectOrNull(JSObject* arg) { this->value().setObjectOrNull(arg); }
+ void setPrivate(void* ptr) { this->value().setPrivate(ptr); }
+ void setPrivateUint32(uint32_t ui) { this->value().setPrivateUint32(ui); }
+ void setPrivateGCThing(js::gc::Cell* cell) { this->value().setPrivateGCThing(cell); }
+};
+
+/*
+ * Augment the generic Heap<T> interface when T = Value with
+ * type-querying, value-extracting, and mutating operations.
+ */
+template <>
+class HeapBase<JS::Value> : public ValueOperations<JS::Heap<JS::Value> >
+{
+ typedef JS::Heap<JS::Value> Outer;
+
+ friend class ValueOperations<Outer>;
+
+ void setBarriered(const JS::Value& v) {
+ *static_cast<JS::Heap<JS::Value>*>(this) = v;
+ }
+
+ public:
+ void setNull() { setBarriered(JS::NullValue()); }
+ void setUndefined() { setBarriered(JS::UndefinedValue()); }
+ void setInt32(int32_t i) { setBarriered(JS::Int32Value(i)); }
+ void setDouble(double d) { setBarriered(JS::DoubleValue(d)); }
+ void setNaN() { setDouble(JS::GenericNaN()); }
+ void setBoolean(bool b) { setBarriered(JS::BooleanValue(b)); }
+ void setMagic(JSWhyMagic why) { setBarriered(JS::MagicValue(why)); }
+ void setString(JSString* str) { setBarriered(JS::StringValue(str)); }
+ void setSymbol(JS::Symbol* sym) { setBarriered(JS::SymbolValue(sym)); }
+ void setObject(JSObject& obj) { setBarriered(JS::ObjectValue(obj)); }
+ void setPrivateGCThing(js::gc::Cell* cell) { setBarriered(JS::PrivateGCThingValue(cell)); }
+
+ bool setNumber(uint32_t ui) {
+ if (ui > JSVAL_INT_MAX) {
+ setDouble((double)ui);
+ return false;
+ } else {
+ setInt32((int32_t)ui);
+ return true;
+ }
+ }
+
+ bool setNumber(double d) {
+ int32_t i;
+ if (mozilla::NumberIsInt32(d, &i)) {
+ setInt32(i);
+ return true;
+ }
+
+ setDouble(d);
+ return false;
+ }
+
+ void setObjectOrNull(JSObject* arg) {
+ if (arg)
+ setObject(*arg);
+ else
+ setNull();
+ }
+};
+
+template <>
+class HandleBase<JS::Value> : public ValueOperations<JS::Handle<JS::Value> >
+{};
+
+template <>
+class MutableHandleBase<JS::Value> : public MutableValueOperations<JS::MutableHandle<JS::Value> >
+{};
+
+template <>
+class RootedBase<JS::Value> : public MutableValueOperations<JS::Rooted<JS::Value> >
+{};
+
+template <>
+class PersistentRootedBase<JS::Value> : public MutableValueOperations<JS::PersistentRooted<JS::Value>>
+{};
+
+/*
+ * If the Value is a GC pointer type, convert to that type and call |f| with
+ * the pointer. If the Value is not a GC type, calls F::defaultValue.
+ */
+template <typename F, typename... Args>
+auto
+DispatchTyped(F f, const JS::Value& val, Args&&... args)
+ -> decltype(f(static_cast<JSObject*>(nullptr), mozilla::Forward<Args>(args)...))
+{
+ if (val.isString())
+ return f(val.toString(), mozilla::Forward<Args>(args)...);
+ if (val.isObject())
+ return f(&val.toObject(), mozilla::Forward<Args>(args)...);
+ if (val.isSymbol())
+ return f(val.toSymbol(), mozilla::Forward<Args>(args)...);
+ if (MOZ_UNLIKELY(val.isPrivateGCThing()))
+ return DispatchTyped(f, val.toGCCellPtr(), mozilla::Forward<Args>(args)...);
+ MOZ_ASSERT(!val.isMarkable());
+ return F::defaultValue(val);
+}
+
+template <class S> struct VoidDefaultAdaptor { static void defaultValue(const S&) {} };
+template <class S> struct IdentityDefaultAdaptor { static S defaultValue(const S& v) {return v;} };
+template <class S, bool v> struct BoolDefaultAdaptor { static bool defaultValue(const S&) { return v; } };
+
+} // namespace js
+
+/************************************************************************/
+
+namespace JS {
+
+extern JS_PUBLIC_DATA(const HandleValue) NullHandleValue;
+extern JS_PUBLIC_DATA(const HandleValue) UndefinedHandleValue;
+extern JS_PUBLIC_DATA(const HandleValue) TrueHandleValue;
+extern JS_PUBLIC_DATA(const HandleValue) FalseHandleValue;
+
+} // namespace JS
+
+#endif /* js_Value_h */
diff --git a/js/public/Vector.h b/js/public/Vector.h
new file mode 100644
index 0000000000..6fa63e93ee
--- /dev/null
+++ b/js/public/Vector.h
@@ -0,0 +1,45 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_Vector_h
+#define js_Vector_h
+
+#include "mozilla/Vector.h"
+
+/* Silence dire "bugs in previous versions of MSVC have been fixed" warnings */
+#ifdef _MSC_VER
+#pragma warning(push)
+#pragma warning(disable:4345)
+#endif
+
+namespace js {
+
+class TempAllocPolicy;
+
+namespace detail {
+
+template <typename T>
+struct TypeIsGCThing : mozilla::FalseType
+{};
+
+// Uncomment this once we actually can assert it:
+//template <>
+//struct TypeIsGCThing<JS::Value> : mozilla::TrueType
+//{};
+
+} // namespace detail
+
+template <typename T,
+ size_t MinInlineCapacity = 0,
+ class AllocPolicy = TempAllocPolicy,
+ // Don't use this with JS::Value! Use JS::AutoValueVector instead.
+ typename = typename mozilla::EnableIf<!detail::TypeIsGCThing<T>::value>::Type
+ >
+using Vector = mozilla::Vector<T, MinInlineCapacity, AllocPolicy>;
+
+} // namespace js
+
+#endif /* js_Vector_h */
diff --git a/js/public/WeakMapPtr.h b/js/public/WeakMapPtr.h
new file mode 100644
index 0000000000..41860551a0
--- /dev/null
+++ b/js/public/WeakMapPtr.h
@@ -0,0 +1,46 @@
+/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 4 -*-
+ * vim: set ts=8 sts=4 et sw=4 tw=99:
+ * This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#ifndef js_WeakMapPtr_h
+#define js_WeakMapPtr_h
+
+#include "jspubtd.h"
+
+#include "js/TypeDecls.h"
+
+namespace JS {
+
+// A wrapper around the internal C++ representation of SpiderMonkey WeakMaps,
+// usable outside the engine.
+//
+// The supported template specializations are enumerated in WeakMapPtr.cpp. If
+// you want to use this class for a different key/value combination, add it to
+// the list and the compiler will generate the relevant machinery.
+template <typename K, typename V>
+class JS_PUBLIC_API(WeakMapPtr)
+{
+ public:
+ WeakMapPtr() : ptr(nullptr) {}
+ bool init(JSContext* cx);
+ bool initialized() { return ptr != nullptr; }
+ void destroy();
+ virtual ~WeakMapPtr() { MOZ_ASSERT(!initialized()); }
+ void trace(JSTracer* tracer);
+
+ V lookup(const K& key);
+ bool put(JSContext* cx, const K& key, const V& value);
+
+ private:
+ void* ptr;
+
+ // WeakMapPtr is neither copyable nor assignable.
+ WeakMapPtr(const WeakMapPtr& wmp) = delete;
+ WeakMapPtr& operator=(const WeakMapPtr& wmp) = delete;
+};
+
+} /* namespace JS */
+
+#endif /* js_WeakMapPtr_h */