GSI - Employe Self Service Mobile
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

30556 lines
1.1 MiB

2 months ago
  1. import { _getProvider, getApp, _removeServiceInstance, _registerComponent, registerVersion, SDK_VERSION as SDK_VERSION$1 } from '@firebase/app';
  2. import { Component } from '@firebase/component';
  3. import { Logger, LogLevel } from '@firebase/logger';
  4. import { inspect, TextEncoder, TextDecoder } from 'util';
  5. import { FirebaseError, createMockUserToken, getModularInstance, deepEqual, getDefaultEmulatorHostnameAndPort, getUA, isIndexedDBAvailable, isSafari } from '@firebase/util';
  6. import { randomBytes as randomBytes$1 } from 'crypto';
  7. import * as grpc from '@grpc/grpc-js';
  8. import * as protoLoader from '@grpc/proto-loader';
  9. const name = "@firebase/firestore";
  10. const version$1 = "3.8.1";
  11. /**
  12. * @license
  13. * Copyright 2017 Google LLC
  14. *
  15. * Licensed under the Apache License, Version 2.0 (the "License");
  16. * you may not use this file except in compliance with the License.
  17. * You may obtain a copy of the License at
  18. *
  19. * http://www.apache.org/licenses/LICENSE-2.0
  20. *
  21. * Unless required by applicable law or agreed to in writing, software
  22. * distributed under the License is distributed on an "AS IS" BASIS,
  23. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  24. * See the License for the specific language governing permissions and
  25. * limitations under the License.
  26. */
  27. /**
  28. * Simple wrapper around a nullable UID. Mostly exists to make code more
  29. * readable.
  30. */
  31. class User {
  32. constructor(uid) {
  33. this.uid = uid;
  34. }
  35. isAuthenticated() {
  36. return this.uid != null;
  37. }
  38. /**
  39. * Returns a key representing this user, suitable for inclusion in a
  40. * dictionary.
  41. */
  42. toKey() {
  43. if (this.isAuthenticated()) {
  44. return 'uid:' + this.uid;
  45. }
  46. else {
  47. return 'anonymous-user';
  48. }
  49. }
  50. isEqual(otherUser) {
  51. return otherUser.uid === this.uid;
  52. }
  53. }
  54. /** A user with a null UID. */
  55. User.UNAUTHENTICATED = new User(null);
  56. // TODO(mikelehen): Look into getting a proper uid-equivalent for
  57. // non-FirebaseAuth providers.
  58. User.GOOGLE_CREDENTIALS = new User('google-credentials-uid');
  59. User.FIRST_PARTY = new User('first-party-uid');
  60. User.MOCK_USER = new User('mock-user');
  61. const version = "9.16.0";
  62. /**
  63. * @license
  64. * Copyright 2017 Google LLC
  65. *
  66. * Licensed under the Apache License, Version 2.0 (the "License");
  67. * you may not use this file except in compliance with the License.
  68. * You may obtain a copy of the License at
  69. *
  70. * http://www.apache.org/licenses/LICENSE-2.0
  71. *
  72. * Unless required by applicable law or agreed to in writing, software
  73. * distributed under the License is distributed on an "AS IS" BASIS,
  74. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  75. * See the License for the specific language governing permissions and
  76. * limitations under the License.
  77. */
  78. let SDK_VERSION = version;
  79. function setSDKVersion(version) {
  80. SDK_VERSION = version;
  81. }
  82. /**
  83. * @license
  84. * Copyright 2020 Google LLC
  85. *
  86. * Licensed under the Apache License, Version 2.0 (the "License");
  87. * you may not use this file except in compliance with the License.
  88. * You may obtain a copy of the License at
  89. *
  90. * http://www.apache.org/licenses/LICENSE-2.0
  91. *
  92. * Unless required by applicable law or agreed to in writing, software
  93. * distributed under the License is distributed on an "AS IS" BASIS,
  94. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  95. * See the License for the specific language governing permissions and
  96. * limitations under the License.
  97. */
  98. /** Formats an object as a JSON string, suitable for logging. */
  99. function formatJSON(value) {
  100. // util.inspect() results in much more readable output than JSON.stringify()
  101. return inspect(value, { depth: 100 });
  102. }
  103. /**
  104. * @license
  105. * Copyright 2017 Google LLC
  106. *
  107. * Licensed under the Apache License, Version 2.0 (the "License");
  108. * you may not use this file except in compliance with the License.
  109. * You may obtain a copy of the License at
  110. *
  111. * http://www.apache.org/licenses/LICENSE-2.0
  112. *
  113. * Unless required by applicable law or agreed to in writing, software
  114. * distributed under the License is distributed on an "AS IS" BASIS,
  115. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  116. * See the License for the specific language governing permissions and
  117. * limitations under the License.
  118. */
// Shared logger instance for the whole Firestore SDK.
const logClient = new Logger('@firebase/firestore');
// Helper methods are needed because variables can't be exported as read/write
/** Returns the current Firestore log verbosity. */
function getLogLevel() {
    return logClient.logLevel;
}
/**
 * Sets the verbosity of Cloud Firestore logs (debug, error, or silent).
 *
 * @param logLevel - The verbosity you set for activity and error logging. Can
 * be any of the following values:
 *
 * <ul>
 * <li>`debug` for the most verbose logging level, primarily for
 * debugging.</li>
 * <li>`error` to log errors only.</li>
 * <li>`silent` to turn off logging.</li>
 * </ul>
 */
function setLogLevel(logLevel) {
    logClient.setLogLevel(logLevel);
}
  140. function logDebug(msg, ...obj) {
  141. if (logClient.logLevel <= LogLevel.DEBUG) {
  142. const args = obj.map(argToString);
  143. logClient.debug(`Firestore (${SDK_VERSION}): ${msg}`, ...args);
  144. }
  145. }
  146. function logError(msg, ...obj) {
  147. if (logClient.logLevel <= LogLevel.ERROR) {
  148. const args = obj.map(argToString);
  149. logClient.error(`Firestore (${SDK_VERSION}): ${msg}`, ...args);
  150. }
  151. }
  152. /**
  153. * @internal
  154. */
  155. function logWarn(msg, ...obj) {
  156. if (logClient.logLevel <= LogLevel.WARN) {
  157. const args = obj.map(argToString);
  158. logClient.warn(`Firestore (${SDK_VERSION}): ${msg}`, ...args);
  159. }
  160. }
  161. /**
  162. * Converts an additional log parameter to a string representation.
  163. */
  164. function argToString(obj) {
  165. if (typeof obj === 'string') {
  166. return obj;
  167. }
  168. else {
  169. try {
  170. return formatJSON(obj);
  171. }
  172. catch (e) {
  173. // Converting to JSON failed, just log the object directly
  174. return obj;
  175. }
  176. }
  177. }
  178. /**
  179. * @license
  180. * Copyright 2017 Google LLC
  181. *
  182. * Licensed under the Apache License, Version 2.0 (the "License");
  183. * you may not use this file except in compliance with the License.
  184. * You may obtain a copy of the License at
  185. *
  186. * http://www.apache.org/licenses/LICENSE-2.0
  187. *
  188. * Unless required by applicable law or agreed to in writing, software
  189. * distributed under the License is distributed on an "AS IS" BASIS,
  190. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  191. * See the License for the specific language governing permissions and
  192. * limitations under the License.
  193. */
  194. /**
  195. * Unconditionally fails, throwing an Error with the given message.
  196. * Messages are stripped in production builds.
  197. *
  198. * Returns `never` and can be used in expressions:
  199. * @example
  200. * let futureVar = fail('not implemented yet');
  201. */
  202. function fail(failure = 'Unexpected state') {
  203. // Log the failure in addition to throw an exception, just in case the
  204. // exception is swallowed.
  205. const message = `FIRESTORE (${SDK_VERSION}) INTERNAL ASSERTION FAILED: ` + failure;
  206. logError(message);
  207. // NOTE: We don't use FirestoreError here because these are internal failures
  208. // that cannot be handled by the user. (Also it would create a circular
  209. // dependency between the error and assert modules which doesn't work.)
  210. throw new Error(message);
  211. }
  212. /**
  213. * Fails if the given assertion condition is false, throwing an Error with the
  214. * given message if it did.
  215. *
  216. * Messages are stripped in production builds.
  217. */
  218. function hardAssert(assertion, message) {
  219. if (!assertion) {
  220. fail();
  221. }
  222. }
  223. /**
  224. * Fails if the given assertion condition is false, throwing an Error with the
  225. * given message if it did.
  226. *
  227. * The code of callsites invoking this function are stripped out in production
  228. * builds. Any side-effects of code within the debugAssert() invocation will not
  229. * happen in this case.
  230. *
  231. * @internal
  232. */
  233. function debugAssert(assertion, message) {
  234. if (!assertion) {
  235. fail();
  236. }
  237. }
/**
 * Casts `obj` to `T`. In non-production builds, verifies that `obj` is an
 * instance of `T` before casting.
 *
 * In this compiled build the instanceof verification has been stripped, so
 * this is a pure pass-through and the `constructor` argument is intentionally
 * unused.
 */
function debugCast(obj,
// eslint-disable-next-line @typescript-eslint/no-explicit-any
constructor) {
    return obj;
}
  247. /**
  248. * @license
  249. * Copyright 2017 Google LLC
  250. *
  251. * Licensed under the Apache License, Version 2.0 (the "License");
  252. * you may not use this file except in compliance with the License.
  253. * You may obtain a copy of the License at
  254. *
  255. * http://www.apache.org/licenses/LICENSE-2.0
  256. *
  257. * Unless required by applicable law or agreed to in writing, software
  258. * distributed under the License is distributed on an "AS IS" BASIS,
  259. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  260. * See the License for the specific language governing permissions and
  261. * limitations under the License.
  262. */
/**
 * Error codes carried by FirestoreError, mirroring the gRPC canonical
 * status codes.
 */
const Code = {
    // Causes are copied from:
    // https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
    /** Not an error; returned on success. */
    OK: 'ok',
    /** The operation was cancelled (typically by the caller). */
    CANCELLED: 'cancelled',
    /** Unknown error or an error from a different error domain. */
    UNKNOWN: 'unknown',
    /**
     * Client specified an invalid argument. Note that this differs from
     * FAILED_PRECONDITION. INVALID_ARGUMENT indicates arguments that are
     * problematic regardless of the state of the system (e.g., a malformed file
     * name).
     */
    INVALID_ARGUMENT: 'invalid-argument',
    /**
     * Deadline expired before operation could complete. For operations that
     * change the state of the system, this error may be returned even if the
     * operation has completed successfully. For example, a successful response
     * from a server could have been delayed long enough for the deadline to
     * expire.
     */
    DEADLINE_EXCEEDED: 'deadline-exceeded',
    /** Some requested entity (e.g., file or directory) was not found. */
    NOT_FOUND: 'not-found',
    /**
     * Some entity that we attempted to create (e.g., file or directory) already
     * exists.
     */
    ALREADY_EXISTS: 'already-exists',
    /**
     * The caller does not have permission to execute the specified operation.
     * PERMISSION_DENIED must not be used for rejections caused by exhausting
     * some resource (use RESOURCE_EXHAUSTED instead for those errors).
     * PERMISSION_DENIED must not be used if the caller can not be identified
     * (use UNAUTHENTICATED instead for those errors).
     */
    PERMISSION_DENIED: 'permission-denied',
    /**
     * The request does not have valid authentication credentials for the
     * operation.
     */
    UNAUTHENTICATED: 'unauthenticated',
    /**
     * Some resource has been exhausted, perhaps a per-user quota, or perhaps the
     * entire file system is out of space.
     */
    RESOURCE_EXHAUSTED: 'resource-exhausted',
    /**
     * Operation was rejected because the system is not in a state required for
     * the operation's execution. For example, directory to be deleted may be
     * non-empty, an rmdir operation is applied to a non-directory, etc.
     *
     * A litmus test that may help a service implementor in deciding
     * between FAILED_PRECONDITION, ABORTED, and UNAVAILABLE:
     * (a) Use UNAVAILABLE if the client can retry just the failing call.
     * (b) Use ABORTED if the client should retry at a higher-level
     * (e.g., restarting a read-modify-write sequence).
     * (c) Use FAILED_PRECONDITION if the client should not retry until
     * the system state has been explicitly fixed. E.g., if an "rmdir"
     * fails because the directory is non-empty, FAILED_PRECONDITION
     * should be returned since the client should not retry unless
     * they have first fixed up the directory by deleting files from it.
     * (d) Use FAILED_PRECONDITION if the client performs conditional
     * REST Get/Update/Delete on a resource and the resource on the
     * server does not match the condition. E.g., conflicting
     * read-modify-write on the same resource.
     */
    FAILED_PRECONDITION: 'failed-precondition',
    /**
     * The operation was aborted, typically due to a concurrency issue like
     * sequencer check failures, transaction aborts, etc.
     *
     * See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
     * and UNAVAILABLE.
     */
    ABORTED: 'aborted',
    /**
     * Operation was attempted past the valid range. E.g., seeking or reading
     * past end of file.
     *
     * Unlike INVALID_ARGUMENT, this error indicates a problem that may be fixed
     * if the system state changes. For example, a 32-bit file system will
     * generate INVALID_ARGUMENT if asked to read at an offset that is not in the
     * range [0,2^32-1], but it will generate OUT_OF_RANGE if asked to read from
     * an offset past the current file size.
     *
     * There is a fair bit of overlap between FAILED_PRECONDITION and
     * OUT_OF_RANGE. We recommend using OUT_OF_RANGE (the more specific error)
     * when it applies so that callers who are iterating through a space can
     * easily look for an OUT_OF_RANGE error to detect when they are done.
     */
    OUT_OF_RANGE: 'out-of-range',
    /** Operation is not implemented or not supported/enabled in this service. */
    UNIMPLEMENTED: 'unimplemented',
    /**
     * Internal errors. Means some invariants expected by underlying System has
     * been broken. If you see one of these errors, Something is very broken.
     */
    INTERNAL: 'internal',
    /**
     * The service is currently unavailable. This is a most likely a transient
     * condition and may be corrected by retrying with a backoff.
     *
     * See litmus test above for deciding between FAILED_PRECONDITION, ABORTED,
     * and UNAVAILABLE.
     */
    UNAVAILABLE: 'unavailable',
    /** Unrecoverable data loss or corruption. */
    DATA_LOSS: 'data-loss'
};
  375. /** An error returned by a Firestore operation. */
  376. class FirestoreError extends FirebaseError {
  377. /** @hideconstructor */
  378. constructor(
  379. /**
  380. * The backend error code associated with this error.
  381. */
  382. code,
  383. /**
  384. * A custom error description.
  385. */
  386. message) {
  387. super(code, message);
  388. this.code = code;
  389. this.message = message;
  390. // HACK: We write a toString property directly because Error is not a real
  391. // class and so inheritance does not work correctly. We could alternatively
  392. // do the same "back-door inheritance" trick that FirebaseError does.
  393. this.toString = () => `${this.name}: [code=${this.code}]: ${this.message}`;
  394. }
  395. }
  396. /**
  397. * @license
  398. * Copyright 2017 Google LLC
  399. *
  400. * Licensed under the Apache License, Version 2.0 (the "License");
  401. * you may not use this file except in compliance with the License.
  402. * You may obtain a copy of the License at
  403. *
  404. * http://www.apache.org/licenses/LICENSE-2.0
  405. *
  406. * Unless required by applicable law or agreed to in writing, software
  407. * distributed under the License is distributed on an "AS IS" BASIS,
  408. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  409. * See the License for the specific language governing permissions and
  410. * limitations under the License.
  411. */
  412. class Deferred {
  413. constructor() {
  414. this.promise = new Promise((resolve, reject) => {
  415. this.resolve = resolve;
  416. this.reject = reject;
  417. });
  418. }
  419. }
  420. /**
  421. * @license
  422. * Copyright 2017 Google LLC
  423. *
  424. * Licensed under the Apache License, Version 2.0 (the "License");
  425. * you may not use this file except in compliance with the License.
  426. * You may obtain a copy of the License at
  427. *
  428. * http://www.apache.org/licenses/LICENSE-2.0
  429. *
  430. * Unless required by applicable law or agreed to in writing, software
  431. * distributed under the License is distributed on an "AS IS" BASIS,
  432. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  433. * See the License for the specific language governing permissions and
  434. * limitations under the License.
  435. */
  436. class OAuthToken {
  437. constructor(value, user) {
  438. this.user = user;
  439. this.type = 'OAuth';
  440. this.headers = new Map();
  441. this.headers.set('Authorization', `Bearer ${value}`);
  442. }
  443. }
  444. /**
  445. * A CredentialsProvider that always yields an empty token.
  446. * @internal
  447. */
  448. class EmptyAuthCredentialsProvider {
  449. getToken() {
  450. return Promise.resolve(null);
  451. }
  452. invalidateToken() { }
  453. start(asyncQueue, changeListener) {
  454. // Fire with initial user.
  455. asyncQueue.enqueueRetryable(() => changeListener(User.UNAUTHENTICATED));
  456. }
  457. shutdown() { }
  458. }
  459. /**
  460. * A CredentialsProvider that always returns a constant token. Used for
  461. * emulator token mocking.
  462. */
  463. class EmulatorAuthCredentialsProvider {
  464. constructor(token) {
  465. this.token = token;
  466. /**
  467. * Stores the listener registered with setChangeListener()
  468. * This isn't actually necessary since the UID never changes, but we use this
  469. * to verify the listen contract is adhered to in tests.
  470. */
  471. this.changeListener = null;
  472. }
  473. getToken() {
  474. return Promise.resolve(this.token);
  475. }
  476. invalidateToken() { }
  477. start(asyncQueue, changeListener) {
  478. this.changeListener = changeListener;
  479. // Fire with initial user.
  480. asyncQueue.enqueueRetryable(() => changeListener(this.token.user));
  481. }
  482. shutdown() {
  483. this.changeListener = null;
  484. }
  485. }
/**
 * A CredentialsProvider backed by Firebase Auth. Tracks the current user via
 * an auth-token listener and hands out OAuth tokens for requests.
 */
class FirebaseAuthCredentialsProvider {
    constructor(authProvider) {
        this.authProvider = authProvider;
        /** Tracks the current User. */
        this.currentUser = User.UNAUTHENTICATED;
        /**
         * Counter used to detect if the token changed while a getToken request was
         * outstanding.
         */
        this.tokenCounter = 0;
        this.forceRefresh = false;
        this.auth = null;
    }
    start(asyncQueue, changeListener) {
        let lastTokenId = this.tokenCounter;
        // A change listener that prevents double-firing for the same token change.
        const guardedChangeListener = user => {
            if (this.tokenCounter !== lastTokenId) {
                lastTokenId = this.tokenCounter;
                return changeListener(user);
            }
            else {
                return Promise.resolve();
            }
        };
        // A promise that can be waited on to block on the next token change.
        // This promise is re-created after each change.
        let nextToken = new Deferred();
        this.tokenListener = () => {
            // Bump the counter first so in-flight getToken() calls can detect
            // that their result is stale.
            this.tokenCounter++;
            this.currentUser = this.getUser();
            nextToken.resolve();
            nextToken = new Deferred();
            asyncQueue.enqueueRetryable(() => guardedChangeListener(this.currentUser));
        };
        const awaitNextToken = () => {
            // Capture the current Deferred; `nextToken` itself is replaced on
            // every token change.
            const currentTokenAttempt = nextToken;
            asyncQueue.enqueueRetryable(async () => {
                await currentTokenAttempt.promise;
                await guardedChangeListener(this.currentUser);
            });
        };
        const registerAuth = (auth) => {
            logDebug('FirebaseAuthCredentialsProvider', 'Auth detected');
            this.auth = auth;
            this.auth.addAuthTokenListener(this.tokenListener);
            awaitNextToken();
        };
        this.authProvider.onInit(auth => registerAuth(auth));
        // Our users can initialize Auth right after Firestore, so we give it
        // a chance to register itself with the component framework before we
        // determine whether to start up in unauthenticated mode.
        setTimeout(() => {
            if (!this.auth) {
                const auth = this.authProvider.getImmediate({ optional: true });
                if (auth) {
                    registerAuth(auth);
                }
                else {
                    // If auth is still not available, proceed with `null` user
                    logDebug('FirebaseAuthCredentialsProvider', 'Auth not yet detected');
                    nextToken.resolve();
                    nextToken = new Deferred();
                }
            }
        }, 0);
        awaitNextToken();
    }
    getToken() {
        // Take note of the current value of the tokenCounter so that this method
        // can fail (with an ABORTED error) if there is a token change while the
        // request is outstanding.
        const initialTokenCounter = this.tokenCounter;
        const forceRefresh = this.forceRefresh;
        this.forceRefresh = false;
        if (!this.auth) {
            return Promise.resolve(null);
        }
        return this.auth.getToken(forceRefresh).then(tokenData => {
            // Cancel the request since the token changed while the request was
            // outstanding so the response is potentially for a previous user (which
            // user, we can't be sure).
            if (this.tokenCounter !== initialTokenCounter) {
                logDebug('FirebaseAuthCredentialsProvider', 'getToken aborted due to token change.');
                // Retry with the new token state.
                return this.getToken();
            }
            else {
                if (tokenData) {
                    hardAssert(typeof tokenData.accessToken === 'string');
                    return new OAuthToken(tokenData.accessToken, this.currentUser);
                }
                else {
                    return null;
                }
            }
        });
    }
    invalidateToken() {
        // Force the next getToken() call to bypass any cached token.
        this.forceRefresh = true;
    }
    shutdown() {
        if (this.auth) {
            this.auth.removeAuthTokenListener(this.tokenListener);
        }
    }
    // Auth.getUid() can return null even with a user logged in. It is because
    // getUid() is synchronous, but the auth code populating Uid is asynchronous.
    // This method should only be called in the AuthTokenListener callback
    // to guarantee to get the actual user.
    getUser() {
        const currentUid = this.auth && this.auth.getUid();
        hardAssert(currentUid === null || typeof currentUid === 'string');
        return new User(currentUid);
    }
}
  601. /*
  602. * FirstPartyToken provides a fresh token each time its value
  603. * is requested, because if the token is too old, requests will be rejected.
  604. * Technically this may no longer be necessary since the SDK should gracefully
  605. * recover from unauthenticated errors (see b/33147818 for context), but it's
  606. * safer to keep the implementation as-is.
  607. */
  608. class FirstPartyToken {
  609. constructor(gapi, sessionIndex, iamToken, authTokenFactory) {
  610. this.gapi = gapi;
  611. this.sessionIndex = sessionIndex;
  612. this.iamToken = iamToken;
  613. this.authTokenFactory = authTokenFactory;
  614. this.type = 'FirstParty';
  615. this.user = User.FIRST_PARTY;
  616. this._headers = new Map();
  617. }
  618. /** Gets an authorization token, using a provided factory function, or falling back to First Party GAPI. */
  619. getAuthToken() {
  620. if (this.authTokenFactory) {
  621. return this.authTokenFactory();
  622. }
  623. else {
  624. // Make sure this really is a Gapi client.
  625. hardAssert(!!(typeof this.gapi === 'object' &&
  626. this.gapi !== null &&
  627. this.gapi['auth'] &&
  628. this.gapi['auth']['getAuthHeaderValueForFirstParty']));
  629. return this.gapi['auth']['getAuthHeaderValueForFirstParty']([]);
  630. }
  631. }
  632. get headers() {
  633. this._headers.set('X-Goog-AuthUser', this.sessionIndex);
  634. // Use array notation to prevent minification
  635. const authHeaderTokenValue = this.getAuthToken();
  636. if (authHeaderTokenValue) {
  637. this._headers.set('Authorization', authHeaderTokenValue);
  638. }
  639. if (this.iamToken) {
  640. this._headers.set('X-Goog-Iam-Authorization-Token', this.iamToken);
  641. }
  642. return this._headers;
  643. }
  644. }
  645. /*
  646. * Provides user credentials required for the Firestore JavaScript SDK
  647. * to authenticate the user, using technique that is only available
  648. * to applications hosted by Google.
  649. */
  650. class FirstPartyAuthCredentialsProvider {
  651. constructor(gapi, sessionIndex, iamToken, authTokenFactory) {
  652. this.gapi = gapi;
  653. this.sessionIndex = sessionIndex;
  654. this.iamToken = iamToken;
  655. this.authTokenFactory = authTokenFactory;
  656. }
  657. getToken() {
  658. return Promise.resolve(new FirstPartyToken(this.gapi, this.sessionIndex, this.iamToken, this.authTokenFactory));
  659. }
  660. start(asyncQueue, changeListener) {
  661. // Fire with initial uid.
  662. asyncQueue.enqueueRetryable(() => changeListener(User.FIRST_PARTY));
  663. }
  664. shutdown() { }
  665. invalidateToken() { }
  666. }
  667. class AppCheckToken {
  668. constructor(value) {
  669. this.value = value;
  670. this.type = 'AppCheck';
  671. this.headers = new Map();
  672. if (value && value.length > 0) {
  673. this.headers.set('x-firebase-appcheck', this.value);
  674. }
  675. }
  676. }
  677. class FirebaseAppCheckTokenProvider {
  678. constructor(appCheckProvider) {
  679. this.appCheckProvider = appCheckProvider;
  680. this.forceRefresh = false;
  681. this.appCheck = null;
  682. this.latestAppCheckToken = null;
  683. }
  684. start(asyncQueue, changeListener) {
  685. const onTokenChanged = tokenResult => {
  686. if (tokenResult.error != null) {
  687. logDebug('FirebaseAppCheckTokenProvider', `Error getting App Check token; using placeholder token instead. Error: ${tokenResult.error.message}`);
  688. }
  689. const tokenUpdated = tokenResult.token !== this.latestAppCheckToken;
  690. this.latestAppCheckToken = tokenResult.token;
  691. logDebug('FirebaseAppCheckTokenProvider', `Received ${tokenUpdated ? 'new' : 'existing'} token.`);
  692. return tokenUpdated
  693. ? changeListener(tokenResult.token)
  694. : Promise.resolve();
  695. };
  696. this.tokenListener = (tokenResult) => {
  697. asyncQueue.enqueueRetryable(() => onTokenChanged(tokenResult));
  698. };
  699. const registerAppCheck = (appCheck) => {
  700. logDebug('FirebaseAppCheckTokenProvider', 'AppCheck detected');
  701. this.appCheck = appCheck;
  702. this.appCheck.addTokenListener(this.tokenListener);
  703. };
  704. this.appCheckProvider.onInit(appCheck => registerAppCheck(appCheck));
  705. // Our users can initialize AppCheck after Firestore, so we give it
  706. // a chance to register itself with the component framework.
  707. setTimeout(() => {
  708. if (!this.appCheck) {
  709. const appCheck = this.appCheckProvider.getImmediate({ optional: true });
  710. if (appCheck) {
  711. registerAppCheck(appCheck);
  712. }
  713. else {
  714. // If AppCheck is still not available, proceed without it.
  715. logDebug('FirebaseAppCheckTokenProvider', 'AppCheck not yet detected');
  716. }
  717. }
  718. }, 0);
  719. }
  720. getToken() {
  721. const forceRefresh = this.forceRefresh;
  722. this.forceRefresh = false;
  723. if (!this.appCheck) {
  724. return Promise.resolve(null);
  725. }
  726. return this.appCheck.getToken(forceRefresh).then(tokenResult => {
  727. if (tokenResult) {
  728. hardAssert(typeof tokenResult.token === 'string');
  729. this.latestAppCheckToken = tokenResult.token;
  730. return new AppCheckToken(tokenResult.token);
  731. }
  732. else {
  733. return null;
  734. }
  735. });
  736. }
  737. invalidateToken() {
  738. this.forceRefresh = true;
  739. }
  740. shutdown() {
  741. if (this.appCheck) {
  742. this.appCheck.removeTokenListener(this.tokenListener);
  743. }
  744. }
  745. }
  746. /**
  747. * An AppCheck token provider that always yields an empty token.
  748. * @internal
  749. */
  750. class EmptyAppCheckTokenProvider {
  751. getToken() {
  752. return Promise.resolve(new AppCheckToken(''));
  753. }
  754. invalidateToken() { }
  755. start(asyncQueue, changeListener) { }
  756. shutdown() { }
  757. }
  758. /**
  759. * Builds a CredentialsProvider depending on the type of
  760. * the credentials passed in.
  761. */
  762. function makeAuthCredentialsProvider(credentials) {
  763. if (!credentials) {
  764. return new EmptyAuthCredentialsProvider();
  765. }
  766. switch (credentials['type']) {
  767. case 'gapi':
  768. const client = credentials['client'];
  769. return new FirstPartyAuthCredentialsProvider(client, credentials['sessionIndex'] || '0', credentials['iamToken'] || null, credentials['authTokenFactory'] || null);
  770. case 'provider':
  771. return credentials['client'];
  772. default:
  773. throw new FirestoreError(Code.INVALID_ARGUMENT, 'makeAuthCredentialsProvider failed due to invalid credential type');
  774. }
  775. }
  776. /**
  777. * @license
  778. * Copyright 2020 Google LLC
  779. *
  780. * Licensed under the Apache License, Version 2.0 (the "License");
  781. * you may not use this file except in compliance with the License.
  782. * You may obtain a copy of the License at
  783. *
  784. * http://www.apache.org/licenses/LICENSE-2.0
  785. *
  786. * Unless required by applicable law or agreed to in writing, software
  787. * distributed under the License is distributed on an "AS IS" BASIS,
  788. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  789. * See the License for the specific language governing permissions and
  790. * limitations under the License.
  791. */
  792. /**
  793. * Generates `nBytes` of random bytes.
  794. *
  795. * If `nBytes < 0` , an error will be thrown.
  796. */
  797. function randomBytes(nBytes) {
  798. return randomBytes$1(nBytes);
  799. }
  800. /**
  801. * @license
  802. * Copyright 2017 Google LLC
  803. *
  804. * Licensed under the Apache License, Version 2.0 (the "License");
  805. * you may not use this file except in compliance with the License.
  806. * You may obtain a copy of the License at
  807. *
  808. * http://www.apache.org/licenses/LICENSE-2.0
  809. *
  810. * Unless required by applicable law or agreed to in writing, software
  811. * distributed under the License is distributed on an "AS IS" BASIS,
  812. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  813. * See the License for the specific language governing permissions and
  814. * limitations under the License.
  815. */
  816. class AutoId {
  817. static newId() {
  818. // Alphanumeric characters
  819. const chars = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789';
  820. // The largest byte value that is a multiple of `char.length`.
  821. const maxMultiple = Math.floor(256 / chars.length) * chars.length;
  822. let autoId = '';
  823. const targetLength = 20;
  824. while (autoId.length < targetLength) {
  825. const bytes = randomBytes(40);
  826. for (let i = 0; i < bytes.length; ++i) {
  827. // Only accept values that are [0, maxMultiple), this ensures they can
  828. // be evenly mapped to indices of `chars` via a modulo operation.
  829. if (autoId.length < targetLength && bytes[i] < maxMultiple) {
  830. autoId += chars.charAt(bytes[i] % chars.length);
  831. }
  832. }
  833. }
  834. return autoId;
  835. }
  836. }
  837. function primitiveComparator(left, right) {
  838. if (left < right) {
  839. return -1;
  840. }
  841. if (left > right) {
  842. return 1;
  843. }
  844. return 0;
  845. }
  846. /** Helper to compare arrays using isEqual(). */
  847. function arrayEquals(left, right, comparator) {
  848. if (left.length !== right.length) {
  849. return false;
  850. }
  851. return left.every((value, index) => comparator(value, right[index]));
  852. }
  853. /**
  854. * Returns the immediate lexicographically-following string. This is useful to
  855. * construct an inclusive range for indexeddb iterators.
  856. */
  857. function immediateSuccessor(s) {
  858. // Return the input string, with an additional NUL byte appended.
  859. return s + '\0';
  860. }
  861. /**
  862. * @license
  863. * Copyright 2017 Google LLC
  864. *
  865. * Licensed under the Apache License, Version 2.0 (the "License");
  866. * you may not use this file except in compliance with the License.
  867. * You may obtain a copy of the License at
  868. *
  869. * http://www.apache.org/licenses/LICENSE-2.0
  870. *
  871. * Unless required by applicable law or agreed to in writing, software
  872. * distributed under the License is distributed on an "AS IS" BASIS,
  873. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  874. * See the License for the specific language governing permissions and
  875. * limitations under the License.
  876. */
// The earliest date supported by Firestore timestamps (0001-01-01T00:00:00Z);
// lower bound for Timestamp.seconds.
const MIN_SECONDS = -62135596800;
// Number of nanoseconds in a millisecond, used for ms <-> ns conversions.
const MS_TO_NANOS = 1e6;
  881. /**
  882. * A `Timestamp` represents a point in time independent of any time zone or
  883. * calendar, represented as seconds and fractions of seconds at nanosecond
  884. * resolution in UTC Epoch time.
  885. *
  886. * It is encoded using the Proleptic Gregorian Calendar which extends the
  887. * Gregorian calendar backwards to year one. It is encoded assuming all minutes
  888. * are 60 seconds long, i.e. leap seconds are "smeared" so that no leap second
  889. * table is needed for interpretation. Range is from 0001-01-01T00:00:00Z to
  890. * 9999-12-31T23:59:59.999999999Z.
  891. *
  892. * For examples and further specifications, refer to the
  893. * {@link https://github.com/google/protobuf/blob/master/src/google/protobuf/timestamp.proto | Timestamp definition}.
  894. */
  895. class Timestamp {
  896. /**
  897. * Creates a new timestamp.
  898. *
  899. * @param seconds - The number of seconds of UTC time since Unix epoch
  900. * 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to
  901. * 9999-12-31T23:59:59Z inclusive.
  902. * @param nanoseconds - The non-negative fractions of a second at nanosecond
  903. * resolution. Negative second values with fractions must still have
  904. * non-negative nanoseconds values that count forward in time. Must be
  905. * from 0 to 999,999,999 inclusive.
  906. */
  907. constructor(
  908. /**
  909. * The number of seconds of UTC time since Unix epoch 1970-01-01T00:00:00Z.
  910. */
  911. seconds,
  912. /**
  913. * The fractions of a second at nanosecond resolution.*
  914. */
  915. nanoseconds) {
  916. this.seconds = seconds;
  917. this.nanoseconds = nanoseconds;
  918. if (nanoseconds < 0) {
  919. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp nanoseconds out of range: ' + nanoseconds);
  920. }
  921. if (nanoseconds >= 1e9) {
  922. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp nanoseconds out of range: ' + nanoseconds);
  923. }
  924. if (seconds < MIN_SECONDS) {
  925. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp seconds out of range: ' + seconds);
  926. }
  927. // This will break in the year 10,000.
  928. if (seconds >= 253402300800) {
  929. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Timestamp seconds out of range: ' + seconds);
  930. }
  931. }
  932. /**
  933. * Creates a new timestamp with the current date, with millisecond precision.
  934. *
  935. * @returns a new timestamp representing the current date.
  936. */
  937. static now() {
  938. return Timestamp.fromMillis(Date.now());
  939. }
  940. /**
  941. * Creates a new timestamp from the given date.
  942. *
  943. * @param date - The date to initialize the `Timestamp` from.
  944. * @returns A new `Timestamp` representing the same point in time as the given
  945. * date.
  946. */
  947. static fromDate(date) {
  948. return Timestamp.fromMillis(date.getTime());
  949. }
  950. /**
  951. * Creates a new timestamp from the given number of milliseconds.
  952. *
  953. * @param milliseconds - Number of milliseconds since Unix epoch
  954. * 1970-01-01T00:00:00Z.
  955. * @returns A new `Timestamp` representing the same point in time as the given
  956. * number of milliseconds.
  957. */
  958. static fromMillis(milliseconds) {
  959. const seconds = Math.floor(milliseconds / 1000);
  960. const nanos = Math.floor((milliseconds - seconds * 1000) * MS_TO_NANOS);
  961. return new Timestamp(seconds, nanos);
  962. }
  963. /**
  964. * Converts a `Timestamp` to a JavaScript `Date` object. This conversion
  965. * causes a loss of precision since `Date` objects only support millisecond
  966. * precision.
  967. *
  968. * @returns JavaScript `Date` object representing the same point in time as
  969. * this `Timestamp`, with millisecond precision.
  970. */
  971. toDate() {
  972. return new Date(this.toMillis());
  973. }
  974. /**
  975. * Converts a `Timestamp` to a numeric timestamp (in milliseconds since
  976. * epoch). This operation causes a loss of precision.
  977. *
  978. * @returns The point in time corresponding to this timestamp, represented as
  979. * the number of milliseconds since Unix epoch 1970-01-01T00:00:00Z.
  980. */
  981. toMillis() {
  982. return this.seconds * 1000 + this.nanoseconds / MS_TO_NANOS;
  983. }
  984. _compareTo(other) {
  985. if (this.seconds === other.seconds) {
  986. return primitiveComparator(this.nanoseconds, other.nanoseconds);
  987. }
  988. return primitiveComparator(this.seconds, other.seconds);
  989. }
  990. /**
  991. * Returns true if this `Timestamp` is equal to the provided one.
  992. *
  993. * @param other - The `Timestamp` to compare against.
  994. * @returns true if this `Timestamp` is equal to the provided one.
  995. */
  996. isEqual(other) {
  997. return (other.seconds === this.seconds && other.nanoseconds === this.nanoseconds);
  998. }
  999. /** Returns a textual representation of this `Timestamp`. */
  1000. toString() {
  1001. return ('Timestamp(seconds=' +
  1002. this.seconds +
  1003. ', nanoseconds=' +
  1004. this.nanoseconds +
  1005. ')');
  1006. }
  1007. /** Returns a JSON-serializable representation of this `Timestamp`. */
  1008. toJSON() {
  1009. return { seconds: this.seconds, nanoseconds: this.nanoseconds };
  1010. }
  1011. /**
  1012. * Converts this object to a primitive string, which allows `Timestamp` objects
  1013. * to be compared using the `>`, `<=`, `>=` and `>` operators.
  1014. */
  1015. valueOf() {
  1016. // This method returns a string of the form <seconds>.<nanoseconds> where
  1017. // <seconds> is translated to have a non-negative value and both <seconds>
  1018. // and <nanoseconds> are left-padded with zeroes to be a consistent length.
  1019. // Strings with this format then have a lexiographical ordering that matches
  1020. // the expected ordering. The <seconds> translation is done to avoid having
  1021. // a leading negative sign (i.e. a leading '-' character) in its string
  1022. // representation, which would affect its lexiographical ordering.
  1023. const adjustedSeconds = this.seconds - MIN_SECONDS;
  1024. // Note: Up to 12 decimal digits are required to represent all valid
  1025. // 'seconds' values.
  1026. const formattedSeconds = String(adjustedSeconds).padStart(12, '0');
  1027. const formattedNanoseconds = String(this.nanoseconds).padStart(9, '0');
  1028. return formattedSeconds + '.' + formattedNanoseconds;
  1029. }
  1030. }
  1031. /**
  1032. * @license
  1033. * Copyright 2017 Google LLC
  1034. *
  1035. * Licensed under the Apache License, Version 2.0 (the "License");
  1036. * you may not use this file except in compliance with the License.
  1037. * You may obtain a copy of the License at
  1038. *
  1039. * http://www.apache.org/licenses/LICENSE-2.0
  1040. *
  1041. * Unless required by applicable law or agreed to in writing, software
  1042. * distributed under the License is distributed on an "AS IS" BASIS,
  1043. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1044. * See the License for the specific language governing permissions and
  1045. * limitations under the License.
  1046. */
  1047. /**
  1048. * A version of a document in Firestore. This corresponds to the version
  1049. * timestamp, such as update_time or read_time.
  1050. */
  1051. class SnapshotVersion {
  1052. constructor(timestamp) {
  1053. this.timestamp = timestamp;
  1054. }
  1055. static fromTimestamp(value) {
  1056. return new SnapshotVersion(value);
  1057. }
  1058. static min() {
  1059. return new SnapshotVersion(new Timestamp(0, 0));
  1060. }
  1061. static max() {
  1062. return new SnapshotVersion(new Timestamp(253402300799, 1e9 - 1));
  1063. }
  1064. compareTo(other) {
  1065. return this.timestamp._compareTo(other.timestamp);
  1066. }
  1067. isEqual(other) {
  1068. return this.timestamp.isEqual(other.timestamp);
  1069. }
  1070. /** Returns a number representation of the version for use in spec tests. */
  1071. toMicroseconds() {
  1072. // Convert to microseconds.
  1073. return this.timestamp.seconds * 1e6 + this.timestamp.nanoseconds / 1000;
  1074. }
  1075. toString() {
  1076. return 'SnapshotVersion(' + this.timestamp.toString() + ')';
  1077. }
  1078. toTimestamp() {
  1079. return this.timestamp;
  1080. }
  1081. }
  1082. /**
  1083. * @license
  1084. * Copyright 2017 Google LLC
  1085. *
  1086. * Licensed under the Apache License, Version 2.0 (the "License");
  1087. * you may not use this file except in compliance with the License.
  1088. * You may obtain a copy of the License at
  1089. *
  1090. * http://www.apache.org/licenses/LICENSE-2.0
  1091. *
  1092. * Unless required by applicable law or agreed to in writing, software
  1093. * distributed under the License is distributed on an "AS IS" BASIS,
  1094. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1095. * See the License for the specific language governing permissions and
  1096. * limitations under the License.
  1097. */
// The canonical field name that refers to a document's key.
const DOCUMENT_KEY_NAME = '__name__';
  1099. /**
  1100. * Path represents an ordered sequence of string segments.
  1101. */
  1102. class BasePath {
  1103. constructor(segments, offset, length) {
  1104. if (offset === undefined) {
  1105. offset = 0;
  1106. }
  1107. else if (offset > segments.length) {
  1108. fail();
  1109. }
  1110. if (length === undefined) {
  1111. length = segments.length - offset;
  1112. }
  1113. else if (length > segments.length - offset) {
  1114. fail();
  1115. }
  1116. this.segments = segments;
  1117. this.offset = offset;
  1118. this.len = length;
  1119. }
  1120. get length() {
  1121. return this.len;
  1122. }
  1123. isEqual(other) {
  1124. return BasePath.comparator(this, other) === 0;
  1125. }
  1126. child(nameOrPath) {
  1127. const segments = this.segments.slice(this.offset, this.limit());
  1128. if (nameOrPath instanceof BasePath) {
  1129. nameOrPath.forEach(segment => {
  1130. segments.push(segment);
  1131. });
  1132. }
  1133. else {
  1134. segments.push(nameOrPath);
  1135. }
  1136. return this.construct(segments);
  1137. }
  1138. /** The index of one past the last segment of the path. */
  1139. limit() {
  1140. return this.offset + this.length;
  1141. }
  1142. popFirst(size) {
  1143. size = size === undefined ? 1 : size;
  1144. return this.construct(this.segments, this.offset + size, this.length - size);
  1145. }
  1146. popLast() {
  1147. return this.construct(this.segments, this.offset, this.length - 1);
  1148. }
  1149. firstSegment() {
  1150. return this.segments[this.offset];
  1151. }
  1152. lastSegment() {
  1153. return this.get(this.length - 1);
  1154. }
  1155. get(index) {
  1156. return this.segments[this.offset + index];
  1157. }
  1158. isEmpty() {
  1159. return this.length === 0;
  1160. }
  1161. isPrefixOf(other) {
  1162. if (other.length < this.length) {
  1163. return false;
  1164. }
  1165. for (let i = 0; i < this.length; i++) {
  1166. if (this.get(i) !== other.get(i)) {
  1167. return false;
  1168. }
  1169. }
  1170. return true;
  1171. }
  1172. isImmediateParentOf(potentialChild) {
  1173. if (this.length + 1 !== potentialChild.length) {
  1174. return false;
  1175. }
  1176. for (let i = 0; i < this.length; i++) {
  1177. if (this.get(i) !== potentialChild.get(i)) {
  1178. return false;
  1179. }
  1180. }
  1181. return true;
  1182. }
  1183. forEach(fn) {
  1184. for (let i = this.offset, end = this.limit(); i < end; i++) {
  1185. fn(this.segments[i]);
  1186. }
  1187. }
  1188. toArray() {
  1189. return this.segments.slice(this.offset, this.limit());
  1190. }
  1191. static comparator(p1, p2) {
  1192. const len = Math.min(p1.length, p2.length);
  1193. for (let i = 0; i < len; i++) {
  1194. const left = p1.get(i);
  1195. const right = p2.get(i);
  1196. if (left < right) {
  1197. return -1;
  1198. }
  1199. if (left > right) {
  1200. return 1;
  1201. }
  1202. }
  1203. if (p1.length < p2.length) {
  1204. return -1;
  1205. }
  1206. if (p1.length > p2.length) {
  1207. return 1;
  1208. }
  1209. return 0;
  1210. }
  1211. }
  1212. /**
  1213. * A slash-separated path for navigating resources (documents and collections)
  1214. * within Firestore.
  1215. *
  1216. * @internal
  1217. */
  1218. class ResourcePath extends BasePath {
  1219. construct(segments, offset, length) {
  1220. return new ResourcePath(segments, offset, length);
  1221. }
  1222. canonicalString() {
  1223. // NOTE: The client is ignorant of any path segments containing escape
  1224. // sequences (e.g. __id123__) and just passes them through raw (they exist
  1225. // for legacy reasons and should not be used frequently).
  1226. return this.toArray().join('/');
  1227. }
  1228. toString() {
  1229. return this.canonicalString();
  1230. }
  1231. /**
  1232. * Creates a resource path from the given slash-delimited string. If multiple
  1233. * arguments are provided, all components are combined. Leading and trailing
  1234. * slashes from all components are ignored.
  1235. */
  1236. static fromString(...pathComponents) {
  1237. // NOTE: The client is ignorant of any path segments containing escape
  1238. // sequences (e.g. __id123__) and just passes them through raw (they exist
  1239. // for legacy reasons and should not be used frequently).
  1240. const segments = [];
  1241. for (const path of pathComponents) {
  1242. if (path.indexOf('//') >= 0) {
  1243. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid segment (${path}). Paths must not contain // in them.`);
  1244. }
  1245. // Strip leading and traling slashed.
  1246. segments.push(...path.split('/').filter(segment => segment.length > 0));
  1247. }
  1248. return new ResourcePath(segments);
  1249. }
  1250. static emptyPath() {
  1251. return new ResourcePath([]);
  1252. }
  1253. }
// Matches field-path segments that need no backtick escaping: a leading
// letter/underscore followed by letters, digits or underscores.
const identifierRegExp = /^[_a-zA-Z][_a-zA-Z0-9]*$/;
/**
 * A dot-separated path for navigating sub-objects within a document.
 * @internal
 */
class FieldPath$1 extends BasePath {
    construct(segments, offset, length) {
        return new FieldPath$1(segments, offset, length);
    }
    /**
     * Returns true if the string could be used as a segment in a field path
     * without escaping.
     */
    static isValidIdentifier(segment) {
        return identifierRegExp.test(segment);
    }
    /**
     * Returns the dot-joined path with each segment escaped: backslashes and
     * backticks are backslash-escaped, and any segment that is not a plain
     * identifier is wrapped in backticks.
     */
    canonicalString() {
        return this.toArray()
            .map(str => {
            // Order matters: escape backslashes first so the backslashes
            // introduced for backticks are not themselves re-escaped.
            str = str.replace(/\\/g, '\\\\').replace(/`/g, '\\`');
            if (!FieldPath$1.isValidIdentifier(str)) {
                str = '`' + str + '`';
            }
            return str;
        })
            .join('.');
    }
    toString() {
        return this.canonicalString();
    }
    /**
     * Returns true if this field references the key of a document.
     */
    isKeyField() {
        return this.length === 1 && this.get(0) === DOCUMENT_KEY_NAME;
    }
    /**
     * The field designating the key of a document.
     */
    static keyField() {
        return new FieldPath$1([DOCUMENT_KEY_NAME]);
    }
    /**
     * Parses a field string from the given server-formatted string.
     *
     * - Splitting the empty string is not allowed (for now at least).
     * - Empty segments within the string (e.g. if there are two consecutive
     * separators) are not allowed.
     *
     * TODO(b/37244157): we should make this more strict. Right now, it allows
     * non-identifier path components, even if they aren't escaped.
     */
    static fromServerFormat(path) {
        const segments = [];
        let current = '';
        let i = 0;
        // Flushes `current` into `segments`. An empty segment means the path
        // starts/ends with '.' or contains '..', all of which are invalid.
        const addCurrentSegment = () => {
            if (current.length === 0) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid field path (${path}). Paths must not be empty, begin ` +
                    `with '.', end with '.', or contain '..'`);
            }
            segments.push(current);
            current = '';
        };
        // True while scanning inside a backtick-quoted segment, where '.' is a
        // literal character rather than a separator.
        let inBackticks = false;
        while (i < path.length) {
            const c = path[i];
            if (c === '\\') {
                // Only '\\', '\.' and '\`' are valid escape sequences; the
                // escaped character is emitted verbatim.
                if (i + 1 === path.length) {
                    throw new FirestoreError(Code.INVALID_ARGUMENT, 'Path has trailing escape character: ' + path);
                }
                const next = path[i + 1];
                if (!(next === '\\' || next === '.' || next === '`')) {
                    throw new FirestoreError(Code.INVALID_ARGUMENT, 'Path has invalid escape sequence: ' + path);
                }
                current += next;
                i += 2;
            }
            else if (c === '`') {
                // Backticks toggle quoting; they are not part of the segment.
                inBackticks = !inBackticks;
                i++;
            }
            else if (c === '.' && !inBackticks) {
                // Unquoted '.' terminates the current segment.
                addCurrentSegment();
                i++;
            }
            else {
                current += c;
                i++;
            }
        }
        // Flush the final segment (also rejects a path ending in '.').
        addCurrentSegment();
        if (inBackticks) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Unterminated ` in path: ' + path);
        }
        return new FieldPath$1(segments);
    }
    static emptyPath() {
        return new FieldPath$1([]);
    }
}
  1355. /**
  1356. * @license
  1357. * Copyright 2017 Google LLC
  1358. *
  1359. * Licensed under the Apache License, Version 2.0 (the "License");
  1360. * you may not use this file except in compliance with the License.
  1361. * You may obtain a copy of the License at
  1362. *
  1363. * http://www.apache.org/licenses/LICENSE-2.0
  1364. *
  1365. * Unless required by applicable law or agreed to in writing, software
  1366. * distributed under the License is distributed on an "AS IS" BASIS,
  1367. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1368. * See the License for the specific language governing permissions and
  1369. * limitations under the License.
  1370. */
  1371. /**
  1372. * @internal
  1373. */
  1374. class DocumentKey {
  1375. constructor(path) {
  1376. this.path = path;
  1377. }
  1378. static fromPath(path) {
  1379. return new DocumentKey(ResourcePath.fromString(path));
  1380. }
  1381. static fromName(name) {
  1382. return new DocumentKey(ResourcePath.fromString(name).popFirst(5));
  1383. }
  1384. static empty() {
  1385. return new DocumentKey(ResourcePath.emptyPath());
  1386. }
  1387. get collectionGroup() {
  1388. return this.path.popLast().lastSegment();
  1389. }
  1390. /** Returns true if the document is in the specified collectionId. */
  1391. hasCollectionId(collectionId) {
  1392. return (this.path.length >= 2 &&
  1393. this.path.get(this.path.length - 2) === collectionId);
  1394. }
  1395. /** Returns the collection group (i.e. the name of the parent collection) for this key. */
  1396. getCollectionGroup() {
  1397. return this.path.get(this.path.length - 2);
  1398. }
  1399. /** Returns the fully qualified path to the parent collection. */
  1400. getCollectionPath() {
  1401. return this.path.popLast();
  1402. }
  1403. isEqual(other) {
  1404. return (other !== null && ResourcePath.comparator(this.path, other.path) === 0);
  1405. }
  1406. toString() {
  1407. return this.path.toString();
  1408. }
  1409. static comparator(k1, k2) {
  1410. return ResourcePath.comparator(k1.path, k2.path);
  1411. }
  1412. static isDocumentKey(path) {
  1413. return path.length % 2 === 0;
  1414. }
  1415. /**
  1416. * Creates and returns a new document key with the given segments.
  1417. *
  1418. * @param segments - The segments of the path to the document
  1419. * @returns A new instance of DocumentKey
  1420. */
  1421. static fromSegments(segments) {
  1422. return new DocumentKey(new ResourcePath(segments.slice()));
  1423. }
  1424. }
  1425. /**
  1426. * @license
  1427. * Copyright 2021 Google LLC
  1428. *
  1429. * Licensed under the Apache License, Version 2.0 (the "License");
  1430. * you may not use this file except in compliance with the License.
  1431. * You may obtain a copy of the License at
  1432. *
  1433. * http://www.apache.org/licenses/LICENSE-2.0
  1434. *
  1435. * Unless required by applicable law or agreed to in writing, software
  1436. * distributed under the License is distributed on an "AS IS" BASIS,
  1437. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1438. * See the License for the specific language governing permissions and
  1439. * limitations under the License.
  1440. */
/**
 * The initial mutation batch id for each index. Gets updated during index
 * backfill. -1 sorts before every real batch id.
 */
const INITIAL_LARGEST_BATCH_ID = -1;
/**
 * The initial sequence number for each index. Gets updated during index
 * backfill.
 */
const INITIAL_SEQUENCE_NUMBER = 0;
  1451. /**
  1452. * An index definition for field indexes in Firestore.
  1453. *
  1454. * Every index is associated with a collection. The definition contains a list
  1455. * of fields and their index kind (which can be `ASCENDING`, `DESCENDING` or
  1456. * `CONTAINS` for ArrayContains/ArrayContainsAny queries).
  1457. *
  1458. * Unlike the backend, the SDK does not differentiate between collection or
  1459. * collection group-scoped indices. Every index can be used for both single
  1460. * collection and collection group queries.
  1461. */
  1462. class FieldIndex {
  1463. constructor(
  1464. /**
  1465. * The index ID. Returns -1 if the index ID is not available (e.g. the index
  1466. * has not yet been persisted).
  1467. */
  1468. indexId,
  1469. /** The collection ID this index applies to. */
  1470. collectionGroup,
  1471. /** The field segments for this index. */
  1472. fields,
  1473. /** Shows how up-to-date the index is for the current user. */
  1474. indexState) {
  1475. this.indexId = indexId;
  1476. this.collectionGroup = collectionGroup;
  1477. this.fields = fields;
  1478. this.indexState = indexState;
  1479. }
  1480. }
  1481. /** An ID for an index that has not yet been added to persistence. */
  1482. FieldIndex.UNKNOWN_ID = -1;
  1483. /** Returns the ArrayContains/ArrayContainsAny segment for this index. */
  1484. function fieldIndexGetArraySegment(fieldIndex) {
  1485. return fieldIndex.fields.find(s => s.kind === 2 /* IndexKind.CONTAINS */);
  1486. }
  1487. /** Returns all directional (ascending/descending) segments for this index. */
  1488. function fieldIndexGetDirectionalSegments(fieldIndex) {
  1489. return fieldIndex.fields.filter(s => s.kind !== 2 /* IndexKind.CONTAINS */);
  1490. }
  1491. /**
  1492. * Returns the order of the document key component for the given index.
  1493. *
  1494. * PORTING NOTE: This is only used in the Web IndexedDb implementation.
  1495. */
  1496. function fieldIndexGetKeyOrder(fieldIndex) {
  1497. const directionalSegments = fieldIndexGetDirectionalSegments(fieldIndex);
  1498. return directionalSegments.length === 0
  1499. ? 0 /* IndexKind.ASCENDING */
  1500. : directionalSegments[directionalSegments.length - 1].kind;
  1501. }
  1502. /**
  1503. * Compares indexes by collection group and segments. Ignores update time and
  1504. * index ID.
  1505. */
  1506. function fieldIndexSemanticComparator(left, right) {
  1507. let cmp = primitiveComparator(left.collectionGroup, right.collectionGroup);
  1508. if (cmp !== 0) {
  1509. return cmp;
  1510. }
  1511. for (let i = 0; i < Math.min(left.fields.length, right.fields.length); ++i) {
  1512. cmp = indexSegmentComparator(left.fields[i], right.fields[i]);
  1513. if (cmp !== 0) {
  1514. return cmp;
  1515. }
  1516. }
  1517. return primitiveComparator(left.fields.length, right.fields.length);
  1518. }
  1519. /** Returns a debug representation of the field index */
  1520. function fieldIndexToString(fieldIndex) {
  1521. return `id=${fieldIndex.indexId}|cg=${fieldIndex.collectionGroup}|f=${fieldIndex.fields.map(f => `${f.fieldPath}:${f.kind}`).join(',')}`;
  1522. }
  1523. /** An index component consisting of field path and index type. */
  1524. class IndexSegment {
  1525. constructor(
  1526. /** The field path of the component. */
  1527. fieldPath,
  1528. /** The fields sorting order. */
  1529. kind) {
  1530. this.fieldPath = fieldPath;
  1531. this.kind = kind;
  1532. }
  1533. }
  1534. function indexSegmentComparator(left, right) {
  1535. const cmp = FieldPath$1.comparator(left.fieldPath, right.fieldPath);
  1536. if (cmp !== 0) {
  1537. return cmp;
  1538. }
  1539. return primitiveComparator(left.kind, right.kind);
  1540. }
  1541. /**
  1542. * Stores the "high water mark" that indicates how updated the Index is for the
  1543. * current user.
  1544. */
  1545. class IndexState {
  1546. constructor(
  1547. /**
  1548. * Indicates when the index was last updated (relative to other indexes).
  1549. */
  1550. sequenceNumber,
  1551. /** The the latest indexed read time, document and batch id. */
  1552. offset) {
  1553. this.sequenceNumber = sequenceNumber;
  1554. this.offset = offset;
  1555. }
  1556. /** The state of an index that has not yet been backfilled. */
  1557. static empty() {
  1558. return new IndexState(INITIAL_SEQUENCE_NUMBER, IndexOffset.min());
  1559. }
  1560. }
  1561. /**
  1562. * Creates an offset that matches all documents with a read time higher than
  1563. * `readTime`.
  1564. */
  1565. function newIndexOffsetSuccessorFromReadTime(readTime, largestBatchId) {
  1566. // We want to create an offset that matches all documents with a read time
  1567. // greater than the provided read time. To do so, we technically need to
  1568. // create an offset for `(readTime, MAX_DOCUMENT_KEY)`. While we could use
  1569. // Unicode codepoints to generate MAX_DOCUMENT_KEY, it is much easier to use
  1570. // `(readTime + 1, DocumentKey.empty())` since `> DocumentKey.empty()` matches
  1571. // all valid document IDs.
  1572. const successorSeconds = readTime.toTimestamp().seconds;
  1573. const successorNanos = readTime.toTimestamp().nanoseconds + 1;
  1574. const successor = SnapshotVersion.fromTimestamp(successorNanos === 1e9
  1575. ? new Timestamp(successorSeconds + 1, 0)
  1576. : new Timestamp(successorSeconds, successorNanos));
  1577. return new IndexOffset(successor, DocumentKey.empty(), largestBatchId);
  1578. }
  1579. /** Creates a new offset based on the provided document. */
  1580. function newIndexOffsetFromDocument(document) {
  1581. return new IndexOffset(document.readTime, document.key, INITIAL_LARGEST_BATCH_ID);
  1582. }
  1583. /**
  1584. * Stores the latest read time, document and batch ID that were processed for an
  1585. * index.
  1586. */
  1587. class IndexOffset {
  1588. constructor(
  1589. /**
  1590. * The latest read time version that has been indexed by Firestore for this
  1591. * field index.
  1592. */
  1593. readTime,
  1594. /**
  1595. * The key of the last document that was indexed for this query. Use
  1596. * `DocumentKey.empty()` if no document has been indexed.
  1597. */
  1598. documentKey,
  1599. /*
  1600. * The largest mutation batch id that's been processed by Firestore.
  1601. */
  1602. largestBatchId) {
  1603. this.readTime = readTime;
  1604. this.documentKey = documentKey;
  1605. this.largestBatchId = largestBatchId;
  1606. }
  1607. /** Returns an offset that sorts before all regular offsets. */
  1608. static min() {
  1609. return new IndexOffset(SnapshotVersion.min(), DocumentKey.empty(), INITIAL_LARGEST_BATCH_ID);
  1610. }
  1611. /** Returns an offset that sorts after all regular offsets. */
  1612. static max() {
  1613. return new IndexOffset(SnapshotVersion.max(), DocumentKey.empty(), INITIAL_LARGEST_BATCH_ID);
  1614. }
  1615. }
  1616. function indexOffsetComparator(left, right) {
  1617. let cmp = left.readTime.compareTo(right.readTime);
  1618. if (cmp !== 0) {
  1619. return cmp;
  1620. }
  1621. cmp = DocumentKey.comparator(left.documentKey, right.documentKey);
  1622. if (cmp !== 0) {
  1623. return cmp;
  1624. }
  1625. return primitiveComparator(left.largestBatchId, right.largestBatchId);
  1626. }
  1627. /**
  1628. * @license
  1629. * Copyright 2020 Google LLC
  1630. *
  1631. * Licensed under the Apache License, Version 2.0 (the "License");
  1632. * you may not use this file except in compliance with the License.
  1633. * You may obtain a copy of the License at
  1634. *
  1635. * http://www.apache.org/licenses/LICENSE-2.0
  1636. *
  1637. * Unless required by applicable law or agreed to in writing, software
  1638. * distributed under the License is distributed on an "AS IS" BASIS,
  1639. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1640. * See the License for the specific language governing permissions and
  1641. * limitations under the License.
  1642. */
// Exact message thrown when a tab loses the primary lease; matched verbatim by
// ignoreIfPrimaryLeaseLoss(), so it must not be reworded in only one place.
const PRIMARY_LEASE_LOST_ERROR_MSG = 'The current tab is not in the required state to perform this operation. ' +
    'It might be necessary to refresh the browser tab.';
  1645. /**
  1646. * A base class representing a persistence transaction, encapsulating both the
  1647. * transaction's sequence numbers as well as a list of onCommitted listeners.
  1648. *
  1649. * When you call Persistence.runTransaction(), it will create a transaction and
  1650. * pass it to your callback. You then pass it to any method that operates
  1651. * on persistence.
  1652. */
  1653. class PersistenceTransaction {
  1654. constructor() {
  1655. this.onCommittedListeners = [];
  1656. }
  1657. addOnCommittedListener(listener) {
  1658. this.onCommittedListeners.push(listener);
  1659. }
  1660. raiseOnCommittedEvent() {
  1661. this.onCommittedListeners.forEach(listener => listener());
  1662. }
  1663. }
  1664. /**
  1665. * @license
  1666. * Copyright 2017 Google LLC
  1667. *
  1668. * Licensed under the Apache License, Version 2.0 (the "License");
  1669. * you may not use this file except in compliance with the License.
  1670. * You may obtain a copy of the License at
  1671. *
  1672. * http://www.apache.org/licenses/LICENSE-2.0
  1673. *
  1674. * Unless required by applicable law or agreed to in writing, software
  1675. * distributed under the License is distributed on an "AS IS" BASIS,
  1676. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1677. * See the License for the specific language governing permissions and
  1678. * limitations under the License.
  1679. */
  1680. /**
  1681. * Verifies the error thrown by a LocalStore operation. If a LocalStore
  1682. * operation fails because the primary lease has been taken by another client,
  1683. * we ignore the error (the persistence layer will immediately call
  1684. * `applyPrimaryLease` to propagate the primary state change). All other errors
  1685. * are re-thrown.
  1686. *
  1687. * @param err - An error returned by a LocalStore operation.
  1688. * @returns A Promise that resolves after we recovered, or the original error.
  1689. */
  1690. async function ignoreIfPrimaryLeaseLoss(err) {
  1691. if (err.code === Code.FAILED_PRECONDITION &&
  1692. err.message === PRIMARY_LEASE_LOST_ERROR_MSG) {
  1693. logDebug('LocalStore', 'Unexpectedly lost primary lease');
  1694. }
  1695. else {
  1696. throw err;
  1697. }
  1698. }
  1699. /**
  1700. * @license
  1701. * Copyright 2017 Google LLC
  1702. *
  1703. * Licensed under the Apache License, Version 2.0 (the "License");
  1704. * you may not use this file except in compliance with the License.
  1705. * You may obtain a copy of the License at
  1706. *
  1707. * http://www.apache.org/licenses/LICENSE-2.0
  1708. *
  1709. * Unless required by applicable law or agreed to in writing, software
  1710. * distributed under the License is distributed on an "AS IS" BASIS,
  1711. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1712. * See the License for the specific language governing permissions and
  1713. * limitations under the License.
  1714. */
  1715. /**
  1716. * PersistencePromise is essentially a re-implementation of Promise except
  1717. * it has a .next() method instead of .then() and .next() and .catch() callbacks
  1718. * are executed synchronously when a PersistencePromise resolves rather than
  1719. * asynchronously (Promise implementations use setImmediate() or similar).
  1720. *
  1721. * This is necessary to interoperate with IndexedDB which will automatically
  1722. * commit transactions if control is returned to the event loop without
  1723. * synchronously initiating another operation on the transaction.
  1724. *
  1725. * NOTE: .then() and .catch() only allow a single consumer, unlike normal
  1726. * Promises.
  1727. */
class PersistencePromise {
  /**
   * @param callback - Executor receiving `resolve` and `reject` functions,
   * exactly like the native Promise constructor. Whichever one is invoked
   * settles this PersistencePromise and SYNCHRONOUSLY runs any chained
   * callback (this synchronous delivery is the whole point of the class —
   * see the class comment about IndexedDB auto-commit).
   */
  constructor(callback) {
    // NOTE: next/catchCallback will always point to our own wrapper functions,
    // not the user's raw next() or catch() callbacks.
    this.nextCallback = null;
    this.catchCallback = null;
    // When the operation resolves, we'll set result or error and mark isDone.
    this.result = undefined;
    this.error = undefined;
    this.isDone = false;
    // Set to true when .then() or .catch() are called and prevents additional
    // chaining.
    this.callbackAttached = false;
    callback(value => {
      this.isDone = true;
      this.result = value;
      if (this.nextCallback) {
        // value should be defined unless T is Void, but we can't express
        // that in the type system.
        this.nextCallback(value);
      }
    }, error => {
      this.isDone = true;
      this.error = error;
      if (this.catchCallback) {
        this.catchCallback(error);
      }
    });
  }
  /** Equivalent to `next(undefined, fn)`: handles only the failure path. */
  catch(fn) {
    return this.next(undefined, fn);
  }
  /**
   * Chains `nextFn` (success) and/or `catchFn` (failure) onto this promise.
   * Unlike native `then()`, callbacks run synchronously when the promise has
   * already settled, and only a single consumer may chain — a second call
   * hard-fails via `fail()`.
   */
  next(nextFn, catchFn) {
    if (this.callbackAttached) {
      fail();
    }
    this.callbackAttached = true;
    if (this.isDone) {
      // Already settled: deliver the outcome synchronously.
      if (!this.error) {
        return this.wrapSuccess(nextFn, this.result);
      }
      else {
        return this.wrapFailure(catchFn, this.error);
      }
    }
    else {
      // Not settled yet: stash wrappers that forward the eventual outcome
      // into the PersistencePromise returned here.
      return new PersistencePromise((resolve, reject) => {
        this.nextCallback = (value) => {
          this.wrapSuccess(nextFn, value).next(resolve, reject);
        };
        this.catchCallback = (error) => {
          this.wrapFailure(catchFn, error).next(resolve, reject);
        };
      });
    }
  }
  /** Bridges this PersistencePromise into a native (asynchronous) Promise. */
  toPromise() {
    return new Promise((resolve, reject) => {
      this.next(resolve, reject);
    });
  }
  /**
   * Runs a user callback, normalizing thrown exceptions and plain return
   * values into a PersistencePromise.
   */
  wrapUserFunction(fn) {
    try {
      const result = fn();
      if (result instanceof PersistencePromise) {
        return result;
      }
      else {
        return PersistencePromise.resolve(result);
      }
    }
    catch (e) {
      return PersistencePromise.reject(e);
    }
  }
  /** Invokes `nextFn` with `value`, or passes the value through unchanged. */
  wrapSuccess(nextFn, value) {
    if (nextFn) {
      return this.wrapUserFunction(() => nextFn(value));
    }
    else {
      // If there's no nextFn, then R must be the same as T
      return PersistencePromise.resolve(value);
    }
  }
  /** Invokes `catchFn` with `error`, or re-propagates the rejection. */
  wrapFailure(catchFn, error) {
    if (catchFn) {
      return this.wrapUserFunction(() => catchFn(error));
    }
    else {
      return PersistencePromise.reject(error);
    }
  }
  /** Returns a PersistencePromise already resolved with `result`. */
  static resolve(result) {
    return new PersistencePromise((resolve, reject) => {
      resolve(result);
    });
  }
  /** Returns a PersistencePromise already rejected with `error`. */
  static reject(error) {
    return new PersistencePromise((resolve, reject) => {
      reject(error);
    });
  }
  /**
   * Resolves once every promise in `all` has resolved; rejects as soon as any
   * of them rejects. The `done` flag prevents resolving while the forEach()
   * enumeration is still counting elements (callbacks fire synchronously).
   */
  static waitFor(
  // Accept all Promise types in waitFor().
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  all) {
    return new PersistencePromise((resolve, reject) => {
      let expectedCount = 0;
      let resolvedCount = 0;
      let done = false;
      all.forEach(element => {
        ++expectedCount;
        element.next(() => {
          ++resolvedCount;
          if (done && resolvedCount === expectedCount) {
            resolve();
          }
        }, err => reject(err));
      });
      done = true;
      if (resolvedCount === expectedCount) {
        resolve();
      }
    });
  }
  /**
   * Given an array of predicate functions that asynchronously evaluate to a
   * boolean, implements a short-circuiting `or` between the results. Predicates
   * will be evaluated until one of them returns `true`, then stop. The final
   * result will be whether any of them returned `true`.
   */
  static or(predicates) {
    let p = PersistencePromise.resolve(false);
    for (const predicate of predicates) {
      p = p.next(isTrue => {
        if (isTrue) {
          return PersistencePromise.resolve(isTrue);
        }
        else {
          return predicate();
        }
      });
    }
    return p;
  }
  /**
   * Applies `f` to each (value, key) pair of `collection` (anything exposing
   * a forEach) and waits for all resulting promises to resolve.
   */
  static forEach(collection, f) {
    const promises = [];
    collection.forEach((r, s) => {
      promises.push(f.call(this, r, s));
    });
    return this.waitFor(promises);
  }
  /**
   * Concurrently map all array elements through asynchronous function.
   */
  static mapArray(array, f) {
    return new PersistencePromise((resolve, reject) => {
      const expectedCount = array.length;
      const results = new Array(expectedCount);
      let resolvedCount = 0;
      for (let i = 0; i < expectedCount; i++) {
        // Capture the index so each callback writes to its own slot.
        const current = i;
        f(array[current]).next(result => {
          results[current] = result;
          ++resolvedCount;
          if (resolvedCount === expectedCount) {
            resolve(results);
          }
        }, err => reject(err));
      }
    });
  }
  /**
   * An alternative to recursive PersistencePromise calls, that avoids
   * potential memory problems from unbounded chains of promises.
   *
   * The `action` will be called repeatedly while `condition` is true.
   */
  static doWhile(condition, action) {
    return new PersistencePromise((resolve, reject) => {
      const process = () => {
        if (condition() === true) {
          action().next(() => {
            process();
          }, reject);
        }
        else {
          resolve();
        }
      };
      process();
    });
  }
}
  1922. /**
  1923. * @license
  1924. * Copyright 2017 Google LLC
  1925. *
  1926. * Licensed under the Apache License, Version 2.0 (the "License");
  1927. * you may not use this file except in compliance with the License.
  1928. * You may obtain a copy of the License at
  1929. *
  1930. * http://www.apache.org/licenses/LICENSE-2.0
  1931. *
  1932. * Unless required by applicable law or agreed to in writing, software
  1933. * distributed under the License is distributed on an "AS IS" BASIS,
  1934. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  1935. * See the License for the specific language governing permissions and
  1936. * limitations under the License.
  1937. */
// References to `window` are guarded by SimpleDb.isAvailable()
/* eslint-disable no-restricted-globals */
// Tag used for all SimpleDb debug/error log output.
const LOG_TAG$i = 'SimpleDb';
/**
 * The maximum number of retry attempts for an IndexedDb transaction that fails
 * with a DOMException.
 */
const TRANSACTION_RETRY_COUNT = 3;
  1946. /**
  1947. * Wraps an IDBTransaction and exposes a store() method to get a handle to a
  1948. * specific object store.
  1949. */
  1950. class SimpleDbTransaction {
  1951. constructor(action, transaction) {
  1952. this.action = action;
  1953. this.transaction = transaction;
  1954. this.aborted = false;
  1955. /**
  1956. * A `Promise` that resolves with the result of the IndexedDb transaction.
  1957. */
  1958. this.completionDeferred = new Deferred();
  1959. this.transaction.oncomplete = () => {
  1960. this.completionDeferred.resolve();
  1961. };
  1962. this.transaction.onabort = () => {
  1963. if (transaction.error) {
  1964. this.completionDeferred.reject(new IndexedDbTransactionError(action, transaction.error));
  1965. }
  1966. else {
  1967. this.completionDeferred.resolve();
  1968. }
  1969. };
  1970. this.transaction.onerror = (event) => {
  1971. const error = checkForAndReportiOSError(event.target.error);
  1972. this.completionDeferred.reject(new IndexedDbTransactionError(action, error));
  1973. };
  1974. }
  1975. static open(db, action, mode, objectStoreNames) {
  1976. try {
  1977. return new SimpleDbTransaction(action, db.transaction(objectStoreNames, mode));
  1978. }
  1979. catch (e) {
  1980. throw new IndexedDbTransactionError(action, e);
  1981. }
  1982. }
  1983. get completionPromise() {
  1984. return this.completionDeferred.promise;
  1985. }
  1986. abort(error) {
  1987. if (error) {
  1988. this.completionDeferred.reject(error);
  1989. }
  1990. if (!this.aborted) {
  1991. logDebug(LOG_TAG$i, 'Aborting transaction:', error ? error.message : 'Client-initiated abort');
  1992. this.aborted = true;
  1993. this.transaction.abort();
  1994. }
  1995. }
  1996. maybeCommit() {
  1997. // If the browser supports V3 IndexedDB, we invoke commit() explicitly to
  1998. // speed up index DB processing if the event loop remains blocks.
  1999. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  2000. const maybeV3IndexedDb = this.transaction;
  2001. if (!this.aborted && typeof maybeV3IndexedDb.commit === 'function') {
  2002. maybeV3IndexedDb.commit();
  2003. }
  2004. }
  2005. /**
  2006. * Returns a SimpleDbStore<KeyType, ValueType> for the specified store. All
  2007. * operations performed on the SimpleDbStore happen within the context of this
  2008. * transaction and it cannot be used anymore once the transaction is
  2009. * completed.
  2010. *
  2011. * Note that we can't actually enforce that the KeyType and ValueType are
  2012. * correct, but they allow type safety through the rest of the consuming code.
  2013. */
  2014. store(storeName) {
  2015. const store = this.transaction.objectStore(storeName);
  2016. return new SimpleDbStore(store);
  2017. }
  2018. }
  2019. /**
  2020. * Provides a wrapper around IndexedDb with a simplified interface that uses
  2021. * Promise-like return values to chain operations. Real promises cannot be used
  2022. * since .then() continuations are executed asynchronously (e.g. via
  2023. * .setImmediate), which would cause IndexedDB to end the transaction.
  2024. * See PersistencePromise for more details.
  2025. */
class SimpleDb {
  /*
   * Creates a new SimpleDb wrapper for IndexedDb database `name`.
   *
   * Note that `version` must not be a downgrade. IndexedDB does not support
   * downgrading the schema version. We currently do not support any way to do
   * versioning outside of IndexedDB's versioning mechanism, as only
   * version-upgrade transactions are allowed to do things like create
   * objectstores.
   */
  constructor(name, version, schemaConverter) {
    this.name = name;
    this.version = version;
    this.schemaConverter = schemaConverter;
    const iOSVersion = SimpleDb.getIOSVersion(getUA());
    // NOTE: According to https://bugs.webkit.org/show_bug.cgi?id=197050, the
    // bug we're checking for should exist in iOS >= 12.2 and < 13, but for
    // whatever reason it's much harder to hit after 12.2 so we only proactively
    // log on 12.2.
    if (iOSVersion === 12.2) {
      logError('Firestore persistence suffers from a bug in iOS 12.2 ' +
        'Safari that may cause your app to stop working. See ' +
        'https://stackoverflow.com/q/56496296/110915 for details ' +
        'and a potential workaround.');
    }
  }
  /** Deletes the specified database. */
  static delete(name) {
    logDebug(LOG_TAG$i, 'Removing database:', name);
    return wrapRequest(window.indexedDB.deleteDatabase(name)).toPromise();
  }
  /** Returns true if IndexedDB is available in the current environment. */
  static isAvailable() {
    if (!isIndexedDBAvailable()) {
      return false;
    }
    // The Node IndexedDBShim used for testing is always considered available.
    if (SimpleDb.isMockPersistence()) {
      return true;
    }
    // We extensively use indexed array values and compound keys,
    // which IE and Edge do not support. However, they still have indexedDB
    // defined on the window, so we need to check for them here and make sure
    // to return that persistence is not enabled for those browsers.
    // For tracking support of this feature, see here:
    // https://developer.microsoft.com/en-us/microsoft-edge/platform/status/indexeddbarraysandmultientrysupport/
    // Check the UA string to find out the browser.
    const ua = getUA();
    // IE 10
    // ua = 'Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Trident/6.0)';
    // IE 11
    // ua = 'Mozilla/5.0 (Windows NT 6.3; Trident/7.0; rv:11.0) like Gecko';
    // Edge
    // ua = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML,
    // like Gecko) Chrome/39.0.2171.71 Safari/537.36 Edge/12.0';
    // iOS Safari: Disable for users running iOS version < 10.
    const iOSVersion = SimpleDb.getIOSVersion(ua);
    const isUnsupportedIOS = 0 < iOSVersion && iOSVersion < 10;
    // Android browser: Disable for users running version < 4.5.
    const androidVersion = SimpleDb.getAndroidVersion(ua);
    const isUnsupportedAndroid = 0 < androidVersion && androidVersion < 4.5;
    if (ua.indexOf('MSIE ') > 0 ||
      ua.indexOf('Trident/') > 0 ||
      ua.indexOf('Edge/') > 0 ||
      isUnsupportedIOS ||
      isUnsupportedAndroid) {
      return false;
    }
    else {
      return true;
    }
  }
  /**
   * Returns true if the backing IndexedDB store is the Node IndexedDBShim
   * (see https://github.com/axemclion/IndexedDBShim).
   */
  static isMockPersistence() {
    var _a;
    return (typeof process !== 'undefined' &&
      ((_a = process.env) === null || _a === void 0 ? void 0 : _a.USE_MOCK_PERSISTENCE) === 'YES');
  }
  /** Helper to get a typed SimpleDbStore from a transaction. */
  static getStore(txn, store) {
    return txn.store(store);
  }
  // visible for testing
  /** Parse User Agent to determine iOS version. Returns -1 if not found. */
  static getIOSVersion(ua) {
    const iOSVersionRegex = ua.match(/i(?:phone|pad|pod) os ([\d_]+)/i);
    // iOS reports versions like "12_2"; keep at most major.minor.
    const version = iOSVersionRegex
      ? iOSVersionRegex[1].split('_').slice(0, 2).join('.')
      : '-1';
    return Number(version);
  }
  // visible for testing
  /** Parse User Agent to determine Android version. Returns -1 if not found. */
  static getAndroidVersion(ua) {
    const androidVersionRegex = ua.match(/Android ([\d.]+)/i);
    // Keep at most major.minor so the string parses as a single Number.
    const version = androidVersionRegex
      ? androidVersionRegex[1].split('.').slice(0, 2).join('.')
      : '-1';
    return Number(version);
  }
  /**
   * Opens the specified database, creating or upgrading it if necessary.
   */
  async ensureDb(action) {
    if (!this.db) {
      logDebug(LOG_TAG$i, 'Opening database:', this.name);
      this.db = await new Promise((resolve, reject) => {
        // TODO(mikelehen): Investigate browser compatibility.
        // https://developer.mozilla.org/en-US/docs/Web/API/IndexedDB_API/Using_IndexedDB
        // suggests IE9 and older WebKit browsers handle upgrade
        // differently. They expect setVersion, as described here:
        // https://developer.mozilla.org/en-US/docs/Web/API/IDBVersionChangeRequest/setVersion
        const request = indexedDB.open(this.name, this.version);
        request.onsuccess = (event) => {
          const db = event.target.result;
          resolve(db);
        };
        // Fired when another tab still holds the database open at an older
        // schema version, blocking the upgrade.
        request.onblocked = () => {
          reject(new IndexedDbTransactionError(action, 'Cannot upgrade IndexedDB schema while another tab is open. ' +
            'Close all tabs that access Firestore and reload this page to proceed.'));
        };
        request.onerror = (event) => {
          const error = event.target.error;
          if (error.name === 'VersionError') {
            // The on-disk schema was written by a newer SDK than this one.
            reject(new FirestoreError(Code.FAILED_PRECONDITION, 'A newer version of the Firestore SDK was previously used and so the persisted ' +
              'data is not compatible with the version of the SDK you are now using. The SDK ' +
              'will operate with persistence disabled. If you need persistence, please ' +
              're-upgrade to a newer version of the SDK or else clear the persisted IndexedDB ' +
              'data for your app to start fresh.'));
          }
          else if (error.name === 'InvalidStateError') {
            reject(new FirestoreError(Code.FAILED_PRECONDITION, 'Unable to open an IndexedDB connection. This could be due to running in a ' +
              'private browsing session on a browser whose private browsing sessions do not ' +
              'support IndexedDB: ' +
              error));
          }
          else {
            reject(new IndexedDbTransactionError(action, error));
          }
        };
        request.onupgradeneeded = (event) => {
          logDebug(LOG_TAG$i, 'Database "' + this.name + '" requires upgrade from version:', event.oldVersion);
          const db = event.target.result;
          // Delegate schema migration to the injected converter.
          this.schemaConverter
            .createOrUpgrade(db, request.transaction, event.oldVersion, this.version)
            .next(() => {
            logDebug(LOG_TAG$i, 'Database upgrade to version ' + this.version + ' complete');
          });
        };
      });
    }
    // (Re-)attach the version-change listener each time the db is handed out.
    if (this.versionchangelistener) {
      this.db.onversionchange = event => this.versionchangelistener(event);
    }
    return this.db;
  }
  /**
   * Registers a listener for IndexedDB `versionchange` events (e.g. another
   * tab requesting a schema upgrade) and attaches it to the currently open
   * connection, if any.
   */
  setVersionChangeListener(versionChangeListener) {
    this.versionchangelistener = versionChangeListener;
    if (this.db) {
      this.db.onversionchange = (event) => {
        return versionChangeListener(event);
      };
    }
  }
  /**
   * Runs `transactionFn` inside a new IndexedDB transaction over the given
   * object stores, retrying up to TRANSACTION_RETRY_COUNT attempts on errors
   * other than FirebaseError. Resolves with the callback's result once the
   * underlying transaction has completed.
   */
  async runTransaction(action, mode, objectStores, transactionFn) {
    const readonly = mode === 'readonly';
    let attemptNumber = 0;
    while (true) {
      ++attemptNumber;
      try {
        this.db = await this.ensureDb(action);
        const transaction = SimpleDbTransaction.open(this.db, action, readonly ? 'readonly' : 'readwrite', objectStores);
        const transactionFnResult = transactionFn(transaction)
          .next(result => {
          transaction.maybeCommit();
          return result;
        })
          .catch(error => {
          // Abort the transaction if there was an error.
          transaction.abort(error);
          // We cannot actually recover, and calling `abort()` will cause the transaction's
          // completion promise to be rejected. This in turn means that we won't use
          // `transactionFnResult` below. We return a rejection here so that we don't add the
          // possibility of returning `void` to the type of `transactionFnResult`.
          return PersistencePromise.reject(error);
        })
          .toPromise();
        // As noted above, errors are propagated by aborting the transaction. So
        // we swallow any error here to avoid the browser logging it as unhandled.
        transactionFnResult.catch(() => { });
        // Wait for the transaction to complete (i.e. IndexedDb's onsuccess event to
        // fire), but still return the original transactionFnResult back to the
        // caller.
        await transaction.completionPromise;
        return transactionFnResult;
      }
      catch (e) {
        const error = e;
        // TODO(schmidt-sebastian): We could probably be smarter about this and
        // not retry exceptions that are likely unrecoverable (such as quota
        // exceeded errors).
        // Note: We cannot use an instanceof check for FirestoreException, since the
        // exception is wrapped in a generic error by our async/await handling.
        const retryable = error.name !== 'FirebaseError' &&
          attemptNumber < TRANSACTION_RETRY_COUNT;
        logDebug(LOG_TAG$i, 'Transaction failed with error:', error.message, 'Retrying:', retryable);
        // Drop the (possibly broken) connection before retrying or bailing out.
        this.close();
        if (!retryable) {
          return Promise.reject(error);
        }
      }
    }
  }
  /** Closes and forgets the underlying IndexedDB connection, if open. */
  close() {
    if (this.db) {
      this.db.close();
    }
    this.db = undefined;
  }
}
  2248. /**
  2249. * A controller for iterating over a key range or index. It allows an iterate
  2250. * callback to delete the currently-referenced object, or jump to a new key
  2251. * within the key range or index.
  2252. */
  2253. class IterationController {
  2254. constructor(dbCursor) {
  2255. this.dbCursor = dbCursor;
  2256. this.shouldStop = false;
  2257. this.nextKey = null;
  2258. }
  2259. get isDone() {
  2260. return this.shouldStop;
  2261. }
  2262. get skipToKey() {
  2263. return this.nextKey;
  2264. }
  2265. set cursor(value) {
  2266. this.dbCursor = value;
  2267. }
  2268. /**
  2269. * This function can be called to stop iteration at any point.
  2270. */
  2271. done() {
  2272. this.shouldStop = true;
  2273. }
  2274. /**
  2275. * This function can be called to skip to that next key, which could be
  2276. * an index or a primary key.
  2277. */
  2278. skip(key) {
  2279. this.nextKey = key;
  2280. }
  2281. /**
  2282. * Delete the current cursor value from the object store.
  2283. *
  2284. * NOTE: You CANNOT do this with a keysOnly query.
  2285. */
  2286. delete() {
  2287. return wrapRequest(this.dbCursor.delete());
  2288. }
  2289. }
/** An error that wraps exceptions thrown during IndexedDB execution. */
class IndexedDbTransactionError extends FirestoreError {
  /**
   * @param actionName - Label for the logical operation that failed; embedded
   * in the error message for debugging.
   * @param cause - The underlying exception (stringified into the message).
   */
  constructor(actionName, cause) {
    // Reported under Code.UNAVAILABLE, distinguishing infrastructure failures
    // from programming errors.
    super(Code.UNAVAILABLE, `IndexedDB transaction '${actionName}' failed: ${cause}`);
    // `name` is what isIndexedDbTransactionError() keys off of (instanceof is
    // unreliable for wrapped errors).
    this.name = 'IndexedDbTransactionError';
  }
}
  2297. /** Verifies whether `e` is an IndexedDbTransactionError. */
  2298. function isIndexedDbTransactionError(e) {
  2299. // Use name equality, as instanceof checks on errors don't work with errors
  2300. // that wrap other errors.
  2301. return e.name === 'IndexedDbTransactionError';
  2302. }
  2303. /**
  2304. * A wrapper around an IDBObjectStore providing an API that:
  2305. *
  2306. * 1) Has generic KeyType / ValueType parameters to provide strongly-typed
  2307. * methods for acting against the object store.
  2308. * 2) Deals with IndexedDB's onsuccess / onerror event callbacks, making every
  2309. * method return a PersistencePromise instead.
  2310. * 3) Provides a higher-level API to avoid needing to do excessive wrapping of
  2311. * intermediate IndexedDB types (IDBCursorWithValue, etc.)
  2312. */
class SimpleDbStore {
  constructor(store) {
    // The raw IDBObjectStore this wrapper delegates to.
    this.store = store;
  }
  /**
   * Writes a value to the store. Two call shapes are supported:
   * put(key, value) for stores with explicit keys, and put(value) for stores
   * that derive the key themselves.
   */
  put(keyOrValue, value) {
    let request;
    if (value !== undefined) {
      logDebug(LOG_TAG$i, 'PUT', this.store.name, keyOrValue, value);
      request = this.store.put(value, keyOrValue);
    }
    else {
      logDebug(LOG_TAG$i, 'PUT', this.store.name, '<auto-key>', keyOrValue);
      request = this.store.put(keyOrValue);
    }
    return wrapRequest(request);
  }
  /**
   * Adds a new value into an Object Store and returns the new key. Similar to
   * IndexedDb's `add()`, this method will fail on primary key collisions.
   *
   * @param value - The object to write.
   * @returns The key of the value to add.
   */
  add(value) {
    logDebug(LOG_TAG$i, 'ADD', this.store.name, value, value);
    const request = this.store.add(value);
    return wrapRequest(request);
  }
  /**
   * Gets the object with the specified key from the specified store, or null
   * if no object exists with the specified key.
   *
   * @key The key of the object to get.
   * @returns The object with the specified key or null if no object exists.
   */
  get(key) {
    const request = this.store.get(key);
    // We're doing an unsafe cast to ValueType.
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    return wrapRequest(request).next(result => {
      // Normalize nonexistence to null.
      if (result === undefined) {
        result = null;
      }
      logDebug(LOG_TAG$i, 'GET', this.store.name, key, result);
      return result;
    });
  }
  /** Deletes the entry with the given key from the store. */
  delete(key) {
    logDebug(LOG_TAG$i, 'DELETE', this.store.name, key);
    const request = this.store.delete(key);
    return wrapRequest(request);
  }
  /**
   * If we ever need more of the count variants, we can add overloads. For now,
   * all we need is to count everything in a store.
   *
   * Returns the number of rows in the store.
   */
  count() {
    logDebug(LOG_TAG$i, 'COUNT', this.store.name);
    const request = this.store.count();
    return wrapRequest(request);
  }
  /**
   * Loads all values matching the (optional) index and/or key range; accepts
   * the same flexible argument shapes as options(): loadAll(),
   * loadAll(range), or loadAll(indexName, range).
   */
  loadAll(indexOrRange, range) {
    const iterateOptions = this.options(indexOrRange, range);
    // Use `getAll()` if the browser supports IndexedDB v3, as it is roughly
    // 20% faster. Unfortunately, getAll() does not support custom indices.
    if (!iterateOptions.index && typeof this.store.getAll === 'function') {
      const request = this.store.getAll(iterateOptions.range);
      return new PersistencePromise((resolve, reject) => {
        request.onerror = (event) => {
          reject(event.target.error);
        };
        request.onsuccess = (event) => {
          resolve(event.target.result);
        };
      });
    }
    else {
      // Fallback: walk a cursor and collect each value manually.
      const cursor = this.cursor(iterateOptions);
      const results = [];
      return this.iterateCursor(cursor, (key, value) => {
        results.push(value);
      }).next(() => {
        return results;
      });
    }
  }
  /**
   * Loads the first `count` elements from the provided index range. Loads all
   * elements if no limit is provided.
   */
  loadFirst(range, count) {
    const request = this.store.getAll(range, count === null ? undefined : count);
    return new PersistencePromise((resolve, reject) => {
      request.onerror = (event) => {
        reject(event.target.error);
      };
      request.onsuccess = (event) => {
        resolve(event.target.result);
      };
    });
  }
  /** Deletes every entry matched by the (optional) index and/or key range. */
  deleteAll(indexOrRange, range) {
    logDebug(LOG_TAG$i, 'DELETE ALL', this.store.name);
    const options = this.options(indexOrRange, range);
    options.keysOnly = false;
    const cursor = this.cursor(options);
    return this.iterateCursor(cursor, (key, value, control) => {
      // NOTE: Calling delete() on a cursor is documented as more efficient than
      // calling delete() on an object store with a single key
      // (https://developer.mozilla.org/en-US/docs/Web/API/IDBObjectStore/delete),
      // however, this requires us *not* to use a keysOnly cursor
      // (https://developer.mozilla.org/en-US/docs/Web/API/IDBCursor/delete). We
      // may want to compare the performance of each method.
      return control.delete();
    });
  }
  /**
   * Iterates over the store's entries, invoking `callback` for each. May be
   * called as iterate(callback) or iterate(options, callback).
   */
  iterate(optionsOrCallback, callback) {
    let options;
    if (!callback) {
      // Single-argument form: the first parameter is actually the callback.
      options = {};
      callback = optionsOrCallback;
    }
    else {
      options = optionsOrCallback;
    }
    const cursor = this.cursor(options);
    return this.iterateCursor(cursor, callback);
  }
  /**
   * Iterates over a store, but waits for the given callback to complete for
   * each entry before iterating the next entry. This allows the callback to do
   * asynchronous work to determine if this iteration should continue.
   *
   * The provided callback should return `true` to continue iteration, and
   * `false` otherwise.
   */
  iterateSerial(callback) {
    const cursorRequest = this.cursor({});
    return new PersistencePromise((resolve, reject) => {
      cursorRequest.onerror = (event) => {
        const error = checkForAndReportiOSError(event.target.error);
        reject(error);
      };
      cursorRequest.onsuccess = (event) => {
        const cursor = event.target.result;
        if (!cursor) {
          // Cursor exhausted: iteration is complete.
          resolve();
          return;
        }
        callback(cursor.primaryKey, cursor.value).next(shouldContinue => {
          if (shouldContinue) {
            cursor.continue();
          }
          else {
            resolve();
          }
        });
      };
    });
  }
  /**
   * Drives a cursor request, invoking `fn(key, value, controller)` for each
   * entry. The controller lets the callback stop early, skip ahead, or delete
   * the current entry; any PersistencePromises the callback returns are
   * awaited (via waitFor) before the overall promise settles.
   */
  iterateCursor(cursorRequest, fn) {
    const results = [];
    return new PersistencePromise((resolve, reject) => {
      cursorRequest.onerror = (event) => {
        reject(event.target.error);
      };
      cursorRequest.onsuccess = (event) => {
        const cursor = event.target.result;
        if (!cursor) {
          resolve();
          return;
        }
        const controller = new IterationController(cursor);
        const userResult = fn(cursor.primaryKey, cursor.value, controller);
        if (userResult instanceof PersistencePromise) {
          // On callback failure, stop iterating but keep the rejection so
          // waitFor() below surfaces it to the caller.
          const userPromise = userResult.catch(err => {
            controller.done();
            return PersistencePromise.reject(err);
          });
          results.push(userPromise);
        }
        if (controller.isDone) {
          resolve();
        }
        else if (controller.skipToKey === null) {
          cursor.continue();
        }
        else {
          cursor.continue(controller.skipToKey);
        }
      };
    }).next(() => PersistencePromise.waitFor(results));
  }
  /**
   * Normalizes the flexible (indexOrRange, range) argument pair used by
   * loadAll/deleteAll/iterate into a single options object.
   */
  options(indexOrRange, range) {
    let indexName = undefined;
    if (indexOrRange !== undefined) {
      if (typeof indexOrRange === 'string') {
        indexName = indexOrRange;
      }
      else {
        range = indexOrRange;
      }
    }
    return { index: indexName, range };
  }
  /**
   * Opens an IDBCursor request honoring the index / range / reverse /
   * keysOnly fields of `options`.
   */
  cursor(options) {
    let direction = 'next';
    if (options.reverse) {
      direction = 'prev';
    }
    if (options.index) {
      const index = this.store.index(options.index);
      if (options.keysOnly) {
        return index.openKeyCursor(options.range, direction);
      }
      else {
        return index.openCursor(options.range, direction);
      }
    }
    else {
      return this.store.openCursor(options.range, direction);
    }
  }
}
  2540. /**
  2541. * Wraps an IDBRequest in a PersistencePromise, using the onsuccess / onerror
  2542. * handlers to resolve / reject the PersistencePromise as appropriate.
  2543. */
  2544. function wrapRequest(request) {
  2545. return new PersistencePromise((resolve, reject) => {
  2546. request.onsuccess = (event) => {
  2547. const result = event.target.result;
  2548. resolve(result);
  2549. };
  2550. request.onerror = (event) => {
  2551. const error = checkForAndReportiOSError(event.target.error);
  2552. reject(error);
  2553. };
  2554. });
  2555. }
  2556. // Guard so we only report the error once.
  2557. let reportedIOSError = false;
  2558. function checkForAndReportiOSError(error) {
  2559. const iOSVersion = SimpleDb.getIOSVersion(getUA());
  2560. if (iOSVersion >= 12.2 && iOSVersion < 13) {
  2561. const IOS_ERROR = 'An internal error was encountered in the Indexed Database server';
  2562. if (error.message.indexOf(IOS_ERROR) >= 0) {
  2563. // Wrap error in a more descriptive one.
  2564. const newError = new FirestoreError('internal', `IOS_INDEXEDDB_BUG1: IndexedDb has thrown '${IOS_ERROR}'. This is likely ` +
  2565. `due to an unavoidable bug in iOS. See https://stackoverflow.com/q/56496296/110915 ` +
  2566. `for details and a potential workaround.`);
  2567. if (!reportedIOSError) {
  2568. reportedIOSError = true;
  2569. // Throw a global exception outside of this promise chain, for the user to
  2570. // potentially catch.
  2571. setTimeout(() => {
  2572. throw newError;
  2573. }, 0);
  2574. }
  2575. return newError;
  2576. }
  2577. }
  2578. return error;
  2579. }
  2580. const LOG_TAG$h = 'IndexBackiller';
  2581. /** How long we wait to try running index backfill after SDK initialization. */
  2582. const INITIAL_BACKFILL_DELAY_MS = 15 * 1000;
  2583. /** Minimum amount of time between backfill checks, after the first one. */
  2584. const REGULAR_BACKFILL_DELAY_MS = 60 * 1000;
  2585. /** The maximum number of documents to process each time backfill() is called. */
  2586. const MAX_DOCUMENTS_TO_PROCESS = 50;
  2587. /** This class is responsible for the scheduling of Index Backfiller. */
class IndexBackfillerScheduler {
constructor(asyncQueue, backfiller) {
this.asyncQueue = asyncQueue;
this.backfiller = backfiller;
// Currently scheduled delayed task, or null when stopped or while a
// backfill pass is executing.
this.task = null;
}
// Schedules the first backfill pass after the initial (longer) delay.
start() {
this.schedule(INITIAL_BACKFILL_DELAY_MS);
}
// Cancels any pending task; safe to call when nothing is scheduled.
stop() {
if (this.task) {
this.task.cancel();
this.task = null;
}
}
get started() {
return this.task !== null;
}
// Enqueues a backfill pass after `delay` ms, then re-schedules itself with
// the regular delay once the pass completes (even after ignored errors).
schedule(delay) {
logDebug(LOG_TAG$h, `Scheduled in ${delay}ms`);
this.task = this.asyncQueue.enqueueAfterDelay("index_backfill" /* TimerId.IndexBackfill */, delay, async () => {
this.task = null;
try {
const documentsProcessed = await this.backfiller.backfill();
logDebug(LOG_TAG$h, `Documents written: ${documentsProcessed}`);
}
catch (e) {
// Transient IndexedDB transaction failures are skipped; this pass is
// dropped and retried at the next scheduled run.
if (isIndexedDbTransactionError(e)) {
logDebug(LOG_TAG$h, 'Ignoring IndexedDB error during index backfill: ', e);
}
else {
// NOTE(review): presumably swallows primary-lease-loss errors and
// rethrows anything else — confirm against ignoreIfPrimaryLeaseLoss.
await ignoreIfPrimaryLeaseLoss(e);
}
}
await this.schedule(REGULAR_BACKFILL_DELAY_MS);
});
}
}
  2626. /** Implements the steps for backfilling indexes. */
  2627. class IndexBackfiller {
  2628. constructor(
  2629. /**
  2630. * LocalStore provides access to IndexManager and LocalDocumentView.
  2631. * These properties will update when the user changes. Consequently,
  2632. * making a local copy of IndexManager and LocalDocumentView will require
  2633. * updates over time. The simpler solution is to rely on LocalStore to have
  2634. * an up-to-date references to IndexManager and LocalDocumentStore.
  2635. */
  2636. localStore, persistence) {
  2637. this.localStore = localStore;
  2638. this.persistence = persistence;
  2639. }
  2640. async backfill(maxDocumentsToProcess = MAX_DOCUMENTS_TO_PROCESS) {
  2641. return this.persistence.runTransaction('Backfill Indexes', 'readwrite-primary', txn => this.writeIndexEntries(txn, maxDocumentsToProcess));
  2642. }
  2643. /** Writes index entries until the cap is reached. Returns the number of documents processed. */
  2644. writeIndexEntries(transation, maxDocumentsToProcess) {
  2645. const processedCollectionGroups = new Set();
  2646. let documentsRemaining = maxDocumentsToProcess;
  2647. let continueLoop = true;
  2648. return PersistencePromise.doWhile(() => continueLoop === true && documentsRemaining > 0, () => {
  2649. return this.localStore.indexManager
  2650. .getNextCollectionGroupToUpdate(transation)
  2651. .next((collectionGroup) => {
  2652. if (collectionGroup === null ||
  2653. processedCollectionGroups.has(collectionGroup)) {
  2654. continueLoop = false;
  2655. }
  2656. else {
  2657. logDebug(LOG_TAG$h, `Processing collection: ${collectionGroup}`);
  2658. return this.writeEntriesForCollectionGroup(transation, collectionGroup, documentsRemaining).next(documentsProcessed => {
  2659. documentsRemaining -= documentsProcessed;
  2660. processedCollectionGroups.add(collectionGroup);
  2661. });
  2662. }
  2663. });
  2664. }).next(() => maxDocumentsToProcess - documentsRemaining);
  2665. }
  2666. /**
  2667. * Writes entries for the provided collection group. Returns the number of documents processed.
  2668. */
  2669. writeEntriesForCollectionGroup(transaction, collectionGroup, documentsRemainingUnderCap) {
  2670. // Use the earliest offset of all field indexes to query the local cache.
  2671. return this.localStore.indexManager
  2672. .getMinOffsetFromCollectionGroup(transaction, collectionGroup)
  2673. .next(existingOffset => this.localStore.localDocuments
  2674. .getNextDocuments(transaction, collectionGroup, existingOffset, documentsRemainingUnderCap)
  2675. .next(nextBatch => {
  2676. const docs = nextBatch.changes;
  2677. return this.localStore.indexManager
  2678. .updateIndexEntries(transaction, docs)
  2679. .next(() => this.getNewOffset(existingOffset, nextBatch))
  2680. .next(newOffset => {
  2681. logDebug(LOG_TAG$h, `Updating offset: ${newOffset}`);
  2682. return this.localStore.indexManager.updateCollectionGroup(transaction, collectionGroup, newOffset);
  2683. })
  2684. .next(() => docs.size);
  2685. }));
  2686. }
  2687. /** Returns the next offset based on the provided documents. */
  2688. getNewOffset(existingOffset, lookupResult) {
  2689. let maxOffset = existingOffset;
  2690. lookupResult.changes.forEach((key, document) => {
  2691. const newOffset = newIndexOffsetFromDocument(document);
  2692. if (indexOffsetComparator(newOffset, maxOffset) > 0) {
  2693. maxOffset = newOffset;
  2694. }
  2695. });
  2696. return new IndexOffset(maxOffset.readTime, maxOffset.documentKey, Math.max(lookupResult.batchId, existingOffset.largestBatchId));
  2697. }
  2698. }
  2699. /**
  2700. * @license
  2701. * Copyright 2018 Google LLC
  2702. *
  2703. * Licensed under the Apache License, Version 2.0 (the "License");
  2704. * you may not use this file except in compliance with the License.
  2705. * You may obtain a copy of the License at
  2706. *
  2707. * http://www.apache.org/licenses/LICENSE-2.0
  2708. *
  2709. * Unless required by applicable law or agreed to in writing, software
  2710. * distributed under the License is distributed on an "AS IS" BASIS,
  2711. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  2712. * See the License for the specific language governing permissions and
  2713. * limitations under the License.
  2714. */
  2715. /**
  2716. * `ListenSequence` is a monotonic sequence. It is initialized with a minimum value to
  2717. * exceed. All subsequent calls to next will return increasing values. If provided with a
  2718. * `SequenceNumberSyncer`, it will additionally bump its next value when told of a new value, as
  2719. * well as write out sequence numbers that it produces via `next()`.
  2720. */
  2721. class ListenSequence {
  2722. constructor(previousValue, sequenceNumberSyncer) {
  2723. this.previousValue = previousValue;
  2724. if (sequenceNumberSyncer) {
  2725. sequenceNumberSyncer.sequenceNumberHandler = sequenceNumber => this.setPreviousValue(sequenceNumber);
  2726. this.writeNewSequenceNumber = sequenceNumber => sequenceNumberSyncer.writeSequenceNumber(sequenceNumber);
  2727. }
  2728. }
  2729. setPreviousValue(externalPreviousValue) {
  2730. this.previousValue = Math.max(externalPreviousValue, this.previousValue);
  2731. return this.previousValue;
  2732. }
  2733. next() {
  2734. const nextValue = ++this.previousValue;
  2735. if (this.writeNewSequenceNumber) {
  2736. this.writeNewSequenceNumber(nextValue);
  2737. }
  2738. return nextValue;
  2739. }
  2740. }
  2741. ListenSequence.INVALID = -1;
  2742. /**
  2743. * @license
  2744. * Copyright 2017 Google LLC
  2745. *
  2746. * Licensed under the Apache License, Version 2.0 (the "License");
  2747. * you may not use this file except in compliance with the License.
  2748. * You may obtain a copy of the License at
  2749. *
  2750. * http://www.apache.org/licenses/LICENSE-2.0
  2751. *
  2752. * Unless required by applicable law or agreed to in writing, software
  2753. * distributed under the License is distributed on an "AS IS" BASIS,
  2754. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  2755. * See the License for the specific language governing permissions and
  2756. * limitations under the License.
  2757. */
// Resource-path encoding: each segment is escaped and segments are joined by
// an escape+separator pair (see encodeResourcePath / decodeResourcePath).
const escapeChar = '\u0001';
const encodedSeparatorChar = '\u0001';
// Escaped form of '\0' when it appears inside a segment.
const encodedNul = '\u0010';
// Escaped form of the escape character itself.
const encodedEscape = '\u0011';
  2762. /**
  2763. * Encodes a resource path into a IndexedDb-compatible string form.
  2764. */
  2765. function encodeResourcePath(path) {
  2766. let result = '';
  2767. for (let i = 0; i < path.length; i++) {
  2768. if (result.length > 0) {
  2769. result = encodeSeparator(result);
  2770. }
  2771. result = encodeSegment(path.get(i), result);
  2772. }
  2773. return encodeSeparator(result);
  2774. }
  2775. /** Encodes a single segment of a resource path into the given result */
  2776. function encodeSegment(segment, resultBuf) {
  2777. let result = resultBuf;
  2778. const length = segment.length;
  2779. for (let i = 0; i < length; i++) {
  2780. const c = segment.charAt(i);
  2781. switch (c) {
  2782. case '\0':
  2783. result += escapeChar + encodedNul;
  2784. break;
  2785. case escapeChar:
  2786. result += escapeChar + encodedEscape;
  2787. break;
  2788. default:
  2789. result += c;
  2790. }
  2791. }
  2792. return result;
  2793. }
  2794. /** Encodes a path separator into the given result */
  2795. function encodeSeparator(result) {
  2796. return result + escapeChar + encodedSeparatorChar;
  2797. }
  2798. /**
  2799. * Decodes the given IndexedDb-compatible string form of a resource path into
  2800. * a ResourcePath instance. Note that this method is not suitable for use with
  2801. * decoding resource names from the server; those are One Platform format
  2802. * strings.
  2803. */
function decodeResourcePath(path) {
// Even the empty path must encode as a path of at least length 2. A path
// with exactly 2 must be the empty path.
const length = path.length;
hardAssert(length >= 2);
if (length === 2) {
hardAssert(path.charAt(0) === escapeChar && path.charAt(1) === encodedSeparatorChar);
return ResourcePath.emptyPath();
}
// Escape characters cannot exist past the second-to-last position in the
// source value.
const lastReasonableEscapeIndex = length - 2;
const segments = [];
// Accumulates a segment containing escaped characters; stays empty for the
// common case where a segment needs no unescaping.
let segmentBuilder = '';
for (let start = 0; start < length;) {
// The last two characters of a valid encoded path must be a separator, so
// there must be an end to this segment.
const end = path.indexOf(escapeChar, start);
if (end < 0 || end > lastReasonableEscapeIndex) {
fail();
}
// The character following the escape determines how to interpret it.
const next = path.charAt(end + 1);
switch (next) {
case encodedSeparatorChar:
const currentPiece = path.substring(start, end);
let segment;
if (segmentBuilder.length === 0) {
// Avoid copying for the common case of a segment that excludes \0
// and \001
segment = currentPiece;
}
else {
segmentBuilder += currentPiece;
segment = segmentBuilder;
segmentBuilder = '';
}
segments.push(segment);
break;
case encodedNul:
segmentBuilder += path.substring(start, end);
segmentBuilder += '\0';
break;
case encodedEscape:
// The escape character can be used in the output to encode itself.
segmentBuilder += path.substring(start, end + 1);
break;
default:
fail();
}
// Resume scanning after the two-character escape sequence.
start = end + 2;
}
return new ResourcePath(segments);
}
  2857. /**
  2858. * @license
  2859. * Copyright 2022 Google LLC
  2860. *
  2861. * Licensed under the Apache License, Version 2.0 (the "License");
  2862. * you may not use this file except in compliance with the License.
  2863. * You may obtain a copy of the License at
  2864. *
  2865. * http://www.apache.org/licenses/LICENSE-2.0
  2866. *
  2867. * Unless required by applicable law or agreed to in writing, software
  2868. * distributed under the License is distributed on an "AS IS" BASIS,
  2869. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  2870. * See the License for the specific language governing permissions and
  2871. * limitations under the License.
  2872. */
  2873. const DbRemoteDocumentStore$1 = 'remoteDocuments';
  2874. /**
  2875. * @license
  2876. * Copyright 2022 Google LLC
  2877. *
  2878. * Licensed under the Apache License, Version 2.0 (the "License");
  2879. * you may not use this file except in compliance with the License.
  2880. * You may obtain a copy of the License at
  2881. *
  2882. * http://www.apache.org/licenses/LICENSE-2.0
  2883. *
  2884. * Unless required by applicable law or agreed to in writing, software
  2885. * distributed under the License is distributed on an "AS IS" BASIS,
  2886. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  2887. * See the License for the specific language governing permissions and
  2888. * limitations under the License.
  2889. */
  2890. /**
  2891. * Name of the IndexedDb object store.
  2892. *
  2893. * Note that the name 'owner' is chosen to ensure backwards compatibility with
  2894. * older clients that only supported single locked access to the persistence
  2895. * layer.
  2896. */
  2897. const DbPrimaryClientStore = 'owner';
  2898. /**
  2899. * The key string used for the single object that exists in the
  2900. * DbPrimaryClient store.
  2901. */
  2902. const DbPrimaryClientKey = 'owner';
  2903. /** Name of the IndexedDb object store. */
  2904. const DbMutationQueueStore = 'mutationQueues';
  2905. /** Keys are automatically assigned via the userId property. */
  2906. const DbMutationQueueKeyPath = 'userId';
  2907. /** Name of the IndexedDb object store. */
  2908. const DbMutationBatchStore = 'mutations';
  2909. /** Keys are automatically assigned via the userId, batchId properties. */
  2910. const DbMutationBatchKeyPath = 'batchId';
  2911. /** The index name for lookup of mutations by user. */
  2912. const DbMutationBatchUserMutationsIndex = 'userMutationsIndex';
  2913. /** The user mutations index is keyed by [userId, batchId] pairs. */
  2914. const DbMutationBatchUserMutationsKeyPath = ['userId', 'batchId'];
  2915. /**
  2916. * Creates a [userId] key for use in the DbDocumentMutations index to iterate
  2917. * over all of a user's document mutations.
  2918. */
  2919. function newDbDocumentMutationPrefixForUser(userId) {
  2920. return [userId];
  2921. }
  2922. /**
  2923. * Creates a [userId, encodedPath] key for use in the DbDocumentMutations
  2924. * index to iterate over all at document mutations for a given path or lower.
  2925. */
  2926. function newDbDocumentMutationPrefixForPath(userId, path) {
  2927. return [userId, encodeResourcePath(path)];
  2928. }
  2929. /**
  2930. * Creates a full index key of [userId, encodedPath, batchId] for inserting
  2931. * and deleting into the DbDocumentMutations index.
  2932. */
  2933. function newDbDocumentMutationKey(userId, path, batchId) {
  2934. return [userId, encodeResourcePath(path), batchId];
  2935. }
  2936. /**
  2937. * Because we store all the useful information for this store in the key,
  2938. * there is no useful information to store as the value. The raw (unencoded)
  2939. * path cannot be stored because IndexedDb doesn't store prototype
  2940. * information.
  2941. */
  2942. const DbDocumentMutationPlaceholder = {};
  2943. const DbDocumentMutationStore = 'documentMutations';
  2944. const DbRemoteDocumentStore = 'remoteDocumentsV14';
  2945. /**
  2946. * The primary key of the remote documents store, which allows for efficient
  2947. * access by collection path and read time.
  2948. */
  2949. const DbRemoteDocumentKeyPath = [
  2950. 'prefixPath',
  2951. 'collectionGroup',
  2952. 'readTime',
  2953. 'documentId'
  2954. ];
  2955. /** An index that provides access to documents by key. */
  2956. const DbRemoteDocumentDocumentKeyIndex = 'documentKeyIndex';
  2957. const DbRemoteDocumentDocumentKeyIndexPath = [
  2958. 'prefixPath',
  2959. 'collectionGroup',
  2960. 'documentId'
  2961. ];
  2962. /**
  2963. * An index that provides access to documents by collection group and read
  2964. * time.
  2965. *
  2966. * This index is used by the index backfiller.
  2967. */
  2968. const DbRemoteDocumentCollectionGroupIndex = 'collectionGroupIndex';
  2969. const DbRemoteDocumentCollectionGroupIndexPath = [
  2970. 'collectionGroup',
  2971. 'readTime',
  2972. 'prefixPath',
  2973. 'documentId'
  2974. ];
  2975. const DbRemoteDocumentGlobalStore = 'remoteDocumentGlobal';
  2976. const DbRemoteDocumentGlobalKey = 'remoteDocumentGlobalKey';
  2977. const DbTargetStore = 'targets';
  2978. /** Keys are automatically assigned via the targetId property. */
  2979. const DbTargetKeyPath = 'targetId';
  2980. /** The name of the queryTargets index. */
  2981. const DbTargetQueryTargetsIndexName = 'queryTargetsIndex';
  2982. /**
  2983. * The index of all canonicalIds to the targets that they match. This is not
  2984. * a unique mapping because canonicalId does not promise a unique name for all
  2985. * possible queries, so we append the targetId to make the mapping unique.
  2986. */
  2987. const DbTargetQueryTargetsKeyPath = ['canonicalId', 'targetId'];
  2988. /** Name of the IndexedDb object store. */
  2989. const DbTargetDocumentStore = 'targetDocuments';
  2990. /** Keys are automatically assigned via the targetId, path properties. */
  2991. const DbTargetDocumentKeyPath = ['targetId', 'path'];
  2992. /** The index name for the reverse index. */
  2993. const DbTargetDocumentDocumentTargetsIndex = 'documentTargetsIndex';
  2994. /** We also need to create the reverse index for these properties. */
  2995. const DbTargetDocumentDocumentTargetsKeyPath = ['path', 'targetId'];
  2996. /**
  2997. * The key string used for the single object that exists in the
  2998. * DbTargetGlobal store.
  2999. */
  3000. const DbTargetGlobalKey = 'targetGlobalKey';
  3001. const DbTargetGlobalStore = 'targetGlobal';
  3002. /** Name of the IndexedDb object store. */
  3003. const DbCollectionParentStore = 'collectionParents';
  3004. /** Keys are automatically assigned via the collectionId, parent properties. */
  3005. const DbCollectionParentKeyPath = ['collectionId', 'parent'];
  3006. /** Name of the IndexedDb object store. */
  3007. const DbClientMetadataStore = 'clientMetadata';
  3008. /** Keys are automatically assigned via the clientId properties. */
  3009. const DbClientMetadataKeyPath = 'clientId';
  3010. /** Name of the IndexedDb object store. */
  3011. const DbBundleStore = 'bundles';
  3012. const DbBundleKeyPath = 'bundleId';
  3013. /** Name of the IndexedDb object store. */
  3014. const DbNamedQueryStore = 'namedQueries';
  3015. const DbNamedQueryKeyPath = 'name';
  3016. /** Name of the IndexedDb object store. */
  3017. const DbIndexConfigurationStore = 'indexConfiguration';
  3018. const DbIndexConfigurationKeyPath = 'indexId';
  3019. /**
  3020. * An index that provides access to the index configurations by collection
  3021. * group.
  3022. *
  3023. * PORTING NOTE: iOS and Android maintain this index in-memory, but this is
  3024. * not possible here as the Web client supports concurrent access to
  3025. * persistence via multi-tab.
  3026. */
  3027. const DbIndexConfigurationCollectionGroupIndex = 'collectionGroupIndex';
  3028. const DbIndexConfigurationCollectionGroupIndexPath = 'collectionGroup';
  3029. /** Name of the IndexedDb object store. */
  3030. const DbIndexStateStore = 'indexState';
  3031. const DbIndexStateKeyPath = ['indexId', 'uid'];
  3032. /**
  3033. * An index that provides access to documents in a collection sorted by last
  3034. * update time. Used by the backfiller.
  3035. *
  3036. * PORTING NOTE: iOS and Android maintain this index in-memory, but this is
  3037. * not possible here as the Web client supports concurrent access to
  3038. * persistence via multi-tab.
  3039. */
  3040. const DbIndexStateSequenceNumberIndex = 'sequenceNumberIndex';
  3041. const DbIndexStateSequenceNumberIndexPath = ['uid', 'sequenceNumber'];
  3042. /** Name of the IndexedDb object store. */
  3043. const DbIndexEntryStore = 'indexEntries';
  3044. const DbIndexEntryKeyPath = [
  3045. 'indexId',
  3046. 'uid',
  3047. 'arrayValue',
  3048. 'directionalValue',
  3049. 'orderedDocumentKey',
  3050. 'documentKey'
  3051. ];
  3052. const DbIndexEntryDocumentKeyIndex = 'documentKeyIndex';
  3053. const DbIndexEntryDocumentKeyIndexPath = [
  3054. 'indexId',
  3055. 'uid',
  3056. 'orderedDocumentKey'
  3057. ];
  3058. /** Name of the IndexedDb object store. */
  3059. const DbDocumentOverlayStore = 'documentOverlays';
  3060. const DbDocumentOverlayKeyPath = [
  3061. 'userId',
  3062. 'collectionPath',
  3063. 'documentId'
  3064. ];
  3065. const DbDocumentOverlayCollectionPathOverlayIndex = 'collectionPathOverlayIndex';
  3066. const DbDocumentOverlayCollectionPathOverlayIndexPath = [
  3067. 'userId',
  3068. 'collectionPath',
  3069. 'largestBatchId'
  3070. ];
  3071. const DbDocumentOverlayCollectionGroupOverlayIndex = 'collectionGroupOverlayIndex';
  3072. const DbDocumentOverlayCollectionGroupOverlayIndexPath = [
  3073. 'userId',
  3074. 'collectionGroup',
  3075. 'largestBatchId'
  3076. ];
  3077. // Visible for testing
const V1_STORES = [
DbMutationQueueStore,
DbMutationBatchStore,
DbDocumentMutationStore,
DbRemoteDocumentStore$1,
DbTargetStore,
DbPrimaryClientStore,
DbTargetGlobalStore,
DbTargetDocumentStore
];
// Visible for testing
// v2/v3 did not change the set of object stores.
const V3_STORES = V1_STORES;
// Note: DbRemoteDocumentChanges is no longer used and dropped with v9.
// v4 adds the multi-tab client metadata store.
const V4_STORES = [...V3_STORES, DbClientMetadataStore];
const V6_STORES = [...V4_STORES, DbRemoteDocumentGlobalStore];
const V8_STORES = [...V6_STORES, DbCollectionParentStore];
// v11 adds bundle and named-query stores.
const V11_STORES = [...V8_STORES, DbBundleStore, DbNamedQueryStore];
const V12_STORES = [...V11_STORES, DbDocumentOverlayStore];
// v13 is spelled out in full because it swaps the legacy remote-document
// store (DbRemoteDocumentStore$1, 'remoteDocuments') for the new
// DbRemoteDocumentStore ('remoteDocumentsV14').
const V13_STORES = [
DbMutationQueueStore,
DbMutationBatchStore,
DbDocumentMutationStore,
DbRemoteDocumentStore,
DbTargetStore,
DbPrimaryClientStore,
DbTargetGlobalStore,
DbTargetDocumentStore,
DbClientMetadataStore,
DbRemoteDocumentGlobalStore,
DbCollectionParentStore,
DbBundleStore,
DbNamedQueryStore,
DbDocumentOverlayStore
];
// v14 did not change the set of object stores.
const V14_STORES = V13_STORES;
// v15 adds the client-side-indexing stores used by the backfiller.
const V15_STORES = [
...V14_STORES,
DbIndexConfigurationStore,
DbIndexStateStore,
DbIndexEntryStore
];
  3119. /** Returns the object stores for the provided schema. */
  3120. function getObjectStores(schemaVersion) {
  3121. if (schemaVersion === 15) {
  3122. return V15_STORES;
  3123. }
  3124. else if (schemaVersion === 14) {
  3125. return V14_STORES;
  3126. }
  3127. else if (schemaVersion === 13) {
  3128. return V13_STORES;
  3129. }
  3130. else if (schemaVersion === 12) {
  3131. return V12_STORES;
  3132. }
  3133. else if (schemaVersion === 11) {
  3134. return V11_STORES;
  3135. }
  3136. else {
  3137. fail();
  3138. }
  3139. }
  3140. /**
  3141. * @license
  3142. * Copyright 2020 Google LLC
  3143. *
  3144. * Licensed under the Apache License, Version 2.0 (the "License");
  3145. * you may not use this file except in compliance with the License.
  3146. * You may obtain a copy of the License at
  3147. *
  3148. * http://www.apache.org/licenses/LICENSE-2.0
  3149. *
  3150. * Unless required by applicable law or agreed to in writing, software
  3151. * distributed under the License is distributed on an "AS IS" BASIS,
  3152. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  3153. * See the License for the specific language governing permissions and
  3154. * limitations under the License.
  3155. */
class IndexedDbTransaction extends PersistenceTransaction {
constructor(simpleDbTransaction, currentSequenceNumber) {
super();
// The underlying SimpleDb transaction this persistence transaction wraps.
this.simpleDbTransaction = simpleDbTransaction;
// Sequence number associated with this transaction (see ListenSequence).
this.currentSequenceNumber = currentSequenceNumber;
}
}
  3163. function getStore(txn, store) {
  3164. const indexedDbTransaction = debugCast(txn);
  3165. return SimpleDb.getStore(indexedDbTransaction.simpleDbTransaction, store);
  3166. }
  3167. /**
  3168. * @license
  3169. * Copyright 2017 Google LLC
  3170. *
  3171. * Licensed under the Apache License, Version 2.0 (the "License");
  3172. * you may not use this file except in compliance with the License.
  3173. * You may obtain a copy of the License at
  3174. *
  3175. * http://www.apache.org/licenses/LICENSE-2.0
  3176. *
  3177. * Unless required by applicable law or agreed to in writing, software
  3178. * distributed under the License is distributed on an "AS IS" BASIS,
  3179. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  3180. * See the License for the specific language governing permissions and
  3181. * limitations under the License.
  3182. */
  3183. function objectSize(obj) {
  3184. let count = 0;
  3185. for (const key in obj) {
  3186. if (Object.prototype.hasOwnProperty.call(obj, key)) {
  3187. count++;
  3188. }
  3189. }
  3190. return count;
  3191. }
  3192. function forEach(obj, fn) {
  3193. for (const key in obj) {
  3194. if (Object.prototype.hasOwnProperty.call(obj, key)) {
  3195. fn(key, obj[key]);
  3196. }
  3197. }
  3198. }
  3199. function isEmpty(obj) {
  3200. for (const key in obj) {
  3201. if (Object.prototype.hasOwnProperty.call(obj, key)) {
  3202. return false;
  3203. }
  3204. }
  3205. return true;
  3206. }
  3207. /**
  3208. * @license
  3209. * Copyright 2017 Google LLC
  3210. *
  3211. * Licensed under the Apache License, Version 2.0 (the "License");
  3212. * you may not use this file except in compliance with the License.
  3213. * You may obtain a copy of the License at
  3214. *
  3215. * http://www.apache.org/licenses/LICENSE-2.0
  3216. *
  3217. * Unless required by applicable law or agreed to in writing, software
  3218. * distributed under the License is distributed on an "AS IS" BASIS,
  3219. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  3220. * See the License for the specific language governing permissions and
  3221. * limitations under the License.
  3222. */
  3223. // An immutable sorted map implementation, based on a Left-leaning Red-Black
  3224. // tree.
  3225. class SortedMap {
constructor(comparator, root) {
this.comparator = comparator;
// Default to the shared empty sentinel node when no root is supplied.
this.root = root ? root : LLRBNode.EMPTY;
}
// Returns a copy of the map, with the specified key/value added or replaced.
insert(key, value) {
// Re-color the new root black to maintain the red-black invariants.
return new SortedMap(this.comparator, this.root
.insert(key, value, this.comparator)
.copy(null, null, LLRBNode.BLACK, null, null));
}
// Returns a copy of the map, with the specified key removed.
remove(key) {
return new SortedMap(this.comparator, this.root
.remove(key, this.comparator)
.copy(null, null, LLRBNode.BLACK, null, null));
}
  3242. // Returns the value of the node with the given key, or null.
  3243. get(key) {
  3244. let node = this.root;
  3245. while (!node.isEmpty()) {
  3246. const cmp = this.comparator(key, node.key);
  3247. if (cmp === 0) {
  3248. return node.value;
  3249. }
  3250. else if (cmp < 0) {
  3251. node = node.left;
  3252. }
  3253. else if (cmp > 0) {
  3254. node = node.right;
  3255. }
  3256. }
  3257. return null;
  3258. }
  3259. // Returns the index of the element in this sorted map, or -1 if it doesn't
  3260. // exist.
indexOf(key) {
// Number of nodes that were pruned when descending right
let prunedNodes = 0;
let node = this.root;
while (!node.isEmpty()) {
const cmp = this.comparator(key, node.key);
if (cmp === 0) {
// Rank = nodes skipped on right descents + size of this left subtree.
return prunedNodes + node.left.size;
}
else if (cmp < 0) {
node = node.left;
}
else {
// Count all nodes left of the node plus the node itself
prunedNodes += node.left.size + 1;
node = node.right;
}
}
// Node not found
return -1;
}
  3282. isEmpty() {
  3283. return this.root.isEmpty();
  3284. }
  3285. // Returns the total number of nodes in the map.
  3286. get size() {
  3287. return this.root.size;
  3288. }
  3289. // Returns the minimum key in the map.
  3290. minKey() {
  3291. return this.root.minKey();
  3292. }
  3293. // Returns the maximum key in the map.
  3294. maxKey() {
  3295. return this.root.maxKey();
  3296. }
  3297. // Traverses the map in key order and calls the specified action function
  3298. // for each key/value pair. If action returns true, traversal is aborted.
  3299. // Returns the first truthy value returned by action, or the last falsey
  3300. // value returned by action.
  3301. inorderTraversal(action) {
  3302. return this.root.inorderTraversal(action);
  3303. }
  3304. forEach(fn) {
  3305. this.inorderTraversal((k, v) => {
  3306. fn(k, v);
  3307. return false;
  3308. });
  3309. }
  3310. toString() {
  3311. const descriptions = [];
  3312. this.inorderTraversal((k, v) => {
  3313. descriptions.push(`${k}:${v}`);
  3314. return false;
  3315. });
  3316. return `{${descriptions.join(', ')}}`;
  3317. }
  3318. // Traverses the map in reverse key order and calls the specified action
  3319. // function for each key/value pair. If action returns true, traversal is
  3320. // aborted.
  3321. // Returns the first truthy value returned by action, or the last falsey
  3322. // value returned by action.
  3323. reverseTraversal(action) {
  3324. return this.root.reverseTraversal(action);
  3325. }
  3326. // Returns an iterator over the SortedMap.
  3327. getIterator() {
  3328. return new SortedMapIterator(this.root, null, this.comparator, false);
  3329. }
  3330. getIteratorFrom(key) {
  3331. return new SortedMapIterator(this.root, key, this.comparator, false);
  3332. }
  3333. getReverseIterator() {
  3334. return new SortedMapIterator(this.root, null, this.comparator, true);
  3335. }
  3336. getReverseIteratorFrom(key) {
  3337. return new SortedMapIterator(this.root, key, this.comparator, true);
  3338. }
  3339. } // end SortedMap
// An iterator over an LLRBNode.
class SortedMapIterator {
    /**
     * @param node - Root of the (sub)tree to iterate over.
     * @param startKey - Optional key at which iteration begins; entries before
     * it (in iteration order) are skipped.
     * @param comparator - Key comparator, as used by the owning SortedMap.
     * @param isReverse - Iterate in descending key order when true.
     */
    constructor(node, startKey, comparator, isReverse) {
        this.isReverse = isReverse;
        // Stack of pending nodes; the top of the stack is the next entry to emit.
        this.nodeStack = [];
        let cmp = 1;
        while (!node.isEmpty()) {
            cmp = startKey ? comparator(node.key, startKey) : 1;
            // flip the comparison if we're going in reverse
            if (startKey && isReverse) {
                cmp *= -1;
            }
            if (cmp < 0) {
                // This node is less than our start key. ignore it
                if (this.isReverse) {
                    node = node.left;
                }
                else {
                    node = node.right;
                }
            }
            else if (cmp === 0) {
                // This node is exactly equal to our start key. Push it on the stack,
                // but stop iterating;
                this.nodeStack.push(node);
                break;
            }
            else {
                // This node is greater than our start key, add it to the stack and move
                // to the next one
                this.nodeStack.push(node);
                if (this.isReverse) {
                    node = node.right;
                }
                else {
                    node = node.left;
                }
            }
        }
    }
    /**
     * Returns the next entry ({key, value}) and advances the iterator.
     * Precondition: hasNext() is true — on an exhausted iterator, pop()
     * returns undefined and reading node.key throws.
     */
    getNext() {
        let node = this.nodeStack.pop();
        const result = { key: node.key, value: node.value };
        // Push the popped node's successor chain so that the next entry in
        // iteration order ends up on top of the stack.
        if (this.isReverse) {
            node = node.left;
            while (!node.isEmpty()) {
                this.nodeStack.push(node);
                node = node.right;
            }
        }
        else {
            node = node.right;
            while (!node.isEmpty()) {
                this.nodeStack.push(node);
                node = node.left;
            }
        }
        return result;
    }
    hasNext() {
        return this.nodeStack.length > 0;
    }
    /** Returns the next entry without advancing, or null if exhausted. */
    peek() {
        if (this.nodeStack.length === 0) {
            return null;
        }
        const node = this.nodeStack[this.nodeStack.length - 1];
        return { key: node.key, value: node.value };
    }
} // end SortedMapIterator
// Represents a node in a Left-leaning Red-Black tree.
// Invariants (restored by fixUp() after every mutation): red links lean left,
// no node has two consecutive red links, and every root-to-leaf path contains
// the same number of black nodes.
class LLRBNode {
    constructor(key, value, color, left, right) {
        this.key = key;
        this.value = value;
        // New nodes are red by default (standard LLRB insertion color).
        this.color = color != null ? color : LLRBNode.RED;
        // Missing children default to the shared empty sentinel node.
        this.left = left != null ? left : LLRBNode.EMPTY;
        this.right = right != null ? right : LLRBNode.EMPTY;
        // Cached subtree size; lets SortedMap.indexOf run in O(log n).
        this.size = this.left.size + 1 + this.right.size;
    }
    // Returns a copy of the current node, optionally replacing pieces of it.
    // Passing null for an argument keeps the node's existing part.
    copy(key, value, color, left, right) {
        return new LLRBNode(key != null ? key : this.key, value != null ? value : this.value, color != null ? color : this.color, left != null ? left : this.left, right != null ? right : this.right);
    }
    isEmpty() {
        return false;
    }
    // Traverses the tree in key order and calls the specified action function
    // for each node. If action returns true, traversal is aborted.
    // Returns the first truthy value returned by action, or the last falsey
    // value returned by action.
    inorderTraversal(action) {
        return (this.left.inorderTraversal(action) ||
            action(this.key, this.value) ||
            this.right.inorderTraversal(action));
    }
    // Traverses the tree in reverse key order and calls the specified action
    // function for each node. If action returns true, traversal is aborted.
    // Returns the first truthy value returned by action, or the last falsey
    // value returned by action.
    reverseTraversal(action) {
        return (this.right.reverseTraversal(action) ||
            action(this.key, this.value) ||
            this.left.reverseTraversal(action));
    }
    // Returns the minimum node in the tree.
    min() {
        if (this.left.isEmpty()) {
            return this;
        }
        else {
            return this.left.min();
        }
    }
    // Returns the minimum key in the tree.
    minKey() {
        return this.min().key;
    }
    // Returns the maximum key in the tree.
    maxKey() {
        if (this.right.isEmpty()) {
            return this.key;
        }
        else {
            return this.right.maxKey();
        }
    }
    // Returns new tree, with the key/value added (or the value replaced, if
    // the key is already present).
    insert(key, value, comparator) {
        let n = this;
        const cmp = comparator(key, n.key);
        if (cmp < 0) {
            n = n.copy(null, null, null, n.left.insert(key, value, comparator), null);
        }
        else if (cmp === 0) {
            // Key already exists: replace the value in place.
            n = n.copy(null, value, null, null, null);
        }
        else {
            n = n.copy(null, null, null, null, n.right.insert(key, value, comparator));
        }
        return n.fixUp();
    }
    // Removes the minimum node, rebalancing on the way back up.
    removeMin() {
        if (this.left.isEmpty()) {
            return LLRBNode.EMPTY;
        }
        let n = this;
        // Ensure the left child is not a 2-node before descending into it.
        if (!n.left.isRed() && !n.left.left.isRed()) {
            n = n.moveRedLeft();
        }
        n = n.copy(null, null, null, n.left.removeMin(), null);
        return n.fixUp();
    }
    // Returns new tree, with the specified item removed.
    remove(key, comparator) {
        let smallest;
        let n = this;
        if (comparator(key, n.key) < 0) {
            // Ensure the left child is not a 2-node before descending left.
            if (!n.left.isEmpty() && !n.left.isRed() && !n.left.left.isRed()) {
                n = n.moveRedLeft();
            }
            n = n.copy(null, null, null, n.left.remove(key, comparator), null);
        }
        else {
            if (n.left.isRed()) {
                n = n.rotateRight();
            }
            // Ensure the right child is not a 2-node before descending right.
            if (!n.right.isEmpty() && !n.right.isRed() && !n.right.left.isRed()) {
                n = n.moveRedRight();
            }
            if (comparator(key, n.key) === 0) {
                if (n.right.isEmpty()) {
                    return LLRBNode.EMPTY;
                }
                else {
                    // Replace this node with its in-order successor, then delete
                    // the successor from the right subtree.
                    smallest = n.right.min();
                    n = n.copy(smallest.key, smallest.value, null, null, n.right.removeMin());
                }
            }
            n = n.copy(null, null, null, null, n.right.remove(key, comparator));
        }
        return n.fixUp();
    }
    isRed() {
        // color is a boolean: LLRBNode.RED (true) or LLRBNode.BLACK (false).
        return this.color;
    }
    // Returns new tree after performing any needed rotations.
    fixUp() {
        let n = this;
        // Right-leaning red link: rotate left.
        if (n.right.isRed() && !n.left.isRed()) {
            n = n.rotateLeft();
        }
        // Two consecutive red links on the left: rotate right.
        if (n.left.isRed() && n.left.left.isRed()) {
            n = n.rotateRight();
        }
        // Both children red: split the temporary 4-node by flipping colors.
        if (n.left.isRed() && n.right.isRed()) {
            n = n.colorFlip();
        }
        return n;
    }
    // Borrows from the right sibling so the left child is not a 2-node.
    moveRedLeft() {
        let n = this.colorFlip();
        if (n.right.left.isRed()) {
            n = n.copy(null, null, null, null, n.right.rotateRight());
            n = n.rotateLeft();
            n = n.colorFlip();
        }
        return n;
    }
    // Borrows from the left sibling so the right child is not a 2-node.
    moveRedRight() {
        let n = this.colorFlip();
        if (n.left.left.isRed()) {
            n = n.rotateRight();
            n = n.colorFlip();
        }
        return n;
    }
    // Rotates the right child up, preserving this node's color.
    rotateLeft() {
        const nl = this.copy(null, null, LLRBNode.RED, null, this.right.left);
        return this.right.copy(null, null, this.color, nl, null);
    }
    // Rotates the left child up, preserving this node's color.
    rotateRight() {
        const nr = this.copy(null, null, LLRBNode.RED, this.left.right, null);
        return this.left.copy(null, null, this.color, null, nr);
    }
    // Inverts the colors of this node and of both children.
    colorFlip() {
        const left = this.left.copy(null, null, !this.left.color, null, null);
        const right = this.right.copy(null, null, !this.right.color, null, null);
        return this.copy(null, null, !this.color, left, right);
    }
    // For testing.
    checkMaxDepth() {
        const blackDepth = this.check();
        if (Math.pow(2.0, blackDepth) <= this.size + 1) {
            return true;
        }
        else {
            return false;
        }
    }
    // In a balanced RB tree, the black-depth (number of black nodes) from root to
    // leaves is equal on both sides. This function verifies that or asserts.
    check() {
        if (this.isRed() && this.left.isRed()) {
            throw fail();
        }
        if (this.right.isRed()) {
            throw fail();
        }
        const blackDepth = this.left.check();
        if (blackDepth !== this.right.check()) {
            throw fail();
        }
        else {
            return blackDepth + (this.isRed() ? 0 : 1);
        }
    }
} // end LLRBNode
// Empty node is shared between all LLRB trees. Initialized to null here and
// replaced with a real LLRBEmptyNode instance once that class is declared.
// eslint-disable-next-line @typescript-eslint/no-explicit-any
LLRBNode.EMPTY = null;
LLRBNode.RED = true;
LLRBNode.BLACK = false;
// Represents an empty node (a leaf node in the Red-Black Tree). A single
// shared instance (LLRBNode.EMPTY) terminates every tree; accessors that
// only make sense on a real node fail loudly instead of returning junk.
class LLRBEmptyNode {
    constructor() {
        // An empty subtree contributes no entries.
        this.size = 0;
    }
    get key() {
        throw fail();
    }
    get value() {
        throw fail();
    }
    get color() {
        throw fail();
    }
    get left() {
        throw fail();
    }
    get right() {
        throw fail();
    }
    // Returns a copy of the current node. The empty node is immutable and
    // shared, so the "copy" is itself.
    copy(key, value, color, left, right) {
        return this;
    }
    // Returns a copy of the tree, with the specified key/value added.
    // Inserting into an empty tree yields a single (red) node.
    insert(key, value, comparator) {
        return new LLRBNode(key, value);
    }
    // Returns a copy of the tree, with the specified key removed.
    // Removing from an empty tree is a no-op.
    remove(key, comparator) {
        return this;
    }
    isEmpty() {
        return true;
    }
    // Nothing to visit; report "traversal not aborted".
    inorderTraversal(action) {
        return false;
    }
    reverseTraversal(action) {
        return false;
    }
    minKey() {
        return null;
    }
    maxKey() {
        return null;
    }
    // Leaf (empty) nodes are black by definition.
    isRed() {
        return false;
    }
    // For testing.
    checkMaxDepth() {
        return true;
    }
    // A leaf contributes zero black nodes to the black-depth.
    check() {
        return 0;
    }
} // end LLRBEmptyNode
LLRBNode.EMPTY = new LLRBEmptyNode();
  3662. /**
  3663. * @license
  3664. * Copyright 2017 Google LLC
  3665. *
  3666. * Licensed under the Apache License, Version 2.0 (the "License");
  3667. * you may not use this file except in compliance with the License.
  3668. * You may obtain a copy of the License at
  3669. *
  3670. * http://www.apache.org/licenses/LICENSE-2.0
  3671. *
  3672. * Unless required by applicable law or agreed to in writing, software
  3673. * distributed under the License is distributed on an "AS IS" BASIS,
  3674. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  3675. * See the License for the specific language governing permissions and
  3676. * limitations under the License.
  3677. */
  3678. /**
  3679. * SortedSet is an immutable (copy-on-write) collection that holds elements
  3680. * in order specified by the provided comparator.
  3681. *
  3682. * NOTE: if provided comparator returns 0 for two elements, we consider them to
  3683. * be equal!
  3684. */
  3685. class SortedSet {
  3686. constructor(comparator) {
  3687. this.comparator = comparator;
  3688. this.data = new SortedMap(this.comparator);
  3689. }
  3690. has(elem) {
  3691. return this.data.get(elem) !== null;
  3692. }
  3693. first() {
  3694. return this.data.minKey();
  3695. }
  3696. last() {
  3697. return this.data.maxKey();
  3698. }
  3699. get size() {
  3700. return this.data.size;
  3701. }
  3702. indexOf(elem) {
  3703. return this.data.indexOf(elem);
  3704. }
  3705. /** Iterates elements in order defined by "comparator" */
  3706. forEach(cb) {
  3707. this.data.inorderTraversal((k, v) => {
  3708. cb(k);
  3709. return false;
  3710. });
  3711. }
  3712. /** Iterates over `elem`s such that: range[0] &lt;= elem &lt; range[1]. */
  3713. forEachInRange(range, cb) {
  3714. const iter = this.data.getIteratorFrom(range[0]);
  3715. while (iter.hasNext()) {
  3716. const elem = iter.getNext();
  3717. if (this.comparator(elem.key, range[1]) >= 0) {
  3718. return;
  3719. }
  3720. cb(elem.key);
  3721. }
  3722. }
  3723. /**
  3724. * Iterates over `elem`s such that: start &lt;= elem until false is returned.
  3725. */
  3726. forEachWhile(cb, start) {
  3727. let iter;
  3728. if (start !== undefined) {
  3729. iter = this.data.getIteratorFrom(start);
  3730. }
  3731. else {
  3732. iter = this.data.getIterator();
  3733. }
  3734. while (iter.hasNext()) {
  3735. const elem = iter.getNext();
  3736. const result = cb(elem.key);
  3737. if (!result) {
  3738. return;
  3739. }
  3740. }
  3741. }
  3742. /** Finds the least element greater than or equal to `elem`. */
  3743. firstAfterOrEqual(elem) {
  3744. const iter = this.data.getIteratorFrom(elem);
  3745. return iter.hasNext() ? iter.getNext().key : null;
  3746. }
  3747. getIterator() {
  3748. return new SortedSetIterator(this.data.getIterator());
  3749. }
  3750. getIteratorFrom(key) {
  3751. return new SortedSetIterator(this.data.getIteratorFrom(key));
  3752. }
  3753. /** Inserts or updates an element */
  3754. add(elem) {
  3755. return this.copy(this.data.remove(elem).insert(elem, true));
  3756. }
  3757. /** Deletes an element */
  3758. delete(elem) {
  3759. if (!this.has(elem)) {
  3760. return this;
  3761. }
  3762. return this.copy(this.data.remove(elem));
  3763. }
  3764. isEmpty() {
  3765. return this.data.isEmpty();
  3766. }
  3767. unionWith(other) {
  3768. let result = this;
  3769. // Make sure `result` always refers to the larger one of the two sets.
  3770. if (result.size < other.size) {
  3771. result = other;
  3772. other = this;
  3773. }
  3774. other.forEach(elem => {
  3775. result = result.add(elem);
  3776. });
  3777. return result;
  3778. }
  3779. isEqual(other) {
  3780. if (!(other instanceof SortedSet)) {
  3781. return false;
  3782. }
  3783. if (this.size !== other.size) {
  3784. return false;
  3785. }
  3786. const thisIt = this.data.getIterator();
  3787. const otherIt = other.data.getIterator();
  3788. while (thisIt.hasNext()) {
  3789. const thisElem = thisIt.getNext().key;
  3790. const otherElem = otherIt.getNext().key;
  3791. if (this.comparator(thisElem, otherElem) !== 0) {
  3792. return false;
  3793. }
  3794. }
  3795. return true;
  3796. }
  3797. toArray() {
  3798. const res = [];
  3799. this.forEach(targetId => {
  3800. res.push(targetId);
  3801. });
  3802. return res;
  3803. }
  3804. toString() {
  3805. const result = [];
  3806. this.forEach(elem => result.push(elem));
  3807. return 'SortedSet(' + result.toString() + ')';
  3808. }
  3809. copy(data) {
  3810. const result = new SortedSet(this.comparator);
  3811. result.data = data;
  3812. return result;
  3813. }
  3814. }
  3815. class SortedSetIterator {
  3816. constructor(iter) {
  3817. this.iter = iter;
  3818. }
  3819. getNext() {
  3820. return this.iter.getNext().key;
  3821. }
  3822. hasNext() {
  3823. return this.iter.hasNext();
  3824. }
  3825. }
  3826. /**
  3827. * Compares two sorted sets for equality using their natural ordering. The
  3828. * method computes the intersection and invokes `onAdd` for every element that
  3829. * is in `after` but not `before`. `onRemove` is invoked for every element in
  3830. * `before` but missing from `after`.
  3831. *
  3832. * The method creates a copy of both `before` and `after` and runs in O(n log
  3833. * n), where n is the size of the two lists.
  3834. *
  3835. * @param before - The elements that exist in the original set.
  3836. * @param after - The elements to diff against the original set.
  3837. * @param comparator - The comparator for the elements in before and after.
  3838. * @param onAdd - A function to invoke for every element that is part of `
  3839. * after` but not `before`.
  3840. * @param onRemove - A function to invoke for every element that is part of
  3841. * `before` but not `after`.
  3842. */
  3843. function diffSortedSets(before, after, comparator, onAdd, onRemove) {
  3844. const beforeIt = before.getIterator();
  3845. const afterIt = after.getIterator();
  3846. let beforeValue = advanceIterator(beforeIt);
  3847. let afterValue = advanceIterator(afterIt);
  3848. // Walk through the two sets at the same time, using the ordering defined by
  3849. // `comparator`.
  3850. while (beforeValue || afterValue) {
  3851. let added = false;
  3852. let removed = false;
  3853. if (beforeValue && afterValue) {
  3854. const cmp = comparator(beforeValue, afterValue);
  3855. if (cmp < 0) {
  3856. // The element was removed if the next element in our ordered
  3857. // walkthrough is only in `before`.
  3858. removed = true;
  3859. }
  3860. else if (cmp > 0) {
  3861. // The element was added if the next element in our ordered walkthrough
  3862. // is only in `after`.
  3863. added = true;
  3864. }
  3865. }
  3866. else if (beforeValue != null) {
  3867. removed = true;
  3868. }
  3869. else {
  3870. added = true;
  3871. }
  3872. if (added) {
  3873. onAdd(afterValue);
  3874. afterValue = advanceIterator(afterIt);
  3875. }
  3876. else if (removed) {
  3877. onRemove(beforeValue);
  3878. beforeValue = advanceIterator(beforeIt);
  3879. }
  3880. else {
  3881. beforeValue = advanceIterator(beforeIt);
  3882. afterValue = advanceIterator(afterIt);
  3883. }
  3884. }
  3885. }
  3886. /**
  3887. * Returns the next element from the iterator or `undefined` if none available.
  3888. */
  3889. function advanceIterator(it) {
  3890. return it.hasNext() ? it.getNext() : undefined;
  3891. }
  3892. /**
  3893. * @license
  3894. * Copyright 2020 Google LLC
  3895. *
  3896. * Licensed under the Apache License, Version 2.0 (the "License");
  3897. * you may not use this file except in compliance with the License.
  3898. * You may obtain a copy of the License at
  3899. *
  3900. * http://www.apache.org/licenses/LICENSE-2.0
  3901. *
  3902. * Unless required by applicable law or agreed to in writing, software
  3903. * distributed under the License is distributed on an "AS IS" BASIS,
  3904. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  3905. * See the License for the specific language governing permissions and
  3906. * limitations under the License.
  3907. */
  3908. /**
  3909. * Provides a set of fields that can be used to partially patch a document.
  3910. * FieldMask is used in conjunction with ObjectValue.
  3911. * Examples:
  3912. * foo - Overwrites foo entirely with the provided value. If foo is not
  3913. * present in the companion ObjectValue, the field is deleted.
  3914. * foo.bar - Overwrites only the field bar of the object foo.
  3915. * If foo is not an object, foo is replaced with an object
  3916. * containing foo
  3917. */
  3918. class FieldMask {
  3919. constructor(fields) {
  3920. this.fields = fields;
  3921. // TODO(dimond): validation of FieldMask
  3922. // Sort the field mask to support `FieldMask.isEqual()` and assert below.
  3923. fields.sort(FieldPath$1.comparator);
  3924. }
  3925. static empty() {
  3926. return new FieldMask([]);
  3927. }
  3928. /**
  3929. * Returns a new FieldMask object that is the result of adding all the given
  3930. * fields paths to this field mask.
  3931. */
  3932. unionWith(extraFields) {
  3933. let mergedMaskSet = new SortedSet(FieldPath$1.comparator);
  3934. for (const fieldPath of this.fields) {
  3935. mergedMaskSet = mergedMaskSet.add(fieldPath);
  3936. }
  3937. for (const fieldPath of extraFields) {
  3938. mergedMaskSet = mergedMaskSet.add(fieldPath);
  3939. }
  3940. return new FieldMask(mergedMaskSet.toArray());
  3941. }
  3942. /**
  3943. * Verifies that `fieldPath` is included by at least one field in this field
  3944. * mask.
  3945. *
  3946. * This is an O(n) operation, where `n` is the size of the field mask.
  3947. */
  3948. covers(fieldPath) {
  3949. for (const fieldMaskPath of this.fields) {
  3950. if (fieldMaskPath.isPrefixOf(fieldPath)) {
  3951. return true;
  3952. }
  3953. }
  3954. return false;
  3955. }
  3956. isEqual(other) {
  3957. return arrayEquals(this.fields, other.fields, (l, r) => l.isEqual(r));
  3958. }
  3959. }
  3960. /**
  3961. * @license
  3962. * Copyright 2020 Google LLC
  3963. *
  3964. * Licensed under the Apache License, Version 2.0 (the "License");
  3965. * you may not use this file except in compliance with the License.
  3966. * You may obtain a copy of the License at
  3967. *
  3968. * http://www.apache.org/licenses/LICENSE-2.0
  3969. *
  3970. * Unless required by applicable law or agreed to in writing, software
  3971. * distributed under the License is distributed on an "AS IS" BASIS,
  3972. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  3973. * See the License for the specific language governing permissions and
  3974. * limitations under the License.
  3975. */
  3976. /** Converts a Base64 encoded string to a binary string. */
  3977. function decodeBase64(encoded) {
  3978. // Note: We used to validate the base64 string here via a regular expression.
  3979. // This was removed to improve the performance of indexing.
  3980. return Buffer.from(encoded, 'base64').toString('binary');
  3981. }
  3982. /** Converts a binary string to a Base64 encoded string. */
  3983. function encodeBase64(raw) {
  3984. return Buffer.from(raw, 'binary').toString('base64');
  3985. }
/** True if and only if the Base64 conversion functions are available. */
function isBase64Available() {
    // This is the Node build: Buffer-based base64 support is always present.
    return true;
}
  3990. /**
  3991. * @license
  3992. * Copyright 2020 Google LLC
  3993. *
  3994. * Licensed under the Apache License, Version 2.0 (the "License");
  3995. * you may not use this file except in compliance with the License.
  3996. * You may obtain a copy of the License at
  3997. *
  3998. * http://www.apache.org/licenses/LICENSE-2.0
  3999. *
  4000. * Unless required by applicable law or agreed to in writing, software
  4001. * distributed under the License is distributed on an "AS IS" BASIS,
  4002. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4003. * See the License for the specific language governing permissions and
  4004. * limitations under the License.
  4005. */
  4006. /**
  4007. * Immutable class that represents a "proto" byte string.
  4008. *
  4009. * Proto byte strings can either be Base64-encoded strings or Uint8Arrays when
  4010. * sent on the wire. This class abstracts away this differentiation by holding
  4011. * the proto byte string in a common class that must be converted into a string
  4012. * before being sent as a proto.
  4013. * @internal
  4014. */
  4015. class ByteString {
  4016. constructor(binaryString) {
  4017. this.binaryString = binaryString;
  4018. }
  4019. static fromBase64String(base64) {
  4020. const binaryString = decodeBase64(base64);
  4021. return new ByteString(binaryString);
  4022. }
  4023. static fromUint8Array(array) {
  4024. // TODO(indexing); Remove the copy of the byte string here as this method
  4025. // is frequently called during indexing.
  4026. const binaryString = binaryStringFromUint8Array(array);
  4027. return new ByteString(binaryString);
  4028. }
  4029. [Symbol.iterator]() {
  4030. let i = 0;
  4031. return {
  4032. next: () => {
  4033. if (i < this.binaryString.length) {
  4034. return { value: this.binaryString.charCodeAt(i++), done: false };
  4035. }
  4036. else {
  4037. return { value: undefined, done: true };
  4038. }
  4039. }
  4040. };
  4041. }
  4042. toBase64() {
  4043. return encodeBase64(this.binaryString);
  4044. }
  4045. toUint8Array() {
  4046. return uint8ArrayFromBinaryString(this.binaryString);
  4047. }
  4048. approximateByteSize() {
  4049. return this.binaryString.length * 2;
  4050. }
  4051. compareTo(other) {
  4052. return primitiveComparator(this.binaryString, other.binaryString);
  4053. }
  4054. isEqual(other) {
  4055. return this.binaryString === other.binaryString;
  4056. }
  4057. }
  4058. ByteString.EMPTY_BYTE_STRING = new ByteString('');
  4059. /**
  4060. * Helper function to convert an Uint8array to a binary string.
  4061. */
  4062. function binaryStringFromUint8Array(array) {
  4063. let binaryString = '';
  4064. for (let i = 0; i < array.length; ++i) {
  4065. binaryString += String.fromCharCode(array[i]);
  4066. }
  4067. return binaryString;
  4068. }
  4069. /**
  4070. * Helper function to convert a binary string to an Uint8Array.
  4071. */
  4072. function uint8ArrayFromBinaryString(binaryString) {
  4073. const buffer = new Uint8Array(binaryString.length);
  4074. for (let i = 0; i < binaryString.length; i++) {
  4075. buffer[i] = binaryString.charCodeAt(i);
  4076. }
  4077. return buffer;
  4078. }
  4079. /**
  4080. * @license
  4081. * Copyright 2020 Google LLC
  4082. *
  4083. * Licensed under the Apache License, Version 2.0 (the "License");
  4084. * you may not use this file except in compliance with the License.
  4085. * You may obtain a copy of the License at
  4086. *
  4087. * http://www.apache.org/licenses/LICENSE-2.0
  4088. *
  4089. * Unless required by applicable law or agreed to in writing, software
  4090. * distributed under the License is distributed on an "AS IS" BASIS,
  4091. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4092. * See the License for the specific language governing permissions and
  4093. * limitations under the License.
  4094. */
  4095. // A RegExp matching ISO 8601 UTC timestamps with optional fraction.
  4096. const ISO_TIMESTAMP_REG_EXP = new RegExp(/^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d(?:\.(\d+))?Z$/);
  4097. /**
  4098. * Converts the possible Proto values for a timestamp value into a "seconds and
  4099. * nanos" representation.
  4100. */
  4101. function normalizeTimestamp(date) {
  4102. hardAssert(!!date);
  4103. // The json interface (for the browser) will return an iso timestamp string,
  4104. // while the proto js library (for node) will return a
  4105. // google.protobuf.Timestamp instance.
  4106. if (typeof date === 'string') {
  4107. // The date string can have higher precision (nanos) than the Date class
  4108. // (millis), so we do some custom parsing here.
  4109. // Parse the nanos right out of the string.
  4110. let nanos = 0;
  4111. const fraction = ISO_TIMESTAMP_REG_EXP.exec(date);
  4112. hardAssert(!!fraction);
  4113. if (fraction[1]) {
  4114. // Pad the fraction out to 9 digits (nanos).
  4115. let nanoStr = fraction[1];
  4116. nanoStr = (nanoStr + '000000000').substr(0, 9);
  4117. nanos = Number(nanoStr);
  4118. }
  4119. // Parse the date to get the seconds.
  4120. const parsedDate = new Date(date);
  4121. const seconds = Math.floor(parsedDate.getTime() / 1000);
  4122. return { seconds, nanos };
  4123. }
  4124. else {
  4125. // TODO(b/37282237): Use strings for Proto3 timestamps
  4126. // assert(!this.options.useProto3Json,
  4127. // 'The timestamp instance format requires Proto JS.');
  4128. const seconds = normalizeNumber(date.seconds);
  4129. const nanos = normalizeNumber(date.nanos);
  4130. return { seconds, nanos };
  4131. }
  4132. }
  4133. /**
  4134. * Converts the possible Proto types for numbers into a JavaScript number.
  4135. * Returns 0 if the value is not numeric.
  4136. */
  4137. function normalizeNumber(value) {
  4138. // TODO(bjornick): Handle int64 greater than 53 bits.
  4139. if (typeof value === 'number') {
  4140. return value;
  4141. }
  4142. else if (typeof value === 'string') {
  4143. return Number(value);
  4144. }
  4145. else {
  4146. return 0;
  4147. }
  4148. }
  4149. /** Converts the possible Proto types for Blobs into a ByteString. */
  4150. function normalizeByteString(blob) {
  4151. if (typeof blob === 'string') {
  4152. return ByteString.fromBase64String(blob);
  4153. }
  4154. else {
  4155. return ByteString.fromUint8Array(blob);
  4156. }
  4157. }
  4158. /**
  4159. * @license
  4160. * Copyright 2020 Google LLC
  4161. *
  4162. * Licensed under the Apache License, Version 2.0 (the "License");
  4163. * you may not use this file except in compliance with the License.
  4164. * You may obtain a copy of the License at
  4165. *
  4166. * http://www.apache.org/licenses/LICENSE-2.0
  4167. *
  4168. * Unless required by applicable law or agreed to in writing, software
  4169. * distributed under the License is distributed on an "AS IS" BASIS,
  4170. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4171. * See the License for the specific language governing permissions and
  4172. * limitations under the License.
  4173. */
  4174. /**
  4175. * Represents a locally-applied ServerTimestamp.
  4176. *
  4177. * Server Timestamps are backed by MapValues that contain an internal field
  4178. * `__type__` with a value of `server_timestamp`. The previous value and local
  4179. * write time are stored in its `__previous_value__` and `__local_write_time__`
  4180. * fields respectively.
  4181. *
  4182. * Notes:
  4183. * - ServerTimestampValue instances are created as the result of applying a
  4184. * transform. They can only exist in the local view of a document. Therefore
  4185. * they do not need to be parsed or serialized.
  4186. * - When evaluated locally (e.g. for snapshot.data()), they by default
  4187. * evaluate to `null`. This behavior can be configured by passing custom
  4188. * FieldValueOptions to value().
  4189. * - With respect to other ServerTimestampValues, they sort by their
  4190. * localWriteTime.
  4191. */
// Sentinel stored under TYPE_KEY to tag a MapValue as a server timestamp.
const SERVER_TIMESTAMP_SENTINEL = 'server_timestamp';
// Internal field holding the sentinel type tag.
const TYPE_KEY = '__type__';
// Internal field holding the value the field had before the transform.
const PREVIOUS_VALUE_KEY = '__previous_value__';
// Internal field holding the local time at which the timestamp was written.
const LOCAL_WRITE_TIME_KEY = '__local_write_time__';
  4196. function isServerTimestamp(value) {
  4197. var _a, _b;
  4198. const type = (_b = (((_a = value === null || value === void 0 ? void 0 : value.mapValue) === null || _a === void 0 ? void 0 : _a.fields) || {})[TYPE_KEY]) === null || _b === void 0 ? void 0 : _b.stringValue;
  4199. return type === SERVER_TIMESTAMP_SENTINEL;
  4200. }
  4201. /**
  4202. * Creates a new ServerTimestamp proto value (using the internal format).
  4203. */
  4204. function serverTimestamp$1(localWriteTime, previousValue) {
  4205. const mapValue = {
  4206. fields: {
  4207. [TYPE_KEY]: {
  4208. stringValue: SERVER_TIMESTAMP_SENTINEL
  4209. },
  4210. [LOCAL_WRITE_TIME_KEY]: {
  4211. timestampValue: {
  4212. seconds: localWriteTime.seconds,
  4213. nanos: localWriteTime.nanoseconds
  4214. }
  4215. }
  4216. }
  4217. };
  4218. if (previousValue) {
  4219. mapValue.fields[PREVIOUS_VALUE_KEY] = previousValue;
  4220. }
  4221. return { mapValue };
  4222. }
  4223. /**
  4224. * Returns the value of the field before this ServerTimestamp was set.
  4225. *
  4226. * Preserving the previous values allows the user to display the last resoled
  4227. * value until the backend responds with the timestamp.
  4228. */
  4229. function getPreviousValue(value) {
  4230. const previousValue = value.mapValue.fields[PREVIOUS_VALUE_KEY];
  4231. if (isServerTimestamp(previousValue)) {
  4232. return getPreviousValue(previousValue);
  4233. }
  4234. return previousValue;
  4235. }
  4236. /**
  4237. * Returns the local time at which this timestamp was first set.
  4238. */
  4239. function getLocalWriteTime(value) {
  4240. const localWriteTime = normalizeTimestamp(value.mapValue.fields[LOCAL_WRITE_TIME_KEY].timestampValue);
  4241. return new Timestamp(localWriteTime.seconds, localWriteTime.nanos);
  4242. }
  4243. /**
  4244. * @license
  4245. * Copyright 2017 Google LLC
  4246. *
  4247. * Licensed under the Apache License, Version 2.0 (the "License");
  4248. * you may not use this file except in compliance with the License.
  4249. * You may obtain a copy of the License at
  4250. *
  4251. * http://www.apache.org/licenses/LICENSE-2.0
  4252. *
  4253. * Unless required by applicable law or agreed to in writing, software
  4254. * distributed under the License is distributed on an "AS IS" BASIS,
  4255. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4256. * See the License for the specific language governing permissions and
  4257. * limitations under the License.
  4258. */
  4259. class DatabaseInfo {
  4260. /**
  4261. * Constructs a DatabaseInfo using the provided host, databaseId and
  4262. * persistenceKey.
  4263. *
  4264. * @param databaseId - The database to use.
  4265. * @param appId - The Firebase App Id.
  4266. * @param persistenceKey - A unique identifier for this Firestore's local
  4267. * storage (used in conjunction with the databaseId).
  4268. * @param host - The Firestore backend host to connect to.
  4269. * @param ssl - Whether to use SSL when connecting.
  4270. * @param forceLongPolling - Whether to use the forceLongPolling option
  4271. * when using WebChannel as the network transport.
  4272. * @param autoDetectLongPolling - Whether to use the detectBufferingProxy
  4273. * option when using WebChannel as the network transport.
  4274. * @param useFetchStreams Whether to use the Fetch API instead of
  4275. * XMLHTTPRequest
  4276. */
  4277. constructor(databaseId, appId, persistenceKey, host, ssl, forceLongPolling, autoDetectLongPolling, useFetchStreams) {
  4278. this.databaseId = databaseId;
  4279. this.appId = appId;
  4280. this.persistenceKey = persistenceKey;
  4281. this.host = host;
  4282. this.ssl = ssl;
  4283. this.forceLongPolling = forceLongPolling;
  4284. this.autoDetectLongPolling = autoDetectLongPolling;
  4285. this.useFetchStreams = useFetchStreams;
  4286. }
  4287. }
  4288. /** The default database name for a project. */
  4289. const DEFAULT_DATABASE_NAME = '(default)';
  4290. /**
  4291. * Represents the database ID a Firestore client is associated with.
  4292. * @internal
  4293. */
  4294. class DatabaseId {
  4295. constructor(projectId, database) {
  4296. this.projectId = projectId;
  4297. this.database = database ? database : DEFAULT_DATABASE_NAME;
  4298. }
  4299. static empty() {
  4300. return new DatabaseId('', '');
  4301. }
  4302. get isDefaultDatabase() {
  4303. return this.database === DEFAULT_DATABASE_NAME;
  4304. }
  4305. isEqual(other) {
  4306. return (other instanceof DatabaseId &&
  4307. other.projectId === this.projectId &&
  4308. other.database === this.database);
  4309. }
  4310. }
  4311. function databaseIdFromApp(app, database) {
  4312. if (!Object.prototype.hasOwnProperty.apply(app.options, ['projectId'])) {
  4313. throw new FirestoreError(Code.INVALID_ARGUMENT, '"projectId" not provided in firebase.initializeApp.');
  4314. }
  4315. return new DatabaseId(app.options.projectId, database);
  4316. }
  4317. /**
  4318. * @license
  4319. * Copyright 2017 Google LLC
  4320. *
  4321. * Licensed under the Apache License, Version 2.0 (the "License");
  4322. * you may not use this file except in compliance with the License.
  4323. * You may obtain a copy of the License at
  4324. *
  4325. * http://www.apache.org/licenses/LICENSE-2.0
  4326. *
  4327. * Unless required by applicable law or agreed to in writing, software
  4328. * distributed under the License is distributed on an "AS IS" BASIS,
  4329. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4330. * See the License for the specific language governing permissions and
  4331. * limitations under the License.
  4332. */
  4333. /** Sentinel value that sorts before any Mutation Batch ID. */
  4334. const BATCHID_UNKNOWN = -1;
  4335. /**
  4336. * Returns whether a variable is either undefined or null.
  4337. */
  4338. function isNullOrUndefined(value) {
  4339. return value === null || value === undefined;
  4340. }
  4341. /** Returns whether the value represents -0. */
  4342. function isNegativeZero(value) {
  4343. // Detect if the value is -0.0. Based on polyfill from
  4344. // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Object/is
  4345. return value === 0 && 1 / value === 1 / -0;
  4346. }
  4347. /**
  4348. * Returns whether a value is an integer and in the safe integer range
  4349. * @param value - The value to test for being an integer and in the safe range
  4350. */
  4351. function isSafeInteger(value) {
  4352. return (typeof value === 'number' &&
  4353. Number.isInteger(value) &&
  4354. !isNegativeZero(value) &&
  4355. value <= Number.MAX_SAFE_INTEGER &&
  4356. value >= Number.MIN_SAFE_INTEGER);
  4357. }
  4358. /**
  4359. * @license
  4360. * Copyright 2020 Google LLC
  4361. *
  4362. * Licensed under the Apache License, Version 2.0 (the "License");
  4363. * you may not use this file except in compliance with the License.
  4364. * You may obtain a copy of the License at
  4365. *
  4366. * http://www.apache.org/licenses/LICENSE-2.0
  4367. *
  4368. * Unless required by applicable law or agreed to in writing, software
  4369. * distributed under the License is distributed on an "AS IS" BASIS,
  4370. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4371. * See the License for the specific language governing permissions and
  4372. * limitations under the License.
  4373. */
/** Sentinel `__type__` string that tags the internal maximum map value. */
const MAX_VALUE_TYPE = '__max__';
/**
 * Singleton Value proto that sorts after every other Firestore value
 * (recognized by `isMaxValue` via its `__type__` field).
 */
const MAX_VALUE = {
    mapValue: {
        fields: {
            '__type__': { stringValue: MAX_VALUE_TYPE }
        }
    }
};
/** Singleton Value proto that sorts before every other Firestore value. */
const MIN_VALUE = {
    nullValue: 'NULL_VALUE'
};
  4385. /** Extracts the backend's type order for the provided value. */
  4386. function typeOrder(value) {
  4387. if ('nullValue' in value) {
  4388. return 0 /* TypeOrder.NullValue */;
  4389. }
  4390. else if ('booleanValue' in value) {
  4391. return 1 /* TypeOrder.BooleanValue */;
  4392. }
  4393. else if ('integerValue' in value || 'doubleValue' in value) {
  4394. return 2 /* TypeOrder.NumberValue */;
  4395. }
  4396. else if ('timestampValue' in value) {
  4397. return 3 /* TypeOrder.TimestampValue */;
  4398. }
  4399. else if ('stringValue' in value) {
  4400. return 5 /* TypeOrder.StringValue */;
  4401. }
  4402. else if ('bytesValue' in value) {
  4403. return 6 /* TypeOrder.BlobValue */;
  4404. }
  4405. else if ('referenceValue' in value) {
  4406. return 7 /* TypeOrder.RefValue */;
  4407. }
  4408. else if ('geoPointValue' in value) {
  4409. return 8 /* TypeOrder.GeoPointValue */;
  4410. }
  4411. else if ('arrayValue' in value) {
  4412. return 9 /* TypeOrder.ArrayValue */;
  4413. }
  4414. else if ('mapValue' in value) {
  4415. if (isServerTimestamp(value)) {
  4416. return 4 /* TypeOrder.ServerTimestampValue */;
  4417. }
  4418. else if (isMaxValue(value)) {
  4419. return 9007199254740991 /* TypeOrder.MaxValue */;
  4420. }
  4421. return 10 /* TypeOrder.ObjectValue */;
  4422. }
  4423. else {
  4424. return fail();
  4425. }
  4426. }
  4427. /** Tests `left` and `right` for equality based on the backend semantics. */
  4428. function valueEquals(left, right) {
  4429. if (left === right) {
  4430. return true;
  4431. }
  4432. const leftType = typeOrder(left);
  4433. const rightType = typeOrder(right);
  4434. if (leftType !== rightType) {
  4435. return false;
  4436. }
  4437. switch (leftType) {
  4438. case 0 /* TypeOrder.NullValue */:
  4439. return true;
  4440. case 1 /* TypeOrder.BooleanValue */:
  4441. return left.booleanValue === right.booleanValue;
  4442. case 4 /* TypeOrder.ServerTimestampValue */:
  4443. return getLocalWriteTime(left).isEqual(getLocalWriteTime(right));
  4444. case 3 /* TypeOrder.TimestampValue */:
  4445. return timestampEquals(left, right);
  4446. case 5 /* TypeOrder.StringValue */:
  4447. return left.stringValue === right.stringValue;
  4448. case 6 /* TypeOrder.BlobValue */:
  4449. return blobEquals(left, right);
  4450. case 7 /* TypeOrder.RefValue */:
  4451. return left.referenceValue === right.referenceValue;
  4452. case 8 /* TypeOrder.GeoPointValue */:
  4453. return geoPointEquals(left, right);
  4454. case 2 /* TypeOrder.NumberValue */:
  4455. return numberEquals(left, right);
  4456. case 9 /* TypeOrder.ArrayValue */:
  4457. return arrayEquals(left.arrayValue.values || [], right.arrayValue.values || [], valueEquals);
  4458. case 10 /* TypeOrder.ObjectValue */:
  4459. return objectEquals(left, right);
  4460. case 9007199254740991 /* TypeOrder.MaxValue */:
  4461. return true;
  4462. default:
  4463. return fail();
  4464. }
  4465. }
  4466. function timestampEquals(left, right) {
  4467. if (typeof left.timestampValue === 'string' &&
  4468. typeof right.timestampValue === 'string' &&
  4469. left.timestampValue.length === right.timestampValue.length) {
  4470. // Use string equality for ISO 8601 timestamps
  4471. return left.timestampValue === right.timestampValue;
  4472. }
  4473. const leftTimestamp = normalizeTimestamp(left.timestampValue);
  4474. const rightTimestamp = normalizeTimestamp(right.timestampValue);
  4475. return (leftTimestamp.seconds === rightTimestamp.seconds &&
  4476. leftTimestamp.nanos === rightTimestamp.nanos);
  4477. }
  4478. function geoPointEquals(left, right) {
  4479. return (normalizeNumber(left.geoPointValue.latitude) ===
  4480. normalizeNumber(right.geoPointValue.latitude) &&
  4481. normalizeNumber(left.geoPointValue.longitude) ===
  4482. normalizeNumber(right.geoPointValue.longitude));
  4483. }
  4484. function blobEquals(left, right) {
  4485. return normalizeByteString(left.bytesValue).isEqual(normalizeByteString(right.bytesValue));
  4486. }
  4487. function numberEquals(left, right) {
  4488. if ('integerValue' in left && 'integerValue' in right) {
  4489. return (normalizeNumber(left.integerValue) === normalizeNumber(right.integerValue));
  4490. }
  4491. else if ('doubleValue' in left && 'doubleValue' in right) {
  4492. const n1 = normalizeNumber(left.doubleValue);
  4493. const n2 = normalizeNumber(right.doubleValue);
  4494. if (n1 === n2) {
  4495. return isNegativeZero(n1) === isNegativeZero(n2);
  4496. }
  4497. else {
  4498. return isNaN(n1) && isNaN(n2);
  4499. }
  4500. }
  4501. return false;
  4502. }
  4503. function objectEquals(left, right) {
  4504. const leftMap = left.mapValue.fields || {};
  4505. const rightMap = right.mapValue.fields || {};
  4506. if (objectSize(leftMap) !== objectSize(rightMap)) {
  4507. return false;
  4508. }
  4509. for (const key in leftMap) {
  4510. if (leftMap.hasOwnProperty(key)) {
  4511. if (rightMap[key] === undefined ||
  4512. !valueEquals(leftMap[key], rightMap[key])) {
  4513. return false;
  4514. }
  4515. }
  4516. }
  4517. return true;
  4518. }
  4519. /** Returns true if the ArrayValue contains the specified element. */
  4520. function arrayValueContains(haystack, needle) {
  4521. return ((haystack.values || []).find(v => valueEquals(v, needle)) !== undefined);
  4522. }
  4523. function valueCompare(left, right) {
  4524. if (left === right) {
  4525. return 0;
  4526. }
  4527. const leftType = typeOrder(left);
  4528. const rightType = typeOrder(right);
  4529. if (leftType !== rightType) {
  4530. return primitiveComparator(leftType, rightType);
  4531. }
  4532. switch (leftType) {
  4533. case 0 /* TypeOrder.NullValue */:
  4534. case 9007199254740991 /* TypeOrder.MaxValue */:
  4535. return 0;
  4536. case 1 /* TypeOrder.BooleanValue */:
  4537. return primitiveComparator(left.booleanValue, right.booleanValue);
  4538. case 2 /* TypeOrder.NumberValue */:
  4539. return compareNumbers(left, right);
  4540. case 3 /* TypeOrder.TimestampValue */:
  4541. return compareTimestamps(left.timestampValue, right.timestampValue);
  4542. case 4 /* TypeOrder.ServerTimestampValue */:
  4543. return compareTimestamps(getLocalWriteTime(left), getLocalWriteTime(right));
  4544. case 5 /* TypeOrder.StringValue */:
  4545. return primitiveComparator(left.stringValue, right.stringValue);
  4546. case 6 /* TypeOrder.BlobValue */:
  4547. return compareBlobs(left.bytesValue, right.bytesValue);
  4548. case 7 /* TypeOrder.RefValue */:
  4549. return compareReferences(left.referenceValue, right.referenceValue);
  4550. case 8 /* TypeOrder.GeoPointValue */:
  4551. return compareGeoPoints(left.geoPointValue, right.geoPointValue);
  4552. case 9 /* TypeOrder.ArrayValue */:
  4553. return compareArrays(left.arrayValue, right.arrayValue);
  4554. case 10 /* TypeOrder.ObjectValue */:
  4555. return compareMaps(left.mapValue, right.mapValue);
  4556. default:
  4557. throw fail();
  4558. }
  4559. }
  4560. function compareNumbers(left, right) {
  4561. const leftNumber = normalizeNumber(left.integerValue || left.doubleValue);
  4562. const rightNumber = normalizeNumber(right.integerValue || right.doubleValue);
  4563. if (leftNumber < rightNumber) {
  4564. return -1;
  4565. }
  4566. else if (leftNumber > rightNumber) {
  4567. return 1;
  4568. }
  4569. else if (leftNumber === rightNumber) {
  4570. return 0;
  4571. }
  4572. else {
  4573. // one or both are NaN.
  4574. if (isNaN(leftNumber)) {
  4575. return isNaN(rightNumber) ? 0 : -1;
  4576. }
  4577. else {
  4578. return 1;
  4579. }
  4580. }
  4581. }
  4582. function compareTimestamps(left, right) {
  4583. if (typeof left === 'string' &&
  4584. typeof right === 'string' &&
  4585. left.length === right.length) {
  4586. return primitiveComparator(left, right);
  4587. }
  4588. const leftTimestamp = normalizeTimestamp(left);
  4589. const rightTimestamp = normalizeTimestamp(right);
  4590. const comparison = primitiveComparator(leftTimestamp.seconds, rightTimestamp.seconds);
  4591. if (comparison !== 0) {
  4592. return comparison;
  4593. }
  4594. return primitiveComparator(leftTimestamp.nanos, rightTimestamp.nanos);
  4595. }
  4596. function compareReferences(leftPath, rightPath) {
  4597. const leftSegments = leftPath.split('/');
  4598. const rightSegments = rightPath.split('/');
  4599. for (let i = 0; i < leftSegments.length && i < rightSegments.length; i++) {
  4600. const comparison = primitiveComparator(leftSegments[i], rightSegments[i]);
  4601. if (comparison !== 0) {
  4602. return comparison;
  4603. }
  4604. }
  4605. return primitiveComparator(leftSegments.length, rightSegments.length);
  4606. }
  4607. function compareGeoPoints(left, right) {
  4608. const comparison = primitiveComparator(normalizeNumber(left.latitude), normalizeNumber(right.latitude));
  4609. if (comparison !== 0) {
  4610. return comparison;
  4611. }
  4612. return primitiveComparator(normalizeNumber(left.longitude), normalizeNumber(right.longitude));
  4613. }
  4614. function compareBlobs(left, right) {
  4615. const leftBytes = normalizeByteString(left);
  4616. const rightBytes = normalizeByteString(right);
  4617. return leftBytes.compareTo(rightBytes);
  4618. }
  4619. function compareArrays(left, right) {
  4620. const leftArray = left.values || [];
  4621. const rightArray = right.values || [];
  4622. for (let i = 0; i < leftArray.length && i < rightArray.length; ++i) {
  4623. const compare = valueCompare(leftArray[i], rightArray[i]);
  4624. if (compare) {
  4625. return compare;
  4626. }
  4627. }
  4628. return primitiveComparator(leftArray.length, rightArray.length);
  4629. }
  4630. function compareMaps(left, right) {
  4631. if (left === MAX_VALUE.mapValue && right === MAX_VALUE.mapValue) {
  4632. return 0;
  4633. }
  4634. else if (left === MAX_VALUE.mapValue) {
  4635. return 1;
  4636. }
  4637. else if (right === MAX_VALUE.mapValue) {
  4638. return -1;
  4639. }
  4640. const leftMap = left.fields || {};
  4641. const leftKeys = Object.keys(leftMap);
  4642. const rightMap = right.fields || {};
  4643. const rightKeys = Object.keys(rightMap);
  4644. // Even though MapValues are likely sorted correctly based on their insertion
  4645. // order (e.g. when received from the backend), local modifications can bring
  4646. // elements out of order. We need to re-sort the elements to ensure that
  4647. // canonical IDs are independent of insertion order.
  4648. leftKeys.sort();
  4649. rightKeys.sort();
  4650. for (let i = 0; i < leftKeys.length && i < rightKeys.length; ++i) {
  4651. const keyCompare = primitiveComparator(leftKeys[i], rightKeys[i]);
  4652. if (keyCompare !== 0) {
  4653. return keyCompare;
  4654. }
  4655. const compare = valueCompare(leftMap[leftKeys[i]], rightMap[rightKeys[i]]);
  4656. if (compare !== 0) {
  4657. return compare;
  4658. }
  4659. }
  4660. return primitiveComparator(leftKeys.length, rightKeys.length);
  4661. }
/**
 * Generates the canonical ID for the provided field value (as used in Target
 * serialization).
 */
function canonicalId(value) {
    // Thin public wrapper over the recursive canonifyValue helper.
    return canonifyValue(value);
}
  4669. function canonifyValue(value) {
  4670. if ('nullValue' in value) {
  4671. return 'null';
  4672. }
  4673. else if ('booleanValue' in value) {
  4674. return '' + value.booleanValue;
  4675. }
  4676. else if ('integerValue' in value) {
  4677. return '' + value.integerValue;
  4678. }
  4679. else if ('doubleValue' in value) {
  4680. return '' + value.doubleValue;
  4681. }
  4682. else if ('timestampValue' in value) {
  4683. return canonifyTimestamp(value.timestampValue);
  4684. }
  4685. else if ('stringValue' in value) {
  4686. return value.stringValue;
  4687. }
  4688. else if ('bytesValue' in value) {
  4689. return canonifyByteString(value.bytesValue);
  4690. }
  4691. else if ('referenceValue' in value) {
  4692. return canonifyReference(value.referenceValue);
  4693. }
  4694. else if ('geoPointValue' in value) {
  4695. return canonifyGeoPoint(value.geoPointValue);
  4696. }
  4697. else if ('arrayValue' in value) {
  4698. return canonifyArray(value.arrayValue);
  4699. }
  4700. else if ('mapValue' in value) {
  4701. return canonifyMap(value.mapValue);
  4702. }
  4703. else {
  4704. return fail();
  4705. }
  4706. }
  4707. function canonifyByteString(byteString) {
  4708. return normalizeByteString(byteString).toBase64();
  4709. }
  4710. function canonifyTimestamp(timestamp) {
  4711. const normalizedTimestamp = normalizeTimestamp(timestamp);
  4712. return `time(${normalizedTimestamp.seconds},${normalizedTimestamp.nanos})`;
  4713. }
  4714. function canonifyGeoPoint(geoPoint) {
  4715. return `geo(${geoPoint.latitude},${geoPoint.longitude})`;
  4716. }
/** Canonical form of a reference value: its document key path string. */
function canonifyReference(referenceValue) {
    return DocumentKey.fromName(referenceValue).toString();
}
  4720. function canonifyMap(mapValue) {
  4721. // Iteration order in JavaScript is not guaranteed. To ensure that we generate
  4722. // matching canonical IDs for identical maps, we need to sort the keys.
  4723. const sortedKeys = Object.keys(mapValue.fields || {}).sort();
  4724. let result = '{';
  4725. let first = true;
  4726. for (const key of sortedKeys) {
  4727. if (!first) {
  4728. result += ',';
  4729. }
  4730. else {
  4731. first = false;
  4732. }
  4733. result += `${key}:${canonifyValue(mapValue.fields[key])}`;
  4734. }
  4735. return result + '}';
  4736. }
  4737. function canonifyArray(arrayValue) {
  4738. let result = '[';
  4739. let first = true;
  4740. for (const value of arrayValue.values || []) {
  4741. if (!first) {
  4742. result += ',';
  4743. }
  4744. else {
  4745. first = false;
  4746. }
  4747. result += canonifyValue(value);
  4748. }
  4749. return result + ']';
  4750. }
  4751. /** Returns a reference value for the provided database and key. */
  4752. function refValue(databaseId, key) {
  4753. return {
  4754. referenceValue: `projects/${databaseId.projectId}/databases/${databaseId.database}/documents/${key.path.canonicalString()}`
  4755. };
  4756. }
  4757. /** Returns true if `value` is an IntegerValue . */
  4758. function isInteger(value) {
  4759. return !!value && 'integerValue' in value;
  4760. }
  4761. /** Returns true if `value` is a DoubleValue. */
  4762. function isDouble(value) {
  4763. return !!value && 'doubleValue' in value;
  4764. }
  4765. /** Returns true if `value` is either an IntegerValue or a DoubleValue. */
  4766. function isNumber(value) {
  4767. return isInteger(value) || isDouble(value);
  4768. }
  4769. /** Returns true if `value` is an ArrayValue. */
  4770. function isArray(value) {
  4771. return !!value && 'arrayValue' in value;
  4772. }
  4773. /** Returns true if `value` is a NullValue. */
  4774. function isNullValue(value) {
  4775. return !!value && 'nullValue' in value;
  4776. }
  4777. /** Returns true if `value` is NaN. */
  4778. function isNanValue(value) {
  4779. return !!value && 'doubleValue' in value && isNaN(Number(value.doubleValue));
  4780. }
  4781. /** Returns true if `value` is a MapValue. */
  4782. function isMapValue(value) {
  4783. return !!value && 'mapValue' in value;
  4784. }
  4785. /** Creates a deep copy of `source`. */
  4786. function deepClone(source) {
  4787. if (source.geoPointValue) {
  4788. return { geoPointValue: Object.assign({}, source.geoPointValue) };
  4789. }
  4790. else if (source.timestampValue &&
  4791. typeof source.timestampValue === 'object') {
  4792. return { timestampValue: Object.assign({}, source.timestampValue) };
  4793. }
  4794. else if (source.mapValue) {
  4795. const target = { mapValue: { fields: {} } };
  4796. forEach(source.mapValue.fields, (key, val) => (target.mapValue.fields[key] = deepClone(val)));
  4797. return target;
  4798. }
  4799. else if (source.arrayValue) {
  4800. const target = { arrayValue: { values: [] } };
  4801. for (let i = 0; i < (source.arrayValue.values || []).length; ++i) {
  4802. target.arrayValue.values[i] = deepClone(source.arrayValue.values[i]);
  4803. }
  4804. return target;
  4805. }
  4806. else {
  4807. return Object.assign({}, source);
  4808. }
  4809. }
  4810. /** Returns true if the Value represents the canonical {@link #MAX_VALUE} . */
  4811. function isMaxValue(value) {
  4812. return ((((value.mapValue || {}).fields || {})['__type__'] || {}).stringValue ===
  4813. MAX_VALUE_TYPE);
  4814. }
  4815. /** Returns the lowest value for the given value type (inclusive). */
  4816. function valuesGetLowerBound(value) {
  4817. if ('nullValue' in value) {
  4818. return MIN_VALUE;
  4819. }
  4820. else if ('booleanValue' in value) {
  4821. return { booleanValue: false };
  4822. }
  4823. else if ('integerValue' in value || 'doubleValue' in value) {
  4824. return { doubleValue: NaN };
  4825. }
  4826. else if ('timestampValue' in value) {
  4827. return { timestampValue: { seconds: Number.MIN_SAFE_INTEGER } };
  4828. }
  4829. else if ('stringValue' in value) {
  4830. return { stringValue: '' };
  4831. }
  4832. else if ('bytesValue' in value) {
  4833. return { bytesValue: '' };
  4834. }
  4835. else if ('referenceValue' in value) {
  4836. return refValue(DatabaseId.empty(), DocumentKey.empty());
  4837. }
  4838. else if ('geoPointValue' in value) {
  4839. return { geoPointValue: { latitude: -90, longitude: -180 } };
  4840. }
  4841. else if ('arrayValue' in value) {
  4842. return { arrayValue: {} };
  4843. }
  4844. else if ('mapValue' in value) {
  4845. return { mapValue: {} };
  4846. }
  4847. else {
  4848. return fail();
  4849. }
  4850. }
  4851. /** Returns the largest value for the given value type (exclusive). */
  4852. function valuesGetUpperBound(value) {
  4853. if ('nullValue' in value) {
  4854. return { booleanValue: false };
  4855. }
  4856. else if ('booleanValue' in value) {
  4857. return { doubleValue: NaN };
  4858. }
  4859. else if ('integerValue' in value || 'doubleValue' in value) {
  4860. return { timestampValue: { seconds: Number.MIN_SAFE_INTEGER } };
  4861. }
  4862. else if ('timestampValue' in value) {
  4863. return { stringValue: '' };
  4864. }
  4865. else if ('stringValue' in value) {
  4866. return { bytesValue: '' };
  4867. }
  4868. else if ('bytesValue' in value) {
  4869. return refValue(DatabaseId.empty(), DocumentKey.empty());
  4870. }
  4871. else if ('referenceValue' in value) {
  4872. return { geoPointValue: { latitude: -90, longitude: -180 } };
  4873. }
  4874. else if ('geoPointValue' in value) {
  4875. return { arrayValue: {} };
  4876. }
  4877. else if ('arrayValue' in value) {
  4878. return { mapValue: {} };
  4879. }
  4880. else if ('mapValue' in value) {
  4881. return MAX_VALUE;
  4882. }
  4883. else {
  4884. return fail();
  4885. }
  4886. }
  4887. function lowerBoundCompare(left, right) {
  4888. const cmp = valueCompare(left.value, right.value);
  4889. if (cmp !== 0) {
  4890. return cmp;
  4891. }
  4892. if (left.inclusive && !right.inclusive) {
  4893. return -1;
  4894. }
  4895. else if (!left.inclusive && right.inclusive) {
  4896. return 1;
  4897. }
  4898. return 0;
  4899. }
  4900. function upperBoundCompare(left, right) {
  4901. const cmp = valueCompare(left.value, right.value);
  4902. if (cmp !== 0) {
  4903. return cmp;
  4904. }
  4905. if (left.inclusive && !right.inclusive) {
  4906. return 1;
  4907. }
  4908. else if (!left.inclusive && right.inclusive) {
  4909. return -1;
  4910. }
  4911. return 0;
  4912. }
  4913. /**
  4914. * @license
  4915. * Copyright 2017 Google LLC
  4916. *
  4917. * Licensed under the Apache License, Version 2.0 (the "License");
  4918. * you may not use this file except in compliance with the License.
  4919. * You may obtain a copy of the License at
  4920. *
  4921. * http://www.apache.org/licenses/LICENSE-2.0
  4922. *
  4923. * Unless required by applicable law or agreed to in writing, software
  4924. * distributed under the License is distributed on an "AS IS" BASIS,
  4925. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  4926. * See the License for the specific language governing permissions and
  4927. * limitations under the License.
  4928. */
/**
 * An ObjectValue represents a MapValue in the Firestore Proto and offers the
 * ability to add and remove fields (via the ObjectValueBuilder).
 */
class ObjectValue {
    constructor(value) {
        // The backing `Value` proto; a mapValue at the root.
        this.value = value;
    }
    /** Returns an ObjectValue wrapping an empty map. */
    static empty() {
        return new ObjectValue({ mapValue: {} });
    }
    /**
     * Returns the value at the given path or null.
     *
     * @param path - the path to search
     * @returns The value at the path or null if the path is not set.
     */
    field(path) {
        if (path.isEmpty()) {
            // The empty path addresses the root map itself.
            return this.value;
        }
        else {
            let currentLevel = this.value;
            // Walk all but the last segment; bail out if any intermediate
            // node is missing or is not a map.
            for (let i = 0; i < path.length - 1; ++i) {
                currentLevel = (currentLevel.mapValue.fields || {})[path.get(i)];
                if (!isMapValue(currentLevel)) {
                    return null;
                }
            }
            currentLevel = (currentLevel.mapValue.fields || {})[path.lastSegment()];
            return currentLevel || null;
        }
    }
    /**
     * Sets the field to the provided value.
     *
     * @param path - The field path to set.
     * @param value - The value to set (deep-cloned, so later mutation of the
     *   caller's object does not affect this ObjectValue).
     */
    set(path, value) {
        const fieldsMap = this.getFieldsMap(path.popLast());
        fieldsMap[path.lastSegment()] = deepClone(value);
    }
    /**
     * Sets the provided fields to the provided values.
     *
     * Changes are batched per parent map: whenever the next entry's parent
     * differs from the current one, the accumulated upserts/deletes are
     * flushed into that parent before starting a new batch.
     *
     * @param data - A map of fields to values (or null for deletes).
     */
    setAll(data) {
        let parent = FieldPath$1.emptyPath();
        let upserts = {};
        let deletes = [];
        data.forEach((value, path) => {
            if (!parent.isImmediateParentOf(path)) {
                // Insert the accumulated changes at this parent location
                const fieldsMap = this.getFieldsMap(parent);
                this.applyChanges(fieldsMap, upserts, deletes);
                upserts = {};
                deletes = [];
                parent = path.popLast();
            }
            if (value) {
                upserts[path.lastSegment()] = deepClone(value);
            }
            else {
                // A null/undefined value marks the field for deletion.
                deletes.push(path.lastSegment());
            }
        });
        // Flush the final batch of changes.
        const fieldsMap = this.getFieldsMap(parent);
        this.applyChanges(fieldsMap, upserts, deletes);
    }
    /**
     * Removes the field at the specified path. If there is no field at the
     * specified path, nothing is changed.
     *
     * @param path - The field path to remove.
     */
    delete(path) {
        const nestedValue = this.field(path.popLast());
        if (isMapValue(nestedValue) && nestedValue.mapValue.fields) {
            delete nestedValue.mapValue.fields[path.lastSegment()];
        }
    }
    /** Returns true if `other` wraps a deeply-equal value. */
    isEqual(other) {
        return valueEquals(this.value, other.value);
    }
    /**
     * Returns the map that contains the leaf element of `path`. If the parent
     * entry does not yet exist, or if it is not a map, a new map will be created.
     */
    getFieldsMap(path) {
        let current = this.value;
        if (!current.mapValue.fields) {
            current.mapValue = { fields: {} };
        }
        for (let i = 0; i < path.length; ++i) {
            let next = current.mapValue.fields[path.get(i)];
            // Replace missing or non-map intermediate nodes with fresh maps,
            // mutating the tree in place.
            if (!isMapValue(next) || !next.mapValue.fields) {
                next = { mapValue: { fields: {} } };
                current.mapValue.fields[path.get(i)] = next;
            }
            current = next;
        }
        return current.mapValue.fields;
    }
    /**
     * Modifies `fieldsMap` by adding, replacing or deleting the specified
     * entries.
     */
    applyChanges(fieldsMap, inserts, deletes) {
        forEach(inserts, (key, val) => (fieldsMap[key] = val));
        for (const field of deletes) {
            delete fieldsMap[field];
        }
    }
    /** Returns a deep copy of this ObjectValue. */
    clone() {
        return new ObjectValue(deepClone(this.value));
    }
}
  5048. /**
  5049. * Returns a FieldMask built from all fields in a MapValue.
  5050. */
  5051. function extractFieldMask(value) {
  5052. const fields = [];
  5053. forEach(value.fields, (key, value) => {
  5054. const currentPath = new FieldPath$1([key]);
  5055. if (isMapValue(value)) {
  5056. const nestedMask = extractFieldMask(value.mapValue);
  5057. const nestedFields = nestedMask.fields;
  5058. if (nestedFields.length === 0) {
  5059. // Preserve the empty map by adding it to the FieldMask.
  5060. fields.push(currentPath);
  5061. }
  5062. else {
  5063. // For nested and non-empty ObjectValues, add the FieldPath of the
  5064. // leaf nodes.
  5065. for (const nestedPath of nestedFields) {
  5066. fields.push(currentPath.child(nestedPath));
  5067. }
  5068. }
  5069. }
  5070. else {
  5071. // For nested and non-empty ObjectValues, add the FieldPath of the leaf
  5072. // nodes.
  5073. fields.push(currentPath);
  5074. }
  5075. });
  5076. return new FieldMask(fields);
  5077. }
  5078. /**
  5079. * @license
  5080. * Copyright 2017 Google LLC
  5081. *
  5082. * Licensed under the Apache License, Version 2.0 (the "License");
  5083. * you may not use this file except in compliance with the License.
  5084. * You may obtain a copy of the License at
  5085. *
  5086. * http://www.apache.org/licenses/LICENSE-2.0
  5087. *
  5088. * Unless required by applicable law or agreed to in writing, software
  5089. * distributed under the License is distributed on an "AS IS" BASIS,
  5090. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  5091. * See the License for the specific language governing permissions and
  5092. * limitations under the License.
  5093. */
  5094. /**
  5095. * Represents a document in Firestore with a key, version, data and whether it
  5096. * has local mutations applied to it.
  5097. *
  5098. * Documents can transition between states via `convertToFoundDocument()`,
  5099. * `convertToNoDocument()` and `convertToUnknownDocument()`. If a document does
  5100. * not transition to one of these states even after all mutations have been
  5101. * applied, `isValidDocument()` returns false and the document should be removed
  5102. * from all views.
  5103. */
class MutableDocument {
    /**
     * @param key - Key identifying the document.
     * @param documentType - Inline DocumentType code: 0 INVALID,
     *     1 FOUND_DOCUMENT, 2 NO_DOCUMENT, 3 UNKNOWN_DOCUMENT.
     * @param version - Snapshot version of the document.
     * @param readTime - Time at which this document was last read.
     * @param createTime - Creation time (SnapshotVersion.min() when unknown).
     * @param data - The document contents as an ObjectValue.
     * @param documentState - Inline DocumentState code: 0 SYNCED,
     *     1 HAS_LOCAL_MUTATIONS, 2 HAS_COMMITTED_MUTATIONS.
     */
    constructor(key, documentType, version, readTime, createTime, data, documentState) {
        this.key = key;
        this.documentType = documentType;
        this.version = version;
        this.readTime = readTime;
        this.createTime = createTime;
        this.data = data;
        this.documentState = documentState;
    }
    /**
     * Creates a document with no known version or data, but which can serve as
     * base document for mutations.
     */
    static newInvalidDocument(documentKey) {
        return new MutableDocument(documentKey, 0 /* DocumentType.INVALID */,
        /* version */ SnapshotVersion.min(),
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ SnapshotVersion.min(), ObjectValue.empty(), 0 /* DocumentState.SYNCED */);
    }
    /**
     * Creates a new document that is known to exist with the given data at the
     * given version.
     */
    static newFoundDocument(documentKey, version, createTime, value) {
        return new MutableDocument(documentKey, 1 /* DocumentType.FOUND_DOCUMENT */,
        /* version */ version,
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ createTime, value, 0 /* DocumentState.SYNCED */);
    }
    /** Creates a new document that is known to not exist at the given version. */
    static newNoDocument(documentKey, version) {
        return new MutableDocument(documentKey, 2 /* DocumentType.NO_DOCUMENT */,
        /* version */ version,
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ SnapshotVersion.min(), ObjectValue.empty(), 0 /* DocumentState.SYNCED */);
    }
    /**
     * Creates a new document that is known to exist at the given version but
     * whose data is not known (e.g. a document that was updated without a known
     * base document).
     */
    static newUnknownDocument(documentKey, version) {
        return new MutableDocument(documentKey, 3 /* DocumentType.UNKNOWN_DOCUMENT */,
        /* version */ version,
        /* readTime */ SnapshotVersion.min(),
        /* createTime */ SnapshotVersion.min(), ObjectValue.empty(), 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */);
    }
    /**
     * Changes the document type to indicate that it exists and that its version
     * and data are known.
     */
    convertToFoundDocument(version, value) {
        // If a document is switching state from being an invalid or deleted
        // document to a valid (FOUND_DOCUMENT) document, either due to receiving an
        // update from Watch or due to applying a local set mutation on top
        // of a deleted document, our best guess about its createTime would be the
        // version at which the document transitioned to a FOUND_DOCUMENT.
        if (this.createTime.isEqual(SnapshotVersion.min()) &&
            (this.documentType === 2 /* DocumentType.NO_DOCUMENT */ ||
                this.documentType === 0 /* DocumentType.INVALID */)) {
            this.createTime = version;
        }
        this.version = version;
        this.documentType = 1 /* DocumentType.FOUND_DOCUMENT */;
        this.data = value;
        this.documentState = 0 /* DocumentState.SYNCED */;
        return this;
    }
    /**
     * Changes the document type to indicate that it doesn't exist at the given
     * version.
     */
    convertToNoDocument(version) {
        this.version = version;
        this.documentType = 2 /* DocumentType.NO_DOCUMENT */;
        this.data = ObjectValue.empty();
        this.documentState = 0 /* DocumentState.SYNCED */;
        return this;
    }
    /**
     * Changes the document type to indicate that it exists at a given version but
     * that its data is not known (e.g. a document that was updated without a known
     * base document).
     */
    convertToUnknownDocument(version) {
        this.version = version;
        this.documentType = 3 /* DocumentType.UNKNOWN_DOCUMENT */;
        this.data = ObjectValue.empty();
        this.documentState = 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */;
        return this;
    }
    /** Marks the document's mutations as committed on the backend. Returns `this` for chaining. */
    setHasCommittedMutations() {
        this.documentState = 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */;
        return this;
    }
    /** Marks the document as having local mutations and resets its version to min(). Returns `this`. */
    setHasLocalMutations() {
        this.documentState = 1 /* DocumentState.HAS_LOCAL_MUTATIONS */;
        this.version = SnapshotVersion.min();
        return this;
    }
    /** Updates the read time. Returns `this` for chaining. */
    setReadTime(readTime) {
        this.readTime = readTime;
        return this;
    }
    get hasLocalMutations() {
        return this.documentState === 1 /* DocumentState.HAS_LOCAL_MUTATIONS */;
    }
    get hasCommittedMutations() {
        return this.documentState === 2 /* DocumentState.HAS_COMMITTED_MUTATIONS */;
    }
    /** True when the document has either local or committed-but-unacked mutations. */
    get hasPendingWrites() {
        return this.hasLocalMutations || this.hasCommittedMutations;
    }
    isValidDocument() {
        return this.documentType !== 0 /* DocumentType.INVALID */;
    }
    isFoundDocument() {
        return this.documentType === 1 /* DocumentType.FOUND_DOCUMENT */;
    }
    isNoDocument() {
        return this.documentType === 2 /* DocumentType.NO_DOCUMENT */;
    }
    isUnknownDocument() {
        return this.documentType === 3 /* DocumentType.UNKNOWN_DOCUMENT */;
    }
    // NOTE: readTime and createTime are intentionally excluded from equality.
    isEqual(other) {
        return (other instanceof MutableDocument &&
            this.key.isEqual(other.key) &&
            this.version.isEqual(other.version) &&
            this.documentType === other.documentType &&
            this.documentState === other.documentState &&
            this.data.isEqual(other.data));
    }
    /** Returns a copy whose data is cloned so mutations do not alias this instance. */
    mutableCopy() {
        return new MutableDocument(this.key, this.documentType, this.version, this.readTime, this.createTime, this.data.clone(), this.documentState);
    }
    toString() {
        return (`Document(${this.key}, ${this.version}, ${JSON.stringify(this.data.value)}, ` +
            `{createTime: ${this.createTime}}), ` +
            `{documentType: ${this.documentType}}), ` +
            `{documentState: ${this.documentState}})`);
    }
}
  5248. /**
  5249. * Compares the value for field `field` in the provided documents. Throws if
  5250. * the field does not exist in both documents.
  5251. */
  5252. function compareDocumentsByField(field, d1, d2) {
  5253. const v1 = d1.data.field(field);
  5254. const v2 = d2.data.field(field);
  5255. if (v1 !== null && v2 !== null) {
  5256. return valueCompare(v1, v2);
  5257. }
  5258. else {
  5259. return fail();
  5260. }
  5261. }
  5262. /**
  5263. * @license
  5264. * Copyright 2022 Google LLC
  5265. *
  5266. * Licensed under the Apache License, Version 2.0 (the "License");
  5267. * you may not use this file except in compliance with the License.
  5268. * You may obtain a copy of the License at
  5269. *
  5270. * http://www.apache.org/licenses/LICENSE-2.0
  5271. *
  5272. * Unless required by applicable law or agreed to in writing, software
  5273. * distributed under the License is distributed on an "AS IS" BASIS,
  5274. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  5275. * See the License for the specific language governing permissions and
  5276. * limitations under the License.
  5277. */
  5278. /**
  5279. * Represents a bound of a query.
  5280. *
  5281. * The bound is specified with the given components representing a position and
  5282. * whether it's just before or just after the position (relative to whatever the
  5283. * query order is).
  5284. *
  5285. * The position represents a logical index position for a query. It's a prefix
  5286. * of values for the (potentially implicit) order by clauses of a query.
  5287. *
  5288. * Bound provides a function to determine whether a document comes before or
  5289. * after a bound. This is influenced by whether the position is just before or
  5290. * just after the provided values.
  5291. */
class Bound {
    /**
     * @param position - Component values forming the logical index position of
     *     this bound (a prefix of the query's order-by values).
     * @param inclusive - Whether a document located exactly at `position` is
     *     included by the bound.
     */
    constructor(position, inclusive) {
        this.position = position;
        this.inclusive = inclusive;
    }
}
  5298. function boundCompareToDocument(bound, orderBy, doc) {
  5299. let comparison = 0;
  5300. for (let i = 0; i < bound.position.length; i++) {
  5301. const orderByComponent = orderBy[i];
  5302. const component = bound.position[i];
  5303. if (orderByComponent.field.isKeyField()) {
  5304. comparison = DocumentKey.comparator(DocumentKey.fromName(component.referenceValue), doc.key);
  5305. }
  5306. else {
  5307. const docValue = doc.data.field(orderByComponent.field);
  5308. comparison = valueCompare(component, docValue);
  5309. }
  5310. if (orderByComponent.dir === "desc" /* Direction.DESCENDING */) {
  5311. comparison = comparison * -1;
  5312. }
  5313. if (comparison !== 0) {
  5314. break;
  5315. }
  5316. }
  5317. return comparison;
  5318. }
  5319. /**
  5320. * Returns true if a document sorts after a bound using the provided sort
  5321. * order.
  5322. */
  5323. function boundSortsAfterDocument(bound, orderBy, doc) {
  5324. const comparison = boundCompareToDocument(bound, orderBy, doc);
  5325. return bound.inclusive ? comparison >= 0 : comparison > 0;
  5326. }
  5327. /**
  5328. * Returns true if a document sorts before a bound using the provided sort
  5329. * order.
  5330. */
  5331. function boundSortsBeforeDocument(bound, orderBy, doc) {
  5332. const comparison = boundCompareToDocument(bound, orderBy, doc);
  5333. return bound.inclusive ? comparison <= 0 : comparison < 0;
  5334. }
  5335. function boundEquals(left, right) {
  5336. if (left === null) {
  5337. return right === null;
  5338. }
  5339. else if (right === null) {
  5340. return false;
  5341. }
  5342. if (left.inclusive !== right.inclusive ||
  5343. left.position.length !== right.position.length) {
  5344. return false;
  5345. }
  5346. for (let i = 0; i < left.position.length; i++) {
  5347. const leftPosition = left.position[i];
  5348. const rightPosition = right.position[i];
  5349. if (!valueEquals(leftPosition, rightPosition)) {
  5350. return false;
  5351. }
  5352. }
  5353. return true;
  5354. }
  5355. /**
  5356. * @license
  5357. * Copyright 2022 Google LLC
  5358. *
  5359. * Licensed under the Apache License, Version 2.0 (the "License");
  5360. * you may not use this file except in compliance with the License.
  5361. * You may obtain a copy of the License at
  5362. *
  5363. * http://www.apache.org/licenses/LICENSE-2.0
  5364. *
  5365. * Unless required by applicable law or agreed to in writing, software
  5366. * distributed under the License is distributed on an "AS IS" BASIS,
  5367. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  5368. * See the License for the specific language governing permissions and
  5369. * limitations under the License.
  5370. */
/** Abstract base class for query filters (FieldFilter and CompositeFilter). */
class Filter {
}
  5373. class FieldFilter extends Filter {
  5374. constructor(field, op, value) {
  5375. super();
  5376. this.field = field;
  5377. this.op = op;
  5378. this.value = value;
  5379. }
  5380. /**
  5381. * Creates a filter based on the provided arguments.
  5382. */
  5383. static create(field, op, value) {
  5384. if (field.isKeyField()) {
  5385. if (op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */) {
  5386. return this.createKeyFieldInFilter(field, op, value);
  5387. }
  5388. else {
  5389. return new KeyFieldFilter(field, op, value);
  5390. }
  5391. }
  5392. else if (op === "array-contains" /* Operator.ARRAY_CONTAINS */) {
  5393. return new ArrayContainsFilter(field, value);
  5394. }
  5395. else if (op === "in" /* Operator.IN */) {
  5396. return new InFilter(field, value);
  5397. }
  5398. else if (op === "not-in" /* Operator.NOT_IN */) {
  5399. return new NotInFilter(field, value);
  5400. }
  5401. else if (op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
  5402. return new ArrayContainsAnyFilter(field, value);
  5403. }
  5404. else {
  5405. return new FieldFilter(field, op, value);
  5406. }
  5407. }
  5408. static createKeyFieldInFilter(field, op, value) {
  5409. return op === "in" /* Operator.IN */
  5410. ? new KeyFieldInFilter(field, value)
  5411. : new KeyFieldNotInFilter(field, value);
  5412. }
  5413. matches(doc) {
  5414. const other = doc.data.field(this.field);
  5415. // Types do not have to match in NOT_EQUAL filters.
  5416. if (this.op === "!=" /* Operator.NOT_EQUAL */) {
  5417. return (other !== null &&
  5418. this.matchesComparison(valueCompare(other, this.value)));
  5419. }
  5420. // Only compare types with matching backend order (such as double and int).
  5421. return (other !== null &&
  5422. typeOrder(this.value) === typeOrder(other) &&
  5423. this.matchesComparison(valueCompare(other, this.value)));
  5424. }
  5425. matchesComparison(comparison) {
  5426. switch (this.op) {
  5427. case "<" /* Operator.LESS_THAN */:
  5428. return comparison < 0;
  5429. case "<=" /* Operator.LESS_THAN_OR_EQUAL */:
  5430. return comparison <= 0;
  5431. case "==" /* Operator.EQUAL */:
  5432. return comparison === 0;
  5433. case "!=" /* Operator.NOT_EQUAL */:
  5434. return comparison !== 0;
  5435. case ">" /* Operator.GREATER_THAN */:
  5436. return comparison > 0;
  5437. case ">=" /* Operator.GREATER_THAN_OR_EQUAL */:
  5438. return comparison >= 0;
  5439. default:
  5440. return fail();
  5441. }
  5442. }
  5443. isInequality() {
  5444. return ([
  5445. "<" /* Operator.LESS_THAN */,
  5446. "<=" /* Operator.LESS_THAN_OR_EQUAL */,
  5447. ">" /* Operator.GREATER_THAN */,
  5448. ">=" /* Operator.GREATER_THAN_OR_EQUAL */,
  5449. "!=" /* Operator.NOT_EQUAL */,
  5450. "not-in" /* Operator.NOT_IN */
  5451. ].indexOf(this.op) >= 0);
  5452. }
  5453. getFlattenedFilters() {
  5454. return [this];
  5455. }
  5456. getFilters() {
  5457. return [this];
  5458. }
  5459. getFirstInequalityField() {
  5460. if (this.isInequality()) {
  5461. return this.field;
  5462. }
  5463. return null;
  5464. }
  5465. }
  5466. class CompositeFilter extends Filter {
  5467. constructor(filters, op) {
  5468. super();
  5469. this.filters = filters;
  5470. this.op = op;
  5471. this.memoizedFlattenedFilters = null;
  5472. }
  5473. /**
  5474. * Creates a filter based on the provided arguments.
  5475. */
  5476. static create(filters, op) {
  5477. return new CompositeFilter(filters, op);
  5478. }
  5479. matches(doc) {
  5480. if (compositeFilterIsConjunction(this)) {
  5481. // For conjunctions, all filters must match, so return false if any filter doesn't match.
  5482. return this.filters.find(filter => !filter.matches(doc)) === undefined;
  5483. }
  5484. else {
  5485. // For disjunctions, at least one filter should match.
  5486. return this.filters.find(filter => filter.matches(doc)) !== undefined;
  5487. }
  5488. }
  5489. getFlattenedFilters() {
  5490. if (this.memoizedFlattenedFilters !== null) {
  5491. return this.memoizedFlattenedFilters;
  5492. }
  5493. this.memoizedFlattenedFilters = this.filters.reduce((result, subfilter) => {
  5494. return result.concat(subfilter.getFlattenedFilters());
  5495. }, []);
  5496. return this.memoizedFlattenedFilters;
  5497. }
  5498. // Returns a mutable copy of `this.filters`
  5499. getFilters() {
  5500. return Object.assign([], this.filters);
  5501. }
  5502. getFirstInequalityField() {
  5503. const found = this.findFirstMatchingFilter(filter => filter.isInequality());
  5504. if (found !== null) {
  5505. return found.field;
  5506. }
  5507. return null;
  5508. }
  5509. // Performs a depth-first search to find and return the first FieldFilter in the composite filter
  5510. // that satisfies the predicate. Returns `null` if none of the FieldFilters satisfy the
  5511. // predicate.
  5512. findFirstMatchingFilter(predicate) {
  5513. for (const fieldFilter of this.getFlattenedFilters()) {
  5514. if (predicate(fieldFilter)) {
  5515. return fieldFilter;
  5516. }
  5517. }
  5518. return null;
  5519. }
  5520. }
  5521. function compositeFilterIsConjunction(compositeFilter) {
  5522. return compositeFilter.op === "and" /* CompositeOperator.AND */;
  5523. }
  5524. function compositeFilterIsDisjunction(compositeFilter) {
  5525. return compositeFilter.op === "or" /* CompositeOperator.OR */;
  5526. }
  5527. /**
  5528. * Returns true if this filter is a conjunction of field filters only. Returns false otherwise.
  5529. */
  5530. function compositeFilterIsFlatConjunction(compositeFilter) {
  5531. return (compositeFilterIsFlat(compositeFilter) &&
  5532. compositeFilterIsConjunction(compositeFilter));
  5533. }
  5534. /**
  5535. * Returns true if this filter does not contain any composite filters. Returns false otherwise.
  5536. */
  5537. function compositeFilterIsFlat(compositeFilter) {
  5538. for (const filter of compositeFilter.filters) {
  5539. if (filter instanceof CompositeFilter) {
  5540. return false;
  5541. }
  5542. }
  5543. return true;
  5544. }
  5545. function canonifyFilter(filter) {
  5546. if (filter instanceof FieldFilter) {
  5547. // TODO(b/29183165): Technically, this won't be unique if two values have
  5548. // the same description, such as the int 3 and the string "3". So we should
  5549. // add the types in here somehow, too.
  5550. return (filter.field.canonicalString() +
  5551. filter.op.toString() +
  5552. canonicalId(filter.value));
  5553. }
  5554. else if (compositeFilterIsFlatConjunction(filter)) {
  5555. // Older SDK versions use an implicit AND operation between their filters.
  5556. // In the new SDK versions, the developer may use an explicit AND filter.
  5557. // To stay consistent with the old usages, we add a special case to ensure
  5558. // the canonical ID for these two are the same. For example:
  5559. // `col.whereEquals("a", 1).whereEquals("b", 2)` should have the same
  5560. // canonical ID as `col.where(and(equals("a",1), equals("b",2)))`.
  5561. return filter.filters.map(filter => canonifyFilter(filter)).join(',');
  5562. }
  5563. else {
  5564. // filter instanceof CompositeFilter
  5565. const canonicalIdsString = filter.filters
  5566. .map(filter => canonifyFilter(filter))
  5567. .join(',');
  5568. return `${filter.op}(${canonicalIdsString})`;
  5569. }
  5570. }
  5571. function filterEquals(f1, f2) {
  5572. if (f1 instanceof FieldFilter) {
  5573. return fieldFilterEquals(f1, f2);
  5574. }
  5575. else if (f1 instanceof CompositeFilter) {
  5576. return compositeFilterEquals(f1, f2);
  5577. }
  5578. else {
  5579. fail();
  5580. }
  5581. }
  5582. function fieldFilterEquals(f1, f2) {
  5583. return (f2 instanceof FieldFilter &&
  5584. f1.op === f2.op &&
  5585. f1.field.isEqual(f2.field) &&
  5586. valueEquals(f1.value, f2.value));
  5587. }
  5588. function compositeFilterEquals(f1, f2) {
  5589. if (f2 instanceof CompositeFilter &&
  5590. f1.op === f2.op &&
  5591. f1.filters.length === f2.filters.length) {
  5592. const subFiltersMatch = f1.filters.reduce((result, f1Filter, index) => result && filterEquals(f1Filter, f2.filters[index]), true);
  5593. return subFiltersMatch;
  5594. }
  5595. return false;
  5596. }
  5597. /**
  5598. * Returns a new composite filter that contains all filter from
  5599. * `compositeFilter` plus all the given filters in `otherFilters`.
  5600. */
  5601. function compositeFilterWithAddedFilters(compositeFilter, otherFilters) {
  5602. const mergedFilters = compositeFilter.filters.concat(otherFilters);
  5603. return CompositeFilter.create(mergedFilters, compositeFilter.op);
  5604. }
  5605. /** Returns a debug description for `filter`. */
  5606. function stringifyFilter(filter) {
  5607. if (filter instanceof FieldFilter) {
  5608. return stringifyFieldFilter(filter);
  5609. }
  5610. else if (filter instanceof CompositeFilter) {
  5611. return stringifyCompositeFilter(filter);
  5612. }
  5613. else {
  5614. return 'Filter';
  5615. }
  5616. }
  5617. function stringifyCompositeFilter(filter) {
  5618. return (filter.op.toString() +
  5619. ` {` +
  5620. filter.getFilters().map(stringifyFilter).join(' ,') +
  5621. '}');
  5622. }
  5623. function stringifyFieldFilter(filter) {
  5624. return `${filter.field.canonicalString()} ${filter.op} ${canonicalId(filter.value)}`;
  5625. }
  5626. /** Filter that matches on key fields (i.e. '__name__'). */
  5627. class KeyFieldFilter extends FieldFilter {
  5628. constructor(field, op, value) {
  5629. super(field, op, value);
  5630. this.key = DocumentKey.fromName(value.referenceValue);
  5631. }
  5632. matches(doc) {
  5633. const comparison = DocumentKey.comparator(doc.key, this.key);
  5634. return this.matchesComparison(comparison);
  5635. }
  5636. }
  5637. /** Filter that matches on key fields within an array. */
  5638. class KeyFieldInFilter extends FieldFilter {
  5639. constructor(field, value) {
  5640. super(field, "in" /* Operator.IN */, value);
  5641. this.keys = extractDocumentKeysFromArrayValue("in" /* Operator.IN */, value);
  5642. }
  5643. matches(doc) {
  5644. return this.keys.some(key => key.isEqual(doc.key));
  5645. }
  5646. }
  5647. /** Filter that matches on key fields not present within an array. */
  5648. class KeyFieldNotInFilter extends FieldFilter {
  5649. constructor(field, value) {
  5650. super(field, "not-in" /* Operator.NOT_IN */, value);
  5651. this.keys = extractDocumentKeysFromArrayValue("not-in" /* Operator.NOT_IN */, value);
  5652. }
  5653. matches(doc) {
  5654. return !this.keys.some(key => key.isEqual(doc.key));
  5655. }
  5656. }
  5657. function extractDocumentKeysFromArrayValue(op, value) {
  5658. var _a;
  5659. return (((_a = value.arrayValue) === null || _a === void 0 ? void 0 : _a.values) || []).map(v => {
  5660. return DocumentKey.fromName(v.referenceValue);
  5661. });
  5662. }
  5663. /** A Filter that implements the array-contains operator. */
  5664. class ArrayContainsFilter extends FieldFilter {
  5665. constructor(field, value) {
  5666. super(field, "array-contains" /* Operator.ARRAY_CONTAINS */, value);
  5667. }
  5668. matches(doc) {
  5669. const other = doc.data.field(this.field);
  5670. return isArray(other) && arrayValueContains(other.arrayValue, this.value);
  5671. }
  5672. }
  5673. /** A Filter that implements the IN operator. */
  5674. class InFilter extends FieldFilter {
  5675. constructor(field, value) {
  5676. super(field, "in" /* Operator.IN */, value);
  5677. }
  5678. matches(doc) {
  5679. const other = doc.data.field(this.field);
  5680. return other !== null && arrayValueContains(this.value.arrayValue, other);
  5681. }
  5682. }
  5683. /** A Filter that implements the not-in operator. */
  5684. class NotInFilter extends FieldFilter {
  5685. constructor(field, value) {
  5686. super(field, "not-in" /* Operator.NOT_IN */, value);
  5687. }
  5688. matches(doc) {
  5689. if (arrayValueContains(this.value.arrayValue, { nullValue: 'NULL_VALUE' })) {
  5690. return false;
  5691. }
  5692. const other = doc.data.field(this.field);
  5693. return other !== null && !arrayValueContains(this.value.arrayValue, other);
  5694. }
  5695. }
  5696. /** A Filter that implements the array-contains-any operator. */
  5697. class ArrayContainsAnyFilter extends FieldFilter {
  5698. constructor(field, value) {
  5699. super(field, "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */, value);
  5700. }
  5701. matches(doc) {
  5702. const other = doc.data.field(this.field);
  5703. if (!isArray(other) || !other.arrayValue.values) {
  5704. return false;
  5705. }
  5706. return other.arrayValue.values.some(val => arrayValueContains(this.value.arrayValue, val));
  5707. }
  5708. }
  5709. /**
  5710. * @license
  5711. * Copyright 2022 Google LLC
  5712. *
  5713. * Licensed under the Apache License, Version 2.0 (the "License");
  5714. * you may not use this file except in compliance with the License.
  5715. * You may obtain a copy of the License at
  5716. *
  5717. * http://www.apache.org/licenses/LICENSE-2.0
  5718. *
  5719. * Unless required by applicable law or agreed to in writing, software
  5720. * distributed under the License is distributed on an "AS IS" BASIS,
  5721. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  5722. * See the License for the specific language governing permissions and
  5723. * limitations under the License.
  5724. */
  5725. /**
  5726. * An ordering on a field, in some Direction. Direction defaults to ASCENDING.
  5727. */
class OrderBy {
    /**
     * @param field - The field path to order on.
     * @param dir - Sort direction; defaults to "asc" (ascending).
     */
    constructor(field, dir = "asc" /* Direction.ASCENDING */) {
        this.field = field;
        this.dir = dir;
    }
}
  5734. function canonifyOrderBy(orderBy) {
  5735. // TODO(b/29183165): Make this collision robust.
  5736. return orderBy.field.canonicalString() + orderBy.dir;
  5737. }
  5738. function stringifyOrderBy(orderBy) {
  5739. return `${orderBy.field.canonicalString()} (${orderBy.dir})`;
  5740. }
  5741. function orderByEquals(left, right) {
  5742. return left.dir === right.dir && left.field.isEqual(right.field);
  5743. }
  5744. /**
  5745. * @license
  5746. * Copyright 2019 Google LLC
  5747. *
  5748. * Licensed under the Apache License, Version 2.0 (the "License");
  5749. * you may not use this file except in compliance with the License.
  5750. * You may obtain a copy of the License at
  5751. *
  5752. * http://www.apache.org/licenses/LICENSE-2.0
  5753. *
  5754. * Unless required by applicable law or agreed to in writing, software
  5755. * distributed under the License is distributed on an "AS IS" BASIS,
  5756. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  5757. * See the License for the specific language governing permissions and
  5758. * limitations under the License.
  5759. */
// Visible for testing
class TargetImpl {
    /**
     * @param path - Resource path this target queries.
     * @param collectionGroup - Collection-group id, or null for a plain path target.
     * @param orderBy - Order-by clauses.
     * @param filters - Filter constraints.
     * @param limit - Maximum result count, or null for no limit.
     * @param startAt - Lower bound, or null.
     * @param endAt - Upper bound, or null.
     */
    constructor(path, collectionGroup = null, orderBy = [], filters = [], limit = null, startAt = null, endAt = null) {
        this.path = path;
        this.collectionGroup = collectionGroup;
        this.orderBy = orderBy;
        this.filters = filters;
        this.limit = limit;
        this.startAt = startAt;
        this.endAt = endAt;
        // Computed lazily by canonifyTarget().
        this.memoizedCanonicalId = null;
    }
}
  5773. /**
  5774. * Initializes a Target with a path and optional additional query constraints.
  5775. * Path must currently be empty if this is a collection group query.
  5776. *
  5777. * NOTE: you should always construct `Target` from `Query.toTarget` instead of
  5778. * using this factory method, because `Query` provides an implicit `orderBy`
  5779. * property.
  5780. */
  5781. function newTarget(path, collectionGroup = null, orderBy = [], filters = [], limit = null, startAt = null, endAt = null) {
  5782. return new TargetImpl(path, collectionGroup, orderBy, filters, limit, startAt, endAt);
  5783. }
  5784. function canonifyTarget(target) {
  5785. const targetImpl = debugCast(target);
  5786. if (targetImpl.memoizedCanonicalId === null) {
  5787. let str = targetImpl.path.canonicalString();
  5788. if (targetImpl.collectionGroup !== null) {
  5789. str += '|cg:' + targetImpl.collectionGroup;
  5790. }
  5791. str += '|f:';
  5792. str += targetImpl.filters.map(f => canonifyFilter(f)).join(',');
  5793. str += '|ob:';
  5794. str += targetImpl.orderBy.map(o => canonifyOrderBy(o)).join(',');
  5795. if (!isNullOrUndefined(targetImpl.limit)) {
  5796. str += '|l:';
  5797. str += targetImpl.limit;
  5798. }
  5799. if (targetImpl.startAt) {
  5800. str += '|lb:';
  5801. str += targetImpl.startAt.inclusive ? 'b:' : 'a:';
  5802. str += targetImpl.startAt.position.map(p => canonicalId(p)).join(',');
  5803. }
  5804. if (targetImpl.endAt) {
  5805. str += '|ub:';
  5806. str += targetImpl.endAt.inclusive ? 'a:' : 'b:';
  5807. str += targetImpl.endAt.position.map(p => canonicalId(p)).join(',');
  5808. }
  5809. targetImpl.memoizedCanonicalId = str;
  5810. }
  5811. return targetImpl.memoizedCanonicalId;
  5812. }
  5813. function stringifyTarget(target) {
  5814. let str = target.path.canonicalString();
  5815. if (target.collectionGroup !== null) {
  5816. str += ' collectionGroup=' + target.collectionGroup;
  5817. }
  5818. if (target.filters.length > 0) {
  5819. str += `, filters: [${target.filters
  5820. .map(f => stringifyFilter(f))
  5821. .join(', ')}]`;
  5822. }
  5823. if (!isNullOrUndefined(target.limit)) {
  5824. str += ', limit: ' + target.limit;
  5825. }
  5826. if (target.orderBy.length > 0) {
  5827. str += `, orderBy: [${target.orderBy
  5828. .map(o => stringifyOrderBy(o))
  5829. .join(', ')}]`;
  5830. }
  5831. if (target.startAt) {
  5832. str += ', startAt: ';
  5833. str += target.startAt.inclusive ? 'b:' : 'a:';
  5834. str += target.startAt.position.map(p => canonicalId(p)).join(',');
  5835. }
  5836. if (target.endAt) {
  5837. str += ', endAt: ';
  5838. str += target.endAt.inclusive ? 'a:' : 'b:';
  5839. str += target.endAt.position.map(p => canonicalId(p)).join(',');
  5840. }
  5841. return `Target(${str})`;
  5842. }
  5843. function targetEquals(left, right) {
  5844. if (left.limit !== right.limit) {
  5845. return false;
  5846. }
  5847. if (left.orderBy.length !== right.orderBy.length) {
  5848. return false;
  5849. }
  5850. for (let i = 0; i < left.orderBy.length; i++) {
  5851. if (!orderByEquals(left.orderBy[i], right.orderBy[i])) {
  5852. return false;
  5853. }
  5854. }
  5855. if (left.filters.length !== right.filters.length) {
  5856. return false;
  5857. }
  5858. for (let i = 0; i < left.filters.length; i++) {
  5859. if (!filterEquals(left.filters[i], right.filters[i])) {
  5860. return false;
  5861. }
  5862. }
  5863. if (left.collectionGroup !== right.collectionGroup) {
  5864. return false;
  5865. }
  5866. if (!left.path.isEqual(right.path)) {
  5867. return false;
  5868. }
  5869. if (!boundEquals(left.startAt, right.startAt)) {
  5870. return false;
  5871. }
  5872. return boundEquals(left.endAt, right.endAt);
  5873. }
  5874. function targetIsDocumentTarget(target) {
  5875. return (DocumentKey.isDocumentKey(target.path) &&
  5876. target.collectionGroup === null &&
  5877. target.filters.length === 0);
  5878. }
  5879. /** Returns the field filters that target the given field path. */
  5880. function targetGetFieldFiltersForPath(target, path) {
  5881. return target.filters.filter(f => f instanceof FieldFilter && f.field.isEqual(path));
  5882. }
/**
 * Returns the values that are used in ARRAY_CONTAINS or ARRAY_CONTAINS_ANY
 * filters. Returns `null` if there are no such filters.
 */
function targetGetArrayValues(target, fieldIndex) {
    // Only an index with an array segment can serve array filters.
    const segment = fieldIndexGetArraySegment(fieldIndex);
    if (segment === undefined) {
        return null;
    }
    // The first array filter on the segment's field wins.
    for (const fieldFilter of targetGetFieldFiltersForPath(target, segment.fieldPath)) {
        switch (fieldFilter.op) {
            case "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */:
                // The filter value is an arrayValue proto; return its elements
                // (an empty list when `values` is absent).
                return fieldFilter.value.arrayValue.values || [];
            case "array-contains" /* Operator.ARRAY_CONTAINS */:
                // Single value that must be contained.
                return [fieldFilter.value];
            // Remaining filters are not array filters.
        }
    }
    return null;
}
/**
 * Returns the list of values that are used in != or NOT_IN filters. Returns
 * `null` if there are no such filters.
 */
function targetGetNotInValues(target, fieldIndex) {
    // Keyed by canonical field path so a later filter on the same field
    // replaces an earlier one instead of duplicating it.
    const values = new Map();
    for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
        for (const fieldFilter of targetGetFieldFiltersForPath(target, segment.fieldPath)) {
            switch (fieldFilter.op) {
                case "==" /* Operator.EQUAL */:
                case "in" /* Operator.IN */:
                    // Encode equality prefix, which is encoded in the index value before
                    // the inequality (e.g. `a == 'a' && b != 'b'` is encoded to
                    // `value != 'ab'`).
                    values.set(segment.fieldPath.canonicalString(), fieldFilter.value);
                    break;
                case "not-in" /* Operator.NOT_IN */:
                case "!=" /* Operator.NOT_EQUAL */:
                    // NotIn/NotEqual is always a suffix. There cannot be any remaining
                    // segments and hence we can return early here.
                    values.set(segment.fieldPath.canonicalString(), fieldFilter.value);
                    return Array.from(values.values());
                // Remaining filters cannot be used as notIn bounds.
            }
        }
    }
    // No "!=" / "not-in" filter found: there is no notIn bound.
    return null;
}
  5931. /**
  5932. * Returns a lower bound of field values that can be used as a starting point to
  5933. * scan the index defined by `fieldIndex`. Returns `MIN_VALUE` if no lower bound
  5934. * exists.
  5935. */
  5936. function targetGetLowerBound(target, fieldIndex) {
  5937. const values = [];
  5938. let inclusive = true;
  5939. // For each segment, retrieve a lower bound if there is a suitable filter or
  5940. // startAt.
  5941. for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
  5942. const segmentBound = segment.kind === 0 /* IndexKind.ASCENDING */
  5943. ? targetGetAscendingBound(target, segment.fieldPath, target.startAt)
  5944. : targetGetDescendingBound(target, segment.fieldPath, target.startAt);
  5945. values.push(segmentBound.value);
  5946. inclusive && (inclusive = segmentBound.inclusive);
  5947. }
  5948. return new Bound(values, inclusive);
  5949. }
  5950. /**
  5951. * Returns an upper bound of field values that can be used as an ending point
  5952. * when scanning the index defined by `fieldIndex`. Returns `MAX_VALUE` if no
  5953. * upper bound exists.
  5954. */
  5955. function targetGetUpperBound(target, fieldIndex) {
  5956. const values = [];
  5957. let inclusive = true;
  5958. // For each segment, retrieve an upper bound if there is a suitable filter or
  5959. // endAt.
  5960. for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
  5961. const segmentBound = segment.kind === 0 /* IndexKind.ASCENDING */
  5962. ? targetGetDescendingBound(target, segment.fieldPath, target.endAt)
  5963. : targetGetAscendingBound(target, segment.fieldPath, target.endAt);
  5964. values.push(segmentBound.value);
  5965. inclusive && (inclusive = segmentBound.inclusive);
  5966. }
  5967. return new Bound(values, inclusive);
  5968. }
/**
 * Returns the value to use as the lower bound for ascending index segment at
 * the provided `fieldPath` (or the upper bound for an descending segment).
 *
 * @param target - Target whose filters and cursor are inspected.
 * @param fieldPath - Field path of the index segment being bounded.
 * @param bound - Optional cursor (startAt/endAt) that may tighten the bound.
 * @returns The tightest `{ value, inclusive }` pair found.
 */
function targetGetAscendingBound(target, fieldPath, bound) {
    let value = MIN_VALUE;
    let inclusive = true;
    // Process all filters to find a value for the current field segment
    for (const fieldFilter of targetGetFieldFiltersForPath(target, fieldPath)) {
        let filterValue = MIN_VALUE;
        let filterInclusive = true;
        switch (fieldFilter.op) {
            case "<" /* Operator.LESS_THAN */:
            case "<=" /* Operator.LESS_THAN_OR_EQUAL */:
                // Only bounds from above: the lower bound is the smallest value
                // of the filter value's type.
                filterValue = valuesGetLowerBound(fieldFilter.value);
                break;
            case "==" /* Operator.EQUAL */:
            case "in" /* Operator.IN */:
            case ">=" /* Operator.GREATER_THAN_OR_EQUAL */:
                filterValue = fieldFilter.value;
                break;
            case ">" /* Operator.GREATER_THAN */:
                // Strict inequality: the bound value itself is excluded.
                filterValue = fieldFilter.value;
                filterInclusive = false;
                break;
            case "!=" /* Operator.NOT_EQUAL */:
            case "not-in" /* Operator.NOT_IN */:
                filterValue = MIN_VALUE;
                break;
            // Remaining filters cannot be used as lower bounds.
        }
        // Keep whichever lower bound is larger (i.e. tighter).
        if (lowerBoundCompare({ value, inclusive }, { value: filterValue, inclusive: filterInclusive }) < 0) {
            value = filterValue;
            inclusive = filterInclusive;
        }
    }
    // If there is an additional bound, compare the values against the existing
    // range to see if we can narrow the scope.
    if (bound !== null) {
        for (let i = 0; i < target.orderBy.length; ++i) {
            const orderBy = target.orderBy[i];
            if (orderBy.field.isEqual(fieldPath)) {
                // Cursor positions are parallel to the orderBy list, so the
                // i-th position corresponds to this orderBy clause.
                const cursorValue = bound.position[i];
                if (lowerBoundCompare({ value, inclusive }, { value: cursorValue, inclusive: bound.inclusive }) < 0) {
                    value = cursorValue;
                    inclusive = bound.inclusive;
                }
                break;
            }
        }
    }
    return { value, inclusive };
}
/**
 * Returns the value to use as the upper bound for ascending index segment at
 * the provided `fieldPath` (or the lower bound for a descending segment).
 *
 * @param target - Target whose filters and cursor are inspected.
 * @param fieldPath - Field path of the index segment being bounded.
 * @param bound - Optional cursor (startAt/endAt) that may tighten the bound.
 * @returns The tightest `{ value, inclusive }` pair found.
 */
function targetGetDescendingBound(target, fieldPath, bound) {
    let value = MAX_VALUE;
    let inclusive = true;
    // Process all filters to find a value for the current field segment
    for (const fieldFilter of targetGetFieldFiltersForPath(target, fieldPath)) {
        let filterValue = MAX_VALUE;
        let filterInclusive = true;
        switch (fieldFilter.op) {
            case ">=" /* Operator.GREATER_THAN_OR_EQUAL */:
            case ">" /* Operator.GREATER_THAN */:
                // Only bounds from below: the upper bound is the largest value
                // of the filter value's type, exclusive.
                filterValue = valuesGetUpperBound(fieldFilter.value);
                filterInclusive = false;
                break;
            case "==" /* Operator.EQUAL */:
            case "in" /* Operator.IN */:
            case "<=" /* Operator.LESS_THAN_OR_EQUAL */:
                filterValue = fieldFilter.value;
                break;
            case "<" /* Operator.LESS_THAN */:
                // Strict inequality: the bound value itself is excluded.
                filterValue = fieldFilter.value;
                filterInclusive = false;
                break;
            case "!=" /* Operator.NOT_EQUAL */:
            case "not-in" /* Operator.NOT_IN */:
                filterValue = MAX_VALUE;
                break;
            // Remaining filters cannot be used as upper bounds.
        }
        // Keep whichever upper bound is smaller (i.e. tighter).
        if (upperBoundCompare({ value, inclusive }, { value: filterValue, inclusive: filterInclusive }) > 0) {
            value = filterValue;
            inclusive = filterInclusive;
        }
    }
    // If there is an additional bound, compare the values against the existing
    // range to see if we can narrow the scope.
    if (bound !== null) {
        for (let i = 0; i < target.orderBy.length; ++i) {
            const orderBy = target.orderBy[i];
            if (orderBy.field.isEqual(fieldPath)) {
                // Cursor positions are parallel to the orderBy list, so the
                // i-th position corresponds to this orderBy clause.
                const cursorValue = bound.position[i];
                if (upperBoundCompare({ value, inclusive }, { value: cursorValue, inclusive: bound.inclusive }) > 0) {
                    value = cursorValue;
                    inclusive = bound.inclusive;
                }
                break;
            }
        }
    }
    return { value, inclusive };
}
/** Returns the number of segments of a perfect index for this target. */
function targetGetSegmentCount(target) {
    // Distinct non-key fields referenced by filters/orderBys; each one
    // contributes exactly one index segment.
    let fields = new SortedSet(FieldPath$1.comparator);
    let hasArraySegment = false;
    for (const filter of target.filters) {
        for (const subFilter of filter.getFlattenedFilters()) {
            // __name__ is not an explicit segment of any index, so we don't need to
            // count it.
            if (subFilter.field.isKeyField()) {
                continue;
            }
            // ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filters must be counted separately.
            // For instance, it is possible to have an index for "a ARRAY a ASC". Even
            // though these are on the same field, they should be counted as two
            // separate segments in an index.
            if (subFilter.op === "array-contains" /* Operator.ARRAY_CONTAINS */ ||
                subFilter.op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
                hasArraySegment = true;
            }
            else {
                fields = fields.add(subFilter.field);
            }
        }
    }
    for (const orderBy of target.orderBy) {
        // __name__ is not an explicit segment of any index, so we don't need to
        // count it.
        if (!orderBy.field.isKeyField()) {
            fields = fields.add(orderBy.field);
        }
    }
    // At most one array segment exists, and it adds one to the count.
    return fields.size + (hasArraySegment ? 1 : 0);
}
  6109. function targetHasLimit(target) {
  6110. return target.limit !== null;
  6111. }
  6112. /**
  6113. * @license
  6114. * Copyright 2017 Google LLC
  6115. *
  6116. * Licensed under the Apache License, Version 2.0 (the "License");
  6117. * you may not use this file except in compliance with the License.
  6118. * You may obtain a copy of the License at
  6119. *
  6120. * http://www.apache.org/licenses/LICENSE-2.0
  6121. *
  6122. * Unless required by applicable law or agreed to in writing, software
  6123. * distributed under the License is distributed on an "AS IS" BASIS,
  6124. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  6125. * See the License for the specific language governing permissions and
  6126. * limitations under the License.
  6127. */
  6128. /**
  6129. * Query encapsulates all the query attributes we support in the SDK. It can
  6130. * be run against the LocalStore, as well as be converted to a `Target` to
  6131. * query the RemoteStore results.
  6132. *
  6133. * Visible for testing.
  6134. */
  6135. class QueryImpl {
  6136. /**
  6137. * Initializes a Query with a path and optional additional query constraints.
  6138. * Path must currently be empty if this is a collection group query.
  6139. */
  6140. constructor(path, collectionGroup = null, explicitOrderBy = [], filters = [], limit = null, limitType = "F" /* LimitType.First */, startAt = null, endAt = null) {
  6141. this.path = path;
  6142. this.collectionGroup = collectionGroup;
  6143. this.explicitOrderBy = explicitOrderBy;
  6144. this.filters = filters;
  6145. this.limit = limit;
  6146. this.limitType = limitType;
  6147. this.startAt = startAt;
  6148. this.endAt = endAt;
  6149. this.memoizedOrderBy = null;
  6150. // The corresponding `Target` of this `Query` instance.
  6151. this.memoizedTarget = null;
  6152. if (this.startAt) ;
  6153. if (this.endAt) ;
  6154. }
  6155. }
  6156. /** Creates a new Query instance with the options provided. */
  6157. function newQuery(path, collectionGroup, explicitOrderBy, filters, limit, limitType, startAt, endAt) {
  6158. return new QueryImpl(path, collectionGroup, explicitOrderBy, filters, limit, limitType, startAt, endAt);
  6159. }
  6160. /** Creates a new Query for a query that matches all documents at `path` */
  6161. function newQueryForPath(path) {
  6162. return new QueryImpl(path);
  6163. }
  6164. /**
  6165. * Helper to convert a collection group query into a collection query at a
  6166. * specific path. This is used when executing collection group queries, since
  6167. * we have to split the query into a set of collection queries at multiple
  6168. * paths.
  6169. */
  6170. function asCollectionQueryAtPath(query, path) {
  6171. return new QueryImpl(path,
  6172. /*collectionGroup=*/ null, query.explicitOrderBy.slice(), query.filters.slice(), query.limit, query.limitType, query.startAt, query.endAt);
  6173. }
  6174. /**
  6175. * Returns true if this query does not specify any query constraints that
  6176. * could remove results.
  6177. */
  6178. function queryMatchesAllDocuments(query) {
  6179. return (query.filters.length === 0 &&
  6180. query.limit === null &&
  6181. query.startAt == null &&
  6182. query.endAt == null &&
  6183. (query.explicitOrderBy.length === 0 ||
  6184. (query.explicitOrderBy.length === 1 &&
  6185. query.explicitOrderBy[0].field.isKeyField())));
  6186. }
  6187. function getFirstOrderByField(query) {
  6188. return query.explicitOrderBy.length > 0
  6189. ? query.explicitOrderBy[0].field
  6190. : null;
  6191. }
  6192. function getInequalityFilterField(query) {
  6193. for (const filter of query.filters) {
  6194. const result = filter.getFirstInequalityField();
  6195. if (result !== null) {
  6196. return result;
  6197. }
  6198. }
  6199. return null;
  6200. }
  6201. /**
  6202. * Creates a new Query for a collection group query that matches all documents
  6203. * within the provided collection group.
  6204. */
  6205. function newQueryForCollectionGroup(collectionId) {
  6206. return new QueryImpl(ResourcePath.emptyPath(), collectionId);
  6207. }
  6208. /**
  6209. * Returns whether the query matches a single document by path (rather than a
  6210. * collection).
  6211. */
  6212. function isDocumentQuery$1(query) {
  6213. return (DocumentKey.isDocumentKey(query.path) &&
  6214. query.collectionGroup === null &&
  6215. query.filters.length === 0);
  6216. }
  6217. /**
  6218. * Returns whether the query matches a collection group rather than a specific
  6219. * collection.
  6220. */
  6221. function isCollectionGroupQuery(query) {
  6222. return query.collectionGroup !== null;
  6223. }
/**
 * Returns the implicit order by constraint that is used to execute the Query,
 * which can be different from the order by constraints the user provided (e.g.
 * the SDK and backend always orders by `__name__`).
 *
 * The result is memoized on the QueryImpl (`memoizedOrderBy`) and therefore
 * computed at most once per query instance.
 */
function queryOrderBy(query) {
    const queryImpl = debugCast(query);
    if (queryImpl.memoizedOrderBy === null) {
        queryImpl.memoizedOrderBy = [];
        const inequalityField = getInequalityFilterField(queryImpl);
        const firstOrderByField = getFirstOrderByField(queryImpl);
        if (inequalityField !== null && firstOrderByField === null) {
            // In order to implicitly add key ordering, we must also add the
            // inequality filter field for it to be a valid query.
            // Note that the default inequality field and key ordering is ascending.
            if (!inequalityField.isKeyField()) {
                queryImpl.memoizedOrderBy.push(new OrderBy(inequalityField));
            }
            queryImpl.memoizedOrderBy.push(new OrderBy(FieldPath$1.keyField(), "asc" /* Direction.ASCENDING */));
        }
        else {
            // Copy the user's explicit orderBys, tracking whether any of them
            // already orders by the document key.
            let foundKeyOrdering = false;
            for (const orderBy of queryImpl.explicitOrderBy) {
                queryImpl.memoizedOrderBy.push(orderBy);
                if (orderBy.field.isKeyField()) {
                    foundKeyOrdering = true;
                }
            }
            if (!foundKeyOrdering) {
                // The order of the implicit key ordering always matches the last
                // explicit order by
                const lastDirection = queryImpl.explicitOrderBy.length > 0
                    ? queryImpl.explicitOrderBy[queryImpl.explicitOrderBy.length - 1]
                        .dir
                    : "asc" /* Direction.ASCENDING */;
                queryImpl.memoizedOrderBy.push(new OrderBy(FieldPath$1.keyField(), lastDirection));
            }
        }
    }
    return queryImpl.memoizedOrderBy;
}
/**
 * Converts this `Query` instance to it's corresponding `Target` representation.
 *
 * The result is memoized on the QueryImpl (`memoizedTarget`). Limit-to-last
 * queries are rewritten into equivalent limit-to-first targets by reversing
 * the orderBy directions and swapping the cursors, because the backend only
 * understands limit-to-first.
 */
function queryToTarget(query) {
    const queryImpl = debugCast(query);
    if (!queryImpl.memoizedTarget) {
        if (queryImpl.limitType === "F" /* LimitType.First */) {
            queryImpl.memoizedTarget = newTarget(queryImpl.path, queryImpl.collectionGroup, queryOrderBy(queryImpl), queryImpl.filters, queryImpl.limit, queryImpl.startAt, queryImpl.endAt);
        }
        else {
            // Flip the orderBy directions since we want the last results
            const orderBys = [];
            for (const orderBy of queryOrderBy(queryImpl)) {
                const dir = orderBy.dir === "desc" /* Direction.DESCENDING */
                    ? "asc" /* Direction.ASCENDING */
                    : "desc" /* Direction.DESCENDING */;
                orderBys.push(new OrderBy(orderBy.field, dir));
            }
            // We need to swap the cursors to match the now-flipped query ordering.
            const startAt = queryImpl.endAt
                ? new Bound(queryImpl.endAt.position, queryImpl.endAt.inclusive)
                : null;
            const endAt = queryImpl.startAt
                ? new Bound(queryImpl.startAt.position, queryImpl.startAt.inclusive)
                : null;
            // Now return as a LimitType.First query.
            queryImpl.memoizedTarget = newTarget(queryImpl.path, queryImpl.collectionGroup, orderBys, queryImpl.filters, queryImpl.limit, startAt, endAt);
        }
    }
    return queryImpl.memoizedTarget;
}
  6296. function queryWithAddedFilter(query, filter) {
  6297. filter.getFirstInequalityField();
  6298. getInequalityFilterField(query);
  6299. const newFilters = query.filters.concat([filter]);
  6300. return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), newFilters, query.limit, query.limitType, query.startAt, query.endAt);
  6301. }
  6302. function queryWithAddedOrderBy(query, orderBy) {
  6303. // TODO(dimond): validate that orderBy does not list the same key twice.
  6304. const newOrderBy = query.explicitOrderBy.concat([orderBy]);
  6305. return new QueryImpl(query.path, query.collectionGroup, newOrderBy, query.filters.slice(), query.limit, query.limitType, query.startAt, query.endAt);
  6306. }
  6307. function queryWithLimit(query, limit, limitType) {
  6308. return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), query.filters.slice(), limit, limitType, query.startAt, query.endAt);
  6309. }
  6310. function queryWithStartAt(query, bound) {
  6311. return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), query.filters.slice(), query.limit, query.limitType, bound, query.endAt);
  6312. }
  6313. function queryWithEndAt(query, bound) {
  6314. return new QueryImpl(query.path, query.collectionGroup, query.explicitOrderBy.slice(), query.filters.slice(), query.limit, query.limitType, query.startAt, bound);
  6315. }
  6316. function queryEquals(left, right) {
  6317. return (targetEquals(queryToTarget(left), queryToTarget(right)) &&
  6318. left.limitType === right.limitType);
  6319. }
  6320. // TODO(b/29183165): This is used to get a unique string from a query to, for
  6321. // example, use as a dictionary key, but the implementation is subject to
  6322. // collisions. Make it collision-free.
  6323. function canonifyQuery(query) {
  6324. return `${canonifyTarget(queryToTarget(query))}|lt:${query.limitType}`;
  6325. }
  6326. function stringifyQuery(query) {
  6327. return `Query(target=${stringifyTarget(queryToTarget(query))}; limitType=${query.limitType})`;
  6328. }
  6329. /** Returns whether `doc` matches the constraints of `query`. */
  6330. function queryMatches(query, doc) {
  6331. return (doc.isFoundDocument() &&
  6332. queryMatchesPathAndCollectionGroup(query, doc) &&
  6333. queryMatchesOrderBy(query, doc) &&
  6334. queryMatchesFilters(query, doc) &&
  6335. queryMatchesBounds(query, doc));
  6336. }
  6337. function queryMatchesPathAndCollectionGroup(query, doc) {
  6338. const docPath = doc.key.path;
  6339. if (query.collectionGroup !== null) {
  6340. // NOTE: this.path is currently always empty since we don't expose Collection
  6341. // Group queries rooted at a document path yet.
  6342. return (doc.key.hasCollectionId(query.collectionGroup) &&
  6343. query.path.isPrefixOf(docPath));
  6344. }
  6345. else if (DocumentKey.isDocumentKey(query.path)) {
  6346. // exact match for document queries
  6347. return query.path.isEqual(docPath);
  6348. }
  6349. else {
  6350. // shallow ancestor queries by default
  6351. return query.path.isImmediateParentOf(docPath);
  6352. }
  6353. }
  6354. /**
  6355. * A document must have a value for every ordering clause in order to show up
  6356. * in the results.
  6357. */
  6358. function queryMatchesOrderBy(query, doc) {
  6359. // We must use `queryOrderBy()` to get the list of all orderBys (both implicit and explicit).
  6360. // Note that for OR queries, orderBy applies to all disjunction terms and implicit orderBys must
  6361. // be taken into account. For example, the query "a > 1 || b==1" has an implicit "orderBy a" due
  6362. // to the inequality, and is evaluated as "a > 1 orderBy a || b==1 orderBy a".
  6363. // A document with content of {b:1} matches the filters, but does not match the orderBy because
  6364. // it's missing the field 'a'.
  6365. for (const orderBy of queryOrderBy(query)) {
  6366. // order by key always matches
  6367. if (!orderBy.field.isKeyField() && doc.data.field(orderBy.field) === null) {
  6368. return false;
  6369. }
  6370. }
  6371. return true;
  6372. }
  6373. function queryMatchesFilters(query, doc) {
  6374. for (const filter of query.filters) {
  6375. if (!filter.matches(doc)) {
  6376. return false;
  6377. }
  6378. }
  6379. return true;
  6380. }
  6381. /** Makes sure a document is within the bounds, if provided. */
  6382. function queryMatchesBounds(query, doc) {
  6383. if (query.startAt &&
  6384. !boundSortsBeforeDocument(query.startAt, queryOrderBy(query), doc)) {
  6385. return false;
  6386. }
  6387. if (query.endAt &&
  6388. !boundSortsAfterDocument(query.endAt, queryOrderBy(query), doc)) {
  6389. return false;
  6390. }
  6391. return true;
  6392. }
  6393. /**
  6394. * Returns the collection group that this query targets.
  6395. *
  6396. * PORTING NOTE: This is only used in the Web SDK to facilitate multi-tab
  6397. * synchronization for query results.
  6398. */
  6399. function queryCollectionGroup(query) {
  6400. return (query.collectionGroup ||
  6401. (query.path.length % 2 === 1
  6402. ? query.path.lastSegment()
  6403. : query.path.get(query.path.length - 2)));
  6404. }
  6405. /**
  6406. * Returns a new comparator function that can be used to compare two documents
  6407. * based on the Query's ordering constraint.
  6408. */
  6409. function newQueryComparator(query) {
  6410. return (d1, d2) => {
  6411. let comparedOnKeyField = false;
  6412. for (const orderBy of queryOrderBy(query)) {
  6413. const comp = compareDocs(orderBy, d1, d2);
  6414. if (comp !== 0) {
  6415. return comp;
  6416. }
  6417. comparedOnKeyField = comparedOnKeyField || orderBy.field.isKeyField();
  6418. }
  6419. return 0;
  6420. };
  6421. }
  6422. function compareDocs(orderBy, d1, d2) {
  6423. const comparison = orderBy.field.isKeyField()
  6424. ? DocumentKey.comparator(d1.key, d2.key)
  6425. : compareDocumentsByField(orderBy.field, d1, d2);
  6426. switch (orderBy.dir) {
  6427. case "asc" /* Direction.ASCENDING */:
  6428. return comparison;
  6429. case "desc" /* Direction.DESCENDING */:
  6430. return -1 * comparison;
  6431. default:
  6432. return fail();
  6433. }
  6434. }
  6435. /**
  6436. * @license
  6437. * Copyright 2017 Google LLC
  6438. *
  6439. * Licensed under the Apache License, Version 2.0 (the "License");
  6440. * you may not use this file except in compliance with the License.
  6441. * You may obtain a copy of the License at
  6442. *
  6443. * http://www.apache.org/licenses/LICENSE-2.0
  6444. *
  6445. * Unless required by applicable law or agreed to in writing, software
  6446. * distributed under the License is distributed on an "AS IS" BASIS,
  6447. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  6448. * See the License for the specific language governing permissions and
  6449. * limitations under the License.
  6450. */
  6451. /**
  6452. * A map implementation that uses objects as keys. Objects must have an
  6453. * associated equals function and must be immutable. Entries in the map are
  6454. * stored together with the key being produced from the mapKeyFn. This map
  6455. * automatically handles collisions of keys.
  6456. */
  6457. class ObjectMap {
  6458. constructor(mapKeyFn, equalsFn) {
  6459. this.mapKeyFn = mapKeyFn;
  6460. this.equalsFn = equalsFn;
  6461. /**
  6462. * The inner map for a key/value pair. Due to the possibility of collisions we
  6463. * keep a list of entries that we do a linear search through to find an actual
  6464. * match. Note that collisions should be rare, so we still expect near
  6465. * constant time lookups in practice.
  6466. */
  6467. this.inner = {};
  6468. /** The number of entries stored in the map */
  6469. this.innerSize = 0;
  6470. }
  6471. /** Get a value for this key, or undefined if it does not exist. */
  6472. get(key) {
  6473. const id = this.mapKeyFn(key);
  6474. const matches = this.inner[id];
  6475. if (matches === undefined) {
  6476. return undefined;
  6477. }
  6478. for (const [otherKey, value] of matches) {
  6479. if (this.equalsFn(otherKey, key)) {
  6480. return value;
  6481. }
  6482. }
  6483. return undefined;
  6484. }
  6485. has(key) {
  6486. return this.get(key) !== undefined;
  6487. }
  6488. /** Put this key and value in the map. */
  6489. set(key, value) {
  6490. const id = this.mapKeyFn(key);
  6491. const matches = this.inner[id];
  6492. if (matches === undefined) {
  6493. this.inner[id] = [[key, value]];
  6494. this.innerSize++;
  6495. return;
  6496. }
  6497. for (let i = 0; i < matches.length; i++) {
  6498. if (this.equalsFn(matches[i][0], key)) {
  6499. // This is updating an existing entry and does not increase `innerSize`.
  6500. matches[i] = [key, value];
  6501. return;
  6502. }
  6503. }
  6504. matches.push([key, value]);
  6505. this.innerSize++;
  6506. }
  6507. /**
  6508. * Remove this key from the map. Returns a boolean if anything was deleted.
  6509. */
  6510. delete(key) {
  6511. const id = this.mapKeyFn(key);
  6512. const matches = this.inner[id];
  6513. if (matches === undefined) {
  6514. return false;
  6515. }
  6516. for (let i = 0; i < matches.length; i++) {
  6517. if (this.equalsFn(matches[i][0], key)) {
  6518. if (matches.length === 1) {
  6519. delete this.inner[id];
  6520. }
  6521. else {
  6522. matches.splice(i, 1);
  6523. }
  6524. this.innerSize--;
  6525. return true;
  6526. }
  6527. }
  6528. return false;
  6529. }
  6530. forEach(fn) {
  6531. forEach(this.inner, (_, entries) => {
  6532. for (const [k, v] of entries) {
  6533. fn(k, v);
  6534. }
  6535. });
  6536. }
  6537. isEmpty() {
  6538. return isEmpty(this.inner);
  6539. }
  6540. size() {
  6541. return this.innerSize;
  6542. }
  6543. }
  6544. /**
  6545. * @license
  6546. * Copyright 2017 Google LLC
  6547. *
  6548. * Licensed under the Apache License, Version 2.0 (the "License");
  6549. * you may not use this file except in compliance with the License.
  6550. * You may obtain a copy of the License at
  6551. *
  6552. * http://www.apache.org/licenses/LICENSE-2.0
  6553. *
  6554. * Unless required by applicable law or agreed to in writing, software
  6555. * distributed under the License is distributed on an "AS IS" BASIS,
  6556. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  6557. * See the License for the specific language governing permissions and
  6558. * limitations under the License.
  6559. */
// Shared empty singletons: the factory functions below hand these out so that
// "empty" collections never allocate. SortedMap/SortedSet are immutable, so
// sharing is safe.
const EMPTY_MUTABLE_DOCUMENT_MAP = new SortedMap(DocumentKey.comparator);
/** Returns the shared empty DocumentKey-keyed map (for mutable documents). */
function mutableDocumentMap() {
    return EMPTY_MUTABLE_DOCUMENT_MAP;
}
const EMPTY_DOCUMENT_MAP = new SortedMap(DocumentKey.comparator);
/** Builds a DocumentKey-keyed map containing the given documents. */
function documentMap(...docs) {
    let map = EMPTY_DOCUMENT_MAP;
    for (const doc of docs) {
        map = map.insert(doc.key, doc);
    }
    return map;
}
/** Returns a fresh mutable DocumentKey-keyed map (for overlayed documents). */
function newOverlayedDocumentMap() {
    return newDocumentKeyMap();
}
/** Projects each entry's `overlayedDocument` into an immutable document map. */
function convertOverlayedDocumentMapToDocumentMap(collection) {
    let documents = EMPTY_DOCUMENT_MAP;
    collection.forEach((k, v) => (documents = documents.insert(k, v.overlayedDocument)));
    return documents;
}
/** Returns a fresh mutable DocumentKey-keyed map (for overlays). */
function newOverlayMap() {
    return newDocumentKeyMap();
}
/** Returns a fresh mutable DocumentKey-keyed map (for mutations). */
function newMutationMap() {
    return newDocumentKeyMap();
}
/**
 * Returns an ObjectMap keyed by DocumentKey: bucket ids come from
 * `key.toString()`, equality from `key.isEqual()`.
 */
function newDocumentKeyMap() {
    return new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
}
const EMPTY_DOCUMENT_VERSION_MAP = new SortedMap(DocumentKey.comparator);
/** Returns the shared empty DocumentKey-keyed version map. */
function documentVersionMap() {
    return EMPTY_DOCUMENT_VERSION_MAP;
}
const EMPTY_DOCUMENT_KEY_SET = new SortedSet(DocumentKey.comparator);
/** Builds a sorted set containing the given DocumentKeys. */
function documentKeySet(...keys) {
    let set = EMPTY_DOCUMENT_KEY_SET;
    for (const key of keys) {
        set = set.add(key);
    }
    return set;
}
const EMPTY_TARGET_ID_SET = new SortedSet(primitiveComparator);
/** Returns the shared empty set of (numeric) target ids. */
function targetIdSet() {
    return EMPTY_TARGET_ID_SET;
}
  6605. /**
  6606. * @license
  6607. * Copyright 2020 Google LLC
  6608. *
  6609. * Licensed under the Apache License, Version 2.0 (the "License");
  6610. * you may not use this file except in compliance with the License.
  6611. * You may obtain a copy of the License at
  6612. *
  6613. * http://www.apache.org/licenses/LICENSE-2.0
  6614. *
  6615. * Unless required by applicable law or agreed to in writing, software
  6616. * distributed under the License is distributed on an "AS IS" BASIS,
  6617. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  6618. * See the License for the specific language governing permissions and
  6619. * limitations under the License.
  6620. */
  6621. /**
  6622. * Returns an DoubleValue for `value` that is encoded based the serializer's
  6623. * `useProto3Json` setting.
  6624. */
  6625. function toDouble(serializer, value) {
  6626. if (serializer.useProto3Json) {
  6627. if (isNaN(value)) {
  6628. return { doubleValue: 'NaN' };
  6629. }
  6630. else if (value === Infinity) {
  6631. return { doubleValue: 'Infinity' };
  6632. }
  6633. else if (value === -Infinity) {
  6634. return { doubleValue: '-Infinity' };
  6635. }
  6636. }
  6637. return { doubleValue: isNegativeZero(value) ? '-0' : value };
  6638. }
  6639. /**
  6640. * Returns an IntegerValue for `value`.
  6641. */
  6642. function toInteger(value) {
  6643. return { integerValue: '' + value };
  6644. }
  6645. /**
  6646. * Returns a value for a number that's appropriate to put into a proto.
  6647. * The return value is an IntegerValue if it can safely represent the value,
  6648. * otherwise a DoubleValue is returned.
  6649. */
  6650. function toNumber(serializer, value) {
  6651. return isSafeInteger(value) ? toInteger(value) : toDouble(serializer, value);
  6652. }
  6653. /**
  6654. * @license
  6655. * Copyright 2018 Google LLC
  6656. *
  6657. * Licensed under the Apache License, Version 2.0 (the "License");
  6658. * you may not use this file except in compliance with the License.
  6659. * You may obtain a copy of the License at
  6660. *
  6661. * http://www.apache.org/licenses/LICENSE-2.0
  6662. *
  6663. * Unless required by applicable law or agreed to in writing, software
  6664. * distributed under the License is distributed on an "AS IS" BASIS,
  6665. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  6666. * See the License for the specific language governing permissions and
  6667. * limitations under the License.
  6668. */
  6669. /** Used to represent a field transform on a mutation. */
  6670. class TransformOperation {
  6671. constructor() {
  6672. // Make sure that the structural type of `TransformOperation` is unique.
  6673. // See https://github.com/microsoft/TypeScript/issues/5451
  6674. this._ = undefined;
  6675. }
  6676. }
  6677. /**
  6678. * Computes the local transform result against the provided `previousValue`,
  6679. * optionally using the provided localWriteTime.
  6680. */
  6681. function applyTransformOperationToLocalView(transform, previousValue, localWriteTime) {
  6682. if (transform instanceof ServerTimestampTransform) {
  6683. return serverTimestamp$1(localWriteTime, previousValue);
  6684. }
  6685. else if (transform instanceof ArrayUnionTransformOperation) {
  6686. return applyArrayUnionTransformOperation(transform, previousValue);
  6687. }
  6688. else if (transform instanceof ArrayRemoveTransformOperation) {
  6689. return applyArrayRemoveTransformOperation(transform, previousValue);
  6690. }
  6691. else {
  6692. return applyNumericIncrementTransformOperationToLocalView(transform, previousValue);
  6693. }
  6694. }
  6695. /**
  6696. * Computes a final transform result after the transform has been acknowledged
  6697. * by the server, potentially using the server-provided transformResult.
  6698. */
  6699. function applyTransformOperationToRemoteDocument(transform, previousValue, transformResult) {
  6700. // The server just sends null as the transform result for array operations,
  6701. // so we have to calculate a result the same as we do for local
  6702. // applications.
  6703. if (transform instanceof ArrayUnionTransformOperation) {
  6704. return applyArrayUnionTransformOperation(transform, previousValue);
  6705. }
  6706. else if (transform instanceof ArrayRemoveTransformOperation) {
  6707. return applyArrayRemoveTransformOperation(transform, previousValue);
  6708. }
  6709. return transformResult;
  6710. }
  6711. /**
  6712. * If this transform operation is not idempotent, returns the base value to
  6713. * persist for this transform. If a base value is returned, the transform
  6714. * operation is always applied to this base value, even if document has
  6715. * already been updated.
  6716. *
  6717. * Base values provide consistent behavior for non-idempotent transforms and
  6718. * allow us to return the same latency-compensated value even if the backend
  6719. * has already applied the transform operation. The base value is null for
  6720. * idempotent transforms, as they can be re-played even if the backend has
  6721. * already applied them.
  6722. *
  6723. * @returns a base value to store along with the mutation, or null for
  6724. * idempotent transforms.
  6725. */
  6726. function computeTransformOperationBaseValue(transform, previousValue) {
  6727. if (transform instanceof NumericIncrementTransformOperation) {
  6728. return isNumber(previousValue) ? previousValue : { integerValue: 0 };
  6729. }
  6730. return null;
  6731. }
  6732. function transformOperationEquals(left, right) {
  6733. if (left instanceof ArrayUnionTransformOperation &&
  6734. right instanceof ArrayUnionTransformOperation) {
  6735. return arrayEquals(left.elements, right.elements, valueEquals);
  6736. }
  6737. else if (left instanceof ArrayRemoveTransformOperation &&
  6738. right instanceof ArrayRemoveTransformOperation) {
  6739. return arrayEquals(left.elements, right.elements, valueEquals);
  6740. }
  6741. else if (left instanceof NumericIncrementTransformOperation &&
  6742. right instanceof NumericIncrementTransformOperation) {
  6743. return valueEquals(left.operand, right.operand);
  6744. }
  6745. return (left instanceof ServerTimestampTransform &&
  6746. right instanceof ServerTimestampTransform);
  6747. }
  6748. /** Transforms a value into a server-generated timestamp. */
  6749. class ServerTimestampTransform extends TransformOperation {
  6750. }
  6751. /** Transforms an array value via a union operation. */
  6752. class ArrayUnionTransformOperation extends TransformOperation {
  6753. constructor(elements) {
  6754. super();
  6755. this.elements = elements;
  6756. }
  6757. }
  6758. function applyArrayUnionTransformOperation(transform, previousValue) {
  6759. const values = coercedFieldValuesArray(previousValue);
  6760. for (const toUnion of transform.elements) {
  6761. if (!values.some(element => valueEquals(element, toUnion))) {
  6762. values.push(toUnion);
  6763. }
  6764. }
  6765. return { arrayValue: { values } };
  6766. }
  6767. /** Transforms an array value via a remove operation. */
  6768. class ArrayRemoveTransformOperation extends TransformOperation {
  6769. constructor(elements) {
  6770. super();
  6771. this.elements = elements;
  6772. }
  6773. }
  6774. function applyArrayRemoveTransformOperation(transform, previousValue) {
  6775. let values = coercedFieldValuesArray(previousValue);
  6776. for (const toRemove of transform.elements) {
  6777. values = values.filter(element => !valueEquals(element, toRemove));
  6778. }
  6779. return { arrayValue: { values } };
  6780. }
  6781. /**
  6782. * Implements the backend semantics for locally computed NUMERIC_ADD (increment)
  6783. * transforms. Converts all field values to integers or doubles, but unlike the
  6784. * backend does not cap integer values at 2^63. Instead, JavaScript number
  6785. * arithmetic is used and precision loss can occur for values greater than 2^53.
  6786. */
  6787. class NumericIncrementTransformOperation extends TransformOperation {
  6788. constructor(serializer, operand) {
  6789. super();
  6790. this.serializer = serializer;
  6791. this.operand = operand;
  6792. }
  6793. }
  6794. function applyNumericIncrementTransformOperationToLocalView(transform, previousValue) {
  6795. // PORTING NOTE: Since JavaScript's integer arithmetic is limited to 53 bit
  6796. // precision and resolves overflows by reducing precision, we do not
  6797. // manually cap overflows at 2^63.
  6798. const baseValue = computeTransformOperationBaseValue(transform, previousValue);
  6799. const sum = asNumber(baseValue) + asNumber(transform.operand);
  6800. if (isInteger(baseValue) && isInteger(transform.operand)) {
  6801. return toInteger(sum);
  6802. }
  6803. else {
  6804. return toDouble(transform.serializer, sum);
  6805. }
  6806. }
  6807. function asNumber(value) {
  6808. return normalizeNumber(value.integerValue || value.doubleValue);
  6809. }
  6810. function coercedFieldValuesArray(value) {
  6811. return isArray(value) && value.arrayValue.values
  6812. ? value.arrayValue.values.slice()
  6813. : [];
  6814. }
  6815. /**
  6816. * @license
  6817. * Copyright 2017 Google LLC
  6818. *
  6819. * Licensed under the Apache License, Version 2.0 (the "License");
  6820. * you may not use this file except in compliance with the License.
  6821. * You may obtain a copy of the License at
  6822. *
  6823. * http://www.apache.org/licenses/LICENSE-2.0
  6824. *
  6825. * Unless required by applicable law or agreed to in writing, software
  6826. * distributed under the License is distributed on an "AS IS" BASIS,
  6827. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  6828. * See the License for the specific language governing permissions and
  6829. * limitations under the License.
  6830. */
  6831. /** A field path and the TransformOperation to perform upon it. */
  6832. class FieldTransform {
  6833. constructor(field, transform) {
  6834. this.field = field;
  6835. this.transform = transform;
  6836. }
  6837. }
  6838. function fieldTransformEquals(left, right) {
  6839. return (left.field.isEqual(right.field) &&
  6840. transformOperationEquals(left.transform, right.transform));
  6841. }
  6842. function fieldTransformsAreEqual(left, right) {
  6843. if (left === undefined && right === undefined) {
  6844. return true;
  6845. }
  6846. if (left && right) {
  6847. return arrayEquals(left, right, (l, r) => fieldTransformEquals(l, r));
  6848. }
  6849. return false;
  6850. }
  6851. /** The result of successfully applying a mutation to the backend. */
  6852. class MutationResult {
  6853. constructor(
  6854. /**
  6855. * The version at which the mutation was committed:
  6856. *
  6857. * - For most operations, this is the updateTime in the WriteResult.
  6858. * - For deletes, the commitTime of the WriteResponse (because deletes are
  6859. * not stored and have no updateTime).
  6860. *
  6861. * Note that these versions can be different: No-op writes will not change
  6862. * the updateTime even though the commitTime advances.
  6863. */
  6864. version,
  6865. /**
  6866. * The resulting fields returned from the backend after a mutation
  6867. * containing field transforms has been committed. Contains one FieldValue
  6868. * for each FieldTransform that was in the mutation.
  6869. *
  6870. * Will be empty if the mutation did not contain any field transforms.
  6871. */
  6872. transformResults) {
  6873. this.version = version;
  6874. this.transformResults = transformResults;
  6875. }
  6876. }
  6877. /**
  6878. * Encodes a precondition for a mutation. This follows the model that the
  6879. * backend accepts with the special case of an explicit "empty" precondition
  6880. * (meaning no precondition).
  6881. */
  6882. class Precondition {
  6883. constructor(updateTime, exists) {
  6884. this.updateTime = updateTime;
  6885. this.exists = exists;
  6886. }
  6887. /** Creates a new empty Precondition. */
  6888. static none() {
  6889. return new Precondition();
  6890. }
  6891. /** Creates a new Precondition with an exists flag. */
  6892. static exists(exists) {
  6893. return new Precondition(undefined, exists);
  6894. }
  6895. /** Creates a new Precondition based on a version a document exists at. */
  6896. static updateTime(version) {
  6897. return new Precondition(version);
  6898. }
  6899. /** Returns whether this Precondition is empty. */
  6900. get isNone() {
  6901. return this.updateTime === undefined && this.exists === undefined;
  6902. }
  6903. isEqual(other) {
  6904. return (this.exists === other.exists &&
  6905. (this.updateTime
  6906. ? !!other.updateTime && this.updateTime.isEqual(other.updateTime)
  6907. : !other.updateTime));
  6908. }
  6909. }
  6910. /** Returns true if the preconditions is valid for the given document. */
  6911. function preconditionIsValidForDocument(precondition, document) {
  6912. if (precondition.updateTime !== undefined) {
  6913. return (document.isFoundDocument() &&
  6914. document.version.isEqual(precondition.updateTime));
  6915. }
  6916. else if (precondition.exists !== undefined) {
  6917. return precondition.exists === document.isFoundDocument();
  6918. }
  6919. else {
  6920. return true;
  6921. }
  6922. }
/**
 * A mutation describes a self-contained change to a document. Mutations can
 * create, replace, delete, and update subsets of documents.
 *
 * Mutations not only act on the value of the document but also its version.
 *
 * For local mutations (mutations that haven't been committed yet), we preserve
 * the existing version for Set and Patch mutations. For Delete mutations, we
 * reset the version to 0.
 *
 * Here's the expected transition table.
 *
 * MUTATION           APPLIED TO            RESULTS IN
 *
 * SetMutation        Document(v3)          Document(v3)
 * SetMutation        NoDocument(v3)        Document(v0)
 * SetMutation        InvalidDocument(v0)   Document(v0)
 * PatchMutation      Document(v3)          Document(v3)
 * PatchMutation      NoDocument(v3)        NoDocument(v3)
 * PatchMutation      InvalidDocument(v0)   UnknownDocument(v3)
 * DeleteMutation     Document(v3)          NoDocument(v0)
 * DeleteMutation     NoDocument(v3)        NoDocument(v0)
 * DeleteMutation     InvalidDocument(v0)   NoDocument(v0)
 *
 * For acknowledged mutations, we use the updateTime of the WriteResponse as
 * the resulting version for Set and Patch mutations. As deletes have no
 * explicit update time, we use the commitTime of the WriteResponse for
 * Delete mutations.
 *
 * If a mutation is acknowledged by the backend but fails the precondition
 * check locally, we transition to an `UnknownDocument` and rely on Watch to
 * send us the updated version.
 *
 * Field transforms are used only with Patch and Set Mutations. We use the
 * `updateTransforms` message to store transforms, rather than the
 * `transforms` message.
 *
 * ## Subclassing Notes
 *
 * Every type of mutation needs to implement its own applyToRemoteDocument()
 * and applyToLocalView() to implement the actual behavior of applying the
 * mutation to some source document (see `setMutationApplyToRemoteDocument()`
 * for an example).
 */
class Mutation {
}
/**
 * A utility method to calculate a `Mutation` representing the overlay from the
 * final state of the document, and a `FieldMask` representing the fields that
 * are mutated by the local mutations.
 *
 * Returns null when the document carries no local mutations (nothing to
 * overlay) or when `mask` is present but empty (no fields were changed).
 */
function calculateOverlayMutation(doc, mask) {
    if (!doc.hasLocalMutations || (mask && mask.fields.length === 0)) {
        return null;
    }
    // mask is null when sets or deletes are applied to the current document;
    // in that case the overlay is a whole-document operation.
    if (mask === null) {
        if (doc.isNoDocument()) {
            return new DeleteMutation(doc.key, Precondition.none());
        }
        else {
            return new SetMutation(doc.key, doc.data, Precondition.none());
        }
    }
    else {
        // Otherwise build a PatchMutation containing just the masked fields.
        const docValue = doc.data;
        const patchValue = ObjectValue.empty();
        // Tracks which paths have already been folded into the patch so each
        // (possibly rewritten) path is processed only once.
        let maskSet = new SortedSet(FieldPath$1.comparator);
        for (let path of mask.fields) {
            if (!maskSet.has(path)) {
                let value = docValue.field(path);
                // If we are deleting a nested field, we take the immediate parent as
                // the mask used to construct the resulting mutation.
                // Justification: Nested fields can create parent fields implicitly. If
                // only a leaf entry is deleted in later mutations, the parent field
                // should still remain, but we may have lost this information.
                // Consider mutation (foo.bar 1), then mutation (foo.bar delete()).
                // This leaves the final result (foo, {}). Despite the fact that `doc`
                // has the correct result, `foo` is not in `mask`, and the resulting
                // mutation would miss `foo`.
                if (value === null && path.length > 1) {
                    path = path.popLast();
                    value = docValue.field(path);
                }
                if (value === null) {
                    // Field absent from the final document: record a delete.
                    patchValue.delete(path);
                }
                else {
                    patchValue.set(path, value);
                }
                maskSet = maskSet.add(path);
            }
        }
        return new PatchMutation(doc.key, patchValue, new FieldMask(maskSet.toArray()), Precondition.none());
    }
}
  7019. /**
  7020. * Applies this mutation to the given document for the purposes of computing a
  7021. * new remote document. If the input document doesn't match the expected state
  7022. * (e.g. it is invalid or outdated), the document type may transition to
  7023. * unknown.
  7024. *
  7025. * @param mutation - The mutation to apply.
  7026. * @param document - The document to mutate. The input document can be an
  7027. * invalid document if the client has no knowledge of the pre-mutation state
  7028. * of the document.
  7029. * @param mutationResult - The result of applying the mutation from the backend.
  7030. */
  7031. function mutationApplyToRemoteDocument(mutation, document, mutationResult) {
  7032. if (mutation instanceof SetMutation) {
  7033. setMutationApplyToRemoteDocument(mutation, document, mutationResult);
  7034. }
  7035. else if (mutation instanceof PatchMutation) {
  7036. patchMutationApplyToRemoteDocument(mutation, document, mutationResult);
  7037. }
  7038. else {
  7039. deleteMutationApplyToRemoteDocument(mutation, document, mutationResult);
  7040. }
  7041. }
  7042. /**
  7043. * Applies this mutation to the given document for the purposes of computing
  7044. * the new local view of a document. If the input document doesn't match the
  7045. * expected state, the document is not modified.
  7046. *
  7047. * @param mutation - The mutation to apply.
  7048. * @param document - The document to mutate. The input document can be an
  7049. * invalid document if the client has no knowledge of the pre-mutation state
  7050. * of the document.
  7051. * @param previousMask - The fields that have been updated before applying this mutation.
  7052. * @param localWriteTime - A timestamp indicating the local write time of the
  7053. * batch this mutation is a part of.
  7054. * @returns A `FieldMask` representing the fields that are changed by applying this mutation.
  7055. */
  7056. function mutationApplyToLocalView(mutation, document, previousMask, localWriteTime) {
  7057. if (mutation instanceof SetMutation) {
  7058. return setMutationApplyToLocalView(mutation, document, previousMask, localWriteTime);
  7059. }
  7060. else if (mutation instanceof PatchMutation) {
  7061. return patchMutationApplyToLocalView(mutation, document, previousMask, localWriteTime);
  7062. }
  7063. else {
  7064. return deleteMutationApplyToLocalView(mutation, document, previousMask);
  7065. }
  7066. }
  7067. /**
  7068. * If this mutation is not idempotent, returns the base value to persist with
  7069. * this mutation. If a base value is returned, the mutation is always applied
  7070. * to this base value, even if document has already been updated.
  7071. *
  7072. * The base value is a sparse object that consists of only the document
  7073. * fields for which this mutation contains a non-idempotent transformation
  7074. * (e.g. a numeric increment). The provided value guarantees consistent
  7075. * behavior for non-idempotent transforms and allow us to return the same
  7076. * latency-compensated value even if the backend has already applied the
  7077. * mutation. The base value is null for idempotent mutations, as they can be
  7078. * re-played even if the backend has already applied them.
  7079. *
  7080. * @returns a base value to store along with the mutation, or null for
  7081. * idempotent mutations.
  7082. */
  7083. function mutationExtractBaseValue(mutation, document) {
  7084. let baseObject = null;
  7085. for (const fieldTransform of mutation.fieldTransforms) {
  7086. const existingValue = document.data.field(fieldTransform.field);
  7087. const coercedValue = computeTransformOperationBaseValue(fieldTransform.transform, existingValue || null);
  7088. if (coercedValue != null) {
  7089. if (baseObject === null) {
  7090. baseObject = ObjectValue.empty();
  7091. }
  7092. baseObject.set(fieldTransform.field, coercedValue);
  7093. }
  7094. }
  7095. return baseObject ? baseObject : null;
  7096. }
  7097. function mutationEquals(left, right) {
  7098. if (left.type !== right.type) {
  7099. return false;
  7100. }
  7101. if (!left.key.isEqual(right.key)) {
  7102. return false;
  7103. }
  7104. if (!left.precondition.isEqual(right.precondition)) {
  7105. return false;
  7106. }
  7107. if (!fieldTransformsAreEqual(left.fieldTransforms, right.fieldTransforms)) {
  7108. return false;
  7109. }
  7110. if (left.type === 0 /* MutationType.Set */) {
  7111. return left.value.isEqual(right.value);
  7112. }
  7113. if (left.type === 1 /* MutationType.Patch */) {
  7114. return (left.data.isEqual(right.data) &&
  7115. left.fieldMask.isEqual(right.fieldMask));
  7116. }
  7117. return true;
  7118. }
  7119. /**
  7120. * A mutation that creates or replaces the document at the given key with the
  7121. * object value contents.
  7122. */
  7123. class SetMutation extends Mutation {
  7124. constructor(key, value, precondition, fieldTransforms = []) {
  7125. super();
  7126. this.key = key;
  7127. this.value = value;
  7128. this.precondition = precondition;
  7129. this.fieldTransforms = fieldTransforms;
  7130. this.type = 0 /* MutationType.Set */;
  7131. }
  7132. getFieldMask() {
  7133. return null;
  7134. }
  7135. }
  7136. function setMutationApplyToRemoteDocument(mutation, document, mutationResult) {
  7137. // Unlike setMutationApplyToLocalView, if we're applying a mutation to a
  7138. // remote document the server has accepted the mutation so the precondition
  7139. // must have held.
  7140. const newData = mutation.value.clone();
  7141. const transformResults = serverTransformResults(mutation.fieldTransforms, document, mutationResult.transformResults);
  7142. newData.setAll(transformResults);
  7143. document
  7144. .convertToFoundDocument(mutationResult.version, newData)
  7145. .setHasCommittedMutations();
  7146. }
  7147. function setMutationApplyToLocalView(mutation, document, previousMask, localWriteTime) {
  7148. if (!preconditionIsValidForDocument(mutation.precondition, document)) {
  7149. // The mutation failed to apply (e.g. a document ID created with add()
  7150. // caused a name collision).
  7151. return previousMask;
  7152. }
  7153. const newData = mutation.value.clone();
  7154. const transformResults = localTransformResults(mutation.fieldTransforms, localWriteTime, document);
  7155. newData.setAll(transformResults);
  7156. document
  7157. .convertToFoundDocument(document.version, newData)
  7158. .setHasLocalMutations();
  7159. return null; // SetMutation overwrites all fields.
  7160. }
  7161. /**
  7162. * A mutation that modifies fields of the document at the given key with the
  7163. * given values. The values are applied through a field mask:
  7164. *
  7165. * * When a field is in both the mask and the values, the corresponding field
  7166. * is updated.
  7167. * * When a field is in neither the mask nor the values, the corresponding
  7168. * field is unmodified.
  7169. * * When a field is in the mask but not in the values, the corresponding field
  7170. * is deleted.
  7171. * * When a field is not in the mask but is in the values, the values map is
  7172. * ignored.
  7173. */
  7174. class PatchMutation extends Mutation {
  7175. constructor(key, data, fieldMask, precondition, fieldTransforms = []) {
  7176. super();
  7177. this.key = key;
  7178. this.data = data;
  7179. this.fieldMask = fieldMask;
  7180. this.precondition = precondition;
  7181. this.fieldTransforms = fieldTransforms;
  7182. this.type = 1 /* MutationType.Patch */;
  7183. }
  7184. getFieldMask() {
  7185. return this.fieldMask;
  7186. }
  7187. }
  7188. function patchMutationApplyToRemoteDocument(mutation, document, mutationResult) {
  7189. if (!preconditionIsValidForDocument(mutation.precondition, document)) {
  7190. // Since the mutation was not rejected, we know that the precondition
  7191. // matched on the backend. We therefore must not have the expected version
  7192. // of the document in our cache and convert to an UnknownDocument with a
  7193. // known updateTime.
  7194. document.convertToUnknownDocument(mutationResult.version);
  7195. return;
  7196. }
  7197. const transformResults = serverTransformResults(mutation.fieldTransforms, document, mutationResult.transformResults);
  7198. const newData = document.data;
  7199. newData.setAll(getPatch(mutation));
  7200. newData.setAll(transformResults);
  7201. document
  7202. .convertToFoundDocument(mutationResult.version, newData)
  7203. .setHasCommittedMutations();
  7204. }
  7205. function patchMutationApplyToLocalView(mutation, document, previousMask, localWriteTime) {
  7206. if (!preconditionIsValidForDocument(mutation.precondition, document)) {
  7207. return previousMask;
  7208. }
  7209. const transformResults = localTransformResults(mutation.fieldTransforms, localWriteTime, document);
  7210. const newData = document.data;
  7211. newData.setAll(getPatch(mutation));
  7212. newData.setAll(transformResults);
  7213. document
  7214. .convertToFoundDocument(document.version, newData)
  7215. .setHasLocalMutations();
  7216. if (previousMask === null) {
  7217. return null;
  7218. }
  7219. return previousMask
  7220. .unionWith(mutation.fieldMask.fields)
  7221. .unionWith(mutation.fieldTransforms.map(transform => transform.field));
  7222. }
  7223. /**
  7224. * Returns a FieldPath/Value map with the content of the PatchMutation.
  7225. */
  7226. function getPatch(mutation) {
  7227. const result = new Map();
  7228. mutation.fieldMask.fields.forEach(fieldPath => {
  7229. if (!fieldPath.isEmpty()) {
  7230. const newValue = mutation.data.field(fieldPath);
  7231. result.set(fieldPath, newValue);
  7232. }
  7233. });
  7234. return result;
  7235. }
  7236. /**
  7237. * Creates a list of "transform results" (a transform result is a field value
  7238. * representing the result of applying a transform) for use after a mutation
  7239. * containing transforms has been acknowledged by the server.
  7240. *
  7241. * @param fieldTransforms - The field transforms to apply the result to.
  7242. * @param mutableDocument - The current state of the document after applying all
  7243. * previous mutations.
  7244. * @param serverTransformResults - The transform results received by the server.
  7245. * @returns The transform results list.
  7246. */
  7247. function serverTransformResults(fieldTransforms, mutableDocument, serverTransformResults) {
  7248. const transformResults = new Map();
  7249. hardAssert(fieldTransforms.length === serverTransformResults.length);
  7250. for (let i = 0; i < serverTransformResults.length; i++) {
  7251. const fieldTransform = fieldTransforms[i];
  7252. const transform = fieldTransform.transform;
  7253. const previousValue = mutableDocument.data.field(fieldTransform.field);
  7254. transformResults.set(fieldTransform.field, applyTransformOperationToRemoteDocument(transform, previousValue, serverTransformResults[i]));
  7255. }
  7256. return transformResults;
  7257. }
  7258. /**
  7259. * Creates a list of "transform results" (a transform result is a field value
  7260. * representing the result of applying a transform) for use when applying a
  7261. * transform locally.
  7262. *
  7263. * @param fieldTransforms - The field transforms to apply the result to.
  7264. * @param localWriteTime - The local time of the mutation (used to
  7265. * generate ServerTimestampValues).
  7266. * @param mutableDocument - The document to apply transforms on.
  7267. * @returns The transform results list.
  7268. */
  7269. function localTransformResults(fieldTransforms, localWriteTime, mutableDocument) {
  7270. const transformResults = new Map();
  7271. for (const fieldTransform of fieldTransforms) {
  7272. const transform = fieldTransform.transform;
  7273. const previousValue = mutableDocument.data.field(fieldTransform.field);
  7274. transformResults.set(fieldTransform.field, applyTransformOperationToLocalView(transform, previousValue, localWriteTime));
  7275. }
  7276. return transformResults;
  7277. }
  7278. /** A mutation that deletes the document at the given key. */
  7279. class DeleteMutation extends Mutation {
  7280. constructor(key, precondition) {
  7281. super();
  7282. this.key = key;
  7283. this.precondition = precondition;
  7284. this.type = 2 /* MutationType.Delete */;
  7285. this.fieldTransforms = [];
  7286. }
  7287. getFieldMask() {
  7288. return null;
  7289. }
  7290. }
  7291. function deleteMutationApplyToRemoteDocument(mutation, document, mutationResult) {
  7292. // Unlike applyToLocalView, if we're applying a mutation to a remote
  7293. // document the server has accepted the mutation so the precondition must
  7294. // have held.
  7295. document
  7296. .convertToNoDocument(mutationResult.version)
  7297. .setHasCommittedMutations();
  7298. }
  7299. function deleteMutationApplyToLocalView(mutation, document, previousMask) {
  7300. if (preconditionIsValidForDocument(mutation.precondition, document)) {
  7301. document.convertToNoDocument(document.version).setHasLocalMutations();
  7302. return null;
  7303. }
  7304. return previousMask;
  7305. }
  7306. /**
  7307. * A mutation that verifies the existence of the document at the given key with
  7308. * the provided precondition.
  7309. *
  7310. * The `verify` operation is only used in Transactions, and this class serves
  7311. * primarily to facilitate serialization into protos.
  7312. */
  7313. class VerifyMutation extends Mutation {
  7314. constructor(key, precondition) {
  7315. super();
  7316. this.key = key;
  7317. this.precondition = precondition;
  7318. this.type = 3 /* MutationType.Verify */;
  7319. this.fieldTransforms = [];
  7320. }
  7321. getFieldMask() {
  7322. return null;
  7323. }
  7324. }
  7325. /**
  7326. * @license
  7327. * Copyright 2017 Google LLC
  7328. *
  7329. * Licensed under the Apache License, Version 2.0 (the "License");
  7330. * you may not use this file except in compliance with the License.
  7331. * You may obtain a copy of the License at
  7332. *
  7333. * http://www.apache.org/licenses/LICENSE-2.0
  7334. *
  7335. * Unless required by applicable law or agreed to in writing, software
  7336. * distributed under the License is distributed on an "AS IS" BASIS,
  7337. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7338. * See the License for the specific language governing permissions and
  7339. * limitations under the License.
  7340. */
  7341. /**
  7342. * A batch of mutations that will be sent as one unit to the backend.
  7343. */
  7344. class MutationBatch {
  7345. /**
  7346. * @param batchId - The unique ID of this mutation batch.
* @param localWriteTime - The original local write time of this mutation batch.
  7348. * @param baseMutations - Mutations that are used to populate the base
  7349. * values when this mutation is applied locally. This can be used to locally
  7350. * overwrite values that are persisted in the remote document cache. Base
  7351. * mutations are never sent to the backend.
  7352. * @param mutations - The user-provided mutations in this mutation batch.
  7353. * User-provided mutations are applied both locally and remotely on the
  7354. * backend.
  7355. */
constructor(batchId, localWriteTime, baseMutations, mutations) {
    this.batchId = batchId; // unique ID of this batch
    this.localWriteTime = localWriteTime; // original local write time of the batch
    this.baseMutations = baseMutations; // local-only base-value mutations; never sent to the backend
    this.mutations = mutations; // user-provided mutations, applied locally and remotely
}
  7362. /**
  7363. * Applies all the mutations in this MutationBatch to the specified document
  7364. * to compute the state of the remote document
  7365. *
  7366. * @param document - The document to apply mutations to.
  7367. * @param batchResult - The result of applying the MutationBatch to the
  7368. * backend.
  7369. */
  7370. applyToRemoteDocument(document, batchResult) {
  7371. const mutationResults = batchResult.mutationResults;
  7372. for (let i = 0; i < this.mutations.length; i++) {
  7373. const mutation = this.mutations[i];
  7374. if (mutation.key.isEqual(document.key)) {
  7375. const mutationResult = mutationResults[i];
  7376. mutationApplyToRemoteDocument(mutation, document, mutationResult);
  7377. }
  7378. }
  7379. }
  7380. /**
  7381. * Computes the local view of a document given all the mutations in this
  7382. * batch.
  7383. *
  7384. * @param document - The document to apply mutations to.
  7385. * @param mutatedFields - Fields that have been updated before applying this mutation batch.
  7386. * @returns A `FieldMask` representing all the fields that are mutated.
  7387. */
  7388. applyToLocalView(document, mutatedFields) {
  7389. // First, apply the base state. This allows us to apply non-idempotent
  7390. // transform against a consistent set of values.
  7391. for (const mutation of this.baseMutations) {
  7392. if (mutation.key.isEqual(document.key)) {
  7393. mutatedFields = mutationApplyToLocalView(mutation, document, mutatedFields, this.localWriteTime);
  7394. }
  7395. }
  7396. // Second, apply all user-provided mutations.
  7397. for (const mutation of this.mutations) {
  7398. if (mutation.key.isEqual(document.key)) {
  7399. mutatedFields = mutationApplyToLocalView(mutation, document, mutatedFields, this.localWriteTime);
  7400. }
  7401. }
  7402. return mutatedFields;
  7403. }
  7404. /**
  7405. * Computes the local view for all provided documents given the mutations in
  7406. * this batch. Returns a `DocumentKey` to `Mutation` map which can be used to
  7407. * replace all the mutation applications.
  7408. */
  7409. applyToLocalDocumentSet(documentMap, documentsWithoutRemoteVersion) {
  7410. // TODO(mrschmidt): This implementation is O(n^2). If we apply the mutations
  7411. // directly (as done in `applyToLocalView()`), we can reduce the complexity
  7412. // to O(n).
  7413. const overlays = newMutationMap();
  7414. this.mutations.forEach(m => {
  7415. const overlayedDocument = documentMap.get(m.key);
  7416. // TODO(mutabledocuments): This method should take a MutableDocumentMap
  7417. // and we should remove this cast.
  7418. const mutableDocument = overlayedDocument.overlayedDocument;
  7419. let mutatedFields = this.applyToLocalView(mutableDocument, overlayedDocument.mutatedFields);
  7420. // Set mutatedFields to null if the document is only from local mutations.
  7421. // This creates a Set or Delete mutation, instead of trying to create a
  7422. // patch mutation as the overlay.
  7423. mutatedFields = documentsWithoutRemoteVersion.has(m.key)
  7424. ? null
  7425. : mutatedFields;
  7426. const overlay = calculateOverlayMutation(mutableDocument, mutatedFields);
  7427. if (overlay !== null) {
  7428. overlays.set(m.key, overlay);
  7429. }
  7430. if (!mutableDocument.isValidDocument()) {
  7431. mutableDocument.convertToNoDocument(SnapshotVersion.min());
  7432. }
  7433. });
  7434. return overlays;
  7435. }
  7436. keys() {
  7437. return this.mutations.reduce((keys, m) => keys.add(m.key), documentKeySet());
  7438. }
  7439. isEqual(other) {
  7440. return (this.batchId === other.batchId &&
  7441. arrayEquals(this.mutations, other.mutations, (l, r) => mutationEquals(l, r)) &&
  7442. arrayEquals(this.baseMutations, other.baseMutations, (l, r) => mutationEquals(l, r)));
  7443. }
  7444. }
  7445. /** The result of applying a mutation batch to the backend. */
  7446. class MutationBatchResult {
  7447. constructor(batch, commitVersion, mutationResults,
  7448. /**
  7449. * A pre-computed mapping from each mutated document to the resulting
  7450. * version.
  7451. */
  7452. docVersions) {
  7453. this.batch = batch;
  7454. this.commitVersion = commitVersion;
  7455. this.mutationResults = mutationResults;
  7456. this.docVersions = docVersions;
  7457. }
  7458. /**
  7459. * Creates a new MutationBatchResult for the given batch and results. There
  7460. * must be one result for each mutation in the batch. This static factory
  7461. * caches a document=&gt;version mapping (docVersions).
  7462. */
  7463. static from(batch, commitVersion, results) {
  7464. hardAssert(batch.mutations.length === results.length);
  7465. let versionMap = documentVersionMap();
  7466. const mutations = batch.mutations;
  7467. for (let i = 0; i < mutations.length; i++) {
  7468. versionMap = versionMap.insert(mutations[i].key, results[i].version);
  7469. }
  7470. return new MutationBatchResult(batch, commitVersion, results, versionMap);
  7471. }
  7472. }
  7473. /**
  7474. * @license
  7475. * Copyright 2022 Google LLC
  7476. *
  7477. * Licensed under the Apache License, Version 2.0 (the "License");
  7478. * you may not use this file except in compliance with the License.
  7479. * You may obtain a copy of the License at
  7480. *
  7481. * http://www.apache.org/licenses/LICENSE-2.0
  7482. *
  7483. * Unless required by applicable law or agreed to in writing, software
  7484. * distributed under the License is distributed on an "AS IS" BASIS,
  7485. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7486. * See the License for the specific language governing permissions and
  7487. * limitations under the License.
  7488. */
  7489. /**
  7490. * Representation of an overlay computed by Firestore.
  7491. *
  7492. * Holds information about a mutation and the largest batch id in Firestore when
  7493. * the mutation was created.
  7494. */
  7495. class Overlay {
  7496. constructor(largestBatchId, mutation) {
  7497. this.largestBatchId = largestBatchId;
  7498. this.mutation = mutation;
  7499. }
  7500. getKey() {
  7501. return this.mutation.key;
  7502. }
  7503. isEqual(other) {
  7504. return other !== null && this.mutation === other.mutation;
  7505. }
  7506. toString() {
  7507. return `Overlay{
  7508. largestBatchId: ${this.largestBatchId},
  7509. mutation: ${this.mutation.toString()}
  7510. }`;
  7511. }
  7512. }
  7513. /**
  7514. * @license
  7515. * Copyright 2017 Google LLC
  7516. *
  7517. * Licensed under the Apache License, Version 2.0 (the "License");
  7518. * you may not use this file except in compliance with the License.
  7519. * You may obtain a copy of the License at
  7520. *
  7521. * http://www.apache.org/licenses/LICENSE-2.0
  7522. *
  7523. * Unless required by applicable law or agreed to in writing, software
  7524. * distributed under the License is distributed on an "AS IS" BASIS,
  7525. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7526. * See the License for the specific language governing permissions and
  7527. * limitations under the License.
  7528. */
/**
 * An existence filter from the watch stream: carries the count of documents
 * the backend expects for a target. `WatchChangeAggregator.handleExistenceFilter`
 * compares this count against the locally known document count.
 */
class ExistenceFilter {
    // TODO(b/33078163): just use simplest form of existence filter for now
    constructor(count) {
        this.count = count;
    }
}
  7535. /**
  7536. * @license
  7537. * Copyright 2017 Google LLC
  7538. *
  7539. * Licensed under the Apache License, Version 2.0 (the "License");
  7540. * you may not use this file except in compliance with the License.
  7541. * You may obtain a copy of the License at
  7542. *
  7543. * http://www.apache.org/licenses/LICENSE-2.0
  7544. *
  7545. * Unless required by applicable law or agreed to in writing, software
  7546. * distributed under the License is distributed on an "AS IS" BASIS,
  7547. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7548. * See the License for the specific language governing permissions and
  7549. * limitations under the License.
  7550. */
  7551. /**
  7552. * Error Codes describing the different ways GRPC can fail. These are copied
  7553. * directly from GRPC's sources here:
  7554. *
  7555. * https://github.com/grpc/grpc/blob/bceec94ea4fc5f0085d81235d8e1c06798dc341a/include/grpc%2B%2B/impl/codegen/status_code_enum.h
  7556. *
  7557. * Important! The names of these identifiers matter because the string forms
  7558. * are used for reverse lookups from the webchannel stream. Do NOT change the
  7559. * names of these identifiers or change this into a const enum.
  7560. */
  7561. var RpcCode;
  7562. (function (RpcCode) {
  7563. RpcCode[RpcCode["OK"] = 0] = "OK";
  7564. RpcCode[RpcCode["CANCELLED"] = 1] = "CANCELLED";
  7565. RpcCode[RpcCode["UNKNOWN"] = 2] = "UNKNOWN";
  7566. RpcCode[RpcCode["INVALID_ARGUMENT"] = 3] = "INVALID_ARGUMENT";
  7567. RpcCode[RpcCode["DEADLINE_EXCEEDED"] = 4] = "DEADLINE_EXCEEDED";
  7568. RpcCode[RpcCode["NOT_FOUND"] = 5] = "NOT_FOUND";
  7569. RpcCode[RpcCode["ALREADY_EXISTS"] = 6] = "ALREADY_EXISTS";
  7570. RpcCode[RpcCode["PERMISSION_DENIED"] = 7] = "PERMISSION_DENIED";
  7571. RpcCode[RpcCode["UNAUTHENTICATED"] = 16] = "UNAUTHENTICATED";
  7572. RpcCode[RpcCode["RESOURCE_EXHAUSTED"] = 8] = "RESOURCE_EXHAUSTED";
  7573. RpcCode[RpcCode["FAILED_PRECONDITION"] = 9] = "FAILED_PRECONDITION";
  7574. RpcCode[RpcCode["ABORTED"] = 10] = "ABORTED";
  7575. RpcCode[RpcCode["OUT_OF_RANGE"] = 11] = "OUT_OF_RANGE";
  7576. RpcCode[RpcCode["UNIMPLEMENTED"] = 12] = "UNIMPLEMENTED";
  7577. RpcCode[RpcCode["INTERNAL"] = 13] = "INTERNAL";
  7578. RpcCode[RpcCode["UNAVAILABLE"] = 14] = "UNAVAILABLE";
  7579. RpcCode[RpcCode["DATA_LOSS"] = 15] = "DATA_LOSS";
  7580. })(RpcCode || (RpcCode = {}));
  7581. /**
  7582. * Determines whether an error code represents a permanent error when received
  7583. * in response to a non-write operation.
  7584. *
  7585. * See isPermanentWriteError for classifying write errors.
  7586. */
  7587. function isPermanentError(code) {
  7588. switch (code) {
  7589. case Code.OK:
  7590. return fail();
  7591. case Code.CANCELLED:
  7592. case Code.UNKNOWN:
  7593. case Code.DEADLINE_EXCEEDED:
  7594. case Code.RESOURCE_EXHAUSTED:
  7595. case Code.INTERNAL:
  7596. case Code.UNAVAILABLE:
  7597. // Unauthenticated means something went wrong with our token and we need
  7598. // to retry with new credentials which will happen automatically.
  7599. case Code.UNAUTHENTICATED:
  7600. return false;
  7601. case Code.INVALID_ARGUMENT:
  7602. case Code.NOT_FOUND:
  7603. case Code.ALREADY_EXISTS:
  7604. case Code.PERMISSION_DENIED:
  7605. case Code.FAILED_PRECONDITION:
  7606. // Aborted might be retried in some scenarios, but that is dependant on
  7607. // the context and should handled individually by the calling code.
  7608. // See https://cloud.google.com/apis/design/errors.
  7609. case Code.ABORTED:
  7610. case Code.OUT_OF_RANGE:
  7611. case Code.UNIMPLEMENTED:
  7612. case Code.DATA_LOSS:
  7613. return true;
  7614. default:
  7615. return fail();
  7616. }
  7617. }
  7618. /**
  7619. * Determines whether an error code represents a permanent error when received
  7620. * in response to a write operation.
  7621. *
  7622. * Write operations must be handled specially because as of b/119437764, ABORTED
  7623. * errors on the write stream should be retried too (even though ABORTED errors
  7624. * are not generally retryable).
  7625. *
  7626. * Note that during the initial handshake on the write stream an ABORTED error
  7627. * signals that we should discard our stream token (i.e. it is permanent). This
  7628. * means a handshake error should be classified with isPermanentError, above.
  7629. */
  7630. function isPermanentWriteError(code) {
  7631. return isPermanentError(code) && code !== Code.ABORTED;
  7632. }
  7633. /**
  7634. * Maps an error Code from GRPC status code number, like 0, 1, or 14. These
  7635. * are not the same as HTTP status codes.
  7636. *
  7637. * @returns The Code equivalent to the given GRPC status code. Fails if there
  7638. * is no match.
  7639. */
  7640. function mapCodeFromRpcCode(code) {
  7641. if (code === undefined) {
  7642. // This shouldn't normally happen, but in certain error cases (like trying
  7643. // to send invalid proto messages) we may get an error with no GRPC code.
  7644. logError('GRPC error has no .code');
  7645. return Code.UNKNOWN;
  7646. }
  7647. switch (code) {
  7648. case RpcCode.OK:
  7649. return Code.OK;
  7650. case RpcCode.CANCELLED:
  7651. return Code.CANCELLED;
  7652. case RpcCode.UNKNOWN:
  7653. return Code.UNKNOWN;
  7654. case RpcCode.DEADLINE_EXCEEDED:
  7655. return Code.DEADLINE_EXCEEDED;
  7656. case RpcCode.RESOURCE_EXHAUSTED:
  7657. return Code.RESOURCE_EXHAUSTED;
  7658. case RpcCode.INTERNAL:
  7659. return Code.INTERNAL;
  7660. case RpcCode.UNAVAILABLE:
  7661. return Code.UNAVAILABLE;
  7662. case RpcCode.UNAUTHENTICATED:
  7663. return Code.UNAUTHENTICATED;
  7664. case RpcCode.INVALID_ARGUMENT:
  7665. return Code.INVALID_ARGUMENT;
  7666. case RpcCode.NOT_FOUND:
  7667. return Code.NOT_FOUND;
  7668. case RpcCode.ALREADY_EXISTS:
  7669. return Code.ALREADY_EXISTS;
  7670. case RpcCode.PERMISSION_DENIED:
  7671. return Code.PERMISSION_DENIED;
  7672. case RpcCode.FAILED_PRECONDITION:
  7673. return Code.FAILED_PRECONDITION;
  7674. case RpcCode.ABORTED:
  7675. return Code.ABORTED;
  7676. case RpcCode.OUT_OF_RANGE:
  7677. return Code.OUT_OF_RANGE;
  7678. case RpcCode.UNIMPLEMENTED:
  7679. return Code.UNIMPLEMENTED;
  7680. case RpcCode.DATA_LOSS:
  7681. return Code.DATA_LOSS;
  7682. default:
  7683. return fail();
  7684. }
  7685. }
  7686. /**
  7687. * @license
  7688. * Copyright 2017 Google LLC
  7689. *
  7690. * Licensed under the Apache License, Version 2.0 (the "License");
  7691. * you may not use this file except in compliance with the License.
  7692. * You may obtain a copy of the License at
  7693. *
  7694. * http://www.apache.org/licenses/LICENSE-2.0
  7695. *
  7696. * Unless required by applicable law or agreed to in writing, software
  7697. * distributed under the License is distributed on an "AS IS" BASIS,
  7698. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7699. * See the License for the specific language governing permissions and
  7700. * limitations under the License.
  7701. */
  7702. /**
  7703. * An event from the RemoteStore. It is split into targetChanges (changes to the
  7704. * state or the set of documents in our watched targets) and documentUpdates
  7705. * (changes to the actual documents).
  7706. */
  7707. class RemoteEvent {
  7708. constructor(
  7709. /**
  7710. * The snapshot version this event brings us up to, or MIN if not set.
  7711. */
  7712. snapshotVersion,
  7713. /**
  7714. * A map from target to changes to the target. See TargetChange.
  7715. */
  7716. targetChanges,
  7717. /**
  7718. * A set of targets that is known to be inconsistent. Listens for these
  7719. * targets should be re-established without resume tokens.
  7720. */
  7721. targetMismatches,
  7722. /**
  7723. * A set of which documents have changed or been deleted, along with the
  7724. * doc's new values (if not deleted).
  7725. */
  7726. documentUpdates,
  7727. /**
  7728. * A set of which document updates are due only to limbo resolution targets.
  7729. */
  7730. resolvedLimboDocuments) {
  7731. this.snapshotVersion = snapshotVersion;
  7732. this.targetChanges = targetChanges;
  7733. this.targetMismatches = targetMismatches;
  7734. this.documentUpdates = documentUpdates;
  7735. this.resolvedLimboDocuments = resolvedLimboDocuments;
  7736. }
  7737. /**
  7738. * HACK: Views require RemoteEvents in order to determine whether the view is
  7739. * CURRENT, but secondary tabs don't receive remote events. So this method is
  7740. * used to create a synthesized RemoteEvent that can be used to apply a
  7741. * CURRENT status change to a View, for queries executed in a different tab.
  7742. */
  7743. // PORTING NOTE: Multi-tab only
  7744. static createSynthesizedRemoteEventForCurrentChange(targetId, current, resumeToken) {
  7745. const targetChanges = new Map();
  7746. targetChanges.set(targetId, TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current, resumeToken));
  7747. return new RemoteEvent(SnapshotVersion.min(), targetChanges, targetIdSet(), mutableDocumentMap(), documentKeySet());
  7748. }
  7749. }
  7750. /**
  7751. * A TargetChange specifies the set of changes for a specific target as part of
  7752. * a RemoteEvent. These changes track which documents are added, modified or
  7753. * removed, as well as the target's resume token and whether the target is
  7754. * marked CURRENT.
  7755. * The actual changes *to* documents are not part of the TargetChange since
  7756. * documents may be part of multiple targets.
  7757. */
  7758. class TargetChange {
  7759. constructor(
  7760. /**
  7761. * An opaque, server-assigned token that allows watching a query to be resumed
  7762. * after disconnecting without retransmitting all the data that matches the
  7763. * query. The resume token essentially identifies a point in time from which
  7764. * the server should resume sending results.
  7765. */
  7766. resumeToken,
  7767. /**
  7768. * The "current" (synced) status of this target. Note that "current"
  7769. * has special meaning in the RPC protocol that implies that a target is
  7770. * both up-to-date and consistent with the rest of the watch stream.
  7771. */
  7772. current,
  7773. /**
  7774. * The set of documents that were newly assigned to this target as part of
  7775. * this remote event.
  7776. */
  7777. addedDocuments,
  7778. /**
  7779. * The set of documents that were already assigned to this target but received
  7780. * an update during this remote event.
  7781. */
  7782. modifiedDocuments,
  7783. /**
  7784. * The set of documents that were removed from this target as part of this
  7785. * remote event.
  7786. */
  7787. removedDocuments) {
  7788. this.resumeToken = resumeToken;
  7789. this.current = current;
  7790. this.addedDocuments = addedDocuments;
  7791. this.modifiedDocuments = modifiedDocuments;
  7792. this.removedDocuments = removedDocuments;
  7793. }
  7794. /**
  7795. * This method is used to create a synthesized TargetChanges that can be used to
  7796. * apply a CURRENT status change to a View (for queries executed in a different
  7797. * tab) or for new queries (to raise snapshots with correct CURRENT status).
  7798. */
  7799. static createSynthesizedTargetChangeForCurrentChange(targetId, current, resumeToken) {
  7800. return new TargetChange(resumeToken, current, documentKeySet(), documentKeySet(), documentKeySet());
  7801. }
  7802. }
  7803. /**
  7804. * @license
  7805. * Copyright 2017 Google LLC
  7806. *
  7807. * Licensed under the Apache License, Version 2.0 (the "License");
  7808. * you may not use this file except in compliance with the License.
  7809. * You may obtain a copy of the License at
  7810. *
  7811. * http://www.apache.org/licenses/LICENSE-2.0
  7812. *
  7813. * Unless required by applicable law or agreed to in writing, software
  7814. * distributed under the License is distributed on an "AS IS" BASIS,
  7815. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  7816. * See the License for the specific language governing permissions and
  7817. * limitations under the License.
  7818. */
  7819. /**
  7820. * Represents a changed document and a list of target ids to which this change
  7821. * applies.
  7822. *
  7823. * If document has been deleted NoDocument will be provided.
  7824. */
  7825. class DocumentWatchChange {
  7826. constructor(
  7827. /** The new document applies to all of these targets. */
  7828. updatedTargetIds,
  7829. /** The new document is removed from all of these targets. */
  7830. removedTargetIds,
  7831. /** The key of the document for this change. */
  7832. key,
  7833. /**
  7834. * The new document or NoDocument if it was deleted. Is null if the
  7835. * document went out of view without the server sending a new document.
  7836. */
  7837. newDoc) {
  7838. this.updatedTargetIds = updatedTargetIds;
  7839. this.removedTargetIds = removedTargetIds;
  7840. this.key = key;
  7841. this.newDoc = newDoc;
  7842. }
  7843. }
  7844. class ExistenceFilterChange {
  7845. constructor(targetId, existenceFilter) {
  7846. this.targetId = targetId;
  7847. this.existenceFilter = existenceFilter;
  7848. }
  7849. }
  7850. class WatchTargetChange {
  7851. constructor(
  7852. /** What kind of change occurred to the watch target. */
  7853. state,
  7854. /** The target IDs that were added/removed/set. */
  7855. targetIds,
  7856. /**
  7857. * An opaque, server-assigned token that allows watching a target to be
  7858. * resumed after disconnecting without retransmitting all the data that
  7859. * matches the target. The resume token essentially identifies a point in
  7860. * time from which the server should resume sending results.
  7861. */
  7862. resumeToken = ByteString.EMPTY_BYTE_STRING,
  7863. /** An RPC error indicating why the watch failed. */
  7864. cause = null) {
  7865. this.state = state;
  7866. this.targetIds = targetIds;
  7867. this.resumeToken = resumeToken;
  7868. this.cause = cause;
  7869. }
  7870. }
  7871. /** Tracks the internal state of a Watch target. */
  7872. class TargetState {
  7873. constructor() {
  7874. /**
  7875. * The number of pending responses (adds or removes) that we are waiting on.
  7876. * We only consider targets active that have no pending responses.
  7877. */
  7878. this.pendingResponses = 0;
  7879. /**
  7880. * Keeps track of the document changes since the last raised snapshot.
  7881. *
  7882. * These changes are continuously updated as we receive document updates and
  7883. * always reflect the current set of changes against the last issued snapshot.
  7884. */
  7885. this.documentChanges = snapshotChangesMap();
  7886. /** See public getters for explanations of these fields. */
  7887. this._resumeToken = ByteString.EMPTY_BYTE_STRING;
  7888. this._current = false;
  7889. /**
  7890. * Whether this target state should be included in the next snapshot. We
  7891. * initialize to true so that newly-added targets are included in the next
  7892. * RemoteEvent.
  7893. */
  7894. this._hasPendingChanges = true;
  7895. }
  7896. /**
  7897. * Whether this target has been marked 'current'.
  7898. *
  7899. * 'Current' has special meaning in the RPC protocol: It implies that the
  7900. * Watch backend has sent us all changes up to the point at which the target
  7901. * was added and that the target is consistent with the rest of the watch
  7902. * stream.
  7903. */
  7904. get current() {
  7905. return this._current;
  7906. }
  7907. /** The last resume token sent to us for this target. */
  7908. get resumeToken() {
  7909. return this._resumeToken;
  7910. }
  7911. /** Whether this target has pending target adds or target removes. */
  7912. get isPending() {
  7913. return this.pendingResponses !== 0;
  7914. }
  7915. /** Whether we have modified any state that should trigger a snapshot. */
  7916. get hasPendingChanges() {
  7917. return this._hasPendingChanges;
  7918. }
  7919. /**
  7920. * Applies the resume token to the TargetChange, but only when it has a new
  7921. * value. Empty resumeTokens are discarded.
  7922. */
  7923. updateResumeToken(resumeToken) {
  7924. if (resumeToken.approximateByteSize() > 0) {
  7925. this._hasPendingChanges = true;
  7926. this._resumeToken = resumeToken;
  7927. }
  7928. }
  7929. /**
  7930. * Creates a target change from the current set of changes.
  7931. *
  7932. * To reset the document changes after raising this snapshot, call
  7933. * `clearPendingChanges()`.
  7934. */
  7935. toTargetChange() {
  7936. let addedDocuments = documentKeySet();
  7937. let modifiedDocuments = documentKeySet();
  7938. let removedDocuments = documentKeySet();
  7939. this.documentChanges.forEach((key, changeType) => {
  7940. switch (changeType) {
  7941. case 0 /* ChangeType.Added */:
  7942. addedDocuments = addedDocuments.add(key);
  7943. break;
  7944. case 2 /* ChangeType.Modified */:
  7945. modifiedDocuments = modifiedDocuments.add(key);
  7946. break;
  7947. case 1 /* ChangeType.Removed */:
  7948. removedDocuments = removedDocuments.add(key);
  7949. break;
  7950. default:
  7951. fail();
  7952. }
  7953. });
  7954. return new TargetChange(this._resumeToken, this._current, addedDocuments, modifiedDocuments, removedDocuments);
  7955. }
  7956. /**
  7957. * Resets the document changes and sets `hasPendingChanges` to false.
  7958. */
  7959. clearPendingChanges() {
  7960. this._hasPendingChanges = false;
  7961. this.documentChanges = snapshotChangesMap();
  7962. }
  7963. addDocumentChange(key, changeType) {
  7964. this._hasPendingChanges = true;
  7965. this.documentChanges = this.documentChanges.insert(key, changeType);
  7966. }
  7967. removeDocumentChange(key) {
  7968. this._hasPendingChanges = true;
  7969. this.documentChanges = this.documentChanges.remove(key);
  7970. }
  7971. recordPendingTargetRequest() {
  7972. this.pendingResponses += 1;
  7973. }
  7974. recordTargetResponse() {
  7975. this.pendingResponses -= 1;
  7976. }
  7977. markCurrent() {
  7978. this._hasPendingChanges = true;
  7979. this._current = true;
  7980. }
  7981. }
// Tag for log output from the WatchChangeAggregator code below (presumably
// passed to the logging helpers further down in the file).
const LOG_TAG$g = 'WatchChangeAggregator';
  7983. /**
  7984. * A helper class to accumulate watch changes into a RemoteEvent.
  7985. */
  7986. class WatchChangeAggregator {
    constructor(metadataProvider) {
        // NOTE(review): supplies target metadata (e.g. for
        // `targetDataForActiveTarget` / `getCurrentDocumentCountForTarget`) —
        // confirm the exact contract with callers.
        this.metadataProvider = metadataProvider;
        /** The internal state of all tracked targets. */
        this.targetStates = new Map();
        /** Keeps track of the documents to update since the last raised snapshot. */
        this.pendingDocumentUpdates = mutableDocumentMap();
        /** A mapping of document keys to their set of target IDs. */
        this.pendingDocumentTargetMapping = documentTargetMap();
        /**
         * A list of targets with existence filter mismatches. These targets are
         * known to be inconsistent and their listens needs to be re-established by
         * RemoteStore.
         */
        this.pendingTargetResets = new SortedSet(primitiveComparator);
    }
  8002. /**
  8003. * Processes and adds the DocumentWatchChange to the current set of changes.
  8004. */
    handleDocumentChange(docChange) {
        for (const targetId of docChange.updatedTargetIds) {
            // A found document updates the target; a missing/deleted document
            // (or the absence of one) removes the key from the target instead.
            if (docChange.newDoc && docChange.newDoc.isFoundDocument()) {
                this.addDocumentToTarget(targetId, docChange.newDoc);
            }
            else {
                this.removeDocumentFromTarget(targetId, docChange.key, docChange.newDoc);
            }
        }
        // Targets the document was explicitly removed from.
        for (const targetId of docChange.removedTargetIds) {
            this.removeDocumentFromTarget(targetId, docChange.key, docChange.newDoc);
        }
    }
  8018. /** Processes and adds the WatchTargetChange to the current set of changes. */
    handleTargetChange(targetChange) {
        this.forEachTarget(targetChange, targetId => {
            const targetState = this.ensureTargetState(targetId);
            switch (targetChange.state) {
                case 0 /* WatchTargetChangeState.NoChange */:
                    // Even a no-op change can carry a fresher resume token for
                    // an active target.
                    if (this.isActiveTarget(targetId)) {
                        targetState.updateResumeToken(targetChange.resumeToken);
                    }
                    break;
                case 1 /* WatchTargetChangeState.Added */:
                    // We need to decrement the number of pending acks needed from watch
                    // for this targetId.
                    targetState.recordTargetResponse();
                    if (!targetState.isPending) {
                        // We have a freshly added target, so we need to reset any state
                        // that we had previously. This can happen e.g. when remove and add
                        // back a target for existence filter mismatches.
                        targetState.clearPendingChanges();
                    }
                    targetState.updateResumeToken(targetChange.resumeToken);
                    break;
                case 2 /* WatchTargetChangeState.Removed */:
                    // We need to keep track of removed targets so we can post-filter and
                    // remove any target changes.
                    // We need to decrement the number of pending acks needed from watch
                    // for this targetId.
                    targetState.recordTargetResponse();
                    if (!targetState.isPending) {
                        this.removeTarget(targetId);
                    }
                    break;
                case 3 /* WatchTargetChangeState.Current */:
                    if (this.isActiveTarget(targetId)) {
                        targetState.markCurrent();
                        targetState.updateResumeToken(targetChange.resumeToken);
                    }
                    break;
                case 4 /* WatchTargetChangeState.Reset */:
                    if (this.isActiveTarget(targetId)) {
                        // Reset the target and synthesizes removes for all existing
                        // documents. The backend will re-add any documents that still
                        // match the target before it sends the next global snapshot.
                        this.resetTarget(targetId);
                        targetState.updateResumeToken(targetChange.resumeToken);
                    }
                    break;
                default:
                    // Unknown target-change state.
                    fail();
            }
        });
    }
  8070. /**
  8071. * Iterates over all targetIds that the watch change applies to: either the
  8072. * targetIds explicitly listed in the change or the targetIds of all currently
  8073. * active targets.
  8074. */
    forEachTarget(targetChange, fn) {
        if (targetChange.targetIds.length > 0) {
            // The change names explicit targets; visit exactly those.
            targetChange.targetIds.forEach(fn);
        }
        else {
            // An empty targetIds list means the change applies to every
            // currently active target.
            this.targetStates.forEach((_, targetId) => {
                if (this.isActiveTarget(targetId)) {
                    fn(targetId);
                }
            });
        }
    }
  8087. /**
  8088. * Handles existence filters and synthesizes deletes for filter mismatches.
  8089. * Targets that are invalidated by filter mismatches are added to
  8090. * `pendingTargetResets`.
  8091. */
    handleExistenceFilter(watchChange) {
        const targetId = watchChange.targetId;
        const expectedCount = watchChange.existenceFilter.count;
        const targetData = this.targetDataForActiveTarget(targetId);
        // Filters for targets that are no longer active are ignored.
        if (targetData) {
            const target = targetData.target;
            if (targetIsDocumentTarget(target)) {
                if (expectedCount === 0) {
                    // The existence filter told us the document does not exist. We deduce
                    // that this document does not exist and apply a deleted document to
                    // our updates. Without applying this deleted document there might be
                    // another query that will raise this document as part of a snapshot
                    // until it is resolved, essentially exposing inconsistency between
                    // queries.
                    const key = new DocumentKey(target.path);
                    this.removeDocumentFromTarget(targetId, key, MutableDocument.newNoDocument(key, SnapshotVersion.min()));
                }
                else {
                    // A document target matches at most one document, so any
                    // non-zero filter must report exactly one.
                    hardAssert(expectedCount === 1);
                }
            }
            else {
                const currentSize = this.getCurrentDocumentCountForTarget(targetId);
                if (currentSize !== expectedCount) {
                    // Existence filter mismatch: We reset the mapping and raise a new
                    // snapshot with `isFromCache:true`.
                    this.resetTarget(targetId);
                    this.pendingTargetResets = this.pendingTargetResets.add(targetId);
                }
            }
        }
    }
  8124. /**
  8125. * Converts the currently accumulated state into a remote event at the
  8126. * provided snapshot version. Resets the accumulated changes before returning.
  8127. */
createRemoteEvent(snapshotVersion) {
    const targetChanges = new Map();
    // Collect a TargetChange for every active target that accumulated changes.
    this.targetStates.forEach((targetState, targetId) => {
        const targetData = this.targetDataForActiveTarget(targetId);
        if (targetData) {
            if (targetState.current && targetIsDocumentTarget(targetData.target)) {
                // Document queries for document that don't exist can produce an empty
                // result set. To update our local cache, we synthesize a document
                // delete if we have not previously received the document. This
                // resolves the limbo state of the document, removing it from
                // limboDocumentRefs.
                //
                // TODO(dimond): Ideally we would have an explicit lookup target
                // instead resulting in an explicit delete message and we could
                // remove this special logic.
                const key = new DocumentKey(targetData.target.path);
                if (this.pendingDocumentUpdates.get(key) === null &&
                    !this.targetContainsDocument(targetId, key)) {
                    this.removeDocumentFromTarget(targetId, key, MutableDocument.newNoDocument(key, snapshotVersion));
                }
            }
            if (targetState.hasPendingChanges) {
                targetChanges.set(targetId, targetState.toTargetChange());
                targetState.clearPendingChanges();
            }
        }
    });
    let resolvedLimboDocuments = documentKeySet();
    // We extract the set of limbo-only document updates as the GC logic
    // special-cases documents that do not appear in the target cache.
    //
    // TODO(gsoltis): Expand on this comment once GC is available in the JS
    // client.
    this.pendingDocumentTargetMapping.forEach((key, targets) => {
        let isOnlyLimboTarget = true;
        // Stop scanning as soon as any non-limbo target references the document.
        targets.forEachWhile(targetId => {
            const targetData = this.targetDataForActiveTarget(targetId);
            if (targetData &&
                targetData.purpose !== 2 /* TargetPurpose.LimboResolution */) {
                isOnlyLimboTarget = false;
                return false;
            }
            return true;
        });
        if (isOnlyLimboTarget) {
            resolvedLimboDocuments = resolvedLimboDocuments.add(key);
        }
    });
    // Stamp every accumulated document update with the snapshot's read time.
    this.pendingDocumentUpdates.forEach((_, doc) => doc.setReadTime(snapshotVersion));
    const remoteEvent = new RemoteEvent(snapshotVersion, targetChanges, this.pendingTargetResets, this.pendingDocumentUpdates, resolvedLimboDocuments);
    // Reset accumulated state so the aggregator starts fresh for the next event.
    this.pendingDocumentUpdates = mutableDocumentMap();
    this.pendingDocumentTargetMapping = documentTargetMap();
    this.pendingTargetResets = new SortedSet(primitiveComparator);
    return remoteEvent;
}
  8183. /**
  8184. * Adds the provided document to the internal list of document updates and
  8185. * its document key to the given target's mapping.
  8186. */
  8187. // Visible for testing.
  8188. addDocumentToTarget(targetId, document) {
  8189. if (!this.isActiveTarget(targetId)) {
  8190. return;
  8191. }
  8192. const changeType = this.targetContainsDocument(targetId, document.key)
  8193. ? 2 /* ChangeType.Modified */
  8194. : 0 /* ChangeType.Added */;
  8195. const targetState = this.ensureTargetState(targetId);
  8196. targetState.addDocumentChange(document.key, changeType);
  8197. this.pendingDocumentUpdates = this.pendingDocumentUpdates.insert(document.key, document);
  8198. this.pendingDocumentTargetMapping =
  8199. this.pendingDocumentTargetMapping.insert(document.key, this.ensureDocumentTargetMapping(document.key).add(targetId));
  8200. }
  8201. /**
  8202. * Removes the provided document from the target mapping. If the
  8203. * document no longer matches the target, but the document's state is still
  8204. * known (e.g. we know that the document was deleted or we received the change
  8205. * that caused the filter mismatch), the new document can be provided
  8206. * to update the remote document cache.
  8207. */
  8208. // Visible for testing.
  8209. removeDocumentFromTarget(targetId, key, updatedDocument) {
  8210. if (!this.isActiveTarget(targetId)) {
  8211. return;
  8212. }
  8213. const targetState = this.ensureTargetState(targetId);
  8214. if (this.targetContainsDocument(targetId, key)) {
  8215. targetState.addDocumentChange(key, 1 /* ChangeType.Removed */);
  8216. }
  8217. else {
  8218. // The document may have entered and left the target before we raised a
  8219. // snapshot, so we can just ignore the change.
  8220. targetState.removeDocumentChange(key);
  8221. }
  8222. this.pendingDocumentTargetMapping =
  8223. this.pendingDocumentTargetMapping.insert(key, this.ensureDocumentTargetMapping(key).delete(targetId));
  8224. if (updatedDocument) {
  8225. this.pendingDocumentUpdates = this.pendingDocumentUpdates.insert(key, updatedDocument);
  8226. }
  8227. }
/** Stops tracking accumulated watch state for the given target. */
removeTarget(targetId) {
    this.targetStates.delete(targetId);
}
  8231. /**
  8232. * Returns the current count of documents in the target. This includes both
  8233. * the number of documents that the LocalStore considers to be part of the
  8234. * target as well as any accumulated changes.
  8235. */
  8236. getCurrentDocumentCountForTarget(targetId) {
  8237. const targetState = this.ensureTargetState(targetId);
  8238. const targetChange = targetState.toTargetChange();
  8239. return (this.metadataProvider.getRemoteKeysForTarget(targetId).size +
  8240. targetChange.addedDocuments.size -
  8241. targetChange.removedDocuments.size);
  8242. }
  8243. /**
  8244. * Increment the number of acks needed from watch before we can consider the
  8245. * server to be 'in-sync' with the client's active targets.
  8246. */
recordPendingTargetRequest(targetId) {
    // For each request we get we need to record we need a response for it.
    const targetState = this.ensureTargetState(targetId);
    targetState.recordPendingTargetRequest();
}
  8252. ensureTargetState(targetId) {
  8253. let result = this.targetStates.get(targetId);
  8254. if (!result) {
  8255. result = new TargetState();
  8256. this.targetStates.set(targetId, result);
  8257. }
  8258. return result;
  8259. }
  8260. ensureDocumentTargetMapping(key) {
  8261. let targetMapping = this.pendingDocumentTargetMapping.get(key);
  8262. if (!targetMapping) {
  8263. targetMapping = new SortedSet(primitiveComparator);
  8264. this.pendingDocumentTargetMapping =
  8265. this.pendingDocumentTargetMapping.insert(key, targetMapping);
  8266. }
  8267. return targetMapping;
  8268. }
  8269. /**
  8270. * Verifies that the user is still interested in this target (by calling
  8271. * `getTargetDataForTarget()`) and that we are not waiting for pending ADDs
  8272. * from watch.
  8273. */
  8274. isActiveTarget(targetId) {
  8275. const targetActive = this.targetDataForActiveTarget(targetId) !== null;
  8276. if (!targetActive) {
  8277. logDebug(LOG_TAG$g, 'Detected inactive target', targetId);
  8278. }
  8279. return targetActive;
  8280. }
  8281. /**
  8282. * Returns the TargetData for an active target (i.e. a target that the user
  8283. * is still interested in that has no outstanding target change requests).
  8284. */
  8285. targetDataForActiveTarget(targetId) {
  8286. const targetState = this.targetStates.get(targetId);
  8287. return targetState && targetState.isPending
  8288. ? null
  8289. : this.metadataProvider.getTargetDataForTarget(targetId);
  8290. }
  8291. /**
  8292. * Resets the state of a Watch target to its initial state (e.g. sets
  8293. * 'current' to false, clears the resume token and removes its target mapping
  8294. * from all documents).
  8295. */
  8296. resetTarget(targetId) {
  8297. this.targetStates.set(targetId, new TargetState());
  8298. // Trigger removal for any documents currently mapped to this target.
  8299. // These removals will be part of the initial snapshot if Watch does not
  8300. // resend these documents.
  8301. const existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
  8302. existingKeys.forEach(key => {
  8303. this.removeDocumentFromTarget(targetId, key, /*updatedDocument=*/ null);
  8304. });
  8305. }
  8306. /**
  8307. * Returns whether the LocalStore considers the document to be part of the
  8308. * specified target.
  8309. */
  8310. targetContainsDocument(targetId, key) {
  8311. const existingKeys = this.metadataProvider.getRemoteKeysForTarget(targetId);
  8312. return existingKeys.has(key);
  8313. }
  8314. }
/** Creates an empty SortedMap from DocumentKey to the set of target ids. */
function documentTargetMap() {
    return new SortedMap(DocumentKey.comparator);
}
/** Creates an empty SortedMap keyed by DocumentKey for snapshot changes. */
function snapshotChangesMap() {
    return new SortedMap(DocumentKey.comparator);
}
  8321. /**
  8322. * @license
  8323. * Copyright 2017 Google LLC
  8324. *
  8325. * Licensed under the Apache License, Version 2.0 (the "License");
  8326. * you may not use this file except in compliance with the License.
  8327. * You may obtain a copy of the License at
  8328. *
  8329. * http://www.apache.org/licenses/LICENSE-2.0
  8330. *
  8331. * Unless required by applicable law or agreed to in writing, software
  8332. * distributed under the License is distributed on an "AS IS" BASIS,
  8333. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  8334. * See the License for the specific language governing permissions and
  8335. * limitations under the License.
  8336. */
  8337. const DIRECTIONS = (() => {
  8338. const dirs = {};
  8339. dirs["asc" /* Direction.ASCENDING */] = 'ASCENDING';
  8340. dirs["desc" /* Direction.DESCENDING */] = 'DESCENDING';
  8341. return dirs;
  8342. })();
  8343. const OPERATORS = (() => {
  8344. const ops = {};
  8345. ops["<" /* Operator.LESS_THAN */] = 'LESS_THAN';
  8346. ops["<=" /* Operator.LESS_THAN_OR_EQUAL */] = 'LESS_THAN_OR_EQUAL';
  8347. ops[">" /* Operator.GREATER_THAN */] = 'GREATER_THAN';
  8348. ops[">=" /* Operator.GREATER_THAN_OR_EQUAL */] = 'GREATER_THAN_OR_EQUAL';
  8349. ops["==" /* Operator.EQUAL */] = 'EQUAL';
  8350. ops["!=" /* Operator.NOT_EQUAL */] = 'NOT_EQUAL';
  8351. ops["array-contains" /* Operator.ARRAY_CONTAINS */] = 'ARRAY_CONTAINS';
  8352. ops["in" /* Operator.IN */] = 'IN';
  8353. ops["not-in" /* Operator.NOT_IN */] = 'NOT_IN';
  8354. ops["array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */] = 'ARRAY_CONTAINS_ANY';
  8355. return ops;
  8356. })();
  8357. const COMPOSITE_OPERATORS = (() => {
  8358. const ops = {};
  8359. ops["and" /* CompositeOperator.AND */] = 'AND';
  8360. ops["or" /* CompositeOperator.OR */] = 'OR';
  8361. return ops;
  8362. })();
/**
 * Intentionally a no-op: this is the release-build stub of a debug-only
 * presence assertion. The parameters are kept so call sites (which pass the
 * value and a description) remain valid; no checking happens at runtime.
 */
function assertPresent(value, description) {
}
  8365. /**
  8366. * This class generates JsonObject values for the Datastore API suitable for
  8367. * sending to either GRPC stub methods or via the JSON/HTTP REST API.
  8368. *
  8369. * The serializer supports both Protobuf.js and Proto3 JSON formats. By
  8370. * setting `useProto3Json` to true, the serializer will use the Proto3 JSON
  8371. * format.
  8372. *
  8373. * For a description of the Proto3 JSON format check
  8374. * https://developers.google.com/protocol-buffers/docs/proto3#json
  8375. *
  8376. * TODO(klimt): We can remove the databaseId argument if we keep the full
  8377. * resource name in documents.
  8378. */
  8379. class JsonProtoSerializer {
  8380. constructor(databaseId, useProto3Json) {
  8381. this.databaseId = databaseId;
  8382. this.useProto3Json = useProto3Json;
  8383. }
  8384. }
  8385. function fromRpcStatus(status) {
  8386. const code = status.code === undefined ? Code.UNKNOWN : mapCodeFromRpcCode(status.code);
  8387. return new FirestoreError(code, status.message || '');
  8388. }
  8389. /**
  8390. * Returns a value for a number (or null) that's appropriate to put into
  8391. * a google.protobuf.Int32Value proto.
  8392. * DO NOT USE THIS FOR ANYTHING ELSE.
  8393. * This method cheats. It's typed as returning "number" because that's what
  8394. * our generated proto interfaces say Int32Value must be. But GRPC actually
  8395. * expects a { value: <number> } struct.
  8396. */
  8397. function toInt32Proto(serializer, val) {
  8398. if (serializer.useProto3Json || isNullOrUndefined(val)) {
  8399. return val;
  8400. }
  8401. else {
  8402. return { value: val };
  8403. }
  8404. }
  8405. /**
  8406. * Returns a number (or null) from a google.protobuf.Int32Value proto.
  8407. */
  8408. function fromInt32Proto(val) {
  8409. let result;
  8410. if (typeof val === 'object') {
  8411. result = val.value;
  8412. }
  8413. else {
  8414. result = val;
  8415. }
  8416. return isNullOrUndefined(result) ? null : result;
  8417. }
  8418. /**
  8419. * Returns a value for a Date that's appropriate to put into a proto.
  8420. */
  8421. function toTimestamp(serializer, timestamp) {
  8422. if (serializer.useProto3Json) {
  8423. // Serialize to ISO-8601 date format, but with full nano resolution.
  8424. // Since JS Date has only millis, let's only use it for the seconds and
  8425. // then manually add the fractions to the end.
  8426. const jsDateStr = new Date(timestamp.seconds * 1000).toISOString();
  8427. // Remove .xxx frac part and Z in the end.
  8428. const strUntilSeconds = jsDateStr.replace(/\.\d*/, '').replace('Z', '');
  8429. // Pad the fraction out to 9 digits (nanos).
  8430. const nanoStr = ('000000000' + timestamp.nanoseconds).slice(-9);
  8431. return `${strUntilSeconds}.${nanoStr}Z`;
  8432. }
  8433. else {
  8434. return {
  8435. seconds: '' + timestamp.seconds,
  8436. nanos: timestamp.nanoseconds
  8437. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  8438. };
  8439. }
  8440. }
/** Converts a proto timestamp (string or struct) into a Timestamp model. */
function fromTimestamp(date) {
    const timestamp = normalizeTimestamp(date);
    return new Timestamp(timestamp.seconds, timestamp.nanos);
}
  8445. /**
  8446. * Returns a value for bytes that's appropriate to put in a proto.
  8447. *
  8448. * Visible for testing.
  8449. */
  8450. function toBytes(serializer, bytes) {
  8451. if (serializer.useProto3Json) {
  8452. return bytes.toBase64();
  8453. }
  8454. else {
  8455. return bytes.toUint8Array();
  8456. }
  8457. }
  8458. /**
  8459. * Returns a ByteString based on the proto string value.
  8460. */
  8461. function fromBytes(serializer, value) {
  8462. if (serializer.useProto3Json) {
  8463. hardAssert(value === undefined || typeof value === 'string');
  8464. return ByteString.fromBase64String(value ? value : '');
  8465. }
  8466. else {
  8467. hardAssert(value === undefined || value instanceof Uint8Array);
  8468. return ByteString.fromUint8Array(value ? value : new Uint8Array());
  8469. }
  8470. }
/** Serializes a SnapshotVersion into a proto timestamp value. */
function toVersion(serializer, version) {
    return toTimestamp(serializer, version.toTimestamp());
}
/** Deserializes a proto timestamp into a SnapshotVersion; version must be set. */
function fromVersion(version) {
    hardAssert(!!version);
    return SnapshotVersion.fromTimestamp(fromTimestamp(version));
}
  8478. function toResourceName(databaseId, path) {
  8479. return fullyQualifiedPrefixPath(databaseId)
  8480. .child('documents')
  8481. .child(path)
  8482. .canonicalString();
  8483. }
/** Parses a resource-name string into a ResourcePath, validating its shape. */
function fromResourceName(name) {
    const resource = ResourcePath.fromString(name);
    hardAssert(isValidResourceName(resource));
    return resource;
}
/** Serializes a DocumentKey into its fully qualified resource name. */
function toName(serializer, key) {
    return toResourceName(serializer.databaseId, key.path);
}
  8492. function fromName(serializer, name) {
  8493. const resource = fromResourceName(name);
  8494. if (resource.get(1) !== serializer.databaseId.projectId) {
  8495. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different project: ' +
  8496. resource.get(1) +
  8497. ' vs ' +
  8498. serializer.databaseId.projectId);
  8499. }
  8500. if (resource.get(3) !== serializer.databaseId.database) {
  8501. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Tried to deserialize key from different database: ' +
  8502. resource.get(3) +
  8503. ' vs ' +
  8504. serializer.databaseId.database);
  8505. }
  8506. return new DocumentKey(extractLocalPathFromResourceName(resource));
  8507. }
/** Serializes a query path into a fully qualified resource name. */
function toQueryPath(serializer, path) {
    return toResourceName(serializer.databaseId, path);
}
/** Deserializes a resource name into the local query path. */
function fromQueryPath(name) {
    const resourceName = fromResourceName(name);
    // In v1beta1 queries for collections at the root did not have a trailing
    // "/documents". In v1 all resource paths contain "/documents". Preserve the
    // ability to read the v1beta1 form for compatibility with queries persisted
    // in the local target cache.
    if (resourceName.length === 4) {
        return ResourcePath.emptyPath();
    }
    return extractLocalPathFromResourceName(resourceName);
}
  8522. function getEncodedDatabaseId(serializer) {
  8523. const path = new ResourcePath([
  8524. 'projects',
  8525. serializer.databaseId.projectId,
  8526. 'databases',
  8527. serializer.databaseId.database
  8528. ]);
  8529. return path.canonicalString();
  8530. }
/** Builds the `projects/<p>/databases/<d>` ResourcePath prefix. */
function fullyQualifiedPrefixPath(databaseId) {
    return new ResourcePath([
        'projects',
        databaseId.projectId,
        'databases',
        databaseId.database
    ]);
}
/**
 * Strips the `projects/<p>/databases/<d>/documents` prefix (5 segments) from a
 * resource name; asserts segment 4 is the literal 'documents'.
 */
function extractLocalPathFromResourceName(resourceName) {
    hardAssert(resourceName.length > 4 && resourceName.get(4) === 'documents');
    return resourceName.popFirst(5);
}
  8543. /** Creates a Document proto from key and fields (but no create/update time) */
/** Creates a Document proto from key and fields (but no create/update time). */
function toMutationDocument(serializer, key, fields) {
    return {
        name: toName(serializer, key),
        fields: fields.value.mapValue.fields
    };
}
/** Serializes a full document (key, fields, update/create times) into a proto. */
function toDocument(serializer, document) {
    return {
        name: toName(serializer, document.key),
        fields: document.data.value.mapValue.fields,
        updateTime: toTimestamp(serializer, document.version.toTimestamp()),
        createTime: toTimestamp(serializer, document.createTime.toTimestamp())
    };
}
  8558. function fromDocument(serializer, document, hasCommittedMutations) {
  8559. const key = fromName(serializer, document.name);
  8560. const version = fromVersion(document.updateTime);
  8561. // If we read a document from persistence that is missing createTime, it's due
  8562. // to older SDK versions not storing this information. In such cases, we'll
  8563. // set the createTime to zero. This can be removed in the long term.
  8564. const createTime = document.createTime
  8565. ? fromVersion(document.createTime)
  8566. : SnapshotVersion.min();
  8567. const data = new ObjectValue({ mapValue: { fields: document.fields } });
  8568. const result = MutableDocument.newFoundDocument(key, version, createTime, data);
  8569. if (hasCommittedMutations) {
  8570. result.setHasCommittedMutations();
  8571. }
  8572. return hasCommittedMutations ? result.setHasCommittedMutations() : result;
  8573. }
/** Deserializes the `found` branch of a BatchGetDocuments response. */
function fromFound(serializer, doc) {
    hardAssert(!!doc.found);
    assertPresent(doc.found.name);
    assertPresent(doc.found.updateTime);
    const key = fromName(serializer, doc.found.name);
    const version = fromVersion(doc.found.updateTime);
    // Older protos may lack createTime; default to the zero version.
    const createTime = doc.found.createTime
        ? fromVersion(doc.found.createTime)
        : SnapshotVersion.min();
    const data = new ObjectValue({ mapValue: { fields: doc.found.fields } });
    return MutableDocument.newFoundDocument(key, version, createTime, data);
}
/** Deserializes the `missing` branch of a BatchGetDocuments response. */
function fromMissing(serializer, result) {
    hardAssert(!!result.missing);
    hardAssert(!!result.readTime);
    const key = fromName(serializer, result.missing);
    const version = fromVersion(result.readTime);
    return MutableDocument.newNoDocument(key, version);
}
/**
 * Deserializes a BatchGetDocuments response into a MutableDocument;
 * fails hard if the response is neither `found` nor `missing`.
 */
function fromBatchGetDocumentsResponse(serializer, result) {
    if ('found' in result) {
        return fromFound(serializer, result);
    }
    else if ('missing' in result) {
        return fromMissing(serializer, result);
    }
    return fail();
}
/**
 * Deserializes a ListenResponse proto into one of the WatchChange models:
 * WatchTargetChange, DocumentWatchChange, or ExistenceFilterChange.
 * Fails hard on an unrecognized change shape.
 */
function fromWatchChange(serializer, change) {
    let watchChange;
    if ('targetChange' in change) {
        assertPresent(change.targetChange);
        // proto3 default value is unset in JSON (undefined), so use 'NO_CHANGE'
        // if unset
        const state = fromWatchTargetChangeState(change.targetChange.targetChangeType || 'NO_CHANGE');
        const targetIds = change.targetChange.targetIds || [];
        const resumeToken = fromBytes(serializer, change.targetChange.resumeToken);
        const causeProto = change.targetChange.cause;
        const cause = causeProto && fromRpcStatus(causeProto);
        watchChange = new WatchTargetChange(state, targetIds, resumeToken, cause || null);
    }
    else if ('documentChange' in change) {
        // A document was added/modified within one or more targets.
        assertPresent(change.documentChange);
        const entityChange = change.documentChange;
        assertPresent(entityChange.document);
        assertPresent(entityChange.document.name);
        assertPresent(entityChange.document.updateTime);
        const key = fromName(serializer, entityChange.document.name);
        const version = fromVersion(entityChange.document.updateTime);
        // Older protos may lack createTime; default to the zero version.
        const createTime = entityChange.document.createTime
            ? fromVersion(entityChange.document.createTime)
            : SnapshotVersion.min();
        const data = new ObjectValue({
            mapValue: { fields: entityChange.document.fields }
        });
        const doc = MutableDocument.newFoundDocument(key, version, createTime, data);
        const updatedTargetIds = entityChange.targetIds || [];
        const removedTargetIds = entityChange.removedTargetIds || [];
        watchChange = new DocumentWatchChange(updatedTargetIds, removedTargetIds, doc.key, doc);
    }
    else if ('documentDelete' in change) {
        // A document was deleted; synthesize a no-document at its read time.
        assertPresent(change.documentDelete);
        const docDelete = change.documentDelete;
        assertPresent(docDelete.document);
        const key = fromName(serializer, docDelete.document);
        const version = docDelete.readTime
            ? fromVersion(docDelete.readTime)
            : SnapshotVersion.min();
        const doc = MutableDocument.newNoDocument(key, version);
        const removedTargetIds = docDelete.removedTargetIds || [];
        watchChange = new DocumentWatchChange([], removedTargetIds, doc.key, doc);
    }
    else if ('documentRemove' in change) {
        // The document still exists but no longer matches the given targets.
        assertPresent(change.documentRemove);
        const docRemove = change.documentRemove;
        assertPresent(docRemove.document);
        const key = fromName(serializer, docRemove.document);
        const removedTargetIds = docRemove.removedTargetIds || [];
        watchChange = new DocumentWatchChange([], removedTargetIds, key, null);
    }
    else if ('filter' in change) {
        // TODO(dimond): implement existence filter parsing with strategy.
        assertPresent(change.filter);
        const filter = change.filter;
        assertPresent(filter.targetId);
        const count = filter.count || 0;
        const existenceFilter = new ExistenceFilter(count);
        const targetId = filter.targetId;
        watchChange = new ExistenceFilterChange(targetId, existenceFilter);
    }
    else {
        return fail();
    }
    return watchChange;
}
  8669. function fromWatchTargetChangeState(state) {
  8670. if (state === 'NO_CHANGE') {
  8671. return 0 /* WatchTargetChangeState.NoChange */;
  8672. }
  8673. else if (state === 'ADD') {
  8674. return 1 /* WatchTargetChangeState.Added */;
  8675. }
  8676. else if (state === 'REMOVE') {
  8677. return 2 /* WatchTargetChangeState.Removed */;
  8678. }
  8679. else if (state === 'CURRENT') {
  8680. return 3 /* WatchTargetChangeState.Current */;
  8681. }
  8682. else if (state === 'RESET') {
  8683. return 4 /* WatchTargetChangeState.Reset */;
  8684. }
  8685. else {
  8686. return fail();
  8687. }
  8688. }
/**
 * Extracts the consistent snapshot version from a ListenResponse, or
 * SnapshotVersion.min() when the response does not represent one.
 */
function versionFromListenResponse(change) {
    // We have only reached a consistent snapshot for the entire stream if there
    // is a read_time set and it applies to all targets (i.e. the list of
    // targets is empty). The backend is guaranteed to send such responses.
    if (!('targetChange' in change)) {
        return SnapshotVersion.min();
    }
    const targetChange = change.targetChange;
    if (targetChange.targetIds && targetChange.targetIds.length) {
        // A non-empty target list means the read time is target-specific.
        return SnapshotVersion.min();
    }
    if (!targetChange.readTime) {
        return SnapshotVersion.min();
    }
    return fromVersion(targetChange.readTime);
}
/**
 * Serializes a local Mutation model (Set/Delete/Patch/Verify) into a Write
 * proto, including any field transforms and precondition.
 */
function toMutation(serializer, mutation) {
    let result;
    if (mutation instanceof SetMutation) {
        result = {
            update: toMutationDocument(serializer, mutation.key, mutation.value)
        };
    }
    else if (mutation instanceof DeleteMutation) {
        result = { delete: toName(serializer, mutation.key) };
    }
    else if (mutation instanceof PatchMutation) {
        // A patch is an update plus a field mask restricting which fields apply.
        result = {
            update: toMutationDocument(serializer, mutation.key, mutation.data),
            updateMask: toDocumentMask(mutation.fieldMask)
        };
    }
    else if (mutation instanceof VerifyMutation) {
        result = {
            verify: toName(serializer, mutation.key)
        };
    }
    else {
        return fail();
    }
    if (mutation.fieldTransforms.length > 0) {
        result.updateTransforms = mutation.fieldTransforms.map(transform => toFieldTransform(serializer, transform));
    }
    if (!mutation.precondition.isNone) {
        result.currentDocument = toPrecondition(serializer, mutation.precondition);
    }
    return result;
}
/**
 * Deserializes a Write proto back into the corresponding Mutation model.
 * An `update` with an updateMask becomes a PatchMutation, without one a
 * SetMutation; `delete` and `verify` map to their mutation types.
 */
function fromMutation(serializer, proto) {
    const precondition = proto.currentDocument
        ? fromPrecondition(proto.currentDocument)
        : Precondition.none();
    const fieldTransforms = proto.updateTransforms
        ? proto.updateTransforms.map(transform => fromFieldTransform(serializer, transform))
        : [];
    if (proto.update) {
        assertPresent(proto.update.name);
        const key = fromName(serializer, proto.update.name);
        const value = new ObjectValue({
            mapValue: { fields: proto.update.fields }
        });
        if (proto.updateMask) {
            const fieldMask = fromDocumentMask(proto.updateMask);
            return new PatchMutation(key, value, fieldMask, precondition, fieldTransforms);
        }
        else {
            return new SetMutation(key, value, precondition, fieldTransforms);
        }
    }
    else if (proto.delete) {
        const key = fromName(serializer, proto.delete);
        return new DeleteMutation(key, precondition);
    }
    else if (proto.verify) {
        const key = fromName(serializer, proto.verify);
        return new VerifyMutation(key, precondition);
    }
    else {
        return fail();
    }
}
  8770. function toPrecondition(serializer, precondition) {
  8771. if (precondition.updateTime !== undefined) {
  8772. return {
  8773. updateTime: toVersion(serializer, precondition.updateTime)
  8774. };
  8775. }
  8776. else if (precondition.exists !== undefined) {
  8777. return { exists: precondition.exists };
  8778. }
  8779. else {
  8780. return fail();
  8781. }
  8782. }
  8783. function fromPrecondition(precondition) {
  8784. if (precondition.updateTime !== undefined) {
  8785. return Precondition.updateTime(fromVersion(precondition.updateTime));
  8786. }
  8787. else if (precondition.exists !== undefined) {
  8788. return Precondition.exists(precondition.exists);
  8789. }
  8790. else {
  8791. return Precondition.none();
  8792. }
  8793. }
/**
 * Deserializes a single WriteResult proto into a MutationResult, falling back
 * to the commit time when no updateTime is present.
 */
function fromWriteResult(proto, commitTime) {
    // NOTE: Deletes don't have an updateTime.
    let version = proto.updateTime
        ? fromVersion(proto.updateTime)
        : fromVersion(commitTime);
    if (version.isEqual(SnapshotVersion.min())) {
        // The Firestore Emulator currently returns an update time of 0 for
        // deletes of non-existing documents (rather than null). This breaks the
        // test "get deleted doc while offline with source=cache" as NoDocuments
        // with version 0 are filtered by IndexedDb's RemoteDocumentCache.
        // TODO(#2149): Remove this when Emulator is fixed
        version = fromVersion(commitTime);
    }
    return new MutationResult(version, proto.transformResults || []);
}
  8809. function fromWriteResults(protos, commitTime) {
  8810. if (protos && protos.length > 0) {
  8811. hardAssert(commitTime !== undefined);
  8812. return protos.map(proto => fromWriteResult(proto, commitTime));
  8813. }
  8814. else {
  8815. return [];
  8816. }
  8817. }
/**
 * Serializes a FieldTransform model into its FieldTransform proto, dispatching
 * on the concrete transform operation; throws on an unknown operation.
 */
function toFieldTransform(serializer, fieldTransform) {
    const transform = fieldTransform.transform;
    if (transform instanceof ServerTimestampTransform) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            setToServerValue: 'REQUEST_TIME'
        };
    }
    else if (transform instanceof ArrayUnionTransformOperation) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            appendMissingElements: {
                values: transform.elements
            }
        };
    }
    else if (transform instanceof ArrayRemoveTransformOperation) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            removeAllFromArray: {
                values: transform.elements
            }
        };
    }
    else if (transform instanceof NumericIncrementTransformOperation) {
        return {
            fieldPath: fieldTransform.field.canonicalString(),
            increment: transform.operand
        };
    }
    else {
        throw fail();
    }
}
/**
 * Deserializes a FieldTransform proto into the FieldTransform model,
 * dispatching on which transform field is present; fails hard on an
 * unrecognized shape.
 */
function fromFieldTransform(serializer, proto) {
    let transform = null;
    if ('setToServerValue' in proto) {
        // REQUEST_TIME is the only supported server value.
        hardAssert(proto.setToServerValue === 'REQUEST_TIME');
        transform = new ServerTimestampTransform();
    }
    else if ('appendMissingElements' in proto) {
        const values = proto.appendMissingElements.values || [];
        transform = new ArrayUnionTransformOperation(values);
    }
    else if ('removeAllFromArray' in proto) {
        const values = proto.removeAllFromArray.values || [];
        transform = new ArrayRemoveTransformOperation(values);
    }
    else if ('increment' in proto) {
        transform = new NumericIncrementTransformOperation(serializer, proto.increment);
    }
    else {
        fail();
    }
    const fieldPath = FieldPath$1.fromServerFormat(proto.fieldPath);
    return new FieldTransform(fieldPath, transform);
}
/** Serializes a single-document target into a DocumentsTarget proto. */
function toDocumentsTarget(serializer, target) {
    return { documents: [toQueryPath(serializer, target.path)] };
}
/**
 * Deserializes a DocumentsTarget proto into a Target; exactly one document
 * name is supported.
 */
function fromDocumentsTarget(documentsTarget) {
    const count = documentsTarget.documents.length;
    hardAssert(count === 1);
    const name = documentsTarget.documents[0];
    return queryToTarget(newQueryForPath(fromQueryPath(name)));
}
/**
 * Serializes a query Target into a QueryTarget proto with a `parent` resource
 * name and a `structuredQuery` (from/where/orderBy/limit/startAt/endAt).
 */
function toQueryTarget(serializer, target) {
    // Dissect the path into parent, collectionId, and optional key filter.
    const result = { structuredQuery: {} };
    const path = target.path;
    if (target.collectionGroup !== null) {
        // Collection-group queries search all descendants for the collection id.
        result.parent = toQueryPath(serializer, path);
        result.structuredQuery.from = [
            {
                collectionId: target.collectionGroup,
                allDescendants: true
            }
        ];
    }
    else {
        // Regular queries: parent is the path minus the trailing collection id.
        result.parent = toQueryPath(serializer, path.popLast());
        result.structuredQuery.from = [{ collectionId: path.lastSegment() }];
    }
    const where = toFilters(target.filters);
    if (where) {
        result.structuredQuery.where = where;
    }
    const orderBy = toOrder(target.orderBy);
    if (orderBy) {
        result.structuredQuery.orderBy = orderBy;
    }
    const limit = toInt32Proto(serializer, target.limit);
    if (limit !== null) {
        result.structuredQuery.limit = limit;
    }
    if (target.startAt) {
        result.structuredQuery.startAt = toStartAtCursor(target.startAt);
    }
    if (target.endAt) {
        result.structuredQuery.endAt = toEndAtCursor(target.endAt);
    }
    return result;
}
  8921. function toRunAggregationQueryRequest(serializer, target) {
  8922. const queryTarget = toQueryTarget(serializer, target);
  8923. return {
  8924. structuredAggregationQuery: {
  8925. aggregations: [
  8926. {
  8927. count: {},
  8928. alias: 'count_alias'
  8929. }
  8930. ],
  8931. structuredQuery: queryTarget.structuredQuery
  8932. },
  8933. parent: queryTarget.parent
  8934. };
  8935. }
  8936. function convertQueryTargetToQuery(target) {
  8937. let path = fromQueryPath(target.parent);
  8938. const query = target.structuredQuery;
  8939. const fromCount = query.from ? query.from.length : 0;
  8940. let collectionGroup = null;
  8941. if (fromCount > 0) {
  8942. hardAssert(fromCount === 1);
  8943. const from = query.from[0];
  8944. if (from.allDescendants) {
  8945. collectionGroup = from.collectionId;
  8946. }
  8947. else {
  8948. path = path.child(from.collectionId);
  8949. }
  8950. }
  8951. let filterBy = [];
  8952. if (query.where) {
  8953. filterBy = fromFilters(query.where);
  8954. }
  8955. let orderBy = [];
  8956. if (query.orderBy) {
  8957. orderBy = fromOrder(query.orderBy);
  8958. }
  8959. let limit = null;
  8960. if (query.limit) {
  8961. limit = fromInt32Proto(query.limit);
  8962. }
  8963. let startAt = null;
  8964. if (query.startAt) {
  8965. startAt = fromStartAtCursor(query.startAt);
  8966. }
  8967. let endAt = null;
  8968. if (query.endAt) {
  8969. endAt = fromEndAtCursor(query.endAt);
  8970. }
  8971. return newQuery(path, collectionGroup, orderBy, filterBy, limit, "F" /* LimitType.First */, startAt, endAt);
  8972. }
  8973. function fromQueryTarget(target) {
  8974. return queryToTarget(convertQueryTargetToQuery(target));
  8975. }
  8976. function toListenRequestLabels(serializer, targetData) {
  8977. const value = toLabel(serializer, targetData.purpose);
  8978. if (value == null) {
  8979. return null;
  8980. }
  8981. else {
  8982. return {
  8983. 'goog-listen-tags': value
  8984. };
  8985. }
  8986. }
  8987. function toLabel(serializer, purpose) {
  8988. switch (purpose) {
  8989. case 0 /* TargetPurpose.Listen */:
  8990. return null;
  8991. case 1 /* TargetPurpose.ExistenceFilterMismatch */:
  8992. return 'existence-filter-mismatch';
  8993. case 2 /* TargetPurpose.LimboResolution */:
  8994. return 'limbo-document';
  8995. default:
  8996. return fail();
  8997. }
  8998. }
  8999. function toTarget(serializer, targetData) {
  9000. let result;
  9001. const target = targetData.target;
  9002. if (targetIsDocumentTarget(target)) {
  9003. result = { documents: toDocumentsTarget(serializer, target) };
  9004. }
  9005. else {
  9006. result = { query: toQueryTarget(serializer, target) };
  9007. }
  9008. result.targetId = targetData.targetId;
  9009. if (targetData.resumeToken.approximateByteSize() > 0) {
  9010. result.resumeToken = toBytes(serializer, targetData.resumeToken);
  9011. }
  9012. else if (targetData.snapshotVersion.compareTo(SnapshotVersion.min()) > 0) {
  9013. // TODO(wuandy): Consider removing above check because it is most likely true.
  9014. // Right now, many tests depend on this behaviour though (leaving min() out
  9015. // of serialization).
  9016. result.readTime = toTimestamp(serializer, targetData.snapshotVersion.toTimestamp());
  9017. }
  9018. return result;
  9019. }
  9020. function toFilters(filters) {
  9021. if (filters.length === 0) {
  9022. return;
  9023. }
  9024. return toFilter(CompositeFilter.create(filters, "and" /* CompositeOperator.AND */));
  9025. }
  9026. function fromFilters(filter) {
  9027. const result = fromFilter(filter);
  9028. if (result instanceof CompositeFilter &&
  9029. compositeFilterIsFlatConjunction(result)) {
  9030. return result.getFilters();
  9031. }
  9032. return [result];
  9033. }
  9034. function fromFilter(filter) {
  9035. if (filter.unaryFilter !== undefined) {
  9036. return fromUnaryFilter(filter);
  9037. }
  9038. else if (filter.fieldFilter !== undefined) {
  9039. return fromFieldFilter(filter);
  9040. }
  9041. else if (filter.compositeFilter !== undefined) {
  9042. return fromCompositeFilter(filter);
  9043. }
  9044. else {
  9045. return fail();
  9046. }
  9047. }
  9048. function toOrder(orderBys) {
  9049. if (orderBys.length === 0) {
  9050. return;
  9051. }
  9052. return orderBys.map(order => toPropertyOrder(order));
  9053. }
  9054. function fromOrder(orderBys) {
  9055. return orderBys.map(order => fromPropertyOrder(order));
  9056. }
  9057. function toStartAtCursor(cursor) {
  9058. return {
  9059. before: cursor.inclusive,
  9060. values: cursor.position
  9061. };
  9062. }
  9063. function toEndAtCursor(cursor) {
  9064. return {
  9065. before: !cursor.inclusive,
  9066. values: cursor.position
  9067. };
  9068. }
  9069. function fromStartAtCursor(cursor) {
  9070. const inclusive = !!cursor.before;
  9071. const position = cursor.values || [];
  9072. return new Bound(position, inclusive);
  9073. }
  9074. function fromEndAtCursor(cursor) {
  9075. const inclusive = !cursor.before;
  9076. const position = cursor.values || [];
  9077. return new Bound(position, inclusive);
  9078. }
  9079. // visible for testing
  9080. function toDirection(dir) {
  9081. return DIRECTIONS[dir];
  9082. }
  9083. // visible for testing
  9084. function fromDirection(dir) {
  9085. switch (dir) {
  9086. case 'ASCENDING':
  9087. return "asc" /* Direction.ASCENDING */;
  9088. case 'DESCENDING':
  9089. return "desc" /* Direction.DESCENDING */;
  9090. default:
  9091. return undefined;
  9092. }
  9093. }
  9094. // visible for testing
  9095. function toOperatorName(op) {
  9096. return OPERATORS[op];
  9097. }
  9098. function toCompositeOperatorName(op) {
  9099. return COMPOSITE_OPERATORS[op];
  9100. }
  9101. function fromOperatorName(op) {
  9102. switch (op) {
  9103. case 'EQUAL':
  9104. return "==" /* Operator.EQUAL */;
  9105. case 'NOT_EQUAL':
  9106. return "!=" /* Operator.NOT_EQUAL */;
  9107. case 'GREATER_THAN':
  9108. return ">" /* Operator.GREATER_THAN */;
  9109. case 'GREATER_THAN_OR_EQUAL':
  9110. return ">=" /* Operator.GREATER_THAN_OR_EQUAL */;
  9111. case 'LESS_THAN':
  9112. return "<" /* Operator.LESS_THAN */;
  9113. case 'LESS_THAN_OR_EQUAL':
  9114. return "<=" /* Operator.LESS_THAN_OR_EQUAL */;
  9115. case 'ARRAY_CONTAINS':
  9116. return "array-contains" /* Operator.ARRAY_CONTAINS */;
  9117. case 'IN':
  9118. return "in" /* Operator.IN */;
  9119. case 'NOT_IN':
  9120. return "not-in" /* Operator.NOT_IN */;
  9121. case 'ARRAY_CONTAINS_ANY':
  9122. return "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
  9123. case 'OPERATOR_UNSPECIFIED':
  9124. return fail();
  9125. default:
  9126. return fail();
  9127. }
  9128. }
  9129. function fromCompositeOperatorName(op) {
  9130. switch (op) {
  9131. case 'AND':
  9132. return "and" /* CompositeOperator.AND */;
  9133. case 'OR':
  9134. return "or" /* CompositeOperator.OR */;
  9135. default:
  9136. return fail();
  9137. }
  9138. }
  9139. function toFieldPathReference(path) {
  9140. return { fieldPath: path.canonicalString() };
  9141. }
  9142. function fromFieldPathReference(fieldReference) {
  9143. return FieldPath$1.fromServerFormat(fieldReference.fieldPath);
  9144. }
  9145. // visible for testing
  9146. function toPropertyOrder(orderBy) {
  9147. return {
  9148. field: toFieldPathReference(orderBy.field),
  9149. direction: toDirection(orderBy.dir)
  9150. };
  9151. }
  9152. function fromPropertyOrder(orderBy) {
  9153. return new OrderBy(fromFieldPathReference(orderBy.field), fromDirection(orderBy.direction));
  9154. }
  9155. // visible for testing
  9156. function toFilter(filter) {
  9157. if (filter instanceof FieldFilter) {
  9158. return toUnaryOrFieldFilter(filter);
  9159. }
  9160. else if (filter instanceof CompositeFilter) {
  9161. return toCompositeFilter(filter);
  9162. }
  9163. else {
  9164. return fail();
  9165. }
  9166. }
  9167. function toCompositeFilter(filter) {
  9168. const protos = filter.getFilters().map(filter => toFilter(filter));
  9169. if (protos.length === 1) {
  9170. return protos[0];
  9171. }
  9172. return {
  9173. compositeFilter: {
  9174. op: toCompositeOperatorName(filter.op),
  9175. filters: protos
  9176. }
  9177. };
  9178. }
  9179. function toUnaryOrFieldFilter(filter) {
  9180. if (filter.op === "==" /* Operator.EQUAL */) {
  9181. if (isNanValue(filter.value)) {
  9182. return {
  9183. unaryFilter: {
  9184. field: toFieldPathReference(filter.field),
  9185. op: 'IS_NAN'
  9186. }
  9187. };
  9188. }
  9189. else if (isNullValue(filter.value)) {
  9190. return {
  9191. unaryFilter: {
  9192. field: toFieldPathReference(filter.field),
  9193. op: 'IS_NULL'
  9194. }
  9195. };
  9196. }
  9197. }
  9198. else if (filter.op === "!=" /* Operator.NOT_EQUAL */) {
  9199. if (isNanValue(filter.value)) {
  9200. return {
  9201. unaryFilter: {
  9202. field: toFieldPathReference(filter.field),
  9203. op: 'IS_NOT_NAN'
  9204. }
  9205. };
  9206. }
  9207. else if (isNullValue(filter.value)) {
  9208. return {
  9209. unaryFilter: {
  9210. field: toFieldPathReference(filter.field),
  9211. op: 'IS_NOT_NULL'
  9212. }
  9213. };
  9214. }
  9215. }
  9216. return {
  9217. fieldFilter: {
  9218. field: toFieldPathReference(filter.field),
  9219. op: toOperatorName(filter.op),
  9220. value: filter.value
  9221. }
  9222. };
  9223. }
  9224. function fromUnaryFilter(filter) {
  9225. switch (filter.unaryFilter.op) {
  9226. case 'IS_NAN':
  9227. const nanField = fromFieldPathReference(filter.unaryFilter.field);
  9228. return FieldFilter.create(nanField, "==" /* Operator.EQUAL */, {
  9229. doubleValue: NaN
  9230. });
  9231. case 'IS_NULL':
  9232. const nullField = fromFieldPathReference(filter.unaryFilter.field);
  9233. return FieldFilter.create(nullField, "==" /* Operator.EQUAL */, {
  9234. nullValue: 'NULL_VALUE'
  9235. });
  9236. case 'IS_NOT_NAN':
  9237. const notNanField = fromFieldPathReference(filter.unaryFilter.field);
  9238. return FieldFilter.create(notNanField, "!=" /* Operator.NOT_EQUAL */, {
  9239. doubleValue: NaN
  9240. });
  9241. case 'IS_NOT_NULL':
  9242. const notNullField = fromFieldPathReference(filter.unaryFilter.field);
  9243. return FieldFilter.create(notNullField, "!=" /* Operator.NOT_EQUAL */, {
  9244. nullValue: 'NULL_VALUE'
  9245. });
  9246. case 'OPERATOR_UNSPECIFIED':
  9247. return fail();
  9248. default:
  9249. return fail();
  9250. }
  9251. }
  9252. function fromFieldFilter(filter) {
  9253. return FieldFilter.create(fromFieldPathReference(filter.fieldFilter.field), fromOperatorName(filter.fieldFilter.op), filter.fieldFilter.value);
  9254. }
  9255. function fromCompositeFilter(filter) {
  9256. return CompositeFilter.create(filter.compositeFilter.filters.map(filter => fromFilter(filter)), fromCompositeOperatorName(filter.compositeFilter.op));
  9257. }
  9258. function toDocumentMask(fieldMask) {
  9259. const canonicalFields = [];
  9260. fieldMask.fields.forEach(field => canonicalFields.push(field.canonicalString()));
  9261. return {
  9262. fieldPaths: canonicalFields
  9263. };
  9264. }
  9265. function fromDocumentMask(proto) {
  9266. const paths = proto.fieldPaths || [];
  9267. return new FieldMask(paths.map(path => FieldPath$1.fromServerFormat(path)));
  9268. }
  9269. function isValidResourceName(path) {
  9270. // Resource names have at least 4 components (project ID, database ID)
  9271. return (path.length >= 4 &&
  9272. path.get(0) === 'projects' &&
  9273. path.get(2) === 'databases');
  9274. }
  9275. /**
  9276. * @license
  9277. * Copyright 2017 Google LLC
  9278. *
  9279. * Licensed under the Apache License, Version 2.0 (the "License");
  9280. * you may not use this file except in compliance with the License.
  9281. * You may obtain a copy of the License at
  9282. *
  9283. * http://www.apache.org/licenses/LICENSE-2.0
  9284. *
  9285. * Unless required by applicable law or agreed to in writing, software
  9286. * distributed under the License is distributed on an "AS IS" BASIS,
  9287. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9288. * See the License for the specific language governing permissions and
  9289. * limitations under the License.
  9290. */
  9291. /**
  9292. * An immutable set of metadata that the local store tracks for each target.
  9293. */
  9294. class TargetData {
  9295. constructor(
  9296. /** The target being listened to. */
  9297. target,
  9298. /**
  9299. * The target ID to which the target corresponds; Assigned by the
  9300. * LocalStore for user listens and by the SyncEngine for limbo watches.
  9301. */
  9302. targetId,
  9303. /** The purpose of the target. */
  9304. purpose,
  9305. /**
  9306. * The sequence number of the last transaction during which this target data
  9307. * was modified.
  9308. */
  9309. sequenceNumber,
  9310. /** The latest snapshot version seen for this target. */
  9311. snapshotVersion = SnapshotVersion.min(),
  9312. /**
  9313. * The maximum snapshot version at which the associated view
  9314. * contained no limbo documents.
  9315. */
  9316. lastLimboFreeSnapshotVersion = SnapshotVersion.min(),
  9317. /**
  9318. * An opaque, server-assigned token that allows watching a target to be
  9319. * resumed after disconnecting without retransmitting all the data that
  9320. * matches the target. The resume token essentially identifies a point in
  9321. * time from which the server should resume sending results.
  9322. */
  9323. resumeToken = ByteString.EMPTY_BYTE_STRING) {
  9324. this.target = target;
  9325. this.targetId = targetId;
  9326. this.purpose = purpose;
  9327. this.sequenceNumber = sequenceNumber;
  9328. this.snapshotVersion = snapshotVersion;
  9329. this.lastLimboFreeSnapshotVersion = lastLimboFreeSnapshotVersion;
  9330. this.resumeToken = resumeToken;
  9331. }
  9332. /** Creates a new target data instance with an updated sequence number. */
  9333. withSequenceNumber(sequenceNumber) {
  9334. return new TargetData(this.target, this.targetId, this.purpose, sequenceNumber, this.snapshotVersion, this.lastLimboFreeSnapshotVersion, this.resumeToken);
  9335. }
  9336. /**
  9337. * Creates a new target data instance with an updated resume token and
  9338. * snapshot version.
  9339. */
  9340. withResumeToken(resumeToken, snapshotVersion) {
  9341. return new TargetData(this.target, this.targetId, this.purpose, this.sequenceNumber, snapshotVersion, this.lastLimboFreeSnapshotVersion, resumeToken);
  9342. }
  9343. /**
  9344. * Creates a new target data instance with an updated last limbo free
  9345. * snapshot version number.
  9346. */
  9347. withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion) {
  9348. return new TargetData(this.target, this.targetId, this.purpose, this.sequenceNumber, this.snapshotVersion, lastLimboFreeSnapshotVersion, this.resumeToken);
  9349. }
  9350. }
  9351. /**
  9352. * @license
  9353. * Copyright 2017 Google LLC
  9354. *
  9355. * Licensed under the Apache License, Version 2.0 (the "License");
  9356. * you may not use this file except in compliance with the License.
  9357. * You may obtain a copy of the License at
  9358. *
  9359. * http://www.apache.org/licenses/LICENSE-2.0
  9360. *
  9361. * Unless required by applicable law or agreed to in writing, software
  9362. * distributed under the License is distributed on an "AS IS" BASIS,
  9363. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9364. * See the License for the specific language governing permissions and
  9365. * limitations under the License.
  9366. */
/** Serializer for values stored in the LocalStore. */
class LocalSerializer {
    /**
     * @param remoteSerializer - The wire serializer used to encode/decode the
     * nested proto payloads stored locally.
     */
    constructor(remoteSerializer) {
        this.remoteSerializer = remoteSerializer;
    }
}
  9373. /** Decodes a remote document from storage locally to a Document. */
  9374. function fromDbRemoteDocument(localSerializer, remoteDoc) {
  9375. let doc;
  9376. if (remoteDoc.document) {
  9377. doc = fromDocument(localSerializer.remoteSerializer, remoteDoc.document, !!remoteDoc.hasCommittedMutations);
  9378. }
  9379. else if (remoteDoc.noDocument) {
  9380. const key = DocumentKey.fromSegments(remoteDoc.noDocument.path);
  9381. const version = fromDbTimestamp(remoteDoc.noDocument.readTime);
  9382. doc = MutableDocument.newNoDocument(key, version);
  9383. if (remoteDoc.hasCommittedMutations) {
  9384. doc.setHasCommittedMutations();
  9385. }
  9386. }
  9387. else if (remoteDoc.unknownDocument) {
  9388. const key = DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
  9389. const version = fromDbTimestamp(remoteDoc.unknownDocument.version);
  9390. doc = MutableDocument.newUnknownDocument(key, version);
  9391. }
  9392. else {
  9393. return fail();
  9394. }
  9395. if (remoteDoc.readTime) {
  9396. doc.setReadTime(fromDbTimestampKey(remoteDoc.readTime));
  9397. }
  9398. return doc;
  9399. }
  9400. /** Encodes a document for storage locally. */
  9401. function toDbRemoteDocument(localSerializer, document) {
  9402. const key = document.key;
  9403. const remoteDoc = {
  9404. prefixPath: key.getCollectionPath().popLast().toArray(),
  9405. collectionGroup: key.collectionGroup,
  9406. documentId: key.path.lastSegment(),
  9407. readTime: toDbTimestampKey(document.readTime),
  9408. hasCommittedMutations: document.hasCommittedMutations
  9409. };
  9410. if (document.isFoundDocument()) {
  9411. remoteDoc.document = toDocument(localSerializer.remoteSerializer, document);
  9412. }
  9413. else if (document.isNoDocument()) {
  9414. remoteDoc.noDocument = {
  9415. path: key.path.toArray(),
  9416. readTime: toDbTimestamp(document.version)
  9417. };
  9418. }
  9419. else if (document.isUnknownDocument()) {
  9420. remoteDoc.unknownDocument = {
  9421. path: key.path.toArray(),
  9422. version: toDbTimestamp(document.version)
  9423. };
  9424. }
  9425. else {
  9426. return fail();
  9427. }
  9428. return remoteDoc;
  9429. }
  9430. function toDbTimestampKey(snapshotVersion) {
  9431. const timestamp = snapshotVersion.toTimestamp();
  9432. return [timestamp.seconds, timestamp.nanoseconds];
  9433. }
  9434. function fromDbTimestampKey(dbTimestampKey) {
  9435. const timestamp = new Timestamp(dbTimestampKey[0], dbTimestampKey[1]);
  9436. return SnapshotVersion.fromTimestamp(timestamp);
  9437. }
  9438. function toDbTimestamp(snapshotVersion) {
  9439. const timestamp = snapshotVersion.toTimestamp();
  9440. return { seconds: timestamp.seconds, nanoseconds: timestamp.nanoseconds };
  9441. }
  9442. function fromDbTimestamp(dbTimestamp) {
  9443. const timestamp = new Timestamp(dbTimestamp.seconds, dbTimestamp.nanoseconds);
  9444. return SnapshotVersion.fromTimestamp(timestamp);
  9445. }
  9446. /** Encodes a batch of mutations into a DbMutationBatch for local storage. */
  9447. function toDbMutationBatch(localSerializer, userId, batch) {
  9448. const serializedBaseMutations = batch.baseMutations.map(m => toMutation(localSerializer.remoteSerializer, m));
  9449. const serializedMutations = batch.mutations.map(m => toMutation(localSerializer.remoteSerializer, m));
  9450. return {
  9451. userId,
  9452. batchId: batch.batchId,
  9453. localWriteTimeMs: batch.localWriteTime.toMillis(),
  9454. baseMutations: serializedBaseMutations,
  9455. mutations: serializedMutations
  9456. };
  9457. }
/**
 * Decodes a DbMutationBatch into a MutationBatch.
 *
 * NOTE: mutates `dbBatch.mutations` in place while squashing legacy
 * `transform` entries (see loop below).
 */
function fromDbMutationBatch(localSerializer, dbBatch) {
    const baseMutations = (dbBatch.baseMutations || []).map(m => fromMutation(localSerializer.remoteSerializer, m));
    // Squash old transform mutations into existing patch or set mutations.
    // The replacement of representing `transforms` with `update_transforms`
    // on the SDK means that old `transform` mutations stored in IndexedDB need
    // to be updated to `update_transforms`.
    // TODO(b/174608374): Remove this code once we perform a schema migration.
    for (let i = 0; i < dbBatch.mutations.length - 1; ++i) {
        const currentMutation = dbBatch.mutations[i];
        // A legacy transform, if present, immediately follows the mutation it
        // applies to.
        const hasTransform = i + 1 < dbBatch.mutations.length &&
            dbBatch.mutations[i + 1].transform !== undefined;
        if (hasTransform) {
            // Fold the transform's fieldTransforms into the preceding mutation,
            // remove the standalone transform entry, and advance past the
            // element that shifted into the removed slot.
            const transformMutation = dbBatch.mutations[i + 1];
            currentMutation.updateTransforms =
                transformMutation.transform.fieldTransforms;
            dbBatch.mutations.splice(i + 1, 1);
            ++i;
        }
    }
    const mutations = dbBatch.mutations.map(m => fromMutation(localSerializer.remoteSerializer, m));
    const timestamp = Timestamp.fromMillis(dbBatch.localWriteTimeMs);
    return new MutationBatch(dbBatch.batchId, timestamp, baseMutations, mutations);
}
  9482. /** Decodes a DbTarget into TargetData */
  9483. function fromDbTarget(dbTarget) {
  9484. const version = fromDbTimestamp(dbTarget.readTime);
  9485. const lastLimboFreeSnapshotVersion = dbTarget.lastLimboFreeSnapshotVersion !== undefined
  9486. ? fromDbTimestamp(dbTarget.lastLimboFreeSnapshotVersion)
  9487. : SnapshotVersion.min();
  9488. let target;
  9489. if (isDocumentQuery(dbTarget.query)) {
  9490. target = fromDocumentsTarget(dbTarget.query);
  9491. }
  9492. else {
  9493. target = fromQueryTarget(dbTarget.query);
  9494. }
  9495. return new TargetData(target, dbTarget.targetId, 0 /* TargetPurpose.Listen */, dbTarget.lastListenSequenceNumber, version, lastLimboFreeSnapshotVersion, ByteString.fromBase64String(dbTarget.resumeToken));
  9496. }
  9497. /** Encodes TargetData into a DbTarget for storage locally. */
  9498. function toDbTarget(localSerializer, targetData) {
  9499. const dbTimestamp = toDbTimestamp(targetData.snapshotVersion);
  9500. const dbLastLimboFreeTimestamp = toDbTimestamp(targetData.lastLimboFreeSnapshotVersion);
  9501. let queryProto;
  9502. if (targetIsDocumentTarget(targetData.target)) {
  9503. queryProto = toDocumentsTarget(localSerializer.remoteSerializer, targetData.target);
  9504. }
  9505. else {
  9506. queryProto = toQueryTarget(localSerializer.remoteSerializer, targetData.target);
  9507. }
  9508. // We can't store the resumeToken as a ByteString in IndexedDb, so we
  9509. // convert it to a base64 string for storage.
  9510. const resumeToken = targetData.resumeToken.toBase64();
  9511. // lastListenSequenceNumber is always 0 until we do real GC.
  9512. return {
  9513. targetId: targetData.targetId,
  9514. canonicalId: canonifyTarget(targetData.target),
  9515. readTime: dbTimestamp,
  9516. resumeToken,
  9517. lastListenSequenceNumber: targetData.sequenceNumber,
  9518. lastLimboFreeSnapshotVersion: dbLastLimboFreeTimestamp,
  9519. query: queryProto
  9520. };
  9521. }
  9522. /**
  9523. * A helper function for figuring out what kind of query has been stored.
  9524. */
  9525. function isDocumentQuery(dbQuery) {
  9526. return dbQuery.documents !== undefined;
  9527. }
  9528. /** Encodes a DbBundle to a BundleMetadata object. */
  9529. function fromDbBundle(dbBundle) {
  9530. return {
  9531. id: dbBundle.bundleId,
  9532. createTime: fromDbTimestamp(dbBundle.createTime),
  9533. version: dbBundle.version
  9534. };
  9535. }
  9536. /** Encodes a BundleMetadata to a DbBundle. */
  9537. function toDbBundle(metadata) {
  9538. return {
  9539. bundleId: metadata.id,
  9540. createTime: toDbTimestamp(fromVersion(metadata.createTime)),
  9541. version: metadata.version
  9542. };
  9543. }
  9544. /** Encodes a DbNamedQuery to a NamedQuery. */
  9545. function fromDbNamedQuery(dbNamedQuery) {
  9546. return {
  9547. name: dbNamedQuery.name,
  9548. query: fromBundledQuery(dbNamedQuery.bundledQuery),
  9549. readTime: fromDbTimestamp(dbNamedQuery.readTime)
  9550. };
  9551. }
  9552. /** Encodes a NamedQuery from a bundle proto to a DbNamedQuery. */
  9553. function toDbNamedQuery(query) {
  9554. return {
  9555. name: query.name,
  9556. readTime: toDbTimestamp(fromVersion(query.readTime)),
  9557. bundledQuery: query.bundledQuery
  9558. };
  9559. }
  9560. /**
  9561. * Encodes a `BundledQuery` from bundle proto to a Query object.
  9562. *
  9563. * This reconstructs the original query used to build the bundle being loaded,
  9564. * including features exists only in SDKs (for example: limit-to-last).
  9565. */
  9566. function fromBundledQuery(bundledQuery) {
  9567. const query = convertQueryTargetToQuery({
  9568. parent: bundledQuery.parent,
  9569. structuredQuery: bundledQuery.structuredQuery
  9570. });
  9571. if (bundledQuery.limitType === 'LAST') {
  9572. return queryWithLimit(query, query.limit, "L" /* LimitType.Last */);
  9573. }
  9574. return query;
  9575. }
  9576. /** Encodes a NamedQuery proto object to a NamedQuery model object. */
  9577. function fromProtoNamedQuery(namedQuery) {
  9578. return {
  9579. name: namedQuery.name,
  9580. query: fromBundledQuery(namedQuery.bundledQuery),
  9581. readTime: fromVersion(namedQuery.readTime)
  9582. };
  9583. }
  9584. /** Decodes a BundleMetadata proto into a BundleMetadata object. */
  9585. function fromBundleMetadata(metadata) {
  9586. return {
  9587. id: metadata.id,
  9588. version: metadata.version,
  9589. createTime: fromVersion(metadata.createTime)
  9590. };
  9591. }
  9592. /** Encodes a DbDocumentOverlay object to an Overlay model object. */
  9593. function fromDbDocumentOverlay(localSerializer, dbDocumentOverlay) {
  9594. return new Overlay(dbDocumentOverlay.largestBatchId, fromMutation(localSerializer.remoteSerializer, dbDocumentOverlay.overlayMutation));
  9595. }
  9596. /** Decodes an Overlay model object into a DbDocumentOverlay object. */
  9597. function toDbDocumentOverlay(localSerializer, userId, overlay) {
  9598. const [_, collectionPath, documentId] = toDbDocumentOverlayKey(userId, overlay.mutation.key);
  9599. return {
  9600. userId,
  9601. collectionPath,
  9602. documentId,
  9603. collectionGroup: overlay.mutation.key.getCollectionGroup(),
  9604. largestBatchId: overlay.largestBatchId,
  9605. overlayMutation: toMutation(localSerializer.remoteSerializer, overlay.mutation)
  9606. };
  9607. }
  9608. /**
  9609. * Returns the DbDocumentOverlayKey corresponding to the given user and
  9610. * document key.
  9611. */
  9612. function toDbDocumentOverlayKey(userId, docKey) {
  9613. const docId = docKey.path.lastSegment();
  9614. const collectionPath = encodeResourcePath(docKey.path.popLast());
  9615. return [userId, collectionPath, docId];
  9616. }
  9617. function toDbIndexConfiguration(index) {
  9618. return {
  9619. indexId: index.indexId,
  9620. collectionGroup: index.collectionGroup,
  9621. fields: index.fields.map(s => [s.fieldPath.canonicalString(), s.kind])
  9622. };
  9623. }
  9624. function fromDbIndexConfiguration(index, state) {
  9625. const decodedState = state
  9626. ? new IndexState(state.sequenceNumber, new IndexOffset(fromDbTimestamp(state.readTime), new DocumentKey(decodeResourcePath(state.documentKey)), state.largestBatchId))
  9627. : IndexState.empty();
  9628. const decodedSegments = index.fields.map(([fieldPath, kind]) => new IndexSegment(FieldPath$1.fromServerFormat(fieldPath), kind));
  9629. return new FieldIndex(index.indexId, index.collectionGroup, decodedSegments, decodedState);
  9630. }
  9631. function toDbIndexState(indexId, user, sequenceNumber, offset) {
  9632. return {
  9633. indexId,
  9634. uid: user.uid || '',
  9635. sequenceNumber,
  9636. readTime: toDbTimestamp(offset.readTime),
  9637. documentKey: encodeResourcePath(offset.documentKey.path),
  9638. largestBatchId: offset.largestBatchId
  9639. };
  9640. }
  9641. /**
  9642. * @license
  9643. * Copyright 2020 Google LLC
  9644. *
  9645. * Licensed under the Apache License, Version 2.0 (the "License");
  9646. * you may not use this file except in compliance with the License.
  9647. * You may obtain a copy of the License at
  9648. *
  9649. * http://www.apache.org/licenses/LICENSE-2.0
  9650. *
  9651. * Unless required by applicable law or agreed to in writing, software
  9652. * distributed under the License is distributed on an "AS IS" BASIS,
  9653. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9654. * See the License for the specific language governing permissions and
  9655. * limitations under the License.
  9656. */
  9657. class IndexedDbBundleCache {
  9658. getBundleMetadata(transaction, bundleId) {
  9659. return bundlesStore(transaction)
  9660. .get(bundleId)
  9661. .next(bundle => {
  9662. if (bundle) {
  9663. return fromDbBundle(bundle);
  9664. }
  9665. return undefined;
  9666. });
  9667. }
  9668. saveBundleMetadata(transaction, bundleMetadata) {
  9669. return bundlesStore(transaction).put(toDbBundle(bundleMetadata));
  9670. }
  9671. getNamedQuery(transaction, queryName) {
  9672. return namedQueriesStore(transaction)
  9673. .get(queryName)
  9674. .next(query => {
  9675. if (query) {
  9676. return fromDbNamedQuery(query);
  9677. }
  9678. return undefined;
  9679. });
  9680. }
  9681. saveNamedQuery(transaction, query) {
  9682. return namedQueriesStore(transaction).put(toDbNamedQuery(query));
  9683. }
  9684. }
  9685. /**
  9686. * Helper to get a typed SimpleDbStore for the bundles object store.
  9687. */
  9688. function bundlesStore(txn) {
  9689. return getStore(txn, DbBundleStore);
  9690. }
  9691. /**
  9692. * Helper to get a typed SimpleDbStore for the namedQueries object store.
  9693. */
  9694. function namedQueriesStore(txn) {
  9695. return getStore(txn, DbNamedQueryStore);
  9696. }
  9697. /**
  9698. * @license
  9699. * Copyright 2022 Google LLC
  9700. *
  9701. * Licensed under the Apache License, Version 2.0 (the "License");
  9702. * you may not use this file except in compliance with the License.
  9703. * You may obtain a copy of the License at
  9704. *
  9705. * http://www.apache.org/licenses/LICENSE-2.0
  9706. *
  9707. * Unless required by applicable law or agreed to in writing, software
  9708. * distributed under the License is distributed on an "AS IS" BASIS,
  9709. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9710. * See the License for the specific language governing permissions and
  9711. * limitations under the License.
  9712. */
  9713. /**
  9714. * Implementation of DocumentOverlayCache using IndexedDb.
  9715. */
  9716. class IndexedDbDocumentOverlayCache {
/**
 * @param serializer - The document serializer.
 * @param userId - The userId for which we are accessing overlays.
 */
constructor(serializer, userId) {
    this.serializer = serializer;
    this.userId = userId;
}
  9725. static forUser(serializer, user) {
  9726. const userId = user.uid || '';
  9727. return new IndexedDbDocumentOverlayCache(serializer, userId);
  9728. }
  9729. getOverlay(transaction, key) {
  9730. return documentOverlayStore(transaction)
  9731. .get(toDbDocumentOverlayKey(this.userId, key))
  9732. .next(dbOverlay => {
  9733. if (dbOverlay) {
  9734. return fromDbDocumentOverlay(this.serializer, dbOverlay);
  9735. }
  9736. return null;
  9737. });
  9738. }
  9739. getOverlays(transaction, keys) {
  9740. const result = newOverlayMap();
  9741. return PersistencePromise.forEach(keys, (key) => {
  9742. return this.getOverlay(transaction, key).next(overlay => {
  9743. if (overlay !== null) {
  9744. result.set(key, overlay);
  9745. }
  9746. });
  9747. }).next(() => result);
  9748. }
  9749. saveOverlays(transaction, largestBatchId, overlays) {
  9750. const promises = [];
  9751. overlays.forEach((_, mutation) => {
  9752. const overlay = new Overlay(largestBatchId, mutation);
  9753. promises.push(this.saveOverlay(transaction, overlay));
  9754. });
  9755. return PersistencePromise.waitFor(promises);
  9756. }
  9757. removeOverlaysForBatchId(transaction, documentKeys, batchId) {
  9758. const collectionPaths = new Set();
  9759. // Get the set of unique collection paths.
  9760. documentKeys.forEach(key => collectionPaths.add(encodeResourcePath(key.getCollectionPath())));
  9761. const promises = [];
  9762. collectionPaths.forEach(collectionPath => {
  9763. const range = IDBKeyRange.bound([this.userId, collectionPath, batchId], [this.userId, collectionPath, batchId + 1],
  9764. /*lowerOpen=*/ false,
  9765. /*upperOpen=*/ true);
  9766. promises.push(documentOverlayStore(transaction).deleteAll(DbDocumentOverlayCollectionPathOverlayIndex, range));
  9767. });
  9768. return PersistencePromise.waitFor(promises);
  9769. }
  9770. getOverlaysForCollection(transaction, collection, sinceBatchId) {
  9771. const result = newOverlayMap();
  9772. const collectionPath = encodeResourcePath(collection);
  9773. // We want batch IDs larger than `sinceBatchId`, and so the lower bound
  9774. // is not inclusive.
  9775. const range = IDBKeyRange.bound([this.userId, collectionPath, sinceBatchId], [this.userId, collectionPath, Number.POSITIVE_INFINITY],
  9776. /*lowerOpen=*/ true);
  9777. return documentOverlayStore(transaction)
  9778. .loadAll(DbDocumentOverlayCollectionPathOverlayIndex, range)
  9779. .next(dbOverlays => {
  9780. for (const dbOverlay of dbOverlays) {
  9781. const overlay = fromDbDocumentOverlay(this.serializer, dbOverlay);
  9782. result.set(overlay.getKey(), overlay);
  9783. }
  9784. return result;
  9785. });
  9786. }
  9787. getOverlaysForCollectionGroup(transaction, collectionGroup, sinceBatchId, count) {
  9788. const result = newOverlayMap();
  9789. let currentBatchId = undefined;
  9790. // We want batch IDs larger than `sinceBatchId`, and so the lower bound
  9791. // is not inclusive.
  9792. const range = IDBKeyRange.bound([this.userId, collectionGroup, sinceBatchId], [this.userId, collectionGroup, Number.POSITIVE_INFINITY],
  9793. /*lowerOpen=*/ true);
  9794. return documentOverlayStore(transaction)
  9795. .iterate({
  9796. index: DbDocumentOverlayCollectionGroupOverlayIndex,
  9797. range
  9798. }, (_, dbOverlay, control) => {
  9799. // We do not want to return partial batch overlays, even if the size
  9800. // of the result set exceeds the given `count` argument. Therefore, we
  9801. // continue to aggregate results even after the result size exceeds
  9802. // `count` if there are more overlays from the `currentBatchId`.
  9803. const overlay = fromDbDocumentOverlay(this.serializer, dbOverlay);
  9804. if (result.size() < count ||
  9805. overlay.largestBatchId === currentBatchId) {
  9806. result.set(overlay.getKey(), overlay);
  9807. currentBatchId = overlay.largestBatchId;
  9808. }
  9809. else {
  9810. control.done();
  9811. }
  9812. })
  9813. .next(() => result);
  9814. }
  9815. saveOverlay(transaction, overlay) {
  9816. return documentOverlayStore(transaction).put(toDbDocumentOverlay(this.serializer, this.userId, overlay));
  9817. }
  9818. }
  9819. /**
  9820. * Helper to get a typed SimpleDbStore for the document overlay object store.
  9821. */
  9822. function documentOverlayStore(txn) {
  9823. return getStore(txn, DbDocumentOverlayStore);
  9824. }
  9825. /**
  9826. * @license
  9827. * Copyright 2021 Google LLC
  9828. *
  9829. * Licensed under the Apache License, Version 2.0 (the "License");
  9830. * you may not use this file except in compliance with the License.
  9831. * You may obtain a copy of the License at
  9832. *
  9833. * http://www.apache.org/licenses/LICENSE-2.0
  9834. *
  9835. * Unless required by applicable law or agreed to in writing, software
  9836. * distributed under the License is distributed on an "AS IS" BASIS,
  9837. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  9838. * See the License for the specific language governing permissions and
  9839. * limitations under the License.
  9840. */
// Note: This code is copied from the backend. Code that is not used by
// Firestore was removed.
// Type labels written before each index value. Their relative numeric order
// defines how values of different types sort in an index.
const INDEX_TYPE_NULL = 5;
const INDEX_TYPE_BOOLEAN = 10;
const INDEX_TYPE_NAN = 13;
const INDEX_TYPE_NUMBER = 15;
const INDEX_TYPE_TIMESTAMP = 20;
const INDEX_TYPE_STRING = 25;
const INDEX_TYPE_BLOB = 30;
const INDEX_TYPE_REFERENCE = 37;
const INDEX_TYPE_GEOPOINT = 45;
const INDEX_TYPE_ARRAY = 50;
const INDEX_TYPE_MAP = 55;
const INDEX_TYPE_REFERENCE_SEGMENT = 60;
// A terminator that indicates that a truncatable value was not truncated.
// This must be smaller than all other type labels.
const NOT_TRUNCATED = 2;
/** Firestore index value writer. */
class FirestoreIndexValueWriter {
    constructor() { }
    // The write methods below short-circuit writing terminators for values
    // containing a (terminating) truncated value.
    //
    // As an example, consider the resulting encoding for:
    //
    // ["bar", [2, "foo"]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TERM, TERM, TERM)
    // ["bar", [2, truncated("foo")]] -> (STRING, "bar", TERM, ARRAY, NUMBER, 2, STRING, "foo", TRUNC)
    // ["bar", truncated(["foo"])] -> (STRING, "bar", TERM, ARRAY. STRING, "foo", TERM, TRUNC)
    /** Writes an index value. */
    writeIndexValue(value, encoder) {
        this.writeIndexValueAux(value, encoder);
        // Write separator to split index values
        // (see go/firestore-storage-format#encodings).
        encoder.writeInfinity();
    }
    // Dispatches on whichever proto field is present in `indexValue`, writing
    // a type label followed by the type-specific payload.
    writeIndexValueAux(indexValue, encoder) {
        if ('nullValue' in indexValue) {
            this.writeValueTypeLabel(encoder, INDEX_TYPE_NULL);
        }
        else if ('booleanValue' in indexValue) {
            this.writeValueTypeLabel(encoder, INDEX_TYPE_BOOLEAN);
            // Encode as 0/1 so false sorts before true.
            encoder.writeNumber(indexValue.booleanValue ? 1 : 0);
        }
        else if ('integerValue' in indexValue) {
            // Integers and doubles share INDEX_TYPE_NUMBER so they interleave
            // in numeric order.
            this.writeValueTypeLabel(encoder, INDEX_TYPE_NUMBER);
            encoder.writeNumber(normalizeNumber(indexValue.integerValue));
        }
        else if ('doubleValue' in indexValue) {
            const n = normalizeNumber(indexValue.doubleValue);
            if (isNaN(n)) {
                // NaN gets its own label (INDEX_TYPE_NAN < INDEX_TYPE_NUMBER).
                this.writeValueTypeLabel(encoder, INDEX_TYPE_NAN);
            }
            else {
                this.writeValueTypeLabel(encoder, INDEX_TYPE_NUMBER);
                if (isNegativeZero(n)) {
                    // -0.0, 0 and 0.0 are all considered the same
                    encoder.writeNumber(0.0);
                }
                else {
                    encoder.writeNumber(n);
                }
            }
        }
        else if ('timestampValue' in indexValue) {
            const timestamp = indexValue.timestampValue;
            this.writeValueTypeLabel(encoder, INDEX_TYPE_TIMESTAMP);
            if (typeof timestamp === 'string') {
                // Timestamp may arrive in string form; write it as-is.
                encoder.writeString(timestamp);
            }
            else {
                encoder.writeString(`${timestamp.seconds || ''}`);
                encoder.writeNumber(timestamp.nanos || 0);
            }
        }
        else if ('stringValue' in indexValue) {
            this.writeIndexString(indexValue.stringValue, encoder);
            this.writeTruncationMarker(encoder);
        }
        else if ('bytesValue' in indexValue) {
            this.writeValueTypeLabel(encoder, INDEX_TYPE_BLOB);
            encoder.writeBytes(normalizeByteString(indexValue.bytesValue));
            this.writeTruncationMarker(encoder);
        }
        else if ('referenceValue' in indexValue) {
            this.writeIndexEntityRef(indexValue.referenceValue, encoder);
        }
        else if ('geoPointValue' in indexValue) {
            const geoPoint = indexValue.geoPointValue;
            this.writeValueTypeLabel(encoder, INDEX_TYPE_GEOPOINT);
            encoder.writeNumber(geoPoint.latitude || 0);
            encoder.writeNumber(geoPoint.longitude || 0);
        }
        else if ('mapValue' in indexValue) {
            if (isMaxValue(indexValue)) {
                // The internal "max value" sentinel sorts after every label.
                this.writeValueTypeLabel(encoder, Number.MAX_SAFE_INTEGER);
            }
            else {
                this.writeIndexMap(indexValue.mapValue, encoder);
                this.writeTruncationMarker(encoder);
            }
        }
        else if ('arrayValue' in indexValue) {
            this.writeIndexArray(indexValue.arrayValue, encoder);
            this.writeTruncationMarker(encoder);
        }
        else {
            // Unknown value type: programming error.
            fail();
        }
    }
    /** Writes a string label followed by the raw string. */
    writeIndexString(stringIndexValue, encoder) {
        this.writeValueTypeLabel(encoder, INDEX_TYPE_STRING);
        this.writeUnlabeledIndexString(stringIndexValue, encoder);
    }
    /** Writes a string without a preceding type label. */
    writeUnlabeledIndexString(stringIndexValue, encoder) {
        encoder.writeString(stringIndexValue);
    }
    // Writes a map as a label followed by (key, value) pairs for each entry.
    writeIndexMap(mapIndexValue, encoder) {
        const map = mapIndexValue.fields || {};
        this.writeValueTypeLabel(encoder, INDEX_TYPE_MAP);
        for (const key of Object.keys(map)) {
            this.writeIndexString(key, encoder);
            this.writeIndexValueAux(map[key], encoder);
        }
    }
    // Writes an array as a label followed by each element's encoding.
    writeIndexArray(arrayIndexValue, encoder) {
        const values = arrayIndexValue.values || [];
        this.writeValueTypeLabel(encoder, INDEX_TYPE_ARRAY);
        for (const element of values) {
            this.writeIndexValueAux(element, encoder);
        }
    }
    // Writes a document reference as a labeled sequence of path segments.
    writeIndexEntityRef(referenceValue, encoder) {
        this.writeValueTypeLabel(encoder, INDEX_TYPE_REFERENCE);
        const path = DocumentKey.fromName(referenceValue).path;
        path.forEach(segment => {
            this.writeValueTypeLabel(encoder, INDEX_TYPE_REFERENCE_SEGMENT);
            this.writeUnlabeledIndexString(segment, encoder);
        });
    }
    writeValueTypeLabel(encoder, typeOrder) {
        encoder.writeNumber(typeOrder);
    }
    writeTruncationMarker(encoder) {
        // While the SDK does not implement truncation, the truncation marker is
        // used to terminate all variable length values (which are strings, bytes,
        // references, arrays and maps).
        encoder.writeNumber(NOT_TRUNCATED);
    }
}
// Stateless singleton used by index encoding call sites.
FirestoreIndexValueWriter.INSTANCE = new FirestoreIndexValueWriter();
  9991. /**
  9992. * @license
  9993. * Copyright 2021 Google LLC
  9994. *
  9995. * Licensed under the Apache License, Version 2.0 (the "License");
  9996. * you may not use this file except in compliance with the License.
  9997. * You may obtain a copy of the License at
  9998. *
  9999. * http://www.apache.org/licenses/LICENSE-2.0
  10000. *
* Unless required by applicable law or agreed to in writing, software
  10002. * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10004. * See the License for the specific language governing permissions and
  10005. * limitations under the License.
  10006. */
/** These constants are taken from the backend. */
const MIN_SURROGATE = '\uD800';
const MAX_SURROGATE = '\uDBFF';
// Escape byte 0x00 introduces the encodings of a literal 0x00 and of the
// value separator.
const ESCAPE1 = 0x00;
const NULL_BYTE = 0xff; // Combined with ESCAPE1
const SEPARATOR = 0x01; // Combined with ESCAPE1
// Escape byte 0xff introduces the encodings of a literal 0xff and of the
// "infinity" marker.
const ESCAPE2 = 0xff;
const INFINITY = 0xff; // Combined with ESCAPE2
const FF_BYTE = 0x00; // Combined with ESCAPE2
// Bit widths used when computing the encoded byte length of a number.
const LONG_SIZE = 64;
const BYTE_SIZE = 8;
/**
 * The default size of the buffer. This is arbitrary, but likely larger than
 * most index values so that less copies of the underlying buffer will be made.
 * For large values, a single copy will be made to double the buffer length.
 */
const DEFAULT_BUFFER_SIZE = 1024;
  10024. /** Converts a JavaScript number to a byte array (using big endian encoding). */
  10025. function doubleToLongBits(value) {
  10026. const dv = new DataView(new ArrayBuffer(8));
  10027. dv.setFloat64(0, value, /* littleEndian= */ false);
  10028. return new Uint8Array(dv.buffer);
  10029. }
  10030. /**
  10031. * Counts the number of zeros in a byte.
  10032. *
  10033. * Visible for testing.
  10034. */
  10035. function numberOfLeadingZerosInByte(x) {
  10036. if (x === 0) {
  10037. return 8;
  10038. }
  10039. let zeros = 0;
  10040. if (x >> 4 === 0) {
  10041. // Test if the first four bits are zero.
  10042. zeros += 4;
  10043. x = x << 4;
  10044. }
  10045. if (x >> 6 === 0) {
  10046. // Test if the first two (or next two) bits are zero.
  10047. zeros += 2;
  10048. x = x << 2;
  10049. }
  10050. if (x >> 7 === 0) {
  10051. // Test if the remaining bit is zero.
  10052. zeros += 1;
  10053. }
  10054. return zeros;
  10055. }
  10056. /** Counts the number of leading zeros in the given byte array. */
  10057. function numberOfLeadingZeros(bytes) {
  10058. let leadingZeros = 0;
  10059. for (let i = 0; i < 8; ++i) {
  10060. const zeros = numberOfLeadingZerosInByte(bytes[i] & 0xff);
  10061. leadingZeros += zeros;
  10062. if (zeros !== 8) {
  10063. break;
  10064. }
  10065. }
  10066. return leadingZeros;
  10067. }
  10068. /**
  10069. * Returns the number of bytes required to store "value". Leading zero bytes
  10070. * are skipped.
  10071. */
  10072. function unsignedNumLength(value) {
  10073. // This is just the number of bytes for the unsigned representation of the number.
  10074. const numBits = LONG_SIZE - numberOfLeadingZeros(value);
  10075. return Math.ceil(numBits / BYTE_SIZE);
  10076. }
/**
 * OrderedCodeWriter is a minimal-allocation implementation of the writing
 * behavior defined by the backend.
 *
 * The code is ported from its Java counterpart.
 */
class OrderedCodeWriter {
    constructor() {
        // Reusable scratch buffer, grown on demand by ensureAvailable().
        this.buffer = new Uint8Array(DEFAULT_BUFFER_SIZE);
        // Next write offset into `buffer`.
        this.position = 0;
    }
    /** Writes each byte of `value` (escaped), then the ascending separator. */
    writeBytesAscending(value) {
        const it = value[Symbol.iterator]();
        let byte = it.next();
        while (!byte.done) {
            this.writeByteAscending(byte.value);
            byte = it.next();
        }
        this.writeSeparatorAscending();
    }
    /** Writes each byte of `value` (escaped, inverted), then the descending separator. */
    writeBytesDescending(value) {
        const it = value[Symbol.iterator]();
        let byte = it.next();
        while (!byte.done) {
            this.writeByteDescending(byte.value);
            byte = it.next();
        }
        this.writeSeparatorDescending();
    }
    /** Writes utf8 bytes into this byte sequence, ascending. */
    writeUtf8Ascending(sequence) {
        // String iteration yields whole code points, so a surrogate pair
        // arrives as a single two-unit string `c`.
        for (const c of sequence) {
            const charCode = c.charCodeAt(0);
            if (charCode < 0x80) {
                // One-byte (ASCII) sequence.
                this.writeByteAscending(charCode);
            }
            else if (charCode < 0x800) {
                // Two-byte sequence. The leading byte is masked down to
                // 8 bits inside writeByteAscending (0x3c0 -> 0xc0).
                this.writeByteAscending((0x0f << 6) | (charCode >>> 6));
                this.writeByteAscending(0x80 | (0x3f & charCode));
            }
            else if (c < MIN_SURROGATE || MAX_SURROGATE < c) {
                // Three-byte sequence for BMP code points outside the
                // surrogate range.
                this.writeByteAscending((0x0f << 5) | (charCode >>> 12));
                this.writeByteAscending(0x80 | (0x3f & (charCode >>> 6)));
                this.writeByteAscending(0x80 | (0x3f & charCode));
            }
            else {
                // Four-byte sequence for supplementary-plane code points.
                const codePoint = c.codePointAt(0);
                this.writeByteAscending((0x0f << 4) | (codePoint >>> 18));
                this.writeByteAscending(0x80 | (0x3f & (codePoint >>> 12)));
                this.writeByteAscending(0x80 | (0x3f & (codePoint >>> 6)));
                this.writeByteAscending(0x80 | (0x3f & codePoint));
            }
        }
        this.writeSeparatorAscending();
    }
    /** Writes utf8 bytes into this byte sequence, descending */
    writeUtf8Descending(sequence) {
        // Mirror of writeUtf8Ascending using the inverting byte writer.
        for (const c of sequence) {
            const charCode = c.charCodeAt(0);
            if (charCode < 0x80) {
                this.writeByteDescending(charCode);
            }
            else if (charCode < 0x800) {
                this.writeByteDescending((0x0f << 6) | (charCode >>> 6));
                this.writeByteDescending(0x80 | (0x3f & charCode));
            }
            else if (c < MIN_SURROGATE || MAX_SURROGATE < c) {
                this.writeByteDescending((0x0f << 5) | (charCode >>> 12));
                this.writeByteDescending(0x80 | (0x3f & (charCode >>> 6)));
                this.writeByteDescending(0x80 | (0x3f & charCode));
            }
            else {
                const codePoint = c.codePointAt(0);
                this.writeByteDescending((0x0f << 4) | (codePoint >>> 18));
                this.writeByteDescending(0x80 | (0x3f & (codePoint >>> 12)));
                this.writeByteDescending(0x80 | (0x3f & (codePoint >>> 6)));
                this.writeByteDescending(0x80 | (0x3f & codePoint));
            }
        }
        this.writeSeparatorDescending();
    }
    writeNumberAscending(val) {
        // Values are encoded with a single byte length prefix, followed by the
        // actual value in big-endian format with leading 0 bytes dropped.
        const value = this.toOrderedBits(val);
        const len = unsignedNumLength(value);
        this.ensureAvailable(1 + len);
        this.buffer[this.position++] = len & 0xff; // Write the length
        for (let i = value.length - len; i < value.length; ++i) {
            this.buffer[this.position++] = value[i] & 0xff;
        }
    }
    writeNumberDescending(val) {
        // Values are encoded with a single byte length prefix, followed by the
        // inverted value in big-endian format with leading 0 bytes dropped.
        const value = this.toOrderedBits(val);
        const len = unsignedNumLength(value);
        this.ensureAvailable(1 + len);
        this.buffer[this.position++] = ~(len & 0xff); // Write the length
        for (let i = value.length - len; i < value.length; ++i) {
            this.buffer[this.position++] = ~(value[i] & 0xff);
        }
    }
    /**
     * Writes the "infinity" byte sequence that sorts after all other byte
     * sequences written in ascending order.
     */
    writeInfinityAscending() {
        this.writeEscapedByteAscending(ESCAPE2);
        this.writeEscapedByteAscending(INFINITY);
    }
    /**
     * Writes the "infinity" byte sequence that sorts before all other byte
     * sequences written in descending order.
     */
    writeInfinityDescending() {
        this.writeEscapedByteDescending(ESCAPE2);
        this.writeEscapedByteDescending(INFINITY);
    }
    /**
     * Resets the buffer such that it is the same as when it was newly
     * constructed.
     */
    reset() {
        // The allocation is retained; only the write cursor is rewound.
        this.position = 0;
    }
    /** Appends pre-encoded bytes verbatim (no escaping). */
    seed(encodedBytes) {
        this.ensureAvailable(encodedBytes.length);
        this.buffer.set(encodedBytes, this.position);
        this.position += encodedBytes.length;
    }
    /** Makes a copy of the encoded bytes in this buffer. */
    encodedBytes() {
        return this.buffer.slice(0, this.position);
    }
    /**
     * Encodes `val` into an encoding so that the order matches the IEEE 754
     * floating-point comparison results with the following exceptions:
     * -0.0 < 0.0
     * all non-NaN < NaN
     * NaN = NaN
     */
    toOrderedBits(val) {
        const value = doubleToLongBits(val);
        // Check if the first bit is set. We use a bit mask since value[0] is
        // encoded as a number from 0 to 255.
        const isNegative = (value[0] & 0x80) !== 0;
        // Revert the two complement to get natural ordering
        value[0] ^= isNegative ? 0xff : 0x80;
        for (let i = 1; i < value.length; ++i) {
            value[i] ^= isNegative ? 0xff : 0x00;
        }
        return value;
    }
    /** Writes a single byte ascending to the buffer. */
    writeByteAscending(b) {
        // 0x00 and 0xff are reserved, so they are written as two-byte escape
        // sequences; every other byte is written verbatim.
        const masked = b & 0xff;
        if (masked === ESCAPE1) {
            this.writeEscapedByteAscending(ESCAPE1);
            this.writeEscapedByteAscending(NULL_BYTE);
        }
        else if (masked === ESCAPE2) {
            this.writeEscapedByteAscending(ESCAPE2);
            this.writeEscapedByteAscending(FF_BYTE);
        }
        else {
            this.writeEscapedByteAscending(masked);
        }
    }
    /** Writes a single byte descending to the buffer. */
    writeByteDescending(b) {
        const masked = b & 0xff;
        if (masked === ESCAPE1) {
            this.writeEscapedByteDescending(ESCAPE1);
            this.writeEscapedByteDescending(NULL_BYTE);
        }
        else if (masked === ESCAPE2) {
            this.writeEscapedByteDescending(ESCAPE2);
            this.writeEscapedByteDescending(FF_BYTE);
        }
        else {
            this.writeEscapedByteDescending(b);
        }
    }
    writeSeparatorAscending() {
        this.writeEscapedByteAscending(ESCAPE1);
        this.writeEscapedByteAscending(SEPARATOR);
    }
    writeSeparatorDescending() {
        this.writeEscapedByteDescending(ESCAPE1);
        this.writeEscapedByteDescending(SEPARATOR);
    }
    writeEscapedByteAscending(b) {
        this.ensureAvailable(1);
        this.buffer[this.position++] = b;
    }
    writeEscapedByteDescending(b) {
        // Descending order is achieved by storing the bitwise complement
        // (Uint8Array assignment truncates ~b to 8 bits).
        this.ensureAvailable(1);
        this.buffer[this.position++] = ~b;
    }
    /** Grows the buffer (at least doubling it) so `bytes` more can be written. */
    ensureAvailable(bytes) {
        const minCapacity = bytes + this.position;
        if (minCapacity <= this.buffer.length) {
            return;
        }
        // Try doubling.
        let newLength = this.buffer.length * 2;
        // Still not big enough? Just allocate the right size.
        if (newLength < minCapacity) {
            newLength = minCapacity;
        }
        // Create the new buffer.
        const newBuffer = new Uint8Array(newLength);
        newBuffer.set(this.buffer); // copy old data
        this.buffer = newBuffer;
    }
}
  10294. class AscendingIndexByteEncoder {
  10295. constructor(orderedCode) {
  10296. this.orderedCode = orderedCode;
  10297. }
  10298. writeBytes(value) {
  10299. this.orderedCode.writeBytesAscending(value);
  10300. }
  10301. writeString(value) {
  10302. this.orderedCode.writeUtf8Ascending(value);
  10303. }
  10304. writeNumber(value) {
  10305. this.orderedCode.writeNumberAscending(value);
  10306. }
  10307. writeInfinity() {
  10308. this.orderedCode.writeInfinityAscending();
  10309. }
  10310. }
  10311. class DescendingIndexByteEncoder {
  10312. constructor(orderedCode) {
  10313. this.orderedCode = orderedCode;
  10314. }
  10315. writeBytes(value) {
  10316. this.orderedCode.writeBytesDescending(value);
  10317. }
  10318. writeString(value) {
  10319. this.orderedCode.writeUtf8Descending(value);
  10320. }
  10321. writeNumber(value) {
  10322. this.orderedCode.writeNumberDescending(value);
  10323. }
  10324. writeInfinity() {
  10325. this.orderedCode.writeInfinityDescending();
  10326. }
  10327. }
  10328. /**
  10329. * Implements `DirectionalIndexByteEncoder` using `OrderedCodeWriter` for the
  10330. * actual encoding.
  10331. */
  10332. class IndexByteEncoder {
  10333. constructor() {
  10334. this.orderedCode = new OrderedCodeWriter();
  10335. this.ascending = new AscendingIndexByteEncoder(this.orderedCode);
  10336. this.descending = new DescendingIndexByteEncoder(this.orderedCode);
  10337. }
  10338. seed(encodedBytes) {
  10339. this.orderedCode.seed(encodedBytes);
  10340. }
  10341. forKind(kind) {
  10342. return kind === 0 /* IndexKind.ASCENDING */ ? this.ascending : this.descending;
  10343. }
  10344. encodedBytes() {
  10345. return this.orderedCode.encodedBytes();
  10346. }
  10347. reset() {
  10348. this.orderedCode.reset();
  10349. }
  10350. }
  10351. /**
  10352. * @license
  10353. * Copyright 2022 Google LLC
  10354. *
  10355. * Licensed under the Apache License, Version 2.0 (the "License");
  10356. * you may not use this file except in compliance with the License.
  10357. * You may obtain a copy of the License at
  10358. *
  10359. * http://www.apache.org/licenses/LICENSE-2.0
  10360. *
  10361. * Unless required by applicable law or agreed to in writing, software
  10362. * distributed under the License is distributed on an "AS IS" BASIS,
  10363. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10364. * See the License for the specific language governing permissions and
  10365. * limitations under the License.
  10366. */
  10367. /** Represents an index entry saved by the SDK in persisted storage. */
  10368. class IndexEntry {
  10369. constructor(indexId, documentKey, arrayValue, directionalValue) {
  10370. this.indexId = indexId;
  10371. this.documentKey = documentKey;
  10372. this.arrayValue = arrayValue;
  10373. this.directionalValue = directionalValue;
  10374. }
  10375. /**
  10376. * Returns an IndexEntry entry that sorts immediately after the current
  10377. * directional value.
  10378. */
  10379. successor() {
  10380. const currentLength = this.directionalValue.length;
  10381. const newLength = currentLength === 0 || this.directionalValue[currentLength - 1] === 255
  10382. ? currentLength + 1
  10383. : currentLength;
  10384. const successor = new Uint8Array(newLength);
  10385. successor.set(this.directionalValue, 0);
  10386. if (newLength !== currentLength) {
  10387. successor.set([0], this.directionalValue.length);
  10388. }
  10389. else {
  10390. ++successor[successor.length - 1];
  10391. }
  10392. return new IndexEntry(this.indexId, this.documentKey, this.arrayValue, successor);
  10393. }
  10394. }
  10395. function indexEntryComparator(left, right) {
  10396. let cmp = left.indexId - right.indexId;
  10397. if (cmp !== 0) {
  10398. return cmp;
  10399. }
  10400. cmp = compareByteArrays(left.arrayValue, right.arrayValue);
  10401. if (cmp !== 0) {
  10402. return cmp;
  10403. }
  10404. cmp = compareByteArrays(left.directionalValue, right.directionalValue);
  10405. if (cmp !== 0) {
  10406. return cmp;
  10407. }
  10408. return DocumentKey.comparator(left.documentKey, right.documentKey);
  10409. }
  10410. function compareByteArrays(left, right) {
  10411. for (let i = 0; i < left.length && i < right.length; ++i) {
  10412. const compare = left[i] - right[i];
  10413. if (compare !== 0) {
  10414. return compare;
  10415. }
  10416. }
  10417. return left.length - right.length;
  10418. }
  10419. /**
  10420. * @license
  10421. * Copyright 2022 Google LLC
  10422. *
  10423. * Licensed under the Apache License, Version 2.0 (the "License");
  10424. * you may not use this file except in compliance with the License.
  10425. * You may obtain a copy of the License at
  10426. *
  10427. * http://www.apache.org/licenses/LICENSE-2.0
  10428. *
  10429. * Unless required by applicable law or agreed to in writing, software
  10430. * distributed under the License is distributed on an "AS IS" BASIS,
  10431. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10432. * See the License for the specific language governing permissions and
  10433. * limitations under the License.
  10434. */
  10435. /**
  10436. * A light query planner for Firestore.
  10437. *
  10438. * This class matches a `FieldIndex` against a Firestore Query `Target`. It
  10439. * determines whether a given index can be used to serve the specified target.
  10440. *
  10441. * The following table showcases some possible index configurations:
  10442. *
  10443. * Query | Index
  10444. * -----------------------------------------------------------------------------
  10445. * where('a', '==', 'a').where('b', '==', 'b') | a ASC, b DESC
  10446. * where('a', '==', 'a').where('b', '==', 'b') | a ASC
  10447. * where('a', '==', 'a').where('b', '==', 'b') | b DESC
  10448. * where('a', '>=', 'a').orderBy('a') | a ASC
  10449. * where('a', '>=', 'a').orderBy('a', 'desc') | a DESC
  10450. * where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC, b ASC
  10451. * where('a', '>=', 'a').orderBy('a').orderBy('b') | a ASC
  10452. * where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS, b ASCENDING
  10453. * where('a', 'array-contains', 'a').orderBy('b') | a CONTAINS
  10454. */
class TargetIndexMatcher {
    constructor(target) {
        // The collection ID (or collection group) the target queries.
        this.collectionId =
            target.collectionGroup != null
                ? target.collectionGroup
                : target.path.lastSegment();
        this.orderBys = target.orderBy;
        this.equalityFilters = [];
        // Split the target's filters into equality filters and the inequality
        // filter. NOTE(review): if multiple inequality filters were present,
        // only the last one would be kept — presumably callers guarantee at
        // most one; verify against target validation.
        for (const filter of target.filters) {
            const fieldFilter = filter;
            if (fieldFilter.isInequality()) {
                this.inequalityFilter = fieldFilter;
            }
            else {
                this.equalityFilters.push(fieldFilter);
            }
        }
    }
    /**
     * Returns whether the index can be used to serve the TargetIndexMatcher's
     * target.
     *
     * An index is considered capable of serving the target when:
     * - The target uses all index segments for its filters and orderBy clauses.
     * The target can have additional filter and orderBy clauses, but not
     * fewer.
     * - If an ArrayContains/ArrayContainsAny filter is used, the index must also
     * have a corresponding `CONTAINS` segment.
     * - All directional index segments can be mapped to the target as a series of
     * equality filters, a single inequality filter and a series of orderBy
     * clauses.
     * - The segments that represent the equality filters may appear out of order.
     * - The optional segment for the inequality filter must appear after all
     * equality segments.
     * - The segments that represent that orderBy clause of the target must appear
     * in order after all equality and inequality segments. Single orderBy
     * clauses cannot be skipped, but a continuous orderBy suffix may be
     * omitted.
     */
    servedByIndex(index) {
        hardAssert(index.collectionGroup === this.collectionId);
        // If there is an array element, find a matching filter.
        const arraySegment = fieldIndexGetArraySegment(index);
        if (arraySegment !== undefined &&
            !this.hasMatchingEqualityFilter(arraySegment)) {
            return false;
        }
        const segments = fieldIndexGetDirectionalSegments(index);
        let segmentIndex = 0;
        let orderBysIndex = 0;
        // Process all equalities first. Equalities can appear out of order.
        for (; segmentIndex < segments.length; ++segmentIndex) {
            // We attempt to greedily match all segments to equality filters. If a
            // filter matches an index segment, we can mark the segment as used.
            // Since it is not possible to use the same field path in both an equality
            // and inequality/oderBy clause, we do not have to consider the possibility
            // that a matching equality segment should instead be used to map to an
            // inequality filter or orderBy clause.
            if (!this.hasMatchingEqualityFilter(segments[segmentIndex])) {
                // If we cannot find a matching filter, we need to verify whether the
                // remaining segments map to the target's inequality and its orderBy
                // clauses.
                break;
            }
        }
        // If we already have processed all segments, all segments are used to serve
        // the equality filters and we do not need to map any segments to the
        // target's inequality and orderBy clauses.
        if (segmentIndex === segments.length) {
            return true;
        }
        // If there is an inequality filter, the next segment must match both the
        // filter and the first orderBy clause.
        if (this.inequalityFilter !== undefined) {
            const segment = segments[segmentIndex];
            if (!this.matchesFilter(this.inequalityFilter, segment) ||
                !this.matchesOrderBy(this.orderBys[orderBysIndex++], segment)) {
                return false;
            }
            ++segmentIndex;
        }
        // All remaining segments need to represent the prefix of the target's
        // orderBy.
        for (; segmentIndex < segments.length; ++segmentIndex) {
            const segment = segments[segmentIndex];
            if (orderBysIndex >= this.orderBys.length ||
                !this.matchesOrderBy(this.orderBys[orderBysIndex++], segment)) {
                return false;
            }
        }
        return true;
    }
    /** Returns true if any equality filter matches the given index segment. */
    hasMatchingEqualityFilter(segment) {
        for (const filter of this.equalityFilters) {
            if (this.matchesFilter(filter, segment)) {
                return true;
            }
        }
        return false;
    }
    /**
     * Returns true if `filter` targets the segment's field and the segment's
     * kind (CONTAINS vs. directional) agrees with the filter's operator.
     */
    matchesFilter(filter, segment) {
        if (filter === undefined || !filter.field.isEqual(segment.fieldPath)) {
            return false;
        }
        const isArrayOperator = filter.op === "array-contains" /* Operator.ARRAY_CONTAINS */ ||
            filter.op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */;
        // Array operators require a CONTAINS segment; other operators must
        // not be paired with one.
        return (segment.kind === 2 /* IndexKind.CONTAINS */) === isArrayOperator;
    }
    /** Returns true if the segment's field and direction match the orderBy clause. */
    matchesOrderBy(orderBy, segment) {
        if (!orderBy.field.isEqual(segment.fieldPath)) {
            return false;
        }
        return ((segment.kind === 0 /* IndexKind.ASCENDING */ &&
            orderBy.dir === "asc" /* Direction.ASCENDING */) ||
            (segment.kind === 1 /* IndexKind.DESCENDING */ &&
                orderBy.dir === "desc" /* Direction.DESCENDING */));
    }
}
  10573. /**
  10574. * @license
  10575. * Copyright 2022 Google LLC
  10576. *
  10577. * Licensed under the Apache License, Version 2.0 (the "License");
  10578. * you may not use this file except in compliance with the License.
  10579. * You may obtain a copy of the License at
  10580. *
  10581. * http://www.apache.org/licenses/LICENSE-2.0
  10582. *
  10583. * Unless required by applicable law or agreed to in writing, software
  10584. * distributed under the License is distributed on an "AS IS" BASIS,
  10585. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10586. * See the License for the specific language governing permissions and
  10587. * limitations under the License.
  10588. */
  10589. /**
  10590. * Provides utility functions that help with boolean logic transformations needed for handling
  10591. * complex filters used in queries.
  10592. */
  10593. /**
  10594. * The `in` filter is only a syntactic sugar over a disjunction of equalities. For instance: `a in
  10595. * [1,2,3]` is in fact `a==1 || a==2 || a==3`. This method expands any `in` filter in the given
  10596. * input into a disjunction of equality filters and returns the expanded filter.
  10597. */
  10598. function computeInExpansion(filter) {
  10599. var _a, _b;
  10600. hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
  10601. if (filter instanceof FieldFilter) {
  10602. if (filter instanceof InFilter) {
  10603. const expandedFilters = ((_b = (_a = filter.value.arrayValue) === null || _a === void 0 ? void 0 : _a.values) === null || _b === void 0 ? void 0 : _b.map(value => FieldFilter.create(filter.field, "==" /* Operator.EQUAL */, value))) || [];
  10604. return CompositeFilter.create(expandedFilters, "or" /* CompositeOperator.OR */);
  10605. }
  10606. else {
  10607. // We have reached other kinds of field filters.
  10608. return filter;
  10609. }
  10610. }
  10611. // We have a composite filter.
  10612. const expandedFilters = filter.filters.map(subfilter => computeInExpansion(subfilter));
  10613. return CompositeFilter.create(expandedFilters, filter.op);
  10614. }
  10615. /**
  10616. * Given a composite filter, returns the list of terms in its disjunctive normal form.
  10617. *
  10618. * <p>Each element in the return value is one term of the resulting DNF. For instance: For the
  10619. * input: (A || B) && C, the DNF form is: (A && C) || (B && C), and the return value is a list
  10620. * with two elements: a composite filter that performs (A && C), and a composite filter that
  10621. * performs (B && C).
  10622. *
  10623. * @param filter the composite filter to calculate DNF transform for.
  10624. * @return the terms in the DNF transform.
  10625. */
  10626. function getDnfTerms(filter) {
  10627. if (filter.getFilters().length === 0) {
  10628. return [];
  10629. }
  10630. const result = computeDistributedNormalForm(computeInExpansion(filter));
  10631. hardAssert(isDisjunctiveNormalForm(result));
  10632. if (isSingleFieldFilter(result) || isFlatConjunction(result)) {
  10633. return [result];
  10634. }
  10635. return result.getFilters();
  10636. }
  10637. /** Returns true if the given filter is a single field filter. e.g. (a == 10). */
// A lone FieldFilter is by definition a single-term DNF.
function isSingleFieldFilter(filter) {
    return filter instanceof FieldFilter;
}
  10641. /**
  10642. * Returns true if the given filter is the conjunction of one or more field filters. e.g. (a == 10
  10643. * && b == 20)
  10644. */
  10645. function isFlatConjunction(filter) {
  10646. return (filter instanceof CompositeFilter &&
  10647. compositeFilterIsFlatConjunction(filter));
  10648. }
  10649. /**
  10650. * Returns whether or not the given filter is in disjunctive normal form (DNF).
  10651. *
  10652. * <p>In boolean logic, a disjunctive normal form (DNF) is a canonical normal form of a logical
  10653. * formula consisting of a disjunction of conjunctions; it can also be described as an OR of ANDs.
  10654. *
  10655. * <p>For more info, visit: https://en.wikipedia.org/wiki/Disjunctive_normal_form
  10656. */
  10657. function isDisjunctiveNormalForm(filter) {
  10658. return (isSingleFieldFilter(filter) ||
  10659. isFlatConjunction(filter) ||
  10660. isDisjunctionOfFieldFiltersAndFlatConjunctions(filter));
  10661. }
  10662. /**
  10663. * Returns true if the given filter is the disjunction of one or more "flat conjunctions" and
  10664. * field filters. e.g. (a == 10) || (b==20 && c==30)
  10665. */
  10666. function isDisjunctionOfFieldFiltersAndFlatConjunctions(filter) {
  10667. if (filter instanceof CompositeFilter) {
  10668. if (compositeFilterIsDisjunction(filter)) {
  10669. for (const subFilter of filter.getFilters()) {
  10670. if (!isSingleFieldFilter(subFilter) && !isFlatConjunction(subFilter)) {
  10671. return false;
  10672. }
  10673. }
  10674. return true;
  10675. }
  10676. }
  10677. return false;
  10678. }
/**
 * Recursively rewrites `filter` into disjunctive normal form by applying the
 * distributive property. Field filters are returned as-is; composites are
 * normalized bottom-up and then distributed pairwise.
 */
function computeDistributedNormalForm(filter) {
    hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
    // A field filter is already in DNF.
    if (filter instanceof FieldFilter) {
        return filter;
    }
    // A composite with a single subfilter is equivalent to that subfilter.
    if (filter.filters.length === 1) {
        return computeDistributedNormalForm(filter.filters[0]);
    }
    // Compute DNF for each of the subfilters first
    const result = filter.filters.map(subfilter => computeDistributedNormalForm(subfilter));
    let newFilter = CompositeFilter.create(result, filter.op);
    newFilter = applyAssociation(newFilter);
    if (isDisjunctiveNormalForm(newFilter)) {
        return newFilter;
    }
    // Not yet in DNF: by the checks above this must be a conjunction of more
    // than one subfilter; distribute the terms pairwise with a left fold.
    hardAssert(newFilter instanceof CompositeFilter);
    hardAssert(compositeFilterIsConjunction(newFilter));
    hardAssert(newFilter.filters.length > 1);
    return newFilter.filters.reduce((runningResult, filter) => applyDistribution(runningResult, filter));
}
  10699. function applyDistribution(lhs, rhs) {
  10700. hardAssert(lhs instanceof FieldFilter || lhs instanceof CompositeFilter);
  10701. hardAssert(rhs instanceof FieldFilter || rhs instanceof CompositeFilter);
  10702. let result;
  10703. if (lhs instanceof FieldFilter) {
  10704. if (rhs instanceof FieldFilter) {
  10705. // FieldFilter FieldFilter
  10706. result = applyDistributionFieldFilters(lhs, rhs);
  10707. }
  10708. else {
  10709. // FieldFilter CompositeFilter
  10710. result = applyDistributionFieldAndCompositeFilters(lhs, rhs);
  10711. }
  10712. }
  10713. else {
  10714. if (rhs instanceof FieldFilter) {
  10715. // CompositeFilter FieldFilter
  10716. result = applyDistributionFieldAndCompositeFilters(rhs, lhs);
  10717. }
  10718. else {
  10719. // CompositeFilter CompositeFilter
  10720. result = applyDistributionCompositeFilters(lhs, rhs);
  10721. }
  10722. }
  10723. return applyAssociation(result);
  10724. }
/** Distributes a conjunction over two plain field filters. */
function applyDistributionFieldFilters(lhs, rhs) {
    // Conjunction distribution for two field filters is the conjunction of them.
    return CompositeFilter.create([lhs, rhs], "and" /* CompositeOperator.AND */);
}
  10729. function applyDistributionCompositeFilters(lhs, rhs) {
  10730. hardAssert(lhs.filters.length > 0 && rhs.filters.length > 0);
  10731. // There are four cases:
  10732. // (A & B) & (C & D) --> (A & B & C & D)
  10733. // (A & B) & (C | D) --> (A & B & C) | (A & B & D)
  10734. // (A | B) & (C & D) --> (C & D & A) | (C & D & B)
  10735. // (A | B) & (C | D) --> (A & C) | (A & D) | (B & C) | (B & D)
  10736. // Case 1 is a merge.
  10737. if (compositeFilterIsConjunction(lhs) && compositeFilterIsConjunction(rhs)) {
  10738. return compositeFilterWithAddedFilters(lhs, rhs.getFilters());
  10739. }
  10740. // Case 2,3,4 all have at least one side (lhs or rhs) that is a disjunction. In all three cases
  10741. // we should take each element of the disjunction and distribute it over the other side, and
  10742. // return the disjunction of the distribution results.
  10743. const disjunctionSide = compositeFilterIsDisjunction(lhs) ? lhs : rhs;
  10744. const otherSide = compositeFilterIsDisjunction(lhs) ? rhs : lhs;
  10745. const results = disjunctionSide.filters.map(subfilter => applyDistribution(subfilter, otherSide));
  10746. return CompositeFilter.create(results, "or" /* CompositeOperator.OR */);
  10747. }
  10748. function applyDistributionFieldAndCompositeFilters(fieldFilter, compositeFilter) {
  10749. // There are two cases:
  10750. // A & (B & C) --> (A & B & C)
  10751. // A & (B | C) --> (A & B) | (A & C)
  10752. if (compositeFilterIsConjunction(compositeFilter)) {
  10753. // Case 1
  10754. return compositeFilterWithAddedFilters(compositeFilter, fieldFilter.getFilters());
  10755. }
  10756. else {
  10757. // Case 2
  10758. const newFilters = compositeFilter.filters.map(subfilter => applyDistribution(fieldFilter, subfilter));
  10759. return CompositeFilter.create(newFilters, "or" /* CompositeOperator.OR */);
  10760. }
  10761. }
  10762. /**
  10763. * Applies the associativity property to the given filter and returns the resulting filter.
  10764. *
  10765. * <ul>
  10766. * <li>A | (B | C) == (A | B) | C == (A | B | C)
  10767. * <li>A & (B & C) == (A & B) & C == (A & B & C)
  10768. * </ul>
  10769. *
  10770. * <p>For more info, visit: https://en.wikipedia.org/wiki/Associative_property#Propositional_logic
  10771. */
function applyAssociation(filter) {
    hardAssert(filter instanceof FieldFilter || filter instanceof CompositeFilter);
    // Field filters carry no nested structure to flatten.
    if (filter instanceof FieldFilter) {
        return filter;
    }
    const filters = filter.getFilters();
    // If the composite filter only contains 1 filter, apply associativity to it.
    if (filters.length === 1) {
        return applyAssociation(filters[0]);
    }
    // Associativity applied to a flat composite filter results is itself.
    if (compositeFilterIsFlat(filter)) {
        return filter;
    }
    // First apply associativity to all subfilters. This will in turn recursively apply
    // associativity to all nested composite filters and field filters.
    const updatedFilters = filters.map(subfilter => applyAssociation(subfilter));
    // For composite subfilters that perform the same kind of logical operation as `compositeFilter`
    // take out their filters and add them to `compositeFilter`. For example:
    // compositeFilter = (A | (B | C | D))
    // compositeSubfilter = (B | C | D)
    // Result: (A | B | C | D)
    // Note that the `compositeSubfilter` has been eliminated, and its filters (B, C, D) have been
    // added to the top-level "compositeFilter".
    const newSubfilters = [];
    updatedFilters.forEach(subfilter => {
        if (subfilter instanceof FieldFilter) {
            newSubfilters.push(subfilter);
        }
        else if (subfilter instanceof CompositeFilter) {
            if (subfilter.op === filter.op) {
                // compositeFilter: (A | (B | C))
                // compositeSubfilter: (B | C)
                // Result: (A | B | C)
                newSubfilters.push(...subfilter.filters);
            }
            else {
                // compositeFilter: (A | (B & C))
                // compositeSubfilter: (B & C)
                // Result: (A | (B & C))
                newSubfilters.push(subfilter);
            }
        }
    });
    // A single remaining subfilter replaces the composite entirely.
    if (newSubfilters.length === 1) {
        return newSubfilters[0];
    }
    return CompositeFilter.create(newSubfilters, filter.op);
}
  10821. /**
  10822. * @license
  10823. * Copyright 2019 Google LLC
  10824. *
  10825. * Licensed under the Apache License, Version 2.0 (the "License");
  10826. * you may not use this file except in compliance with the License.
  10827. * You may obtain a copy of the License at
  10828. *
  10829. * http://www.apache.org/licenses/LICENSE-2.0
  10830. *
  10831. * Unless required by applicable law or agreed to in writing, software
  10832. * distributed under the License is distributed on an "AS IS" BASIS,
  10833. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10834. * See the License for the specific language governing permissions and
  10835. * limitations under the License.
  10836. */
  10837. /**
  10838. * An in-memory implementation of IndexManager.
  10839. */
class MemoryIndexManager {
    constructor() {
        // In-memory mapping of collection IDs to their parent paths.
        this.collectionParentIndex = new MemoryCollectionParentIndex();
    }
    /** Records the parent of `collectionPath` so it can be found by collection ID. */
    addToCollectionParentIndex(transaction, collectionPath) {
        this.collectionParentIndex.add(collectionPath);
        return PersistencePromise.resolve();
    }
    /** Returns all parent paths recorded for the given collection ID. */
    getCollectionParents(transaction, collectionId) {
        return PersistencePromise.resolve(this.collectionParentIndex.getEntries(collectionId));
    }
    addFieldIndex(transaction, index) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    deleteFieldIndex(transaction, index) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    getDocumentsMatchingTarget(transaction, target) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve(null);
    }
    getIndexType(transaction, target) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve(0 /* IndexType.NONE */);
    }
    getFieldIndexes(transaction, collectionGroup) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve([]);
    }
    getNextCollectionGroupToUpdate(transaction) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve(null);
    }
    getMinOffset(transaction, target) {
        // Without field indexes every read starts from the minimum offset.
        return PersistencePromise.resolve(IndexOffset.min());
    }
    getMinOffsetFromCollectionGroup(transaction, collectionGroup) {
        // Without field indexes every read starts from the minimum offset.
        return PersistencePromise.resolve(IndexOffset.min());
    }
    updateCollectionGroup(transaction, collectionGroup, offset) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
    updateIndexEntries(transaction, documents) {
        // Field indices are not supported with memory persistence.
        return PersistencePromise.resolve();
    }
}
  10890. /**
  10891. * Internal implementation of the collection-parent index exposed by MemoryIndexManager.
  10892. * Also used for in-memory caching by IndexedDbIndexManager and initial index population
  10893. * in indexeddb_schema.ts
  10894. */
  10895. class MemoryCollectionParentIndex {
  10896. constructor() {
  10897. this.index = {};
  10898. }
  10899. // Returns false if the entry already existed.
  10900. add(collectionPath) {
  10901. const collectionId = collectionPath.lastSegment();
  10902. const parentPath = collectionPath.popLast();
  10903. const existingParents = this.index[collectionId] ||
  10904. new SortedSet(ResourcePath.comparator);
  10905. const added = !existingParents.has(parentPath);
  10906. this.index[collectionId] = existingParents.add(parentPath);
  10907. return added;
  10908. }
  10909. has(collectionPath) {
  10910. const collectionId = collectionPath.lastSegment();
  10911. const parentPath = collectionPath.popLast();
  10912. const existingParents = this.index[collectionId];
  10913. return existingParents && existingParents.has(parentPath);
  10914. }
  10915. getEntries(collectionId) {
  10916. const parentPaths = this.index[collectionId] ||
  10917. new SortedSet(ResourcePath.comparator);
  10918. return parentPaths.toArray();
  10919. }
  10920. }
  10921. /**
  10922. * @license
  10923. * Copyright 2019 Google LLC
  10924. *
  10925. * Licensed under the Apache License, Version 2.0 (the "License");
  10926. * you may not use this file except in compliance with the License.
  10927. * You may obtain a copy of the License at
  10928. *
  10929. * http://www.apache.org/licenses/LICENSE-2.0
  10930. *
  10931. * Unless required by applicable law or agreed to in writing, software
  10932. * distributed under the License is distributed on an "AS IS" BASIS,
  10933. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  10934. * See the License for the specific language governing permissions and
  10935. * limitations under the License.
  10936. */
// Tag used for debug log output from this module.
const LOG_TAG$f = 'IndexedDbIndexManager';
// Empty encoded array value, used when a target has no array segment value.
const EMPTY_VALUE = new Uint8Array(0);
  10939. /**
  10940. * A persisted implementation of IndexManager.
  10941. *
  10942. * PORTING NOTE: Unlike iOS and Android, the Web SDK does not memoize index
  10943. * data as it supports multi-tab access.
  10944. */
  10945. class IndexedDbIndexManager {
constructor(user, databaseId) {
    this.user = user;
    this.databaseId = databaseId;
    /**
     * An in-memory copy of the index entries we've already written since the SDK
     * launched. Used to avoid re-writing the same entry repeatedly.
     *
     * This is *NOT* a complete cache of what's in persistence and so can never be
     * used to satisfy reads.
     */
    this.collectionParentsCache = new MemoryCollectionParentIndex();
    /**
     * Maps from a target to its equivalent list of sub-targets. Each sub-target
     * contains only one term from the target's disjunctive normal form (DNF).
     */
    this.targetToDnfSubTargets = new ObjectMap(t => canonifyTarget(t), (l, r) => targetEquals(l, r));
    // Unauthenticated users are keyed by the empty string.
    this.uid = user.uid || '';
}
  10964. /**
  10965. * Adds a new entry to the collection parent index.
  10966. *
  10967. * Repeated calls for the same collectionPath should be avoided within a
  10968. * transaction as IndexedDbIndexManager only caches writes once a transaction
  10969. * has been committed.
  10970. */
  10971. addToCollectionParentIndex(transaction, collectionPath) {
  10972. if (!this.collectionParentsCache.has(collectionPath)) {
  10973. const collectionId = collectionPath.lastSegment();
  10974. const parentPath = collectionPath.popLast();
  10975. transaction.addOnCommittedListener(() => {
  10976. // Add the collection to the in memory cache only if the transaction was
  10977. // successfully committed.
  10978. this.collectionParentsCache.add(collectionPath);
  10979. });
  10980. const collectionParent = {
  10981. collectionId,
  10982. parent: encodeResourcePath(parentPath)
  10983. };
  10984. return collectionParentsStore(transaction).put(collectionParent);
  10985. }
  10986. return PersistencePromise.resolve();
  10987. }
/** Loads all parent paths persisted for the given collection ID. */
getCollectionParents(transaction, collectionId) {
    const parentPaths = [];
    // Scan every entry keyed [collectionId, <parent>] for this collection.
    const range = IDBKeyRange.bound([collectionId, ''], [immediateSuccessor(collectionId), ''],
    /*lowerOpen=*/ false,
    /*upperOpen=*/ true);
    return collectionParentsStore(transaction)
        .loadAll(range)
        .next(entries => {
        for (const entry of entries) {
            // This collectionId guard shouldn't be necessary (and isn't as long
            // as we're running in a real browser), but there's a bug in
            // indexeddbshim that breaks our range in our tests running in node:
            // https://github.com/axemclion/IndexedDBShim/issues/334
            if (entry.collectionId !== collectionId) {
                break;
            }
            parentPaths.push(decodeResourcePath(entry.parent));
        }
        return parentPaths;
    });
}
/**
 * Persists a new field index configuration and, when present, its state
 * (sequence number and offset) keyed by the auto-generated index ID.
 */
addFieldIndex(transaction, index) {
    // TODO(indexing): Verify that the auto-incrementing index ID works in
    // Safari & Firefox.
    const indexes = indexConfigurationStore(transaction);
    const dbIndex = toDbIndexConfiguration(index);
    delete dbIndex.indexId; // `indexId` is auto-populated by IndexedDb
    const result = indexes.add(dbIndex);
    if (index.indexState) {
        const states = indexStateStore(transaction);
        return result.next(indexId => {
            // NOTE(review): the `states.put(...)` result is not chained into the
            // returned promise; presumably the enclosing transaction tracks the
            // pending write — confirm against PersistencePromise semantics.
            states.put(toDbIndexState(indexId, this.user, index.indexState.sequenceNumber, index.indexState.offset));
        });
    }
    else {
        return result.next();
    }
}
  11026. deleteFieldIndex(transaction, index) {
  11027. const indexes = indexConfigurationStore(transaction);
  11028. const states = indexStateStore(transaction);
  11029. const entries = indexEntriesStore(transaction);
  11030. return indexes
  11031. .delete(index.indexId)
  11032. .next(() => states.delete(IDBKeyRange.bound([index.indexId], [index.indexId + 1],
  11033. /*lowerOpen=*/ false,
  11034. /*upperOpen=*/ true)))
  11035. .next(() => entries.delete(IDBKeyRange.bound([index.indexId], [index.indexId + 1],
  11036. /*lowerOpen=*/ false,
  11037. /*upperOpen=*/ true)));
  11038. }
/**
 * Returns the document keys matching `target` using field indexes, or `null`
 * when any DNF sub-target lacks a usable index.
 */
getDocumentsMatchingTarget(transaction, target) {
    const indexEntries = indexEntriesStore(transaction);
    let canServeTarget = true;
    const indexes = new Map();
    // Resolve one field index per DNF sub-target; the target is only
    // servable when every sub-target has one.
    return PersistencePromise.forEach(this.getSubTargets(target), (subTarget) => {
        return this.getFieldIndex(transaction, subTarget).next(index => {
            canServeTarget && (canServeTarget = !!index);
            indexes.set(subTarget, index);
        });
    }).next(() => {
        if (!canServeTarget) {
            return PersistencePromise.resolve(null);
        }
        else {
            // Union the results across sub-targets, de-duplicating keys.
            let existingKeys = documentKeySet();
            const result = [];
            return PersistencePromise.forEach(indexes, (index, subTarget) => {
                logDebug(LOG_TAG$f, `Using index ${fieldIndexToString(index)} to execute ${canonifyTarget(target)}`);
                // Derive the scan bounds for this sub-target from its filters
                // and encode them into index-entry byte form.
                const arrayValues = targetGetArrayValues(subTarget, index);
                const notInValues = targetGetNotInValues(subTarget, index);
                const lowerBound = targetGetLowerBound(subTarget, index);
                const upperBound = targetGetUpperBound(subTarget, index);
                const lowerBoundEncoded = this.encodeBound(index, subTarget, lowerBound);
                const upperBoundEncoded = this.encodeBound(index, subTarget, upperBound);
                const notInEncoded = this.encodeValues(index, subTarget, notInValues);
                const indexRanges = this.generateIndexRanges(index.indexId, arrayValues, lowerBoundEncoded, lowerBound.inclusive, upperBoundEncoded, upperBound.inclusive, notInEncoded);
                return PersistencePromise.forEach(indexRanges, (indexRange) => {
                    return indexEntries
                        .loadFirst(indexRange, target.limit)
                        .next(entries => {
                        entries.forEach(entry => {
                            const documentKey = DocumentKey.fromSegments(entry.documentKey);
                            if (!existingKeys.has(documentKey)) {
                                existingKeys = existingKeys.add(documentKey);
                                result.push(documentKey);
                            }
                        });
                    });
                });
            }).next(() => result);
        }
    });
}
  11082. getSubTargets(target) {
  11083. let subTargets = this.targetToDnfSubTargets.get(target);
  11084. if (subTargets) {
  11085. return subTargets;
  11086. }
  11087. if (target.filters.length === 0) {
  11088. subTargets = [target];
  11089. }
  11090. else {
  11091. // There is an implicit AND operation between all the filters stored in the target
  11092. const dnf = getDnfTerms(CompositeFilter.create(target.filters, "and" /* CompositeOperator.AND */));
  11093. subTargets = dnf.map(term => newTarget(target.path, target.collectionGroup, target.orderBy, term.getFilters(), target.limit, target.startAt, target.endAt));
  11094. }
  11095. this.targetToDnfSubTargets.set(target, subTargets);
  11096. return subTargets;
  11097. }
  11098. /**
  11099. * Constructs a key range query on `DbIndexEntryStore` that unions all
  11100. * bounds.
  11101. */
generateIndexRanges(indexId, arrayValues, lowerBounds, lowerBoundInclusive, upperBounds, upperBoundInclusive, notInValues) {
    // The number of total index scans we union together. This is similar to a
    // distributed normal form, but adapted for array values. We create a single
    // index range per value in an ARRAY_CONTAINS or ARRAY_CONTAINS_ANY filter
    // combined with the values from the query bounds.
    const totalScans = (arrayValues != null ? arrayValues.length : 1) *
        Math.max(lowerBounds.length, upperBounds.length);
    // Number of bound combinations scanned for each array value.
    const scansPerArrayElement = totalScans / (arrayValues != null ? arrayValues.length : 1);
    const indexRanges = [];
    for (let i = 0; i < totalScans; ++i) {
        // `i / scansPerArrayElement` selects the array value; `i %
        // scansPerArrayElement` cycles through the bound combinations.
        const arrayValue = arrayValues
            ? this.encodeSingleElement(arrayValues[i / scansPerArrayElement])
            : EMPTY_VALUE;
        const lowerBound = this.generateLowerBound(indexId, arrayValue, lowerBounds[i % scansPerArrayElement], lowerBoundInclusive);
        const upperBound = this.generateUpperBound(indexId, arrayValue, upperBounds[i % scansPerArrayElement], upperBoundInclusive);
        // notIn values split each scan into sub-ranges that skip the excluded
        // entries.
        const notInBound = notInValues.map(notIn => this.generateLowerBound(indexId, arrayValue, notIn,
        /* inclusive= */ true));
        indexRanges.push(...this.createRange(lowerBound, upperBound, notInBound));
    }
    return indexRanges;
}
  11123. /** Generates the lower bound for `arrayValue` and `directionalValue`. */
  11124. generateLowerBound(indexId, arrayValue, directionalValue, inclusive) {
  11125. const entry = new IndexEntry(indexId, DocumentKey.empty(), arrayValue, directionalValue);
  11126. return inclusive ? entry : entry.successor();
  11127. }
  11128. /** Generates the upper bound for `arrayValue` and `directionalValue`. */
  11129. generateUpperBound(indexId, arrayValue, directionalValue, inclusive) {
  11130. const entry = new IndexEntry(indexId, DocumentKey.empty(), arrayValue, directionalValue);
  11131. return inclusive ? entry.successor() : entry;
  11132. }
  11133. getFieldIndex(transaction, target) {
  11134. const targetIndexMatcher = new TargetIndexMatcher(target);
  11135. const collectionGroup = target.collectionGroup != null
  11136. ? target.collectionGroup
  11137. : target.path.lastSegment();
  11138. return this.getFieldIndexes(transaction, collectionGroup).next(indexes => {
  11139. // Return the index with the most number of segments.
  11140. let index = null;
  11141. for (const candidate of indexes) {
  11142. const matches = targetIndexMatcher.servedByIndex(candidate);
  11143. if (matches &&
  11144. (!index || candidate.fields.length > index.fields.length)) {
  11145. index = candidate;
  11146. }
  11147. }
  11148. return index;
  11149. });
  11150. }
/**
 * Classifies how completely indexes can serve `target`: FULL when every DNF
 * sub-target has a full index, PARTIAL when coverage is incomplete, NONE when
 * any sub-target has no index at all.
 */
getIndexType(transaction, target) {
    let indexType = 2 /* IndexType.FULL */;
    const subTargets = this.getSubTargets(target);
    return PersistencePromise.forEach(subTargets, (target) => {
        return this.getFieldIndex(transaction, target).next(index => {
            if (!index) {
                // A single unindexed sub-target makes the whole target unindexed.
                indexType = 0 /* IndexType.NONE */;
            }
            else if (indexType !== 0 /* IndexType.NONE */ &&
                index.fields.length < targetGetSegmentCount(target)) {
                // The index covers only a prefix of the target's segments.
                indexType = 1 /* IndexType.PARTIAL */;
            }
        });
    }).next(() => {
        // OR queries have more than one sub-target (one sub-target per DNF term). We currently consider
        // OR queries that have a `limit` to have a partial index. For such queries we perform sorting
        // and apply the limit in memory as a post-processing step.
        if (targetHasLimit(target) &&
            subTargets.length > 1 &&
            indexType === 2 /* IndexType.FULL */) {
            return 1 /* IndexType.PARTIAL */;
        }
        return indexType;
    });
}
  11176. /**
  11177. * Returns the byte encoded form of the directional values in the field index.
  11178. * Returns `null` if the document does not have all fields specified in the
  11179. * index.
  11180. */
  11181. encodeDirectionalElements(fieldIndex, document) {
  11182. const encoder = new IndexByteEncoder();
  11183. for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
  11184. const field = document.data.field(segment.fieldPath);
  11185. if (field == null) {
  11186. return null;
  11187. }
  11188. const directionalEncoder = encoder.forKind(segment.kind);
  11189. FirestoreIndexValueWriter.INSTANCE.writeIndexValue(field, directionalEncoder);
  11190. }
  11191. return encoder.encodedBytes();
  11192. }
  11193. /** Encodes a single value to the ascending index format. */
  11194. encodeSingleElement(value) {
  11195. const encoder = new IndexByteEncoder();
  11196. FirestoreIndexValueWriter.INSTANCE.writeIndexValue(value, encoder.forKind(0 /* IndexKind.ASCENDING */));
  11197. return encoder.encodedBytes();
  11198. }
  11199. /**
  11200. * Returns an encoded form of the document key that sorts based on the key
  11201. * ordering of the field index.
  11202. */
  11203. encodeDirectionalKey(fieldIndex, documentKey) {
  11204. const encoder = new IndexByteEncoder();
  11205. FirestoreIndexValueWriter.INSTANCE.writeIndexValue(refValue(this.databaseId, documentKey), encoder.forKind(fieldIndexGetKeyOrder(fieldIndex)));
  11206. return encoder.encodedBytes();
  11207. }
  11208. /**
  11209. * Encodes the given field values according to the specification in `target`.
  11210. * For IN queries, a list of possible values is returned.
  11211. */
encodeValues(fieldIndex, target, values) {
    if (values === null) {
        return [];
    }
    let encoders = [];
    encoders.push(new IndexByteEncoder());
    let valueIdx = 0;
    for (const segment of fieldIndexGetDirectionalSegments(fieldIndex)) {
        // Values are positional: one per directional segment.
        const value = values[valueIdx++];
        // NOTE: `encoders` is reassigned inside this loop while the for-of
        // still iterates the ORIGINAL array binding — the statement order here
        // is deliberate; do not restructure casually.
        for (const encoder of encoders) {
            if (this.isInFilter(target, segment.fieldPath) && isArray(value)) {
                // An `in` filter forks one encoder per array element.
                encoders = this.expandIndexValues(encoders, segment, value);
            }
            else {
                const directionalEncoder = encoder.forKind(segment.kind);
                FirestoreIndexValueWriter.INSTANCE.writeIndexValue(value, directionalEncoder);
            }
        }
    }
    return this.getEncodedBytes(encoders);
}
  11233. /**
  11234. * Encodes the given bounds according to the specification in `target`. For IN
  11235. * queries, a list of possible values is returned.
  11236. */
  11237. encodeBound(fieldIndex, target, bound) {
  11238. return this.encodeValues(fieldIndex, target, bound.position);
  11239. }
  11240. /** Returns the byte representation for the provided encoders. */
  11241. getEncodedBytes(encoders) {
  11242. const result = [];
  11243. for (let i = 0; i < encoders.length; ++i) {
  11244. result[i] = encoders[i].encodedBytes();
  11245. }
  11246. return result;
  11247. }
  11248. /**
  11249. * Creates a separate encoder for each element of an array.
  11250. *
  11251. * The method appends each value to all existing encoders (e.g. filter("a",
  11252. * "==", "a1").filter("b", "in", ["b1", "b2"]) becomes ["a1,b1", "a1,b2"]). A
  11253. * list of new encoders is returned.
  11254. */
  11255. expandIndexValues(encoders, segment, value) {
  11256. const prefixes = [...encoders];
  11257. const results = [];
  11258. for (const arrayElement of value.arrayValue.values || []) {
  11259. for (const prefix of prefixes) {
  11260. const clonedEncoder = new IndexByteEncoder();
  11261. clonedEncoder.seed(prefix.encodedBytes());
  11262. FirestoreIndexValueWriter.INSTANCE.writeIndexValue(arrayElement, clonedEncoder.forKind(segment.kind));
  11263. results.push(clonedEncoder);
  11264. }
  11265. }
  11266. return results;
  11267. }
  11268. isInFilter(target, fieldPath) {
  11269. return !!target.filters.find(f => f instanceof FieldFilter &&
  11270. f.field.isEqual(fieldPath) &&
  11271. (f.op === "in" /* Operator.IN */ || f.op === "not-in" /* Operator.NOT_IN */));
  11272. }
    /**
     * Loads the index configurations (merged with this user's per-index state)
     * for all indexes, or only those of `collectionGroup` when it is provided.
     */
    getFieldIndexes(transaction, collectionGroup) {
        const indexes = indexConfigurationStore(transaction);
        const states = indexStateStore(transaction);
        return (collectionGroup
            ? indexes.loadAll(DbIndexConfigurationCollectionGroupIndex, IDBKeyRange.bound(collectionGroup, collectionGroup))
            : indexes.loadAll()).next(indexConfigs => {
            const result = [];
            // Join each configuration row with its state row keyed by
            // (indexId, uid) before building the in-memory FieldIndex.
            return PersistencePromise.forEach(indexConfigs, (indexConfig) => {
                return states
                    .get([indexConfig.indexId, this.uid])
                    .next(indexState => {
                    result.push(fromDbIndexConfiguration(indexConfig, indexState));
                });
            }).next(() => result);
        });
    }
  11289. getNextCollectionGroupToUpdate(transaction) {
  11290. return this.getFieldIndexes(transaction).next(indexes => {
  11291. if (indexes.length === 0) {
  11292. return null;
  11293. }
  11294. indexes.sort((l, r) => {
  11295. const cmp = l.indexState.sequenceNumber - r.indexState.sequenceNumber;
  11296. return cmp !== 0
  11297. ? cmp
  11298. : primitiveComparator(l.collectionGroup, r.collectionGroup);
  11299. });
  11300. return indexes[0].collectionGroup;
  11301. });
  11302. }
    /**
     * Advances every index of `collectionGroup` to a fresh sequence number and
     * records `offset` as the new resume point for background indexing.
     */
    updateCollectionGroup(transaction, collectionGroup, offset) {
        const indexes = indexConfigurationStore(transaction);
        const states = indexStateStore(transaction);
        return this.getNextSequenceNumber(transaction).next(nextSequenceNumber => indexes
            .loadAll(DbIndexConfigurationCollectionGroupIndex, IDBKeyRange.bound(collectionGroup, collectionGroup))
            .next(configs => PersistencePromise.forEach(configs, (config) => states.put(toDbIndexState(config.indexId, this.user, nextSequenceNumber, offset)))));
    }
    /**
     * Recomputes and persists the index entries for each of the given documents,
     * diffing against the entries currently stored so only changes are written.
     */
    updateIndexEntries(transaction, documents) {
        // Porting Note: `getFieldIndexes()` on Web does not cache index lookups as
        // it could be used across different IndexedDB transactions. As any cached
        // data might be invalidated by other multi-tab clients, we can only trust
        // data within a single IndexedDB transaction. We therefore add a cache
        // here.
        const memoizedIndexes = new Map();
        return PersistencePromise.forEach(documents, (key, doc) => {
            const memoizedCollectionIndexes = memoizedIndexes.get(key.collectionGroup);
            const fieldIndexes = memoizedCollectionIndexes
                ? PersistencePromise.resolve(memoizedCollectionIndexes)
                : this.getFieldIndexes(transaction, key.collectionGroup);
            return fieldIndexes.next(fieldIndexes => {
                memoizedIndexes.set(key.collectionGroup, fieldIndexes);
                return PersistencePromise.forEach(fieldIndexes, (fieldIndex) => {
                    return this.getExistingIndexEntries(transaction, key, fieldIndex).next(existingEntries => {
                        const newEntries = this.computeIndexEntries(doc, fieldIndex);
                        // Only touch the store when the entry set actually changed.
                        if (!existingEntries.isEqual(newEntries)) {
                            return this.updateEntries(transaction, doc, fieldIndex, existingEntries, newEntries);
                        }
                        return PersistencePromise.resolve();
                    });
                });
            });
        });
    }
  11336. addIndexEntry(transaction, document, fieldIndex, indexEntry) {
  11337. const indexEntries = indexEntriesStore(transaction);
  11338. return indexEntries.put({
  11339. indexId: indexEntry.indexId,
  11340. uid: this.uid,
  11341. arrayValue: indexEntry.arrayValue,
  11342. directionalValue: indexEntry.directionalValue,
  11343. orderedDocumentKey: this.encodeDirectionalKey(fieldIndex, document.key),
  11344. documentKey: document.key.path.toArray()
  11345. });
  11346. }
  11347. deleteIndexEntry(transaction, document, fieldIndex, indexEntry) {
  11348. const indexEntries = indexEntriesStore(transaction);
  11349. return indexEntries.delete([
  11350. indexEntry.indexId,
  11351. this.uid,
  11352. indexEntry.arrayValue,
  11353. indexEntry.directionalValue,
  11354. this.encodeDirectionalKey(fieldIndex, document.key),
  11355. document.key.path.toArray()
  11356. ]);
  11357. }
    /**
     * Loads the index entries currently persisted for `documentKey` under the
     * given field index, as a sorted set.
     */
    getExistingIndexEntries(transaction, documentKey, fieldIndex) {
        const indexEntries = indexEntriesStore(transaction);
        let results = new SortedSet(indexEntryComparator);
        return indexEntries
            .iterate({
            index: DbIndexEntryDocumentKeyIndex,
            range: IDBKeyRange.only([
                fieldIndex.indexId,
                this.uid,
                this.encodeDirectionalKey(fieldIndex, documentKey)
            ])
        }, (_, entry) => {
            results = results.add(new IndexEntry(fieldIndex.indexId, documentKey, entry.arrayValue, entry.directionalValue));
        })
            .next(() => results);
    }
    /** Creates the index entries for the given document. */
    computeIndexEntries(document, fieldIndex) {
        let results = new SortedSet(indexEntryComparator);
        const directionalValue = this.encodeDirectionalElements(fieldIndex, document);
        // Documents that lack one of the indexed fields produce no entries.
        if (directionalValue == null) {
            return results;
        }
        const arraySegment = fieldIndexGetArraySegment(fieldIndex);
        if (arraySegment != null) {
            const value = document.data.field(arraySegment.fieldPath);
            if (isArray(value)) {
                // One entry per array element for the CONTAINS segment.
                for (const arrayValue of value.arrayValue.values || []) {
                    results = results.add(new IndexEntry(fieldIndex.indexId, document.key, this.encodeSingleElement(arrayValue), directionalValue));
                }
            }
        }
        else {
            // No array segment: a single entry with an empty array component.
            results = results.add(new IndexEntry(fieldIndex.indexId, document.key, EMPTY_VALUE, directionalValue));
        }
        return results;
    }
  11395. /**
  11396. * Updates the index entries for the provided document by deleting entries
  11397. * that are no longer referenced in `newEntries` and adding all newly added
  11398. * entries.
  11399. */
  11400. updateEntries(transaction, document, fieldIndex, existingEntries, newEntries) {
  11401. logDebug(LOG_TAG$f, "Updating index entries for document '%s'", document.key);
  11402. const promises = [];
  11403. diffSortedSets(existingEntries, newEntries, indexEntryComparator,
  11404. /* onAdd= */ entry => {
  11405. promises.push(this.addIndexEntry(transaction, document, fieldIndex, entry));
  11406. },
  11407. /* onRemove= */ entry => {
  11408. promises.push(this.deleteIndexEntry(transaction, document, fieldIndex, entry));
  11409. });
  11410. return PersistencePromise.waitFor(promises);
  11411. }
    /**
     * Returns one more than the highest sequence number recorded for this user,
     * or 1 if no index state exists yet.
     */
    getNextSequenceNumber(transaction) {
        let nextSequenceNumber = 1;
        const states = indexStateStore(transaction);
        return states
            .iterate({
            index: DbIndexStateSequenceNumberIndex,
            reverse: true,
            range: IDBKeyRange.upperBound([this.uid, Number.MAX_SAFE_INTEGER])
        }, (_, state, controller) => {
            // Reverse iteration: the very first row holds the highest
            // sequence number, so stop immediately.
            controller.done();
            nextSequenceNumber = state.sequenceNumber + 1;
        })
            .next(() => nextSequenceNumber);
    }
    /**
     * Returns a new set of IDB ranges that splits the existing range and excludes
     * any values that match the `notInValue` from these ranges. As an example,
     * '[foo > 2 && foo != 3]` becomes `[foo > 2 && < 3, foo > 3]`.
     */
    createRange(lower, upper, notInValues) {
        // The notIn values need to be sorted and unique so that we can return a
        // sorted set of non-overlapping ranges.
        notInValues = notInValues
            .sort((l, r) => indexEntryComparator(l, r))
            .filter((el, i, values) => !i || indexEntryComparator(el, values[i - 1]) !== 0);
        const bounds = [];
        bounds.push(lower);
        for (const notInValue of notInValues) {
            const cmpToLower = indexEntryComparator(notInValue, lower);
            const cmpToUpper = indexEntryComparator(notInValue, upper);
            if (cmpToLower === 0) {
                // `notInValue` is the lower bound. We therefore need to raise the bound
                // to the next value.
                bounds[0] = lower.successor();
            }
            else if (cmpToLower > 0 && cmpToUpper < 0) {
                // `notInValue` is in the middle of the range; split the range by
                // ending one sub-range just before it and starting the next just
                // after it.
                bounds.push(notInValue);
                bounds.push(notInValue.successor());
            }
            else if (cmpToUpper > 0) {
                // `notInValue` (and all following values) are out of the range
                break;
            }
        }
        bounds.push(upper);
        // `bounds` now holds consecutive (lower, upper) pairs; step by two to
        // convert each pair into one IDBKeyRange.
        const ranges = [];
        for (let i = 0; i < bounds.length; i += 2) {
            // If we encounter two bounds that will create an unmatchable key range,
            // then we return an empty set of key ranges.
            // NOTE(review): `isRangeMatchable` returns true for an UNmatchable
            // pair despite its name — see that method's comment.
            if (this.isRangeMatchable(bounds[i], bounds[i + 1])) {
                return [];
            }
            const lowerBound = [
                bounds[i].indexId,
                this.uid,
                bounds[i].arrayValue,
                bounds[i].directionalValue,
                EMPTY_VALUE,
                []
            ];
            const upperBound = [
                bounds[i + 1].indexId,
                this.uid,
                bounds[i + 1].arrayValue,
                bounds[i + 1].directionalValue,
                EMPTY_VALUE,
                []
            ];
            ranges.push(IDBKeyRange.bound(lowerBound, upperBound));
        }
        return ranges;
    }
    /**
     * NOTE(review): despite its name, this returns `true` when the range can
     * NEVER be matched (the lower bound sorts after the upper bound); the one
     * caller (`createRange`) treats a `true` result as "emit no ranges".
     */
    isRangeMatchable(lowerBound, upperBound) {
        // If lower bound is greater than the upper bound, then the key
        // range can never be matched.
        return indexEntryComparator(lowerBound, upperBound) > 0;
    }
  11490. getMinOffsetFromCollectionGroup(transaction, collectionGroup) {
  11491. return this.getFieldIndexes(transaction, collectionGroup).next(getMinOffsetFromFieldIndexes);
  11492. }
    /**
     * Returns the minimum offset across the field indexes of all sub-targets of
     * `target`; fails hard if any sub-target has no backing index.
     */
    getMinOffset(transaction, target) {
        return PersistencePromise.mapArray(this.getSubTargets(target), (subTarget) => this.getFieldIndex(transaction, subTarget).next(index => index ? index : fail())).next(getMinOffsetFromFieldIndexes);
    }
  11496. }
  11497. /**
  11498. * Helper to get a typed SimpleDbStore for the collectionParents
  11499. * document store.
  11500. */
  11501. function collectionParentsStore(txn) {
  11502. return getStore(txn, DbCollectionParentStore);
  11503. }
  11504. /**
  11505. * Helper to get a typed SimpleDbStore for the index entry object store.
  11506. */
  11507. function indexEntriesStore(txn) {
  11508. return getStore(txn, DbIndexEntryStore);
  11509. }
  11510. /**
  11511. * Helper to get a typed SimpleDbStore for the index configuration object store.
  11512. */
  11513. function indexConfigurationStore(txn) {
  11514. return getStore(txn, DbIndexConfigurationStore);
  11515. }
  11516. /**
  11517. * Helper to get a typed SimpleDbStore for the index state object store.
  11518. */
  11519. function indexStateStore(txn) {
  11520. return getStore(txn, DbIndexStateStore);
  11521. }
  11522. function getMinOffsetFromFieldIndexes(fieldIndexes) {
  11523. hardAssert(fieldIndexes.length !== 0);
  11524. let minOffset = fieldIndexes[0].indexState.offset;
  11525. let maxBatchId = minOffset.largestBatchId;
  11526. for (let i = 1; i < fieldIndexes.length; i++) {
  11527. const newOffset = fieldIndexes[i].indexState.offset;
  11528. if (indexOffsetComparator(newOffset, minOffset) < 0) {
  11529. minOffset = newOffset;
  11530. }
  11531. if (maxBatchId < newOffset.largestBatchId) {
  11532. maxBatchId = newOffset.largestBatchId;
  11533. }
  11534. }
  11535. return new IndexOffset(minOffset.readTime, minOffset.documentKey, maxBatchId);
  11536. }
  11537. /**
  11538. * @license
  11539. * Copyright 2020 Google LLC
  11540. *
  11541. * Licensed under the Apache License, Version 2.0 (the "License");
  11542. * you may not use this file except in compliance with the License.
  11543. * You may obtain a copy of the License at
  11544. *
  11545. * http://www.apache.org/licenses/LICENSE-2.0
  11546. *
  11547. * Unless required by applicable law or agreed to in writing, software
  11548. * distributed under the License is distributed on an "AS IS" BASIS,
  11549. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11550. * See the License for the specific language governing permissions and
  11551. * limitations under the License.
  11552. */
  11553. /**
  11554. * Delete a mutation batch and the associated document mutations.
  11555. * @returns A PersistencePromise of the document mutations that were removed.
  11556. */
  11557. function removeMutationBatch(txn, userId, batch) {
  11558. const mutationStore = txn.store(DbMutationBatchStore);
  11559. const indexTxn = txn.store(DbDocumentMutationStore);
  11560. const promises = [];
  11561. const range = IDBKeyRange.only(batch.batchId);
  11562. let numDeleted = 0;
  11563. const removePromise = mutationStore.iterate({ range }, (key, value, control) => {
  11564. numDeleted++;
  11565. return control.delete();
  11566. });
  11567. promises.push(removePromise.next(() => {
  11568. hardAssert(numDeleted === 1);
  11569. }));
  11570. const removedDocuments = [];
  11571. for (const mutation of batch.mutations) {
  11572. const indexKey = newDbDocumentMutationKey(userId, mutation.key.path, batch.batchId);
  11573. promises.push(indexTxn.delete(indexKey));
  11574. removedDocuments.push(mutation.key);
  11575. }
  11576. return PersistencePromise.waitFor(promises).next(() => removedDocuments);
  11577. }
  11578. /**
  11579. * Returns an approximate size for the given document.
  11580. */
  11581. function dbDocumentSize(doc) {
  11582. if (!doc) {
  11583. return 0;
  11584. }
  11585. let value;
  11586. if (doc.document) {
  11587. value = doc.document;
  11588. }
  11589. else if (doc.unknownDocument) {
  11590. value = doc.unknownDocument;
  11591. }
  11592. else if (doc.noDocument) {
  11593. value = doc.noDocument;
  11594. }
  11595. else {
  11596. throw fail();
  11597. }
  11598. return JSON.stringify(value).length;
  11599. }
  11600. /**
  11601. * @license
  11602. * Copyright 2017 Google LLC
  11603. *
  11604. * Licensed under the Apache License, Version 2.0 (the "License");
  11605. * you may not use this file except in compliance with the License.
  11606. * You may obtain a copy of the License at
  11607. *
  11608. * http://www.apache.org/licenses/LICENSE-2.0
  11609. *
  11610. * Unless required by applicable law or agreed to in writing, software
  11611. * distributed under the License is distributed on an "AS IS" BASIS,
  11612. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  11613. * See the License for the specific language governing permissions and
  11614. * limitations under the License.
  11615. */
  11616. /** A mutation queue for a specific user, backed by IndexedDB. */
  11617. class IndexedDbMutationQueue {
  11618. constructor(
  11619. /**
  11620. * The normalized userId (e.g. null UID => "" userId) used to store /
  11621. * retrieve mutations.
  11622. */
  11623. userId, serializer, indexManager, referenceDelegate) {
  11624. this.userId = userId;
  11625. this.serializer = serializer;
  11626. this.indexManager = indexManager;
  11627. this.referenceDelegate = referenceDelegate;
  11628. /**
  11629. * Caches the document keys for pending mutation batches. If the mutation
  11630. * has been removed from IndexedDb, the cached value may continue to
  11631. * be used to retrieve the batch's document keys. To remove a cached value
  11632. * locally, `removeCachedMutationKeys()` should be invoked either directly
  11633. * or through `removeMutationBatches()`.
  11634. *
  11635. * With multi-tab, when the primary client acknowledges or rejects a mutation,
  11636. * this cache is used by secondary clients to invalidate the local
  11637. * view of the documents that were previously affected by the mutation.
  11638. */
  11639. // PORTING NOTE: Multi-tab only.
  11640. this.documentKeysByBatchId = {};
  11641. }
  11642. /**
  11643. * Creates a new mutation queue for the given user.
  11644. * @param user - The user for which to create a mutation queue.
  11645. * @param serializer - The serializer to use when persisting to IndexedDb.
  11646. */
  11647. static forUser(user, serializer, indexManager, referenceDelegate) {
  11648. // TODO(mcg): Figure out what constraints there are on userIDs
  11649. // In particular, are there any reserved characters? are empty ids allowed?
  11650. // For the moment store these together in the same mutations table assuming
  11651. // that empty userIDs aren't allowed.
  11652. hardAssert(user.uid !== '');
  11653. const userId = user.isAuthenticated() ? user.uid : '';
  11654. return new IndexedDbMutationQueue(userId, serializer, indexManager, referenceDelegate);
  11655. }
  11656. checkEmpty(transaction) {
  11657. let empty = true;
  11658. const range = IDBKeyRange.bound([this.userId, Number.NEGATIVE_INFINITY], [this.userId, Number.POSITIVE_INFINITY]);
  11659. return mutationsStore(transaction)
  11660. .iterate({ index: DbMutationBatchUserMutationsIndex, range }, (key, value, control) => {
  11661. empty = false;
  11662. control.done();
  11663. })
  11664. .next(() => empty);
  11665. }
  11666. addMutationBatch(transaction, localWriteTime, baseMutations, mutations) {
  11667. const documentStore = documentMutationsStore(transaction);
  11668. const mutationStore = mutationsStore(transaction);
  11669. // The IndexedDb implementation in Chrome (and Firefox) does not handle
  11670. // compound indices that include auto-generated keys correctly. To ensure
  11671. // that the index entry is added correctly in all browsers, we perform two
  11672. // writes: The first write is used to retrieve the next auto-generated Batch
  11673. // ID, and the second write populates the index and stores the actual
  11674. // mutation batch.
  11675. // See: https://bugs.chromium.org/p/chromium/issues/detail?id=701972
  11676. // We write an empty object to obtain key
  11677. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  11678. return mutationStore.add({}).next(batchId => {
  11679. hardAssert(typeof batchId === 'number');
  11680. const batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
  11681. const dbBatch = toDbMutationBatch(this.serializer, this.userId, batch);
  11682. const promises = [];
  11683. let collectionParents = new SortedSet((l, r) => primitiveComparator(l.canonicalString(), r.canonicalString()));
  11684. for (const mutation of mutations) {
  11685. const indexKey = newDbDocumentMutationKey(this.userId, mutation.key.path, batchId);
  11686. collectionParents = collectionParents.add(mutation.key.path.popLast());
  11687. promises.push(mutationStore.put(dbBatch));
  11688. promises.push(documentStore.put(indexKey, DbDocumentMutationPlaceholder));
  11689. }
  11690. collectionParents.forEach(parent => {
  11691. promises.push(this.indexManager.addToCollectionParentIndex(transaction, parent));
  11692. });
  11693. transaction.addOnCommittedListener(() => {
  11694. this.documentKeysByBatchId[batchId] = batch.keys();
  11695. });
  11696. return PersistencePromise.waitFor(promises).next(() => batch);
  11697. });
  11698. }
  11699. lookupMutationBatch(transaction, batchId) {
  11700. return mutationsStore(transaction)
  11701. .get(batchId)
  11702. .next(dbBatch => {
  11703. if (dbBatch) {
  11704. hardAssert(dbBatch.userId === this.userId);
  11705. return fromDbMutationBatch(this.serializer, dbBatch);
  11706. }
  11707. return null;
  11708. });
  11709. }
  11710. /**
  11711. * Returns the document keys for the mutation batch with the given batchId.
  11712. * For primary clients, this method returns `null` after
  11713. * `removeMutationBatches()` has been called. Secondary clients return a
  11714. * cached result until `removeCachedMutationKeys()` is invoked.
  11715. */
  11716. // PORTING NOTE: Multi-tab only.
  11717. lookupMutationKeys(transaction, batchId) {
  11718. if (this.documentKeysByBatchId[batchId]) {
  11719. return PersistencePromise.resolve(this.documentKeysByBatchId[batchId]);
  11720. }
  11721. else {
  11722. return this.lookupMutationBatch(transaction, batchId).next(batch => {
  11723. if (batch) {
  11724. const keys = batch.keys();
  11725. this.documentKeysByBatchId[batchId] = keys;
  11726. return keys;
  11727. }
  11728. else {
  11729. return null;
  11730. }
  11731. });
  11732. }
  11733. }
  11734. getNextMutationBatchAfterBatchId(transaction, batchId) {
  11735. const nextBatchId = batchId + 1;
  11736. const range = IDBKeyRange.lowerBound([this.userId, nextBatchId]);
  11737. let foundBatch = null;
  11738. return mutationsStore(transaction)
  11739. .iterate({ index: DbMutationBatchUserMutationsIndex, range }, (key, dbBatch, control) => {
  11740. if (dbBatch.userId === this.userId) {
  11741. hardAssert(dbBatch.batchId >= nextBatchId);
  11742. foundBatch = fromDbMutationBatch(this.serializer, dbBatch);
  11743. }
  11744. control.done();
  11745. })
  11746. .next(() => foundBatch);
  11747. }
  11748. getHighestUnacknowledgedBatchId(transaction) {
  11749. const range = IDBKeyRange.upperBound([
  11750. this.userId,
  11751. Number.POSITIVE_INFINITY
  11752. ]);
  11753. let batchId = BATCHID_UNKNOWN;
  11754. return mutationsStore(transaction)
  11755. .iterate({ index: DbMutationBatchUserMutationsIndex, range, reverse: true }, (key, dbBatch, control) => {
  11756. batchId = dbBatch.batchId;
  11757. control.done();
  11758. })
  11759. .next(() => batchId);
  11760. }
  11761. getAllMutationBatches(transaction) {
  11762. const range = IDBKeyRange.bound([this.userId, BATCHID_UNKNOWN], [this.userId, Number.POSITIVE_INFINITY]);
  11763. return mutationsStore(transaction)
  11764. .loadAll(DbMutationBatchUserMutationsIndex, range)
  11765. .next(dbBatches => dbBatches.map(dbBatch => fromDbMutationBatch(this.serializer, dbBatch)));
  11766. }
  11767. getAllMutationBatchesAffectingDocumentKey(transaction, documentKey) {
  11768. // Scan the document-mutation index starting with a prefix starting with
  11769. // the given documentKey.
  11770. const indexPrefix = newDbDocumentMutationPrefixForPath(this.userId, documentKey.path);
  11771. const indexStart = IDBKeyRange.lowerBound(indexPrefix);
  11772. const results = [];
  11773. return documentMutationsStore(transaction)
  11774. .iterate({ range: indexStart }, (indexKey, _, control) => {
  11775. const [userID, encodedPath, batchId] = indexKey;
  11776. // Only consider rows matching exactly the specific key of
  11777. // interest. Note that because we order by path first, and we
  11778. // order terminators before path separators, we'll encounter all
  11779. // the index rows for documentKey contiguously. In particular, all
  11780. // the rows for documentKey will occur before any rows for
  11781. // documents nested in a subcollection beneath documentKey so we
  11782. // can stop as soon as we hit any such row.
  11783. const path = decodeResourcePath(encodedPath);
  11784. if (userID !== this.userId || !documentKey.path.isEqual(path)) {
  11785. control.done();
  11786. return;
  11787. }
  11788. // Look up the mutation batch in the store.
  11789. return mutationsStore(transaction)
  11790. .get(batchId)
  11791. .next(mutation => {
  11792. if (!mutation) {
  11793. throw fail();
  11794. }
  11795. hardAssert(mutation.userId === this.userId);
  11796. results.push(fromDbMutationBatch(this.serializer, mutation));
  11797. });
  11798. })
  11799. .next(() => results);
  11800. }
  11801. getAllMutationBatchesAffectingDocumentKeys(transaction, documentKeys) {
  11802. let uniqueBatchIDs = new SortedSet(primitiveComparator);
  11803. const promises = [];
  11804. documentKeys.forEach(documentKey => {
  11805. const indexStart = newDbDocumentMutationPrefixForPath(this.userId, documentKey.path);
  11806. const range = IDBKeyRange.lowerBound(indexStart);
  11807. const promise = documentMutationsStore(transaction).iterate({ range }, (indexKey, _, control) => {
  11808. const [userID, encodedPath, batchID] = indexKey;
  11809. // Only consider rows matching exactly the specific key of
  11810. // interest. Note that because we order by path first, and we
  11811. // order terminators before path separators, we'll encounter all
  11812. // the index rows for documentKey contiguously. In particular, all
  11813. // the rows for documentKey will occur before any rows for
  11814. // documents nested in a subcollection beneath documentKey so we
  11815. // can stop as soon as we hit any such row.
  11816. const path = decodeResourcePath(encodedPath);
  11817. if (userID !== this.userId || !documentKey.path.isEqual(path)) {
  11818. control.done();
  11819. return;
  11820. }
  11821. uniqueBatchIDs = uniqueBatchIDs.add(batchID);
  11822. });
  11823. promises.push(promise);
  11824. });
  11825. return PersistencePromise.waitFor(promises).next(() => this.lookupMutationBatches(transaction, uniqueBatchIDs));
  11826. }
  11827. getAllMutationBatchesAffectingQuery(transaction, query) {
  11828. const queryPath = query.path;
  11829. const immediateChildrenLength = queryPath.length + 1;
  11830. // TODO(mcg): Actually implement a single-collection query
  11831. //
  11832. // This is actually executing an ancestor query, traversing the whole
  11833. // subtree below the collection which can be horrifically inefficient for
  11834. // some structures. The right way to solve this is to implement the full
  11835. // value index, but that's not in the cards in the near future so this is
  11836. // the best we can do for the moment.
  11837. //
  11838. // Since we don't yet index the actual properties in the mutations, our
  11839. // current approach is to just return all mutation batches that affect
  11840. // documents in the collection being queried.
  11841. const indexPrefix = newDbDocumentMutationPrefixForPath(this.userId, queryPath);
  11842. const indexStart = IDBKeyRange.lowerBound(indexPrefix);
  11843. // Collect up unique batchIDs encountered during a scan of the index. Use a
  11844. // SortedSet to accumulate batch IDs so they can be traversed in order in a
  11845. // scan of the main table.
  11846. let uniqueBatchIDs = new SortedSet(primitiveComparator);
  11847. return documentMutationsStore(transaction)
  11848. .iterate({ range: indexStart }, (indexKey, _, control) => {
  11849. const [userID, encodedPath, batchID] = indexKey;
  11850. const path = decodeResourcePath(encodedPath);
  11851. if (userID !== this.userId || !queryPath.isPrefixOf(path)) {
  11852. control.done();
  11853. return;
  11854. }
  11855. // Rows with document keys more than one segment longer than the
  11856. // query path can't be matches. For example, a query on 'rooms'
  11857. // can't match the document /rooms/abc/messages/xyx.
  11858. // TODO(mcg): we'll need a different scanner when we implement
  11859. // ancestor queries.
  11860. if (path.length !== immediateChildrenLength) {
  11861. return;
  11862. }
  11863. uniqueBatchIDs = uniqueBatchIDs.add(batchID);
  11864. })
  11865. .next(() => this.lookupMutationBatches(transaction, uniqueBatchIDs));
  11866. }
  11867. lookupMutationBatches(transaction, batchIDs) {
  11868. const results = [];
  11869. const promises = [];
  11870. // TODO(rockwood): Implement this using iterate.
  11871. batchIDs.forEach(batchId => {
  11872. promises.push(mutationsStore(transaction)
  11873. .get(batchId)
  11874. .next(mutation => {
  11875. if (mutation === null) {
  11876. throw fail();
  11877. }
  11878. hardAssert(mutation.userId === this.userId);
  11879. results.push(fromDbMutationBatch(this.serializer, mutation));
  11880. }));
  11881. });
  11882. return PersistencePromise.waitFor(promises).next(() => results);
  11883. }
  11884. removeMutationBatch(transaction, batch) {
  11885. return removeMutationBatch(transaction.simpleDbTransaction, this.userId, batch).next(removedDocuments => {
  11886. transaction.addOnCommittedListener(() => {
  11887. this.removeCachedMutationKeys(batch.batchId);
  11888. });
  11889. return PersistencePromise.forEach(removedDocuments, (key) => {
  11890. return this.referenceDelegate.markPotentiallyOrphaned(transaction, key);
  11891. });
  11892. });
  11893. }
  11894. /**
  11895. * Clears the cached keys for a mutation batch. This method should be
  11896. * called by secondary clients after they process mutation updates.
  11897. *
  11898. * Note that this method does not have to be called from primary clients as
  11899. * the corresponding cache entries are cleared when an acknowledged or
  11900. * rejected batch is removed from the mutation queue.
  11901. */
  11902. // PORTING NOTE: Multi-tab only
  11903. removeCachedMutationKeys(batchId) {
  11904. delete this.documentKeysByBatchId[batchId];
  11905. }
/**
 * Verifies internal consistency of this user's mutation queue: when the
 * queue is empty, no index rows for this user may remain in the
 * documentMutations store. Fails via hardAssert if dangling rows exist.
 */
performConsistencyCheck(txn) {
    return this.checkEmpty(txn).next(empty => {
        if (!empty) {
            // A non-empty queue is allowed to have index entries; nothing to verify.
            return PersistencePromise.resolve();
        }
        // Verify that there are no entries in the documentMutations index if
        // the queue is empty.
        const startRange = IDBKeyRange.lowerBound(newDbDocumentMutationPrefixForUser(this.userId));
        const danglingMutationReferences = [];
        return documentMutationsStore(txn)
            .iterate({ range: startRange }, (key, _, control) => {
            const userID = key[0];
            if (userID !== this.userId) {
                // Index keys are ordered by user id, so the first entry for a
                // different user marks the end of this user's range.
                control.done();
                return;
            }
            else {
                const path = decodeResourcePath(key[1]);
                danglingMutationReferences.push(path);
            }
        })
            .next(() => {
            hardAssert(danglingMutationReferences.length === 0);
        });
    });
}
/**
 * Returns a PersistencePromise resolving to true if this user's queue
 * contains a pending mutation for the given document key.
 */
containsKey(txn, key) {
    return mutationQueueContainsKey(txn, this.userId, key);
}
  11935. // PORTING NOTE: Multi-tab only (state is held in memory in other clients).
  11936. /** Returns the mutation queue's metadata from IndexedDb. */
  11937. getMutationQueueMetadata(transaction) {
  11938. return mutationQueuesStore(transaction)
  11939. .get(this.userId)
  11940. .next((metadata) => {
  11941. return (metadata || {
  11942. userId: this.userId,
  11943. lastAcknowledgedBatchId: BATCHID_UNKNOWN,
  11944. lastStreamToken: ''
  11945. });
  11946. });
  11947. }
  11948. }
  11949. /**
  11950. * @returns true if the mutation queue for the given user contains a pending
  11951. * mutation for the given key.
  11952. */
  11953. function mutationQueueContainsKey(txn, userId, key) {
  11954. const indexKey = newDbDocumentMutationPrefixForPath(userId, key.path);
  11955. const encodedPath = indexKey[1];
  11956. const startRange = IDBKeyRange.lowerBound(indexKey);
  11957. let containsKey = false;
  11958. return documentMutationsStore(txn)
  11959. .iterate({ range: startRange, keysOnly: true }, (key, value, control) => {
  11960. const [userID, keyPath, /*batchID*/ _] = key;
  11961. if (userID === userId && keyPath === encodedPath) {
  11962. containsKey = true;
  11963. }
  11964. control.done();
  11965. })
  11966. .next(() => containsKey);
  11967. }
  11968. /** Returns true if any mutation queue contains the given document. */
  11969. function mutationQueuesContainKey(txn, docKey) {
  11970. let found = false;
  11971. return mutationQueuesStore(txn)
  11972. .iterateSerial(userId => {
  11973. return mutationQueueContainsKey(txn, userId, docKey).next(containsKey => {
  11974. if (containsKey) {
  11975. found = true;
  11976. }
  11977. return PersistencePromise.resolve(!containsKey);
  11978. });
  11979. })
  11980. .next(() => found);
  11981. }
/**
 * Helper to get a typed SimpleDbStore for the mutations (mutation batch)
 * object store.
 */
function mutationsStore(txn) {
    return getStore(txn, DbMutationBatchStore);
}
/**
 * Helper to get a typed SimpleDbStore for the documentMutations object
 * store. (The previous comment incorrectly said "mutationQueues".)
 */
function documentMutationsStore(txn) {
    return getStore(txn, DbDocumentMutationStore);
}
/**
 * Helper to get a typed SimpleDbStore for the mutationQueues object store,
 * which is keyed by user id and holds per-user queue metadata.
 */
function mutationQueuesStore(txn) {
    return getStore(txn, DbMutationQueueStore);
}
  12000. /**
  12001. * @license
  12002. * Copyright 2017 Google LLC
  12003. *
  12004. * Licensed under the Apache License, Version 2.0 (the "License");
  12005. * you may not use this file except in compliance with the License.
  12006. * You may obtain a copy of the License at
  12007. *
  12008. * http://www.apache.org/licenses/LICENSE-2.0
  12009. *
  12010. * Unless required by applicable law or agreed to in writing, software
  12011. * distributed under the License is distributed on an "AS IS" BASIS,
  12012. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12013. * See the License for the specific language governing permissions and
  12014. * limitations under the License.
  12015. */
  12016. /** Offset to ensure non-overlapping target ids. */
  12017. const OFFSET = 2;
  12018. /**
  12019. * Generates monotonically increasing target IDs for sending targets to the
  12020. * watch stream.
  12021. *
  12022. * The client constructs two generators, one for the target cache, and one for
  12023. * for the sync engine (to generate limbo documents targets). These
  12024. * generators produce non-overlapping IDs (by using even and odd IDs
  12025. * respectively).
  12026. *
  12027. * By separating the target ID space, the query cache can generate target IDs
  12028. * that persist across client restarts, while sync engine can independently
  12029. * generate in-memory target IDs that are transient and can be reused after a
  12030. * restart.
  12031. */
  12032. class TargetIdGenerator {
  12033. constructor(lastId) {
  12034. this.lastId = lastId;
  12035. }
  12036. next() {
  12037. this.lastId += OFFSET;
  12038. return this.lastId;
  12039. }
  12040. static forTargetCache() {
  12041. // The target cache generator must return '2' in its first call to `next()`
  12042. // as there is no differentiation in the protocol layer between an unset
  12043. // number and the number '0'. If we were to sent a target with target ID
  12044. // '0', the backend would consider it unset and replace it with its own ID.
  12045. return new TargetIdGenerator(2 - OFFSET);
  12046. }
  12047. static forSyncEngine() {
  12048. // Sync engine assigns target IDs for limbo document detection.
  12049. return new TargetIdGenerator(1 - OFFSET);
  12050. }
  12051. }
  12052. /**
  12053. * @license
  12054. * Copyright 2017 Google LLC
  12055. *
  12056. * Licensed under the Apache License, Version 2.0 (the "License");
  12057. * you may not use this file except in compliance with the License.
  12058. * You may obtain a copy of the License at
  12059. *
  12060. * http://www.apache.org/licenses/LICENSE-2.0
  12061. *
  12062. * Unless required by applicable law or agreed to in writing, software
  12063. * distributed under the License is distributed on an "AS IS" BASIS,
  12064. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12065. * See the License for the specific language governing permissions and
  12066. * limitations under the License.
  12067. */
/**
 * IndexedDb-backed cache of watch targets and their document associations.
 * Target metadata (highest target id, highest listen sequence number, target
 * count, last remote snapshot version) lives in a single global row.
 */
class IndexedDbTargetCache {
    constructor(referenceDelegate, serializer) {
        this.referenceDelegate = referenceDelegate;
        this.serializer = serializer;
    }
    // PORTING NOTE: We don't cache global metadata for the target cache, since
    // some of it (in particular `highestTargetId`) can be modified by secondary
    // tabs. We could perhaps be more granular (and e.g. still cache
    // `lastRemoteSnapshotVersion` in memory) but for simplicity we currently go
    // to IndexedDb whenever we need to read metadata. We can revisit if it turns
    // out to have a meaningful performance impact.
    /** Reserves the next target ID and persists the updated metadata. */
    allocateTargetId(transaction) {
        return this.retrieveMetadata(transaction).next(metadata => {
            const targetIdGenerator = new TargetIdGenerator(metadata.highestTargetId);
            metadata.highestTargetId = targetIdGenerator.next();
            return this.saveMetadata(transaction, metadata).next(() => metadata.highestTargetId);
        });
    }
    /** Reads the last remote snapshot version from the global metadata row. */
    getLastRemoteSnapshotVersion(transaction) {
        return this.retrieveMetadata(transaction).next(metadata => {
            return SnapshotVersion.fromTimestamp(new Timestamp(metadata.lastRemoteSnapshotVersion.seconds, metadata.lastRemoteSnapshotVersion.nanoseconds));
        });
    }
    getHighestSequenceNumber(transaction) {
        return this.retrieveMetadata(transaction).next(targetGlobal => targetGlobal.highestListenSequenceNumber);
    }
    /**
     * Persists the given listen sequence number and (optionally) the last
     * remote snapshot version into the global metadata row.
     */
    setTargetsMetadata(transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
        return this.retrieveMetadata(transaction).next(metadata => {
            metadata.highestListenSequenceNumber = highestListenSequenceNumber;
            if (lastRemoteSnapshotVersion) {
                metadata.lastRemoteSnapshotVersion =
                    lastRemoteSnapshotVersion.toTimestamp();
            }
            // NOTE(review): this branch appears dead — the field was already
            // assigned unconditionally above, so the comparison can never be
            // true. Kept as-is to preserve behavior.
            if (highestListenSequenceNumber > metadata.highestListenSequenceNumber) {
                metadata.highestListenSequenceNumber = highestListenSequenceNumber;
            }
            return this.saveMetadata(transaction, metadata);
        });
    }
    /** Stores a new target and bumps the cached target count. */
    addTargetData(transaction, targetData) {
        return this.saveTargetData(transaction, targetData).next(() => {
            return this.retrieveMetadata(transaction).next(metadata => {
                metadata.targetCount += 1;
                this.updateMetadataFromTargetData(targetData, metadata);
                return this.saveMetadata(transaction, metadata);
            });
        });
    }
    /** Overwrites an existing target entry; metadata is left untouched. */
    updateTargetData(transaction, targetData) {
        return this.saveTargetData(transaction, targetData);
    }
    /** Deletes a target, its document associations, and decrements the count. */
    removeTargetData(transaction, targetData) {
        return this.removeMatchingKeysForTargetId(transaction, targetData.targetId)
            .next(() => targetsStore(transaction).delete(targetData.targetId))
            .next(() => this.retrieveMetadata(transaction))
            .next(metadata => {
            hardAssert(metadata.targetCount > 0);
            metadata.targetCount -= 1;
            return this.saveMetadata(transaction, metadata);
        });
    }
    /**
     * Drops any targets with sequence number less than or equal to the upper bound, excepting those
     * present in `activeTargetIds`. Document associations for the removed targets are also removed.
     * Returns the number of targets removed.
     */
    removeTargets(txn, upperBound, activeTargetIds) {
        let count = 0;
        const promises = [];
        return targetsStore(txn)
            .iterate((key, value) => {
            const targetData = fromDbTarget(value);
            // Eligible when old enough AND absent from the active set
            // (activeTargetIds.get() yields null for missing ids).
            if (targetData.sequenceNumber <= upperBound &&
                activeTargetIds.get(targetData.targetId) === null) {
                count++;
                promises.push(this.removeTargetData(txn, targetData));
            }
        })
            .next(() => PersistencePromise.waitFor(promises))
            .next(() => count);
    }
    /**
     * Call provided function with each `TargetData` that we have cached.
     */
    forEachTarget(txn, f) {
        return targetsStore(txn).iterate((key, value) => {
            const targetData = fromDbTarget(value);
            f(targetData);
        });
    }
    /** Reads the global metadata row; it must already exist. */
    retrieveMetadata(transaction) {
        return globalTargetStore(transaction)
            .get(DbTargetGlobalKey)
            .next(metadata => {
            hardAssert(metadata !== null);
            return metadata;
        });
    }
    saveMetadata(transaction, metadata) {
        return globalTargetStore(transaction).put(DbTargetGlobalKey, metadata);
    }
    saveTargetData(transaction, targetData) {
        return targetsStore(transaction).put(toDbTarget(this.serializer, targetData));
    }
    /**
     * In-place updates the provided metadata to account for values in the given
     * TargetData. Saving is done separately. Returns true if there were any
     * changes to the metadata.
     */
    updateMetadataFromTargetData(targetData, metadata) {
        let updated = false;
        if (targetData.targetId > metadata.highestTargetId) {
            metadata.highestTargetId = targetData.targetId;
            updated = true;
        }
        if (targetData.sequenceNumber > metadata.highestListenSequenceNumber) {
            metadata.highestListenSequenceNumber = targetData.sequenceNumber;
            updated = true;
        }
        return updated;
    }
    getTargetCount(transaction) {
        return this.retrieveMetadata(transaction).next(metadata => metadata.targetCount);
    }
    /** Looks up the cached TargetData for the given target, or null. */
    getTargetData(transaction, target) {
        // Iterating by the canonicalId may yield more than one result because
        // canonicalId values are not required to be unique per target. This query
        // depends on the queryTargets index to be efficient.
        const canonicalId = canonifyTarget(target);
        const range = IDBKeyRange.bound([canonicalId, Number.NEGATIVE_INFINITY], [canonicalId, Number.POSITIVE_INFINITY]);
        let result = null;
        return targetsStore(transaction)
            .iterate({ range, index: DbTargetQueryTargetsIndexName }, (key, value, control) => {
            const found = fromDbTarget(value);
            // After finding a potential match, check that the target is
            // actually equal to the requested target.
            if (targetEquals(target, found.target)) {
                result = found;
                control.done();
            }
        })
            .next(() => result);
    }
    /** Associates each key with the target and registers a reference. */
    addMatchingKeys(txn, keys, targetId) {
        // PORTING NOTE: The reverse index (documentsTargets) is maintained by
        // IndexedDb.
        const promises = [];
        const store = documentTargetStore(txn);
        keys.forEach(key => {
            const path = encodeResourcePath(key.path);
            promises.push(store.put({ targetId, path }));
            promises.push(this.referenceDelegate.addReference(txn, targetId, key));
        });
        return PersistencePromise.waitFor(promises);
    }
    /** Removes the key/target associations and drops the references. */
    removeMatchingKeys(txn, keys, targetId) {
        // PORTING NOTE: The reverse index (documentsTargets) is maintained by
        // IndexedDb.
        const store = documentTargetStore(txn);
        return PersistencePromise.forEach(keys, (key) => {
            const path = encodeResourcePath(key.path);
            return PersistencePromise.waitFor([
                store.delete([targetId, path]),
                this.referenceDelegate.removeReference(txn, targetId, key)
            ]);
        });
    }
    /** Deletes every document association row for the given target id. */
    removeMatchingKeysForTargetId(txn, targetId) {
        const store = documentTargetStore(txn);
        // [targetId] .. [targetId + 1) spans all rows whose key starts with
        // this target id.
        const range = IDBKeyRange.bound([targetId], [targetId + 1],
        /*lowerOpen=*/ false,
        /*upperOpen=*/ true);
        return store.delete(range);
    }
    /** Returns the set of document keys associated with the given target. */
    getMatchingKeysForTargetId(txn, targetId) {
        const range = IDBKeyRange.bound([targetId], [targetId + 1],
        /*lowerOpen=*/ false,
        /*upperOpen=*/ true);
        const store = documentTargetStore(txn);
        let result = documentKeySet();
        return store
            .iterate({ range, keysOnly: true }, (key, _, control) => {
            const path = decodeResourcePath(key[1]);
            const docKey = new DocumentKey(path);
            result = result.add(docKey);
        })
            .next(() => result);
    }
    /** Returns true if the document is a member of at least one target. */
    containsKey(txn, key) {
        const path = encodeResourcePath(key.path);
        const range = IDBKeyRange.bound([path], [immediateSuccessor(path)],
        /*lowerOpen=*/ false,
        /*upperOpen=*/ true);
        let count = 0;
        return documentTargetStore(txn)
            .iterate({
            index: DbTargetDocumentDocumentTargetsIndex,
            keysOnly: true,
            range
        }, ([targetId, path], _, control) => {
            // Having a sentinel row for a document does not count as containing that document;
            // For the target cache, containing the document means the document is part of some
            // target.
            if (targetId !== 0) {
                count++;
                control.done();
            }
        })
            .next(() => count > 0);
    }
    /**
     * Looks up a TargetData entry by target ID.
     *
     * @param targetId - The target ID of the TargetData entry to look up.
     * @returns The cached TargetData entry, or null if the cache has no entry for
     * the target.
     */
    // PORTING NOTE: Multi-tab only.
    getTargetDataForTarget(transaction, targetId) {
        return targetsStore(transaction)
            .get(targetId)
            .next(found => {
            if (found) {
                return fromDbTarget(found);
            }
            else {
                return null;
            }
        });
    }
}
/**
 * Helper to get a typed SimpleDbStore for the targets object store.
 */
function targetsStore(txn) {
    return getStore(txn, DbTargetStore);
}
/**
 * Helper to get a typed SimpleDbStore for the target globals object store,
 * which holds the single target-cache metadata row.
 */
function globalTargetStore(txn) {
    return getStore(txn, DbTargetGlobalStore);
}
/**
 * Helper to get a typed SimpleDbStore for the document target object store,
 * which maps (targetId, path) pairs, including targetId-0 sentinel rows.
 */
function documentTargetStore(txn) {
    return getStore(txn, DbTargetDocumentStore);
}
  12317. /**
  12318. * @license
  12319. * Copyright 2018 Google LLC
  12320. *
  12321. * Licensed under the Apache License, Version 2.0 (the "License");
  12322. * you may not use this file except in compliance with the License.
  12323. * You may obtain a copy of the License at
  12324. *
  12325. * http://www.apache.org/licenses/LICENSE-2.0
  12326. *
  12327. * Unless required by applicable law or agreed to in writing, software
  12328. * distributed under the License is distributed on an "AS IS" BASIS,
  12329. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12330. * See the License for the specific language governing permissions and
  12331. * limitations under the License.
  12332. */
  12333. const GC_DID_NOT_RUN = {
  12334. didRun: false,
  12335. sequenceNumbersCollected: 0,
  12336. targetsRemoved: 0,
  12337. documentsRemoved: 0
  12338. };
  12339. const LRU_COLLECTION_DISABLED = -1;
  12340. const LRU_DEFAULT_CACHE_SIZE_BYTES = 40 * 1024 * 1024;
  12341. class LruParams {
  12342. constructor(
  12343. // When we attempt to collect, we will only do so if the cache size is greater than this
  12344. // threshold. Passing `COLLECTION_DISABLED` here will cause collection to always be skipped.
  12345. cacheSizeCollectionThreshold,
  12346. // The percentage of sequence numbers that we will attempt to collect
  12347. percentileToCollect,
  12348. // A cap on the total number of sequence numbers that will be collected. This prevents
  12349. // us from collecting a huge number of sequence numbers if the cache has grown very large.
  12350. maximumSequenceNumbersToCollect) {
  12351. this.cacheSizeCollectionThreshold = cacheSizeCollectionThreshold;
  12352. this.percentileToCollect = percentileToCollect;
  12353. this.maximumSequenceNumbersToCollect = maximumSequenceNumbersToCollect;
  12354. }
  12355. static withCacheSize(cacheSize) {
  12356. return new LruParams(cacheSize, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
  12357. }
  12358. }
  12359. LruParams.DEFAULT_COLLECTION_PERCENTILE = 10;
  12360. LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT = 1000;
  12361. LruParams.DEFAULT = new LruParams(LRU_DEFAULT_CACHE_SIZE_BYTES, LruParams.DEFAULT_COLLECTION_PERCENTILE, LruParams.DEFAULT_MAX_SEQUENCE_NUMBERS_TO_COLLECT);
  12362. LruParams.DISABLED = new LruParams(LRU_COLLECTION_DISABLED, 0, 0);
  12363. /**
  12364. * @license
  12365. * Copyright 2020 Google LLC
  12366. *
  12367. * Licensed under the Apache License, Version 2.0 (the "License");
  12368. * you may not use this file except in compliance with the License.
  12369. * You may obtain a copy of the License at
  12370. *
  12371. * http://www.apache.org/licenses/LICENSE-2.0
  12372. *
  12373. * Unless required by applicable law or agreed to in writing, software
  12374. * distributed under the License is distributed on an "AS IS" BASIS,
  12375. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12376. * See the License for the specific language governing permissions and
  12377. * limitations under the License.
  12378. */
/** Log tag shared by the LRU GC scheduler and collector below. */
const LOG_TAG$e = 'LruGarbageCollector';
/** 1 MiB; minimum cache size constant for LRU collection. */
const LRU_MINIMUM_CACHE_SIZE_BYTES = 1 * 1024 * 1024;
/** How long we wait to try running LRU GC after SDK initialization. */
const INITIAL_GC_DELAY_MS = 1 * 60 * 1000;
/** Minimum amount of time between GC checks, after the first one. */
const REGULAR_GC_DELAY_MS = 5 * 60 * 1000;
  12385. function bufferEntryComparator([aSequence, aIndex], [bSequence, bIndex]) {
  12386. const seqCmp = primitiveComparator(aSequence, bSequence);
  12387. if (seqCmp === 0) {
  12388. // This order doesn't matter, but we can bias against churn by sorting
  12389. // entries created earlier as less than newer entries.
  12390. return primitiveComparator(aIndex, bIndex);
  12391. }
  12392. else {
  12393. return seqCmp;
  12394. }
  12395. }
/**
 * Used to calculate the nth sequence number. Keeps a rolling buffer of the
 * lowest n values passed to `addElement`, and finally reports the largest of
 * them in `maxValue`.
 */
class RollingSequenceNumberBuffer {
    constructor(maxElements) {
        this.maxElements = maxElements;
        // Entries are [sequenceNumber, insertionIndex] pairs ordered by
        // bufferEntryComparator.
        this.buffer = new SortedSet(bufferEntryComparator);
        this.previousIndex = 0;
    }
    /** Returns a strictly increasing index used to tie-break equal sequence numbers. */
    nextIndex() {
        return ++this.previousIndex;
    }
    /** Offers a sequence number; it is kept only if it is among the n lowest seen. */
    addElement(sequenceNumber) {
        const entry = [sequenceNumber, this.nextIndex()];
        if (this.buffer.size < this.maxElements) {
            // Buffer not full yet: keep everything.
            this.buffer = this.buffer.add(entry);
        }
        else {
            // Buffer full: replace the current maximum if this entry is smaller.
            const highestValue = this.buffer.last();
            if (bufferEntryComparator(entry, highestValue) < 0) {
                this.buffer = this.buffer.delete(highestValue).add(entry);
            }
        }
    }
    get maxValue() {
        // Guaranteed to be non-empty. If we decide we are not collecting any
        // sequence numbers, nthSequenceNumber below short-circuits. If we have
        // decided that we are collecting n sequence numbers, it's because n is some
        // percentage of the existing sequence numbers. That means we should never
        // be in a situation where we are collecting sequence numbers but don't
        // actually have any.
        return this.buffer.last()[0];
    }
}
/**
 * This class is responsible for the scheduling of LRU garbage collection. It handles checking
 * whether or not GC is enabled, as well as which delay to use before the next run.
 */
class LruScheduler {
    constructor(garbageCollector, asyncQueue, localStore) {
        this.garbageCollector = garbageCollector;
        this.asyncQueue = asyncQueue;
        this.localStore = localStore;
        // Handle of the currently scheduled GC task; null when nothing is pending.
        this.gcTask = null;
    }
    /** Schedules the first GC run unless collection is disabled in the params. */
    start() {
        if (this.garbageCollector.params.cacheSizeCollectionThreshold !==
            LRU_COLLECTION_DISABLED) {
            this.scheduleGC(INITIAL_GC_DELAY_MS);
        }
    }
    /** Cancels any pending GC task. */
    stop() {
        if (this.gcTask) {
            this.gcTask.cancel();
            this.gcTask = null;
        }
    }
    get started() {
        return this.gcTask !== null;
    }
    /**
     * Enqueues a GC run after `delay` ms. After the run (including handled
     * failures) it reschedules itself with the regular delay, so GC keeps
     * running periodically until `stop()` is called.
     */
    scheduleGC(delay) {
        logDebug(LOG_TAG$e, `Garbage collection scheduled in ${delay}ms`);
        this.gcTask = this.asyncQueue.enqueueAfterDelay("lru_garbage_collection" /* TimerId.LruGarbageCollection */, delay, async () => {
            this.gcTask = null;
            try {
                await this.localStore.collectGarbage(this.garbageCollector);
            }
            catch (e) {
                // IndexedDB transaction failures are treated as transient and
                // merely logged; the next scheduled run will retry.
                if (isIndexedDbTransactionError(e)) {
                    logDebug(LOG_TAG$e, 'Ignoring IndexedDB error during garbage collection: ', e);
                }
                else {
                    await ignoreIfPrimaryLeaseLoss(e);
                }
            }
            // NOTE(review): scheduleGC() has no return value; the await only
            // sequences the rescheduling after the GC attempt.
            await this.scheduleGC(REGULAR_GC_DELAY_MS);
        });
    }
}
  12477. /** Implements the steps for LRU garbage collection. */
  12478. class LruGarbageCollectorImpl {
  12479. constructor(delegate, params) {
  12480. this.delegate = delegate;
  12481. this.params = params;
  12482. }
  12483. calculateTargetCount(txn, percentile) {
  12484. return this.delegate.getSequenceNumberCount(txn).next(targetCount => {
  12485. return Math.floor((percentile / 100.0) * targetCount);
  12486. });
  12487. }
  12488. nthSequenceNumber(txn, n) {
  12489. if (n === 0) {
  12490. return PersistencePromise.resolve(ListenSequence.INVALID);
  12491. }
  12492. const buffer = new RollingSequenceNumberBuffer(n);
  12493. return this.delegate
  12494. .forEachTarget(txn, target => buffer.addElement(target.sequenceNumber))
  12495. .next(() => {
  12496. return this.delegate.forEachOrphanedDocumentSequenceNumber(txn, sequenceNumber => buffer.addElement(sequenceNumber));
  12497. })
  12498. .next(() => buffer.maxValue);
  12499. }
  12500. removeTargets(txn, upperBound, activeTargetIds) {
  12501. return this.delegate.removeTargets(txn, upperBound, activeTargetIds);
  12502. }
  12503. removeOrphanedDocuments(txn, upperBound) {
  12504. return this.delegate.removeOrphanedDocuments(txn, upperBound);
  12505. }
  12506. collect(txn, activeTargetIds) {
  12507. if (this.params.cacheSizeCollectionThreshold === LRU_COLLECTION_DISABLED) {
  12508. logDebug('LruGarbageCollector', 'Garbage collection skipped; disabled');
  12509. return PersistencePromise.resolve(GC_DID_NOT_RUN);
  12510. }
  12511. return this.getCacheSize(txn).next(cacheSize => {
  12512. if (cacheSize < this.params.cacheSizeCollectionThreshold) {
  12513. logDebug('LruGarbageCollector', `Garbage collection skipped; Cache size ${cacheSize} ` +
  12514. `is lower than threshold ${this.params.cacheSizeCollectionThreshold}`);
  12515. return GC_DID_NOT_RUN;
  12516. }
  12517. else {
  12518. return this.runGarbageCollection(txn, activeTargetIds);
  12519. }
  12520. });
  12521. }
  12522. getCacheSize(txn) {
  12523. return this.delegate.getCacheSize(txn);
  12524. }
  12525. runGarbageCollection(txn, activeTargetIds) {
  12526. let upperBoundSequenceNumber;
  12527. let sequenceNumbersToCollect, targetsRemoved;
  12528. // Timestamps for various pieces of the process
  12529. let countedTargetsTs, foundUpperBoundTs, removedTargetsTs, removedDocumentsTs;
  12530. const startTs = Date.now();
  12531. return this.calculateTargetCount(txn, this.params.percentileToCollect)
  12532. .next(sequenceNumbers => {
  12533. // Cap at the configured max
  12534. if (sequenceNumbers > this.params.maximumSequenceNumbersToCollect) {
  12535. logDebug('LruGarbageCollector', 'Capping sequence numbers to collect down ' +
  12536. `to the maximum of ${this.params.maximumSequenceNumbersToCollect} ` +
  12537. `from ${sequenceNumbers}`);
  12538. sequenceNumbersToCollect =
  12539. this.params.maximumSequenceNumbersToCollect;
  12540. }
  12541. else {
  12542. sequenceNumbersToCollect = sequenceNumbers;
  12543. }
  12544. countedTargetsTs = Date.now();
  12545. return this.nthSequenceNumber(txn, sequenceNumbersToCollect);
  12546. })
  12547. .next(upperBound => {
  12548. upperBoundSequenceNumber = upperBound;
  12549. foundUpperBoundTs = Date.now();
  12550. return this.removeTargets(txn, upperBoundSequenceNumber, activeTargetIds);
  12551. })
  12552. .next(numTargetsRemoved => {
  12553. targetsRemoved = numTargetsRemoved;
  12554. removedTargetsTs = Date.now();
  12555. return this.removeOrphanedDocuments(txn, upperBoundSequenceNumber);
  12556. })
  12557. .next(documentsRemoved => {
  12558. removedDocumentsTs = Date.now();
  12559. if (getLogLevel() <= LogLevel.DEBUG) {
  12560. const desc = 'LRU Garbage Collection\n' +
  12561. `\tCounted targets in ${countedTargetsTs - startTs}ms\n` +
  12562. `\tDetermined least recently used ${sequenceNumbersToCollect} in ` +
  12563. `${foundUpperBoundTs - countedTargetsTs}ms\n` +
  12564. `\tRemoved ${targetsRemoved} targets in ` +
  12565. `${removedTargetsTs - foundUpperBoundTs}ms\n` +
  12566. `\tRemoved ${documentsRemoved} documents in ` +
  12567. `${removedDocumentsTs - removedTargetsTs}ms\n` +
  12568. `Total Duration: ${removedDocumentsTs - startTs}ms`;
  12569. logDebug('LruGarbageCollector', desc);
  12570. }
  12571. return PersistencePromise.resolve({
  12572. didRun: true,
  12573. sequenceNumbersCollected: sequenceNumbersToCollect,
  12574. targetsRemoved,
  12575. documentsRemoved
  12576. });
  12577. });
  12578. }
  12579. }
/** Factory for the LRU garbage collector bound to the given delegate and params. */
function newLruGarbageCollector(delegate, params) {
    return new LruGarbageCollectorImpl(delegate, params);
}
  12583. /**
  12584. * @license
  12585. * Copyright 2020 Google LLC
  12586. *
  12587. * Licensed under the Apache License, Version 2.0 (the "License");
  12588. * you may not use this file except in compliance with the License.
  12589. * You may obtain a copy of the License at
  12590. *
  12591. * http://www.apache.org/licenses/LICENSE-2.0
  12592. *
  12593. * Unless required by applicable law or agreed to in writing, software
  12594. * distributed under the License is distributed on an "AS IS" BASIS,
  12595. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12596. * See the License for the specific language governing permissions and
  12597. * limitations under the License.
  12598. */
  12599. /** Provides LRU functionality for IndexedDB persistence. */
  12600. class IndexedDbLruDelegateImpl {
  12601. constructor(db, params) {
  12602. this.db = db;
  12603. this.garbageCollector = newLruGarbageCollector(this, params);
  12604. }
  12605. getSequenceNumberCount(txn) {
  12606. const docCountPromise = this.orphanedDocumentCount(txn);
  12607. const targetCountPromise = this.db.getTargetCache().getTargetCount(txn);
  12608. return targetCountPromise.next(targetCount => docCountPromise.next(docCount => targetCount + docCount));
  12609. }
  12610. orphanedDocumentCount(txn) {
  12611. let orphanedCount = 0;
  12612. return this.forEachOrphanedDocumentSequenceNumber(txn, _ => {
  12613. orphanedCount++;
  12614. }).next(() => orphanedCount);
  12615. }
  12616. forEachTarget(txn, f) {
  12617. return this.db.getTargetCache().forEachTarget(txn, f);
  12618. }
  12619. forEachOrphanedDocumentSequenceNumber(txn, f) {
  12620. return this.forEachOrphanedDocument(txn, (docKey, sequenceNumber) => f(sequenceNumber));
  12621. }
  12622. addReference(txn, targetId, key) {
  12623. return writeSentinelKey(txn, key);
  12624. }
  12625. removeReference(txn, targetId, key) {
  12626. return writeSentinelKey(txn, key);
  12627. }
  12628. removeTargets(txn, upperBound, activeTargetIds) {
  12629. return this.db.getTargetCache().removeTargets(txn, upperBound, activeTargetIds);
  12630. }
  12631. markPotentiallyOrphaned(txn, key) {
  12632. return writeSentinelKey(txn, key);
  12633. }
  12634. /**
  12635. * Returns true if anything would prevent this document from being garbage
  12636. * collected, given that the document in question is not present in any
  12637. * targets and has a sequence number less than or equal to the upper bound for
  12638. * the collection run.
  12639. */
  12640. isPinned(txn, docKey) {
  12641. return mutationQueuesContainKey(txn, docKey);
  12642. }
  12643. removeOrphanedDocuments(txn, upperBound) {
  12644. const documentCache = this.db.getRemoteDocumentCache();
  12645. const changeBuffer = documentCache.newChangeBuffer();
  12646. const promises = [];
  12647. let documentCount = 0;
  12648. const iteration = this.forEachOrphanedDocument(txn, (docKey, sequenceNumber) => {
  12649. if (sequenceNumber <= upperBound) {
  12650. const p = this.isPinned(txn, docKey).next(isPinned => {
  12651. if (!isPinned) {
  12652. documentCount++;
  12653. // Our size accounting requires us to read all documents before
  12654. // removing them.
  12655. return changeBuffer.getEntry(txn, docKey).next(() => {
  12656. changeBuffer.removeEntry(docKey, SnapshotVersion.min());
  12657. return documentTargetStore(txn).delete(sentinelKey$1(docKey));
  12658. });
  12659. }
  12660. });
  12661. promises.push(p);
  12662. }
  12663. });
  12664. return iteration
  12665. .next(() => PersistencePromise.waitFor(promises))
  12666. .next(() => changeBuffer.apply(txn))
  12667. .next(() => documentCount);
  12668. }
  12669. removeTarget(txn, targetData) {
  12670. const updated = targetData.withSequenceNumber(txn.currentSequenceNumber);
  12671. return this.db.getTargetCache().updateTargetData(txn, updated);
  12672. }
  12673. updateLimboDocument(txn, key) {
  12674. return writeSentinelKey(txn, key);
  12675. }
/**
 * Call provided function for each document in the cache that is 'orphaned'. Orphaned
 * means not a part of any target, so the only entry in the target-document index for
 * that document will be the sentinel row (targetId 0), which will also have the sequence
 * number for the last time the document was accessed.
 *
 * @param txn - The persistence transaction to iterate in.
 * @param f - Invoked with each orphaned document's key and its last-access
 * sequence number.
 */
forEachOrphanedDocument(txn, f) {
    const store = documentTargetStore(txn);
    // Holds the sentinel sequence number of the document currently being
    // scanned; reset to INVALID as soon as a real target row is seen.
    let nextToReport = ListenSequence.INVALID;
    let nextPath;
    return store
        .iterate({
            index: DbTargetDocumentDocumentTargetsIndex
        }, ([targetId, docKey], { path, sequenceNumber }) => {
            if (targetId === 0) {
                // if nextToReport is valid, report it, this is a new key so the
                // last one must not be a member of any targets.
                if (nextToReport !== ListenSequence.INVALID) {
                    f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
                }
                // set nextToReport to be this sequence number. It's the next one we
                // might report, if we don't find any targets for this document.
                // Note that the sequence number must be defined when the targetId
                // is 0.
                nextToReport = sequenceNumber;
                nextPath = path;
            }
            else {
                // set nextToReport to be invalid, we know we don't need to report
                // this one since we found a target for it.
                nextToReport = ListenSequence.INVALID;
            }
        })
        .next(() => {
            // Since we report sequence numbers after getting to the next key, we
            // need to check if the last key we iterated over was an orphaned
            // document and report it.
            if (nextToReport !== ListenSequence.INVALID) {
                f(new DocumentKey(decodeResourcePath(nextPath)), nextToReport);
            }
        });
}
/** Returns the total byte size of the remote document cache. */
getCacheSize(txn) {
    return this.db.getRemoteDocumentCache().getSize(txn);
}
  12721. }
/**
 * @returns The key of the sentinel row (targetId 0) for the given document
 * in the target-document store.
 */
function sentinelKey$1(key) {
    return [0, encodeResourcePath(key.path)];
}
/**
 * @returns A value suitable for writing a sentinel row in the target-document
 * store.
 */
function sentinelRow(key, sequenceNumber) {
    // targetId 0 marks this row as a sentinel rather than a real target
    // membership; it carries the document's last-access sequence number.
    return { targetId: 0, path: encodeResourcePath(key.path), sequenceNumber };
}
/**
 * Writes (or overwrites) the sentinel row for `key`, stamped with the
 * transaction's current sequence number.
 */
function writeSentinelKey(txn, key) {
    return documentTargetStore(txn).put(sentinelRow(key, txn.currentSequenceNumber));
}
  12735. /**
  12736. * @license
  12737. * Copyright 2017 Google LLC
  12738. *
  12739. * Licensed under the Apache License, Version 2.0 (the "License");
  12740. * you may not use this file except in compliance with the License.
  12741. * You may obtain a copy of the License at
  12742. *
  12743. * http://www.apache.org/licenses/LICENSE-2.0
  12744. *
  12745. * Unless required by applicable law or agreed to in writing, software
  12746. * distributed under the License is distributed on an "AS IS" BASIS,
  12747. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12748. * See the License for the specific language governing permissions and
  12749. * limitations under the License.
  12750. */
  12751. /**
  12752. * An in-memory buffer of entries to be written to a RemoteDocumentCache.
  12753. * It can be used to batch up a set of changes to be written to the cache, but
  12754. * additionally supports reading entries back with the `getEntry()` method,
  12755. * falling back to the underlying RemoteDocumentCache if no entry is
  12756. * buffered.
  12757. *
  12758. * Entries added to the cache *must* be read first. This is to facilitate
  12759. * calculating the size delta of the pending changes.
  12760. *
  12761. * PORTING NOTE: This class was implemented then removed from other platforms.
  12762. * If byte-counting ends up being needed on the other platforms, consider
  12763. * porting this class as part of that implementation work.
  12764. */
  12765. class RemoteDocumentChangeBuffer {
  12766. constructor() {
  12767. // A mapping of document key to the new cache entry that should be written.
  12768. this.changes = new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
  12769. this.changesApplied = false;
  12770. }
  12771. /**
  12772. * Buffers a `RemoteDocumentCache.addEntry()` call.
  12773. *
  12774. * You can only modify documents that have already been retrieved via
  12775. * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
  12776. */
  12777. addEntry(document) {
  12778. this.assertNotApplied();
  12779. this.changes.set(document.key, document);
  12780. }
  12781. /**
  12782. * Buffers a `RemoteDocumentCache.removeEntry()` call.
  12783. *
  12784. * You can only remove documents that have already been retrieved via
  12785. * `getEntry()/getEntries()` (enforced via IndexedDbs `apply()`).
  12786. */
  12787. removeEntry(key, readTime) {
  12788. this.assertNotApplied();
  12789. this.changes.set(key, MutableDocument.newInvalidDocument(key).setReadTime(readTime));
  12790. }
  12791. /**
  12792. * Looks up an entry in the cache. The buffered changes will first be checked,
  12793. * and if no buffered change applies, this will forward to
  12794. * `RemoteDocumentCache.getEntry()`.
  12795. *
  12796. * @param transaction - The transaction in which to perform any persistence
  12797. * operations.
  12798. * @param documentKey - The key of the entry to look up.
  12799. * @returns The cached document or an invalid document if we have nothing
  12800. * cached.
  12801. */
  12802. getEntry(transaction, documentKey) {
  12803. this.assertNotApplied();
  12804. const bufferedEntry = this.changes.get(documentKey);
  12805. if (bufferedEntry !== undefined) {
  12806. return PersistencePromise.resolve(bufferedEntry);
  12807. }
  12808. else {
  12809. return this.getFromCache(transaction, documentKey);
  12810. }
  12811. }
  12812. /**
  12813. * Looks up several entries in the cache, forwarding to
  12814. * `RemoteDocumentCache.getEntry()`.
  12815. *
  12816. * @param transaction - The transaction in which to perform any persistence
  12817. * operations.
  12818. * @param documentKeys - The keys of the entries to look up.
  12819. * @returns A map of cached documents, indexed by key. If an entry cannot be
  12820. * found, the corresponding key will be mapped to an invalid document.
  12821. */
  12822. getEntries(transaction, documentKeys) {
  12823. return this.getAllFromCache(transaction, documentKeys);
  12824. }
  12825. /**
  12826. * Applies buffered changes to the underlying RemoteDocumentCache, using
  12827. * the provided transaction.
  12828. */
  12829. apply(transaction) {
  12830. this.assertNotApplied();
  12831. this.changesApplied = true;
  12832. return this.applyChanges(transaction);
  12833. }
  12834. /** Helper to assert this.changes is not null */
  12835. assertNotApplied() {
  12836. }
  12837. }
  12838. /**
  12839. * @license
  12840. * Copyright 2017 Google LLC
  12841. *
  12842. * Licensed under the Apache License, Version 2.0 (the "License");
  12843. * you may not use this file except in compliance with the License.
  12844. * You may obtain a copy of the License at
  12845. *
  12846. * http://www.apache.org/licenses/LICENSE-2.0
  12847. *
  12848. * Unless required by applicable law or agreed to in writing, software
  12849. * distributed under the License is distributed on an "AS IS" BASIS,
  12850. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  12851. * See the License for the specific language governing permissions and
  12852. * limitations under the License.
  12853. */
  12854. /**
  12855. * The RemoteDocumentCache for IndexedDb. To construct, invoke
  12856. * `newIndexedDbRemoteDocumentCache()`.
  12857. */
class IndexedDbRemoteDocumentCacheImpl {
    // serializer converts between in-memory MutableDocuments and their
    // DbRemoteDocument representation (see maybeDecodeDocument()).
    constructor(serializer) {
        this.serializer = serializer;
    }
    /** Injects the index manager used to maintain collection-parent indexes. */
    setIndexManager(indexManager) {
        this.indexManager = indexManager;
    }
    /**
     * Adds the supplied entries to the cache.
     *
     * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()` to ensure proper accounting of metadata.
     */
    addEntry(transaction, key, doc) {
        const documentStore = remoteDocumentsStore(transaction);
        return documentStore.put(doc);
    }
    /**
     * Removes a document from the cache.
     *
     * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()` to ensure proper accounting of metadata.
     */
    removeEntry(transaction, documentKey, readTime) {
        const store = remoteDocumentsStore(transaction);
        // The store's primary key includes the read time, so the caller must
        // supply the exact read time of the entry being deleted.
        return store.delete(dbReadTimeKey(documentKey, readTime));
    }
    /**
     * Updates the current cache size.
     *
     * Callers to `addEntry()` and `removeEntry()` *must* call this afterwards to update the
     * cache's metadata.
     */
    updateMetadata(transaction, sizeDelta) {
        return this.getMetadata(transaction).next(metadata => {
            metadata.byteSize += sizeDelta;
            return this.setMetadata(transaction, metadata);
        });
    }
    /**
     * Looks up a single entry by key; resolves to an invalid document if the
     * key is not cached (or only a sentinel delete exists for it).
     */
    getEntry(transaction, documentKey) {
        let doc = MutableDocument.newInvalidDocument(documentKey);
        return remoteDocumentsStore(transaction)
            .iterate({
                index: DbRemoteDocumentDocumentKeyIndex,
                range: IDBKeyRange.only(dbKey(documentKey))
            }, (_, dbRemoteDoc) => {
                doc = this.maybeDecodeDocument(documentKey, dbRemoteDoc);
            })
            .next(() => doc);
    }
    /**
     * Looks up an entry in the cache.
     *
     * @param documentKey - The key of the entry to look up.
     * @returns The cached document entry and its size.
     */
    getSizedEntry(transaction, documentKey) {
        let result = {
            size: 0,
            document: MutableDocument.newInvalidDocument(documentKey)
        };
        return remoteDocumentsStore(transaction)
            .iterate({
                index: DbRemoteDocumentDocumentKeyIndex,
                range: IDBKeyRange.only(dbKey(documentKey))
            }, (_, dbRemoteDoc) => {
                result = {
                    document: this.maybeDecodeDocument(documentKey, dbRemoteDoc),
                    size: dbDocumentSize(dbRemoteDoc)
                };
            })
            .next(() => result);
    }
    /**
     * Looks up several entries; keys that are not cached map to invalid
     * documents in the result.
     */
    getEntries(transaction, documentKeys) {
        let results = mutableDocumentMap();
        return this.forEachDbEntry(transaction, documentKeys, (key, dbRemoteDoc) => {
            const doc = this.maybeDecodeDocument(key, dbRemoteDoc);
            results = results.insert(key, doc);
        }).next(() => results);
    }
    /**
     * Looks up several entries in the cache.
     *
     * @param documentKeys - The set of keys entries to look up.
     * @returns A map of documents indexed by key and a map of sizes indexed by
     * key (zero if the document does not exist).
     */
    getSizedEntries(transaction, documentKeys) {
        let results = mutableDocumentMap();
        let sizeMap = new SortedMap(DocumentKey.comparator);
        return this.forEachDbEntry(transaction, documentKeys, (key, dbRemoteDoc) => {
            const doc = this.maybeDecodeDocument(key, dbRemoteDoc);
            results = results.insert(key, doc);
            sizeMap = sizeMap.insert(key, dbDocumentSize(dbRemoteDoc));
        }).next(() => {
            return { documents: results, sizeMap };
        });
    }
    /**
     * Invokes `callback` once for each key in `documentKeys` (in primary-key
     * order), passing the raw DB entry if cached or `null` otherwise. Uses a
     * single index cursor with skips so each cached key is visited at most once.
     */
    forEachDbEntry(transaction, documentKeys, callback) {
        if (documentKeys.isEmpty()) {
            return PersistencePromise.resolve();
        }
        // Sort the requested keys by the store's primary-key ordering so they
        // can be merged against the cursor in a single forward pass.
        let sortedKeys = new SortedSet(dbKeyComparator);
        documentKeys.forEach(e => (sortedKeys = sortedKeys.add(e)));
        const range = IDBKeyRange.bound(dbKey(sortedKeys.first()), dbKey(sortedKeys.last()));
        const keyIter = sortedKeys.getIterator();
        let nextKey = keyIter.getNext();
        return remoteDocumentsStore(transaction)
            .iterate({ index: DbRemoteDocumentDocumentKeyIndex, range }, (_, dbRemoteDoc, control) => {
                const potentialKey = DocumentKey.fromSegments([
                    ...dbRemoteDoc.prefixPath,
                    dbRemoteDoc.collectionGroup,
                    dbRemoteDoc.documentId
                ]);
                // Go through keys not found in cache.
                while (nextKey && dbKeyComparator(nextKey, potentialKey) < 0) {
                    callback(nextKey, null);
                    nextKey = keyIter.getNext();
                }
                if (nextKey && nextKey.isEqual(potentialKey)) {
                    // Key found in cache.
                    callback(nextKey, dbRemoteDoc);
                    nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
                }
                // Skip to the next key (if there is one).
                if (nextKey) {
                    control.skip(dbKey(nextKey));
                }
                else {
                    control.done();
                }
            })
            .next(() => {
                // The rest of the keys are not in the cache. One case where `iterate`
                // above won't go through them is when the cache is empty.
                while (nextKey) {
                    callback(nextKey, null);
                    nextKey = keyIter.hasNext() ? keyIter.getNext() : null;
                }
            });
    }
    /**
     * Returns all documents in `collection` with a read time / document key
     * strictly after `offset` (the lower bound is open — see `bound(..., true)`).
     */
    getAllFromCollection(transaction, collection, offset) {
        const startKey = [
            collection.popLast().toArray(),
            collection.lastSegment(),
            toDbTimestampKey(offset.readTime),
            offset.documentKey.path.isEmpty()
                ? ''
                : offset.documentKey.path.lastSegment()
        ];
        const endKey = [
            collection.popLast().toArray(),
            collection.lastSegment(),
            // Largest possible timestamp key: everything up to the end of the
            // collection is included.
            [Number.MAX_SAFE_INTEGER, Number.MAX_SAFE_INTEGER],
            ''
        ];
        return remoteDocumentsStore(transaction)
            .loadAll(IDBKeyRange.bound(startKey, endKey, true))
            .next(dbRemoteDocs => {
                let results = mutableDocumentMap();
                for (const dbRemoteDoc of dbRemoteDocs) {
                    const document = this.maybeDecodeDocument(DocumentKey.fromSegments(dbRemoteDoc.prefixPath.concat(dbRemoteDoc.collectionGroup, dbRemoteDoc.documentId)), dbRemoteDoc);
                    results = results.insert(document.key, document);
                }
                return results;
            });
    }
    /**
     * Returns up to `limit` documents from the given collection group, starting
     * after `offset`, ordered by the collection-group index.
     */
    getAllFromCollectionGroup(transaction, collectionGroup, offset, limit) {
        let results = mutableDocumentMap();
        const startKey = dbCollectionGroupKey(collectionGroup, offset);
        const endKey = dbCollectionGroupKey(collectionGroup, IndexOffset.max());
        return remoteDocumentsStore(transaction)
            .iterate({
                index: DbRemoteDocumentCollectionGroupIndex,
                range: IDBKeyRange.bound(startKey, endKey, true)
            }, (_, dbRemoteDoc, control) => {
                const document = this.maybeDecodeDocument(DocumentKey.fromSegments(dbRemoteDoc.prefixPath.concat(dbRemoteDoc.collectionGroup, dbRemoteDoc.documentId)), dbRemoteDoc);
                results = results.insert(document.key, document);
                // Stop the cursor once the requested number of documents is read.
                if (results.size === limit) {
                    control.done();
                }
            })
            .next(() => results);
    }
    /** Returns a change buffer that batches writes against this cache. */
    newChangeBuffer(options) {
        return new IndexedDbRemoteDocumentChangeBuffer(this, !!options && options.trackRemovals);
    }
    /** Returns the cache's total byte size as tracked in the metadata row. */
    getSize(txn) {
        return this.getMetadata(txn).next(metadata => metadata.byteSize);
    }
    getMetadata(txn) {
        return documentGlobalStore(txn)
            .get(DbRemoteDocumentGlobalKey)
            .next(metadata => {
                // The metadata row is expected to always exist (presumably seeded
                // by a schema migration); a missing row indicates corruption.
                hardAssert(!!metadata);
                return metadata;
            });
    }
    setMetadata(txn, metadata) {
        return documentGlobalStore(txn).put(DbRemoteDocumentGlobalKey, metadata);
    }
    /**
     * Decodes `dbRemoteDoc` and returns the document (or an invalid document if
     * the document corresponds to the format used for sentinel deletes).
     */
    maybeDecodeDocument(documentKey, dbRemoteDoc) {
        if (dbRemoteDoc) {
            const doc = fromDbRemoteDocument(this.serializer, dbRemoteDoc);
            // Whether the document is a sentinel removal and should only be used in the
            // `getNewDocumentChanges()`
            const isSentinelRemoval = doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min());
            if (!isSentinelRemoval) {
                return doc;
            }
        }
        return MutableDocument.newInvalidDocument(documentKey);
    }
}
/**
 * Creates a new IndexedDbRemoteDocumentCache.
 *
 * @param serializer - Converts documents to/from their IndexedDB
 * representation.
 */
function newIndexedDbRemoteDocumentCache(serializer) {
    return new IndexedDbRemoteDocumentCacheImpl(serializer);
}
  13080. /**
  13081. * Handles the details of adding and updating documents in the IndexedDbRemoteDocumentCache.
  13082. *
  13083. * Unlike the MemoryRemoteDocumentChangeBuffer, the IndexedDb implementation computes the size
  13084. * delta for all submitted changes. This avoids having to re-read all documents from IndexedDb
  13085. * when we apply the changes.
  13086. */
class IndexedDbRemoteDocumentChangeBuffer extends RemoteDocumentChangeBuffer {
    /**
     * @param documentCache - The IndexedDbRemoteDocumentCache to apply the changes to.
     * @param trackRemovals - Whether to create sentinel deletes that can be tracked by
     * `getNewDocumentChanges()`.
     */
    constructor(documentCache, trackRemovals) {
        super();
        this.documentCache = documentCache;
        this.trackRemovals = trackRemovals;
        // A map of document sizes and read times prior to applying the changes in
        // this buffer.
        this.documentStates = new ObjectMap(key => key.toString(), (l, r) => l.isEqual(r));
    }
    /**
     * Writes all buffered changes, computing the net byte-size delta against
     * the previously-read entries and updating the cache metadata once.
     */
    applyChanges(transaction) {
        const promises = [];
        let sizeDelta = 0;
        let collectionParents = new SortedSet((l, r) => primitiveComparator(l.canonicalString(), r.canonicalString()));
        this.changes.forEach((key, documentChange) => {
            // previousDoc is expected to exist: the base class contract requires
            // every changed entry to have been read via getEntry()/getEntries()
            // first, which records its prior size and read time.
            const previousDoc = this.documentStates.get(key);
            // Remove the old entry first; the store is keyed by read time, so
            // the prior read time is needed to address it.
            promises.push(this.documentCache.removeEntry(transaction, key, previousDoc.readTime));
            if (documentChange.isValidDocument()) {
                const doc = toDbRemoteDocument(this.documentCache.serializer, documentChange);
                collectionParents = collectionParents.add(key.path.popLast());
                const size = dbDocumentSize(doc);
                sizeDelta += size - previousDoc.size;
                promises.push(this.documentCache.addEntry(transaction, key, doc));
            }
            else {
                sizeDelta -= previousDoc.size;
                if (this.trackRemovals) {
                    // In order to track removals, we store a "sentinel delete" in the
                    // RemoteDocumentCache. This entry is represented by a NoDocument
                    // with a version of 0 and ignored by `maybeDecodeDocument()` but
                    // preserved in `getNewDocumentChanges()`.
                    const deletedDoc = toDbRemoteDocument(this.documentCache.serializer, documentChange.convertToNoDocument(SnapshotVersion.min()));
                    promises.push(this.documentCache.addEntry(transaction, key, deletedDoc));
                }
            }
        });
        // Keep the collection-parent index in sync with any newly written docs.
        collectionParents.forEach(parent => {
            promises.push(this.documentCache.indexManager.addToCollectionParentIndex(transaction, parent));
        });
        promises.push(this.documentCache.updateMetadata(transaction, sizeDelta));
        return PersistencePromise.waitFor(promises);
    }
    getFromCache(transaction, documentKey) {
        // Record the size of everything we load from the cache so we can compute a delta later.
        return this.documentCache
            .getSizedEntry(transaction, documentKey)
            .next(getResult => {
                this.documentStates.set(documentKey, {
                    size: getResult.size,
                    readTime: getResult.document.readTime
                });
                return getResult.document;
            });
    }
    getAllFromCache(transaction, documentKeys) {
        // Record the size of everything we load from the cache so we can compute
        // a delta later.
        return this.documentCache
            .getSizedEntries(transaction, documentKeys)
            .next(({ documents, sizeMap }) => {
                // Note: `getAllFromCache` returns two maps instead of a single map from
                // keys to `DocumentSizeEntry`s. This is to allow returning the
                // `MutableDocumentMap` directly, without a conversion.
                sizeMap.forEach((documentKey, size) => {
                    this.documentStates.set(documentKey, {
                        size,
                        readTime: documents.get(documentKey).readTime
                    });
                });
                return documents;
            });
    }
}
/**
 * Helper to get a typed SimpleDbStore for the remoteDocumentGlobal object
 * store (holds the cache's byte-size metadata).
 */
function documentGlobalStore(txn) {
    return getStore(txn, DbRemoteDocumentGlobalStore);
}
/**
 * Helper to get a typed SimpleDbStore for the remoteDocuments object store.
 *
 * @param txn - The transaction the store handle is scoped to.
 */
function remoteDocumentsStore(txn) {
    return getStore(txn, DbRemoteDocumentStore);
}
  13173. /**
  13174. * Returns a key that can be used for document lookups on the
  13175. * `DbRemoteDocumentDocumentKeyIndex` index.
  13176. */
  13177. function dbKey(documentKey) {
  13178. const path = documentKey.path.toArray();
  13179. return [
  13180. /* prefix path */ path.slice(0, path.length - 2),
  13181. /* collection id */ path[path.length - 2],
  13182. /* document id */ path[path.length - 1]
  13183. ];
  13184. }
  13185. /**
  13186. * Returns a key that can be used for document lookups via the primary key of
  13187. * the DbRemoteDocument object store.
  13188. */
  13189. function dbReadTimeKey(documentKey, readTime) {
  13190. const path = documentKey.path.toArray();
  13191. return [
  13192. /* prefix path */ path.slice(0, path.length - 2),
  13193. /* collection id */ path[path.length - 2],
  13194. toDbTimestampKey(readTime),
  13195. /* document id */ path[path.length - 1]
  13196. ];
  13197. }
/**
 * Returns a key that can be used for document lookups on the
 * `DbRemoteDocumentDocumentCollectionGroupIndex` index.
 *
 * @param collectionGroup - The collection group id to scan.
 * @param offset - The index offset whose read time and document key position
 * the returned key encodes.
 */
function dbCollectionGroupKey(collectionGroup, offset) {
    const path = offset.documentKey.path.toArray();
    return [
        /* collection id */ collectionGroup,
        toDbTimestampKey(offset.readTime),
        /* prefix path */ path.slice(0, path.length - 2),
        // An empty offset path yields '' so the key sorts before all real
        // document ids in the group.
        /* document id */ path.length > 0 ? path[path.length - 1] : ''
    ];
}
/**
 * Comparator that compares document keys according to the primary key sorting
 * used by the `DbRemoteDocumentDocument` store (by prefix path, collection id
 * and then document ID).
 *
 * Visible for testing.
 */
function dbKeyComparator(l, r) {
    const left = l.path.toArray();
    const right = r.path.toArray();
    // The ordering is based on https://chromium.googlesource.com/chromium/blink/+/fe5c21fef94dae71c1c3344775b8d8a7f7e6d9ec/Source/modules/indexeddb/IDBKey.cpp#74
    let cmp = 0;
    // Compare the shared portion of the prefix paths (everything except the
    // trailing collection id and document id) segment by segment.
    for (let i = 0; i < left.length - 2 && i < right.length - 2; ++i) {
        cmp = primitiveComparator(left[i], right[i]);
        if (cmp) {
            return cmp;
        }
    }
    // A shorter path sorts first, mirroring IndexedDB's array-key ordering.
    cmp = primitiveComparator(left.length, right.length);
    if (cmp) {
        return cmp;
    }
    // Prefix paths are equal: order by collection id, then document id.
    cmp = primitiveComparator(left[left.length - 2], right[right.length - 2]);
    if (cmp) {
        return cmp;
    }
    return primitiveComparator(left[left.length - 1], right[right.length - 1]);
}
  13239. /**
  13240. * @license
  13241. * Copyright 2017 Google LLC
  13242. *
  13243. * Licensed under the Apache License, Version 2.0 (the "License");
  13244. * you may not use this file except in compliance with the License.
  13245. * You may obtain a copy of the License at
  13246. *
  13247. * http://www.apache.org/licenses/LICENSE-2.0
  13248. *
  13249. * Unless required by applicable law or agreed to in writing, software
  13250. * distributed under the License is distributed on an "AS IS" BASIS,
  13251. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13252. * See the License for the specific language governing permissions and
  13253. * limitations under the License.
  13254. */
  13255. /**
  13256. * Schema Version for the Web client:
  13257. * 1. Initial version including Mutation Queue, Query Cache, and Remote
  13258. * Document Cache
  13259. * 2. Used to ensure a targetGlobal object exists and add targetCount to it. No
  13260. * longer required because migration 3 unconditionally clears it.
  13261. * 3. Dropped and re-created Query Cache to deal with cache corruption related
  13262. * to limbo resolution. Addresses
  13263. * https://github.com/firebase/firebase-ios-sdk/issues/1548
  13264. * 4. Multi-Tab Support.
  13265. * 5. Removal of held write acks.
  13266. * 6. Create document global for tracking document cache size.
  13267. * 7. Ensure every cached document has a sentinel row with a sequence number.
  13268. * 8. Add collection-parent index for Collection Group queries.
  13269. * 9. Change RemoteDocumentChanges store to be keyed by readTime rather than
  13270. * an auto-incrementing ID. This is required for Index-Free queries.
  13271. * 10. Rewrite the canonical IDs to the explicit Protobuf-based format.
  13272. * 11. Add bundles and named_queries for bundle support.
  13273. * 12. Add document overlays.
  13274. * 13. Rewrite the keys of the remote document cache to allow for efficient
  13275. * document lookup via `getAll()`.
  13276. * 14. Add overlays.
  13277. * 15. Add indexing support.
  13278. */
const SCHEMA_VERSION = 15; // Keep in sync with the migration history above.
  13280. /**
  13281. * @license
  13282. * Copyright 2022 Google LLC
  13283. *
  13284. * Licensed under the Apache License, Version 2.0 (the "License");
  13285. * you may not use this file except in compliance with the License.
  13286. * You may obtain a copy of the License at
  13287. *
  13288. * http://www.apache.org/licenses/LICENSE-2.0
  13289. *
  13290. * Unless required by applicable law or agreed to in writing, software
  13291. * distributed under the License is distributed on an "AS IS" BASIS,
  13292. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13293. * See the License for the specific language governing permissions and
  13294. * limitations under the License.
  13295. */
  13296. /**
  13297. * Represents a local view (overlay) of a document, and the fields that are
  13298. * locally mutated.
  13299. */
  13300. class OverlayedDocument {
  13301. constructor(overlayedDocument,
  13302. /**
  13303. * The fields that are locally mutated by patch mutations.
  13304. *
  13305. * If the overlayed document is from set or delete mutations, this is `null`.
  13306. * If there is no overlay (mutation) for the document, this is an empty `FieldMask`.
  13307. */
  13308. mutatedFields) {
  13309. this.overlayedDocument = overlayedDocument;
  13310. this.mutatedFields = mutatedFields;
  13311. }
  13312. }
  13313. /**
  13314. * @license
  13315. * Copyright 2017 Google LLC
  13316. *
  13317. * Licensed under the Apache License, Version 2.0 (the "License");
  13318. * you may not use this file except in compliance with the License.
  13319. * You may obtain a copy of the License at
  13320. *
  13321. * http://www.apache.org/licenses/LICENSE-2.0
  13322. *
  13323. * Unless required by applicable law or agreed to in writing, software
  13324. * distributed under the License is distributed on an "AS IS" BASIS,
  13325. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13326. * See the License for the specific language governing permissions and
  13327. * limitations under the License.
  13328. */
  13329. /**
  13330. * A readonly view of the local state of all documents we're tracking (i.e. we
  13331. * have a cached version in remoteDocumentCache or local mutations for the
  13332. * document). The view is computed by applying the mutations in the
  13333. * MutationQueue to the RemoteDocumentCache.
  13334. */
  13335. class LocalDocumentsView {
/**
 * @param remoteDocumentCache - Cached base versions of documents.
 * @param mutationQueue - Pending local mutations.
 * @param documentOverlayCache - Precomputed mutation overlays per document.
 * @param indexManager - Index lookups for queries.
 */
constructor(remoteDocumentCache, mutationQueue, documentOverlayCache, indexManager) {
    this.remoteDocumentCache = remoteDocumentCache;
    this.mutationQueue = mutationQueue;
    this.documentOverlayCache = documentOverlayCache;
    this.indexManager = indexManager;
}
/**
 * Get the local view of the document identified by `key`.
 *
 * @returns Local view of the document: the cached base document with any
 * pending overlay mutation applied. If nothing is cached, the result is an
 * invalid document (see `RemoteDocumentCache.getEntry()`).
 */
getDocument(transaction, key) {
    let overlay = null;
    return this.documentOverlayCache
        .getOverlay(transaction, key)
        .next(value => {
            overlay = value;
            return this.remoteDocumentCache.getEntry(transaction, key);
        })
        .next(document => {
            if (overlay !== null) {
                // Apply the pending local mutation to the base document before
                // returning it.
                mutationApplyToLocalView(overlay.mutation, document, FieldMask.empty(), Timestamp.now());
            }
            return document;
        });
}
/**
 * Gets the local view of the documents identified by `keys`.
 *
 * If we don't have cached state for a document in `keys`, a NoDocument will
 * be stored for that key in the resulting set.
 */
getDocuments(transaction, keys) {
    return this.remoteDocumentCache
        .getEntries(transaction, keys)
        // NOTE(review): the computed view map is discarded and `docs` itself is
        // returned — presumably the documents in `docs` are updated in place by
        // the overlay application; confirm against mutationApplyToLocalView.
        .next(docs => this.getLocalViewOfDocuments(transaction, docs, documentKeySet()).next(() => docs));
}
/**
 * Similar to `getDocuments`, but creates the local view from the given
 * `baseDocs` without retrieving documents from the local store.
 *
 * @param transaction - The transaction this operation is scoped to.
 * @param docs - The documents to apply local mutations to get the local views.
 * @param existenceStateChanged - The set of document keys whose existence state
 * is changed. This is useful to determine if some documents overlay needs
 * to be recalculated.
 * @returns A map from document key to that document's overlayed (local) view.
 */
getLocalViewOfDocuments(transaction, docs, existenceStateChanged = documentKeySet()) {
    const overlays = newOverlayMap();
    return this.populateOverlays(transaction, overlays, docs).next(() => {
        return this.computeViews(transaction, docs, overlays, existenceStateChanged).next(computeViewsResult => {
            // Flatten the OverlayedDocument results down to plain documents.
            let result = documentMap();
            computeViewsResult.forEach((documentKey, overlayedDocument) => {
                result = result.insert(documentKey, overlayedDocument.overlayedDocument);
            });
            return result;
        });
    });
}
/**
 * Gets the overlayed documents for the given document map, which will include
 * the local view of those documents and a `FieldMask` indicating which fields
 * are mutated locally, `null` if overlay is a Set or Delete mutation.
 *
 * @param docs - The base documents (with their cached versions) to overlay.
 */
getOverlayedDocuments(transaction, docs) {
    const overlays = newOverlayMap();
    return this.populateOverlays(transaction, overlays, docs).next(() => this.computeViews(transaction, docs, overlays, documentKeySet()));
}
  13405. /**
  13406. * Fetches the overlays for {@code docs} and adds them to provided overlay map
  13407. * if the map does not already contain an entry for the given document key.
  13408. */
  13409. populateOverlays(transaction, overlays, docs) {
  13410. const missingOverlays = [];
  13411. docs.forEach(key => {
  13412. if (!overlays.has(key)) {
  13413. missingOverlays.push(key);
  13414. }
  13415. });
  13416. return this.documentOverlayCache
  13417. .getOverlays(transaction, missingOverlays)
  13418. .next(result => {
  13419. result.forEach((key, val) => {
  13420. overlays.set(key, val);
  13421. });
  13422. });
  13423. }
  13424. /**
  13425. * Computes the local view for the given documents.
  13426. *
  13427. * @param docs - The documents to compute views for. It also has the base
  13428. * version of the documents.
  13429. * @param overlays - The overlays that need to be applied to the given base
  13430. * version of the documents.
  13431. * @param existenceStateChanged - A set of documents whose existence states
  13432. * might have changed. This is used to determine if we need to re-calculate
  13433. * overlays from mutation queues.
  13434. * @return A map represents the local documents view.
  13435. */
computeViews(transaction, docs, overlays, existenceStateChanged) {
  // Documents whose overlay must be re-derived from the mutation queue.
  let recalculateDocuments = mutableDocumentMap();
  // Per-document field mask of locally mutated fields (or FieldMask.empty()
  // when no overlay applies; later replaced with null for Set/Delete overlays).
  const mutatedFields = newDocumentKeyMap();
  const results = newOverlayedDocumentMap();
  docs.forEach((_, doc) => {
    const overlay = overlays.get(doc.key);
    // Recalculate an overlay if the document's existence state changed due to
    // a remote event *and* the overlay is a PatchMutation. This is because
    // document existence state can change if some patch mutation's
    // preconditions are met.
    // NOTE: we recalculate when `overlay` is undefined as well, because there
    // might be a patch mutation whose precondition does not match before the
    // change (hence overlay is undefined), but would now match.
    if (existenceStateChanged.has(doc.key) &&
      (overlay === undefined || overlay.mutation instanceof PatchMutation)) {
      recalculateDocuments = recalculateDocuments.insert(doc.key, doc);
    }
    else if (overlay !== undefined) {
      // Apply the stored overlay mutation to the base document in place.
      mutatedFields.set(doc.key, overlay.mutation.getFieldMask());
      mutationApplyToLocalView(overlay.mutation, doc, overlay.mutation.getFieldMask(), Timestamp.now());
    }
    else {
      // no overlay exists
      // Using EMPTY to indicate there is no overlay for the document.
      mutatedFields.set(doc.key, FieldMask.empty());
    }
  });
  return this.recalculateAndSaveOverlays(transaction, recalculateDocuments).next(recalculatedFields => {
    // Masks derived from the mutation queue override the ones gathered above.
    recalculatedFields.forEach((documentKey, mask) => mutatedFields.set(documentKey, mask));
    docs.forEach((documentKey, document) => {
      var _a;
      // A missing mask maps to null (overlay was a Set or Delete mutation).
      return results.set(documentKey, new OverlayedDocument(document, (_a = mutatedFields.get(documentKey)) !== null && _a !== void 0 ? _a : null));
    });
    return results;
  });
}
/**
 * Re-derives overlays for the given base documents by replaying all affecting
 * mutation batches, persists the resulting overlays, and returns the
 * per-document field masks of locally mutated fields.
 */
recalculateAndSaveOverlays(transaction, docs) {
  const masks = newDocumentKeyMap();
  // A reverse lookup map from batch id to the documents within that batch.
  let documentsByBatchId = new SortedMap((key1, key2) => key1 - key2);
  // Keys whose overlay has already been saved (at their largest batch id).
  let processed = documentKeySet();
  return this.mutationQueue
    .getAllMutationBatchesAffectingDocumentKeys(transaction, docs)
    .next(batches => {
      for (const batch of batches) {
        batch.keys().forEach(key => {
          const baseDoc = docs.get(key);
          if (baseDoc === null) {
            // Key is not part of the requested base documents; skip it.
            return;
          }
          // Fold this batch's mutations into the document's running mask.
          let mask = masks.get(key) || FieldMask.empty();
          mask = batch.applyToLocalView(baseDoc, mask);
          masks.set(key, mask);
          const newSet = (documentsByBatchId.get(batch.batchId) || documentKeySet()).add(key);
          documentsByBatchId = documentsByBatchId.insert(batch.batchId, newSet);
        });
      }
    })
    .next(() => {
      const promises = [];
      // Iterate in descending order of batch IDs, and skip documents that are
      // already saved.
      const iter = documentsByBatchId.getReverseIterator();
      while (iter.hasNext()) {
        const entry = iter.getNext();
        const batchId = entry.key;
        const keys = entry.value;
        const overlays = newMutationMap();
        keys.forEach(key => {
          if (!processed.has(key)) {
            // A null overlay mutation means the local view equals the base
            // document, so no overlay needs to be stored for this key.
            const overlayMutation = calculateOverlayMutation(docs.get(key), masks.get(key));
            if (overlayMutation !== null) {
              overlays.set(key, overlayMutation);
            }
            processed = processed.add(key);
          }
        });
        promises.push(this.documentOverlayCache.saveOverlays(transaction, batchId, overlays));
      }
      return PersistencePromise.waitFor(promises);
    })
    .next(() => masks);
}
  13519. /**
  13520. * Recalculates overlays by reading the documents from remote document cache
  13521. * first, and saves them after they are calculated.
  13522. */
  13523. recalculateAndSaveOverlaysForDocumentKeys(transaction, documentKeys) {
  13524. return this.remoteDocumentCache
  13525. .getEntries(transaction, documentKeys)
  13526. .next(docs => this.recalculateAndSaveOverlays(transaction, docs));
  13527. }
  13528. /**
  13529. * Performs a query against the local view of all documents.
  13530. *
  13531. * @param transaction - The persistence transaction.
  13532. * @param query - The query to match documents against.
  13533. * @param offset - Read time and key to start scanning by (exclusive).
  13534. */
  13535. getDocumentsMatchingQuery(transaction, query, offset) {
  13536. if (isDocumentQuery$1(query)) {
  13537. return this.getDocumentsMatchingDocumentQuery(transaction, query.path);
  13538. }
  13539. else if (isCollectionGroupQuery(query)) {
  13540. return this.getDocumentsMatchingCollectionGroupQuery(transaction, query, offset);
  13541. }
  13542. else {
  13543. return this.getDocumentsMatchingCollectionQuery(transaction, query, offset);
  13544. }
  13545. }
  13546. /**
  13547. * Given a collection group, returns the next documents that follow the provided offset, along
  13548. * with an updated batch ID.
  13549. *
  13550. * <p>The documents returned by this method are ordered by remote version from the provided
  13551. * offset. If there are no more remote documents after the provided offset, documents with
  13552. * mutations in order of batch id from the offset are returned. Since all documents in a batch are
  13553. * returned together, the total number of documents returned can exceed {@code count}.
  13554. *
  13555. * @param transaction
  13556. * @param collectionGroup The collection group for the documents.
  13557. * @param offset The offset to index into.
  13558. * @param count The number of documents to return
  13559. * @return A LocalWriteResult with the documents that follow the provided offset and the last processed batch id.
  13560. */
getNextDocuments(transaction, collectionGroup, offset, count) {
  return this.remoteDocumentCache
    .getAllFromCollectionGroup(transaction, collectionGroup, offset, count)
    .next((originalDocs) => {
      // Only fetch additional overlays if the remote read did not already
      // fill the requested count.
      const overlaysPromise = count - originalDocs.size > 0
        ? this.documentOverlayCache.getOverlaysForCollectionGroup(transaction, collectionGroup, offset.largestBatchId, count - originalDocs.size)
        : PersistencePromise.resolve(newOverlayMap());
      // The callsite will use the largest batch ID together with the latest read time to create
      // a new index offset. Since we only process batch IDs if all remote documents have been read,
      // no overlay will increase the overall read time. This is why we only need to special case
      // the batch id.
      let largestBatchId = INITIAL_LARGEST_BATCH_ID;
      let modifiedDocs = originalDocs;
      return overlaysPromise.next(overlays => {
        return PersistencePromise.forEach(overlays, (key, overlay) => {
          if (largestBatchId < overlay.largestBatchId) {
            largestBatchId = overlay.largestBatchId;
          }
          if (originalDocs.get(key)) {
            // Remote results already contain this document; nothing to fetch.
            return PersistencePromise.resolve();
          }
          // The overlay refers to a document outside the remote results; pull
          // its base version from the cache so the view can be computed.
          return this.remoteDocumentCache
            .getEntry(transaction, key)
            .next(doc => {
              modifiedDocs = modifiedDocs.insert(key, doc);
            });
        })
          .next(() => this.populateOverlays(transaction, overlays, originalDocs))
          .next(() => this.computeViews(transaction, modifiedDocs, overlays, documentKeySet()))
          .next(localDocs => ({
            batchId: largestBatchId,
            changes: convertOverlayedDocumentMapToDocumentMap(localDocs)
          }));
      });
    });
}
  13597. getDocumentsMatchingDocumentQuery(transaction, docPath) {
  13598. // Just do a simple document lookup.
  13599. return this.getDocument(transaction, new DocumentKey(docPath)).next(document => {
  13600. let result = documentMap();
  13601. if (document.isFoundDocument()) {
  13602. result = result.insert(document.key, document);
  13603. }
  13604. return result;
  13605. });
  13606. }
  13607. getDocumentsMatchingCollectionGroupQuery(transaction, query, offset) {
  13608. const collectionId = query.collectionGroup;
  13609. let results = documentMap();
  13610. return this.indexManager
  13611. .getCollectionParents(transaction, collectionId)
  13612. .next(parents => {
  13613. // Perform a collection query against each parent that contains the
  13614. // collectionId and aggregate the results.
  13615. return PersistencePromise.forEach(parents, (parent) => {
  13616. const collectionQuery = asCollectionQueryAtPath(query, parent.child(collectionId));
  13617. return this.getDocumentsMatchingCollectionQuery(transaction, collectionQuery, offset).next(r => {
  13618. r.forEach((key, doc) => {
  13619. results = results.insert(key, doc);
  13620. });
  13621. });
  13622. }).next(() => results);
  13623. });
  13624. }
/**
 * Runs a collection query against the remote document cache, applies local
 * overlays, and returns the documents that match the query after overlays
 * are applied.
 */
getDocumentsMatchingCollectionQuery(transaction, query, offset) {
  // Query the remote documents and overlay mutations.
  let remoteDocuments;
  return this.remoteDocumentCache
    .getAllFromCollection(transaction, query.path, offset)
    .next(queryResults => {
      remoteDocuments = queryResults;
      return this.documentOverlayCache.getOverlaysForCollection(transaction, query.path, offset.largestBatchId);
    })
    .next(overlays => {
      // As documents might match the query because of their overlay we need to
      // include documents for all overlays in the initial document set.
      overlays.forEach((_, overlay) => {
        const key = overlay.getKey();
        if (remoteDocuments.get(key) === null) {
          // Placeholder base document; the overlay mutation below supplies
          // the actual contents.
          remoteDocuments = remoteDocuments.insert(key, MutableDocument.newInvalidDocument(key));
        }
      });
      // Apply the overlays and match against the query.
      let results = documentMap();
      remoteDocuments.forEach((key, document) => {
        const overlay = overlays.get(key);
        if (overlay !== undefined) {
          // Mutates `document` in place to reflect the local view.
          mutationApplyToLocalView(overlay.mutation, document, FieldMask.empty(), Timestamp.now());
        }
        // Finally, insert the documents that still match the query
        if (queryMatches(query, document)) {
          results = results.insert(key, document);
        }
      });
      return results;
    });
}
  13658. }
  13659. /**
  13660. * @license
  13661. * Copyright 2020 Google LLC
  13662. *
  13663. * Licensed under the Apache License, Version 2.0 (the "License");
  13664. * you may not use this file except in compliance with the License.
  13665. * You may obtain a copy of the License at
  13666. *
  13667. * http://www.apache.org/licenses/LICENSE-2.0
  13668. *
  13669. * Unless required by applicable law or agreed to in writing, software
  13670. * distributed under the License is distributed on an "AS IS" BASIS,
  13671. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13672. * See the License for the specific language governing permissions and
  13673. * limitations under the License.
  13674. */
  13675. class MemoryBundleCache {
  13676. constructor(serializer) {
  13677. this.serializer = serializer;
  13678. this.bundles = new Map();
  13679. this.namedQueries = new Map();
  13680. }
  13681. getBundleMetadata(transaction, bundleId) {
  13682. return PersistencePromise.resolve(this.bundles.get(bundleId));
  13683. }
  13684. saveBundleMetadata(transaction, bundleMetadata) {
  13685. this.bundles.set(bundleMetadata.id, fromBundleMetadata(bundleMetadata));
  13686. return PersistencePromise.resolve();
  13687. }
  13688. getNamedQuery(transaction, queryName) {
  13689. return PersistencePromise.resolve(this.namedQueries.get(queryName));
  13690. }
  13691. saveNamedQuery(transaction, query) {
  13692. this.namedQueries.set(query.name, fromProtoNamedQuery(query));
  13693. return PersistencePromise.resolve();
  13694. }
  13695. }
  13696. /**
  13697. * @license
  13698. * Copyright 2022 Google LLC
  13699. *
  13700. * Licensed under the Apache License, Version 2.0 (the "License");
  13701. * you may not use this file except in compliance with the License.
  13702. * You may obtain a copy of the License at
  13703. *
  13704. * http://www.apache.org/licenses/LICENSE-2.0
  13705. *
  13706. * Unless required by applicable law or agreed to in writing, software
  13707. * distributed under the License is distributed on an "AS IS" BASIS,
  13708. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13709. * See the License for the specific language governing permissions and
  13710. * limitations under the License.
  13711. */
  13712. /**
  13713. * An in-memory implementation of DocumentOverlayCache.
  13714. */
class MemoryDocumentOverlayCache {
  constructor() {
    // A map sorted by DocumentKey, whose value is a pair of the largest batch id
    // for the overlay and the overlay itself.
    this.overlays = new SortedMap(DocumentKey.comparator);
    // Reverse index: batch id -> set of document keys whose current overlay
    // belongs to that batch.
    this.overlayByBatchId = new Map();
  }
  /** Returns the overlay for `key`, or null when none is stored. */
  getOverlay(transaction, key) {
    return PersistencePromise.resolve(this.overlays.get(key));
  }
  /** Bulk variant of getOverlay; missing keys are simply absent from the result. */
  getOverlays(transaction, keys) {
    const result = newOverlayMap();
    return PersistencePromise.forEach(keys, (key) => {
      return this.getOverlay(transaction, key).next(overlay => {
        if (overlay !== null) {
          result.set(key, overlay);
        }
      });
    }).next(() => result);
  }
  /** Stores one overlay per entry in `overlays`, all tagged with `largestBatchId`. */
  saveOverlays(transaction, largestBatchId, overlays) {
    overlays.forEach((_, mutation) => {
      this.saveOverlay(transaction, largestBatchId, mutation);
    });
    return PersistencePromise.resolve();
  }
  // NOTE(review): `documentKeys` is unused here; removal is driven entirely by
  // the batch-id reverse index.
  removeOverlaysForBatchId(transaction, documentKeys, batchId) {
    const keys = this.overlayByBatchId.get(batchId);
    if (keys !== undefined) {
      keys.forEach(key => (this.overlays = this.overlays.remove(key)));
      this.overlayByBatchId.delete(batchId);
    }
    return PersistencePromise.resolve();
  }
  /**
   * Returns all overlays for immediate children of `collection` whose batch id
   * is strictly greater than `sinceBatchId`.
   */
  getOverlaysForCollection(transaction, collection, sinceBatchId) {
    const result = newOverlayMap();
    const immediateChildrenPathLength = collection.length + 1;
    // Seek to the first key at or under the collection path.
    const prefix = new DocumentKey(collection.child(''));
    const iter = this.overlays.getIteratorFrom(prefix);
    while (iter.hasNext()) {
      const entry = iter.getNext();
      const overlay = entry.value;
      const key = overlay.getKey();
      if (!collection.isPrefixOf(key.path)) {
        // Left the collection's key range; keys are sorted, so stop.
        break;
      }
      // Documents from sub-collections
      if (key.path.length !== immediateChildrenPathLength) {
        continue;
      }
      if (overlay.largestBatchId > sinceBatchId) {
        result.set(overlay.getKey(), overlay);
      }
    }
    return PersistencePromise.resolve(result);
  }
  /**
   * Returns up to `count` overlays for the given collection group newer than
   * `sinceBatchId`, grouped so that all overlays of a batch are returned
   * together (the result may therefore exceed `count`).
   */
  getOverlaysForCollectionGroup(transaction, collectionGroup, sinceBatchId, count) {
    // Group matching overlays by batch id, in ascending batch-id order.
    let batchIdToOverlays = new SortedMap((key1, key2) => key1 - key2);
    const iter = this.overlays.getIterator();
    while (iter.hasNext()) {
      const entry = iter.getNext();
      const overlay = entry.value;
      const key = overlay.getKey();
      if (key.getCollectionGroup() !== collectionGroup) {
        continue;
      }
      if (overlay.largestBatchId > sinceBatchId) {
        let overlaysForBatchId = batchIdToOverlays.get(overlay.largestBatchId);
        if (overlaysForBatchId === null) {
          overlaysForBatchId = newOverlayMap();
          batchIdToOverlays = batchIdToOverlays.insert(overlay.largestBatchId, overlaysForBatchId);
        }
        overlaysForBatchId.set(overlay.getKey(), overlay);
      }
    }
    // Emit whole batches until the count is reached.
    const result = newOverlayMap();
    const batchIter = batchIdToOverlays.getIterator();
    while (batchIter.hasNext()) {
      const entry = batchIter.getNext();
      const overlays = entry.value;
      overlays.forEach((key, overlay) => result.set(key, overlay));
      if (result.size() >= count) {
        break;
      }
    }
    return PersistencePromise.resolve(result);
  }
  /** Inserts/replaces the overlay for `mutation.key` and updates the batch index. */
  saveOverlay(transaction, largestBatchId, mutation) {
    // Remove the association of the overlay to its batch id.
    const existing = this.overlays.get(mutation.key);
    if (existing !== null) {
      const newSet = this.overlayByBatchId
        .get(existing.largestBatchId)
        .delete(mutation.key);
      this.overlayByBatchId.set(existing.largestBatchId, newSet);
    }
    this.overlays = this.overlays.insert(mutation.key, new Overlay(largestBatchId, mutation));
    // Create the association of this overlay to the given largestBatchId.
    let batch = this.overlayByBatchId.get(largestBatchId);
    if (batch === undefined) {
      batch = documentKeySet();
      this.overlayByBatchId.set(largestBatchId, batch);
    }
    this.overlayByBatchId.set(largestBatchId, batch.add(mutation.key));
  }
}
  13821. /**
  13822. * @license
  13823. * Copyright 2017 Google LLC
  13824. *
  13825. * Licensed under the Apache License, Version 2.0 (the "License");
  13826. * you may not use this file except in compliance with the License.
  13827. * You may obtain a copy of the License at
  13828. *
  13829. * http://www.apache.org/licenses/LICENSE-2.0
  13830. *
  13831. * Unless required by applicable law or agreed to in writing, software
  13832. * distributed under the License is distributed on an "AS IS" BASIS,
  13833. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13834. * See the License for the specific language governing permissions and
  13835. * limitations under the License.
  13836. */
  13837. /**
  13838. * A collection of references to a document from some kind of numbered entity
  13839. * (either a target ID or batch ID). As references are added to or removed from
  13840. * the set corresponding events are emitted to a registered garbage collector.
  13841. *
  13842. * Each reference is represented by a DocumentReference object. Each of them
  13843. * contains enough information to uniquely identify the reference. They are all
  13844. * stored primarily in a set sorted by key. A document is considered garbage if
  13845. * there's no references in that set (this can be efficiently checked thanks to
  13846. * sorting by key).
  13847. *
  13848. * ReferenceSet also keeps a secondary set that contains references sorted by
  13849. * IDs. This one is used to efficiently implement removal of all references by
  13850. * some target ID.
  13851. */
  13852. class ReferenceSet {
  13853. constructor() {
  13854. // A set of outstanding references to a document sorted by key.
  13855. this.refsByKey = new SortedSet(DocReference.compareByKey);
  13856. // A set of outstanding references to a document sorted by target id.
  13857. this.refsByTarget = new SortedSet(DocReference.compareByTargetId);
  13858. }
  13859. /** Returns true if the reference set contains no references. */
  13860. isEmpty() {
  13861. return this.refsByKey.isEmpty();
  13862. }
  13863. /** Adds a reference to the given document key for the given ID. */
  13864. addReference(key, id) {
  13865. const ref = new DocReference(key, id);
  13866. this.refsByKey = this.refsByKey.add(ref);
  13867. this.refsByTarget = this.refsByTarget.add(ref);
  13868. }
  13869. /** Add references to the given document keys for the given ID. */
  13870. addReferences(keys, id) {
  13871. keys.forEach(key => this.addReference(key, id));
  13872. }
  13873. /**
  13874. * Removes a reference to the given document key for the given
  13875. * ID.
  13876. */
  13877. removeReference(key, id) {
  13878. this.removeRef(new DocReference(key, id));
  13879. }
  13880. removeReferences(keys, id) {
  13881. keys.forEach(key => this.removeReference(key, id));
  13882. }
  13883. /**
  13884. * Clears all references with a given ID. Calls removeRef() for each key
  13885. * removed.
  13886. */
  13887. removeReferencesForId(id) {
  13888. const emptyKey = new DocumentKey(new ResourcePath([]));
  13889. const startRef = new DocReference(emptyKey, id);
  13890. const endRef = new DocReference(emptyKey, id + 1);
  13891. const keys = [];
  13892. this.refsByTarget.forEachInRange([startRef, endRef], ref => {
  13893. this.removeRef(ref);
  13894. keys.push(ref.key);
  13895. });
  13896. return keys;
  13897. }
  13898. removeAllReferences() {
  13899. this.refsByKey.forEach(ref => this.removeRef(ref));
  13900. }
  13901. removeRef(ref) {
  13902. this.refsByKey = this.refsByKey.delete(ref);
  13903. this.refsByTarget = this.refsByTarget.delete(ref);
  13904. }
  13905. referencesForId(id) {
  13906. const emptyKey = new DocumentKey(new ResourcePath([]));
  13907. const startRef = new DocReference(emptyKey, id);
  13908. const endRef = new DocReference(emptyKey, id + 1);
  13909. let keys = documentKeySet();
  13910. this.refsByTarget.forEachInRange([startRef, endRef], ref => {
  13911. keys = keys.add(ref.key);
  13912. });
  13913. return keys;
  13914. }
  13915. containsKey(key) {
  13916. const ref = new DocReference(key, 0);
  13917. const firstRef = this.refsByKey.firstAfterOrEqual(ref);
  13918. return firstRef !== null && key.isEqual(firstRef.key);
  13919. }
  13920. }
  13921. class DocReference {
  13922. constructor(key, targetOrBatchId) {
  13923. this.key = key;
  13924. this.targetOrBatchId = targetOrBatchId;
  13925. }
  13926. /** Compare by key then by ID */
  13927. static compareByKey(left, right) {
  13928. return (DocumentKey.comparator(left.key, right.key) ||
  13929. primitiveComparator(left.targetOrBatchId, right.targetOrBatchId));
  13930. }
  13931. /** Compare by ID then by key */
  13932. static compareByTargetId(left, right) {
  13933. return (primitiveComparator(left.targetOrBatchId, right.targetOrBatchId) ||
  13934. DocumentKey.comparator(left.key, right.key));
  13935. }
  13936. }
  13937. /**
  13938. * @license
  13939. * Copyright 2017 Google LLC
  13940. *
  13941. * Licensed under the Apache License, Version 2.0 (the "License");
  13942. * you may not use this file except in compliance with the License.
  13943. * You may obtain a copy of the License at
  13944. *
  13945. * http://www.apache.org/licenses/LICENSE-2.0
  13946. *
  13947. * Unless required by applicable law or agreed to in writing, software
  13948. * distributed under the License is distributed on an "AS IS" BASIS,
  13949. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  13950. * See the License for the specific language governing permissions and
  13951. * limitations under the License.
  13952. */
  13953. class MemoryMutationQueue {
  13954. constructor(indexManager, referenceDelegate) {
  13955. this.indexManager = indexManager;
  13956. this.referenceDelegate = referenceDelegate;
  13957. /**
  13958. * The set of all mutations that have been sent but not yet been applied to
  13959. * the backend.
  13960. */
  13961. this.mutationQueue = [];
  13962. /** Next value to use when assigning sequential IDs to each mutation batch. */
  13963. this.nextBatchId = 1;
  13964. /** An ordered mapping between documents and the mutations batch IDs. */
  13965. this.batchesByDocumentKey = new SortedSet(DocReference.compareByKey);
  13966. }
  13967. checkEmpty(transaction) {
  13968. return PersistencePromise.resolve(this.mutationQueue.length === 0);
  13969. }
  13970. addMutationBatch(transaction, localWriteTime, baseMutations, mutations) {
  13971. const batchId = this.nextBatchId;
  13972. this.nextBatchId++;
  13973. if (this.mutationQueue.length > 0) {
  13974. this.mutationQueue[this.mutationQueue.length - 1];
  13975. }
  13976. const batch = new MutationBatch(batchId, localWriteTime, baseMutations, mutations);
  13977. this.mutationQueue.push(batch);
  13978. // Track references by document key and index collection parents.
  13979. for (const mutation of mutations) {
  13980. this.batchesByDocumentKey = this.batchesByDocumentKey.add(new DocReference(mutation.key, batchId));
  13981. this.indexManager.addToCollectionParentIndex(transaction, mutation.key.path.popLast());
  13982. }
  13983. return PersistencePromise.resolve(batch);
  13984. }
  13985. lookupMutationBatch(transaction, batchId) {
  13986. return PersistencePromise.resolve(this.findMutationBatch(batchId));
  13987. }
  13988. getNextMutationBatchAfterBatchId(transaction, batchId) {
  13989. const nextBatchId = batchId + 1;
  13990. // The requested batchId may still be out of range so normalize it to the
  13991. // start of the queue.
  13992. const rawIndex = this.indexOfBatchId(nextBatchId);
  13993. const index = rawIndex < 0 ? 0 : rawIndex;
  13994. return PersistencePromise.resolve(this.mutationQueue.length > index ? this.mutationQueue[index] : null);
  13995. }
  13996. getHighestUnacknowledgedBatchId() {
  13997. return PersistencePromise.resolve(this.mutationQueue.length === 0 ? BATCHID_UNKNOWN : this.nextBatchId - 1);
  13998. }
  13999. getAllMutationBatches(transaction) {
  14000. return PersistencePromise.resolve(this.mutationQueue.slice());
  14001. }
  14002. getAllMutationBatchesAffectingDocumentKey(transaction, documentKey) {
  14003. const start = new DocReference(documentKey, 0);
  14004. const end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
  14005. const result = [];
  14006. this.batchesByDocumentKey.forEachInRange([start, end], ref => {
  14007. const batch = this.findMutationBatch(ref.targetOrBatchId);
  14008. result.push(batch);
  14009. });
  14010. return PersistencePromise.resolve(result);
  14011. }
  14012. getAllMutationBatchesAffectingDocumentKeys(transaction, documentKeys) {
  14013. let uniqueBatchIDs = new SortedSet(primitiveComparator);
  14014. documentKeys.forEach(documentKey => {
  14015. const start = new DocReference(documentKey, 0);
  14016. const end = new DocReference(documentKey, Number.POSITIVE_INFINITY);
  14017. this.batchesByDocumentKey.forEachInRange([start, end], ref => {
  14018. uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
  14019. });
  14020. });
  14021. return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
  14022. }
  14023. getAllMutationBatchesAffectingQuery(transaction, query) {
  14024. // Use the query path as a prefix for testing if a document matches the
  14025. // query.
  14026. const prefix = query.path;
  14027. const immediateChildrenPathLength = prefix.length + 1;
  14028. // Construct a document reference for actually scanning the index. Unlike
  14029. // the prefix the document key in this reference must have an even number of
  14030. // segments. The empty segment can be used a suffix of the query path
  14031. // because it precedes all other segments in an ordered traversal.
  14032. let startPath = prefix;
  14033. if (!DocumentKey.isDocumentKey(startPath)) {
  14034. startPath = startPath.child('');
  14035. }
  14036. const start = new DocReference(new DocumentKey(startPath), 0);
  14037. // Find unique batchIDs referenced by all documents potentially matching the
  14038. // query.
  14039. let uniqueBatchIDs = new SortedSet(primitiveComparator);
  14040. this.batchesByDocumentKey.forEachWhile(ref => {
  14041. const rowKeyPath = ref.key.path;
  14042. if (!prefix.isPrefixOf(rowKeyPath)) {
  14043. return false;
  14044. }
  14045. else {
  14046. // Rows with document keys more than one segment longer than the query
  14047. // path can't be matches. For example, a query on 'rooms' can't match
  14048. // the document /rooms/abc/messages/xyx.
  14049. // TODO(mcg): we'll need a different scanner when we implement
  14050. // ancestor queries.
  14051. if (rowKeyPath.length === immediateChildrenPathLength) {
  14052. uniqueBatchIDs = uniqueBatchIDs.add(ref.targetOrBatchId);
  14053. }
  14054. return true;
  14055. }
  14056. }, start);
  14057. return PersistencePromise.resolve(this.findMutationBatches(uniqueBatchIDs));
  14058. }
  14059. findMutationBatches(batchIDs) {
  14060. // Construct an array of matching batches, sorted by batchID to ensure that
  14061. // multiple mutations affecting the same document key are applied in order.
  14062. const result = [];
  14063. batchIDs.forEach(batchId => {
  14064. const batch = this.findMutationBatch(batchId);
  14065. if (batch !== null) {
  14066. result.push(batch);
  14067. }
  14068. });
  14069. return result;
  14070. }
  14071. removeMutationBatch(transaction, batch) {
  14072. // Find the position of the first batch for removal.
  14073. const batchIndex = this.indexOfExistingBatchId(batch.batchId, 'removed');
  14074. hardAssert(batchIndex === 0);
  14075. this.mutationQueue.shift();
  14076. let references = this.batchesByDocumentKey;
  14077. return PersistencePromise.forEach(batch.mutations, (mutation) => {
  14078. const ref = new DocReference(mutation.key, batch.batchId);
  14079. references = references.delete(ref);
  14080. return this.referenceDelegate.markPotentiallyOrphaned(transaction, mutation.key);
  14081. }).next(() => {
  14082. this.batchesByDocumentKey = references;
  14083. });
  14084. }
  14085. removeCachedMutationKeys(batchId) {
  14086. // No-op since the memory mutation queue does not maintain a separate cache.
  14087. }
  14088. containsKey(txn, key) {
  14089. const ref = new DocReference(key, 0);
  14090. const firstRef = this.batchesByDocumentKey.firstAfterOrEqual(ref);
  14091. return PersistencePromise.resolve(key.isEqual(firstRef && firstRef.key));
  14092. }
  14093. performConsistencyCheck(txn) {
  14094. if (this.mutationQueue.length === 0) ;
  14095. return PersistencePromise.resolve();
  14096. }
  14097. /**
  14098. * Finds the index of the given batchId in the mutation queue and asserts that
  14099. * the resulting index is within the bounds of the queue.
  14100. *
  14101. * @param batchId - The batchId to search for
  14102. * @param action - A description of what the caller is doing, phrased in passive
  14103. * form (e.g. "acknowledged" in a routine that acknowledges batches).
  14104. */
  14105. indexOfExistingBatchId(batchId, action) {
  14106. const index = this.indexOfBatchId(batchId);
  14107. return index;
  14108. }
  14109. /**
  14110. * Finds the index of the given batchId in the mutation queue. This operation
  14111. * is O(1).
  14112. *
  14113. * @returns The computed index of the batch with the given batchId, based on
  14114. * the state of the queue. Note this index can be negative if the requested
  14115. * batchId has already been remvoed from the queue or past the end of the
  14116. * queue if the batchId is larger than the last added batch.
  14117. */
  14118. indexOfBatchId(batchId) {
  14119. if (this.mutationQueue.length === 0) {
  14120. // As an index this is past the end of the queue
  14121. return 0;
  14122. }
  14123. // Examine the front of the queue to figure out the difference between the
  14124. // batchId and indexes in the array. Note that since the queue is ordered
  14125. // by batchId, if the first batch has a larger batchId then the requested
  14126. // batchId doesn't exist in the queue.
  14127. const firstBatchId = this.mutationQueue[0].batchId;
  14128. return batchId - firstBatchId;
  14129. }
  14130. /**
  14131. * A version of lookupMutationBatch that doesn't return a promise, this makes
  14132. * other functions that uses this code easier to read and more efficent.
  14133. */
  14134. findMutationBatch(batchId) {
  14135. const index = this.indexOfBatchId(batchId);
  14136. if (index < 0 || index >= this.mutationQueue.length) {
  14137. return null;
  14138. }
  14139. const batch = this.mutationQueue[index];
  14140. return batch;
  14141. }
  14142. }
  14143. /**
  14144. * @license
  14145. * Copyright 2017 Google LLC
  14146. *
  14147. * Licensed under the Apache License, Version 2.0 (the "License");
  14148. * you may not use this file except in compliance with the License.
  14149. * You may obtain a copy of the License at
  14150. *
  14151. * http://www.apache.org/licenses/LICENSE-2.0
  14152. *
  14153. * Unless required by applicable law or agreed to in writing, software
  14154. * distributed under the License is distributed on an "AS IS" BASIS,
  14155. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14156. * See the License for the specific language governing permissions and
  14157. * limitations under the License.
  14158. */
  14159. function documentEntryMap() {
  14160. return new SortedMap(DocumentKey.comparator);
  14161. }
/**
 * The memory-only RemoteDocumentCache for IndexedDb. To construct, invoke
 * `newMemoryRemoteDocumentCache()`.
 */
class MemoryRemoteDocumentCacheImpl {
    /**
     * @param sizer - Used to assess the size of a document. For eager GC, this is
     * expected to just return 0 to avoid unnecessarily doing the work of
     * calculating the size.
     */
    constructor(sizer) {
        this.sizer = sizer;
        /** Underlying cache of documents and their read times. */
        this.docs = documentEntryMap();
        /** Size of all cached documents (as reported by `sizer`). */
        this.size = 0;
    }
    /** Injects the index manager used to maintain the collection-parent index. */
    setIndexManager(indexManager) {
        this.indexManager = indexManager;
    }
    /**
     * Adds the supplied entry to the cache and updates the cache size as appropriate.
     *
     * All calls of `addEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()`.
     */
    addEntry(transaction, doc) {
        const key = doc.key;
        const entry = this.docs.get(key);
        // Replacing an existing entry must not double-count its size.
        const previousSize = entry ? entry.size : 0;
        const currentSize = this.sizer(doc);
        this.docs = this.docs.insert(key, {
            document: doc.mutableCopy(),
            size: currentSize
        });
        this.size += currentSize - previousSize;
        // Keep the collection-parent index in sync with the new document.
        return this.indexManager.addToCollectionParentIndex(transaction, key.path.popLast());
    }
    /**
     * Removes the specified entry from the cache and updates the cache size as appropriate.
     *
     * All calls of `removeEntry` are required to go through the RemoteDocumentChangeBuffer
     * returned by `newChangeBuffer()`.
     */
    removeEntry(documentKey) {
        const entry = this.docs.get(documentKey);
        if (entry) {
            this.docs = this.docs.remove(documentKey);
            this.size -= entry.size;
        }
    }
    /**
     * Looks up a single document, returning an invalid document placeholder
     * when the key is not cached.
     */
    getEntry(transaction, documentKey) {
        const entry = this.docs.get(documentKey);
        return PersistencePromise.resolve(entry
            ? entry.document.mutableCopy()
            : MutableDocument.newInvalidDocument(documentKey));
    }
    /**
     * Looks up a set of documents; missing keys map to invalid document
     * placeholders so every requested key appears in the result.
     */
    getEntries(transaction, documentKeys) {
        let results = mutableDocumentMap();
        documentKeys.forEach(documentKey => {
            const entry = this.docs.get(documentKey);
            results = results.insert(documentKey, entry
                ? entry.document.mutableCopy()
                : MutableDocument.newInvalidDocument(documentKey));
        });
        return PersistencePromise.resolve(results);
    }
    /**
     * Returns all documents directly inside `collectionPath` whose index
     * offset sorts after `offset`.
     */
    getAllFromCollection(transaction, collectionPath, offset) {
        let results = mutableDocumentMap();
        // Documents are ordered by key, so we can use a prefix scan to narrow down
        // the documents we need to match the query against.
        const prefix = new DocumentKey(collectionPath.child(''));
        const iterator = this.docs.getIteratorFrom(prefix);
        while (iterator.hasNext()) {
            const { key, value: { document } } = iterator.getNext();
            if (!collectionPath.isPrefixOf(key.path)) {
                // Left the key range of the collection — scan is complete.
                break;
            }
            if (key.path.length > collectionPath.length + 1) {
                // Exclude entries from subcollections.
                continue;
            }
            if (indexOffsetComparator(newIndexOffsetFromDocument(document), offset) <= 0) {
                // The document sorts before the offset.
                continue;
            }
            results = results.insert(document.key, document.mutableCopy());
        }
        return PersistencePromise.resolve(results);
    }
    // NOTE: `limti` is a typo of `limit` preserved from upstream; the
    // parameter is unused because this method always fails (see below).
    getAllFromCollectionGroup(transaction, collectionGroup, offset, limti) {
        // This method should only be called from the IndexBackfiller if persistence
        // is enabled.
        fail();
    }
    /** Invokes `f` once per cached document key. */
    forEachDocumentKey(transaction, f) {
        return PersistencePromise.forEach(this.docs, (key) => f(key));
    }
    newChangeBuffer(options) {
        // `trackRemovals` is ignored since the MemoryRemoteDocumentCache keeps
        // a separate changelog and does not need special handling for removals.
        return new MemoryRemoteDocumentChangeBuffer(this);
    }
    /** Returns the total sized-by-`sizer` byte count of all cached documents. */
    getSize(txn) {
        return PersistencePromise.resolve(this.size);
    }
}
  14269. /**
  14270. * Creates a new memory-only RemoteDocumentCache.
  14271. *
  14272. * @param sizer - Used to assess the size of a document. For eager GC, this is
  14273. * expected to just return 0 to avoid unnecessarily doing the work of
  14274. * calculating the size.
  14275. */
  14276. function newMemoryRemoteDocumentCache(sizer) {
  14277. return new MemoryRemoteDocumentCacheImpl(sizer);
  14278. }
  14279. /**
  14280. * Handles the details of adding and updating documents in the MemoryRemoteDocumentCache.
  14281. */
  14282. class MemoryRemoteDocumentChangeBuffer extends RemoteDocumentChangeBuffer {
  14283. constructor(documentCache) {
  14284. super();
  14285. this.documentCache = documentCache;
  14286. }
  14287. applyChanges(transaction) {
  14288. const promises = [];
  14289. this.changes.forEach((key, doc) => {
  14290. if (doc.isValidDocument()) {
  14291. promises.push(this.documentCache.addEntry(transaction, doc));
  14292. }
  14293. else {
  14294. this.documentCache.removeEntry(key);
  14295. }
  14296. });
  14297. return PersistencePromise.waitFor(promises);
  14298. }
  14299. getFromCache(transaction, documentKey) {
  14300. return this.documentCache.getEntry(transaction, documentKey);
  14301. }
  14302. getAllFromCache(transaction, documentKeys) {
  14303. return this.documentCache.getEntries(transaction, documentKeys);
  14304. }
  14305. }
  14306. /**
  14307. * @license
  14308. * Copyright 2017 Google LLC
  14309. *
  14310. * Licensed under the Apache License, Version 2.0 (the "License");
  14311. * you may not use this file except in compliance with the License.
  14312. * You may obtain a copy of the License at
  14313. *
  14314. * http://www.apache.org/licenses/LICENSE-2.0
  14315. *
  14316. * Unless required by applicable law or agreed to in writing, software
  14317. * distributed under the License is distributed on an "AS IS" BASIS,
  14318. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14319. * See the License for the specific language governing permissions and
  14320. * limitations under the License.
  14321. */
/**
 * In-memory cache of query targets and their associated metadata (snapshot
 * versions, sequence numbers, and document/target reference mappings).
 */
class MemoryTargetCache {
    constructor(persistence) {
        this.persistence = persistence;
        /**
         * Maps a target to the data about that target
         */
        this.targets = new ObjectMap(t => canonifyTarget(t), targetEquals);
        /** The last received snapshot version. */
        this.lastRemoteSnapshotVersion = SnapshotVersion.min();
        /** The highest numbered target ID encountered. */
        this.highestTargetId = 0;
        /** The highest sequence number encountered. */
        this.highestSequenceNumber = 0;
        /**
         * A ordered bidirectional mapping between documents and the remote target
         * IDs.
         */
        this.references = new ReferenceSet();
        this.targetCount = 0;
        this.targetIdGenerator = TargetIdGenerator.forTargetCache();
    }
    /** Invokes `f` once for every cached target's data. */
    forEachTarget(txn, f) {
        this.targets.forEach((_, targetData) => f(targetData));
        return PersistencePromise.resolve();
    }
    getLastRemoteSnapshotVersion(transaction) {
        return PersistencePromise.resolve(this.lastRemoteSnapshotVersion);
    }
    getHighestSequenceNumber(transaction) {
        return PersistencePromise.resolve(this.highestSequenceNumber);
    }
    /** Reserves and returns the next available target ID. */
    allocateTargetId(transaction) {
        this.highestTargetId = this.targetIdGenerator.next();
        return PersistencePromise.resolve(this.highestTargetId);
    }
    setTargetsMetadata(transaction, highestListenSequenceNumber, lastRemoteSnapshotVersion) {
        if (lastRemoteSnapshotVersion) {
            this.lastRemoteSnapshotVersion = lastRemoteSnapshotVersion;
        }
        // Sequence numbers are monotonic; never move backwards.
        if (highestListenSequenceNumber > this.highestSequenceNumber) {
            this.highestSequenceNumber = highestListenSequenceNumber;
        }
        return PersistencePromise.resolve();
    }
    /**
     * Stores target data and advances the cached high-water marks for target
     * ID and sequence number as needed.
     */
    saveTargetData(targetData) {
        this.targets.set(targetData.target, targetData);
        const targetId = targetData.targetId;
        if (targetId > this.highestTargetId) {
            // Re-seed the generator so future IDs stay above this one.
            this.targetIdGenerator = new TargetIdGenerator(targetId);
            this.highestTargetId = targetId;
        }
        if (targetData.sequenceNumber > this.highestSequenceNumber) {
            this.highestSequenceNumber = targetData.sequenceNumber;
        }
    }
    addTargetData(transaction, targetData) {
        this.saveTargetData(targetData);
        this.targetCount += 1;
        return PersistencePromise.resolve();
    }
    updateTargetData(transaction, targetData) {
        this.saveTargetData(targetData);
        return PersistencePromise.resolve();
    }
    removeTargetData(transaction, targetData) {
        this.targets.delete(targetData.target);
        this.references.removeReferencesForId(targetData.targetId);
        this.targetCount -= 1;
        return PersistencePromise.resolve();
    }
    /**
     * Removes all targets at or below `upperBound` that are not listed in
     * `activeTargetIds`, returning the number of targets removed.
     */
    removeTargets(transaction, upperBound, activeTargetIds) {
        let count = 0;
        const removals = [];
        this.targets.forEach((key, targetData) => {
            // NOTE(review): `get(...) === null` presumably means "id absent from
            // the active set" — verify against the map type used by callers.
            if (targetData.sequenceNumber <= upperBound &&
                activeTargetIds.get(targetData.targetId) === null) {
                this.targets.delete(key);
                removals.push(this.removeMatchingKeysForTargetId(transaction, targetData.targetId));
                count++;
            }
        });
        return PersistencePromise.waitFor(removals).next(() => count);
    }
    getTargetCount(transaction) {
        return PersistencePromise.resolve(this.targetCount);
    }
    getTargetData(transaction, target) {
        const targetData = this.targets.get(target) || null;
        return PersistencePromise.resolve(targetData);
    }
    addMatchingKeys(txn, keys, targetId) {
        this.references.addReferences(keys, targetId);
        return PersistencePromise.resolve();
    }
    removeMatchingKeys(txn, keys, targetId) {
        this.references.removeReferences(keys, targetId);
        const referenceDelegate = this.persistence.referenceDelegate;
        const promises = [];
        if (referenceDelegate) {
            // Removed keys may now be orphaned; let the delegate track them for GC.
            keys.forEach(key => {
                promises.push(referenceDelegate.markPotentiallyOrphaned(txn, key));
            });
        }
        return PersistencePromise.waitFor(promises);
    }
    removeMatchingKeysForTargetId(txn, targetId) {
        this.references.removeReferencesForId(targetId);
        return PersistencePromise.resolve();
    }
    getMatchingKeysForTargetId(txn, targetId) {
        const matchingKeys = this.references.referencesForId(targetId);
        return PersistencePromise.resolve(matchingKeys);
    }
    /** Returns whether any target currently references the given document key. */
    containsKey(txn, key) {
        return PersistencePromise.resolve(this.references.containsKey(key));
    }
}
  14439. /**
  14440. * @license
  14441. * Copyright 2017 Google LLC
  14442. *
  14443. * Licensed under the Apache License, Version 2.0 (the "License");
  14444. * you may not use this file except in compliance with the License.
  14445. * You may obtain a copy of the License at
  14446. *
  14447. * http://www.apache.org/licenses/LICENSE-2.0
  14448. *
  14449. * Unless required by applicable law or agreed to in writing, software
  14450. * distributed under the License is distributed on an "AS IS" BASIS,
  14451. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14452. * See the License for the specific language governing permissions and
  14453. * limitations under the License.
  14454. */
const LOG_TAG$d = 'MemoryPersistence';
/**
 * A memory-backed instance of Persistence. Data is stored only in RAM and
 * not persisted across sessions.
 */
class MemoryPersistence {
    /**
     * The constructor accepts a factory for creating a reference delegate. This
     * allows both the delegate and this instance to have strong references to
     * each other without having nullable fields that would then need to be
     * checked or asserted on every access.
     */
    constructor(referenceDelegateFactory, serializer) {
        // Per-user caches, keyed by User.toKey().
        this.mutationQueues = {};
        this.overlays = {};
        this.listenSequence = new ListenSequence(0);
        this._started = false;
        // NOTE(review): the assignment above is a dead store (immediately
        // overwritten) — likely a transpiler artifact of a field initializer.
        this._started = true;
        this.referenceDelegate = referenceDelegateFactory(this);
        this.targetCache = new MemoryTargetCache(this);
        // Document sizing is delegated so eager GC can report 0 and skip the work.
        const sizer = (doc) => this.referenceDelegate.documentSize(doc);
        this.indexManager = new MemoryIndexManager();
        this.remoteDocumentCache = newMemoryRemoteDocumentCache(sizer);
        this.serializer = new LocalSerializer(serializer);
        this.bundleCache = new MemoryBundleCache(this.serializer);
    }
    start() {
        return Promise.resolve();
    }
    shutdown() {
        // No durable state to ensure is closed on shutdown.
        this._started = false;
        return Promise.resolve();
    }
    get started() {
        return this._started;
    }
    setDatabaseDeletedListener() {
        // No op.
    }
    setNetworkEnabled() {
        // No op.
    }
    getIndexManager(user) {
        // We do not currently support indices for memory persistence, so we can
        // return the same shared instance of the memory index manager.
        return this.indexManager;
    }
    /** Lazily creates (and caches) the overlay cache for the given user. */
    getDocumentOverlayCache(user) {
        let overlay = this.overlays[user.toKey()];
        if (!overlay) {
            overlay = new MemoryDocumentOverlayCache();
            this.overlays[user.toKey()] = overlay;
        }
        return overlay;
    }
    /** Lazily creates (and caches) the mutation queue for the given user. */
    getMutationQueue(user, indexManager) {
        let queue = this.mutationQueues[user.toKey()];
        if (!queue) {
            queue = new MemoryMutationQueue(indexManager, this.referenceDelegate);
            this.mutationQueues[user.toKey()] = queue;
        }
        return queue;
    }
    getTargetCache() {
        return this.targetCache;
    }
    getRemoteDocumentCache() {
        return this.remoteDocumentCache;
    }
    getBundleCache() {
        return this.bundleCache;
    }
    /**
     * Runs `transactionOperation` inside a new MemoryTransaction, notifying the
     * reference delegate at start/commit so it can track orphaned documents.
     * `mode` is accepted for interface parity but unused by memory persistence.
     */
    runTransaction(action, mode, transactionOperation) {
        logDebug(LOG_TAG$d, 'Starting transaction:', action);
        const txn = new MemoryTransaction(this.listenSequence.next());
        this.referenceDelegate.onTransactionStarted();
        return transactionOperation(txn)
            .next(result => {
            return this.referenceDelegate
                .onTransactionCommitted(txn)
                .next(() => result);
        })
            .toPromise()
            .then(result => {
            txn.raiseOnCommittedEvent();
            return result;
        });
    }
    /** True if any user's mutation queue still references the given key. */
    mutationQueuesContainKey(transaction, key) {
        return PersistencePromise.or(Object.values(this.mutationQueues).map(queue => () => queue.containsKey(transaction, key)));
    }
}
/**
 * Memory persistence is not actually transactional, but future implementations
 * may have transaction-scoped state.
 */
class MemoryTransaction extends PersistenceTransaction {
    /** @param currentSequenceNumber - Listen sequence number for this transaction. */
    constructor(currentSequenceNumber) {
        super();
        this.currentSequenceNumber = currentSequenceNumber;
    }
}
/**
 * Reference delegate that eagerly garbage-collects documents: after each
 * transaction, any document no longer referenced by a view, target, or
 * mutation queue is removed from the remote document cache.
 */
class MemoryEagerDelegate {
    constructor(persistence) {
        this.persistence = persistence;
        /** Tracks all documents that are active in Query views. */
        this.localViewReferences = new ReferenceSet();
        /** The list of documents that are potentially GCed after each transaction. */
        this._orphanedDocuments = null;
    }
    static factory(persistence) {
        return new MemoryEagerDelegate(persistence);
    }
    /**
     * Accessor that fails fast when used outside a transaction (the set only
     * exists between onTransactionStarted and onTransactionCommitted).
     */
    get orphanedDocuments() {
        if (!this._orphanedDocuments) {
            throw fail();
        }
        else {
            return this._orphanedDocuments;
        }
    }
    addReference(txn, targetId, key) {
        this.localViewReferences.addReference(key, targetId);
        // A referenced document cannot be an orphan.
        this.orphanedDocuments.delete(key.toString());
        return PersistencePromise.resolve();
    }
    removeReference(txn, targetId, key) {
        this.localViewReferences.removeReference(key, targetId);
        this.orphanedDocuments.add(key.toString());
        return PersistencePromise.resolve();
    }
    markPotentiallyOrphaned(txn, key) {
        this.orphanedDocuments.add(key.toString());
        return PersistencePromise.resolve();
    }
    /**
     * Drops a target, marking every document it referenced (locally or in the
     * target cache) as potentially orphaned before removing the target data.
     */
    removeTarget(txn, targetData) {
        const orphaned = this.localViewReferences.removeReferencesForId(targetData.targetId);
        orphaned.forEach(key => this.orphanedDocuments.add(key.toString()));
        const cache = this.persistence.getTargetCache();
        return cache
            .getMatchingKeysForTargetId(txn, targetData.targetId)
            .next(keys => {
            keys.forEach(key => this.orphanedDocuments.add(key.toString()));
        })
            .next(() => cache.removeTargetData(txn, targetData));
    }
    onTransactionStarted() {
        // Fresh orphan set per transaction; keys are stored as path strings.
        this._orphanedDocuments = new Set();
    }
    /**
     * At commit time, removes every orphan candidate that is not referenced
     * anywhere, then applies the removals through a change buffer.
     */
    onTransactionCommitted(txn) {
        // Remove newly orphaned documents.
        const cache = this.persistence.getRemoteDocumentCache();
        const changeBuffer = cache.newChangeBuffer();
        return PersistencePromise.forEach(this.orphanedDocuments, (path) => {
            const key = DocumentKey.fromPath(path);
            return this.isReferenced(txn, key).next(isReferenced => {
                if (!isReferenced) {
                    changeBuffer.removeEntry(key, SnapshotVersion.min());
                }
            });
        }).next(() => {
            this._orphanedDocuments = null;
            return changeBuffer.apply(txn);
        });
    }
    updateLimboDocument(txn, key) {
        return this.isReferenced(txn, key).next(isReferenced => {
            if (isReferenced) {
                this.orphanedDocuments.delete(key.toString());
            }
            else {
                this.orphanedDocuments.add(key.toString());
            }
        });
    }
    documentSize(doc) {
        // For eager GC, we don't care about the document size, there are no size thresholds.
        return 0;
    }
    /** A key is referenced if any view, target, or mutation queue holds it. */
    isReferenced(txn, key) {
        return PersistencePromise.or([
            () => PersistencePromise.resolve(this.localViewReferences.containsKey(key)),
            () => this.persistence.getTargetCache().containsKey(txn, key),
            () => this.persistence.mutationQueuesContainKey(txn, key)
        ]);
    }
}
  14643. /**
  14644. * @license
  14645. * Copyright 2020 Google LLC
  14646. *
  14647. * Licensed under the Apache License, Version 2.0 (the "License");
  14648. * you may not use this file except in compliance with the License.
  14649. * You may obtain a copy of the License at
  14650. *
  14651. * http://www.apache.org/licenses/LICENSE-2.0
  14652. *
  14653. * Unless required by applicable law or agreed to in writing, software
  14654. * distributed under the License is distributed on an "AS IS" BASIS,
  14655. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  14656. * See the License for the specific language governing permissions and
  14657. * limitations under the License.
  14658. */
/** Performs database creation and schema upgrades. */
class SchemaConverter {
    constructor(serializer) {
        this.serializer = serializer;
    }
    /**
     * Performs database creation and schema upgrades.
     *
     * Note that in production, this method is only ever used to upgrade the schema
     * to SCHEMA_VERSION. Different values of toVersion are only used for testing
     * and local feature development.
     *
     * Each `fromVersion < N && toVersion >= N` guard applies the migration for
     * schema version N exactly once; migrations are chained sequentially on `p`.
     */
    createOrUpgrade(db, txn, fromVersion, toVersion) {
        const simpleDbTransaction = new SimpleDbTransaction('createOrUpgrade', txn);
        if (fromVersion < 1 && toVersion >= 1) {
            createPrimaryClientStore(db);
            createMutationQueue(db);
            createQueryCache(db);
            createLegacyRemoteDocumentCache(db);
        }
        // Migration 2 to populate the targetGlobal object no longer needed since
        // migration 3 unconditionally clears it.
        let p = PersistencePromise.resolve();
        if (fromVersion < 3 && toVersion >= 3) {
            // Brand new clients don't need to drop and recreate--only clients that
            // potentially have corrupt data.
            if (fromVersion !== 0) {
                dropQueryCache(db);
                createQueryCache(db);
            }
            p = p.next(() => writeEmptyTargetGlobalEntry(simpleDbTransaction));
        }
        if (fromVersion < 4 && toVersion >= 4) {
            if (fromVersion !== 0) {
                // Schema version 3 uses auto-generated keys to generate globally unique
                // mutation batch IDs (this was previously ensured internally by the
                // client). To migrate to the new schema, we have to read all mutations
                // and write them back out. We preserve the existing batch IDs to guarantee
                // consistency with other object stores. Any further mutation batch IDs will
                // be auto-generated.
                p = p.next(() => upgradeMutationBatchSchemaAndMigrateData(db, simpleDbTransaction));
            }
            p = p.next(() => {
                createClientMetadataStore(db);
            });
        }
        if (fromVersion < 5 && toVersion >= 5) {
            p = p.next(() => this.removeAcknowledgedMutations(simpleDbTransaction));
        }
        if (fromVersion < 6 && toVersion >= 6) {
            p = p.next(() => {
                createDocumentGlobalStore(db);
                return this.addDocumentGlobal(simpleDbTransaction);
            });
        }
        if (fromVersion < 7 && toVersion >= 7) {
            p = p.next(() => this.ensureSequenceNumbers(simpleDbTransaction));
        }
        if (fromVersion < 8 && toVersion >= 8) {
            p = p.next(() => this.createCollectionParentIndex(db, simpleDbTransaction));
        }
        if (fromVersion < 9 && toVersion >= 9) {
            p = p.next(() => {
                // Multi-Tab used to manage its own changelog, but this has been moved
                // to the DbRemoteDocument object store itself. Since the previous change
                // log only contained transient data, we can drop its object store.
                dropRemoteDocumentChangesStore(db);
                // Note: Schema version 9 used to create a read time index for the
                // RemoteDocumentCache. This is now done with schema version 13.
            });
        }
        if (fromVersion < 10 && toVersion >= 10) {
            p = p.next(() => this.rewriteCanonicalIds(simpleDbTransaction));
        }
        if (fromVersion < 11 && toVersion >= 11) {
            p = p.next(() => {
                createBundlesStore(db);
                createNamedQueriesStore(db);
            });
        }
        if (fromVersion < 12 && toVersion >= 12) {
            p = p.next(() => {
                createDocumentOverlayStore(db);
            });
        }
        if (fromVersion < 13 && toVersion >= 13) {
            // v13 replaces the legacy remote document store with a new layout,
            // copying all rows before deleting the old store.
            p = p
                .next(() => createRemoteDocumentCache(db))
                .next(() => this.rewriteRemoteDocumentCache(db, simpleDbTransaction))
                .next(() => db.deleteObjectStore(DbRemoteDocumentStore$1));
        }
        if (fromVersion < 14 && toVersion >= 14) {
            p = p.next(() => this.runOverlayMigration(db, simpleDbTransaction));
        }
        if (fromVersion < 15 && toVersion >= 15) {
            p = p.next(() => createFieldIndex(db));
        }
        return p;
    }
    /**
     * Computes the total byte size of the legacy remote document store and
     * records it in the document-global metadata row (schema v6).
     */
    addDocumentGlobal(txn) {
        let byteSize = 0;
        return txn
            .store(DbRemoteDocumentStore$1)
            .iterate((_, doc) => {
            byteSize += dbDocumentSize(doc);
        })
            .next(() => {
            const metadata = { byteSize };
            return txn
                .store(DbRemoteDocumentGlobalStore)
                .put(DbRemoteDocumentGlobalKey, metadata);
        });
    }
    /**
     * Deletes all mutation batches that were already acknowledged for every
     * queue (schema v5); such batches no longer need to be retained.
     */
    removeAcknowledgedMutations(txn) {
        const queuesStore = txn.store(DbMutationQueueStore);
        const mutationsStore = txn.store(DbMutationBatchStore);
        return queuesStore.loadAll().next(queues => {
            return PersistencePromise.forEach(queues, (queue) => {
                // All batches in [BATCHID_UNKNOWN, lastAcknowledgedBatchId] for this
                // user are acknowledged and can be removed.
                const range = IDBKeyRange.bound([queue.userId, BATCHID_UNKNOWN], [queue.userId, queue.lastAcknowledgedBatchId]);
                return mutationsStore
                    .loadAll(DbMutationBatchUserMutationsIndex, range)
                    .next(dbBatches => {
                    return PersistencePromise.forEach(dbBatches, (dbBatch) => {
                        hardAssert(dbBatch.userId === queue.userId);
                        const batch = fromDbMutationBatch(this.serializer, dbBatch);
                        return removeMutationBatch(txn, queue.userId, batch).next(() => { });
                    });
                });
            });
        });
    }
    /**
     * Ensures that every document in the remote document cache has a corresponding sentinel row
     * with a sequence number. Missing rows are given the most recently used sequence number.
     */
    ensureSequenceNumbers(txn) {
        const documentTargetStore = txn.store(DbTargetDocumentStore);
        const documentsStore = txn.store(DbRemoteDocumentStore$1);
        const globalTargetStore = txn.store(DbTargetGlobalStore);
        return globalTargetStore.get(DbTargetGlobalKey).next(metadata => {
            // Sentinel rows use targetId 0 (see sentinelKey()).
            const writeSentinelKey = (path) => {
                return documentTargetStore.put({
                    targetId: 0,
                    path: encodeResourcePath(path),
                    sequenceNumber: metadata.highestListenSequenceNumber
                });
            };
            const promises = [];
            return documentsStore
                .iterate((key, doc) => {
                const path = new ResourcePath(key);
                const docSentinelKey = sentinelKey(path);
                promises.push(documentTargetStore.get(docSentinelKey).next(maybeSentinel => {
                    if (!maybeSentinel) {
                        return writeSentinelKey(path);
                    }
                    else {
                        return PersistencePromise.resolve();
                    }
                }));
            })
                .next(() => PersistencePromise.waitFor(promises));
        });
    }
    /**
     * Creates the collection-parent index (schema v8) and backfills it from
     * both existing remote documents and pending mutations.
     */
    createCollectionParentIndex(db, txn) {
        // Create the index.
        db.createObjectStore(DbCollectionParentStore, {
            keyPath: DbCollectionParentKeyPath
        });
        const collectionParentsStore = txn.store(DbCollectionParentStore);
        // Helper to add an index entry iff we haven't already written it.
        const cache = new MemoryCollectionParentIndex();
        const addEntry = (collectionPath) => {
            if (cache.add(collectionPath)) {
                const collectionId = collectionPath.lastSegment();
                const parentPath = collectionPath.popLast();
                return collectionParentsStore.put({
                    collectionId,
                    parent: encodeResourcePath(parentPath)
                });
            }
        };
        // Index existing remote documents.
        return txn
            .store(DbRemoteDocumentStore$1)
            .iterate({ keysOnly: true }, (pathSegments, _) => {
            const path = new ResourcePath(pathSegments);
            return addEntry(path.popLast());
        })
            .next(() => {
            // Index existing mutations.
            return txn
                .store(DbDocumentMutationStore)
                .iterate({ keysOnly: true }, ([userID, encodedPath, batchId], _) => {
                const path = decodeResourcePath(encodedPath);
                return addEntry(path.popLast());
            });
        });
    }
    /**
     * Rewrites every stored target with a freshly serialized form so canonical
     * IDs are regenerated (schema v10).
     */
    rewriteCanonicalIds(txn) {
        const targetStore = txn.store(DbTargetStore);
        return targetStore.iterate((key, originalDbTarget) => {
            const originalTargetData = fromDbTarget(originalDbTarget);
            const updatedDbTarget = toDbTarget(this.serializer, originalTargetData);
            return targetStore.put(updatedDbTarget);
        });
    }
    /**
     * Copies every row of the legacy remote document store into the new store
     * layout (schema v13), splitting the key into prefix path, collection
     * group, and document id.
     */
    rewriteRemoteDocumentCache(db, transaction) {
        const legacyRemoteDocumentStore = transaction.store(DbRemoteDocumentStore$1);
        const writes = [];
        return legacyRemoteDocumentStore
            .iterate((_, legacyDocument) => {
            const remoteDocumentStore = transaction.store(DbRemoteDocumentStore);
            const path = extractKey(legacyDocument).path.toArray();
            const dbRemoteDocument = {
                prefixPath: path.slice(0, path.length - 2),
                collectionGroup: path[path.length - 2],
                documentId: path[path.length - 1],
                // Legacy rows may lack a read time; default to SnapshotVersion.min().
                readTime: legacyDocument.readTime || [0, 0],
                unknownDocument: legacyDocument.unknownDocument,
                noDocument: legacyDocument.noDocument,
                document: legacyDocument.document,
                hasCommittedMutations: !!legacyDocument.hasCommittedMutations
            };
            writes.push(remoteDocumentStore.put(dbRemoteDocument));
        })
            .next(() => PersistencePromise.waitFor(writes));
    }
    /**
     * Recalculates and persists document overlays for every user's pending
     * mutations (schema v14), using a throwaway memory persistence instance
     * for the recalculation.
     */
    runOverlayMigration(db, transaction) {
        const mutationsStore = transaction.store(DbMutationBatchStore);
        const remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer);
        const memoryPersistence = new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer.remoteSerializer);
        return mutationsStore.loadAll().next(dbBatches => {
            // Group all mutated document keys by user.
            const userToDocumentSet = new Map();
            dbBatches.forEach(dbBatch => {
                var _a;
                let documentSet = (_a = userToDocumentSet.get(dbBatch.userId)) !== null && _a !== void 0 ? _a : documentKeySet();
                const batch = fromDbMutationBatch(this.serializer, dbBatch);
                batch.keys().forEach(key => (documentSet = documentSet.add(key)));
                userToDocumentSet.set(dbBatch.userId, documentSet);
            });
            return PersistencePromise.forEach(userToDocumentSet, (allDocumentKeysForUser, userId) => {
                const user = new User(userId);
                const documentOverlayCache = IndexedDbDocumentOverlayCache.forUser(this.serializer, user);
                // NOTE: The index manager and the reference delegate are
                // irrelevant for the purpose of recalculating and saving
                // overlays. We can therefore simply use the memory
                // implementation.
                const indexManager = memoryPersistence.getIndexManager(user);
                const mutationQueue = IndexedDbMutationQueue.forUser(user, this.serializer, indexManager, memoryPersistence.referenceDelegate);
                const localDocumentsView = new LocalDocumentsView(remoteDocumentCache, mutationQueue, documentOverlayCache, indexManager);
                return localDocumentsView
                    .recalculateAndSaveOverlaysForDocumentKeys(new IndexedDbTransaction(transaction, ListenSequence.INVALID), allDocumentKeysForUser)
                    .next();
            });
        });
    }
}
  14917. function sentinelKey(path) {
  14918. return [0, encodeResourcePath(path)];
  14919. }
/** Creates the single-row object store that holds the primary-client lease. */
function createPrimaryClientStore(db) {
    db.createObjectStore(DbPrimaryClientStore);
}
  14923. function createMutationQueue(db) {
  14924. db.createObjectStore(DbMutationQueueStore, {
  14925. keyPath: DbMutationQueueKeyPath
  14926. });
  14927. const mutationBatchesStore = db.createObjectStore(DbMutationBatchStore, {
  14928. keyPath: DbMutationBatchKeyPath,
  14929. autoIncrement: true
  14930. });
  14931. mutationBatchesStore.createIndex(DbMutationBatchUserMutationsIndex, DbMutationBatchUserMutationsKeyPath, { unique: true });
  14932. db.createObjectStore(DbDocumentMutationStore);
  14933. }
  14934. /**
  14935. * Upgrade function to migrate the 'mutations' store from V1 to V3. Loads
  14936. * and rewrites all data.
  14937. */
  14938. function upgradeMutationBatchSchemaAndMigrateData(db, txn) {
  14939. const v1MutationsStore = txn.store(DbMutationBatchStore);
  14940. return v1MutationsStore.loadAll().next(existingMutations => {
  14941. db.deleteObjectStore(DbMutationBatchStore);
  14942. const mutationsStore = db.createObjectStore(DbMutationBatchStore, {
  14943. keyPath: DbMutationBatchKeyPath,
  14944. autoIncrement: true
  14945. });
  14946. mutationsStore.createIndex(DbMutationBatchUserMutationsIndex, DbMutationBatchUserMutationsKeyPath, { unique: true });
  14947. const v3MutationsStore = txn.store(DbMutationBatchStore);
  14948. const writeAll = existingMutations.map(mutation => v3MutationsStore.put(mutation));
  14949. return PersistencePromise.waitFor(writeAll);
  14950. });
  14951. }
/** Creates the pre-v14 (key-less) remote document store. */
function createLegacyRemoteDocumentCache(db) {
    db.createObjectStore(DbRemoteDocumentStore$1);
}
  14955. function createRemoteDocumentCache(db) {
  14956. const remoteDocumentStore = db.createObjectStore(DbRemoteDocumentStore, {
  14957. keyPath: DbRemoteDocumentKeyPath
  14958. });
  14959. remoteDocumentStore.createIndex(DbRemoteDocumentDocumentKeyIndex, DbRemoteDocumentDocumentKeyIndexPath);
  14960. remoteDocumentStore.createIndex(DbRemoteDocumentCollectionGroupIndex, DbRemoteDocumentCollectionGroupIndexPath);
  14961. }
/** Creates the singleton store holding global remote-document metadata. */
function createDocumentGlobalStore(db) {
    db.createObjectStore(DbRemoteDocumentGlobalStore);
}
  14965. function createQueryCache(db) {
  14966. const targetDocumentsStore = db.createObjectStore(DbTargetDocumentStore, {
  14967. keyPath: DbTargetDocumentKeyPath
  14968. });
  14969. targetDocumentsStore.createIndex(DbTargetDocumentDocumentTargetsIndex, DbTargetDocumentDocumentTargetsKeyPath, { unique: true });
  14970. const targetStore = db.createObjectStore(DbTargetStore, {
  14971. keyPath: DbTargetKeyPath
  14972. });
  14973. // NOTE: This is unique only because the TargetId is the suffix.
  14974. targetStore.createIndex(DbTargetQueryTargetsIndexName, DbTargetQueryTargetsKeyPath, { unique: true });
  14975. db.createObjectStore(DbTargetGlobalStore);
  14976. }
  14977. function dropQueryCache(db) {
  14978. db.deleteObjectStore(DbTargetDocumentStore);
  14979. db.deleteObjectStore(DbTargetStore);
  14980. db.deleteObjectStore(DbTargetGlobalStore);
  14981. }
  14982. function dropRemoteDocumentChangesStore(db) {
  14983. if (db.objectStoreNames.contains('remoteDocumentChanges')) {
  14984. db.deleteObjectStore('remoteDocumentChanges');
  14985. }
  14986. }
  14987. /**
  14988. * Creates the target global singleton row.
  14989. *
  14990. * @param txn - The version upgrade transaction for indexeddb
  14991. */
  14992. function writeEmptyTargetGlobalEntry(txn) {
  14993. const globalStore = txn.store(DbTargetGlobalStore);
  14994. const metadata = {
  14995. highestTargetId: 0,
  14996. highestListenSequenceNumber: 0,
  14997. lastRemoteSnapshotVersion: SnapshotVersion.min().toTimestamp(),
  14998. targetCount: 0
  14999. };
  15000. return globalStore.put(DbTargetGlobalKey, metadata);
  15001. }
/** Creates the store that tracks per-tab client metadata (for multi-tab). */
function createClientMetadataStore(db) {
    db.createObjectStore(DbClientMetadataStore, {
        keyPath: DbClientMetadataKeyPath
    });
}
/** Creates the store for loaded-bundle metadata. */
function createBundlesStore(db) {
    db.createObjectStore(DbBundleStore, {
        keyPath: DbBundleKeyPath
    });
}
/** Creates the store for named queries delivered in bundles. */
function createNamedQueriesStore(db) {
    db.createObjectStore(DbNamedQueryStore, {
        keyPath: DbNamedQueryKeyPath
    });
}
  15017. function createFieldIndex(db) {
  15018. const indexConfigurationStore = db.createObjectStore(DbIndexConfigurationStore, {
  15019. keyPath: DbIndexConfigurationKeyPath,
  15020. autoIncrement: true
  15021. });
  15022. indexConfigurationStore.createIndex(DbIndexConfigurationCollectionGroupIndex, DbIndexConfigurationCollectionGroupIndexPath, { unique: false });
  15023. const indexStateStore = db.createObjectStore(DbIndexStateStore, {
  15024. keyPath: DbIndexStateKeyPath
  15025. });
  15026. indexStateStore.createIndex(DbIndexStateSequenceNumberIndex, DbIndexStateSequenceNumberIndexPath, { unique: false });
  15027. const indexEntryStore = db.createObjectStore(DbIndexEntryStore, {
  15028. keyPath: DbIndexEntryKeyPath
  15029. });
  15030. indexEntryStore.createIndex(DbIndexEntryDocumentKeyIndex, DbIndexEntryDocumentKeyIndexPath, { unique: false });
  15031. }
  15032. function createDocumentOverlayStore(db) {
  15033. const documentOverlayStore = db.createObjectStore(DbDocumentOverlayStore, {
  15034. keyPath: DbDocumentOverlayKeyPath
  15035. });
  15036. documentOverlayStore.createIndex(DbDocumentOverlayCollectionPathOverlayIndex, DbDocumentOverlayCollectionPathOverlayIndexPath, { unique: false });
  15037. documentOverlayStore.createIndex(DbDocumentOverlayCollectionGroupOverlayIndex, DbDocumentOverlayCollectionGroupOverlayIndexPath, { unique: false });
  15038. }
  15039. function extractKey(remoteDoc) {
  15040. if (remoteDoc.document) {
  15041. return new DocumentKey(ResourcePath.fromString(remoteDoc.document.name).popFirst(5));
  15042. }
  15043. else if (remoteDoc.noDocument) {
  15044. return DocumentKey.fromSegments(remoteDoc.noDocument.path);
  15045. }
  15046. else if (remoteDoc.unknownDocument) {
  15047. return DocumentKey.fromSegments(remoteDoc.unknownDocument.path);
  15048. }
  15049. else {
  15050. return fail();
  15051. }
  15052. }
  15053. /**
  15054. * @license
  15055. * Copyright 2017 Google LLC
  15056. *
  15057. * Licensed under the Apache License, Version 2.0 (the "License");
  15058. * you may not use this file except in compliance with the License.
  15059. * You may obtain a copy of the License at
  15060. *
  15061. * http://www.apache.org/licenses/LICENSE-2.0
  15062. *
  15063. * Unless required by applicable law or agreed to in writing, software
  15064. * distributed under the License is distributed on an "AS IS" BASIS,
  15065. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15066. * See the License for the specific language governing permissions and
  15067. * limitations under the License.
  15068. */
const LOG_TAG$c = 'IndexedDbPersistence';
/**
 * Oldest acceptable age in milliseconds for client metadata before the client
 * is considered inactive and its associated data is garbage collected.
 */
const MAX_CLIENT_AGE_MS = 30 * 60 * 1000; // 30 minutes
/**
 * Oldest acceptable metadata age for clients that may participate in the
 * primary lease election. Clients that have not updated their client metadata
 * within 5 seconds are not eligible to receive a primary lease.
 */
const MAX_PRIMARY_ELIGIBLE_AGE_MS = 5000;
/**
 * The interval at which clients will update their metadata, including
 * refreshing their primary lease if held or potentially trying to acquire it if
 * not held.
 *
 * Primary clients may opportunistically refresh their metadata earlier
 * if they're already performing an IndexedDB operation.
 */
const CLIENT_METADATA_REFRESH_INTERVAL_MS = 4000;
/** User-facing error when the primary lease is required but not available. */
const PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG = 'Failed to obtain exclusive access to the persistence layer. To allow ' +
    'shared access, multi-tab synchronization has to be enabled in all tabs. ' +
    'If you are using `experimentalForceOwningTab:true`, make sure that only ' +
    'one tab has persistence enabled at any given time.';
/** User-facing error shown when IndexedDB is missing or known-broken. */
const UNSUPPORTED_PLATFORM_ERROR_MSG = 'This platform is either missing IndexedDB or is known to have ' +
    'an incomplete implementation. Offline persistence has been disabled.';
// The format of the LocalStorage key that stores zombied client is:
// firestore_zombie_<persistence_prefix>_<instance_key>
const ZOMBIED_CLIENTS_KEY_PREFIX = 'firestore_zombie';
/**
 * The name of the main (and currently only) IndexedDB database. This name is
 * appended to the prefix provided to the IndexedDbPersistence constructor.
 */
const MAIN_DATABASE = 'main';
  15105. /**
  15106. * An IndexedDB-backed instance of Persistence. Data is stored persistently
  15107. * across sessions.
  15108. *
  15109. * On Web only, the Firestore SDKs support shared access to its persistence
  15110. * layer. This allows multiple browser tabs to read and write to IndexedDb and
  15111. * to synchronize state even without network connectivity. Shared access is
  15112. * currently optional and not enabled unless all clients invoke
  15113. * `enablePersistence()` with `{synchronizeTabs:true}`.
  15114. *
  15115. * In multi-tab mode, if multiple clients are active at the same time, the SDK
  15116. * will designate one client as the “primary client”. An effort is made to pick
  15117. * a visible, network-connected and active client, and this client is
  15118. * responsible for letting other clients know about its presence. The primary
  15119. * client writes a unique client-generated identifier (the client ID) to
  15120. * IndexedDb’s “owner” store every 4 seconds. If the primary client fails to
  15121. * update this entry, another client can acquire the lease and take over as
  15122. * primary.
  15123. *
  15124. * Some persistence operations in the SDK are designated as primary-client only
  15125. * operations. This includes the acknowledgment of mutations and all updates of
  15126. * remote documents. The effects of these operations are written to persistence
  15127. * and then broadcast to other tabs via LocalStorage (see
  15128. * `WebStorageSharedClientState`), which then refresh their state from
  15129. * persistence.
  15130. *
  15131. * Similarly, the primary client listens to notifications sent by secondary
  15132. * clients to discover persistence changes written by secondary clients, such as
  15133. * the addition of new mutations and query targets.
  15134. *
  15135. * If multi-tab is not enabled and another tab already obtained the primary
  15136. * lease, IndexedDbPersistence enters a failed state and all subsequent
  15137. * operations will automatically fail.
  15138. *
  15139. * Additionally, there is an optimization so that when a tab is closed, the
  15140. * primary lease is released immediately (this is especially important to make
  15141. * sure that a refreshed tab is able to immediately re-acquire the primary
  15142. * lease). Unfortunately, IndexedDB cannot be reliably used in window.unload
  15143. * since it is an asynchronous API. So in addition to attempting to give up the
  15144. * lease, the leaseholder writes its client ID to a "zombiedClient" entry in
  15145. * LocalStorage which acts as an indicator that another tab should go ahead and
  15146. * take the primary lease immediately regardless of the current lease timestamp.
  15147. *
  15148. * TODO(b/114226234): Remove `synchronizeTabs` section when multi-tab is no
  15149. * longer optional.
  15150. */
  15151. class IndexedDbPersistence {
    constructor(
    /**
     * Whether to synchronize the in-memory state of multiple tabs and share
     * access to local persistence.
     */
    allowTabSynchronization, persistenceKey, clientId, lruParams, queue, window, document, serializer, sequenceNumberSyncer,
    /**
     * If set to true, forcefully obtains database access. Existing tabs will
     * no longer be able to access IndexedDB.
     */
    forceOwningTab, schemaVersion = SCHEMA_VERSION) {
        this.allowTabSynchronization = allowTabSynchronization;
        this.persistenceKey = persistenceKey;
        this.clientId = clientId;
        this.queue = queue;
        this.window = window;
        this.document = document;
        this.sequenceNumberSyncer = sequenceNumberSyncer;
        this.forceOwningTab = forceOwningTab;
        this.schemaVersion = schemaVersion;
        // Populated by start() from the highest persisted sequence number.
        this.listenSequence = null;
        this._started = false;
        this.isPrimary = false;
        this.networkEnabled = true;
        /** Our window.unload handler, if registered. */
        this.windowUnloadHandler = null;
        this.inForeground = false;
        /** Our 'visibilitychange' listener if registered. */
        this.documentVisibilityHandler = null;
        /** The client metadata refresh task. */
        this.clientMetadataRefresher = null;
        /** The last time we garbage collected the client metadata object store. */
        this.lastGarbageCollectionTime = Number.NEGATIVE_INFINITY;
        /** A listener to notify on primary state changes. */
        this.primaryStateListener = _ => Promise.resolve();
        if (!IndexedDbPersistence.isAvailable()) {
            throw new FirestoreError(Code.UNIMPLEMENTED, UNSUPPORTED_PLATFORM_ERROR_MSG);
        }
        this.referenceDelegate = new IndexedDbLruDelegateImpl(this, lruParams);
        // Physical database name = caller-supplied prefix + 'main'.
        this.dbName = persistenceKey + MAIN_DATABASE;
        this.serializer = new LocalSerializer(serializer);
        this.simpleDb = new SimpleDb(this.dbName, this.schemaVersion, new SchemaConverter(this.serializer));
        this.targetCache = new IndexedDbTargetCache(this.referenceDelegate, this.serializer);
        this.remoteDocumentCache = newIndexedDbRemoteDocumentCache(this.serializer);
        this.bundleCache = new IndexedDbBundleCache();
        if (this.window && this.window.localStorage) {
            this.webStorage = this.window.localStorage;
        }
        else {
            // Without LocalStorage, zombie-client tracking cannot work; warn
            // unless the caller explicitly forced single-tab ownership.
            this.webStorage = null;
            if (forceOwningTab === false) {
                logError(LOG_TAG$c, 'LocalStorage is unavailable. As a result, persistence may not work ' +
                    'reliably. In particular enablePersistence() could fail immediately ' +
                    'after refreshing the page.');
            }
        }
    }
  15209. /**
  15210. * Attempt to start IndexedDb persistence.
  15211. *
  15212. * @returns Whether persistence was enabled.
  15213. */
  15214. start() {
  15215. // NOTE: This is expected to fail sometimes (in the case of another tab
  15216. // already having the persistence lock), so it's the first thing we should
  15217. // do.
  15218. return this.updateClientMetadataAndTryBecomePrimary()
  15219. .then(() => {
  15220. if (!this.isPrimary && !this.allowTabSynchronization) {
  15221. // Fail `start()` if `synchronizeTabs` is disabled and we cannot
  15222. // obtain the primary lease.
  15223. throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
  15224. }
  15225. this.attachVisibilityHandler();
  15226. this.attachWindowUnloadHook();
  15227. this.scheduleClientMetadataAndPrimaryLeaseRefreshes();
  15228. return this.runTransaction('getHighestListenSequenceNumber', 'readonly', txn => this.targetCache.getHighestSequenceNumber(txn));
  15229. })
  15230. .then(highestListenSequenceNumber => {
  15231. this.listenSequence = new ListenSequence(highestListenSequenceNumber, this.sequenceNumberSyncer);
  15232. })
  15233. .then(() => {
  15234. this._started = true;
  15235. })
  15236. .catch(reason => {
  15237. this.simpleDb && this.simpleDb.close();
  15238. return Promise.reject(reason);
  15239. });
  15240. }
  15241. /**
  15242. * Registers a listener that gets called when the primary state of the
  15243. * instance changes. Upon registering, this listener is invoked immediately
  15244. * with the current primary state.
  15245. *
  15246. * PORTING NOTE: This is only used for Web multi-tab.
  15247. */
  15248. setPrimaryStateListener(primaryStateListener) {
  15249. this.primaryStateListener = async (primaryState) => {
  15250. if (this.started) {
  15251. return primaryStateListener(primaryState);
  15252. }
  15253. };
  15254. return primaryStateListener(this.isPrimary);
  15255. }
  15256. /**
  15257. * Registers a listener that gets called when the database receives a
  15258. * version change event indicating that it has deleted.
  15259. *
  15260. * PORTING NOTE: This is only used for Web multi-tab.
  15261. */
  15262. setDatabaseDeletedListener(databaseDeletedListener) {
  15263. this.simpleDb.setVersionChangeListener(async (event) => {
  15264. // Check if an attempt is made to delete IndexedDB.
  15265. if (event.newVersion === null) {
  15266. await databaseDeletedListener();
  15267. }
  15268. });
  15269. }
  15270. /**
  15271. * Adjusts the current network state in the client's metadata, potentially
  15272. * affecting the primary lease.
  15273. *
  15274. * PORTING NOTE: This is only used for Web multi-tab.
  15275. */
  15276. setNetworkEnabled(networkEnabled) {
  15277. if (this.networkEnabled !== networkEnabled) {
  15278. this.networkEnabled = networkEnabled;
  15279. // Schedule a primary lease refresh for immediate execution. The eventual
  15280. // lease update will be propagated via `primaryStateListener`.
  15281. this.queue.enqueueAndForget(async () => {
  15282. if (this.started) {
  15283. await this.updateClientMetadataAndTryBecomePrimary();
  15284. }
  15285. });
  15286. }
  15287. }
    /**
     * Updates the client metadata in IndexedDb and attempts to either obtain or
     * extend the primary lease for the local client. Asynchronously notifies the
     * primary state listener if the client either newly obtained or released its
     * primary lease.
     */
    updateClientMetadataAndTryBecomePrimary() {
        return this.runTransaction('updateClientMetadataAndTryBecomePrimary', 'readwrite', txn => {
            const metadataStore = clientMetadataStore(txn);
            // Step 1: refresh our own metadata row (timestamp, network,
            // visibility) so other tabs see us as active.
            return metadataStore
                .put({
                clientId: this.clientId,
                updateTimeMs: Date.now(),
                networkEnabled: this.networkEnabled,
                inForeground: this.inForeground
            })
                .next(() => {
                // Step 2: if we believe we are primary, verify the persisted
                // lease still names us; if not, demote ourselves and notify.
                if (this.isPrimary) {
                    return this.verifyPrimaryLease(txn).next(success => {
                        if (!success) {
                            this.isPrimary = false;
                            this.queue.enqueueRetryable(() => this.primaryStateListener(false));
                        }
                    });
                }
            })
                .next(() => this.canActAsPrimary(txn))
                .next(canActAsPrimary => {
                // Step 3: reconcile eligibility with our current state —
                // release the lease if we lost eligibility, acquire/extend
                // it if we have it.
                if (this.isPrimary && !canActAsPrimary) {
                    return this.releasePrimaryLeaseIfHeld(txn).next(() => false);
                }
                else if (canActAsPrimary) {
                    return this.acquireOrExtendPrimaryLease(txn).next(() => true);
                }
                else {
                    return /* canActAsPrimary= */ false;
                }
            });
        })
            .catch(e => {
            if (isIndexedDbTransactionError(e)) {
                logDebug(LOG_TAG$c, 'Failed to extend owner lease: ', e);
                // Proceed with the existing state. Any subsequent access to
                // IndexedDB will verify the lease.
                return this.isPrimary;
            }
            if (!this.allowTabSynchronization) {
                throw e;
            }
            logDebug(LOG_TAG$c, 'Releasing owner lease after error during lease refresh', e);
            return /* isPrimary= */ false;
        })
            .then(isPrimary => {
            // Only notify the listener on actual transitions.
            if (this.isPrimary !== isPrimary) {
                this.queue.enqueueRetryable(() => this.primaryStateListener(isPrimary));
            }
            this.isPrimary = isPrimary;
        });
    }
  15347. verifyPrimaryLease(txn) {
  15348. const store = primaryClientStore(txn);
  15349. return store.get(DbPrimaryClientKey).next(primaryClient => {
  15350. return PersistencePromise.resolve(this.isLocalClient(primaryClient));
  15351. });
  15352. }
  15353. removeClientMetadata(txn) {
  15354. const metadataStore = clientMetadataStore(txn);
  15355. return metadataStore.delete(this.clientId);
  15356. }
    /**
     * If the garbage collection threshold has passed, prunes the
     * RemoteDocumentChanges and the ClientMetadata store based on the last update
     * time of all clients.
     */
    async maybeGarbageCollectMultiClientState() {
        // Only the primary runs GC, and at most once per MAX_CLIENT_AGE_MS.
        if (this.isPrimary &&
            !this.isWithinAge(this.lastGarbageCollectionTime, MAX_CLIENT_AGE_MS)) {
            this.lastGarbageCollectionTime = Date.now();
            const inactiveClients = await this.runTransaction('maybeGarbageCollectMultiClientState', 'readwrite-primary', txn => {
                const metadataStore = getStore(txn, DbClientMetadataStore);
                return metadataStore.loadAll().next(existingClients => {
                    const active = this.filterActiveClients(existingClients, MAX_CLIENT_AGE_MS);
                    const inactive = existingClients.filter(client => active.indexOf(client) === -1);
                    // Delete metadata for clients that are no longer considered active.
                    return PersistencePromise.forEach(inactive, (inactiveClient) => metadataStore.delete(inactiveClient.clientId)).next(() => inactive);
                });
            }).catch(() => {
                // Ignore primary lease violations or any other type of error. The next
                // primary will run `maybeGarbageCollectMultiClientState()` again.
                // We don't use `ignoreIfPrimaryLeaseLoss()` since we don't want to depend
                // on LocalStore.
                return [];
            });
            // Delete potential leftover entries that may continue to mark the
            // inactive clients as zombied in LocalStorage.
            // Ideally we'd delete the IndexedDb and LocalStorage zombie entries for
            // the client atomically, but we can't. So we opt to delete the IndexedDb
            // entries first to avoid potentially reviving a zombied client.
            if (this.webStorage) {
                for (const inactiveClient of inactiveClients) {
                    this.webStorage.removeItem(this.zombiedClientLocalStorageKey(inactiveClient.clientId));
                }
            }
        }
    }
  15393. /**
  15394. * Schedules a recurring timer to update the client metadata and to either
  15395. * extend or acquire the primary lease if the client is eligible.
  15396. */
  15397. scheduleClientMetadataAndPrimaryLeaseRefreshes() {
  15398. this.clientMetadataRefresher = this.queue.enqueueAfterDelay("client_metadata_refresh" /* TimerId.ClientMetadataRefresh */, CLIENT_METADATA_REFRESH_INTERVAL_MS, () => {
  15399. return this.updateClientMetadataAndTryBecomePrimary()
  15400. .then(() => this.maybeGarbageCollectMultiClientState())
  15401. .then(() => this.scheduleClientMetadataAndPrimaryLeaseRefreshes());
  15402. });
  15403. }
  15404. /** Checks whether `client` is the local client. */
  15405. isLocalClient(client) {
  15406. return client ? client.ownerId === this.clientId : false;
  15407. }
    /**
     * Evaluate the state of all active clients and determine whether the local
     * client is or can act as the holder of the primary lease. Returns whether
     * the client is eligible for the lease, but does not actually acquire it.
     * May return 'false' even if there is no active leaseholder and another
     * (foreground) client should become leaseholder instead.
     */
    canActAsPrimary(txn) {
        // `forceOwningTab` short-circuits the whole election.
        if (this.forceOwningTab) {
            return PersistencePromise.resolve(true);
        }
        const store = primaryClientStore(txn);
        return store
            .get(DbPrimaryClientKey)
            .next(currentPrimary => {
            // A lease counts as valid only if it exists, is recent enough,
            // and its owner has not marked itself zombied in LocalStorage.
            const currentLeaseIsValid = currentPrimary !== null &&
                this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
                !this.isClientZombied(currentPrimary.ownerId);
            // A client is eligible for the primary lease if:
            // - its network is enabled and the client's tab is in the foreground.
            // - its network is enabled and no other client's tab is in the
            // foreground.
            // - every clients network is disabled and the client's tab is in the
            // foreground.
            // - every clients network is disabled and no other client's tab is in
            // the foreground.
            // - the `forceOwningTab` setting was passed in.
            if (currentLeaseIsValid) {
                if (this.isLocalClient(currentPrimary) && this.networkEnabled) {
                    return true;
                }
                if (!this.isLocalClient(currentPrimary)) {
                    if (!currentPrimary.allowTabSynchronization) {
                        // Fail the `canActAsPrimary` check if the current leaseholder has
                        // not opted into multi-tab synchronization. If this happens at
                        // client startup, we reject the Promise returned by
                        // `enablePersistence()` and the user can continue to use Firestore
                        // with in-memory persistence.
                        // If this fails during a lease refresh, we will instead block the
                        // AsyncQueue from executing further operations. Note that this is
                        // acceptable since mixing & matching different `synchronizeTabs`
                        // settings is not supported.
                        //
                        // TODO(b/114226234): Remove this check when `synchronizeTabs` can
                        // no longer be turned off.
                        throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
                    }
                    return false;
                }
            }
            // No valid lease: a networked, foregrounded tab takes it outright.
            if (this.networkEnabled && this.inForeground) {
                return true;
            }
            return clientMetadataStore(txn)
                .loadAll()
                .next(existingClients => {
                // Process all existing clients and determine whether at least one of
                // them is better suited to obtain the primary lease.
                const preferredCandidate = this.filterActiveClients(existingClients, MAX_PRIMARY_ELIGIBLE_AGE_MS).find(otherClient => {
                    if (this.clientId !== otherClient.clientId) {
                        const otherClientHasBetterNetworkState = !this.networkEnabled && otherClient.networkEnabled;
                        const otherClientHasBetterVisibility = !this.inForeground && otherClient.inForeground;
                        const otherClientHasSameNetworkState = this.networkEnabled === otherClient.networkEnabled;
                        if (otherClientHasBetterNetworkState ||
                            (otherClientHasBetterVisibility &&
                                otherClientHasSameNetworkState)) {
                            return true;
                        }
                    }
                    return false;
                });
                // Eligible only when no other client is strictly better suited.
                return preferredCandidate === undefined;
            });
        })
            .next(canActAsPrimary => {
            if (this.isPrimary !== canActAsPrimary) {
                logDebug(LOG_TAG$c, `Client ${canActAsPrimary ? 'is' : 'is not'} eligible for a primary lease.`);
            }
            return canActAsPrimary;
        });
    }
    /**
     * Tears this instance down: marks the client zombied, cancels the
     * metadata refresh timer, detaches DOM handlers, releases the primary
     * lease and client metadata, and closes the database. The teardown
     * order matters — see inline notes.
     */
    async shutdown() {
        // The shutdown() operations are idempotent and can be called even when
        // start() aborted (e.g. because it couldn't acquire the persistence lease).
        this._started = false;
        this.markClientZombied();
        if (this.clientMetadataRefresher) {
            this.clientMetadataRefresher.cancel();
            this.clientMetadataRefresher = null;
        }
        this.detachVisibilityHandler();
        this.detachWindowUnloadHook();
        // Use `SimpleDb.runTransaction` directly to avoid failing if another tab
        // has obtained the primary lease.
        await this.simpleDb.runTransaction('shutdown', 'readwrite', [DbPrimaryClientStore, DbClientMetadataStore], simpleDbTxn => {
            const persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, ListenSequence.INVALID);
            return this.releasePrimaryLeaseIfHeld(persistenceTransaction).next(() => this.removeClientMetadata(persistenceTransaction));
        });
        this.simpleDb.close();
        // Remove the entry marking the client as zombied from LocalStorage since
        // we successfully deleted its metadata from IndexedDb.
        this.removeClientZombiedEntry();
    }
  15511. /**
  15512. * Returns clients that are not zombied and have an updateTime within the
  15513. * provided threshold.
  15514. */
  15515. filterActiveClients(clients, activityThresholdMs) {
  15516. return clients.filter(client => this.isWithinAge(client.updateTimeMs, activityThresholdMs) &&
  15517. !this.isClientZombied(client.clientId));
  15518. }
  15519. /**
  15520. * Returns the IDs of the clients that are currently active. If multi-tab
  15521. * is not supported, returns an array that only contains the local client's
  15522. * ID.
  15523. *
  15524. * PORTING NOTE: This is only used for Web multi-tab.
  15525. */
  15526. getActiveClients() {
  15527. return this.runTransaction('getActiveClients', 'readonly', txn => {
  15528. return clientMetadataStore(txn)
  15529. .loadAll()
  15530. .next(clients => this.filterActiveClients(clients, MAX_CLIENT_AGE_MS).map(clientMetadata => clientMetadata.clientId));
  15531. });
  15532. }
    /** Whether start() completed successfully and shutdown() has not run. */
    get started() {
        return this._started;
    }
    /** Returns a per-user IndexedDb-backed mutation queue. */
    getMutationQueue(user, indexManager) {
        return IndexedDbMutationQueue.forUser(user, this.serializer, indexManager, this.referenceDelegate);
    }
    /** Returns the target cache instance created in the constructor. */
    getTargetCache() {
        return this.targetCache;
    }
    /** Returns the remote document cache instance created in the constructor. */
    getRemoteDocumentCache() {
        return this.remoteDocumentCache;
    }
    /** Returns a new per-user IndexedDb-backed index manager. */
    getIndexManager(user) {
        return new IndexedDbIndexManager(user, this.serializer.remoteSerializer.databaseId);
    }
/** Returns the IndexedDb document overlay cache scoped to the given user. */
getDocumentOverlayCache(user) {
    return IndexedDbDocumentOverlayCache.forUser(this.serializer, user);
}
/** Returns the bundle cache held by this persistence instance. */
getBundleCache() {
    return this.bundleCache;
}
/**
 * Runs `transactionOperation` inside an IndexedDb transaction spanning all
 * object stores of the current schema version.
 *
 * Modes: 'readonly' maps directly to an IndexedDb readonly transaction;
 * 'readwrite' and 'readwrite-primary' both map to readwrite. For
 * 'readwrite-primary' the primary lease is verified (or acquired) before the
 * operation runs and extended afterwards; if the lease cannot be obtained a
 * FAILED_PRECONDITION FirestoreError is thrown and the primary-state
 * listener is notified.
 */
runTransaction(action, mode, transactionOperation) {
    logDebug(LOG_TAG$c, 'Starting transaction:', action);
    const simpleDbMode = mode === 'readonly' ? 'readonly' : 'readwrite';
    const objectStores = getObjectStores(this.schemaVersion);
    let persistenceTransaction;
    // Do all transactions as readwrite against all object stores, since we
    // are the only reader/writer.
    return this.simpleDb
        .runTransaction(action, simpleDbMode, objectStores, simpleDbTxn => {
        // Only assign a fresh sequence number once listenSequence exists.
        persistenceTransaction = new IndexedDbTransaction(simpleDbTxn, this.listenSequence
            ? this.listenSequence.next()
            : ListenSequence.INVALID);
        if (mode === 'readwrite-primary') {
            // While we merely verify that we have (or can acquire) the lease
            // immediately, we wait to extend the primary lease until after
            // executing transactionOperation(). This ensures that even if the
            // transactionOperation takes a long time, we'll use a recent
            // leaseTimestampMs in the extended (or newly acquired) lease.
            return this.verifyPrimaryLease(persistenceTransaction)
                .next(holdsPrimaryLease => {
                if (holdsPrimaryLease) {
                    return /* holdsPrimaryLease= */ true;
                }
                // We don't hold the lease yet; check whether we could take it.
                return this.canActAsPrimary(persistenceTransaction);
            })
                .next(holdsPrimaryLease => {
                if (!holdsPrimaryLease) {
                    logError(`Failed to obtain primary lease for action '${action}'.`);
                    this.isPrimary = false;
                    this.queue.enqueueRetryable(() => this.primaryStateListener(false));
                    throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_LOST_ERROR_MSG);
                }
                return transactionOperation(persistenceTransaction);
            })
                .next(result => {
                // Extend (or acquire) the lease with a timestamp taken after
                // the operation completed, then propagate its result.
                return this.acquireOrExtendPrimaryLease(persistenceTransaction).next(() => result);
            });
        }
        else {
            return this.verifyAllowTabSynchronization(persistenceTransaction).next(() => transactionOperation(persistenceTransaction));
        }
    })
        .then(result => {
        // Fire the on-committed callbacks once the IndexedDb transaction
        // has actually committed.
        persistenceTransaction.raiseOnCommittedEvent();
        return result;
    });
}
  15601. /**
  15602. * Verifies that the current tab is the primary leaseholder or alternatively
  15603. * that the leaseholder has opted into multi-tab synchronization.
  15604. */
  15605. // TODO(b/114226234): Remove this check when `synchronizeTabs` can no longer
  15606. // be turned off.
verifyAllowTabSynchronization(txn) {
    const store = primaryClientStore(txn);
    return store.get(DbPrimaryClientKey).next(currentPrimary => {
        // A lease counts only if it exists, is recent enough, and its owner
        // has not been marked zombied in LocalStorage.
        const currentLeaseIsValid = currentPrimary !== null &&
            this.isWithinAge(currentPrimary.leaseTimestampMs, MAX_PRIMARY_ELIGIBLE_AGE_MS) &&
            !this.isClientZombied(currentPrimary.ownerId);
        if (currentLeaseIsValid && !this.isLocalClient(currentPrimary)) {
            // Another tab holds the lease: fail unless both sides opted into
            // multi-tab synchronization (or this tab forces ownership).
            if (!this.forceOwningTab &&
                (!this.allowTabSynchronization ||
                    !currentPrimary.allowTabSynchronization)) {
                throw new FirestoreError(Code.FAILED_PRECONDITION, PRIMARY_LEASE_EXCLUSIVE_ERROR_MSG);
            }
        }
    });
}
  15622. /**
  15623. * Obtains or extends the new primary lease for the local client. This
  15624. * method does not verify that the client is eligible for this lease.
  15625. */
  15626. acquireOrExtendPrimaryLease(txn) {
  15627. const newPrimary = {
  15628. ownerId: this.clientId,
  15629. allowTabSynchronization: this.allowTabSynchronization,
  15630. leaseTimestampMs: Date.now()
  15631. };
  15632. return primaryClientStore(txn).put(DbPrimaryClientKey, newPrimary);
  15633. }
/** Delegates to SimpleDb.isAvailable() to report IndexedDb availability. */
static isAvailable() {
    return SimpleDb.isAvailable();
}
  15637. /** Checks the primary lease and removes it if we are the current primary. */
  15638. releasePrimaryLeaseIfHeld(txn) {
  15639. const store = primaryClientStore(txn);
  15640. return store.get(DbPrimaryClientKey).next(primaryClient => {
  15641. if (this.isLocalClient(primaryClient)) {
  15642. logDebug(LOG_TAG$c, 'Releasing primary lease.');
  15643. return store.delete(DbPrimaryClientKey);
  15644. }
  15645. else {
  15646. return PersistencePromise.resolve();
  15647. }
  15648. });
  15649. }
  15650. /** Verifies that `updateTimeMs` is within `maxAgeMs`. */
  15651. isWithinAge(updateTimeMs, maxAgeMs) {
  15652. const now = Date.now();
  15653. const minAcceptable = now - maxAgeMs;
  15654. const maxAcceptable = now;
  15655. if (updateTimeMs < minAcceptable) {
  15656. return false;
  15657. }
  15658. else if (updateTimeMs > maxAcceptable) {
  15659. logError(`Detected an update time that is in the future: ${updateTimeMs} > ${maxAcceptable}`);
  15660. return false;
  15661. }
  15662. return true;
  15663. }
/**
 * Registers a 'visibilitychange' listener that keeps `inForeground` in sync
 * with the document's visibility state and re-runs the primary-election
 * logic on every change. No-op when no document with addEventListener is
 * available (e.g. outside a browser).
 */
attachVisibilityHandler() {
    if (this.document !== null &&
        typeof this.document.addEventListener === 'function') {
        this.documentVisibilityHandler = () => {
            this.queue.enqueueAndForget(() => {
                this.inForeground = this.document.visibilityState === 'visible';
                return this.updateClientMetadataAndTryBecomePrimary();
            });
        };
        this.document.addEventListener('visibilitychange', this.documentVisibilityHandler);
        // Seed the flag with the current state so it is correct before the
        // first event fires.
        this.inForeground = this.document.visibilityState === 'visible';
    }
}
  15677. detachVisibilityHandler() {
  15678. if (this.documentVisibilityHandler) {
  15679. this.document.removeEventListener('visibilitychange', this.documentVisibilityHandler);
  15680. this.documentVisibilityHandler = null;
  15681. }
  15682. }
  15683. /**
  15684. * Attaches a window.unload handler that will synchronously write our
  15685. * clientId to a "zombie client id" location in LocalStorage. This can be used
  15686. * by tabs trying to acquire the primary lease to determine that the lease
  15687. * is no longer valid even if the timestamp is recent. This is particularly
  15688. * important for the refresh case (so the tab correctly re-acquires the
  15689. * primary lease). LocalStorage is used for this rather than IndexedDb because
  15690. * it is a synchronous API and so can be used reliably from an unload
  15691. * handler.
  15692. */
attachWindowUnloadHook() {
    var _a;
    if (typeof ((_a = this.window) === null || _a === void 0 ? void 0 : _a.addEventListener) === 'function') {
        this.windowUnloadHandler = () => {
            // Note: In theory, this should be scheduled on the AsyncQueue since it
            // accesses internal state. We execute this code directly during shutdown
            // to make sure it gets a chance to run.
            this.markClientZombied();
            if (isSafari() && navigator.appVersion.match(/Version\/1[45]/)) {
                // On Safari 14 and 15, we do not run any cleanup actions as it might
                // trigger a bug that prevents Safari from re-opening IndexedDB during
                // the next page load.
                // See https://bugs.webkit.org/show_bug.cgi?id=226547
                this.queue.enterRestrictedMode(/* purgeExistingTasks= */ true);
            }
            this.queue.enqueueAndForget(() => {
                // Attempt graceful shutdown (including releasing our primary lease),
                // but there's no guarantee it will complete.
                return this.shutdown();
            });
        };
        // The handler registered here is removed by detachWindowUnloadHook().
        this.window.addEventListener('pagehide', this.windowUnloadHandler);
    }
}
  15717. detachWindowUnloadHook() {
  15718. if (this.windowUnloadHandler) {
  15719. this.window.removeEventListener('pagehide', this.windowUnloadHandler);
  15720. this.windowUnloadHandler = null;
  15721. }
  15722. }
  15723. /**
  15724. * Returns whether a client is "zombied" based on its LocalStorage entry.
  15725. * Clients become zombied when their tab closes without running all of the
  15726. * cleanup logic in `shutdown()`.
  15727. */
  15728. isClientZombied(clientId) {
  15729. var _a;
  15730. try {
  15731. const isZombied = ((_a = this.webStorage) === null || _a === void 0 ? void 0 : _a.getItem(this.zombiedClientLocalStorageKey(clientId))) !== null;
  15732. logDebug(LOG_TAG$c, `Client '${clientId}' ${isZombied ? 'is' : 'is not'} zombied in LocalStorage`);
  15733. return isZombied;
  15734. }
  15735. catch (e) {
  15736. // Gracefully handle if LocalStorage isn't working.
  15737. logError(LOG_TAG$c, 'Failed to get zombied client id.', e);
  15738. return false;
  15739. }
  15740. }
  15741. /**
  15742. * Record client as zombied (a client that had its tab closed). Zombied
  15743. * clients are ignored during primary tab selection.
  15744. */
  15745. markClientZombied() {
  15746. if (!this.webStorage) {
  15747. return;
  15748. }
  15749. try {
  15750. this.webStorage.setItem(this.zombiedClientLocalStorageKey(this.clientId), String(Date.now()));
  15751. }
  15752. catch (e) {
  15753. // Gracefully handle if LocalStorage isn't available / working.
  15754. logError('Failed to set zombie client id.', e);
  15755. }
  15756. }
  15757. /** Removes the zombied client entry if it exists. */
  15758. removeClientZombiedEntry() {
  15759. if (!this.webStorage) {
  15760. return;
  15761. }
  15762. try {
  15763. this.webStorage.removeItem(this.zombiedClientLocalStorageKey(this.clientId));
  15764. }
  15765. catch (e) {
  15766. // Ignore
  15767. }
  15768. }
/** Builds the LocalStorage key under which a client's zombie marker lives. */
zombiedClientLocalStorageKey(clientId) {
    return `${ZOMBIED_CLIENTS_KEY_PREFIX}_${this.persistenceKey}_${clientId}`;
}
  15772. }
  15773. /**
  15774. * Helper to get a typed SimpleDbStore for the primary client object store.
  15775. */
function primaryClientStore(txn) {
    // DbPrimaryClientStore holds the primary-lease record, accessed via
    // DbPrimaryClientKey elsewhere in this file.
    return getStore(txn, DbPrimaryClientStore);
}
  15779. /**
  15780. * Helper to get a typed SimpleDbStore for the client metadata object store.
  15781. */
function clientMetadataStore(txn) {
    // Per-client metadata rows (clientId, updateTimeMs, ...) used for
    // multi-tab activity tracking.
    return getStore(txn, DbClientMetadataStore);
}
  15785. /**
  15786. * Generates a string used as a prefix when storing data in IndexedDB and
  15787. * LocalStorage.
  15788. */
  15789. function indexedDbStoragePrefix(databaseId, persistenceKey) {
  15790. // Use two different prefix formats:
  15791. //
  15792. // * firestore / persistenceKey / projectID . databaseID / ...
  15793. // * firestore / persistenceKey / projectID / ...
  15794. //
  15795. // projectIDs are DNS-compatible names and cannot contain dots
  15796. // so there's no danger of collisions.
  15797. let database = databaseId.projectId;
  15798. if (!databaseId.isDefaultDatabase) {
  15799. database += '.' + databaseId.database;
  15800. }
  15801. return 'firestore/' + persistenceKey + '/' + database + '/';
  15802. }
  15803. async function indexedDbClearPersistence(persistenceKey) {
  15804. if (!SimpleDb.isAvailable()) {
  15805. return Promise.resolve();
  15806. }
  15807. const dbName = persistenceKey + MAIN_DATABASE;
  15808. await SimpleDb.delete(dbName);
  15809. }
  15810. /**
  15811. * @license
  15812. * Copyright 2017 Google LLC
  15813. *
  15814. * Licensed under the Apache License, Version 2.0 (the "License");
  15815. * you may not use this file except in compliance with the License.
  15816. * You may obtain a copy of the License at
  15817. *
  15818. * http://www.apache.org/licenses/LICENSE-2.0
  15819. *
  15820. * Unless required by applicable law or agreed to in writing, software
  15821. * distributed under the License is distributed on an "AS IS" BASIS,
  15822. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15823. * See the License for the specific language governing permissions and
  15824. * limitations under the License.
  15825. */
  15826. /**
  15827. * Compares two array for equality using comparator. The method computes the
  15828. * intersection and invokes `onAdd` for every element that is in `after` but not
  15829. * `before`. `onRemove` is invoked for every element in `before` but missing
  15830. * from `after`.
  15831. *
  15832. * The method creates a copy of both `before` and `after` and runs in O(n log
  15833. * n), where n is the size of the two lists.
  15834. *
  15835. * @param before - The elements that exist in the original array.
  15836. * @param after - The elements to diff against the original array.
  15837. * @param comparator - The comparator for the elements in before and after.
  15838. * @param onAdd - A function to invoke for every element that is part of `
  15839. * after` but not `before`.
  15840. * @param onRemove - A function to invoke for every element that is part of
  15841. * `before` but not `after`.
  15842. */
  15843. function diffArrays(before, after, comparator, onAdd, onRemove) {
  15844. before = [...before];
  15845. after = [...after];
  15846. before.sort(comparator);
  15847. after.sort(comparator);
  15848. const bLen = before.length;
  15849. const aLen = after.length;
  15850. let a = 0;
  15851. let b = 0;
  15852. while (a < aLen && b < bLen) {
  15853. const cmp = comparator(before[b], after[a]);
  15854. if (cmp < 0) {
  15855. // The element was removed if the next element in our ordered
  15856. // walkthrough is only in `before`.
  15857. onRemove(before[b++]);
  15858. }
  15859. else if (cmp > 0) {
  15860. // The element was added if the next element in our ordered walkthrough
  15861. // is only in `after`.
  15862. onAdd(after[a++]);
  15863. }
  15864. else {
  15865. a++;
  15866. b++;
  15867. }
  15868. }
  15869. while (a < aLen) {
  15870. onAdd(after[a++]);
  15871. }
  15872. while (b < bLen) {
  15873. onRemove(before[b++]);
  15874. }
  15875. }
  15876. /**
  15877. * @license
  15878. * Copyright 2020 Google LLC
  15879. *
  15880. * Licensed under the Apache License, Version 2.0 (the "License");
  15881. * you may not use this file except in compliance with the License.
  15882. * You may obtain a copy of the License at
  15883. *
  15884. * http://www.apache.org/licenses/LICENSE-2.0
  15885. *
  15886. * Unless required by applicable law or agreed to in writing, software
  15887. * distributed under the License is distributed on an "AS IS" BASIS,
  15888. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15889. * See the License for the specific language governing permissions and
  15890. * limitations under the License.
  15891. */
  15892. const LOG_TAG$b = 'LocalStore';
  15893. /**
  15894. * The maximum time to leave a resume token buffered without writing it out.
  15895. * This value is arbitrary: it's long enough to avoid several writes
  15896. * (possibly indefinitely if updates come more frequently than this) but
  15897. * short enough that restarting after crashing will still have a pretty
  15898. * recent resume token.
  15899. */
  15900. const RESUME_TOKEN_MAX_AGE_MICROS = 5 * 60 * 1e6;
  15901. /**
  15902. * Implements `LocalStore` interface.
  15903. *
  15904. * Note: some field defined in this class might have public access level, but
  15905. * the class is not exported so they are only accessible from this module.
  15906. * This is useful to implement optional features (like bundles) in free
  15907. * functions, such that they are tree-shakeable.
  15908. */
class LocalStoreImpl {
    constructor(
    /** Manages our in-memory or durable persistence. */
    persistence, queryEngine, initialUser, serializer) {
        this.persistence = persistence;
        this.queryEngine = queryEngine;
        this.serializer = serializer;
        /**
         * Maps a targetID to data about its target.
         *
         * PORTING NOTE: We are using an immutable data structure on Web to make re-runs
         * of `applyRemoteEvent()` idempotent.
         */
        this.targetDataByTarget = new SortedMap(primitiveComparator);
        /** Maps a target to its targetID. */
        // TODO(wuandy): Evaluate if TargetId can be part of Target.
        this.targetIdByTarget = new ObjectMap(t => canonifyTarget(t), targetEquals);
        /**
         * A per collection group index of the last read time processed by
         * `getNewDocumentChanges()`.
         *
         * PORTING NOTE: This is only used for multi-tab synchronization.
         */
        this.collectionGroupReadTime = new Map();
        // Caches are owned by `persistence`; we only hold references here.
        this.remoteDocuments = persistence.getRemoteDocumentCache();
        this.targetCache = persistence.getTargetCache();
        this.bundleCache = persistence.getBundleCache();
        this.initializeUserComponents(initialUser);
    }
    /**
     * (Re)creates the user-scoped components (overlay cache, index manager,
     * mutation queue, local documents view) for `user` and wires them into
     * the remote document cache and query engine. Called from the
     * constructor and again when the authenticated user changes.
     */
    initializeUserComponents(user) {
        // TODO(indexing): Add spec tests that test these components change after a
        // user change
        this.documentOverlayCache = this.persistence.getDocumentOverlayCache(user);
        this.indexManager = this.persistence.getIndexManager(user);
        this.mutationQueue = this.persistence.getMutationQueue(user, this.indexManager);
        this.localDocuments = new LocalDocumentsView(this.remoteDocuments, this.mutationQueue, this.documentOverlayCache, this.indexManager);
        this.remoteDocuments.setIndexManager(this.indexManager);
        this.queryEngine.initialize(this.localDocuments, this.indexManager);
    }
    /** Runs the given garbage collector inside a primary-lease transaction. */
    collectGarbage(garbageCollector) {
        return this.persistence.runTransaction('Collect garbage', 'readwrite-primary', txn => garbageCollector.collect(txn, this.targetDataByTarget));
    }
}
/** Factory for the `LocalStore` implementation. */
function newLocalStore(
/** Manages our in-memory or durable persistence. */
persistence, queryEngine, initialUser, serializer) {
    return new LocalStoreImpl(persistence, queryEngine, initialUser, serializer);
}
  15957. /**
  15958. * Tells the LocalStore that the currently authenticated user has changed.
  15959. *
  15960. * In response the local store switches the mutation queue to the new user and
  15961. * returns any resulting document changes.
  15962. */
  15963. // PORTING NOTE: Android and iOS only return the documents affected by the
  15964. // change.
async function localStoreHandleUserChange(localStore, user) {
    const localStoreImpl = debugCast(localStore);
    const result = await localStoreImpl.persistence.runTransaction('Handle user change', 'readonly', txn => {
        // Swap out the mutation queue, grabbing the pending mutation batches
        // before and after.
        let oldBatches;
        return localStoreImpl.mutationQueue
            .getAllMutationBatches(txn)
            .next(promisedOldBatches => {
            oldBatches = promisedOldBatches;
            // Re-point the user-scoped components (mutation queue, overlays,
            // index manager, local documents view) at the new user.
            localStoreImpl.initializeUserComponents(user);
            return localStoreImpl.mutationQueue.getAllMutationBatches(txn);
        })
            .next(newBatches => {
            const removedBatchIds = [];
            const addedBatchIds = [];
            // Union the old/new changed keys.
            let changedKeys = documentKeySet();
            // Keys mutated by the old user's pending batches: their local view
            // changes once those mutations no longer apply.
            for (const batch of oldBatches) {
                removedBatchIds.push(batch.batchId);
                for (const mutation of batch.mutations) {
                    changedKeys = changedKeys.add(mutation.key);
                }
            }
            // Likewise for the new user's pending batches.
            for (const batch of newBatches) {
                addedBatchIds.push(batch.batchId);
                for (const mutation of batch.mutations) {
                    changedKeys = changedKeys.add(mutation.key);
                }
            }
            // Return the set of all (potentially) changed documents and the list
            // of mutation batch IDs that were affected by change.
            return localStoreImpl.localDocuments
                .getDocuments(txn, changedKeys)
                .next(affectedDocuments => {
                return {
                    affectedDocuments,
                    removedBatchIds,
                    addedBatchIds
                };
            });
        });
    });
    return result;
}
/**
 * Accepts locally generated Mutations and commits them to storage.
 *
 * Reads the remote versions of all affected documents, computes the
 * latency-compensated (overlayed) view, records base values for
 * non-idempotent transforms, enqueues the batch, and saves the resulting
 * overlays — all in one readwrite transaction.
 *
 * @returns A promise resolving to `{ batchId, changes }` where `changes` is
 * the latency-compensated document map for the written keys.
 */
function localStoreWriteLocally(localStore, mutations) {
    const localStoreImpl = debugCast(localStore);
    const localWriteTime = Timestamp.now();
    const keys = mutations.reduce((keys, m) => keys.add(m.key), documentKeySet());
    let overlayedDocuments;
    let mutationBatch;
    return localStoreImpl.persistence
        .runTransaction('Locally write mutations', 'readwrite', txn => {
        // Figure out which keys do not have a remote version in the cache, this
        // is needed to create the right overlay mutation: if no remote version
        // presents, we do not need to create overlays as patch mutations.
        // TODO(Overlay): Is there a better way to determine this? Using the
        // document version does not work because local mutations set them back
        // to 0.
        let remoteDocs = mutableDocumentMap();
        let docsWithoutRemoteVersion = documentKeySet();
        return localStoreImpl.remoteDocuments
            .getEntries(txn, keys)
            .next(docs => {
            remoteDocs = docs;
            remoteDocs.forEach((key, doc) => {
                if (!doc.isValidDocument()) {
                    docsWithoutRemoteVersion = docsWithoutRemoteVersion.add(key);
                }
            });
        })
            .next(() => {
            // Load and apply all existing mutations. This lets us compute the
            // current base state for all non-idempotent transforms before applying
            // any additional user-provided writes.
            return localStoreImpl.localDocuments.getOverlayedDocuments(txn, remoteDocs);
        })
            .next((docs) => {
            overlayedDocuments = docs;
            // For non-idempotent mutations (such as `FieldValue.increment()`),
            // we record the base state in a separate patch mutation. This is
            // later used to guarantee consistent values and prevents flicker
            // even if the backend sends us an update that already includes our
            // transform.
            const baseMutations = [];
            for (const mutation of mutations) {
                const baseValue = mutationExtractBaseValue(mutation, overlayedDocuments.get(mutation.key).overlayedDocument);
                if (baseValue != null) {
                    // NOTE: The base state should only be applied if there's some
                    // existing document to override, so use a Precondition of
                    // exists=true
                    baseMutations.push(new PatchMutation(mutation.key, baseValue, extractFieldMask(baseValue.value.mapValue), Precondition.exists(true)));
                }
            }
            return localStoreImpl.mutationQueue.addMutationBatch(txn, localWriteTime, baseMutations, mutations);
        })
            .next(batch => {
            mutationBatch = batch;
            // Apply the new batch on top of the overlayed view and persist the
            // resulting overlays keyed by this batch id.
            const overlays = batch.applyToLocalDocumentSet(overlayedDocuments, docsWithoutRemoteVersion);
            return localStoreImpl.documentOverlayCache.saveOverlays(txn, batch.batchId, overlays);
        });
    })
        .then(() => ({
        batchId: mutationBatch.batchId,
        changes: convertOverlayedDocumentMapToDocumentMap(overlayedDocuments)
    }));
}
  16073. /**
  16074. * Acknowledges the given batch.
  16075. *
  16076. * On the happy path when a batch is acknowledged, the local store will
  16077. *
  16078. * + remove the batch from the mutation queue;
  16079. * + apply the changes to the remote document cache;
  16080. * + recalculate the latency compensated view implied by those changes (there
  16081. * may be mutations in the queue that affect the documents but haven't been
  16082. * acknowledged yet); and
  16083. * + give the changed documents back the sync engine
  16084. *
  16085. * @returns The resulting (modified) documents.
  16086. */
function localStoreAcknowledgeBatch(localStore, batchResult) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Acknowledge batch', 'readwrite-primary', txn => {
        const affected = batchResult.batch.keys();
        const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
            trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
        });
        // In order: apply the acknowledged writes to the remote document
        // cache, commit the buffered changes, check mutation-queue
        // invariants, drop this batch's overlays, recalculate overlays for
        // keys whose mutations carried transform results, and finally read
        // back the affected documents to return.
        return applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer)
            .next(() => documentBuffer.apply(txn))
            .next(() => localStoreImpl.mutationQueue.performConsistencyCheck(txn))
            .next(() => localStoreImpl.documentOverlayCache.removeOverlaysForBatchId(txn, affected, batchResult.batch.batchId))
            .next(() => localStoreImpl.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(txn, getKeysWithTransformResults(batchResult)))
            .next(() => localStoreImpl.localDocuments.getDocuments(txn, affected));
    });
}
  16102. function getKeysWithTransformResults(batchResult) {
  16103. let result = documentKeySet();
  16104. for (let i = 0; i < batchResult.mutationResults.length; ++i) {
  16105. const mutationResult = batchResult.mutationResults[i];
  16106. if (mutationResult.transformResults.length > 0) {
  16107. result = result.add(batchResult.batch.mutations[i].key);
  16108. }
  16109. }
  16110. return result;
  16111. }
  16112. /**
  16113. * Removes mutations from the MutationQueue for the specified batch;
  16114. * LocalDocuments will be recalculated.
  16115. *
  16116. * @returns The resulting modified documents.
  16117. */
function localStoreRejectBatch(localStore, batchId) {
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Reject batch', 'readwrite-primary', txn => {
        let affectedKeys;
        return localStoreImpl.mutationQueue
            .lookupMutationBatch(txn, batchId)
            .next((batch) => {
            // A rejected batch must still be present in the queue.
            hardAssert(batch !== null);
            affectedKeys = batch.keys();
            return localStoreImpl.mutationQueue.removeMutationBatch(txn, batch);
        })
            // Check queue invariants, drop this batch's overlays, recalculate
            // overlays for all affected keys, then read back the documents.
            .next(() => localStoreImpl.mutationQueue.performConsistencyCheck(txn))
            .next(() => localStoreImpl.documentOverlayCache.removeOverlaysForBatchId(txn, affectedKeys, batchId))
            .next(() => localStoreImpl.localDocuments.recalculateAndSaveOverlaysForDocumentKeys(txn, affectedKeys))
            .next(() => localStoreImpl.localDocuments.getDocuments(txn, affectedKeys));
    });
}
  16135. /**
  16136. * Returns the largest (latest) batch id in mutation queue that is pending
  16137. * server response.
  16138. *
  16139. * Returns `BATCHID_UNKNOWN` if the queue is empty.
  16140. */
  16141. function localStoreGetHighestUnacknowledgedBatchId(localStore) {
  16142. const localStoreImpl = debugCast(localStore);
  16143. return localStoreImpl.persistence.runTransaction('Get highest unacknowledged batch id', 'readonly', txn => localStoreImpl.mutationQueue.getHighestUnacknowledgedBatchId(txn));
  16144. }
  16145. /**
  16146. * Returns the last consistent snapshot processed (used by the RemoteStore to
  16147. * determine whether to buffer incoming snapshots from the backend).
  16148. */
  16149. function localStoreGetLastRemoteSnapshotVersion(localStore) {
  16150. const localStoreImpl = debugCast(localStore);
  16151. return localStoreImpl.persistence.runTransaction('Get last remote snapshot version', 'readonly', txn => localStoreImpl.targetCache.getLastRemoteSnapshotVersion(txn));
  16152. }
  16153. /**
  16154. * Updates the "ground-state" (remote) documents. We assume that the remote
  16155. * event reflects any write batches that have been acknowledged or rejected
  16156. * (i.e. we do not re-apply local mutations to updates from this event).
  16157. *
  16158. * LocalDocuments are re-calculated if there are remaining mutations in the
  16159. * queue.
  16160. */
function localStoreApplyRemoteEventToLocalCache(localStore, remoteEvent) {
    const localStoreImpl = debugCast(localStore);
    const remoteVersion = remoteEvent.snapshotVersion;
    let newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
    return localStoreImpl.persistence
        .runTransaction('Apply remote event', 'readwrite-primary', txn => {
        const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
            trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
        });
        // Reset newTargetDataByTargetMap in case this transaction gets re-run.
        newTargetDataByTargetMap = localStoreImpl.targetDataByTarget;
        const promises = [];
        remoteEvent.targetChanges.forEach((change, targetId) => {
            const oldTargetData = newTargetDataByTargetMap.get(targetId);
            if (!oldTargetData) {
                // Target is no longer tracked locally; skip its changes.
                return;
            }
            // Only update the remote keys if the target is still active. This
            // ensures that we can persist the updated target data along with
            // the updated assignment.
            promises.push(localStoreImpl.targetCache
                .removeMatchingKeys(txn, change.removedDocuments, targetId)
                .next(() => {
                return localStoreImpl.targetCache.addMatchingKeys(txn, change.addedDocuments, targetId);
            }));
            let newTargetData = oldTargetData.withSequenceNumber(txn.currentSequenceNumber);
            if (remoteEvent.targetMismatches.has(targetId)) {
                // On a target mismatch, clear the resume token and the
                // limbo-free snapshot so the target re-syncs from scratch.
                newTargetData = newTargetData
                    .withResumeToken(ByteString.EMPTY_BYTE_STRING, SnapshotVersion.min())
                    .withLastLimboFreeSnapshotVersion(SnapshotVersion.min());
            }
            else if (change.resumeToken.approximateByteSize() > 0) {
                newTargetData = newTargetData.withResumeToken(change.resumeToken, remoteVersion);
            }
            newTargetDataByTargetMap = newTargetDataByTargetMap.insert(targetId, newTargetData);
            // Update the target data if there are target changes (or if
            // sufficient time has passed since the last update).
            if (shouldPersistTargetData(oldTargetData, newTargetData, change)) {
                promises.push(localStoreImpl.targetCache.updateTargetData(txn, newTargetData));
            }
        });
        let changedDocs = mutableDocumentMap();
        let existenceChangedKeys = documentKeySet();
        remoteEvent.documentUpdates.forEach(key => {
            if (remoteEvent.resolvedLimboDocuments.has(key)) {
                promises.push(localStoreImpl.persistence.referenceDelegate.updateLimboDocument(txn, key));
            }
        });
        // Each loop iteration only affects its "own" doc, so it's safe to get all
        // the remote documents in advance in a single call.
        promises.push(populateDocumentChangeBuffer(txn, documentBuffer, remoteEvent.documentUpdates).next(result => {
            changedDocs = result.changedDocuments;
            existenceChangedKeys = result.existenceChangedKeys;
        }));
        // HACK: The only reason we allow a null snapshot version is so that we
        // can synthesize remote events when we get permission denied errors while
        // trying to resolve the state of a locally cached document that is in
        // limbo.
        if (!remoteVersion.isEqual(SnapshotVersion.min())) {
            // NOTE(review): lastRemoteSnapshotVersion is unused here — in the
            // TypeScript source it feeds a debug assertion that is stripped
            // from this production build; verify against upstream.
            const updateRemoteVersion = localStoreImpl.targetCache
                .getLastRemoteSnapshotVersion(txn)
                .next(lastRemoteSnapshotVersion => {
                return localStoreImpl.targetCache.setTargetsMetadata(txn, txn.currentSequenceNumber, remoteVersion);
            });
            promises.push(updateRemoteVersion);
        }
        return PersistencePromise.waitFor(promises)
            .next(() => documentBuffer.apply(txn))
            .next(() => localStoreImpl.localDocuments.getLocalViewOfDocuments(txn, changedDocs, existenceChangedKeys))
            .next(() => changedDocs);
    })
        .then(changedDocs => {
        // Only publish the new target-data map after the transaction commits.
        localStoreImpl.targetDataByTarget = newTargetDataByTargetMap;
        return changedDocs;
    });
}
/**
 * Populates document change buffer with documents from backend or a bundle.
 * Returns the document changes resulting from applying those documents, and
 * also a set of documents whose existence state are changed as a result.
 *
 * @param txn - Transaction to use to read existing documents from storage.
 * @param documentBuffer - Document buffer to collect the resulted changes to be
 * applied to storage.
 * @param documents - Documents to be applied.
 */
function populateDocumentChangeBuffer(txn, documentBuffer, documents) {
    let updatedKeys = documentKeySet();
    let existenceChangedKeys = documentKeySet();
    // Collect every affected key up front so the existing versions can be
    // fetched from the buffer in one batched call instead of per-document.
    documents.forEach(k => (updatedKeys = updatedKeys.add(k)));
    return documentBuffer.getEntries(txn, updatedKeys).next(existingDocs => {
        let changedDocuments = mutableDocumentMap();
        documents.forEach((key, doc) => {
            const existingDoc = existingDocs.get(key);
            // Check if there is an existence state change for this document
            // (found <-> not-found differs between the update and the cache).
            if (doc.isFoundDocument() !== existingDoc.isFoundDocument()) {
                existenceChangedKeys = existenceChangedKeys.add(key);
            }
            // Note: The order of the steps below is important, since we want
            // to ensure that rejected limbo resolutions (which fabricate
            // NoDocuments with SnapshotVersion.min()) never add documents to
            // cache.
            if (doc.isNoDocument() && doc.version.isEqual(SnapshotVersion.min())) {
                // NoDocuments with SnapshotVersion.min() are used in manufactured
                // events. We remove these documents from cache since we lost
                // access.
                documentBuffer.removeEntry(key, doc.readTime);
                changedDocuments = changedDocuments.insert(key, doc);
            }
            else if (!existingDoc.isValidDocument() ||
                doc.version.compareTo(existingDoc.version) > 0 ||
                (doc.version.compareTo(existingDoc.version) === 0 &&
                    existingDoc.hasPendingWrites)) {
                // Accept the update: it is strictly newer than the cached copy,
                // the cached copy is invalid, or versions are equal while local
                // pending writes exist.
                documentBuffer.addEntry(doc);
                changedDocuments = changedDocuments.insert(key, doc);
            }
            else {
                logDebug(LOG_TAG$b, 'Ignoring outdated watch update for ', key, '. Current version:', existingDoc.version, ' Watch version:', doc.version);
            }
        });
        return { changedDocuments, existenceChangedKeys };
    });
}
  16284. /**
  16285. * Returns true if the newTargetData should be persisted during an update of
  16286. * an active target. TargetData should always be persisted when a target is
  16287. * being released and should not call this function.
  16288. *
  16289. * While the target is active, TargetData updates can be omitted when nothing
  16290. * about the target has changed except metadata like the resume token or
  16291. * snapshot version. Occasionally it's worth the extra write to prevent these
  16292. * values from getting too stale after a crash, but this doesn't have to be
  16293. * too frequent.
  16294. */
  16295. function shouldPersistTargetData(oldTargetData, newTargetData, change) {
  16296. // Always persist target data if we don't already have a resume token.
  16297. if (oldTargetData.resumeToken.approximateByteSize() === 0) {
  16298. return true;
  16299. }
  16300. // Don't allow resume token changes to be buffered indefinitely. This
  16301. // allows us to be reasonably up-to-date after a crash and avoids needing
  16302. // to loop over all active queries on shutdown. Especially in the browser
  16303. // we may not get time to do anything interesting while the current tab is
  16304. // closing.
  16305. const timeDelta = newTargetData.snapshotVersion.toMicroseconds() -
  16306. oldTargetData.snapshotVersion.toMicroseconds();
  16307. if (timeDelta >= RESUME_TOKEN_MAX_AGE_MICROS) {
  16308. return true;
  16309. }
  16310. // Otherwise if the only thing that has changed about a target is its resume
  16311. // token it's not worth persisting. Note that the RemoteStore keeps an
  16312. // in-memory view of the currently active targets which includes the current
  16313. // resume token, so stream failure or user changes will still use an
  16314. // up-to-date resume token regardless of what we do here.
  16315. const changes = change.addedDocuments.size +
  16316. change.modifiedDocuments.size +
  16317. change.removedDocuments.size;
  16318. return changes > 0;
  16319. }
  16320. /**
  16321. * Notifies local store of the changed views to locally pin documents.
  16322. */
  16323. async function localStoreNotifyLocalViewChanges(localStore, viewChanges) {
  16324. const localStoreImpl = debugCast(localStore);
  16325. try {
  16326. await localStoreImpl.persistence.runTransaction('notifyLocalViewChanges', 'readwrite', txn => {
  16327. return PersistencePromise.forEach(viewChanges, (viewChange) => {
  16328. return PersistencePromise.forEach(viewChange.addedKeys, (key) => localStoreImpl.persistence.referenceDelegate.addReference(txn, viewChange.targetId, key)).next(() => PersistencePromise.forEach(viewChange.removedKeys, (key) => localStoreImpl.persistence.referenceDelegate.removeReference(txn, viewChange.targetId, key)));
  16329. });
  16330. });
  16331. }
  16332. catch (e) {
  16333. if (isIndexedDbTransactionError(e)) {
  16334. // If `notifyLocalViewChanges` fails, we did not advance the sequence
  16335. // number for the documents that were included in this transaction.
  16336. // This might trigger them to be deleted earlier than they otherwise
  16337. // would have, but it should not invalidate the integrity of the data.
  16338. logDebug(LOG_TAG$b, 'Failed to update sequence numbers: ' + e);
  16339. }
  16340. else {
  16341. throw e;
  16342. }
  16343. }
  16344. for (const viewChange of viewChanges) {
  16345. const targetId = viewChange.targetId;
  16346. if (!viewChange.fromCache) {
  16347. const targetData = localStoreImpl.targetDataByTarget.get(targetId);
  16348. // Advance the last limbo free snapshot version
  16349. const lastLimboFreeSnapshotVersion = targetData.snapshotVersion;
  16350. const updatedTargetData = targetData.withLastLimboFreeSnapshotVersion(lastLimboFreeSnapshotVersion);
  16351. localStoreImpl.targetDataByTarget =
  16352. localStoreImpl.targetDataByTarget.insert(targetId, updatedTargetData);
  16353. }
  16354. }
  16355. }
  16356. /**
  16357. * Gets the mutation batch after the passed in batchId in the mutation queue
  16358. * or null if empty.
  16359. * @param afterBatchId - If provided, the batch to search after.
  16360. * @returns The next mutation or null if there wasn't one.
  16361. */
  16362. function localStoreGetNextMutationBatch(localStore, afterBatchId) {
  16363. const localStoreImpl = debugCast(localStore);
  16364. return localStoreImpl.persistence.runTransaction('Get next mutation batch', 'readonly', txn => {
  16365. if (afterBatchId === undefined) {
  16366. afterBatchId = BATCHID_UNKNOWN;
  16367. }
  16368. return localStoreImpl.mutationQueue.getNextMutationBatchAfterBatchId(txn, afterBatchId);
  16369. });
  16370. }
  16371. /**
  16372. * Reads the current value of a Document with a given key or null if not
  16373. * found - used for testing.
  16374. */
  16375. function localStoreReadDocument(localStore, key) {
  16376. const localStoreImpl = debugCast(localStore);
  16377. return localStoreImpl.persistence.runTransaction('read document', 'readonly', txn => localStoreImpl.localDocuments.getDocument(txn, key));
  16378. }
  16379. /**
  16380. * Assigns the given target an internal ID so that its results can be pinned so
  16381. * they don't get GC'd. A target must be allocated in the local store before
  16382. * the store can be used to manage its view.
  16383. *
  16384. * Allocating an already allocated `Target` will return the existing `TargetData`
  16385. * for that `Target`.
  16386. */
  16387. function localStoreAllocateTarget(localStore, target) {
  16388. const localStoreImpl = debugCast(localStore);
  16389. return localStoreImpl.persistence
  16390. .runTransaction('Allocate target', 'readwrite', txn => {
  16391. let targetData;
  16392. return localStoreImpl.targetCache
  16393. .getTargetData(txn, target)
  16394. .next((cached) => {
  16395. if (cached) {
  16396. // This target has been listened to previously, so reuse the
  16397. // previous targetID.
  16398. // TODO(mcg): freshen last accessed date?
  16399. targetData = cached;
  16400. return PersistencePromise.resolve(targetData);
  16401. }
  16402. else {
  16403. return localStoreImpl.targetCache
  16404. .allocateTargetId(txn)
  16405. .next(targetId => {
  16406. targetData = new TargetData(target, targetId, 0 /* TargetPurpose.Listen */, txn.currentSequenceNumber);
  16407. return localStoreImpl.targetCache
  16408. .addTargetData(txn, targetData)
  16409. .next(() => targetData);
  16410. });
  16411. }
  16412. });
  16413. })
  16414. .then(targetData => {
  16415. // If Multi-Tab is enabled, the existing target data may be newer than
  16416. // the in-memory data
  16417. const cachedTargetData = localStoreImpl.targetDataByTarget.get(targetData.targetId);
  16418. if (cachedTargetData === null ||
  16419. targetData.snapshotVersion.compareTo(cachedTargetData.snapshotVersion) >
  16420. 0) {
  16421. localStoreImpl.targetDataByTarget =
  16422. localStoreImpl.targetDataByTarget.insert(targetData.targetId, targetData);
  16423. localStoreImpl.targetIdByTarget.set(target, targetData.targetId);
  16424. }
  16425. return targetData;
  16426. });
  16427. }
  16428. /**
  16429. * Returns the TargetData as seen by the LocalStore, including updates that may
  16430. * have not yet been persisted to the TargetCache.
  16431. */
  16432. // Visible for testing.
  16433. function localStoreGetTargetData(localStore, transaction, target) {
  16434. const localStoreImpl = debugCast(localStore);
  16435. const targetId = localStoreImpl.targetIdByTarget.get(target);
  16436. if (targetId !== undefined) {
  16437. return PersistencePromise.resolve(localStoreImpl.targetDataByTarget.get(targetId));
  16438. }
  16439. else {
  16440. return localStoreImpl.targetCache.getTargetData(transaction, target);
  16441. }
  16442. }
  16443. /**
  16444. * Unpins all the documents associated with the given target. If
  16445. * `keepPersistedTargetData` is set to false and Eager GC enabled, the method
  16446. * directly removes the associated target data from the target cache.
  16447. *
  16448. * Releasing a non-existing `Target` is a no-op.
  16449. */
  16450. // PORTING NOTE: `keepPersistedTargetData` is multi-tab only.
  16451. async function localStoreReleaseTarget(localStore, targetId, keepPersistedTargetData) {
  16452. const localStoreImpl = debugCast(localStore);
  16453. const targetData = localStoreImpl.targetDataByTarget.get(targetId);
  16454. const mode = keepPersistedTargetData ? 'readwrite' : 'readwrite-primary';
  16455. try {
  16456. if (!keepPersistedTargetData) {
  16457. await localStoreImpl.persistence.runTransaction('Release target', mode, txn => {
  16458. return localStoreImpl.persistence.referenceDelegate.removeTarget(txn, targetData);
  16459. });
  16460. }
  16461. }
  16462. catch (e) {
  16463. if (isIndexedDbTransactionError(e)) {
  16464. // All `releaseTarget` does is record the final metadata state for the
  16465. // target, but we've been recording this periodically during target
  16466. // activity. If we lose this write this could cause a very slight
  16467. // difference in the order of target deletion during GC, but we
  16468. // don't define exact LRU semantics so this is acceptable.
  16469. logDebug(LOG_TAG$b, `Failed to update sequence numbers for target ${targetId}: ${e}`);
  16470. }
  16471. else {
  16472. throw e;
  16473. }
  16474. }
  16475. localStoreImpl.targetDataByTarget =
  16476. localStoreImpl.targetDataByTarget.remove(targetId);
  16477. localStoreImpl.targetIdByTarget.delete(targetData.target);
  16478. }
/**
 * Runs the specified query against the local store and returns the results,
 * potentially taking advantage of query data from previous executions (such
 * as the set of remote keys).
 *
 * @param usePreviousResults - Whether results from previous executions can
 * be used to optimize this query execution.
 * @returns The matching documents plus the keys the backend previously
 * reported as matching this query's target (empty set if none).
 */
function localStoreExecuteQuery(localStore, query, usePreviousResults) {
    const localStoreImpl = debugCast(localStore);
    let lastLimboFreeSnapshotVersion = SnapshotVersion.min();
    let remoteKeys = documentKeySet();
    return localStoreImpl.persistence.runTransaction('Execute query', 'readonly', txn => {
        return localStoreGetTargetData(localStoreImpl, txn, queryToTarget(query))
            .next(targetData => {
            if (targetData) {
                // The query has an allocated target: capture its last
                // limbo-free snapshot version and its previously matching
                // keys so the query engine can reuse previous results.
                lastLimboFreeSnapshotVersion =
                    targetData.lastLimboFreeSnapshotVersion;
                return localStoreImpl.targetCache
                    .getMatchingKeysForTargetId(txn, targetData.targetId)
                    .next(result => {
                    remoteKeys = result;
                });
            }
        })
            .next(() => localStoreImpl.queryEngine.getDocumentsMatchingQuery(txn, query, usePreviousResults
            ? lastLimboFreeSnapshotVersion
            : SnapshotVersion.min(), usePreviousResults ? remoteKeys : documentKeySet()))
            .next(documents => {
            // Record the highest read time observed for this collection group
            // (consumed by localStoreGetNewDocumentChanges in multi-tab mode).
            setMaxReadTime(localStoreImpl, queryCollectionGroup(query), documents);
            return { documents, remoteKeys };
        });
    });
}
/**
 * Applies an acknowledged mutation batch to the remote document cache and then
 * removes the batch from the mutation queue.
 *
 * @param txn - Transaction to read and write documents in.
 * @param batchResult - The acknowledged batch along with the per-document
 * commit versions reported by the backend.
 * @param documentBuffer - Buffer collecting the document updates to be applied
 * to storage.
 */
function applyWriteToRemoteDocuments(localStoreImpl, txn, batchResult, documentBuffer) {
    const batch = batchResult.batch;
    const docKeys = batch.keys();
    let promiseChain = PersistencePromise.resolve();
    docKeys.forEach(docKey => {
        promiseChain = promiseChain
            .next(() => documentBuffer.getEntry(txn, docKey))
            .next(doc => {
            const ackVersion = batchResult.docVersions.get(docKey);
            hardAssert(ackVersion !== null);
            // Only apply the write if the cached copy is older than the
            // version the backend acknowledged.
            if (doc.version.compareTo(ackVersion) < 0) {
                batch.applyToRemoteDocument(doc, batchResult);
                if (doc.isValidDocument()) {
                    // We use the commitVersion as the readTime rather than the
                    // document's updateTime since the updateTime is not advanced
                    // for updates that do not modify the underlying document.
                    doc.setReadTime(batchResult.commitVersion);
                    documentBuffer.addEntry(doc);
                }
            }
        });
    });
    // After every affected document is buffered, drop the batch from the
    // mutation queue.
    return promiseChain.next(() => localStoreImpl.mutationQueue.removeMutationBatch(txn, batch));
}
  16537. /** Returns the local view of the documents affected by a mutation batch. */
  16538. // PORTING NOTE: Multi-Tab only.
  16539. function localStoreLookupMutationDocuments(localStore, batchId) {
  16540. const localStoreImpl = debugCast(localStore);
  16541. const mutationQueueImpl = debugCast(localStoreImpl.mutationQueue);
  16542. return localStoreImpl.persistence.runTransaction('Lookup mutation documents', 'readonly', txn => {
  16543. return mutationQueueImpl.lookupMutationKeys(txn, batchId).next(keys => {
  16544. if (keys) {
  16545. return localStoreImpl.localDocuments.getDocuments(txn, keys);
  16546. }
  16547. else {
  16548. return PersistencePromise.resolve(null);
  16549. }
  16550. });
  16551. });
  16552. }
  16553. // PORTING NOTE: Multi-Tab only.
  16554. function localStoreRemoveCachedMutationBatchMetadata(localStore, batchId) {
  16555. const mutationQueueImpl = debugCast(debugCast(localStore, LocalStoreImpl).mutationQueue);
  16556. mutationQueueImpl.removeCachedMutationKeys(batchId);
  16557. }
  16558. // PORTING NOTE: Multi-Tab only.
  16559. function localStoreGetActiveClients(localStore) {
  16560. const persistenceImpl = debugCast(debugCast(localStore, LocalStoreImpl).persistence);
  16561. return persistenceImpl.getActiveClients();
  16562. }
  16563. // PORTING NOTE: Multi-Tab only.
  16564. function localStoreGetCachedTarget(localStore, targetId) {
  16565. const localStoreImpl = debugCast(localStore);
  16566. const targetCacheImpl = debugCast(localStoreImpl.targetCache);
  16567. const cachedTargetData = localStoreImpl.targetDataByTarget.get(targetId);
  16568. if (cachedTargetData) {
  16569. return Promise.resolve(cachedTargetData.target);
  16570. }
  16571. else {
  16572. return localStoreImpl.persistence.runTransaction('Get target data', 'readonly', txn => {
  16573. return targetCacheImpl
  16574. .getTargetDataForTarget(txn, targetId)
  16575. .next(targetData => (targetData ? targetData.target : null));
  16576. });
  16577. }
  16578. }
  16579. /**
  16580. * Returns the set of documents that have been updated since the last call.
  16581. * If this is the first call, returns the set of changes since client
  16582. * initialization. Further invocations will return document that have changed
  16583. * since the prior call.
  16584. */
  16585. // PORTING NOTE: Multi-Tab only.
  16586. function localStoreGetNewDocumentChanges(localStore, collectionGroup) {
  16587. const localStoreImpl = debugCast(localStore);
  16588. // Get the current maximum read time for the collection. This should always
  16589. // exist, but to reduce the chance for regressions we default to
  16590. // SnapshotVersion.Min()
  16591. // TODO(indexing): Consider removing the default value.
  16592. const readTime = localStoreImpl.collectionGroupReadTime.get(collectionGroup) ||
  16593. SnapshotVersion.min();
  16594. return localStoreImpl.persistence
  16595. .runTransaction('Get new document changes', 'readonly', txn => localStoreImpl.remoteDocuments.getAllFromCollectionGroup(txn, collectionGroup, newIndexOffsetSuccessorFromReadTime(readTime, INITIAL_LARGEST_BATCH_ID),
  16596. /* limit= */ Number.MAX_SAFE_INTEGER))
  16597. .then(changedDocs => {
  16598. setMaxReadTime(localStoreImpl, collectionGroup, changedDocs);
  16599. return changedDocs;
  16600. });
  16601. }
  16602. /** Sets the collection group's maximum read time from the given documents. */
  16603. // PORTING NOTE: Multi-Tab only.
  16604. function setMaxReadTime(localStoreImpl, collectionGroup, changedDocs) {
  16605. let readTime = localStoreImpl.collectionGroupReadTime.get(collectionGroup) ||
  16606. SnapshotVersion.min();
  16607. changedDocs.forEach((_, doc) => {
  16608. if (doc.readTime.compareTo(readTime) > 0) {
  16609. readTime = doc.readTime;
  16610. }
  16611. });
  16612. localStoreImpl.collectionGroupReadTime.set(collectionGroup, readTime);
  16613. }
  16614. /**
  16615. * Creates a new target using the given bundle name, which will be used to
  16616. * hold the keys of all documents from the bundle in query-document mappings.
  16617. * This ensures that the loaded documents do not get garbage collected
  16618. * right away.
  16619. */
  16620. function umbrellaTarget(bundleName) {
  16621. // It is OK that the path used for the query is not valid, because this will
  16622. // not be read and queried.
  16623. return queryToTarget(newQueryForPath(ResourcePath.fromString(`__bundle__/docs/${bundleName}`)));
  16624. }
/**
 * Applies the documents from a bundle to the "ground-state" (remote)
 * documents.
 *
 * LocalDocuments are re-calculated if there are remaining mutations in the
 * queue.
 *
 * @param bundleConverter - Converts bundled protos into SDK model objects.
 * @param documents - The bundled documents to apply.
 * @param bundleName - Name of the bundle; used to allocate an umbrella target
 * that pins the loaded documents.
 * @returns The changed documents resulting from applying the bundle.
 */
async function localStoreApplyBundledDocuments(localStore, bundleConverter, documents, bundleName) {
    const localStoreImpl = debugCast(localStore);
    let documentKeys = documentKeySet();
    let documentMap = mutableDocumentMap();
    for (const bundleDoc of documents) {
        const documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
        // Only entries that carry an actual document are pinned by the
        // umbrella target below.
        if (bundleDoc.document) {
            documentKeys = documentKeys.add(documentKey);
        }
        const doc = bundleConverter.toMutableDocument(bundleDoc);
        doc.setReadTime(bundleConverter.toSnapshotVersion(bundleDoc.metadata.readTime));
        documentMap = documentMap.insert(documentKey, doc);
    }
    const documentBuffer = localStoreImpl.remoteDocuments.newChangeBuffer({
        trackRemovals: true // Make sure document removals show up in `getNewDocumentChanges()`
    });
    // Allocates a target to hold all document keys from the bundle, such that
    // they will not get garbage collected right away.
    const umbrellaTargetData = await localStoreAllocateTarget(localStoreImpl, umbrellaTarget(bundleName));
    return localStoreImpl.persistence.runTransaction('Apply bundle documents', 'readwrite', txn => {
        return populateDocumentChangeBuffer(txn, documentBuffer, documentMap)
            .next(documentChangeResult => {
            documentBuffer.apply(txn);
            return documentChangeResult;
        })
            .next(documentChangeResult => {
            // Re-point the umbrella target at exactly the keys from this
            // bundle, then compute the local view of the changed documents.
            return localStoreImpl.targetCache
                .removeMatchingKeysForTargetId(txn, umbrellaTargetData.targetId)
                .next(() => localStoreImpl.targetCache.addMatchingKeys(txn, documentKeys, umbrellaTargetData.targetId))
                .next(() => localStoreImpl.localDocuments.getLocalViewOfDocuments(txn, documentChangeResult.changedDocuments, documentChangeResult.existenceChangedKeys))
                .next(() => documentChangeResult.changedDocuments);
        });
    });
}
  16666. /**
  16667. * Returns a promise of a boolean to indicate if the given bundle has already
  16668. * been loaded and the create time is newer than the current loading bundle.
  16669. */
  16670. function localStoreHasNewerBundle(localStore, bundleMetadata) {
  16671. const localStoreImpl = debugCast(localStore);
  16672. const currentReadTime = fromVersion(bundleMetadata.createTime);
  16673. return localStoreImpl.persistence
  16674. .runTransaction('hasNewerBundle', 'readonly', transaction => {
  16675. return localStoreImpl.bundleCache.getBundleMetadata(transaction, bundleMetadata.id);
  16676. })
  16677. .then(cached => {
  16678. return !!cached && cached.createTime.compareTo(currentReadTime) >= 0;
  16679. });
  16680. }
  16681. /**
  16682. * Saves the given `BundleMetadata` to local persistence.
  16683. */
  16684. function localStoreSaveBundle(localStore, bundleMetadata) {
  16685. const localStoreImpl = debugCast(localStore);
  16686. return localStoreImpl.persistence.runTransaction('Save bundle', 'readwrite', transaction => {
  16687. return localStoreImpl.bundleCache.saveBundleMetadata(transaction, bundleMetadata);
  16688. });
  16689. }
  16690. /**
  16691. * Returns a promise of a `NamedQuery` associated with given query name. Promise
  16692. * resolves to undefined if no persisted data can be found.
  16693. */
  16694. function localStoreGetNamedQuery(localStore, queryName) {
  16695. const localStoreImpl = debugCast(localStore);
  16696. return localStoreImpl.persistence.runTransaction('Get named query', 'readonly', transaction => localStoreImpl.bundleCache.getNamedQuery(transaction, queryName));
  16697. }
/**
 * Saves the given `NamedQuery` to local persistence.
 *
 * @param query - The named query from the bundle.
 * @param documents - Keys of the documents matching the query at its read
 * time; registered as the allocated target's matching keys.
 */
async function localStoreSaveNamedQuery(localStore, query, documents = documentKeySet()) {
    // Allocate a target for the named query such that it can be resumed
    // from associated read time if users use it to listen.
    // NOTE: this also means if no corresponding target exists, the new target
    // will remain active and will not get collected, unless users happen to
    // unlisten the query somehow.
    const allocated = await localStoreAllocateTarget(localStore, queryToTarget(fromBundledQuery(query.bundledQuery)));
    const localStoreImpl = debugCast(localStore);
    return localStoreImpl.persistence.runTransaction('Save named query', 'readwrite', transaction => {
        const readTime = fromVersion(query.readTime);
        // Simply save the query itself if it is older than what the SDK already
        // has.
        if (allocated.snapshotVersion.compareTo(readTime) >= 0) {
            return localStoreImpl.bundleCache.saveNamedQuery(transaction, query);
        }
        // Update existing target data because the query from the bundle is newer:
        // reset the resume token to the bundle's read time, replace the target's
        // matching keys with `documents`, and persist the named query.
        const newTargetData = allocated.withResumeToken(ByteString.EMPTY_BYTE_STRING, readTime);
        localStoreImpl.targetDataByTarget =
            localStoreImpl.targetDataByTarget.insert(newTargetData.targetId, newTargetData);
        return localStoreImpl.targetCache
            .updateTargetData(transaction, newTargetData)
            .next(() => localStoreImpl.targetCache.removeMatchingKeysForTargetId(transaction, allocated.targetId))
            .next(() => localStoreImpl.targetCache.addMatchingKeys(transaction, documents, allocated.targetId))
            .next(() => localStoreImpl.bundleCache.saveNamedQuery(transaction, query));
    });
}
  16727. async function localStoreConfigureFieldIndexes(localStore, newFieldIndexes) {
  16728. const localStoreImpl = debugCast(localStore);
  16729. const indexManager = localStoreImpl.indexManager;
  16730. const promises = [];
  16731. return localStoreImpl.persistence.runTransaction('Configure indexes', 'readwrite', transaction => indexManager
  16732. .getFieldIndexes(transaction)
  16733. .next(oldFieldIndexes => diffArrays(oldFieldIndexes, newFieldIndexes, fieldIndexSemanticComparator, fieldIndex => {
  16734. promises.push(indexManager.addFieldIndex(transaction, fieldIndex));
  16735. }, fieldIndex => {
  16736. promises.push(indexManager.deleteFieldIndex(transaction, fieldIndex));
  16737. }))
  16738. .next(() => PersistencePromise.waitFor(promises)));
  16739. }
  16740. /**
  16741. * @license
  16742. * Copyright 2019 Google LLC
  16743. *
  16744. * Licensed under the Apache License, Version 2.0 (the "License");
  16745. * you may not use this file except in compliance with the License.
  16746. * You may obtain a copy of the License at
  16747. *
  16748. * http://www.apache.org/licenses/LICENSE-2.0
  16749. *
  16750. * Unless required by applicable law or agreed to in writing, software
  16751. * distributed under the License is distributed on an "AS IS" BASIS,
  16752. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  16753. * See the License for the specific language governing permissions and
  16754. * limitations under the License.
  16755. */
  16756. /**
  16757. * The Firestore query engine.
  16758. *
  16759. * Firestore queries can be executed in three modes. The Query Engine determines
  16760. * what mode to use based on what data is persisted. The mode only determines
  16761. * the runtime complexity of the query - the result set is equivalent across all
  16762. * implementations.
  16763. *
  16764. * The Query engine will use indexed-based execution if a user has configured
  16765. * any index that can be used to execute query (via `setIndexConfiguration()`).
  16766. * Otherwise, the engine will try to optimize the query by re-using a previously
  16767. * persisted query result. If that is not possible, the query will be executed
  16768. * via a full collection scan.
  16769. *
  16770. * Index-based execution is the default when available. The query engine
  16771. * supports partial indexed execution and merges the result from the index
  16772. * lookup with documents that have not yet been indexed. The index evaluation
  16773. * matches the backend's format and as such, the SDK can use indexing for all
  16774. * queries that the backend supports.
  16775. *
  16776. * If no index exists, the query engine tries to take advantage of the target
 * document mapping in the TargetCache. These mappings exist for all queries
  16778. * that have been synced with the backend at least once and allow the query
  16779. * engine to only read documents that previously matched a query plus any
  16780. * documents that were edited after the query was last listened to.
  16781. *
  16782. * There are some cases when this optimization is not guaranteed to produce
  16783. * the same results as full collection scans. In these cases, query
  16784. * processing falls back to full scans. These cases are:
  16785. *
  16786. * - Limit queries where a document that matched the query previously no longer
  16787. * matches the query.
  16788. *
  16789. * - Limit queries where a document edit may cause the document to sort below
  16790. * another document that is in the local cache.
  16791. *
  16792. * - Queries that have never been CURRENT or free of limbo documents.
  16793. */
  16794. class QueryEngine {
    constructor() {
        // Flipped to true once initialize() supplies the document view and
        // index manager.
        this.initialized = false;
    }
  16798. /** Sets the document view to query against. */
  16799. initialize(localDocuments, indexManager) {
  16800. this.localDocumentsView = localDocuments;
  16801. this.indexManager = indexManager;
  16802. this.initialized = true;
  16803. }
  16804. /** Returns all local documents matching the specified query. */
  16805. getDocumentsMatchingQuery(transaction, query, lastLimboFreeSnapshotVersion, remoteKeys) {
  16806. return this.performQueryUsingIndex(transaction, query)
  16807. .next(result => result
  16808. ? result
  16809. : this.performQueryUsingRemoteKeys(transaction, query, remoteKeys, lastLimboFreeSnapshotVersion))
  16810. .next(result => result ? result : this.executeFullCollectionScan(transaction, query));
  16811. }
    /**
     * Performs an indexed query that evaluates the query based on a collection's
     * persisted index values. Returns `null` if an index is not available.
     */
    performQueryUsingIndex(transaction, query) {
        if (queryMatchesAllDocuments(query)) {
            // Queries that match all documents don't benefit from using
            // key-based lookups. It is more efficient to scan all documents in a
            // collection, rather than to perform individual lookups.
            return PersistencePromise.resolve(null);
        }
        let target = queryToTarget(query);
        return this.indexManager
            .getIndexType(transaction, target)
            .next(indexType => {
            if (indexType === 0 /* IndexType.NONE */) {
                // The target cannot be served from any index.
                return null;
            }
            if (query.limit !== null && indexType === 1 /* IndexType.PARTIAL */) {
                // We cannot apply a limit for targets that are served using a partial
                // index. If a partial index will be used to serve the target, the
                // query may return a superset of documents that match the target
                // (e.g. if the index doesn't include all the target's filters), or
                // may return the correct set of documents in the wrong order (e.g. if
                // the index doesn't include a segment for one of the orderBys).
                // Therefore, a limit should not be applied in such cases.
                query = queryWithLimit(query, null, "F" /* LimitType.First */);
                target = queryToTarget(query);
            }
            return this.indexManager
                .getDocumentsMatchingTarget(transaction, target)
                .next(keys => {
                const sortedKeys = documentKeySet(...keys);
                return this.localDocumentsView
                    .getDocuments(transaction, sortedKeys)
                    .next(indexedDocuments => {
                    return this.indexManager
                        .getMinOffset(transaction, target)
                        .next(offset => {
                        const previousResults = this.applyQuery(query, indexedDocuments);
                        if (this.needsRefill(query, previousResults, sortedKeys, offset.readTime)) {
                            // A limit query whose boundaries change due to local
                            // edits can be re-run against the cache by excluding the
                            // limit. This ensures that all documents that match the
                            // query's filters are included in the result set. The SDK
                            // can then apply the limit once all local edits are
                            // incorporated.
                            return this.performQueryUsingIndex(transaction, queryWithLimit(query, null, "F" /* LimitType.First */));
                        }
                        // Merge the index results with documents that have not
                        // yet been indexed (those past the index's min offset).
                        return this.appendRemainingResults(transaction, previousResults, query, offset);
                    });
                });
            });
        });
    }
/**
 * Performs a query based on the target's persisted query mapping (the set of
 * document keys that matched the query at the last synchronized snapshot).
 * Falls back to a full collection scan when the mapping cannot be used.
 */
performQueryUsingRemoteKeys(transaction, query, remoteKeys, lastLimboFreeSnapshotVersion) {
  if (queryMatchesAllDocuments(query)) {
    // Queries that match all documents don't benefit from using
    // key-based lookups. It is more efficient to scan all documents in a
    // collection, rather than to perform individual lookups.
    return this.executeFullCollectionScan(transaction, query);
  }
  // Queries that have never seen a snapshot without limbo free documents
  // should also be run as a full collection scan.
  if (lastLimboFreeSnapshotVersion.isEqual(SnapshotVersion.min())) {
    return this.executeFullCollectionScan(transaction, query);
  }
  return this.localDocumentsView.getDocuments(transaction, remoteKeys).next(documents => {
    // Re-apply filters/ordering since previously matching documents may have
    // been edited locally and might no longer match.
    const previousResults = this.applyQuery(query, documents);
    if (this.needsRefill(query, previousResults, remoteKeys, lastLimboFreeSnapshotVersion)) {
      // A limit query's boundary moved due to local edits; the cached key set
      // can no longer be trusted to be complete.
      return this.executeFullCollectionScan(transaction, query);
    }
    if (getLogLevel() <= LogLevel.DEBUG) {
      logDebug('QueryEngine', 'Re-using previous result from %s to execute query: %s', lastLimboFreeSnapshotVersion.toString(), stringifyQuery(query));
    }
    // Retrieve all results for documents that were updated since the last
    // limbo-document free remote snapshot.
    return this.appendRemainingResults(transaction, previousResults, query, newIndexOffsetSuccessorFromReadTime(lastLimboFreeSnapshotVersion, INITIAL_LARGEST_BATCH_ID));
  });
}
  16897. /** Applies the query filter and sorting to the provided documents. */
  16898. applyQuery(query, documents) {
  16899. // Sort the documents and re-apply the query filter since previously
  16900. // matching documents do not necessarily still match the query.
  16901. let queryResults = new SortedSet(newQueryComparator(query));
  16902. documents.forEach((_, maybeDoc) => {
  16903. if (queryMatches(query, maybeDoc)) {
  16904. queryResults = queryResults.add(maybeDoc);
  16905. }
  16906. });
  16907. return queryResults;
  16908. }
  16909. /**
  16910. * Determines if a limit query needs to be refilled from cache, making it
  16911. * ineligible for index-free execution.
  16912. *
  16913. * @param query - The query.
  16914. * @param sortedPreviousResults - The documents that matched the query when it
  16915. * was last synchronized, sorted by the query's comparator.
  16916. * @param remoteKeys - The document keys that matched the query at the last
  16917. * snapshot.
  16918. * @param limboFreeSnapshotVersion - The version of the snapshot when the
  16919. * query was last synchronized.
  16920. */
  16921. needsRefill(query, sortedPreviousResults, remoteKeys, limboFreeSnapshotVersion) {
  16922. if (query.limit === null) {
  16923. // Queries without limits do not need to be refilled.
  16924. return false;
  16925. }
  16926. if (remoteKeys.size !== sortedPreviousResults.size) {
  16927. // The query needs to be refilled if a previously matching document no
  16928. // longer matches.
  16929. return true;
  16930. }
  16931. // Limit queries are not eligible for index-free query execution if there is
  16932. // a potential that an older document from cache now sorts before a document
  16933. // that was previously part of the limit. This, however, can only happen if
  16934. // the document at the edge of the limit goes out of limit.
  16935. // If a document that is not the limit boundary sorts differently,
  16936. // the boundary of the limit itself did not change and documents from cache
  16937. // will continue to be "rejected" by this boundary. Therefore, we can ignore
  16938. // any modifications that don't affect the last document.
  16939. const docAtLimitEdge = query.limitType === "F" /* LimitType.First */
  16940. ? sortedPreviousResults.last()
  16941. : sortedPreviousResults.first();
  16942. if (!docAtLimitEdge) {
  16943. // We don't need to refill the query if there were already no documents.
  16944. return false;
  16945. }
  16946. return (docAtLimitEdge.hasPendingWrites ||
  16947. docAtLimitEdge.version.compareTo(limboFreeSnapshotVersion) > 0);
  16948. }
// Fallback path: scans every document in the collection from offset zero.
// Used when no index or cached query mapping can serve the query.
executeFullCollectionScan(transaction, query) {
  if (getLogLevel() <= LogLevel.DEBUG) {
    logDebug('QueryEngine', 'Using full collection scan to execute query:', stringifyQuery(query));
  }
  return this.localDocumentsView.getDocumentsMatchingQuery(transaction, query, IndexOffset.min());
}
  16955. /**
  16956. * Combines the results from an indexed execution with the remaining documents
  16957. * that have not yet been indexed.
  16958. */
  16959. appendRemainingResults(transaction, indexedResults, query, offset) {
  16960. // Retrieve all results for documents that were updated since the offset.
  16961. return this.localDocumentsView
  16962. .getDocumentsMatchingQuery(transaction, query, offset)
  16963. .next(remainingResults => {
  16964. // Merge with existing results
  16965. indexedResults.forEach(d => {
  16966. remainingResults = remainingResults.insert(d.key, d);
  16967. });
  16968. return remainingResults;
  16969. });
  16970. }
  16971. }
  16972. /**
  16973. * @license
  16974. * Copyright 2019 Google LLC
  16975. *
  16976. * Licensed under the Apache License, Version 2.0 (the "License");
  16977. * you may not use this file except in compliance with the License.
  16978. * You may obtain a copy of the License at
  16979. *
  16980. * http://www.apache.org/licenses/LICENSE-2.0
  16981. *
  16982. * Unless required by applicable law or agreed to in writing, software
  16983. * distributed under the License is distributed on an "AS IS" BASIS,
  16984. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  16985. * See the License for the specific language governing permissions and
  16986. * limitations under the License.
  16987. */
  16988. // The format of the LocalStorage key that stores the client state is:
  16989. // firestore_clients_<persistence_prefix>_<instance_key>
  16990. const CLIENT_STATE_KEY_PREFIX = 'firestore_clients';
  16991. /** Assembles the key for a client state in WebStorage */
  16992. function createWebStorageClientStateKey(persistenceKey, clientId) {
  16993. return `${CLIENT_STATE_KEY_PREFIX}_${persistenceKey}_${clientId}`;
  16994. }
  16995. // The format of the WebStorage key that stores the mutation state is:
  16996. // firestore_mutations_<persistence_prefix>_<batch_id>
  16997. // (for unauthenticated users)
  16998. // or: firestore_mutations_<persistence_prefix>_<batch_id>_<user_uid>
  16999. //
  17000. // 'user_uid' is last to avoid needing to escape '_' characters that it might
  17001. // contain.
  17002. const MUTATION_BATCH_KEY_PREFIX = 'firestore_mutations';
  17003. /** Assembles the key for a mutation batch in WebStorage */
  17004. function createWebStorageMutationBatchKey(persistenceKey, user, batchId) {
  17005. let mutationKey = `${MUTATION_BATCH_KEY_PREFIX}_${persistenceKey}_${batchId}`;
  17006. if (user.isAuthenticated()) {
  17007. mutationKey += `_${user.uid}`;
  17008. }
  17009. return mutationKey;
  17010. }
  17011. // The format of the WebStorage key that stores a query target's metadata is:
  17012. // firestore_targets_<persistence_prefix>_<target_id>
  17013. const QUERY_TARGET_KEY_PREFIX = 'firestore_targets';
  17014. /** Assembles the key for a query state in WebStorage */
  17015. function createWebStorageQueryTargetMetadataKey(persistenceKey, targetId) {
  17016. return `${QUERY_TARGET_KEY_PREFIX}_${persistenceKey}_${targetId}`;
  17017. }
  17018. // The WebStorage prefix that stores the primary tab's online state. The
  17019. // format of the key is:
  17020. // firestore_online_state_<persistence_prefix>
  17021. const ONLINE_STATE_KEY_PREFIX = 'firestore_online_state';
  17022. /** Assembles the key for the online state of the primary tab. */
  17023. function createWebStorageOnlineStateKey(persistenceKey) {
  17024. return `${ONLINE_STATE_KEY_PREFIX}_${persistenceKey}`;
  17025. }
  17026. // The WebStorage prefix that plays as a event to indicate the remote documents
  17027. // might have changed due to some secondary tabs loading a bundle.
  17028. // format of the key is:
  17029. // firestore_bundle_loaded_v2_<persistenceKey>
  17030. // The version ending with "v2" stores the list of modified collection groups.
  17031. const BUNDLE_LOADED_KEY_PREFIX = 'firestore_bundle_loaded_v2';
  17032. function createBundleLoadedKey(persistenceKey) {
  17033. return `${BUNDLE_LOADED_KEY_PREFIX}_${persistenceKey}`;
  17034. }
  17035. // The WebStorage key prefix for the key that stores the last sequence number allocated. The key
  17036. // looks like 'firestore_sequence_number_<persistence_prefix>'.
  17037. const SEQUENCE_NUMBER_KEY_PREFIX = 'firestore_sequence_number';
  17038. /** Assembles the key for the current sequence number. */
  17039. function createWebStorageSequenceNumberKey(persistenceKey) {
  17040. return `${SEQUENCE_NUMBER_KEY_PREFIX}_${persistenceKey}`;
  17041. }
  17042. /**
  17043. * @license
  17044. * Copyright 2018 Google LLC
  17045. *
  17046. * Licensed under the Apache License, Version 2.0 (the "License");
  17047. * you may not use this file except in compliance with the License.
  17048. * You may obtain a copy of the License at
  17049. *
  17050. * http://www.apache.org/licenses/LICENSE-2.0
  17051. *
  17052. * Unless required by applicable law or agreed to in writing, software
  17053. * distributed under the License is distributed on an "AS IS" BASIS,
  17054. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17055. * See the License for the specific language governing permissions and
  17056. * limitations under the License.
  17057. */
  17058. const LOG_TAG$a = 'SharedClientState';
  17059. /**
  17060. * Holds the state of a mutation batch, including its user ID, batch ID and
  17061. * whether the batch is 'pending', 'acknowledged' or 'rejected'.
  17062. */
  17063. // Visible for testing
  17064. class MutationMetadata {
  17065. constructor(user, batchId, state, error) {
  17066. this.user = user;
  17067. this.batchId = batchId;
  17068. this.state = state;
  17069. this.error = error;
  17070. }
  17071. /**
  17072. * Parses a MutationMetadata from its JSON representation in WebStorage.
  17073. * Logs a warning and returns null if the format of the data is not valid.
  17074. */
  17075. static fromWebStorageEntry(user, batchId, value) {
  17076. const mutationBatch = JSON.parse(value);
  17077. let validData = typeof mutationBatch === 'object' &&
  17078. ['pending', 'acknowledged', 'rejected'].indexOf(mutationBatch.state) !==
  17079. -1 &&
  17080. (mutationBatch.error === undefined ||
  17081. typeof mutationBatch.error === 'object');
  17082. let firestoreError = undefined;
  17083. if (validData && mutationBatch.error) {
  17084. validData =
  17085. typeof mutationBatch.error.message === 'string' &&
  17086. typeof mutationBatch.error.code === 'string';
  17087. if (validData) {
  17088. firestoreError = new FirestoreError(mutationBatch.error.code, mutationBatch.error.message);
  17089. }
  17090. }
  17091. if (validData) {
  17092. return new MutationMetadata(user, batchId, mutationBatch.state, firestoreError);
  17093. }
  17094. else {
  17095. logError(LOG_TAG$a, `Failed to parse mutation state for ID '${batchId}': ${value}`);
  17096. return null;
  17097. }
  17098. }
  17099. toWebStorageJSON() {
  17100. const batchMetadata = {
  17101. state: this.state,
  17102. updateTimeMs: Date.now() // Modify the existing value to trigger update.
  17103. };
  17104. if (this.error) {
  17105. batchMetadata.error = {
  17106. code: this.error.code,
  17107. message: this.error.message
  17108. };
  17109. }
  17110. return JSON.stringify(batchMetadata);
  17111. }
  17112. }
  17113. /**
  17114. * Holds the state of a query target, including its target ID and whether the
  17115. * target is 'not-current', 'current' or 'rejected'.
  17116. */
  17117. // Visible for testing
  17118. class QueryTargetMetadata {
  17119. constructor(targetId, state, error) {
  17120. this.targetId = targetId;
  17121. this.state = state;
  17122. this.error = error;
  17123. }
  17124. /**
  17125. * Parses a QueryTargetMetadata from its JSON representation in WebStorage.
  17126. * Logs a warning and returns null if the format of the data is not valid.
  17127. */
  17128. static fromWebStorageEntry(targetId, value) {
  17129. const targetState = JSON.parse(value);
  17130. let validData = typeof targetState === 'object' &&
  17131. ['not-current', 'current', 'rejected'].indexOf(targetState.state) !==
  17132. -1 &&
  17133. (targetState.error === undefined ||
  17134. typeof targetState.error === 'object');
  17135. let firestoreError = undefined;
  17136. if (validData && targetState.error) {
  17137. validData =
  17138. typeof targetState.error.message === 'string' &&
  17139. typeof targetState.error.code === 'string';
  17140. if (validData) {
  17141. firestoreError = new FirestoreError(targetState.error.code, targetState.error.message);
  17142. }
  17143. }
  17144. if (validData) {
  17145. return new QueryTargetMetadata(targetId, targetState.state, firestoreError);
  17146. }
  17147. else {
  17148. logError(LOG_TAG$a, `Failed to parse target state for ID '${targetId}': ${value}`);
  17149. return null;
  17150. }
  17151. }
  17152. toWebStorageJSON() {
  17153. const targetState = {
  17154. state: this.state,
  17155. updateTimeMs: Date.now() // Modify the existing value to trigger update.
  17156. };
  17157. if (this.error) {
  17158. targetState.error = {
  17159. code: this.error.code,
  17160. message: this.error.message
  17161. };
  17162. }
  17163. return JSON.stringify(targetState);
  17164. }
  17165. }
  17166. /**
  17167. * This class represents the immutable ClientState for a client read from
  17168. * WebStorage, containing the list of active query targets.
  17169. */
  17170. class RemoteClientState {
  17171. constructor(clientId, activeTargetIds) {
  17172. this.clientId = clientId;
  17173. this.activeTargetIds = activeTargetIds;
  17174. }
  17175. /**
  17176. * Parses a RemoteClientState from the JSON representation in WebStorage.
  17177. * Logs a warning and returns null if the format of the data is not valid.
  17178. */
  17179. static fromWebStorageEntry(clientId, value) {
  17180. const clientState = JSON.parse(value);
  17181. let validData = typeof clientState === 'object' &&
  17182. clientState.activeTargetIds instanceof Array;
  17183. let activeTargetIdsSet = targetIdSet();
  17184. for (let i = 0; validData && i < clientState.activeTargetIds.length; ++i) {
  17185. validData = isSafeInteger(clientState.activeTargetIds[i]);
  17186. activeTargetIdsSet = activeTargetIdsSet.add(clientState.activeTargetIds[i]);
  17187. }
  17188. if (validData) {
  17189. return new RemoteClientState(clientId, activeTargetIdsSet);
  17190. }
  17191. else {
  17192. logError(LOG_TAG$a, `Failed to parse client data for instance '${clientId}': ${value}`);
  17193. return null;
  17194. }
  17195. }
  17196. }
  17197. /**
  17198. * This class represents the online state for all clients participating in
  17199. * multi-tab. The online state is only written to by the primary client, and
  17200. * used in secondary clients to update their query views.
  17201. */
  17202. class SharedOnlineState {
  17203. constructor(clientId, onlineState) {
  17204. this.clientId = clientId;
  17205. this.onlineState = onlineState;
  17206. }
  17207. /**
  17208. * Parses a SharedOnlineState from its JSON representation in WebStorage.
  17209. * Logs a warning and returns null if the format of the data is not valid.
  17210. */
  17211. static fromWebStorageEntry(value) {
  17212. const onlineState = JSON.parse(value);
  17213. const validData = typeof onlineState === 'object' &&
  17214. ['Unknown', 'Online', 'Offline'].indexOf(onlineState.onlineState) !==
  17215. -1 &&
  17216. typeof onlineState.clientId === 'string';
  17217. if (validData) {
  17218. return new SharedOnlineState(onlineState.clientId, onlineState.onlineState);
  17219. }
  17220. else {
  17221. logError(LOG_TAG$a, `Failed to parse online state: ${value}`);
  17222. return null;
  17223. }
  17224. }
  17225. }
  17226. /**
  17227. * Metadata state of the local client. Unlike `RemoteClientState`, this class is
  17228. * mutable and keeps track of all pending mutations, which allows us to
  17229. * update the range of pending mutation batch IDs as new mutations are added or
  17230. * removed.
  17231. *
  17232. * The data in `LocalClientState` is not read from WebStorage and instead
  17233. * updated via its instance methods. The updated state can be serialized via
  17234. * `toWebStorageJSON()`.
  17235. */
  17236. // Visible for testing.
  17237. class LocalClientState {
  17238. constructor() {
  17239. this.activeTargetIds = targetIdSet();
  17240. }
  17241. addQueryTarget(targetId) {
  17242. this.activeTargetIds = this.activeTargetIds.add(targetId);
  17243. }
  17244. removeQueryTarget(targetId) {
  17245. this.activeTargetIds = this.activeTargetIds.delete(targetId);
  17246. }
  17247. /**
  17248. * Converts this entry into a JSON-encoded format we can use for WebStorage.
  17249. * Does not encode `clientId` as it is part of the key in WebStorage.
  17250. */
  17251. toWebStorageJSON() {
  17252. const data = {
  17253. activeTargetIds: this.activeTargetIds.toArray(),
  17254. updateTimeMs: Date.now() // Modify the existing value to trigger update.
  17255. };
  17256. return JSON.stringify(data);
  17257. }
  17258. }
  17259. /**
  17260. * `WebStorageSharedClientState` uses WebStorage (window.localStorage) as the
  17261. * backing store for the SharedClientState. It keeps track of all active
  17262. * clients and supports modifications of the local client's data.
  17263. */
  17264. class WebStorageSharedClientState {
// Sets up storage keys, key-matching regexps, and the storage listener.
// `syncEngine`, `onlineStateHandler` and `sequenceNumberHandler` are wired up
// externally after construction (they start as null here).
constructor(window, queue, persistenceKey, localClientId, initialUser) {
  this.window = window;
  this.queue = queue;
  this.persistenceKey = persistenceKey;
  this.localClientId = localClientId;
  this.syncEngine = null;
  this.onlineStateHandler = null;
  this.sequenceNumberHandler = null;
  this.storageListener = this.handleWebStorageEvent.bind(this);
  this.activeClients = new SortedMap(primitiveComparator);
  this.started = false;
  /**
   * Captures WebStorage events that occur before `start()` is called. These
   * events are replayed once `WebStorageSharedClientState` is started.
   */
  this.earlyEvents = [];
  // Escape the special characters mentioned here:
  // https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
  const escapedPersistenceKey = persistenceKey.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
  this.storage = this.window.localStorage;
  this.currentUser = initialUser;
  this.localClientStorageKey = createWebStorageClientStateKey(this.persistenceKey, this.localClientId);
  this.sequenceNumberKey = createWebStorageSequenceNumberKey(this.persistenceKey);
  // The local client is always tracked as one of the active clients.
  this.activeClients = this.activeClients.insert(this.localClientId, new LocalClientState());
  this.clientStateKeyRe = new RegExp(`^${CLIENT_STATE_KEY_PREFIX}_${escapedPersistenceKey}_([^_]*)$`);
  this.mutationBatchKeyRe = new RegExp(`^${MUTATION_BATCH_KEY_PREFIX}_${escapedPersistenceKey}_(\\d+)(?:_(.*))?$`);
  this.queryTargetKeyRe = new RegExp(`^${QUERY_TARGET_KEY_PREFIX}_${escapedPersistenceKey}_(\\d+)$`);
  this.onlineStateKey = createWebStorageOnlineStateKey(this.persistenceKey);
  this.bundleLoadedKey = createBundleLoadedKey(this.persistenceKey);
  // Rather than adding the storage observer during start(), we add the
  // storage observer during initialization. This ensures that we collect
  // events before other components populate their initial state (during their
  // respective start() calls). Otherwise, we might for example miss a
  // mutation that is added after LocalStore's start() processed the existing
  // mutations but before we observe WebStorage events.
  this.window.addEventListener('storage', this.storageListener);
}
  17302. /** Returns 'true' if WebStorage is available in the current environment. */
  17303. static isAvailable(window) {
  17304. return !!(window && window.localStorage);
  17305. }
// Backfills state from WebStorage, replays buffered events, and begins
// processing live storage notifications.
async start() {
  // Retrieve the list of existing clients to backfill the data in
  // SharedClientState.
  const existingClients = await this.syncEngine.getActiveClients();
  for (const clientId of existingClients) {
    if (clientId === this.localClientId) {
      continue;
    }
    const storageItem = this.getItem(createWebStorageClientStateKey(this.persistenceKey, clientId));
    if (storageItem) {
      const clientState = RemoteClientState.fromWebStorageEntry(clientId, storageItem);
      if (clientState) {
        this.activeClients = this.activeClients.insert(clientState.clientId, clientState);
      }
    }
  }
  this.persistClientState();
  // Check if there is an existing online state and call the callback handler
  // if applicable.
  const onlineStateJSON = this.storage.getItem(this.onlineStateKey);
  if (onlineStateJSON) {
    const onlineState = this.fromWebStorageOnlineState(onlineStateJSON);
    if (onlineState) {
      this.handleOnlineStateEvent(onlineState);
    }
  }
  // Replay events captured between construction and start() (see constructor).
  for (const event of this.earlyEvents) {
    this.handleWebStorageEvent(event);
  }
  this.earlyEvents = [];
  // Register a window unload hook to remove the client metadata entry from
  // WebStorage even if `shutdown()` was not called.
  this.window.addEventListener('pagehide', () => this.shutdown());
  this.started = true;
}
// Broadcasts the latest listen-sequence number to other tabs via WebStorage.
writeSequenceNumber(sequenceNumber) {
  this.setItem(this.sequenceNumberKey, JSON.stringify(sequenceNumber));
}
// Returns the union of target IDs that any known client (local or remote)
// is currently listening to.
getAllActiveQueryTargets() {
  return this.extractActiveQueryTargets(this.activeClients);
}
  17347. isActiveQueryTarget(targetId) {
  17348. let found = false;
  17349. this.activeClients.forEach((key, value) => {
  17350. if (value.activeTargetIds.has(targetId)) {
  17351. found = true;
  17352. }
  17353. });
  17354. return found;
  17355. }
// Publishes a newly enqueued mutation batch as 'pending' to other tabs.
addPendingMutation(batchId) {
  this.persistMutationState(batchId, 'pending');
}
// Publishes the final ('acknowledged'/'rejected') state of a mutation batch
// and then immediately removes its entry.
updateMutationState(batchId, state, error) {
  this.persistMutationState(batchId, state, error);
  // Once a final mutation result is observed by other clients, they no longer
  // access the mutation's metadata entry. Since WebStorage replays events
  // in order, it is safe to delete the entry right after updating it.
  this.removeMutationState(batchId);
}
// Registers the target as active for this tab and returns its current query
// state ('not-current' unless another tab already recorded one).
addLocalQueryTarget(targetId) {
  let queryState = 'not-current';
  // Lookup an existing query state if the target ID was already registered
  // by another tab
  if (this.isActiveQueryTarget(targetId)) {
    const storageItem = this.storage.getItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
    if (storageItem) {
      const metadata = QueryTargetMetadata.fromWebStorageEntry(targetId, storageItem);
      if (metadata) {
        queryState = metadata.state;
      }
    }
  }
  this.localClientState.addQueryTarget(targetId);
  this.persistClientState();
  return queryState;
}
// Unregisters the target for this tab and publishes the updated client state.
removeLocalQueryTarget(targetId) {
  this.localClientState.removeQueryTarget(targetId);
  this.persistClientState();
}
// Returns true if this tab (as opposed to any tab) listens to the target.
isLocalQueryTarget(targetId) {
  return this.localClientState.activeTargetIds.has(targetId);
}
// Removes the target's metadata entry from WebStorage.
clearQueryState(targetId) {
  this.removeItem(createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId));
}
// Publishes the target's new state ('not-current'/'current'/'rejected').
updateQueryState(targetId, state, error) {
  this.persistQueryTargetState(targetId, state, error);
}
  17396. handleUserChange(user, removedBatchIds, addedBatchIds) {
  17397. removedBatchIds.forEach(batchId => {
  17398. this.removeMutationState(batchId);
  17399. });
  17400. this.currentUser = user;
  17401. addedBatchIds.forEach(batchId => {
  17402. this.addPendingMutation(batchId);
  17403. });
  17404. }
// Publishes the primary tab's online state to all other tabs.
setOnlineState(onlineState) {
  this.persistOnlineState(onlineState);
}
// Notifies other tabs that a bundle load modified the given collection groups.
notifyBundleLoaded(collectionGroups) {
  this.persistBundleLoadedState(collectionGroups);
}
// Detaches the storage listener and removes this client's state entry.
// Idempotent: subsequent calls are no-ops once `started` is false.
shutdown() {
  if (this.started) {
    this.window.removeEventListener('storage', this.storageListener);
    this.removeItem(this.localClientStorageKey);
    this.started = false;
  }
}
// Reads from WebStorage with debug logging of the access.
getItem(key) {
  const value = this.storage.getItem(key);
  logDebug(LOG_TAG$a, 'READ', key, value);
  return value;
}
// Writes to WebStorage with debug logging of the access.
setItem(key, value) {
  logDebug(LOG_TAG$a, 'SET', key, value);
  this.storage.setItem(key, value);
}
// Deletes from WebStorage with debug logging of the access.
removeItem(key) {
  logDebug(LOG_TAG$a, 'REMOVE', key);
  this.storage.removeItem(key);
}
// Dispatches an incoming 'storage' event to the matching handler based on the
// event's key. Events arriving before start() are buffered and replayed.
handleWebStorageEvent(event) {
  // Note: The function is typed to take Event to be interface-compatible with
  // `Window.addEventListener`.
  const storageEvent = event;
  // Ignore events for other storage areas (e.g. sessionStorage).
  if (storageEvent.storageArea === this.storage) {
    logDebug(LOG_TAG$a, 'EVENT', storageEvent.key, storageEvent.newValue);
    // Storage events only fire for changes made by OTHER tabs; seeing our own
    // client-state key here means someone else modified it.
    if (storageEvent.key === this.localClientStorageKey) {
      logError('Received WebStorage notification for local change. Another client might have ' +
        'garbage-collected our state');
      return;
    }
    this.queue.enqueueRetryable(async () => {
      if (!this.started) {
        // Buffered until start() replays them (see constructor comment).
        this.earlyEvents.push(storageEvent);
        return;
      }
      if (storageEvent.key === null) {
        return;
      }
      if (this.clientStateKeyRe.test(storageEvent.key)) {
        // `newValue == null` means the remote client's entry was deleted.
        if (storageEvent.newValue != null) {
          const clientState = this.fromWebStorageClientState(storageEvent.key, storageEvent.newValue);
          if (clientState) {
            return this.handleClientStateEvent(clientState.clientId, clientState);
          }
        }
        else {
          const clientId = this.fromWebStorageClientStateKey(storageEvent.key);
          return this.handleClientStateEvent(clientId, null);
        }
      }
      else if (this.mutationBatchKeyRe.test(storageEvent.key)) {
        if (storageEvent.newValue !== null) {
          const mutationMetadata = this.fromWebStorageMutationMetadata(storageEvent.key, storageEvent.newValue);
          if (mutationMetadata) {
            return this.handleMutationBatchEvent(mutationMetadata);
          }
        }
      }
      else if (this.queryTargetKeyRe.test(storageEvent.key)) {
        if (storageEvent.newValue !== null) {
          const queryTargetMetadata = this.fromWebStorageQueryTargetMetadata(storageEvent.key, storageEvent.newValue);
          if (queryTargetMetadata) {
            return this.handleQueryTargetEvent(queryTargetMetadata);
          }
        }
      }
      else if (storageEvent.key === this.onlineStateKey) {
        if (storageEvent.newValue !== null) {
          const onlineState = this.fromWebStorageOnlineState(storageEvent.newValue);
          if (onlineState) {
            return this.handleOnlineStateEvent(onlineState);
          }
        }
      }
      else if (storageEvent.key === this.sequenceNumberKey) {
        const sequenceNumber = fromWebStorageSequenceNumber(storageEvent.newValue);
        if (sequenceNumber !== ListenSequence.INVALID) {
          this.sequenceNumberHandler(sequenceNumber);
        }
      }
      else if (storageEvent.key === this.bundleLoadedKey) {
        // A secondary tab loaded a bundle; re-sync the affected collections.
        const collectionGroups = this.fromWebStoreBundleLoadedState(storageEvent.newValue);
        await Promise.all(collectionGroups.map(cg => this.syncEngine.synchronizeWithChangedDocuments(cg)));
      }
    });
  }
}
// The LocalClientState entry for this tab (inserted in the constructor).
get localClientState() {
  return this.activeClients.get(this.localClientId);
}
// Serializes and writes this tab's client state to WebStorage.
persistClientState() {
  this.setItem(this.localClientStorageKey, this.localClientState.toWebStorageJSON());
}
// Writes a mutation batch's state (keyed by the current user) to WebStorage.
persistMutationState(batchId, state, error) {
  const mutationState = new MutationMetadata(this.currentUser, batchId, state, error);
  const mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
  this.setItem(mutationKey, mutationState.toWebStorageJSON());
}
// Deletes a mutation batch's state entry (keyed by the current user).
removeMutationState(batchId) {
  const mutationKey = createWebStorageMutationBatchKey(this.persistenceKey, this.currentUser, batchId);
  this.removeItem(mutationKey);
}
// Writes the primary tab's online state to WebStorage.
persistOnlineState(onlineState) {
  const entry = {
    clientId: this.localClientId,
    onlineState
  };
  // NOTE(review): writes via this.storage directly rather than the logging
  // setItem() wrapper used by the other persist* helpers — presumably
  // intentional (to avoid noisy logs); confirm.
  this.storage.setItem(this.onlineStateKey, JSON.stringify(entry));
}
// Writes a query target's state to WebStorage.
persistQueryTargetState(targetId, state, error) {
  const targetKey = createWebStorageQueryTargetMetadataKey(this.persistenceKey, targetId);
  const targetMetadata = new QueryTargetMetadata(targetId, state, error);
  this.setItem(targetKey, targetMetadata.toWebStorageJSON());
}
// Broadcasts the collection groups modified by a bundle load as a JSON array.
persistBundleLoadedState(collectionGroups) {
  const json = JSON.stringify(Array.from(collectionGroups));
  this.setItem(this.bundleLoadedKey, json);
}
  17530. /**
  17531. * Parses a client state key in WebStorage. Returns null if the key does not
  17532. * match the expected key format.
  17533. */
  17534. fromWebStorageClientStateKey(key) {
  17535. const match = this.clientStateKeyRe.exec(key);
  17536. return match ? match[1] : null;
  17537. }
/**
 * Parses a client state in WebStorage. Returns 'null' if the value could not
 * be parsed.
 */
fromWebStorageClientState(key, value) {
  // Callers only invoke this after `clientStateKeyRe.test(key)` succeeded,
  // so the extracted clientId is expected to be non-null here.
  const clientId = this.fromWebStorageClientStateKey(key);
  return RemoteClientState.fromWebStorageEntry(clientId, value);
}
/**
 * Parses a mutation batch state in WebStorage. Returns 'null' if the value
 * could not be parsed.
 */
fromWebStorageMutationMetadata(key, value) {
  // Callers only invoke this after `mutationBatchKeyRe.test(key)` succeeded,
  // so `match` is expected to be non-null here.
  const match = this.mutationBatchKeyRe.exec(key);
  const batchId = Number(match[1]);
  // Capture group 2 is the optional user UID; absent for unauthenticated users.
  const userId = match[2] !== undefined ? match[2] : null;
  return MutationMetadata.fromWebStorageEntry(new User(userId), batchId, value);
}
  17556. /**
  17557. * Parses a query target state from WebStorage. Returns 'null' if the value
  17558. * could not be parsed.
  17559. */
  17560. fromWebStorageQueryTargetMetadata(key, value) {
  17561. const match = this.queryTargetKeyRe.exec(key);
  17562. const targetId = Number(match[1]);
  17563. return QueryTargetMetadata.fromWebStorageEntry(targetId, value);
  17564. }
  17565. /**
  17566. * Parses an online state from WebStorage. Returns 'null' if the value
  17567. * could not be parsed.
  17568. */
  17569. fromWebStorageOnlineState(value) {
  17570. return SharedOnlineState.fromWebStorageEntry(value);
  17571. }
  17572. fromWebStoreBundleLoadedState(value) {
  17573. return JSON.parse(value);
  17574. }
  17575. async handleMutationBatchEvent(mutationBatch) {
  17576. if (mutationBatch.user.uid !== this.currentUser.uid) {
  17577. logDebug(LOG_TAG$a, `Ignoring mutation for non-active user ${mutationBatch.user.uid}`);
  17578. return;
  17579. }
  17580. return this.syncEngine.applyBatchState(mutationBatch.batchId, mutationBatch.state, mutationBatch.error);
  17581. }
  17582. handleQueryTargetEvent(targetMetadata) {
  17583. return this.syncEngine.applyTargetState(targetMetadata.targetId, targetMetadata.state, targetMetadata.error);
  17584. }
  17585. handleClientStateEvent(clientId, clientState) {
  17586. const updatedClients = clientState
  17587. ? this.activeClients.insert(clientId, clientState)
  17588. : this.activeClients.remove(clientId);
  17589. const existingTargets = this.extractActiveQueryTargets(this.activeClients);
  17590. const newTargets = this.extractActiveQueryTargets(updatedClients);
  17591. const addedTargets = [];
  17592. const removedTargets = [];
  17593. newTargets.forEach(targetId => {
  17594. if (!existingTargets.has(targetId)) {
  17595. addedTargets.push(targetId);
  17596. }
  17597. });
  17598. existingTargets.forEach(targetId => {
  17599. if (!newTargets.has(targetId)) {
  17600. removedTargets.push(targetId);
  17601. }
  17602. });
  17603. return this.syncEngine.applyActiveTargetsChange(addedTargets, removedTargets).then(() => {
  17604. this.activeClients = updatedClients;
  17605. });
  17606. }
  17607. handleOnlineStateEvent(onlineState) {
  17608. // We check whether the client that wrote this online state is still active
  17609. // by comparing its client ID to the list of clients kept active in
  17610. // IndexedDb. If a client does not update their IndexedDb client state
  17611. // within 5 seconds, it is considered inactive and we don't emit an online
  17612. // state event.
  17613. if (this.activeClients.get(onlineState.clientId)) {
  17614. this.onlineStateHandler(onlineState.onlineState);
  17615. }
  17616. }
  17617. extractActiveQueryTargets(clients) {
  17618. let activeTargets = targetIdSet();
  17619. clients.forEach((kev, value) => {
  17620. activeTargets = activeTargets.unionWith(value.activeTargetIds);
  17621. });
  17622. return activeTargets;
  17623. }
  17624. }
  17625. function fromWebStorageSequenceNumber(seqString) {
  17626. let sequenceNumber = ListenSequence.INVALID;
  17627. if (seqString != null) {
  17628. try {
  17629. const parsed = JSON.parse(seqString);
  17630. hardAssert(typeof parsed === 'number');
  17631. sequenceNumber = parsed;
  17632. }
  17633. catch (e) {
  17634. logError(LOG_TAG$a, 'Failed to read sequence number from WebStorage', e);
  17635. }
  17636. }
  17637. return sequenceNumber;
  17638. }
  17639. /**
  17640. * `MemorySharedClientState` is a simple implementation of SharedClientState for
  17641. * clients using memory persistence. The state in this class remains fully
  17642. * isolated and no synchronization is performed.
  17643. */
  17644. class MemorySharedClientState {
  17645. constructor() {
  17646. this.localState = new LocalClientState();
  17647. this.queryState = {};
  17648. this.onlineStateHandler = null;
  17649. this.sequenceNumberHandler = null;
  17650. }
  17651. addPendingMutation(batchId) {
  17652. // No op.
  17653. }
  17654. updateMutationState(batchId, state, error) {
  17655. // No op.
  17656. }
  17657. addLocalQueryTarget(targetId) {
  17658. this.localState.addQueryTarget(targetId);
  17659. return this.queryState[targetId] || 'not-current';
  17660. }
  17661. updateQueryState(targetId, state, error) {
  17662. this.queryState[targetId] = state;
  17663. }
  17664. removeLocalQueryTarget(targetId) {
  17665. this.localState.removeQueryTarget(targetId);
  17666. }
  17667. isLocalQueryTarget(targetId) {
  17668. return this.localState.activeTargetIds.has(targetId);
  17669. }
  17670. clearQueryState(targetId) {
  17671. delete this.queryState[targetId];
  17672. }
  17673. getAllActiveQueryTargets() {
  17674. return this.localState.activeTargetIds;
  17675. }
  17676. isActiveQueryTarget(targetId) {
  17677. return this.localState.activeTargetIds.has(targetId);
  17678. }
  17679. start() {
  17680. this.localState = new LocalClientState();
  17681. return Promise.resolve();
  17682. }
  17683. handleUserChange(user, removedBatchIds, addedBatchIds) {
  17684. // No op.
  17685. }
  17686. setOnlineState(onlineState) {
  17687. // No op.
  17688. }
  17689. shutdown() { }
  17690. writeSequenceNumber(sequenceNumber) { }
  17691. notifyBundleLoaded(collectionGroups) {
  17692. // No op.
  17693. }
  17694. }
  17695. /**
  17696. * @license
  17697. * Copyright 2019 Google LLC
  17698. *
  17699. * Licensed under the Apache License, Version 2.0 (the "License");
  17700. * you may not use this file except in compliance with the License.
  17701. * You may obtain a copy of the License at
  17702. *
  17703. * http://www.apache.org/licenses/LICENSE-2.0
  17704. *
  17705. * Unless required by applicable law or agreed to in writing, software
  17706. * distributed under the License is distributed on an "AS IS" BASIS,
  17707. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17708. * See the License for the specific language governing permissions and
  17709. * limitations under the License.
  17710. */
  17711. class NoopConnectivityMonitor {
  17712. addCallback(callback) {
  17713. // No-op.
  17714. }
  17715. shutdown() {
  17716. // No-op.
  17717. }
  17718. }
  17719. /**
  17720. * @license
  17721. * Copyright 2017 Google LLC
  17722. *
  17723. * Licensed under the Apache License, Version 2.0 (the "License");
  17724. * you may not use this file except in compliance with the License.
  17725. * You may obtain a copy of the License at
  17726. *
  17727. * http://www.apache.org/licenses/LICENSE-2.0
  17728. *
  17729. * Unless required by applicable law or agreed to in writing, software
  17730. * distributed under the License is distributed on an "AS IS" BASIS,
  17731. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17732. * See the License for the specific language governing permissions and
  17733. * limitations under the License.
  17734. */
  17735. /**
  17736. * Provides a simple helper class that implements the Stream interface to
  17737. * bridge to other implementations that are streams but do not implement the
  17738. * interface. The stream callbacks are invoked with the callOn... methods.
  17739. */
  17740. class StreamBridge {
  17741. constructor(args) {
  17742. this.sendFn = args.sendFn;
  17743. this.closeFn = args.closeFn;
  17744. }
  17745. onOpen(callback) {
  17746. this.wrappedOnOpen = callback;
  17747. }
  17748. onClose(callback) {
  17749. this.wrappedOnClose = callback;
  17750. }
  17751. onMessage(callback) {
  17752. this.wrappedOnMessage = callback;
  17753. }
  17754. close() {
  17755. this.closeFn();
  17756. }
  17757. send(msg) {
  17758. this.sendFn(msg);
  17759. }
  17760. callOnOpen() {
  17761. this.wrappedOnOpen();
  17762. }
  17763. callOnClose(err) {
  17764. this.wrappedOnClose(err);
  17765. }
  17766. callOnMessage(msg) {
  17767. this.wrappedOnMessage(msg);
  17768. }
  17769. }
  17770. /**
  17771. * @license
  17772. * Copyright 2017 Google LLC
  17773. *
  17774. * Licensed under the Apache License, Version 2.0 (the "License");
  17775. * you may not use this file except in compliance with the License.
  17776. * You may obtain a copy of the License at
  17777. *
  17778. * http://www.apache.org/licenses/LICENSE-2.0
  17779. *
  17780. * Unless required by applicable law or agreed to in writing, software
  17781. * distributed under the License is distributed on an "AS IS" BASIS,
  17782. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17783. * See the License for the specific language governing permissions and
  17784. * limitations under the License.
  17785. */
  17786. /*
  17787. * Utilities for dealing with node.js-style APIs. See nodePromise for more
  17788. * details.
  17789. */
  17790. /**
  17791. * Creates a node-style callback that resolves or rejects a new Promise. The
  17792. * callback is passed to the given action which can then use the callback as
  17793. * a parameter to a node-style function.
  17794. *
  17795. * The intent is to directly bridge a node-style function (which takes a
  17796. * callback) into a Promise without manually converting between the node-style
  17797. * callback and the promise at each call.
  17798. *
  17799. * In effect it allows you to convert:
  17800. *
  17801. * @example
  17802. * new Promise((resolve: (value?: fs.Stats) => void,
  17803. * reject: (error?: any) => void) => {
  17804. * fs.stat(path, (error?: any, stat?: fs.Stats) => {
  17805. * if (error) {
  17806. * reject(error);
  17807. * } else {
  17808. * resolve(stat);
  17809. * }
  17810. * });
  17811. * });
  17812. *
  17813. * Into
  17814. * @example
  17815. * nodePromise((callback: NodeCallback<fs.Stats>) => {
  17816. * fs.stat(path, callback);
  17817. * });
  17818. *
  17819. * @param action - a function that takes a node-style callback as an argument
  17820. * and then uses that callback to invoke some node-style API.
  17821. * @returns a new Promise which will be rejected if the callback is given the
  17822. * first Error parameter or will resolve to the value given otherwise.
  17823. */
  17824. function nodePromise(action) {
  17825. return new Promise((resolve, reject) => {
  17826. action((error, value) => {
  17827. if (error) {
  17828. reject(error);
  17829. }
  17830. else {
  17831. resolve(value);
  17832. }
  17833. });
  17834. });
  17835. }
  17836. /**
  17837. * @license
  17838. * Copyright 2017 Google LLC
  17839. *
  17840. * Licensed under the Apache License, Version 2.0 (the "License");
  17841. * you may not use this file except in compliance with the License.
  17842. * You may obtain a copy of the License at
  17843. *
  17844. * http://www.apache.org/licenses/LICENSE-2.0
  17845. *
  17846. * Unless required by applicable law or agreed to in writing, software
  17847. * distributed under the License is distributed on an "AS IS" BASIS,
  17848. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  17849. * See the License for the specific language governing permissions and
  17850. * limitations under the License.
  17851. */
// TODO: Fetch runtime version from grpc-js/package.json instead
// when there's a cleaner way to dynamic require JSON in both Node ESM and CJS
// Hard-coded @grpc/grpc-js version reported in the API-client header below.
const grpcVersion = '1.7.3';
// Log tag shared by the gRPC connection helpers in this section.
const LOG_TAG$9 = 'Connection';
// Value for the `X-Goog-Api-Client` header: Node, SDK, and gRPC versions.
const X_GOOG_API_CLIENT_VALUE = `gl-node/${process.versions.node} fire/${SDK_VERSION} grpc/${grpcVersion}`;
  17857. function createMetadata(databasePath, authToken, appCheckToken, appId) {
  17858. hardAssert(authToken === null || authToken.type === 'OAuth');
  17859. const metadata = new grpc.Metadata();
  17860. if (authToken) {
  17861. authToken.headers.forEach((value, key) => metadata.set(key, value));
  17862. }
  17863. if (appCheckToken) {
  17864. appCheckToken.headers.forEach((value, key) => metadata.set(key, value));
  17865. }
  17866. if (appId) {
  17867. metadata.set('X-Firebase-GMPID', appId);
  17868. }
  17869. metadata.set('X-Goog-Api-Client', X_GOOG_API_CLIENT_VALUE);
  17870. // These headers are used to improve routing and project isolation by the
  17871. // backend.
  17872. // TODO(b/199767712): We are keeping 'Google-Cloud-Resource-Prefix' until Emulators can be
  17873. // released with cl/428820046. Currently blocked because Emulators are now built with Java
  17874. // 11 from Google3.
  17875. metadata.set('Google-Cloud-Resource-Prefix', databasePath);
  17876. metadata.set('x-goog-request-params', databasePath);
  17877. return metadata;
  17878. }
  17879. /**
  17880. * A Connection implemented by GRPC-Node.
  17881. */
/**
 * A Connection implemented by GRPC-Node. Wraps the generated Firestore gRPC
 * client and exposes unary, streaming, and bidirectional RPC entry points.
 */
class GrpcConnection {
    constructor(protos, databaseInfo) {
        this.databaseInfo = databaseInfo;
        // We cache stubs for the most-recently-used token.
        this.cachedStub = null;
        // eslint-disable-next-line @typescript-eslint/no-explicit-any
        // Generated `google.firestore.v1` service namespace from the loaded protos.
        this.firestore = protos['google']['firestore']['v1'];
        this.databasePath = `projects/${databaseInfo.databaseId.projectId}/databases/${databaseInfo.databaseId.database}`;
    }
    get shouldResourcePathBeIncludedInRequest() {
        // Both `invokeRPC()` and `invokeStreamingRPC()` ignore their `path` arguments, and expect
        // the "path" to be part of the given `request`.
        return true;
    }
    /**
     * Lazily creates (and caches) the Firestore client stub, choosing SSL or
     * insecure channel credentials based on the database info.
     */
    ensureActiveStub() {
        if (!this.cachedStub) {
            logDebug(LOG_TAG$9, 'Creating Firestore stub.');
            const credentials = this.databaseInfo.ssl
                ? grpc.credentials.createSsl()
                : grpc.credentials.createInsecure();
            this.cachedStub = new this.firestore.Firestore(this.databaseInfo.host, credentials);
        }
        return this.cachedStub;
    }
    /**
     * Invokes a unary RPC. The `path` argument is ignored (see
     * `shouldResourcePathBeIncludedInRequest`); the `database` field is merged
     * into the request instead. Rejects with a `FirestoreError` mapped from
     * the gRPC status code.
     */
    invokeRPC(rpcName, path, request, authToken, appCheckToken) {
        const stub = this.ensureActiveStub();
        const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
        const jsonRequest = Object.assign({ database: this.databasePath }, request);
        return nodePromise((callback) => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' invoked with request:`, request);
            return stub[rpcName](jsonRequest, metadata, (grpcError, value) => {
                if (grpcError) {
                    logDebug(LOG_TAG$9, `RPC '${rpcName}' failed with error:`, grpcError);
                    callback(new FirestoreError(mapCodeFromRpcCode(grpcError.code), grpcError.message));
                }
                else {
                    logDebug(LOG_TAG$9, `RPC '${rpcName}' completed with response:`, value);
                    callback(undefined, value);
                }
            });
        });
    }
    /**
     * Invokes a server-streaming RPC and collects the responses into an array.
     * Resolves early once `expectedResponseCount` responses have arrived (if
     * given), otherwise when the stream ends; rejects on stream error.
     */
    invokeStreamingRPC(rpcName, path, request, authToken, appCheckToken, expectedResponseCount) {
        const results = [];
        const responseDeferred = new Deferred();
        logDebug(LOG_TAG$9, `RPC '${rpcName}' invoked (streaming) with request:`, request);
        const stub = this.ensureActiveStub();
        const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
        const jsonRequest = Object.assign(Object.assign({}, request), { database: this.databasePath });
        const stream = stub[rpcName](jsonRequest, metadata);
        // Guards against resolving twice when the expected count is hit and the
        // stream later emits 'end'.
        let callbackFired = false;
        stream.on('data', (response) => {
            logDebug(LOG_TAG$9, `RPC ${rpcName} received result:`, response);
            results.push(response);
            if (expectedResponseCount !== undefined &&
                results.length === expectedResponseCount) {
                callbackFired = true;
                responseDeferred.resolve(results);
            }
        });
        stream.on('end', () => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' completed.`);
            if (!callbackFired) {
                callbackFired = true;
                responseDeferred.resolve(results);
            }
        });
        stream.on('error', (grpcError) => {
            logDebug(LOG_TAG$9, `RPC '${rpcName}' failed with error:`, grpcError);
            const code = mapCodeFromRpcCode(grpcError.code);
            responseDeferred.reject(new FirestoreError(code, grpcError.message));
        });
        return responseDeferred.promise;
    }
    // TODO(mikelehen): This "method" is a monster. Should be refactored.
    /**
     * Opens a bidirectional gRPC stream and wraps it in a StreamBridge.
     * `close()` is idempotent (guarded by `closed`) and both notifies the
     * bridge's onClose listener and ends the underlying gRPC stream.
     */
    openStream(rpcName, authToken, appCheckToken) {
        const stub = this.ensureActiveStub();
        const metadata = createMetadata(this.databasePath, authToken, appCheckToken, this.databaseInfo.appId);
        const grpcStream = stub[rpcName](metadata);
        let closed = false;
        // NOTE: `close` closes over `stream`, which is declared below; it is
        // only ever invoked after `stream` is assigned.
        const close = (err) => {
            if (!closed) {
                closed = true;
                stream.callOnClose(err);
                grpcStream.end();
            }
        };
        const stream = new StreamBridge({
            sendFn: (msg) => {
                if (!closed) {
                    logDebug(LOG_TAG$9, 'GRPC stream sending:', msg);
                    try {
                        grpcStream.write(msg);
                    }
                    catch (e) {
                        // This probably means we didn't conform to the proto. Make sure to
                        // log the message we sent.
                        logError('Failure sending:', msg);
                        logError('Error:', e);
                        throw e;
                    }
                }
                else {
                    logDebug(LOG_TAG$9, 'Not sending because gRPC stream is closed:', msg);
                }
            },
            closeFn: () => {
                logDebug(LOG_TAG$9, 'GRPC stream closed locally via close().');
                close();
            }
        });
        // Forward inbound messages to the bridge while the stream is open.
        grpcStream.on('data', (msg) => {
            if (!closed) {
                logDebug(LOG_TAG$9, 'GRPC stream received:', msg);
                stream.callOnMessage(msg);
            }
        });
        grpcStream.on('end', () => {
            logDebug(LOG_TAG$9, 'GRPC stream ended.');
            close();
        });
        grpcStream.on('error', (grpcError) => {
            if (!closed) {
                logWarn(LOG_TAG$9, 'GRPC stream error. Code:', grpcError.code, 'Message:', grpcError.message);
                const code = mapCodeFromRpcCode(grpcError.code);
                close(new FirestoreError(code, grpcError.message));
            }
        });
        logDebug(LOG_TAG$9, 'Opening GRPC stream');
        // TODO(dimond): Since grpc has no explicit open status (or does it?) we
        // simulate an onOpen in the next loop after the stream had it's listeners
        // registered
        setTimeout(() => {
            stream.callOnOpen();
        }, 0);
        return stream;
    }
}
  18020. const nested = {
  18021. google: {
  18022. nested: {
  18023. protobuf: {
  18024. options: {
  18025. csharp_namespace: "Google.Protobuf.WellKnownTypes",
  18026. go_package: "github.com/golang/protobuf/ptypes/wrappers",
  18027. java_package: "com.google.protobuf",
  18028. java_outer_classname: "WrappersProto",
  18029. java_multiple_files: true,
  18030. objc_class_prefix: "GPB",
  18031. cc_enable_arenas: true,
  18032. optimize_for: "SPEED"
  18033. },
  18034. nested: {
  18035. Timestamp: {
  18036. fields: {
  18037. seconds: {
  18038. type: "int64",
  18039. id: 1
  18040. },
  18041. nanos: {
  18042. type: "int32",
  18043. id: 2
  18044. }
  18045. }
  18046. },
  18047. FileDescriptorSet: {
  18048. fields: {
  18049. file: {
  18050. rule: "repeated",
  18051. type: "FileDescriptorProto",
  18052. id: 1
  18053. }
  18054. }
  18055. },
  18056. FileDescriptorProto: {
  18057. fields: {
  18058. name: {
  18059. type: "string",
  18060. id: 1
  18061. },
  18062. "package": {
  18063. type: "string",
  18064. id: 2
  18065. },
  18066. dependency: {
  18067. rule: "repeated",
  18068. type: "string",
  18069. id: 3
  18070. },
  18071. publicDependency: {
  18072. rule: "repeated",
  18073. type: "int32",
  18074. id: 10,
  18075. options: {
  18076. packed: false
  18077. }
  18078. },
  18079. weakDependency: {
  18080. rule: "repeated",
  18081. type: "int32",
  18082. id: 11,
  18083. options: {
  18084. packed: false
  18085. }
  18086. },
  18087. messageType: {
  18088. rule: "repeated",
  18089. type: "DescriptorProto",
  18090. id: 4
  18091. },
  18092. enumType: {
  18093. rule: "repeated",
  18094. type: "EnumDescriptorProto",
  18095. id: 5
  18096. },
  18097. service: {
  18098. rule: "repeated",
  18099. type: "ServiceDescriptorProto",
  18100. id: 6
  18101. },
  18102. extension: {
  18103. rule: "repeated",
  18104. type: "FieldDescriptorProto",
  18105. id: 7
  18106. },
  18107. options: {
  18108. type: "FileOptions",
  18109. id: 8
  18110. },
  18111. sourceCodeInfo: {
  18112. type: "SourceCodeInfo",
  18113. id: 9
  18114. },
  18115. syntax: {
  18116. type: "string",
  18117. id: 12
  18118. }
  18119. }
  18120. },
  18121. DescriptorProto: {
  18122. fields: {
  18123. name: {
  18124. type: "string",
  18125. id: 1
  18126. },
  18127. field: {
  18128. rule: "repeated",
  18129. type: "FieldDescriptorProto",
  18130. id: 2
  18131. },
  18132. extension: {
  18133. rule: "repeated",
  18134. type: "FieldDescriptorProto",
  18135. id: 6
  18136. },
  18137. nestedType: {
  18138. rule: "repeated",
  18139. type: "DescriptorProto",
  18140. id: 3
  18141. },
  18142. enumType: {
  18143. rule: "repeated",
  18144. type: "EnumDescriptorProto",
  18145. id: 4
  18146. },
  18147. extensionRange: {
  18148. rule: "repeated",
  18149. type: "ExtensionRange",
  18150. id: 5
  18151. },
  18152. oneofDecl: {
  18153. rule: "repeated",
  18154. type: "OneofDescriptorProto",
  18155. id: 8
  18156. },
  18157. options: {
  18158. type: "MessageOptions",
  18159. id: 7
  18160. },
  18161. reservedRange: {
  18162. rule: "repeated",
  18163. type: "ReservedRange",
  18164. id: 9
  18165. },
  18166. reservedName: {
  18167. rule: "repeated",
  18168. type: "string",
  18169. id: 10
  18170. }
  18171. },
  18172. nested: {
  18173. ExtensionRange: {
  18174. fields: {
  18175. start: {
  18176. type: "int32",
  18177. id: 1
  18178. },
  18179. end: {
  18180. type: "int32",
  18181. id: 2
  18182. }
  18183. }
  18184. },
  18185. ReservedRange: {
  18186. fields: {
  18187. start: {
  18188. type: "int32",
  18189. id: 1
  18190. },
  18191. end: {
  18192. type: "int32",
  18193. id: 2
  18194. }
  18195. }
  18196. }
  18197. }
  18198. },
  18199. FieldDescriptorProto: {
  18200. fields: {
  18201. name: {
  18202. type: "string",
  18203. id: 1
  18204. },
  18205. number: {
  18206. type: "int32",
  18207. id: 3
  18208. },
  18209. label: {
  18210. type: "Label",
  18211. id: 4
  18212. },
  18213. type: {
  18214. type: "Type",
  18215. id: 5
  18216. },
  18217. typeName: {
  18218. type: "string",
  18219. id: 6
  18220. },
  18221. extendee: {
  18222. type: "string",
  18223. id: 2
  18224. },
  18225. defaultValue: {
  18226. type: "string",
  18227. id: 7
  18228. },
  18229. oneofIndex: {
  18230. type: "int32",
  18231. id: 9
  18232. },
  18233. jsonName: {
  18234. type: "string",
  18235. id: 10
  18236. },
  18237. options: {
  18238. type: "FieldOptions",
  18239. id: 8
  18240. }
  18241. },
  18242. nested: {
  18243. Type: {
  18244. values: {
  18245. TYPE_DOUBLE: 1,
  18246. TYPE_FLOAT: 2,
  18247. TYPE_INT64: 3,
  18248. TYPE_UINT64: 4,
  18249. TYPE_INT32: 5,
  18250. TYPE_FIXED64: 6,
  18251. TYPE_FIXED32: 7,
  18252. TYPE_BOOL: 8,
  18253. TYPE_STRING: 9,
  18254. TYPE_GROUP: 10,
  18255. TYPE_MESSAGE: 11,
  18256. TYPE_BYTES: 12,
  18257. TYPE_UINT32: 13,
  18258. TYPE_ENUM: 14,
  18259. TYPE_SFIXED32: 15,
  18260. TYPE_SFIXED64: 16,
  18261. TYPE_SINT32: 17,
  18262. TYPE_SINT64: 18
  18263. }
  18264. },
  18265. Label: {
  18266. values: {
  18267. LABEL_OPTIONAL: 1,
  18268. LABEL_REQUIRED: 2,
  18269. LABEL_REPEATED: 3
  18270. }
  18271. }
  18272. }
  18273. },
  18274. OneofDescriptorProto: {
  18275. fields: {
  18276. name: {
  18277. type: "string",
  18278. id: 1
  18279. },
  18280. options: {
  18281. type: "OneofOptions",
  18282. id: 2
  18283. }
  18284. }
  18285. },
  18286. EnumDescriptorProto: {
  18287. fields: {
  18288. name: {
  18289. type: "string",
  18290. id: 1
  18291. },
  18292. value: {
  18293. rule: "repeated",
  18294. type: "EnumValueDescriptorProto",
  18295. id: 2
  18296. },
  18297. options: {
  18298. type: "EnumOptions",
  18299. id: 3
  18300. }
  18301. }
  18302. },
  18303. EnumValueDescriptorProto: {
  18304. fields: {
  18305. name: {
  18306. type: "string",
  18307. id: 1
  18308. },
  18309. number: {
  18310. type: "int32",
  18311. id: 2
  18312. },
  18313. options: {
  18314. type: "EnumValueOptions",
  18315. id: 3
  18316. }
  18317. }
  18318. },
  18319. ServiceDescriptorProto: {
  18320. fields: {
  18321. name: {
  18322. type: "string",
  18323. id: 1
  18324. },
  18325. method: {
  18326. rule: "repeated",
  18327. type: "MethodDescriptorProto",
  18328. id: 2
  18329. },
  18330. options: {
  18331. type: "ServiceOptions",
  18332. id: 3
  18333. }
  18334. }
  18335. },
  18336. MethodDescriptorProto: {
  18337. fields: {
  18338. name: {
  18339. type: "string",
  18340. id: 1
  18341. },
  18342. inputType: {
  18343. type: "string",
  18344. id: 2
  18345. },
  18346. outputType: {
  18347. type: "string",
  18348. id: 3
  18349. },
  18350. options: {
  18351. type: "MethodOptions",
  18352. id: 4
  18353. },
  18354. clientStreaming: {
  18355. type: "bool",
  18356. id: 5
  18357. },
  18358. serverStreaming: {
  18359. type: "bool",
  18360. id: 6
  18361. }
  18362. }
  18363. },
  18364. FileOptions: {
  18365. fields: {
  18366. javaPackage: {
  18367. type: "string",
  18368. id: 1
  18369. },
  18370. javaOuterClassname: {
  18371. type: "string",
  18372. id: 8
  18373. },
  18374. javaMultipleFiles: {
  18375. type: "bool",
  18376. id: 10
  18377. },
  18378. javaGenerateEqualsAndHash: {
  18379. type: "bool",
  18380. id: 20,
  18381. options: {
  18382. deprecated: true
  18383. }
  18384. },
  18385. javaStringCheckUtf8: {
  18386. type: "bool",
  18387. id: 27
  18388. },
  18389. optimizeFor: {
  18390. type: "OptimizeMode",
  18391. id: 9,
  18392. options: {
  18393. "default": "SPEED"
  18394. }
  18395. },
  18396. goPackage: {
  18397. type: "string",
  18398. id: 11
  18399. },
  18400. ccGenericServices: {
  18401. type: "bool",
  18402. id: 16
  18403. },
  18404. javaGenericServices: {
  18405. type: "bool",
  18406. id: 17
  18407. },
  18408. pyGenericServices: {
  18409. type: "bool",
  18410. id: 18
  18411. },
  18412. deprecated: {
  18413. type: "bool",
  18414. id: 23
  18415. },
  18416. ccEnableArenas: {
  18417. type: "bool",
  18418. id: 31
  18419. },
  18420. objcClassPrefix: {
  18421. type: "string",
  18422. id: 36
  18423. },
  18424. csharpNamespace: {
  18425. type: "string",
  18426. id: 37
  18427. },
  18428. uninterpretedOption: {
  18429. rule: "repeated",
  18430. type: "UninterpretedOption",
  18431. id: 999
  18432. }
  18433. },
  18434. extensions: [
  18435. [
  18436. 1000,
  18437. 536870911
  18438. ]
  18439. ],
  18440. reserved: [
  18441. [
  18442. 38,
  18443. 38
  18444. ]
  18445. ],
  18446. nested: {
  18447. OptimizeMode: {
  18448. values: {
  18449. SPEED: 1,
  18450. CODE_SIZE: 2,
  18451. LITE_RUNTIME: 3
  18452. }
  18453. }
  18454. }
  18455. },
  18456. MessageOptions: {
  18457. fields: {
  18458. messageSetWireFormat: {
  18459. type: "bool",
  18460. id: 1
  18461. },
  18462. noStandardDescriptorAccessor: {
  18463. type: "bool",
  18464. id: 2
  18465. },
  18466. deprecated: {
  18467. type: "bool",
  18468. id: 3
  18469. },
  18470. mapEntry: {
  18471. type: "bool",
  18472. id: 7
  18473. },
  18474. uninterpretedOption: {
  18475. rule: "repeated",
  18476. type: "UninterpretedOption",
  18477. id: 999
  18478. }
  18479. },
  18480. extensions: [
  18481. [
  18482. 1000,
  18483. 536870911
  18484. ]
  18485. ],
  18486. reserved: [
  18487. [
  18488. 8,
  18489. 8
  18490. ]
  18491. ]
  18492. },
  18493. FieldOptions: {
  18494. fields: {
  18495. ctype: {
  18496. type: "CType",
  18497. id: 1,
  18498. options: {
  18499. "default": "STRING"
  18500. }
  18501. },
  18502. packed: {
  18503. type: "bool",
  18504. id: 2
  18505. },
  18506. jstype: {
  18507. type: "JSType",
  18508. id: 6,
  18509. options: {
  18510. "default": "JS_NORMAL"
  18511. }
  18512. },
  18513. lazy: {
  18514. type: "bool",
  18515. id: 5
  18516. },
  18517. deprecated: {
  18518. type: "bool",
  18519. id: 3
  18520. },
  18521. weak: {
  18522. type: "bool",
  18523. id: 10
  18524. },
  18525. uninterpretedOption: {
  18526. rule: "repeated",
  18527. type: "UninterpretedOption",
  18528. id: 999
  18529. }
  18530. },
  18531. extensions: [
  18532. [
  18533. 1000,
  18534. 536870911
  18535. ]
  18536. ],
  18537. reserved: [
  18538. [
  18539. 4,
  18540. 4
  18541. ]
  18542. ],
  18543. nested: {
  18544. CType: {
  18545. values: {
  18546. STRING: 0,
  18547. CORD: 1,
  18548. STRING_PIECE: 2
  18549. }
  18550. },
  18551. JSType: {
  18552. values: {
  18553. JS_NORMAL: 0,
  18554. JS_STRING: 1,
  18555. JS_NUMBER: 2
  18556. }
  18557. }
  18558. }
  18559. },
  18560. OneofOptions: {
  18561. fields: {
  18562. uninterpretedOption: {
  18563. rule: "repeated",
  18564. type: "UninterpretedOption",
  18565. id: 999
  18566. }
  18567. },
  18568. extensions: [
  18569. [
  18570. 1000,
  18571. 536870911
  18572. ]
  18573. ]
  18574. },
  18575. EnumOptions: {
  18576. fields: {
  18577. allowAlias: {
  18578. type: "bool",
  18579. id: 2
  18580. },
  18581. deprecated: {
  18582. type: "bool",
  18583. id: 3
  18584. },
  18585. uninterpretedOption: {
  18586. rule: "repeated",
  18587. type: "UninterpretedOption",
  18588. id: 999
  18589. }
  18590. },
  18591. extensions: [
  18592. [
  18593. 1000,
  18594. 536870911
  18595. ]
  18596. ]
  18597. },
  18598. EnumValueOptions: {
  18599. fields: {
  18600. deprecated: {
  18601. type: "bool",
  18602. id: 1
  18603. },
  18604. uninterpretedOption: {
  18605. rule: "repeated",
  18606. type: "UninterpretedOption",
  18607. id: 999
  18608. }
  18609. },
  18610. extensions: [
  18611. [
  18612. 1000,
  18613. 536870911
  18614. ]
  18615. ]
  18616. },
  18617. ServiceOptions: {
  18618. fields: {
  18619. deprecated: {
  18620. type: "bool",
  18621. id: 33
  18622. },
  18623. uninterpretedOption: {
  18624. rule: "repeated",
  18625. type: "UninterpretedOption",
  18626. id: 999
  18627. }
  18628. },
  18629. extensions: [
  18630. [
  18631. 1000,
  18632. 536870911
  18633. ]
  18634. ]
  18635. },
  18636. MethodOptions: {
  18637. fields: {
  18638. deprecated: {
  18639. type: "bool",
  18640. id: 33
  18641. },
  18642. uninterpretedOption: {
  18643. rule: "repeated",
  18644. type: "UninterpretedOption",
  18645. id: 999
  18646. }
  18647. },
  18648. extensions: [
  18649. [
  18650. 1000,
  18651. 536870911
  18652. ]
  18653. ]
  18654. },
  18655. UninterpretedOption: {
  18656. fields: {
  18657. name: {
  18658. rule: "repeated",
  18659. type: "NamePart",
  18660. id: 2
  18661. },
  18662. identifierValue: {
  18663. type: "string",
  18664. id: 3
  18665. },
  18666. positiveIntValue: {
  18667. type: "uint64",
  18668. id: 4
  18669. },
  18670. negativeIntValue: {
  18671. type: "int64",
  18672. id: 5
  18673. },
  18674. doubleValue: {
  18675. type: "double",
  18676. id: 6
  18677. },
  18678. stringValue: {
  18679. type: "bytes",
  18680. id: 7
  18681. },
  18682. aggregateValue: {
  18683. type: "string",
  18684. id: 8
  18685. }
  18686. },
  18687. nested: {
  18688. NamePart: {
  18689. fields: {
  18690. namePart: {
  18691. rule: "required",
  18692. type: "string",
  18693. id: 1
  18694. },
  18695. isExtension: {
  18696. rule: "required",
  18697. type: "bool",
  18698. id: 2
  18699. }
  18700. }
  18701. }
  18702. }
  18703. },
  18704. SourceCodeInfo: {
  18705. fields: {
  18706. location: {
  18707. rule: "repeated",
  18708. type: "Location",
  18709. id: 1
  18710. }
  18711. },
  18712. nested: {
  18713. Location: {
  18714. fields: {
  18715. path: {
  18716. rule: "repeated",
  18717. type: "int32",
  18718. id: 1
  18719. },
  18720. span: {
  18721. rule: "repeated",
  18722. type: "int32",
  18723. id: 2
  18724. },
  18725. leadingComments: {
  18726. type: "string",
  18727. id: 3
  18728. },
  18729. trailingComments: {
  18730. type: "string",
  18731. id: 4
  18732. },
  18733. leadingDetachedComments: {
  18734. rule: "repeated",
  18735. type: "string",
  18736. id: 6
  18737. }
  18738. }
  18739. }
  18740. }
  18741. },
  18742. GeneratedCodeInfo: {
  18743. fields: {
  18744. annotation: {
  18745. rule: "repeated",
  18746. type: "Annotation",
  18747. id: 1
  18748. }
  18749. },
  18750. nested: {
  18751. Annotation: {
  18752. fields: {
  18753. path: {
  18754. rule: "repeated",
  18755. type: "int32",
  18756. id: 1
  18757. },
  18758. sourceFile: {
  18759. type: "string",
  18760. id: 2
  18761. },
  18762. begin: {
  18763. type: "int32",
  18764. id: 3
  18765. },
  18766. end: {
  18767. type: "int32",
  18768. id: 4
  18769. }
  18770. }
  18771. }
  18772. }
  18773. },
  18774. Struct: {
  18775. fields: {
  18776. fields: {
  18777. keyType: "string",
  18778. type: "Value",
  18779. id: 1
  18780. }
  18781. }
  18782. },
  18783. Value: {
  18784. oneofs: {
  18785. kind: {
  18786. oneof: [
  18787. "nullValue",
  18788. "numberValue",
  18789. "stringValue",
  18790. "boolValue",
  18791. "structValue",
  18792. "listValue"
  18793. ]
  18794. }
  18795. },
  18796. fields: {
  18797. nullValue: {
  18798. type: "NullValue",
  18799. id: 1
  18800. },
  18801. numberValue: {
  18802. type: "double",
  18803. id: 2
  18804. },
  18805. stringValue: {
  18806. type: "string",
  18807. id: 3
  18808. },
  18809. boolValue: {
  18810. type: "bool",
  18811. id: 4
  18812. },
  18813. structValue: {
  18814. type: "Struct",
  18815. id: 5
  18816. },
  18817. listValue: {
  18818. type: "ListValue",
  18819. id: 6
  18820. }
  18821. }
  18822. },
  18823. NullValue: {
  18824. values: {
  18825. NULL_VALUE: 0
  18826. }
  18827. },
  18828. ListValue: {
  18829. fields: {
  18830. values: {
  18831. rule: "repeated",
  18832. type: "Value",
  18833. id: 1
  18834. }
  18835. }
  18836. },
  18837. Empty: {
  18838. fields: {
  18839. }
  18840. },
  18841. DoubleValue: {
  18842. fields: {
  18843. value: {
  18844. type: "double",
  18845. id: 1
  18846. }
  18847. }
  18848. },
  18849. FloatValue: {
  18850. fields: {
  18851. value: {
  18852. type: "float",
  18853. id: 1
  18854. }
  18855. }
  18856. },
  18857. Int64Value: {
  18858. fields: {
  18859. value: {
  18860. type: "int64",
  18861. id: 1
  18862. }
  18863. }
  18864. },
  18865. UInt64Value: {
  18866. fields: {
  18867. value: {
  18868. type: "uint64",
  18869. id: 1
  18870. }
  18871. }
  18872. },
  18873. Int32Value: {
  18874. fields: {
  18875. value: {
  18876. type: "int32",
  18877. id: 1
  18878. }
  18879. }
  18880. },
  18881. UInt32Value: {
  18882. fields: {
  18883. value: {
  18884. type: "uint32",
  18885. id: 1
  18886. }
  18887. }
  18888. },
  18889. BoolValue: {
  18890. fields: {
  18891. value: {
  18892. type: "bool",
  18893. id: 1
  18894. }
  18895. }
  18896. },
  18897. StringValue: {
  18898. fields: {
  18899. value: {
  18900. type: "string",
  18901. id: 1
  18902. }
  18903. }
  18904. },
  18905. BytesValue: {
  18906. fields: {
  18907. value: {
  18908. type: "bytes",
  18909. id: 1
  18910. }
  18911. }
  18912. },
  18913. Any: {
  18914. fields: {
  18915. typeUrl: {
  18916. type: "string",
  18917. id: 1
  18918. },
  18919. value: {
  18920. type: "bytes",
  18921. id: 2
  18922. }
  18923. }
  18924. }
  18925. }
  18926. },
  18927. firestore: {
  18928. nested: {
  18929. v1: {
  18930. options: {
  18931. csharp_namespace: "Google.Cloud.Firestore.V1",
  18932. go_package: "google.golang.org/genproto/googleapis/firestore/v1;firestore",
  18933. java_multiple_files: true,
  18934. java_outer_classname: "WriteProto",
  18935. java_package: "com.google.firestore.v1",
  18936. objc_class_prefix: "GCFS",
  18937. php_namespace: "Google\\Cloud\\Firestore\\V1",
  18938. ruby_package: "Google::Cloud::Firestore::V1"
  18939. },
  18940. nested: {
  18941. AggregationResult: {
  18942. fields: {
  18943. aggregateFields: {
  18944. keyType: "string",
  18945. type: "Value",
  18946. id: 2
  18947. }
  18948. }
  18949. },
  18950. DocumentMask: {
  18951. fields: {
  18952. fieldPaths: {
  18953. rule: "repeated",
  18954. type: "string",
  18955. id: 1
  18956. }
  18957. }
  18958. },
  18959. Precondition: {
  18960. oneofs: {
  18961. conditionType: {
  18962. oneof: [
  18963. "exists",
  18964. "updateTime"
  18965. ]
  18966. }
  18967. },
  18968. fields: {
  18969. exists: {
  18970. type: "bool",
  18971. id: 1
  18972. },
  18973. updateTime: {
  18974. type: "google.protobuf.Timestamp",
  18975. id: 2
  18976. }
  18977. }
  18978. },
  18979. TransactionOptions: {
  18980. oneofs: {
  18981. mode: {
  18982. oneof: [
  18983. "readOnly",
  18984. "readWrite"
  18985. ]
  18986. }
  18987. },
  18988. fields: {
  18989. readOnly: {
  18990. type: "ReadOnly",
  18991. id: 2
  18992. },
  18993. readWrite: {
  18994. type: "ReadWrite",
  18995. id: 3
  18996. }
  18997. },
  18998. nested: {
  18999. ReadWrite: {
  19000. fields: {
  19001. retryTransaction: {
  19002. type: "bytes",
  19003. id: 1
  19004. }
  19005. }
  19006. },
  19007. ReadOnly: {
  19008. oneofs: {
  19009. consistencySelector: {
  19010. oneof: [
  19011. "readTime"
  19012. ]
  19013. }
  19014. },
  19015. fields: {
  19016. readTime: {
  19017. type: "google.protobuf.Timestamp",
  19018. id: 2
  19019. }
  19020. }
  19021. }
  19022. }
  19023. },
  19024. Document: {
  19025. fields: {
  19026. name: {
  19027. type: "string",
  19028. id: 1
  19029. },
  19030. fields: {
  19031. keyType: "string",
  19032. type: "Value",
  19033. id: 2
  19034. },
  19035. createTime: {
  19036. type: "google.protobuf.Timestamp",
  19037. id: 3
  19038. },
  19039. updateTime: {
  19040. type: "google.protobuf.Timestamp",
  19041. id: 4
  19042. }
  19043. }
  19044. },
  19045. Value: {
  19046. oneofs: {
  19047. valueType: {
  19048. oneof: [
  19049. "nullValue",
  19050. "booleanValue",
  19051. "integerValue",
  19052. "doubleValue",
  19053. "timestampValue",
  19054. "stringValue",
  19055. "bytesValue",
  19056. "referenceValue",
  19057. "geoPointValue",
  19058. "arrayValue",
  19059. "mapValue"
  19060. ]
  19061. }
  19062. },
  19063. fields: {
  19064. nullValue: {
  19065. type: "google.protobuf.NullValue",
  19066. id: 11
  19067. },
  19068. booleanValue: {
  19069. type: "bool",
  19070. id: 1
  19071. },
  19072. integerValue: {
  19073. type: "int64",
  19074. id: 2
  19075. },
  19076. doubleValue: {
  19077. type: "double",
  19078. id: 3
  19079. },
  19080. timestampValue: {
  19081. type: "google.protobuf.Timestamp",
  19082. id: 10
  19083. },
  19084. stringValue: {
  19085. type: "string",
  19086. id: 17
  19087. },
  19088. bytesValue: {
  19089. type: "bytes",
  19090. id: 18
  19091. },
  19092. referenceValue: {
  19093. type: "string",
  19094. id: 5
  19095. },
  19096. geoPointValue: {
  19097. type: "google.type.LatLng",
  19098. id: 8
  19099. },
  19100. arrayValue: {
  19101. type: "ArrayValue",
  19102. id: 9
  19103. },
  19104. mapValue: {
  19105. type: "MapValue",
  19106. id: 6
  19107. }
  19108. }
  19109. },
  19110. ArrayValue: {
  19111. fields: {
  19112. values: {
  19113. rule: "repeated",
  19114. type: "Value",
  19115. id: 1
  19116. }
  19117. }
  19118. },
  19119. MapValue: {
  19120. fields: {
  19121. fields: {
  19122. keyType: "string",
  19123. type: "Value",
  19124. id: 1
  19125. }
  19126. }
  19127. },
  19128. Firestore: {
  19129. options: {
  19130. "(google.api.default_host)": "firestore.googleapis.com",
  19131. "(google.api.oauth_scopes)": "https://www.googleapis.com/auth/cloud-platform,https://www.googleapis.com/auth/datastore"
  19132. },
  19133. methods: {
  19134. GetDocument: {
  19135. requestType: "GetDocumentRequest",
  19136. responseType: "Document",
  19137. options: {
  19138. "(google.api.http).get": "/v1/{name=projects/*/databases/*/documents/*/**}"
  19139. },
  19140. parsedOptions: [
  19141. {
  19142. "(google.api.http)": {
  19143. get: "/v1/{name=projects/*/databases/*/documents/*/**}"
  19144. }
  19145. }
  19146. ]
  19147. },
  19148. ListDocuments: {
  19149. requestType: "ListDocumentsRequest",
  19150. responseType: "ListDocumentsResponse",
  19151. options: {
  19152. "(google.api.http).get": "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
  19153. },
  19154. parsedOptions: [
  19155. {
  19156. "(google.api.http)": {
  19157. get: "/v1/{parent=projects/*/databases/*/documents/*/**}/{collection_id}"
  19158. }
  19159. }
  19160. ]
  19161. },
  19162. UpdateDocument: {
  19163. requestType: "UpdateDocumentRequest",
  19164. responseType: "Document",
  19165. options: {
  19166. "(google.api.http).patch": "/v1/{document.name=projects/*/databases/*/documents/*/**}",
  19167. "(google.api.http).body": "document",
  19168. "(google.api.method_signature)": "document,update_mask"
  19169. },
  19170. parsedOptions: [
  19171. {
  19172. "(google.api.http)": {
  19173. patch: "/v1/{document.name=projects/*/databases/*/documents/*/**}",
  19174. body: "document"
  19175. }
  19176. },
  19177. {
  19178. "(google.api.method_signature)": "document,update_mask"
  19179. }
  19180. ]
  19181. },
  19182. DeleteDocument: {
  19183. requestType: "DeleteDocumentRequest",
  19184. responseType: "google.protobuf.Empty",
  19185. options: {
  19186. "(google.api.http).delete": "/v1/{name=projects/*/databases/*/documents/*/**}",
  19187. "(google.api.method_signature)": "name"
  19188. },
  19189. parsedOptions: [
  19190. {
  19191. "(google.api.http)": {
  19192. "delete": "/v1/{name=projects/*/databases/*/documents/*/**}"
  19193. }
  19194. },
  19195. {
  19196. "(google.api.method_signature)": "name"
  19197. }
  19198. ]
  19199. },
  19200. BatchGetDocuments: {
  19201. requestType: "BatchGetDocumentsRequest",
  19202. responseType: "BatchGetDocumentsResponse",
  19203. responseStream: true,
  19204. options: {
  19205. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchGet",
  19206. "(google.api.http).body": "*"
  19207. },
  19208. parsedOptions: [
  19209. {
  19210. "(google.api.http)": {
  19211. post: "/v1/{database=projects/*/databases/*}/documents:batchGet",
  19212. body: "*"
  19213. }
  19214. }
  19215. ]
  19216. },
  19217. BeginTransaction: {
  19218. requestType: "BeginTransactionRequest",
  19219. responseType: "BeginTransactionResponse",
  19220. options: {
  19221. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:beginTransaction",
  19222. "(google.api.http).body": "*",
  19223. "(google.api.method_signature)": "database"
  19224. },
  19225. parsedOptions: [
  19226. {
  19227. "(google.api.http)": {
  19228. post: "/v1/{database=projects/*/databases/*}/documents:beginTransaction",
  19229. body: "*"
  19230. }
  19231. },
  19232. {
  19233. "(google.api.method_signature)": "database"
  19234. }
  19235. ]
  19236. },
  19237. Commit: {
  19238. requestType: "CommitRequest",
  19239. responseType: "CommitResponse",
  19240. options: {
  19241. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:commit",
  19242. "(google.api.http).body": "*",
  19243. "(google.api.method_signature)": "database,writes"
  19244. },
  19245. parsedOptions: [
  19246. {
  19247. "(google.api.http)": {
  19248. post: "/v1/{database=projects/*/databases/*}/documents:commit",
  19249. body: "*"
  19250. }
  19251. },
  19252. {
  19253. "(google.api.method_signature)": "database,writes"
  19254. }
  19255. ]
  19256. },
  19257. Rollback: {
  19258. requestType: "RollbackRequest",
  19259. responseType: "google.protobuf.Empty",
  19260. options: {
  19261. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:rollback",
  19262. "(google.api.http).body": "*",
  19263. "(google.api.method_signature)": "database,transaction"
  19264. },
  19265. parsedOptions: [
  19266. {
  19267. "(google.api.http)": {
  19268. post: "/v1/{database=projects/*/databases/*}/documents:rollback",
  19269. body: "*"
  19270. }
  19271. },
  19272. {
  19273. "(google.api.method_signature)": "database,transaction"
  19274. }
  19275. ]
  19276. },
  19277. RunQuery: {
  19278. requestType: "RunQueryRequest",
  19279. responseType: "RunQueryResponse",
  19280. responseStream: true,
  19281. options: {
  19282. "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runQuery",
  19283. "(google.api.http).body": "*",
  19284. "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery",
  19285. "(google.api.http).additional_bindings.body": "*"
  19286. },
  19287. parsedOptions: [
  19288. {
  19289. "(google.api.http)": {
  19290. post: "/v1/{parent=projects/*/databases/*/documents}:runQuery",
  19291. body: "*",
  19292. additional_bindings: {
  19293. post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runQuery",
  19294. body: "*"
  19295. }
  19296. }
  19297. }
  19298. ]
  19299. },
  19300. RunAggregationQuery: {
  19301. requestType: "RunAggregationQueryRequest",
  19302. responseType: "RunAggregationQueryResponse",
  19303. responseStream: true,
  19304. options: {
  19305. "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery",
  19306. "(google.api.http).body": "*",
  19307. "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery",
  19308. "(google.api.http).additional_bindings.body": "*"
  19309. },
  19310. parsedOptions: [
  19311. {
  19312. "(google.api.http)": {
  19313. post: "/v1/{parent=projects/*/databases/*/documents}:runAggregationQuery",
  19314. body: "*",
  19315. additional_bindings: {
  19316. post: "/v1/{parent=projects/*/databases/*/documents/*/**}:runAggregationQuery",
  19317. body: "*"
  19318. }
  19319. }
  19320. }
  19321. ]
  19322. },
  19323. PartitionQuery: {
  19324. requestType: "PartitionQueryRequest",
  19325. responseType: "PartitionQueryResponse",
  19326. options: {
  19327. "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:partitionQuery",
  19328. "(google.api.http).body": "*",
  19329. "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery",
  19330. "(google.api.http).additional_bindings.body": "*"
  19331. },
  19332. parsedOptions: [
  19333. {
  19334. "(google.api.http)": {
  19335. post: "/v1/{parent=projects/*/databases/*/documents}:partitionQuery",
  19336. body: "*",
  19337. additional_bindings: {
  19338. post: "/v1/{parent=projects/*/databases/*/documents/*/**}:partitionQuery",
  19339. body: "*"
  19340. }
  19341. }
  19342. }
  19343. ]
  19344. },
  19345. Write: {
  19346. requestType: "WriteRequest",
  19347. requestStream: true,
  19348. responseType: "WriteResponse",
  19349. responseStream: true,
  19350. options: {
  19351. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:write",
  19352. "(google.api.http).body": "*"
  19353. },
  19354. parsedOptions: [
  19355. {
  19356. "(google.api.http)": {
  19357. post: "/v1/{database=projects/*/databases/*}/documents:write",
  19358. body: "*"
  19359. }
  19360. }
  19361. ]
  19362. },
  19363. Listen: {
  19364. requestType: "ListenRequest",
  19365. requestStream: true,
  19366. responseType: "ListenResponse",
  19367. responseStream: true,
  19368. options: {
  19369. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:listen",
  19370. "(google.api.http).body": "*"
  19371. },
  19372. parsedOptions: [
  19373. {
  19374. "(google.api.http)": {
  19375. post: "/v1/{database=projects/*/databases/*}/documents:listen",
  19376. body: "*"
  19377. }
  19378. }
  19379. ]
  19380. },
  19381. ListCollectionIds: {
  19382. requestType: "ListCollectionIdsRequest",
  19383. responseType: "ListCollectionIdsResponse",
  19384. options: {
  19385. "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds",
  19386. "(google.api.http).body": "*",
  19387. "(google.api.http).additional_bindings.post": "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds",
  19388. "(google.api.http).additional_bindings.body": "*",
  19389. "(google.api.method_signature)": "parent"
  19390. },
  19391. parsedOptions: [
  19392. {
  19393. "(google.api.http)": {
  19394. post: "/v1/{parent=projects/*/databases/*/documents}:listCollectionIds",
  19395. body: "*",
  19396. additional_bindings: {
  19397. post: "/v1/{parent=projects/*/databases/*/documents/*/**}:listCollectionIds",
  19398. body: "*"
  19399. }
  19400. }
  19401. },
  19402. {
  19403. "(google.api.method_signature)": "parent"
  19404. }
  19405. ]
  19406. },
  19407. BatchWrite: {
  19408. requestType: "BatchWriteRequest",
  19409. responseType: "BatchWriteResponse",
  19410. options: {
  19411. "(google.api.http).post": "/v1/{database=projects/*/databases/*}/documents:batchWrite",
  19412. "(google.api.http).body": "*"
  19413. },
  19414. parsedOptions: [
  19415. {
  19416. "(google.api.http)": {
  19417. post: "/v1/{database=projects/*/databases/*}/documents:batchWrite",
  19418. body: "*"
  19419. }
  19420. }
  19421. ]
  19422. },
  19423. CreateDocument: {
  19424. requestType: "CreateDocumentRequest",
  19425. responseType: "Document",
  19426. options: {
  19427. "(google.api.http).post": "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}",
  19428. "(google.api.http).body": "document"
  19429. },
  19430. parsedOptions: [
  19431. {
  19432. "(google.api.http)": {
  19433. post: "/v1/{parent=projects/*/databases/*/documents/**}/{collection_id}",
  19434. body: "document"
  19435. }
  19436. }
  19437. ]
  19438. }
  19439. }
  19440. },
  19441. GetDocumentRequest: {
  19442. oneofs: {
  19443. consistencySelector: {
  19444. oneof: [
  19445. "transaction",
  19446. "readTime"
  19447. ]
  19448. }
  19449. },
  19450. fields: {
  19451. name: {
  19452. type: "string",
  19453. id: 1,
  19454. options: {
  19455. "(google.api.field_behavior)": "REQUIRED"
  19456. }
  19457. },
  19458. mask: {
  19459. type: "DocumentMask",
  19460. id: 2
  19461. },
  19462. transaction: {
  19463. type: "bytes",
  19464. id: 3
  19465. },
  19466. readTime: {
  19467. type: "google.protobuf.Timestamp",
  19468. id: 5
  19469. }
  19470. }
  19471. },
  19472. ListDocumentsRequest: {
  19473. oneofs: {
  19474. consistencySelector: {
  19475. oneof: [
  19476. "transaction",
  19477. "readTime"
  19478. ]
  19479. }
  19480. },
  19481. fields: {
  19482. parent: {
  19483. type: "string",
  19484. id: 1,
  19485. options: {
  19486. "(google.api.field_behavior)": "REQUIRED"
  19487. }
  19488. },
  19489. collectionId: {
  19490. type: "string",
  19491. id: 2,
  19492. options: {
  19493. "(google.api.field_behavior)": "REQUIRED"
  19494. }
  19495. },
  19496. pageSize: {
  19497. type: "int32",
  19498. id: 3
  19499. },
  19500. pageToken: {
  19501. type: "string",
  19502. id: 4
  19503. },
  19504. orderBy: {
  19505. type: "string",
  19506. id: 6
  19507. },
  19508. mask: {
  19509. type: "DocumentMask",
  19510. id: 7
  19511. },
  19512. transaction: {
  19513. type: "bytes",
  19514. id: 8
  19515. },
  19516. readTime: {
  19517. type: "google.protobuf.Timestamp",
  19518. id: 10
  19519. },
  19520. showMissing: {
  19521. type: "bool",
  19522. id: 12
  19523. }
  19524. }
  19525. },
  19526. ListDocumentsResponse: {
  19527. fields: {
  19528. documents: {
  19529. rule: "repeated",
  19530. type: "Document",
  19531. id: 1
  19532. },
  19533. nextPageToken: {
  19534. type: "string",
  19535. id: 2
  19536. }
  19537. }
  19538. },
  19539. CreateDocumentRequest: {
  19540. fields: {
  19541. parent: {
  19542. type: "string",
  19543. id: 1,
  19544. options: {
  19545. "(google.api.field_behavior)": "REQUIRED"
  19546. }
  19547. },
  19548. collectionId: {
  19549. type: "string",
  19550. id: 2,
  19551. options: {
  19552. "(google.api.field_behavior)": "REQUIRED"
  19553. }
  19554. },
  19555. documentId: {
  19556. type: "string",
  19557. id: 3
  19558. },
  19559. document: {
  19560. type: "Document",
  19561. id: 4,
  19562. options: {
  19563. "(google.api.field_behavior)": "REQUIRED"
  19564. }
  19565. },
  19566. mask: {
  19567. type: "DocumentMask",
  19568. id: 5
  19569. }
  19570. }
  19571. },
  19572. UpdateDocumentRequest: {
  19573. fields: {
  19574. document: {
  19575. type: "Document",
  19576. id: 1,
  19577. options: {
  19578. "(google.api.field_behavior)": "REQUIRED"
  19579. }
  19580. },
  19581. updateMask: {
  19582. type: "DocumentMask",
  19583. id: 2
  19584. },
  19585. mask: {
  19586. type: "DocumentMask",
  19587. id: 3
  19588. },
  19589. currentDocument: {
  19590. type: "Precondition",
  19591. id: 4
  19592. }
  19593. }
  19594. },
  19595. DeleteDocumentRequest: {
  19596. fields: {
  19597. name: {
  19598. type: "string",
  19599. id: 1,
  19600. options: {
  19601. "(google.api.field_behavior)": "REQUIRED"
  19602. }
  19603. },
  19604. currentDocument: {
  19605. type: "Precondition",
  19606. id: 2
  19607. }
  19608. }
  19609. },
  19610. BatchGetDocumentsRequest: {
  19611. oneofs: {
  19612. consistencySelector: {
  19613. oneof: [
  19614. "transaction",
  19615. "newTransaction",
  19616. "readTime"
  19617. ]
  19618. }
  19619. },
  19620. fields: {
  19621. database: {
  19622. type: "string",
  19623. id: 1,
  19624. options: {
  19625. "(google.api.field_behavior)": "REQUIRED"
  19626. }
  19627. },
  19628. documents: {
  19629. rule: "repeated",
  19630. type: "string",
  19631. id: 2
  19632. },
  19633. mask: {
  19634. type: "DocumentMask",
  19635. id: 3
  19636. },
  19637. transaction: {
  19638. type: "bytes",
  19639. id: 4
  19640. },
  19641. newTransaction: {
  19642. type: "TransactionOptions",
  19643. id: 5
  19644. },
  19645. readTime: {
  19646. type: "google.protobuf.Timestamp",
  19647. id: 7
  19648. }
  19649. }
  19650. },
  19651. BatchGetDocumentsResponse: {
  19652. oneofs: {
  19653. result: {
  19654. oneof: [
  19655. "found",
  19656. "missing"
  19657. ]
  19658. }
  19659. },
  19660. fields: {
  19661. found: {
  19662. type: "Document",
  19663. id: 1
  19664. },
  19665. missing: {
  19666. type: "string",
  19667. id: 2
  19668. },
  19669. transaction: {
  19670. type: "bytes",
  19671. id: 3
  19672. },
  19673. readTime: {
  19674. type: "google.protobuf.Timestamp",
  19675. id: 4
  19676. }
  19677. }
  19678. },
  19679. BeginTransactionRequest: {
  19680. fields: {
  19681. database: {
  19682. type: "string",
  19683. id: 1,
  19684. options: {
  19685. "(google.api.field_behavior)": "REQUIRED"
  19686. }
  19687. },
  19688. options: {
  19689. type: "TransactionOptions",
  19690. id: 2
  19691. }
  19692. }
  19693. },
  19694. BeginTransactionResponse: {
  19695. fields: {
  19696. transaction: {
  19697. type: "bytes",
  19698. id: 1
  19699. }
  19700. }
  19701. },
  19702. CommitRequest: {
  19703. fields: {
  19704. database: {
  19705. type: "string",
  19706. id: 1,
  19707. options: {
  19708. "(google.api.field_behavior)": "REQUIRED"
  19709. }
  19710. },
  19711. writes: {
  19712. rule: "repeated",
  19713. type: "Write",
  19714. id: 2
  19715. },
  19716. transaction: {
  19717. type: "bytes",
  19718. id: 3
  19719. }
  19720. }
  19721. },
  19722. CommitResponse: {
  19723. fields: {
  19724. writeResults: {
  19725. rule: "repeated",
  19726. type: "WriteResult",
  19727. id: 1
  19728. },
  19729. commitTime: {
  19730. type: "google.protobuf.Timestamp",
  19731. id: 2
  19732. }
  19733. }
  19734. },
  19735. RollbackRequest: {
  19736. fields: {
  19737. database: {
  19738. type: "string",
  19739. id: 1,
  19740. options: {
  19741. "(google.api.field_behavior)": "REQUIRED"
  19742. }
  19743. },
  19744. transaction: {
  19745. type: "bytes",
  19746. id: 2,
  19747. options: {
  19748. "(google.api.field_behavior)": "REQUIRED"
  19749. }
  19750. }
  19751. }
  19752. },
  19753. RunQueryRequest: {
  19754. oneofs: {
  19755. queryType: {
  19756. oneof: [
  19757. "structuredQuery"
  19758. ]
  19759. },
  19760. consistencySelector: {
  19761. oneof: [
  19762. "transaction",
  19763. "newTransaction",
  19764. "readTime"
  19765. ]
  19766. }
  19767. },
  19768. fields: {
  19769. parent: {
  19770. type: "string",
  19771. id: 1,
  19772. options: {
  19773. "(google.api.field_behavior)": "REQUIRED"
  19774. }
  19775. },
  19776. structuredQuery: {
  19777. type: "StructuredQuery",
  19778. id: 2
  19779. },
  19780. transaction: {
  19781. type: "bytes",
  19782. id: 5
  19783. },
  19784. newTransaction: {
  19785. type: "TransactionOptions",
  19786. id: 6
  19787. },
  19788. readTime: {
  19789. type: "google.protobuf.Timestamp",
  19790. id: 7
  19791. }
  19792. }
  19793. },
  19794. RunQueryResponse: {
  19795. fields: {
  19796. transaction: {
  19797. type: "bytes",
  19798. id: 2
  19799. },
  19800. document: {
  19801. type: "Document",
  19802. id: 1
  19803. },
  19804. readTime: {
  19805. type: "google.protobuf.Timestamp",
  19806. id: 3
  19807. },
  19808. skippedResults: {
  19809. type: "int32",
  19810. id: 4
  19811. }
  19812. }
  19813. },
  19814. RunAggregationQueryRequest: {
  19815. oneofs: {
  19816. queryType: {
  19817. oneof: [
  19818. "structuredAggregationQuery"
  19819. ]
  19820. },
  19821. consistencySelector: {
  19822. oneof: [
  19823. "transaction",
  19824. "newTransaction",
  19825. "readTime"
  19826. ]
  19827. }
  19828. },
  19829. fields: {
  19830. parent: {
  19831. type: "string",
  19832. id: 1,
  19833. options: {
  19834. "(google.api.field_behavior)": "REQUIRED"
  19835. }
  19836. },
  19837. structuredAggregationQuery: {
  19838. type: "StructuredAggregationQuery",
  19839. id: 2
  19840. },
  19841. transaction: {
  19842. type: "bytes",
  19843. id: 4
  19844. },
  19845. newTransaction: {
  19846. type: "TransactionOptions",
  19847. id: 5
  19848. },
  19849. readTime: {
  19850. type: "google.protobuf.Timestamp",
  19851. id: 6
  19852. }
  19853. }
  19854. },
  19855. RunAggregationQueryResponse: {
  19856. fields: {
  19857. result: {
  19858. type: "AggregationResult",
  19859. id: 1
  19860. },
  19861. transaction: {
  19862. type: "bytes",
  19863. id: 2
  19864. },
  19865. readTime: {
  19866. type: "google.protobuf.Timestamp",
  19867. id: 3
  19868. }
  19869. }
  19870. },
  19871. PartitionQueryRequest: {
  19872. oneofs: {
  19873. queryType: {
  19874. oneof: [
  19875. "structuredQuery"
  19876. ]
  19877. }
  19878. },
  19879. fields: {
  19880. parent: {
  19881. type: "string",
  19882. id: 1,
  19883. options: {
  19884. "(google.api.field_behavior)": "REQUIRED"
  19885. }
  19886. },
  19887. structuredQuery: {
  19888. type: "StructuredQuery",
  19889. id: 2
  19890. },
  19891. partitionCount: {
  19892. type: "int64",
  19893. id: 3
  19894. },
  19895. pageToken: {
  19896. type: "string",
  19897. id: 4
  19898. },
  19899. pageSize: {
  19900. type: "int32",
  19901. id: 5
  19902. }
  19903. }
  19904. },
  19905. PartitionQueryResponse: {
  19906. fields: {
  19907. partitions: {
  19908. rule: "repeated",
  19909. type: "Cursor",
  19910. id: 1
  19911. },
  19912. nextPageToken: {
  19913. type: "string",
  19914. id: 2
  19915. }
  19916. }
  19917. },
  19918. WriteRequest: {
  19919. fields: {
  19920. database: {
  19921. type: "string",
  19922. id: 1,
  19923. options: {
  19924. "(google.api.field_behavior)": "REQUIRED"
  19925. }
  19926. },
  19927. streamId: {
  19928. type: "string",
  19929. id: 2
  19930. },
  19931. writes: {
  19932. rule: "repeated",
  19933. type: "Write",
  19934. id: 3
  19935. },
  19936. streamToken: {
  19937. type: "bytes",
  19938. id: 4
  19939. },
  19940. labels: {
  19941. keyType: "string",
  19942. type: "string",
  19943. id: 5
  19944. }
  19945. }
  19946. },
  19947. WriteResponse: {
  19948. fields: {
  19949. streamId: {
  19950. type: "string",
  19951. id: 1
  19952. },
  19953. streamToken: {
  19954. type: "bytes",
  19955. id: 2
  19956. },
  19957. writeResults: {
  19958. rule: "repeated",
  19959. type: "WriteResult",
  19960. id: 3
  19961. },
  19962. commitTime: {
  19963. type: "google.protobuf.Timestamp",
  19964. id: 4
  19965. }
  19966. }
  19967. },
  19968. ListenRequest: {
  19969. oneofs: {
  19970. targetChange: {
  19971. oneof: [
  19972. "addTarget",
  19973. "removeTarget"
  19974. ]
  19975. }
  19976. },
  19977. fields: {
  19978. database: {
  19979. type: "string",
  19980. id: 1,
  19981. options: {
  19982. "(google.api.field_behavior)": "REQUIRED"
  19983. }
  19984. },
  19985. addTarget: {
  19986. type: "Target",
  19987. id: 2
  19988. },
  19989. removeTarget: {
  19990. type: "int32",
  19991. id: 3
  19992. },
  19993. labels: {
  19994. keyType: "string",
  19995. type: "string",
  19996. id: 4
  19997. }
  19998. }
  19999. },
  20000. ListenResponse: {
  20001. oneofs: {
  20002. responseType: {
  20003. oneof: [
  20004. "targetChange",
  20005. "documentChange",
  20006. "documentDelete",
  20007. "documentRemove",
  20008. "filter"
  20009. ]
  20010. }
  20011. },
  20012. fields: {
  20013. targetChange: {
  20014. type: "TargetChange",
  20015. id: 2
  20016. },
  20017. documentChange: {
  20018. type: "DocumentChange",
  20019. id: 3
  20020. },
  20021. documentDelete: {
  20022. type: "DocumentDelete",
  20023. id: 4
  20024. },
  20025. documentRemove: {
  20026. type: "DocumentRemove",
  20027. id: 6
  20028. },
  20029. filter: {
  20030. type: "ExistenceFilter",
  20031. id: 5
  20032. }
  20033. }
  20034. },
  20035. Target: {
  20036. oneofs: {
  20037. targetType: {
  20038. oneof: [
  20039. "query",
  20040. "documents"
  20041. ]
  20042. },
  20043. resumeType: {
  20044. oneof: [
  20045. "resumeToken",
  20046. "readTime"
  20047. ]
  20048. }
  20049. },
  20050. fields: {
  20051. query: {
  20052. type: "QueryTarget",
  20053. id: 2
  20054. },
  20055. documents: {
  20056. type: "DocumentsTarget",
  20057. id: 3
  20058. },
  20059. resumeToken: {
  20060. type: "bytes",
  20061. id: 4
  20062. },
  20063. readTime: {
  20064. type: "google.protobuf.Timestamp",
  20065. id: 11
  20066. },
  20067. targetId: {
  20068. type: "int32",
  20069. id: 5
  20070. },
  20071. once: {
  20072. type: "bool",
  20073. id: 6
  20074. }
  20075. },
  20076. nested: {
  20077. DocumentsTarget: {
  20078. fields: {
  20079. documents: {
  20080. rule: "repeated",
  20081. type: "string",
  20082. id: 2
  20083. }
  20084. }
  20085. },
  20086. QueryTarget: {
  20087. oneofs: {
  20088. queryType: {
  20089. oneof: [
  20090. "structuredQuery"
  20091. ]
  20092. }
  20093. },
  20094. fields: {
  20095. parent: {
  20096. type: "string",
  20097. id: 1
  20098. },
  20099. structuredQuery: {
  20100. type: "StructuredQuery",
  20101. id: 2
  20102. }
  20103. }
  20104. }
  20105. }
  20106. },
  20107. TargetChange: {
  20108. fields: {
  20109. targetChangeType: {
  20110. type: "TargetChangeType",
  20111. id: 1
  20112. },
  20113. targetIds: {
  20114. rule: "repeated",
  20115. type: "int32",
  20116. id: 2
  20117. },
  20118. cause: {
  20119. type: "google.rpc.Status",
  20120. id: 3
  20121. },
  20122. resumeToken: {
  20123. type: "bytes",
  20124. id: 4
  20125. },
  20126. readTime: {
  20127. type: "google.protobuf.Timestamp",
  20128. id: 6
  20129. }
  20130. },
  20131. nested: {
  20132. TargetChangeType: {
  20133. values: {
  20134. NO_CHANGE: 0,
  20135. ADD: 1,
  20136. REMOVE: 2,
  20137. CURRENT: 3,
  20138. RESET: 4
  20139. }
  20140. }
  20141. }
  20142. },
  20143. ListCollectionIdsRequest: {
  20144. fields: {
  20145. parent: {
  20146. type: "string",
  20147. id: 1,
  20148. options: {
  20149. "(google.api.field_behavior)": "REQUIRED"
  20150. }
  20151. },
  20152. pageSize: {
  20153. type: "int32",
  20154. id: 2
  20155. },
  20156. pageToken: {
  20157. type: "string",
  20158. id: 3
  20159. }
  20160. }
  20161. },
  20162. ListCollectionIdsResponse: {
  20163. fields: {
  20164. collectionIds: {
  20165. rule: "repeated",
  20166. type: "string",
  20167. id: 1
  20168. },
  20169. nextPageToken: {
  20170. type: "string",
  20171. id: 2
  20172. }
  20173. }
  20174. },
  20175. BatchWriteRequest: {
  20176. fields: {
  20177. database: {
  20178. type: "string",
  20179. id: 1,
  20180. options: {
  20181. "(google.api.field_behavior)": "REQUIRED"
  20182. }
  20183. },
  20184. writes: {
  20185. rule: "repeated",
  20186. type: "Write",
  20187. id: 2
  20188. },
  20189. labels: {
  20190. keyType: "string",
  20191. type: "string",
  20192. id: 3
  20193. }
  20194. }
  20195. },
  20196. BatchWriteResponse: {
  20197. fields: {
  20198. writeResults: {
  20199. rule: "repeated",
  20200. type: "WriteResult",
  20201. id: 1
  20202. },
  20203. status: {
  20204. rule: "repeated",
  20205. type: "google.rpc.Status",
  20206. id: 2
  20207. }
  20208. }
  20209. },
  20210. StructuredQuery: {
  20211. fields: {
  20212. select: {
  20213. type: "Projection",
  20214. id: 1
  20215. },
  20216. from: {
  20217. rule: "repeated",
  20218. type: "CollectionSelector",
  20219. id: 2
  20220. },
  20221. where: {
  20222. type: "Filter",
  20223. id: 3
  20224. },
  20225. orderBy: {
  20226. rule: "repeated",
  20227. type: "Order",
  20228. id: 4
  20229. },
  20230. startAt: {
  20231. type: "Cursor",
  20232. id: 7
  20233. },
  20234. endAt: {
  20235. type: "Cursor",
  20236. id: 8
  20237. },
  20238. offset: {
  20239. type: "int32",
  20240. id: 6
  20241. },
  20242. limit: {
  20243. type: "google.protobuf.Int32Value",
  20244. id: 5
  20245. }
  20246. },
  20247. nested: {
  20248. CollectionSelector: {
  20249. fields: {
  20250. collectionId: {
  20251. type: "string",
  20252. id: 2
  20253. },
  20254. allDescendants: {
  20255. type: "bool",
  20256. id: 3
  20257. }
  20258. }
  20259. },
  20260. Filter: {
  20261. oneofs: {
  20262. filterType: {
  20263. oneof: [
  20264. "compositeFilter",
  20265. "fieldFilter",
  20266. "unaryFilter"
  20267. ]
  20268. }
  20269. },
  20270. fields: {
  20271. compositeFilter: {
  20272. type: "CompositeFilter",
  20273. id: 1
  20274. },
  20275. fieldFilter: {
  20276. type: "FieldFilter",
  20277. id: 2
  20278. },
  20279. unaryFilter: {
  20280. type: "UnaryFilter",
  20281. id: 3
  20282. }
  20283. }
  20284. },
  20285. CompositeFilter: {
  20286. fields: {
  20287. op: {
  20288. type: "Operator",
  20289. id: 1
  20290. },
  20291. filters: {
  20292. rule: "repeated",
  20293. type: "Filter",
  20294. id: 2
  20295. }
  20296. },
  20297. nested: {
  20298. Operator: {
  20299. values: {
  20300. OPERATOR_UNSPECIFIED: 0,
  20301. AND: 1,
  20302. OR: 2
  20303. }
  20304. }
  20305. }
  20306. },
  20307. FieldFilter: {
  20308. fields: {
  20309. field: {
  20310. type: "FieldReference",
  20311. id: 1
  20312. },
  20313. op: {
  20314. type: "Operator",
  20315. id: 2
  20316. },
  20317. value: {
  20318. type: "Value",
  20319. id: 3
  20320. }
  20321. },
  20322. nested: {
  20323. Operator: {
  20324. values: {
  20325. OPERATOR_UNSPECIFIED: 0,
  20326. LESS_THAN: 1,
  20327. LESS_THAN_OR_EQUAL: 2,
  20328. GREATER_THAN: 3,
  20329. GREATER_THAN_OR_EQUAL: 4,
  20330. EQUAL: 5,
  20331. NOT_EQUAL: 6,
  20332. ARRAY_CONTAINS: 7,
  20333. IN: 8,
  20334. ARRAY_CONTAINS_ANY: 9,
  20335. NOT_IN: 10
  20336. }
  20337. }
  20338. }
  20339. },
  20340. UnaryFilter: {
  20341. oneofs: {
  20342. operandType: {
  20343. oneof: [
  20344. "field"
  20345. ]
  20346. }
  20347. },
  20348. fields: {
  20349. op: {
  20350. type: "Operator",
  20351. id: 1
  20352. },
  20353. field: {
  20354. type: "FieldReference",
  20355. id: 2
  20356. }
  20357. },
  20358. nested: {
  20359. Operator: {
  20360. values: {
  20361. OPERATOR_UNSPECIFIED: 0,
  20362. IS_NAN: 2,
  20363. IS_NULL: 3,
  20364. IS_NOT_NAN: 4,
  20365. IS_NOT_NULL: 5
  20366. }
  20367. }
  20368. }
  20369. },
  20370. Order: {
  20371. fields: {
  20372. field: {
  20373. type: "FieldReference",
  20374. id: 1
  20375. },
  20376. direction: {
  20377. type: "Direction",
  20378. id: 2
  20379. }
  20380. }
  20381. },
  20382. FieldReference: {
  20383. fields: {
  20384. fieldPath: {
  20385. type: "string",
  20386. id: 2
  20387. }
  20388. }
  20389. },
  20390. Projection: {
  20391. fields: {
  20392. fields: {
  20393. rule: "repeated",
  20394. type: "FieldReference",
  20395. id: 2
  20396. }
  20397. }
  20398. },
  20399. Direction: {
  20400. values: {
  20401. DIRECTION_UNSPECIFIED: 0,
  20402. ASCENDING: 1,
  20403. DESCENDING: 2
  20404. }
  20405. }
  20406. }
  20407. },
  20408. StructuredAggregationQuery: {
  20409. oneofs: {
  20410. queryType: {
  20411. oneof: [
  20412. "structuredQuery"
  20413. ]
  20414. }
  20415. },
  20416. fields: {
  20417. structuredQuery: {
  20418. type: "StructuredQuery",
  20419. id: 1
  20420. },
  20421. aggregations: {
  20422. rule: "repeated",
  20423. type: "Aggregation",
  20424. id: 3
  20425. }
  20426. },
  20427. nested: {
  20428. Aggregation: {
  20429. oneofs: {
  20430. operator: {
  20431. oneof: [
  20432. "count"
  20433. ]
  20434. }
  20435. },
  20436. fields: {
  20437. count: {
  20438. type: "Count",
  20439. id: 1
  20440. },
  20441. alias: {
  20442. type: "string",
  20443. id: 7
  20444. }
  20445. },
  20446. nested: {
  20447. Count: {
  20448. fields: {
  20449. upTo: {
  20450. type: "google.protobuf.Int64Value",
  20451. id: 1
  20452. }
  20453. }
  20454. }
  20455. }
  20456. }
  20457. }
  20458. },
  20459. Cursor: {
  20460. fields: {
  20461. values: {
  20462. rule: "repeated",
  20463. type: "Value",
  20464. id: 1
  20465. },
  20466. before: {
  20467. type: "bool",
  20468. id: 2
  20469. }
  20470. }
  20471. },
  20472. Write: {
  20473. oneofs: {
  20474. operation: {
  20475. oneof: [
  20476. "update",
  20477. "delete",
  20478. "verify",
  20479. "transform"
  20480. ]
  20481. }
  20482. },
  20483. fields: {
  20484. update: {
  20485. type: "Document",
  20486. id: 1
  20487. },
  20488. "delete": {
  20489. type: "string",
  20490. id: 2
  20491. },
  20492. verify: {
  20493. type: "string",
  20494. id: 5
  20495. },
  20496. transform: {
  20497. type: "DocumentTransform",
  20498. id: 6
  20499. },
  20500. updateMask: {
  20501. type: "DocumentMask",
  20502. id: 3
  20503. },
  20504. updateTransforms: {
  20505. rule: "repeated",
  20506. type: "DocumentTransform.FieldTransform",
  20507. id: 7
  20508. },
  20509. currentDocument: {
  20510. type: "Precondition",
  20511. id: 4
  20512. }
  20513. }
  20514. },
  20515. DocumentTransform: {
  20516. fields: {
  20517. document: {
  20518. type: "string",
  20519. id: 1
  20520. },
  20521. fieldTransforms: {
  20522. rule: "repeated",
  20523. type: "FieldTransform",
  20524. id: 2
  20525. }
  20526. },
  20527. nested: {
  20528. FieldTransform: {
  20529. oneofs: {
  20530. transformType: {
  20531. oneof: [
  20532. "setToServerValue",
  20533. "increment",
  20534. "maximum",
  20535. "minimum",
  20536. "appendMissingElements",
  20537. "removeAllFromArray"
  20538. ]
  20539. }
  20540. },
  20541. fields: {
  20542. fieldPath: {
  20543. type: "string",
  20544. id: 1
  20545. },
  20546. setToServerValue: {
  20547. type: "ServerValue",
  20548. id: 2
  20549. },
  20550. increment: {
  20551. type: "Value",
  20552. id: 3
  20553. },
  20554. maximum: {
  20555. type: "Value",
  20556. id: 4
  20557. },
  20558. minimum: {
  20559. type: "Value",
  20560. id: 5
  20561. },
  20562. appendMissingElements: {
  20563. type: "ArrayValue",
  20564. id: 6
  20565. },
  20566. removeAllFromArray: {
  20567. type: "ArrayValue",
  20568. id: 7
  20569. }
  20570. },
  20571. nested: {
  20572. ServerValue: {
  20573. values: {
  20574. SERVER_VALUE_UNSPECIFIED: 0,
  20575. REQUEST_TIME: 1
  20576. }
  20577. }
  20578. }
  20579. }
  20580. }
  20581. },
  20582. WriteResult: {
  20583. fields: {
  20584. updateTime: {
  20585. type: "google.protobuf.Timestamp",
  20586. id: 1
  20587. },
  20588. transformResults: {
  20589. rule: "repeated",
  20590. type: "Value",
  20591. id: 2
  20592. }
  20593. }
  20594. },
  20595. DocumentChange: {
  20596. fields: {
  20597. document: {
  20598. type: "Document",
  20599. id: 1
  20600. },
  20601. targetIds: {
  20602. rule: "repeated",
  20603. type: "int32",
  20604. id: 5
  20605. },
  20606. removedTargetIds: {
  20607. rule: "repeated",
  20608. type: "int32",
  20609. id: 6
  20610. }
  20611. }
  20612. },
  20613. DocumentDelete: {
  20614. fields: {
  20615. document: {
  20616. type: "string",
  20617. id: 1
  20618. },
  20619. removedTargetIds: {
  20620. rule: "repeated",
  20621. type: "int32",
  20622. id: 6
  20623. },
  20624. readTime: {
  20625. type: "google.protobuf.Timestamp",
  20626. id: 4
  20627. }
  20628. }
  20629. },
  20630. DocumentRemove: {
  20631. fields: {
  20632. document: {
  20633. type: "string",
  20634. id: 1
  20635. },
  20636. removedTargetIds: {
  20637. rule: "repeated",
  20638. type: "int32",
  20639. id: 2
  20640. },
  20641. readTime: {
  20642. type: "google.protobuf.Timestamp",
  20643. id: 4
  20644. }
  20645. }
  20646. },
  20647. ExistenceFilter: {
  20648. fields: {
  20649. targetId: {
  20650. type: "int32",
  20651. id: 1
  20652. },
  20653. count: {
  20654. type: "int32",
  20655. id: 2
  20656. }
  20657. }
  20658. }
  20659. }
  20660. }
  20661. }
  20662. },
  20663. api: {
  20664. options: {
  20665. go_package: "google.golang.org/genproto/googleapis/api/annotations;annotations",
  20666. java_multiple_files: true,
  20667. java_outer_classname: "HttpProto",
  20668. java_package: "com.google.api",
  20669. objc_class_prefix: "GAPI",
  20670. cc_enable_arenas: true
  20671. },
  20672. nested: {
  20673. http: {
  20674. type: "HttpRule",
  20675. id: 72295728,
  20676. extend: "google.protobuf.MethodOptions"
  20677. },
  20678. Http: {
  20679. fields: {
  20680. rules: {
  20681. rule: "repeated",
  20682. type: "HttpRule",
  20683. id: 1
  20684. }
  20685. }
  20686. },
  20687. HttpRule: {
  20688. oneofs: {
  20689. pattern: {
  20690. oneof: [
  20691. "get",
  20692. "put",
  20693. "post",
  20694. "delete",
  20695. "patch",
  20696. "custom"
  20697. ]
  20698. }
  20699. },
  20700. fields: {
  20701. get: {
  20702. type: "string",
  20703. id: 2
  20704. },
  20705. put: {
  20706. type: "string",
  20707. id: 3
  20708. },
  20709. post: {
  20710. type: "string",
  20711. id: 4
  20712. },
  20713. "delete": {
  20714. type: "string",
  20715. id: 5
  20716. },
  20717. patch: {
  20718. type: "string",
  20719. id: 6
  20720. },
  20721. custom: {
  20722. type: "CustomHttpPattern",
  20723. id: 8
  20724. },
  20725. selector: {
  20726. type: "string",
  20727. id: 1
  20728. },
  20729. body: {
  20730. type: "string",
  20731. id: 7
  20732. },
  20733. additionalBindings: {
  20734. rule: "repeated",
  20735. type: "HttpRule",
  20736. id: 11
  20737. }
  20738. }
  20739. },
  20740. CustomHttpPattern: {
  20741. fields: {
  20742. kind: {
  20743. type: "string",
  20744. id: 1
  20745. },
  20746. path: {
  20747. type: "string",
  20748. id: 2
  20749. }
  20750. }
  20751. },
  20752. methodSignature: {
  20753. rule: "repeated",
  20754. type: "string",
  20755. id: 1051,
  20756. extend: "google.protobuf.MethodOptions"
  20757. },
  20758. defaultHost: {
  20759. type: "string",
  20760. id: 1049,
  20761. extend: "google.protobuf.ServiceOptions"
  20762. },
  20763. oauthScopes: {
  20764. type: "string",
  20765. id: 1050,
  20766. extend: "google.protobuf.ServiceOptions"
  20767. },
  20768. fieldBehavior: {
  20769. rule: "repeated",
  20770. type: "google.api.FieldBehavior",
  20771. id: 1052,
  20772. extend: "google.protobuf.FieldOptions"
  20773. },
  20774. FieldBehavior: {
  20775. values: {
  20776. FIELD_BEHAVIOR_UNSPECIFIED: 0,
  20777. OPTIONAL: 1,
  20778. REQUIRED: 2,
  20779. OUTPUT_ONLY: 3,
  20780. INPUT_ONLY: 4,
  20781. IMMUTABLE: 5,
  20782. UNORDERED_LIST: 6,
  20783. NON_EMPTY_DEFAULT: 7
  20784. }
  20785. }
  20786. }
  20787. },
  20788. type: {
  20789. options: {
  20790. cc_enable_arenas: true,
  20791. go_package: "google.golang.org/genproto/googleapis/type/latlng;latlng",
  20792. java_multiple_files: true,
  20793. java_outer_classname: "LatLngProto",
  20794. java_package: "com.google.type",
  20795. objc_class_prefix: "GTP"
  20796. },
  20797. nested: {
  20798. LatLng: {
  20799. fields: {
  20800. latitude: {
  20801. type: "double",
  20802. id: 1
  20803. },
  20804. longitude: {
  20805. type: "double",
  20806. id: 2
  20807. }
  20808. }
  20809. }
  20810. }
  20811. },
  20812. rpc: {
  20813. options: {
  20814. cc_enable_arenas: true,
  20815. go_package: "google.golang.org/genproto/googleapis/rpc/status;status",
  20816. java_multiple_files: true,
  20817. java_outer_classname: "StatusProto",
  20818. java_package: "com.google.rpc",
  20819. objc_class_prefix: "RPC"
  20820. },
  20821. nested: {
  20822. Status: {
  20823. fields: {
  20824. code: {
  20825. type: "int32",
  20826. id: 1
  20827. },
  20828. message: {
  20829. type: "string",
  20830. id: 2
  20831. },
  20832. details: {
  20833. rule: "repeated",
  20834. type: "google.protobuf.Any",
  20835. id: 3
  20836. }
  20837. }
  20838. }
  20839. }
  20840. }
  20841. }
  20842. }
  20843. };
// CommonJS-style wrapper: @grpc/proto-loader's fromJSON() expects the
// generated descriptor JSON under a top-level `nested` key.
var protos = {
    nested: nested
};
// Frozen, ES-module-shaped namespace view of the same descriptors (named
// export `nested` plus a `default` export); consumed by loadProtos() below.
var protos$1 = /*#__PURE__*/Object.freeze({
    __proto__: null,
    nested: nested,
    'default': protos
});
  20852. /**
  20853. * @license
  20854. * Copyright 2020 Google LLC
  20855. *
  20856. * Licensed under the Apache License, Version 2.0 (the "License");
  20857. * you may not use this file except in compliance with the License.
  20858. * You may obtain a copy of the License at
  20859. *
  20860. * http://www.apache.org/licenses/LICENSE-2.0
  20861. *
  20862. * Unless required by applicable law or agreed to in writing, software
  20863. * distributed under the License is distributed on an "AS IS" BASIS,
  20864. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  20865. * See the License for the specific language governing permissions and
  20866. * limitations under the License.
  20867. */
/**
 * Conversion options handed to @grpc/proto-loader.
 * Used by tests so we can match @grpc/proto-loader behavior.
 */
const protoLoaderOptions = {
    longs: String,   // decode 64-bit integers as strings (avoids precision loss)
    enums: String,   // decode enum values as their string names
    defaults: true,  // populate default values on decoded messages
    oneofs: false    // do not add virtual oneof properties
};
  20875. /**
  20876. * Loads the protocol buffer definitions for Firestore.
  20877. *
  20878. * @returns The GrpcObject representing our protos.
  20879. */
  20880. function loadProtos() {
  20881. const packageDefinition = protoLoader.fromJSON(protos$1, protoLoaderOptions);
  20882. return grpc.loadPackageDefinition(packageDefinition);
  20883. }
  20884. /**
  20885. * @license
  20886. * Copyright 2020 Google LLC
  20887. *
  20888. * Licensed under the Apache License, Version 2.0 (the "License");
  20889. * you may not use this file except in compliance with the License.
  20890. * You may obtain a copy of the License at
  20891. *
  20892. * http://www.apache.org/licenses/LICENSE-2.0
  20893. *
  20894. * Unless required by applicable law or agreed to in writing, software
  20895. * distributed under the License is distributed on an "AS IS" BASIS,
  20896. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  20897. * See the License for the specific language governing permissions and
  20898. * limitations under the License.
  20899. */
  20900. /** Loads the GRPC stack */
  20901. function newConnection(databaseInfo) {
  20902. const protos = loadProtos();
  20903. return new GrpcConnection(protos, databaseInfo);
  20904. }
  20905. /** Return the Platform-specific connectivity monitor. */
  20906. function newConnectivityMonitor() {
  20907. return new NoopConnectivityMonitor();
  20908. }
  20909. /**
  20910. * @license
  20911. * Copyright 2020 Google LLC
  20912. *
  20913. * Licensed under the Apache License, Version 2.0 (the "License");
  20914. * you may not use this file except in compliance with the License.
  20915. * You may obtain a copy of the License at
  20916. *
  20917. * http://www.apache.org/licenses/LICENSE-2.0
  20918. *
  20919. * Unless required by applicable law or agreed to in writing, software
  20920. * distributed under the License is distributed on an "AS IS" BASIS,
  20921. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  20922. * See the License for the specific language governing permissions and
  20923. * limitations under the License.
  20924. */
  20925. /** The Platform's 'window' implementation or null if not available. */
  20926. function getWindow() {
  20927. if (process.env.USE_MOCK_PERSISTENCE === 'YES') {
  20928. // eslint-disable-next-line no-restricted-globals
  20929. return window;
  20930. }
  20931. return null;
  20932. }
  20933. /** The Platform's 'document' implementation or null if not available. */
  20934. function getDocument() {
  20935. return null;
  20936. }
  20937. /**
  20938. * @license
  20939. * Copyright 2020 Google LLC
  20940. *
  20941. * Licensed under the Apache License, Version 2.0 (the "License");
  20942. * you may not use this file except in compliance with the License.
  20943. * You may obtain a copy of the License at
  20944. *
  20945. * http://www.apache.org/licenses/LICENSE-2.0
  20946. *
  20947. * Unless required by applicable law or agreed to in writing, software
  20948. * distributed under the License is distributed on an "AS IS" BASIS,
  20949. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  20950. * See the License for the specific language governing permissions and
  20951. * limitations under the License.
  20952. */
  20953. function newSerializer(databaseId) {
  20954. return new JsonProtoSerializer(databaseId, /* useProto3Json= */ false);
  20955. }
  20956. /**
  20957. * An instance of the Platform's 'TextEncoder' implementation.
  20958. */
  20959. function newTextEncoder() {
  20960. return new TextEncoder();
  20961. }
  20962. /**
  20963. * An instance of the Platform's 'TextDecoder' implementation.
  20964. */
  20965. function newTextDecoder() {
  20966. return new TextDecoder('utf-8');
  20967. }
  20968. /**
  20969. * @license
  20970. * Copyright 2017 Google LLC
  20971. *
  20972. * Licensed under the Apache License, Version 2.0 (the "License");
  20973. * you may not use this file except in compliance with the License.
  20974. * You may obtain a copy of the License at
  20975. *
  20976. * http://www.apache.org/licenses/LICENSE-2.0
  20977. *
  20978. * Unless required by applicable law or agreed to in writing, software
  20979. * distributed under the License is distributed on an "AS IS" BASIS,
  20980. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  20981. * See the License for the specific language governing permissions and
  20982. * limitations under the License.
  20983. */
// Tag used for debug logging by ExponentialBackoff below.
const LOG_TAG$8 = 'ExponentialBackoff';
/**
 * Initial backoff time in milliseconds after an error.
 * Set to 1s according to https://cloud.google.com/apis/design/errors.
 */
const DEFAULT_BACKOFF_INITIAL_DELAY_MS = 1000;
// Each successive base delay is multiplied by this factor (1s, 1.5s, 2.25s, …).
const DEFAULT_BACKOFF_FACTOR = 1.5;
/** Maximum backoff time in milliseconds */
const DEFAULT_BACKOFF_MAX_DELAY_MS = 60 * 1000;
  20993. /**
  20994. * A helper for running delayed tasks following an exponential backoff curve
  20995. * between attempts.
  20996. *
  20997. * Each delay is made up of a "base" delay which follows the exponential
  20998. * backoff curve, and a +/- 50% "jitter" that is calculated and added to the
  20999. * base delay. This prevents clients from accidentally synchronizing their
  21000. * delays causing spikes of load to the backend.
  21001. */
  21002. class ExponentialBackoff {
  21003. constructor(
  21004. /**
  21005. * The AsyncQueue to run backoff operations on.
  21006. */
  21007. queue,
  21008. /**
  21009. * The ID to use when scheduling backoff operations on the AsyncQueue.
  21010. */
  21011. timerId,
  21012. /**
  21013. * The initial delay (used as the base delay on the first retry attempt).
  21014. * Note that jitter will still be applied, so the actual delay could be as
  21015. * little as 0.5*initialDelayMs.
  21016. */
  21017. initialDelayMs = DEFAULT_BACKOFF_INITIAL_DELAY_MS,
  21018. /**
  21019. * The multiplier to use to determine the extended base delay after each
  21020. * attempt.
  21021. */
  21022. backoffFactor = DEFAULT_BACKOFF_FACTOR,
  21023. /**
  21024. * The maximum base delay after which no further backoff is performed.
  21025. * Note that jitter will still be applied, so the actual delay could be as
  21026. * much as 1.5*maxDelayMs.
  21027. */
  21028. maxDelayMs = DEFAULT_BACKOFF_MAX_DELAY_MS) {
  21029. this.queue = queue;
  21030. this.timerId = timerId;
  21031. this.initialDelayMs = initialDelayMs;
  21032. this.backoffFactor = backoffFactor;
  21033. this.maxDelayMs = maxDelayMs;
  21034. this.currentBaseMs = 0;
  21035. this.timerPromise = null;
  21036. /** The last backoff attempt, as epoch milliseconds. */
  21037. this.lastAttemptTime = Date.now();
  21038. this.reset();
  21039. }
  21040. /**
  21041. * Resets the backoff delay.
  21042. *
  21043. * The very next backoffAndWait() will have no delay. If it is called again
  21044. * (i.e. due to an error), initialDelayMs (plus jitter) will be used, and
  21045. * subsequent ones will increase according to the backoffFactor.
  21046. */
  21047. reset() {
  21048. this.currentBaseMs = 0;
  21049. }
  21050. /**
  21051. * Resets the backoff delay to the maximum delay (e.g. for use after a
  21052. * RESOURCE_EXHAUSTED error).
  21053. */
  21054. resetToMax() {
  21055. this.currentBaseMs = this.maxDelayMs;
  21056. }
  21057. /**
  21058. * Returns a promise that resolves after currentDelayMs, and increases the
  21059. * delay for any subsequent attempts. If there was a pending backoff operation
  21060. * already, it will be canceled.
  21061. */
  21062. backoffAndRun(op) {
  21063. // Cancel any pending backoff operation.
  21064. this.cancel();
  21065. // First schedule using the current base (which may be 0 and should be
  21066. // honored as such).
  21067. const desiredDelayWithJitterMs = Math.floor(this.currentBaseMs + this.jitterDelayMs());
  21068. // Guard against lastAttemptTime being in the future due to a clock change.
  21069. const delaySoFarMs = Math.max(0, Date.now() - this.lastAttemptTime);
  21070. // Guard against the backoff delay already being past.
  21071. const remainingDelayMs = Math.max(0, desiredDelayWithJitterMs - delaySoFarMs);
  21072. if (remainingDelayMs > 0) {
  21073. logDebug(LOG_TAG$8, `Backing off for ${remainingDelayMs} ms ` +
  21074. `(base delay: ${this.currentBaseMs} ms, ` +
  21075. `delay with jitter: ${desiredDelayWithJitterMs} ms, ` +
  21076. `last attempt: ${delaySoFarMs} ms ago)`);
  21077. }
  21078. this.timerPromise = this.queue.enqueueAfterDelay(this.timerId, remainingDelayMs, () => {
  21079. this.lastAttemptTime = Date.now();
  21080. return op();
  21081. });
  21082. // Apply backoff factor to determine next delay and ensure it is within
  21083. // bounds.
  21084. this.currentBaseMs *= this.backoffFactor;
  21085. if (this.currentBaseMs < this.initialDelayMs) {
  21086. this.currentBaseMs = this.initialDelayMs;
  21087. }
  21088. if (this.currentBaseMs > this.maxDelayMs) {
  21089. this.currentBaseMs = this.maxDelayMs;
  21090. }
  21091. }
  21092. skipBackoff() {
  21093. if (this.timerPromise !== null) {
  21094. this.timerPromise.skipDelay();
  21095. this.timerPromise = null;
  21096. }
  21097. }
  21098. cancel() {
  21099. if (this.timerPromise !== null) {
  21100. this.timerPromise.cancel();
  21101. this.timerPromise = null;
  21102. }
  21103. }
  21104. /** Returns a random value in the range [-currentBaseMs/2, currentBaseMs/2] */
  21105. jitterDelayMs() {
  21106. return (Math.random() - 0.5) * this.currentBaseMs;
  21107. }
  21108. }
  21109. /**
  21110. * @license
  21111. * Copyright 2017 Google LLC
  21112. *
  21113. * Licensed under the Apache License, Version 2.0 (the "License");
  21114. * you may not use this file except in compliance with the License.
  21115. * You may obtain a copy of the License at
  21116. *
  21117. * http://www.apache.org/licenses/LICENSE-2.0
  21118. *
  21119. * Unless required by applicable law or agreed to in writing, software
  21120. * distributed under the License is distributed on an "AS IS" BASIS,
  21121. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21122. * See the License for the specific language governing permissions and
  21123. * limitations under the License.
  21124. */
// Tag used for debug logging by PersistentStream below.
const LOG_TAG$7 = 'PersistentStream';
/** The time a stream stays open after it is marked idle, in milliseconds. */
const IDLE_TIMEOUT_MS = 60 * 1000;
/** The time a stream stays open until we consider it healthy, in milliseconds. */
const HEALTHY_TIMEOUT_MS = 10 * 1000;
  21130. /**
  21131. * A PersistentStream is an abstract base class that represents a streaming RPC
  21132. * to the Firestore backend. It's built on top of the connections own support
  21133. * for streaming RPCs, and adds several critical features for our clients:
  21134. *
  21135. * - Exponential backoff on failure
  21136. * - Authentication via CredentialsProvider
  21137. * - Dispatching all callbacks into the shared worker queue
  21138. * - Closing idle streams after 60 seconds of inactivity
  21139. *
  21140. * Subclasses of PersistentStream implement serialization of models to and
  21141. * from the JSON representation of the protocol buffers for a specific
  21142. * streaming RPC.
  21143. *
  21144. * ## Starting and Stopping
  21145. *
  21146. * Streaming RPCs are stateful and need to be start()ed before messages can
  21147. * be sent and received. The PersistentStream will call the onOpen() function
  21148. * of the listener once the stream is ready to accept requests.
  21149. *
  21150. * Should a start() fail, PersistentStream will call the registered onClose()
  21151. * listener with a FirestoreError indicating what went wrong.
  21152. *
  21153. * A PersistentStream can be started and stopped repeatedly.
  21154. *
  21155. * Generic types:
  21156. * SendType: The type of the outgoing message of the underlying
  21157. * connection stream
  21158. * ReceiveType: The type of the incoming message of the underlying
  21159. * connection stream
  21160. * ListenerType: The type of the listener that will be used for callbacks
  21161. */
  21162. class PersistentStream {
    constructor(queue, connectionTimerId, idleTimerId, healthTimerId, connection, authCredentialsProvider, appCheckCredentialsProvider, listener) {
        this.queue = queue;
        this.idleTimerId = idleTimerId;
        this.healthTimerId = healthTimerId;
        this.connection = connection;
        this.authCredentialsProvider = authCredentialsProvider;
        this.appCheckCredentialsProvider = appCheckCredentialsProvider;
        this.listener = listener;
        // Streams begin in the Initial state; start() transitions them out of it.
        this.state = 0 /* PersistentStreamState.Initial */;
        /**
         * A close count that's incremented every time the stream is closed; used by
         * getCloseGuardedDispatcher() to invalidate callbacks that happen after
         * close.
         */
        this.closeCount = 0;
        // Pending delayed operations; null when not scheduled.
        this.idleTimer = null;
        this.healthCheck = null;
        // The underlying RPC stream; null until the stream is started.
        this.stream = null;
        // Note: connectionTimerId is not stored on `this`; it only seeds the
        // backoff helper used when (re)establishing the connection.
        this.backoff = new ExponentialBackoff(queue, connectionTimerId);
    }
  21183. /**
  21184. * Returns true if start() has been called and no error has occurred. True
  21185. * indicates the stream is open or in the process of opening (which
  21186. * encompasses respecting backoff, getting auth tokens, and starting the
  21187. * actual RPC). Use isOpen() to determine if the stream is open and ready for
  21188. * outbound requests.
  21189. */
  21190. isStarted() {
  21191. return (this.state === 1 /* PersistentStreamState.Starting */ ||
  21192. this.state === 5 /* PersistentStreamState.Backoff */ ||
  21193. this.isOpen());
  21194. }
  21195. /**
  21196. * Returns true if the underlying RPC is open (the onOpen() listener has been
  21197. * called) and the stream is ready for outbound requests.
  21198. */
  21199. isOpen() {
  21200. return (this.state === 2 /* PersistentStreamState.Open */ ||
  21201. this.state === 3 /* PersistentStreamState.Healthy */);
  21202. }
  21203. /**
  21204. * Starts the RPC. Only allowed if isStarted() returns false. The stream is
  21205. * not immediately ready for use: onOpen() will be invoked when the RPC is
  21206. * ready for outbound requests, at which point isOpen() will return true.
  21207. *
  21208. * When start returns, isStarted() will return true.
  21209. */
  21210. start() {
  21211. if (this.state === 4 /* PersistentStreamState.Error */) {
  21212. this.performBackoff();
  21213. return;
  21214. }
  21215. this.auth();
  21216. }
  21217. /**
  21218. * Stops the RPC. This call is idempotent and allowed regardless of the
  21219. * current isStarted() state.
  21220. *
  21221. * When stop returns, isStarted() and isOpen() will both return false.
  21222. */
  21223. async stop() {
  21224. if (this.isStarted()) {
  21225. await this.close(0 /* PersistentStreamState.Initial */);
  21226. }
  21227. }
    /**
     * After an error the stream will usually back off on the next attempt to
     * start it. If the error warrants an immediate restart of the stream, the
     * sender can use this to indicate that the receiver should not back off.
     *
     * Each error will call the onClose() listener. That function can decide to
     * inhibit backoff if required.
     */
    inhibitBackoff() {
        // Returning to Initial makes the next start() skip the backoff path.
        this.state = 0 /* PersistentStreamState.Initial */;
        this.backoff.reset();
    }
  21240. /**
  21241. * Marks this stream as idle. If no further actions are performed on the
  21242. * stream for one minute, the stream will automatically close itself and
  21243. * notify the stream's onClose() handler with Status.OK. The stream will then
  21244. * be in a !isStarted() state, requiring the caller to start the stream again
  21245. * before further use.
  21246. *
  21247. * Only streams that are in state 'Open' can be marked idle, as all other
  21248. * states imply pending network operations.
  21249. */
  21250. markIdle() {
  21251. // Starts the idle time if we are in state 'Open' and are not yet already
  21252. // running a timer (in which case the previous idle timeout still applies).
  21253. if (this.isOpen() && this.idleTimer === null) {
  21254. this.idleTimer = this.queue.enqueueAfterDelay(this.idleTimerId, IDLE_TIMEOUT_MS, () => this.handleIdleCloseTimer());
  21255. }
  21256. }
/**
 * Sends a message to the underlying stream.
 *
 * Outbound traffic counts as activity, so any pending idle-close timer is
 * cancelled first. Callers must only invoke this while the stream is open
 * (this.stream is assumed non-null here).
 */
sendRequest(msg) {
    this.cancelIdleCheck();
    this.stream.send(msg);
}
  21262. /** Called by the idle timer when the stream should close due to inactivity. */
  21263. async handleIdleCloseTimer() {
  21264. if (this.isOpen()) {
  21265. // When timing out an idle stream there's no reason to force the stream into backoff when
  21266. // it restarts so set the stream state to Initial instead of Error.
  21267. return this.close(0 /* PersistentStreamState.Initial */);
  21268. }
  21269. }
  21270. /** Marks the stream as active again. */
  21271. cancelIdleCheck() {
  21272. if (this.idleTimer) {
  21273. this.idleTimer.cancel();
  21274. this.idleTimer = null;
  21275. }
  21276. }
  21277. /** Cancels the health check delayed operation. */
  21278. cancelHealthCheck() {
  21279. if (this.healthCheck) {
  21280. this.healthCheck.cancel();
  21281. this.healthCheck = null;
  21282. }
  21283. }
  21284. /**
  21285. * Closes the stream and cleans up as necessary:
  21286. *
  21287. * * closes the underlying GRPC stream;
  21288. * * calls the onClose handler with the given 'error';
  21289. * * sets internal stream state to 'finalState';
  21290. * * adjusts the backoff timer based on the error
  21291. *
  21292. * A new stream can be opened by calling start().
  21293. *
  21294. * @param finalState - the intended state of the stream after closing.
  21295. * @param error - the error the connection was closed with.
  21296. */
async close(finalState, error) {
    // Cancel any outstanding timers (they're guaranteed not to execute).
    this.cancelIdleCheck();
    this.cancelHealthCheck();
    this.backoff.cancel();
    // Invalidates any stream-related callbacks (e.g. from auth or the
    // underlying stream), guaranteeing they won't execute.
    this.closeCount++;
    if (finalState !== 4 /* PersistentStreamState.Error */) {
        // If this is an intentional close ensure we don't delay our next connection attempt.
        this.backoff.reset();
    }
    else if (error && error.code === Code.RESOURCE_EXHAUSTED) {
        // Log the error. (Probably either 'quota exceeded' or 'max queue length reached'.)
        logError(error.toString());
        logError('Using maximum backoff delay to prevent overloading the backend.');
        this.backoff.resetToMax();
    }
    else if (error &&
        error.code === Code.UNAUTHENTICATED &&
        this.state !== 3 /* PersistentStreamState.Healthy */) {
        // "unauthenticated" error means the token was rejected. This should rarely
        // happen since both Auth and AppCheck ensure a sufficient TTL when we
        // request a token. If a user manually resets their system clock this can
        // fail, however. In this case, we should get a Code.UNAUTHENTICATED error
        // before we received the first message and we need to invalidate the token
        // to ensure that we fetch a new token.
        this.authCredentialsProvider.invalidateToken();
        this.appCheckCredentialsProvider.invalidateToken();
    }
    // Clean up the underlying stream because we are no longer interested in events.
    if (this.stream !== null) {
        // tearDown() is the subclass cleanup hook and must run before the
        // stream is actually closed (it may still send on the stream).
        this.tearDown();
        this.stream.close();
        this.stream = null;
    }
    // This state must be assigned before calling onClose() to allow the callback to
    // inhibit backoff or otherwise manipulate the state in its non-started state.
    this.state = finalState;
    // Notify the listener that the stream closed.
    await this.listener.onClose(error);
}
/**
 * Can be overridden to perform additional cleanup before the stream is closed.
 * Calling super.tearDown() is not required.
 */
tearDown() { /* intentionally empty: default streams need no extra cleanup */ }
/**
 * Fetches Auth and AppCheck tokens in parallel and, if the stream has not
 * been closed in the meantime, opens the underlying RPC with them.
 * Token-fetch failures are surfaced through handleStreamClose().
 */
auth() {
    this.state = 1 /* PersistentStreamState.Starting */;
    const dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
    // TODO(mikelehen): Just use dispatchIfNotClosed, but see TODO below.
    const closeCount = this.closeCount;
    Promise.all([
        this.authCredentialsProvider.getToken(),
        this.appCheckCredentialsProvider.getToken()
    ]).then(([authToken, appCheckToken]) => {
        // Stream can be stopped while waiting for authentication.
        // TODO(mikelehen): We really should just use dispatchIfNotClosed
        // and let this dispatch onto the queue, but that opened a spec test can
        // of worms that I don't want to deal with in this PR.
        // Comparing closeCount detects any close() that happened while the
        // token fetch was in flight; if so, the result is simply discarded.
        if (this.closeCount === closeCount) {
            // Normally we'd have to schedule the callback on the AsyncQueue.
            // However, the following calls are safe to be called outside the
            // AsyncQueue since they don't chain asynchronous calls
            this.startStream(authToken, appCheckToken);
        }
    }, (error) => {
        dispatchIfNotClosed(() => {
            const rpcError = new FirestoreError(Code.UNKNOWN, 'Fetching auth token failed: ' + error.message);
            return this.handleStreamClose(rpcError);
        });
    });
}
/**
 * Opens the underlying RPC (via the subclass's startRpc()) and wires up its
 * onOpen/onClose/onMessage callbacks. All callbacks are routed through a
 * close-guarded dispatcher so they become no-ops if the stream is closed or
 * re-opened before they run.
 */
startStream(authToken, appCheckToken) {
    const dispatchIfNotClosed = this.getCloseGuardedDispatcher(this.closeCount);
    this.stream = this.startRpc(authToken, appCheckToken);
    this.stream.onOpen(() => {
        dispatchIfNotClosed(() => {
            this.state = 2 /* PersistentStreamState.Open */;
            // If the stream stays open for HEALTHY_TIMEOUT_MS, promote it to
            // Healthy. (The timer is cancelled by close() if it fires first.)
            this.healthCheck = this.queue.enqueueAfterDelay(this.healthTimerId, HEALTHY_TIMEOUT_MS, () => {
                if (this.isOpen()) {
                    this.state = 3 /* PersistentStreamState.Healthy */;
                }
                return Promise.resolve();
            });
            return this.listener.onOpen();
        });
    });
    this.stream.onClose((error) => {
        dispatchIfNotClosed(() => {
            return this.handleStreamClose(error);
        });
    });
    this.stream.onMessage((msg) => {
        dispatchIfNotClosed(() => {
            return this.onMessage(msg);
        });
    });
}
/**
 * Enters the Backoff state and schedules a reconnect: once the (exponentially
 * growing) backoff delay elapses, the stream returns to Initial and start()
 * is invoked again.
 */
performBackoff() {
    this.state = 5 /* PersistentStreamState.Backoff */;
    this.backoff.backoffAndRun(async () => {
        // Backoff elapsed; re-enter Initial so start() attempts a fresh connection.
        this.state = 0 /* PersistentStreamState.Initial */;
        this.start();
    });
}
// Visible for tests
/**
 * Handles a close event reported by the underlying stream. Always transitions
 * to the Error state (see comment below) and returns the promise from close().
 */
handleStreamClose(error) {
    logDebug(LOG_TAG$7, `close with error: ${error}`);
    this.stream = null;
    // In theory the stream could close cleanly, however, in our current model
    // we never expect this to happen because if we stop a stream ourselves,
    // this callback will never be called. To prevent cases where we retry
    // without a backoff accidentally, we set the stream to error in all cases.
    return this.close(4 /* PersistentStreamState.Error */, error);
}
  21413. /**
  21414. * Returns a "dispatcher" function that dispatches operations onto the
  21415. * AsyncQueue but only runs them if closeCount remains unchanged. This allows
  21416. * us to turn auth / stream callbacks into no-ops if the stream is closed /
  21417. * re-opened, etc.
  21418. */
  21419. getCloseGuardedDispatcher(startCloseCount) {
  21420. return (fn) => {
  21421. this.queue.enqueueAndForget(() => {
  21422. if (this.closeCount === startCloseCount) {
  21423. return fn();
  21424. }
  21425. else {
  21426. logDebug(LOG_TAG$7, 'stream callback skipped by getCloseGuardedDispatcher.');
  21427. return Promise.resolve();
  21428. }
  21429. });
  21430. };
  21431. }
  21432. }
  21433. /**
  21434. * A PersistentStream that implements the Listen RPC.
  21435. *
  21436. * Once the Listen stream has called the onOpen() listener, any number of
  21437. * listen() and unlisten() calls can be made to control what changes will be
  21438. * sent from the server for ListenResponses.
  21439. */
  21440. class PersistentListenStream extends PersistentStream {
  21441. constructor(queue, connection, authCredentials, appCheckCredentials, serializer, listener) {
  21442. super(queue, "listen_stream_connection_backoff" /* TimerId.ListenStreamConnectionBackoff */, "listen_stream_idle" /* TimerId.ListenStreamIdle */, "health_check_timeout" /* TimerId.HealthCheckTimeout */, connection, authCredentials, appCheckCredentials, listener);
  21443. this.serializer = serializer;
  21444. }
  21445. startRpc(authToken, appCheckToken) {
  21446. return this.connection.openStream('Listen', authToken, appCheckToken);
  21447. }
  21448. onMessage(watchChangeProto) {
  21449. // A successful response means the stream is healthy
  21450. this.backoff.reset();
  21451. const watchChange = fromWatchChange(this.serializer, watchChangeProto);
  21452. const snapshot = versionFromListenResponse(watchChangeProto);
  21453. return this.listener.onWatchChange(watchChange, snapshot);
  21454. }
  21455. /**
  21456. * Registers interest in the results of the given target. If the target
  21457. * includes a resumeToken it will be included in the request. Results that
  21458. * affect the target will be streamed back as WatchChange messages that
  21459. * reference the targetId.
  21460. */
  21461. watch(targetData) {
  21462. const request = {};
  21463. request.database = getEncodedDatabaseId(this.serializer);
  21464. request.addTarget = toTarget(this.serializer, targetData);
  21465. const labels = toListenRequestLabels(this.serializer, targetData);
  21466. if (labels) {
  21467. request.labels = labels;
  21468. }
  21469. this.sendRequest(request);
  21470. }
  21471. /**
  21472. * Unregisters interest in the results of the target associated with the
  21473. * given targetId.
  21474. */
  21475. unwatch(targetId) {
  21476. const request = {};
  21477. request.database = getEncodedDatabaseId(this.serializer);
  21478. request.removeTarget = targetId;
  21479. this.sendRequest(request);
  21480. }
  21481. }
  21482. /**
  21483. * A Stream that implements the Write RPC.
  21484. *
  21485. * The Write RPC requires the caller to maintain special streamToken
  21486. * state in between calls, to help the server understand which responses the
  21487. * client has processed by the time the next request is made. Every response
  21488. * will contain a streamToken; this value must be passed to the next
  21489. * request.
  21490. *
  21491. * After calling start() on this stream, the next request must be a handshake,
  21492. * containing whatever streamToken is on hand. Once a response to this
  21493. * request is received, all pending mutations may be submitted. When
  21494. * submitting multiple batches of mutations at the same time, it's
  21495. * okay to use the same streamToken for the calls to writeMutations.
  21496. *
  21497. * TODO(b/33271235): Use proto types
  21498. */
  21499. class PersistentWriteStream extends PersistentStream {
  21500. constructor(queue, connection, authCredentials, appCheckCredentials, serializer, listener) {
  21501. super(queue, "write_stream_connection_backoff" /* TimerId.WriteStreamConnectionBackoff */, "write_stream_idle" /* TimerId.WriteStreamIdle */, "health_check_timeout" /* TimerId.HealthCheckTimeout */, connection, authCredentials, appCheckCredentials, listener);
  21502. this.serializer = serializer;
  21503. this.handshakeComplete_ = false;
  21504. }
  21505. /**
  21506. * Tracks whether or not a handshake has been successfully exchanged and
  21507. * the stream is ready to accept mutations.
  21508. */
  21509. get handshakeComplete() {
  21510. return this.handshakeComplete_;
  21511. }
  21512. // Override of PersistentStream.start
  21513. start() {
  21514. this.handshakeComplete_ = false;
  21515. this.lastStreamToken = undefined;
  21516. super.start();
  21517. }
  21518. tearDown() {
  21519. if (this.handshakeComplete_) {
  21520. this.writeMutations([]);
  21521. }
  21522. }
  21523. startRpc(authToken, appCheckToken) {
  21524. return this.connection.openStream('Write', authToken, appCheckToken);
  21525. }
  21526. onMessage(responseProto) {
  21527. // Always capture the last stream token.
  21528. hardAssert(!!responseProto.streamToken);
  21529. this.lastStreamToken = responseProto.streamToken;
  21530. if (!this.handshakeComplete_) {
  21531. // The first response is always the handshake response
  21532. hardAssert(!responseProto.writeResults || responseProto.writeResults.length === 0);
  21533. this.handshakeComplete_ = true;
  21534. return this.listener.onHandshakeComplete();
  21535. }
  21536. else {
  21537. // A successful first write response means the stream is healthy,
  21538. // Note, that we could consider a successful handshake healthy, however,
  21539. // the write itself might be causing an error we want to back off from.
  21540. this.backoff.reset();
  21541. const results = fromWriteResults(responseProto.writeResults, responseProto.commitTime);
  21542. const commitVersion = fromVersion(responseProto.commitTime);
  21543. return this.listener.onMutationResult(commitVersion, results);
  21544. }
  21545. }
  21546. /**
  21547. * Sends an initial streamToken to the server, performing the handshake
  21548. * required to make the StreamingWrite RPC work. Subsequent
  21549. * calls should wait until onHandshakeComplete was called.
  21550. */
  21551. writeHandshake() {
  21552. // TODO(dimond): Support stream resumption. We intentionally do not set the
  21553. // stream token on the handshake, ignoring any stream token we might have.
  21554. const request = {};
  21555. request.database = getEncodedDatabaseId(this.serializer);
  21556. this.sendRequest(request);
  21557. }
  21558. /** Sends a group of mutations to the Firestore backend to apply. */
  21559. writeMutations(mutations) {
  21560. const request = {
  21561. streamToken: this.lastStreamToken,
  21562. writes: mutations.map(mutation => toMutation(this.serializer, mutation))
  21563. };
  21564. this.sendRequest(request);
  21565. }
  21566. }
  21567. /**
  21568. * @license
  21569. * Copyright 2017 Google LLC
  21570. *
  21571. * Licensed under the Apache License, Version 2.0 (the "License");
  21572. * you may not use this file except in compliance with the License.
  21573. * You may obtain a copy of the License at
  21574. *
  21575. * http://www.apache.org/licenses/LICENSE-2.0
  21576. *
  21577. * Unless required by applicable law or agreed to in writing, software
  21578. * distributed under the License is distributed on an "AS IS" BASIS,
  21579. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21580. * See the License for the specific language governing permissions and
  21581. * limitations under the License.
  21582. */
  21583. /**
  21584. * Datastore and its related methods are a wrapper around the external Google
  21585. * Cloud Datastore grpc API, which provides an interface that is more convenient
  21586. * for the rest of the client SDK architecture to consume.
  21587. */
// Intentionally empty: serves as an opaque public handle, while DatastoreImpl
// (below) carries the actual state and methods.
class Datastore {
}
  21590. /**
  21591. * An implementation of Datastore that exposes additional state for internal
  21592. * consumption.
  21593. */
  21594. class DatastoreImpl extends Datastore {
  21595. constructor(authCredentials, appCheckCredentials, connection, serializer) {
  21596. super();
  21597. this.authCredentials = authCredentials;
  21598. this.appCheckCredentials = appCheckCredentials;
  21599. this.connection = connection;
  21600. this.serializer = serializer;
  21601. this.terminated = false;
  21602. }
  21603. verifyInitialized() {
  21604. if (this.terminated) {
  21605. throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
  21606. }
  21607. }
  21608. /** Invokes the provided RPC with auth and AppCheck tokens. */
  21609. invokeRPC(rpcName, path, request) {
  21610. this.verifyInitialized();
  21611. return Promise.all([
  21612. this.authCredentials.getToken(),
  21613. this.appCheckCredentials.getToken()
  21614. ])
  21615. .then(([authToken, appCheckToken]) => {
  21616. return this.connection.invokeRPC(rpcName, path, request, authToken, appCheckToken);
  21617. })
  21618. .catch((error) => {
  21619. if (error.name === 'FirebaseError') {
  21620. if (error.code === Code.UNAUTHENTICATED) {
  21621. this.authCredentials.invalidateToken();
  21622. this.appCheckCredentials.invalidateToken();
  21623. }
  21624. throw error;
  21625. }
  21626. else {
  21627. throw new FirestoreError(Code.UNKNOWN, error.toString());
  21628. }
  21629. });
  21630. }
  21631. /** Invokes the provided RPC with streamed results with auth and AppCheck tokens. */
  21632. invokeStreamingRPC(rpcName, path, request, expectedResponseCount) {
  21633. this.verifyInitialized();
  21634. return Promise.all([
  21635. this.authCredentials.getToken(),
  21636. this.appCheckCredentials.getToken()
  21637. ])
  21638. .then(([authToken, appCheckToken]) => {
  21639. return this.connection.invokeStreamingRPC(rpcName, path, request, authToken, appCheckToken, expectedResponseCount);
  21640. })
  21641. .catch((error) => {
  21642. if (error.name === 'FirebaseError') {
  21643. if (error.code === Code.UNAUTHENTICATED) {
  21644. this.authCredentials.invalidateToken();
  21645. this.appCheckCredentials.invalidateToken();
  21646. }
  21647. throw error;
  21648. }
  21649. else {
  21650. throw new FirestoreError(Code.UNKNOWN, error.toString());
  21651. }
  21652. });
  21653. }
  21654. terminate() {
  21655. this.terminated = true;
  21656. }
  21657. }
  21658. // TODO(firestorexp): Make sure there is only one Datastore instance per
  21659. // firestore-exp client.
/** Factory for the concrete Datastore implementation. */
function newDatastore(authCredentials, appCheckCredentials, connection, serializer) {
    return new DatastoreImpl(authCredentials, appCheckCredentials, connection, serializer);
}
  21663. async function invokeCommitRpc(datastore, mutations) {
  21664. const datastoreImpl = debugCast(datastore);
  21665. const path = getEncodedDatabaseId(datastoreImpl.serializer) + '/documents';
  21666. const request = {
  21667. writes: mutations.map(m => toMutation(datastoreImpl.serializer, m))
  21668. };
  21669. await datastoreImpl.invokeRPC('Commit', path, request);
  21670. }
  21671. async function invokeBatchGetDocumentsRpc(datastore, keys) {
  21672. const datastoreImpl = debugCast(datastore);
  21673. const path = getEncodedDatabaseId(datastoreImpl.serializer) + '/documents';
  21674. const request = {
  21675. documents: keys.map(k => toName(datastoreImpl.serializer, k))
  21676. };
  21677. const response = await datastoreImpl.invokeStreamingRPC('BatchGetDocuments', path, request, keys.length);
  21678. const docs = new Map();
  21679. response.forEach(proto => {
  21680. const doc = fromBatchGetDocumentsResponse(datastoreImpl.serializer, proto);
  21681. docs.set(doc.key.toString(), doc);
  21682. });
  21683. const result = [];
  21684. keys.forEach(key => {
  21685. const doc = docs.get(key.toString());
  21686. hardAssert(!!doc);
  21687. result.push(doc);
  21688. });
  21689. return result;
  21690. }
  21691. async function invokeRunAggregationQueryRpc(datastore, query) {
  21692. const datastoreImpl = debugCast(datastore);
  21693. const request = toRunAggregationQueryRequest(datastoreImpl.serializer, queryToTarget(query));
  21694. const parent = request.parent;
  21695. if (!datastoreImpl.connection.shouldResourcePathBeIncludedInRequest) {
  21696. delete request.parent;
  21697. }
  21698. const response = await datastoreImpl.invokeStreamingRPC('RunAggregationQuery', parent, request, /*expectedResponseCount=*/ 1);
  21699. return (response
  21700. // Omit RunAggregationQueryResponse that only contain readTimes.
  21701. .filter(proto => !!proto.result)
  21702. .map(proto => proto.result.aggregateFields));
  21703. }
/** Creates a PersistentWriteStream backed by the given datastore's connection. */
function newPersistentWriteStream(datastore, queue, listener) {
    const datastoreImpl = debugCast(datastore);
    // Fail fast if the client has already been terminated.
    datastoreImpl.verifyInitialized();
    return new PersistentWriteStream(queue, datastoreImpl.connection, datastoreImpl.authCredentials, datastoreImpl.appCheckCredentials, datastoreImpl.serializer, listener);
}
/** Creates a PersistentListenStream backed by the given datastore's connection. */
function newPersistentWatchStream(datastore, queue, listener) {
    const datastoreImpl = debugCast(datastore);
    // Fail fast if the client has already been terminated.
    datastoreImpl.verifyInitialized();
    return new PersistentListenStream(queue, datastoreImpl.connection, datastoreImpl.authCredentials, datastoreImpl.appCheckCredentials, datastoreImpl.serializer, listener);
}
  21714. /**
  21715. * @license
  21716. * Copyright 2018 Google LLC
  21717. *
  21718. * Licensed under the Apache License, Version 2.0 (the "License");
  21719. * you may not use this file except in compliance with the License.
  21720. * You may obtain a copy of the License at
  21721. *
  21722. * http://www.apache.org/licenses/LICENSE-2.0
  21723. *
  21724. * Unless required by applicable law or agreed to in writing, software
  21725. * distributed under the License is distributed on an "AS IS" BASIS,
  21726. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21727. * See the License for the specific language governing permissions and
  21728. * limitations under the License.
  21729. */
  21730. const LOG_TAG$6 = 'OnlineStateTracker';
  21731. // To deal with transient failures, we allow multiple stream attempts before
  21732. // giving up and transitioning from OnlineState.Unknown to Offline.
  21733. // TODO(mikelehen): This used to be set to 2 as a mitigation for b/66228394.
  21734. // @jdimond thinks that bug is sufficiently fixed so that we can set this back
  21735. // to 1. If that works okay, we could potentially remove this logic entirely.
  21736. const MAX_WATCH_STREAM_FAILURES = 1;
  21737. // To deal with stream attempts that don't succeed or fail in a timely manner,
  21738. // we have a timeout for OnlineState to reach Online or Offline.
  21739. // If the timeout is reached, we transition to Offline rather than waiting
  21740. // indefinitely.
  21741. const ONLINE_STATE_TIMEOUT_MS = 10 * 1000;
  21742. /**
  21743. * A component used by the RemoteStore to track the OnlineState (that is,
  21744. * whether or not the client as a whole should be considered to be online or
  21745. * offline), implementing the appropriate heuristics.
  21746. *
  21747. * In particular, when the client is trying to connect to the backend, we
  21748. * allow up to MAX_WATCH_STREAM_FAILURES within ONLINE_STATE_TIMEOUT_MS for
  21749. * a connection to succeed. If we have too many failures or the timeout elapses,
  21750. * then we set the OnlineState to Offline, and the client will behave as if
  21751. * it is offline (get()s will return cached data, etc.).
  21752. */
  21753. class OnlineStateTracker {
  21754. constructor(asyncQueue, onlineStateHandler) {
  21755. this.asyncQueue = asyncQueue;
  21756. this.onlineStateHandler = onlineStateHandler;
  21757. /** The current OnlineState. */
  21758. this.state = "Unknown" /* OnlineState.Unknown */;
  21759. /**
  21760. * A count of consecutive failures to open the stream. If it reaches the
  21761. * maximum defined by MAX_WATCH_STREAM_FAILURES, we'll set the OnlineState to
  21762. * Offline.
  21763. */
  21764. this.watchStreamFailures = 0;
  21765. /**
  21766. * A timer that elapses after ONLINE_STATE_TIMEOUT_MS, at which point we
  21767. * transition from OnlineState.Unknown to OnlineState.Offline without waiting
  21768. * for the stream to actually fail (MAX_WATCH_STREAM_FAILURES times).
  21769. */
  21770. this.onlineStateTimer = null;
  21771. /**
  21772. * Whether the client should log a warning message if it fails to connect to
  21773. * the backend (initially true, cleared after a successful stream, or if we've
  21774. * logged the message already).
  21775. */
  21776. this.shouldWarnClientIsOffline = true;
  21777. }
  21778. /**
  21779. * Called by RemoteStore when a watch stream is started (including on each
  21780. * backoff attempt).
  21781. *
  21782. * If this is the first attempt, it sets the OnlineState to Unknown and starts
  21783. * the onlineStateTimer.
  21784. */
  21785. handleWatchStreamStart() {
  21786. if (this.watchStreamFailures === 0) {
  21787. this.setAndBroadcast("Unknown" /* OnlineState.Unknown */);
  21788. this.onlineStateTimer = this.asyncQueue.enqueueAfterDelay("online_state_timeout" /* TimerId.OnlineStateTimeout */, ONLINE_STATE_TIMEOUT_MS, () => {
  21789. this.onlineStateTimer = null;
  21790. this.logClientOfflineWarningIfNecessary(`Backend didn't respond within ${ONLINE_STATE_TIMEOUT_MS / 1000} ` +
  21791. `seconds.`);
  21792. this.setAndBroadcast("Offline" /* OnlineState.Offline */);
  21793. // NOTE: handleWatchStreamFailure() will continue to increment
  21794. // watchStreamFailures even though we are already marked Offline,
  21795. // but this is non-harmful.
  21796. return Promise.resolve();
  21797. });
  21798. }
  21799. }
  21800. /**
  21801. * Updates our OnlineState as appropriate after the watch stream reports a
  21802. * failure. The first failure moves us to the 'Unknown' state. We then may
  21803. * allow multiple failures (based on MAX_WATCH_STREAM_FAILURES) before we
  21804. * actually transition to the 'Offline' state.
  21805. */
  21806. handleWatchStreamFailure(error) {
  21807. if (this.state === "Online" /* OnlineState.Online */) {
  21808. this.setAndBroadcast("Unknown" /* OnlineState.Unknown */);
  21809. }
  21810. else {
  21811. this.watchStreamFailures++;
  21812. if (this.watchStreamFailures >= MAX_WATCH_STREAM_FAILURES) {
  21813. this.clearOnlineStateTimer();
  21814. this.logClientOfflineWarningIfNecessary(`Connection failed ${MAX_WATCH_STREAM_FAILURES} ` +
  21815. `times. Most recent error: ${error.toString()}`);
  21816. this.setAndBroadcast("Offline" /* OnlineState.Offline */);
  21817. }
  21818. }
  21819. }
  21820. /**
  21821. * Explicitly sets the OnlineState to the specified state.
  21822. *
  21823. * Note that this resets our timers / failure counters, etc. used by our
  21824. * Offline heuristics, so must not be used in place of
  21825. * handleWatchStreamStart() and handleWatchStreamFailure().
  21826. */
  21827. set(newState) {
  21828. this.clearOnlineStateTimer();
  21829. this.watchStreamFailures = 0;
  21830. if (newState === "Online" /* OnlineState.Online */) {
  21831. // We've connected to watch at least once. Don't warn the developer
  21832. // about being offline going forward.
  21833. this.shouldWarnClientIsOffline = false;
  21834. }
  21835. this.setAndBroadcast(newState);
  21836. }
  21837. setAndBroadcast(newState) {
  21838. if (newState !== this.state) {
  21839. this.state = newState;
  21840. this.onlineStateHandler(newState);
  21841. }
  21842. }
  21843. logClientOfflineWarningIfNecessary(details) {
  21844. const message = `Could not reach Cloud Firestore backend. ${details}\n` +
  21845. `This typically indicates that your device does not have a healthy ` +
  21846. `Internet connection at the moment. The client will operate in offline ` +
  21847. `mode until it is able to successfully connect to the backend.`;
  21848. if (this.shouldWarnClientIsOffline) {
  21849. logError(message);
  21850. this.shouldWarnClientIsOffline = false;
  21851. }
  21852. else {
  21853. logDebug(LOG_TAG$6, message);
  21854. }
  21855. }
  21856. clearOnlineStateTimer() {
  21857. if (this.onlineStateTimer !== null) {
  21858. this.onlineStateTimer.cancel();
  21859. this.onlineStateTimer = null;
  21860. }
  21861. }
  21862. }
  21863. /**
  21864. * @license
  21865. * Copyright 2017 Google LLC
  21866. *
  21867. * Licensed under the Apache License, Version 2.0 (the "License");
  21868. * you may not use this file except in compliance with the License.
  21869. * You may obtain a copy of the License at
  21870. *
  21871. * http://www.apache.org/licenses/LICENSE-2.0
  21872. *
  21873. * Unless required by applicable law or agreed to in writing, software
  21874. * distributed under the License is distributed on an "AS IS" BASIS,
  21875. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  21876. * See the License for the specific language governing permissions and
  21877. * limitations under the License.
  21878. */
  21879. const LOG_TAG$5 = 'RemoteStore';
  21880. // TODO(b/35853402): Negotiate this with the stream.
  21881. const MAX_PENDING_WRITES = 10;
class RemoteStoreImpl {
    constructor(
    /**
     * The local store, used to fill the write pipeline with outbound mutations.
     */
    localStore, 
    /** The client-side proxy for interacting with the backend. */
    datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
        this.localStore = localStore;
        this.datastore = datastore;
        this.asyncQueue = asyncQueue;
        // Empty at construction; NOTE(review): callbacks appear to be attached
        // elsewhere (tree-shakeable wiring) — confirm against the sync engine.
        this.remoteSyncer = {};
        /**
         * A list of up to MAX_PENDING_WRITES writes that we have fetched from the
         * LocalStore via fillWritePipeline() and have or will send to the write
         * stream.
         *
         * Whenever writePipeline.length > 0 the RemoteStore will attempt to start or
         * restart the write stream. When the stream is established the writes in the
         * pipeline will be sent in order.
         *
         * Writes remain in writePipeline until they are acknowledged by the backend
         * and thus will automatically be re-sent if the stream is interrupted /
         * restarted before they're acknowledged.
         *
         * Write responses from the backend are linked to their originating request
         * purely based on order, and so we can just shift() writes from the front of
         * the writePipeline as we receive responses.
         */
        this.writePipeline = [];
        /**
         * A mapping of watched targets that the client cares about tracking and the
         * user has explicitly called a 'listen' for this target.
         *
         * These targets may or may not have been sent to or acknowledged by the
         * server. On re-establishing the listen stream, these targets should be sent
         * to the server. The targets removed with unlistens are removed eagerly
         * without waiting for confirmation from the listen stream.
         */
        this.listenTargets = new Map();
        /**
         * A set of reasons for why the RemoteStore may be offline. If empty, the
         * RemoteStore may start its network connections.
         */
        this.offlineCauses = new Set();
        /**
         * Event handlers that get called when the network is disabled or enabled.
         *
         * PORTING NOTE: These functions are used on the Web client to create the
         * underlying streams (to support tree-shakeable streams). On Android and iOS,
         * the streams are created during construction of RemoteStore.
         */
        this.onNetworkStatusChange = [];
        this.connectivityMonitor = connectivityMonitor;
        // Any connectivity change restarts the streams (when the network is
        // usable), since there is no other way to tear them down proactively.
        this.connectivityMonitor.addCallback((_) => {
            asyncQueue.enqueueAndForget(async () => {
                // Porting Note: Unlike iOS, `restartNetwork()` is called even when the
                // network becomes unreachable as we don't have any other way to tear
                // down our streams.
                if (canUseNetwork(this)) {
                    logDebug(LOG_TAG$5, 'Restarting streams for network reachability change.');
                    await restartNetwork(this);
                }
            });
        });
        this.onlineStateTracker = new OnlineStateTracker(asyncQueue, onlineStateHandler);
    }
}
  21950. function newRemoteStore(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor) {
  21951. return new RemoteStoreImpl(localStore, datastore, asyncQueue, onlineStateHandler, connectivityMonitor);
  21952. }
  21953. /** Re-enables the network. Idempotent. */
  21954. function remoteStoreEnableNetwork(remoteStore) {
  21955. const remoteStoreImpl = debugCast(remoteStore);
  21956. remoteStoreImpl.offlineCauses.delete(0 /* OfflineCause.UserDisabled */);
  21957. return enableNetworkInternal(remoteStoreImpl);
  21958. }
  21959. async function enableNetworkInternal(remoteStoreImpl) {
  21960. if (canUseNetwork(remoteStoreImpl)) {
  21961. for (const networkStatusHandler of remoteStoreImpl.onNetworkStatusChange) {
  21962. await networkStatusHandler(/* enabled= */ true);
  21963. }
  21964. }
  21965. }
  21966. /**
  21967. * Temporarily disables the network. The network can be re-enabled using
  21968. * enableNetwork().
  21969. */
  21970. async function remoteStoreDisableNetwork(remoteStore) {
  21971. const remoteStoreImpl = debugCast(remoteStore);
  21972. remoteStoreImpl.offlineCauses.add(0 /* OfflineCause.UserDisabled */);
  21973. await disableNetworkInternal(remoteStoreImpl);
  21974. // Set the OnlineState to Offline so get()s return from cache, etc.
  21975. remoteStoreImpl.onlineStateTracker.set("Offline" /* OnlineState.Offline */);
  21976. }
  21977. async function disableNetworkInternal(remoteStoreImpl) {
  21978. for (const networkStatusHandler of remoteStoreImpl.onNetworkStatusChange) {
  21979. await networkStatusHandler(/* enabled= */ false);
  21980. }
  21981. }
  21982. async function remoteStoreShutdown(remoteStore) {
  21983. const remoteStoreImpl = debugCast(remoteStore);
  21984. logDebug(LOG_TAG$5, 'RemoteStore shutting down.');
  21985. remoteStoreImpl.offlineCauses.add(5 /* OfflineCause.Shutdown */);
  21986. await disableNetworkInternal(remoteStoreImpl);
  21987. remoteStoreImpl.connectivityMonitor.shutdown();
  21988. // Set the OnlineState to Unknown (rather than Offline) to avoid potentially
  21989. // triggering spurious listener events with cached data, etc.
  21990. remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
  21991. }
  21992. /**
  21993. * Starts new listen for the given target. Uses resume token if provided. It
  21994. * is a no-op if the target of given `TargetData` is already being listened to.
  21995. */
  21996. function remoteStoreListen(remoteStore, targetData) {
  21997. const remoteStoreImpl = debugCast(remoteStore);
  21998. if (remoteStoreImpl.listenTargets.has(targetData.targetId)) {
  21999. return;
  22000. }
  22001. // Mark this as something the client is currently listening for.
  22002. remoteStoreImpl.listenTargets.set(targetData.targetId, targetData);
  22003. if (shouldStartWatchStream(remoteStoreImpl)) {
  22004. // The listen will be sent in onWatchStreamOpen
  22005. startWatchStream(remoteStoreImpl);
  22006. }
  22007. else if (ensureWatchStream(remoteStoreImpl).isOpen()) {
  22008. sendWatchRequest(remoteStoreImpl, targetData);
  22009. }
  22010. }
  22011. /**
  22012. * Removes the listen from server. It is a no-op if the given target id is
  22013. * not being listened to.
  22014. */
  22015. function remoteStoreUnlisten(remoteStore, targetId) {
  22016. const remoteStoreImpl = debugCast(remoteStore);
  22017. const watchStream = ensureWatchStream(remoteStoreImpl);
  22018. remoteStoreImpl.listenTargets.delete(targetId);
  22019. if (watchStream.isOpen()) {
  22020. sendUnwatchRequest(remoteStoreImpl, targetId);
  22021. }
  22022. if (remoteStoreImpl.listenTargets.size === 0) {
  22023. if (watchStream.isOpen()) {
  22024. watchStream.markIdle();
  22025. }
  22026. else if (canUseNetwork(remoteStoreImpl)) {
  22027. // Revert to OnlineState.Unknown if the watch stream is not open and we
  22028. // have no listeners, since without any listens to send we cannot
  22029. // confirm if the stream is healthy and upgrade to OnlineState.Online.
  22030. remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
  22031. }
  22032. }
  22033. }
  22034. /**
  22035. * We need to increment the the expected number of pending responses we're due
  22036. * from watch so we wait for the ack to process any messages from this target.
  22037. */
  22038. function sendWatchRequest(remoteStoreImpl, targetData) {
  22039. remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetData.targetId);
  22040. ensureWatchStream(remoteStoreImpl).watch(targetData);
  22041. }
  22042. /**
  22043. * We need to increment the expected number of pending responses we're due
  22044. * from watch so we wait for the removal on the server before we process any
  22045. * messages from this target.
  22046. */
  22047. function sendUnwatchRequest(remoteStoreImpl, targetId) {
  22048. remoteStoreImpl.watchChangeAggregator.recordPendingTargetRequest(targetId);
  22049. ensureWatchStream(remoteStoreImpl).unwatch(targetId);
  22050. }
  22051. function startWatchStream(remoteStoreImpl) {
  22052. remoteStoreImpl.watchChangeAggregator = new WatchChangeAggregator({
  22053. getRemoteKeysForTarget: targetId => remoteStoreImpl.remoteSyncer.getRemoteKeysForTarget(targetId),
  22054. getTargetDataForTarget: targetId => remoteStoreImpl.listenTargets.get(targetId) || null
  22055. });
  22056. ensureWatchStream(remoteStoreImpl).start();
  22057. remoteStoreImpl.onlineStateTracker.handleWatchStreamStart();
  22058. }
  22059. /**
  22060. * Returns whether the watch stream should be started because it's necessary
  22061. * and has not yet been started.
  22062. */
  22063. function shouldStartWatchStream(remoteStoreImpl) {
  22064. return (canUseNetwork(remoteStoreImpl) &&
  22065. !ensureWatchStream(remoteStoreImpl).isStarted() &&
  22066. remoteStoreImpl.listenTargets.size > 0);
  22067. }
  22068. function canUseNetwork(remoteStore) {
  22069. const remoteStoreImpl = debugCast(remoteStore);
  22070. return remoteStoreImpl.offlineCauses.size === 0;
  22071. }
/**
 * Discards the per-connection watch state. The aggregator accumulates state
 * scoped to a single watch-stream connection, so it is dropped whenever the
 * stream closes and rebuilt in startWatchStream().
 */
function cleanUpWatchStreamState(remoteStoreImpl) {
    remoteStoreImpl.watchChangeAggregator = undefined;
}
  22075. async function onWatchStreamOpen(remoteStoreImpl) {
  22076. remoteStoreImpl.listenTargets.forEach((targetData, targetId) => {
  22077. sendWatchRequest(remoteStoreImpl, targetData);
  22078. });
  22079. }
  22080. async function onWatchStreamClose(remoteStoreImpl, error) {
  22081. cleanUpWatchStreamState(remoteStoreImpl);
  22082. // If we still need the watch stream, retry the connection.
  22083. if (shouldStartWatchStream(remoteStoreImpl)) {
  22084. remoteStoreImpl.onlineStateTracker.handleWatchStreamFailure(error);
  22085. startWatchStream(remoteStoreImpl);
  22086. }
  22087. else {
  22088. // No need to restart watch stream because there are no active targets.
  22089. // The online state is set to unknown because there is no active attempt
  22090. // at establishing a connection
  22091. remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
  22092. }
  22093. }
/**
 * Invoked for every message received on the watch stream.
 *
 * Target-removal errors are handled immediately (without waiting for a
 * consistent snapshot); all other changes are routed into the
 * WatchChangeAggregator. When the message carries a non-minimum snapshot
 * version that is at least the last persisted remote snapshot version, a
 * RemoteEvent is raised via raiseWatchSnapshot().
 *
 * IndexedDB failures in either path are funneled into
 * disableNetworkUntilRecovery() rather than propagated.
 */
async function onWatchStreamChange(remoteStoreImpl, watchChange, snapshotVersion) {
    // Mark the client as online since we got a message from the server
    remoteStoreImpl.onlineStateTracker.set("Online" /* OnlineState.Online */);
    if (watchChange instanceof WatchTargetChange &&
        watchChange.state === 2 /* WatchTargetChangeState.Removed */ &&
        watchChange.cause) {
        // There was an error on a target, don't wait for a consistent snapshot
        // to raise events
        try {
            await handleTargetError(remoteStoreImpl, watchChange);
        }
        catch (e) {
            logDebug(LOG_TAG$5, 'Failed to remove targets %s: %s ', watchChange.targetIds.join(','), e);
            await disableNetworkUntilRecovery(remoteStoreImpl, e);
        }
        return;
    }
    // Route the change into the aggregator by its concrete type.
    if (watchChange instanceof DocumentWatchChange) {
        remoteStoreImpl.watchChangeAggregator.handleDocumentChange(watchChange);
    }
    else if (watchChange instanceof ExistenceFilterChange) {
        remoteStoreImpl.watchChangeAggregator.handleExistenceFilter(watchChange);
    }
    else {
        remoteStoreImpl.watchChangeAggregator.handleTargetChange(watchChange);
    }
    if (!snapshotVersion.isEqual(SnapshotVersion.min())) {
        try {
            const lastRemoteSnapshotVersion = await localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore);
            if (snapshotVersion.compareTo(lastRemoteSnapshotVersion) >= 0) {
                // We have received a target change with a global snapshot if the snapshot
                // version is not equal to SnapshotVersion.min().
                await raiseWatchSnapshot(remoteStoreImpl, snapshotVersion);
            }
        }
        catch (e) {
            logDebug(LOG_TAG$5, 'Failed to raise snapshot:', e);
            await disableNetworkUntilRecovery(remoteStoreImpl, e);
        }
    }
}
/**
 * Recovery logic for IndexedDB errors that takes the network offline until
 * `op` succeeds. Retries are scheduled with backoff using
 * `enqueueRetryable()`. If `op()` is not provided, IndexedDB access is
 * validated via a generic operation.
 *
 * The returned Promise is resolved once the network is disabled and before
 * any retry attempt.
 *
 * Non-IndexedDB errors are not recoverable here and are rethrown to the
 * caller unchanged.
 */
async function disableNetworkUntilRecovery(remoteStoreImpl, e, op) {
    if (isIndexedDbTransactionError(e)) {
        remoteStoreImpl.offlineCauses.add(1 /* OfflineCause.IndexedDbFailed */);
        // Disable network and raise offline snapshots
        await disableNetworkInternal(remoteStoreImpl);
        remoteStoreImpl.onlineStateTracker.set("Offline" /* OnlineState.Offline */);
        if (!op) {
            // Use a simple read operation to determine if IndexedDB recovered.
            // Ideally, we would expose a health check directly on SimpleDb, but
            // RemoteStore only has access to persistence through LocalStore.
            op = () => localStoreGetLastRemoteSnapshotVersion(remoteStoreImpl.localStore);
        }
        // Probe IndexedDB periodically and re-enable network once a probe
        // succeeds; enqueueRetryable keeps retrying `op` with backoff until
        // it no longer throws.
        remoteStoreImpl.asyncQueue.enqueueRetryable(async () => {
            logDebug(LOG_TAG$5, 'Retrying IndexedDB access');
            await op();
            remoteStoreImpl.offlineCauses.delete(1 /* OfflineCause.IndexedDbFailed */);
            await enableNetworkInternal(remoteStoreImpl);
        });
    }
    else {
        throw e;
    }
}
  22168. /**
  22169. * Executes `op`. If `op` fails, takes the network offline until `op`
  22170. * succeeds. Returns after the first attempt.
  22171. */
  22172. function executeWithRecovery(remoteStoreImpl, op) {
  22173. return op().catch(e => disableNetworkUntilRecovery(remoteStoreImpl, e, op));
  22174. }
/**
 * Takes a batch of changes from the Datastore, repackages them as a
 * RemoteEvent, and passes that on to the listener, which is typically the
 * SyncEngine.
 *
 * Along the way it (1) caches new resume tokens in `listenTargets` and
 * (2) force-restarts any target flagged as an existence-filter mismatch,
 * with its resume token cleared so the server replays the full result set.
 *
 * @returns The promise from the remote syncer's applyRemoteEvent().
 */
function raiseWatchSnapshot(remoteStoreImpl, snapshotVersion) {
    const remoteEvent = remoteStoreImpl.watchChangeAggregator.createRemoteEvent(snapshotVersion);
    // Update in-memory resume tokens. LocalStore will update the
    // persistent view of these when applying the completed RemoteEvent.
    remoteEvent.targetChanges.forEach((change, targetId) => {
        if (change.resumeToken.approximateByteSize() > 0) {
            const targetData = remoteStoreImpl.listenTargets.get(targetId);
            // A watched target might have been removed already.
            if (targetData) {
                remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(change.resumeToken, snapshotVersion));
            }
        }
    });
    // Re-establish listens for the targets that have been invalidated by
    // existence filter mismatches.
    remoteEvent.targetMismatches.forEach(targetId => {
        const targetData = remoteStoreImpl.listenTargets.get(targetId);
        if (!targetData) {
            // A watched target might have been removed already.
            return;
        }
        // Clear the resume token for the target, since we're in a known mismatch
        // state.
        remoteStoreImpl.listenTargets.set(targetId, targetData.withResumeToken(ByteString.EMPTY_BYTE_STRING, targetData.snapshotVersion));
        // Cause a hard reset by unwatching and rewatching immediately, but
        // deliberately don't send a resume token so that we get a full update.
        sendUnwatchRequest(remoteStoreImpl, targetId);
        // Mark the target we send as being on behalf of an existence filter
        // mismatch, but don't actually retain that in listenTargets. This ensures
        // that we flag the first re-listen this way without impacting future
        // listens of this target (that might happen e.g. on reconnect).
        const requestTargetData = new TargetData(targetData.target, targetId, 1 /* TargetPurpose.ExistenceFilterMismatch */, targetData.sequenceNumber);
        sendWatchRequest(remoteStoreImpl, requestTargetData);
    });
    return remoteStoreImpl.remoteSyncer.applyRemoteEvent(remoteEvent);
}
  22216. /** Handles an error on a target */
  22217. async function handleTargetError(remoteStoreImpl, watchChange) {
  22218. const error = watchChange.cause;
  22219. for (const targetId of watchChange.targetIds) {
  22220. // A watched target might have been removed already.
  22221. if (remoteStoreImpl.listenTargets.has(targetId)) {
  22222. await remoteStoreImpl.remoteSyncer.rejectListen(targetId, error);
  22223. remoteStoreImpl.listenTargets.delete(targetId);
  22224. remoteStoreImpl.watchChangeAggregator.removeTarget(targetId);
  22225. }
  22226. }
  22227. }
/**
 * Attempts to fill our write pipeline with writes from the LocalStore.
 *
 * Called internally to bootstrap or refill the write pipeline and by
 * SyncEngine whenever there are new mutations to process.
 *
 * Starts the write stream if necessary.
 *
 * Batches are pulled in order, each one strictly after the batch id of the
 * last batch already in the pipeline (BATCHID_UNKNOWN when the pipeline is
 * empty). The loop stops when the pipeline is full, the network is disabled,
 * or LocalStore has no further batch; IndexedDB read failures hand control to
 * disableNetworkUntilRecovery().
 */
async function fillWritePipeline(remoteStore) {
    const remoteStoreImpl = debugCast(remoteStore);
    const writeStream = ensureWriteStream(remoteStoreImpl);
    // Resume after the newest batch already queued, if any.
    let lastBatchIdRetrieved = remoteStoreImpl.writePipeline.length > 0
        ? remoteStoreImpl.writePipeline[remoteStoreImpl.writePipeline.length - 1]
            .batchId
        : BATCHID_UNKNOWN;
    while (canAddToWritePipeline(remoteStoreImpl)) {
        try {
            const batch = await localStoreGetNextMutationBatch(remoteStoreImpl.localStore, lastBatchIdRetrieved);
            if (batch === null) {
                // No more local mutations. If nothing is in flight either,
                // let the stream idle out.
                if (remoteStoreImpl.writePipeline.length === 0) {
                    writeStream.markIdle();
                }
                break;
            }
            else {
                lastBatchIdRetrieved = batch.batchId;
                addToWritePipeline(remoteStoreImpl, batch);
            }
        }
        catch (e) {
            // Recoverable IndexedDB failure: go offline until reads succeed.
            await disableNetworkUntilRecovery(remoteStoreImpl, e);
        }
    }
    if (shouldStartWriteStream(remoteStoreImpl)) {
        startWriteStream(remoteStoreImpl);
    }
}
  22265. /**
  22266. * Returns true if we can add to the write pipeline (i.e. the network is
  22267. * enabled and the write pipeline is not full).
  22268. */
  22269. function canAddToWritePipeline(remoteStoreImpl) {
  22270. return (canUseNetwork(remoteStoreImpl) &&
  22271. remoteStoreImpl.writePipeline.length < MAX_PENDING_WRITES);
  22272. }
  22273. /**
  22274. * Queues additional writes to be sent to the write stream, sending them
  22275. * immediately if the write stream is established.
  22276. */
  22277. function addToWritePipeline(remoteStoreImpl, batch) {
  22278. remoteStoreImpl.writePipeline.push(batch);
  22279. const writeStream = ensureWriteStream(remoteStoreImpl);
  22280. if (writeStream.isOpen() && writeStream.handshakeComplete) {
  22281. writeStream.writeMutations(batch.mutations);
  22282. }
  22283. }
  22284. function shouldStartWriteStream(remoteStoreImpl) {
  22285. return (canUseNetwork(remoteStoreImpl) &&
  22286. !ensureWriteStream(remoteStoreImpl).isStarted() &&
  22287. remoteStoreImpl.writePipeline.length > 0);
  22288. }
/**
 * Starts the write stream; the handshake is initiated in onWriteStreamOpen
 * once the stream reports open.
 */
function startWriteStream(remoteStoreImpl) {
    ensureWriteStream(remoteStoreImpl).start();
}
/**
 * Invoked when the write stream opens: initiates the write handshake.
 * Pending batches are flushed later, in onWriteHandshakeComplete.
 */
async function onWriteStreamOpen(remoteStoreImpl) {
    ensureWriteStream(remoteStoreImpl).writeHandshake();
}
  22295. async function onWriteHandshakeComplete(remoteStoreImpl) {
  22296. const writeStream = ensureWriteStream(remoteStoreImpl);
  22297. // Send the write pipeline now that the stream is established.
  22298. for (const batch of remoteStoreImpl.writePipeline) {
  22299. writeStream.writeMutations(batch.mutations);
  22300. }
  22301. }
  22302. async function onMutationResult(remoteStoreImpl, commitVersion, results) {
  22303. const batch = remoteStoreImpl.writePipeline.shift();
  22304. const success = MutationBatchResult.from(batch, commitVersion, results);
  22305. await executeWithRecovery(remoteStoreImpl, () => remoteStoreImpl.remoteSyncer.applySuccessfulWrite(success));
  22306. // It's possible that with the completion of this mutation another
  22307. // slot has freed up.
  22308. await fillWritePipeline(remoteStoreImpl);
  22309. }
  22310. async function onWriteStreamClose(remoteStoreImpl, error) {
  22311. // If the write stream closed after the write handshake completes, a write
  22312. // operation failed and we fail the pending operation.
  22313. if (error && ensureWriteStream(remoteStoreImpl).handshakeComplete) {
  22314. // This error affects the actual write.
  22315. await handleWriteError(remoteStoreImpl, error);
  22316. }
  22317. // The write stream might have been started by refilling the write
  22318. // pipeline for failed writes
  22319. if (shouldStartWriteStream(remoteStoreImpl)) {
  22320. startWriteStream(remoteStoreImpl);
  22321. }
  22322. }
  22323. async function handleWriteError(remoteStoreImpl, error) {
  22324. // Only handle permanent errors here. If it's transient, just let the retry
  22325. // logic kick in.
  22326. if (isPermanentWriteError(error.code)) {
  22327. // This was a permanent error, the request itself was the problem
  22328. // so it's not going to succeed if we resend it.
  22329. const batch = remoteStoreImpl.writePipeline.shift();
  22330. // In this case it's also unlikely that the server itself is melting
  22331. // down -- this was just a bad request so inhibit backoff on the next
  22332. // restart.
  22333. ensureWriteStream(remoteStoreImpl).inhibitBackoff();
  22334. await executeWithRecovery(remoteStoreImpl, () => remoteStoreImpl.remoteSyncer.rejectFailedWrite(batch.batchId, error));
  22335. // It's possible that with the completion of this mutation
  22336. // another slot has freed up.
  22337. await fillWritePipeline(remoteStoreImpl);
  22338. }
  22339. }
  22340. async function restartNetwork(remoteStore) {
  22341. const remoteStoreImpl = debugCast(remoteStore);
  22342. remoteStoreImpl.offlineCauses.add(4 /* OfflineCause.ConnectivityChange */);
  22343. await disableNetworkInternal(remoteStoreImpl);
  22344. remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
  22345. remoteStoreImpl.offlineCauses.delete(4 /* OfflineCause.ConnectivityChange */);
  22346. await enableNetworkInternal(remoteStoreImpl);
  22347. }
/**
 * Handles a user/credential change: tears the streams down, lets the remote
 * syncer swap per-user state, then brings the network back up so new
 * requests carry a fresh auth token. Must run as an in-progress AsyncQueue
 * operation.
 */
async function remoteStoreHandleCredentialChange(remoteStore, user) {
    const remoteStoreImpl = debugCast(remoteStore);
    remoteStoreImpl.asyncQueue.verifyOperationInProgress();
    logDebug(LOG_TAG$5, 'RemoteStore received new credentials');
    // Capture this before adding the CredentialChange cause below makes it false.
    const usesNetwork = canUseNetwork(remoteStoreImpl);
    // Tear down and re-create our network streams. This will ensure we get a
    // fresh auth token for the new user and re-fill the write pipeline with
    // new mutations from the LocalStore (since mutations are per-user).
    remoteStoreImpl.offlineCauses.add(3 /* OfflineCause.CredentialChange */);
    await disableNetworkInternal(remoteStoreImpl);
    if (usesNetwork) {
        // Don't set the network status to Unknown if we are offline.
        remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
    }
    await remoteStoreImpl.remoteSyncer.handleCredentialChange(user);
    remoteStoreImpl.offlineCauses.delete(3 /* OfflineCause.CredentialChange */);
    await enableNetworkInternal(remoteStoreImpl);
}
  22366. /**
  22367. * Toggles the network state when the client gains or loses its primary lease.
  22368. */
  22369. async function remoteStoreApplyPrimaryState(remoteStore, isPrimary) {
  22370. const remoteStoreImpl = debugCast(remoteStore);
  22371. if (isPrimary) {
  22372. remoteStoreImpl.offlineCauses.delete(2 /* OfflineCause.IsSecondary */);
  22373. await enableNetworkInternal(remoteStoreImpl);
  22374. }
  22375. else if (!isPrimary) {
  22376. remoteStoreImpl.offlineCauses.add(2 /* OfflineCause.IsSecondary */);
  22377. await disableNetworkInternal(remoteStoreImpl);
  22378. remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
  22379. }
  22380. }
/**
 * If not yet initialized, registers the WatchStream and its network state
 * callback with `remoteStoreImpl`. Returns the existing stream if one is
 * already available.
 *
 * PORTING NOTE: On iOS and Android, the WatchStream gets registered on startup.
 * This is not done on Web to allow it to be tree-shaken.
 */
function ensureWatchStream(remoteStoreImpl) {
    if (!remoteStoreImpl.watchStream) {
        // Create stream (but note that it is not started yet).
        remoteStoreImpl.watchStream = newPersistentWatchStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
            onOpen: onWatchStreamOpen.bind(null, remoteStoreImpl),
            onClose: onWatchStreamClose.bind(null, remoteStoreImpl),
            onWatchChange: onWatchStreamChange.bind(null, remoteStoreImpl)
        });
        // React to network enable/disable (see enableNetworkInternal /
        // disableNetworkInternal): on enable, restart the stream if targets
        // exist; on disable, stop it and drop per-connection state.
        remoteStoreImpl.onNetworkStatusChange.push(async (enabled) => {
            if (enabled) {
                remoteStoreImpl.watchStream.inhibitBackoff();
                if (shouldStartWatchStream(remoteStoreImpl)) {
                    startWatchStream(remoteStoreImpl);
                }
                else {
                    // No targets to listen to; stream health cannot be confirmed.
                    remoteStoreImpl.onlineStateTracker.set("Unknown" /* OnlineState.Unknown */);
                }
            }
            else {
                await remoteStoreImpl.watchStream.stop();
                cleanUpWatchStreamState(remoteStoreImpl);
            }
        });
    }
    return remoteStoreImpl.watchStream;
}
/**
 * If not yet initialized, registers the WriteStream and its network state
 * callback with `remoteStoreImpl`. Returns the existing stream if one is
 * already available.
 *
 * PORTING NOTE: On iOS and Android, the WriteStream gets registered on startup.
 * This is not done on Web to allow it to be tree-shaken.
 */
function ensureWriteStream(remoteStoreImpl) {
    if (!remoteStoreImpl.writeStream) {
        // Create stream (but note that it is not started yet).
        remoteStoreImpl.writeStream = newPersistentWriteStream(remoteStoreImpl.datastore, remoteStoreImpl.asyncQueue, {
            onOpen: onWriteStreamOpen.bind(null, remoteStoreImpl),
            onClose: onWriteStreamClose.bind(null, remoteStoreImpl),
            onHandshakeComplete: onWriteHandshakeComplete.bind(null, remoteStoreImpl),
            onMutationResult: onMutationResult.bind(null, remoteStoreImpl)
        });
        // React to network enable/disable: on enable, refill the pipeline
        // (which starts the stream if needed); on disable, stop the stream
        // and clear any queued writes.
        remoteStoreImpl.onNetworkStatusChange.push(async (enabled) => {
            if (enabled) {
                remoteStoreImpl.writeStream.inhibitBackoff();
                // This will start the write stream if necessary.
                await fillWritePipeline(remoteStoreImpl);
            }
            else {
                await remoteStoreImpl.writeStream.stop();
                if (remoteStoreImpl.writePipeline.length > 0) {
                    logDebug(LOG_TAG$5, `Stopping write stream with ${remoteStoreImpl.writePipeline.length} pending writes`);
                    remoteStoreImpl.writePipeline = [];
                }
            }
        });
    }
    return remoteStoreImpl.writeStream;
}
  22449. /**
  22450. * @license
  22451. * Copyright 2017 Google LLC
  22452. *
  22453. * Licensed under the Apache License, Version 2.0 (the "License");
  22454. * you may not use this file except in compliance with the License.
  22455. * You may obtain a copy of the License at
  22456. *
  22457. * http://www.apache.org/licenses/LICENSE-2.0
  22458. *
  22459. * Unless required by applicable law or agreed to in writing, software
  22460. * distributed under the License is distributed on an "AS IS" BASIS,
  22461. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  22462. * See the License for the specific language governing permissions and
  22463. * limitations under the License.
  22464. */
// Tag used to scope AsyncQueue-related log output (see logError below).
const LOG_TAG$4 = 'AsyncQueue';
  22466. /**
  22467. * Represents an operation scheduled to be run in the future on an AsyncQueue.
  22468. *
  22469. * It is created via DelayedOperation.createAndSchedule().
  22470. *
  22471. * Supports cancellation (via cancel()) and early execution (via skipDelay()).
  22472. *
  22473. * Note: We implement `PromiseLike` instead of `Promise`, as the `Promise` type
  22474. * in newer versions of TypeScript defines `finally`, which is not available in
  22475. * IE.
  22476. */
class DelayedOperation {
    /**
     * @param asyncQueue - Queue the operation will eventually run on.
     * @param timerId - Timer ID identifying the type of operation.
     * @param targetTimeMs - Absolute epoch-ms time the op is scheduled for.
     * @param op - The operation to run; its result resolves `this.then`.
     * @param removalCallback - Called synchronously once the operation is
     *   executed or canceled so the AsyncQueue can drop its reference.
     */
    constructor(asyncQueue, timerId, targetTimeMs, op, removalCallback) {
        this.asyncQueue = asyncQueue;
        this.timerId = timerId;
        this.targetTimeMs = targetTimeMs;
        this.op = op;
        this.removalCallback = removalCallback;
        this.deferred = new Deferred();
        // Expose only `then` (PromiseLike) — see class comment re: IE/`finally`.
        this.then = this.deferred.promise.then.bind(this.deferred.promise);
        // It's normal for the deferred promise to be canceled (due to cancellation)
        // and so we attach a dummy catch callback to avoid
        // 'UnhandledPromiseRejectionWarning' log spam.
        this.deferred.promise.catch(err => { });
    }
    /**
     * Creates and returns a DelayedOperation that has been scheduled to be
     * executed on the provided asyncQueue after the provided delayMs.
     *
     * @param asyncQueue - The queue to schedule the operation on.
     * @param id - A Timer ID identifying the type of operation this is.
     * @param delayMs - The delay (ms) before the operation should be scheduled.
     * @param op - The operation to run.
     * @param removalCallback - A callback to be called synchronously once the
     * operation is executed or canceled, notifying the AsyncQueue to remove it
     * from its delayedOperations list.
     * PORTING NOTE: This exists to prevent making removeDelayedOperation() and
     * the DelayedOperation class public.
     */
    static createAndSchedule(asyncQueue, timerId, delayMs, op, removalCallback) {
        const targetTime = Date.now() + delayMs;
        const delayedOp = new DelayedOperation(asyncQueue, timerId, targetTime, op, removalCallback);
        delayedOp.start(delayMs);
        return delayedOp;
    }
    /**
     * Starts the timer. This is called immediately after construction by
     * createAndSchedule().
     */
    start(delayMs) {
        // `timerHandle !== null` doubles as the "not yet run or canceled" flag.
        this.timerHandle = setTimeout(() => this.handleDelayElapsed(), delayMs);
    }
    /**
     * Queues the operation to run immediately (if it hasn't already been run or
     * canceled).
     */
    skipDelay() {
        return this.handleDelayElapsed();
    }
    /**
     * Cancels the operation if it hasn't already been executed or canceled. The
     * promise will be rejected.
     *
     * As long as the operation has not yet been run, calling cancel() provides a
     * guarantee that the operation will not be run.
     */
    cancel(reason) {
        if (this.timerHandle !== null) {
            this.clearTimeout();
            this.deferred.reject(new FirestoreError(Code.CANCELLED, 'Operation cancelled' + (reason ? ': ' + reason : '')));
        }
    }
    /**
     * Enqueues the wrapped op on the AsyncQueue. The `timerHandle` check is
     * re-done inside the enqueued closure because cancel() may win the race
     * between scheduling and execution; a canceled op resolves to nothing.
     */
    handleDelayElapsed() {
        this.asyncQueue.enqueueAndForget(() => {
            if (this.timerHandle !== null) {
                this.clearTimeout();
                return this.op().then(result => {
                    return this.deferred.resolve(result);
                });
            }
            else {
                return Promise.resolve();
            }
        });
    }
    /**
     * Idempotently stops the timer, notifies the AsyncQueue (removalCallback)
     * and marks this operation as consumed (`timerHandle = null`).
     */
    clearTimeout() {
        if (this.timerHandle !== null) {
            this.removalCallback(this);
            clearTimeout(this.timerHandle);
            this.timerHandle = null;
        }
    }
}
  22559. /**
  22560. * Returns a FirestoreError that can be surfaced to the user if the provided
  22561. * error is an IndexedDbTransactionError. Re-throws the error otherwise.
  22562. */
  22563. function wrapInUserErrorIfRecoverable(e, msg) {
  22564. logError(LOG_TAG$4, `${msg}: ${e}`);
  22565. if (isIndexedDbTransactionError(e)) {
  22566. return new FirestoreError(Code.UNAVAILABLE, `${msg}: ${e}`);
  22567. }
  22568. else {
  22569. throw e;
  22570. }
  22571. }
  22572. /**
  22573. * @license
  22574. * Copyright 2017 Google LLC
  22575. *
  22576. * Licensed under the Apache License, Version 2.0 (the "License");
  22577. * you may not use this file except in compliance with the License.
  22578. * You may obtain a copy of the License at
  22579. *
  22580. * http://www.apache.org/licenses/LICENSE-2.0
  22581. *
  22582. * Unless required by applicable law or agreed to in writing, software
  22583. * distributed under the License is distributed on an "AS IS" BASIS,
  22584. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  22585. * See the License for the specific language governing permissions and
  22586. * limitations under the License.
  22587. */
  22588. /**
  22589. * DocumentSet is an immutable (copy-on-write) collection that holds documents
  22590. * in order specified by the provided comparator. We always add a document key
  22591. * comparator on top of what is provided to guarantee document equality based on
  22592. * the key.
  22593. */
  22594. class DocumentSet {
  22595. /** The default ordering is by key if the comparator is omitted */
  22596. constructor(comp) {
  22597. // We are adding document key comparator to the end as it's the only
  22598. // guaranteed unique property of a document.
  22599. if (comp) {
  22600. this.comparator = (d1, d2) => comp(d1, d2) || DocumentKey.comparator(d1.key, d2.key);
  22601. }
  22602. else {
  22603. this.comparator = (d1, d2) => DocumentKey.comparator(d1.key, d2.key);
  22604. }
  22605. this.keyedMap = documentMap();
  22606. this.sortedSet = new SortedMap(this.comparator);
  22607. }
  22608. /**
  22609. * Returns an empty copy of the existing DocumentSet, using the same
  22610. * comparator.
  22611. */
  22612. static emptySet(oldSet) {
  22613. return new DocumentSet(oldSet.comparator);
  22614. }
  22615. has(key) {
  22616. return this.keyedMap.get(key) != null;
  22617. }
  22618. get(key) {
  22619. return this.keyedMap.get(key);
  22620. }
  22621. first() {
  22622. return this.sortedSet.minKey();
  22623. }
  22624. last() {
  22625. return this.sortedSet.maxKey();
  22626. }
  22627. isEmpty() {
  22628. return this.sortedSet.isEmpty();
  22629. }
  22630. /**
  22631. * Returns the index of the provided key in the document set, or -1 if the
  22632. * document key is not present in the set;
  22633. */
  22634. indexOf(key) {
  22635. const doc = this.keyedMap.get(key);
  22636. return doc ? this.sortedSet.indexOf(doc) : -1;
  22637. }
  22638. get size() {
  22639. return this.sortedSet.size;
  22640. }
  22641. /** Iterates documents in order defined by "comparator" */
  22642. forEach(cb) {
  22643. this.sortedSet.inorderTraversal((k, v) => {
  22644. cb(k);
  22645. return false;
  22646. });
  22647. }
  22648. /** Inserts or updates a document with the same key */
  22649. add(doc) {
  22650. // First remove the element if we have it.
  22651. const set = this.delete(doc.key);
  22652. return set.copy(set.keyedMap.insert(doc.key, doc), set.sortedSet.insert(doc, null));
  22653. }
  22654. /** Deletes a document with a given key */
  22655. delete(key) {
  22656. const doc = this.get(key);
  22657. if (!doc) {
  22658. return this;
  22659. }
  22660. return this.copy(this.keyedMap.remove(key), this.sortedSet.remove(doc));
  22661. }
  22662. isEqual(other) {
  22663. if (!(other instanceof DocumentSet)) {
  22664. return false;
  22665. }
  22666. if (this.size !== other.size) {
  22667. return false;
  22668. }
  22669. const thisIt = this.sortedSet.getIterator();
  22670. const otherIt = other.sortedSet.getIterator();
  22671. while (thisIt.hasNext()) {
  22672. const thisDoc = thisIt.getNext().key;
  22673. const otherDoc = otherIt.getNext().key;
  22674. if (!thisDoc.isEqual(otherDoc)) {
  22675. return false;
  22676. }
  22677. }
  22678. return true;
  22679. }
  22680. toString() {
  22681. const docStrings = [];
  22682. this.forEach(doc => {
  22683. docStrings.push(doc.toString());
  22684. });
  22685. if (docStrings.length === 0) {
  22686. return 'DocumentSet ()';
  22687. }
  22688. else {
  22689. return 'DocumentSet (\n ' + docStrings.join(' \n') + '\n)';
  22690. }
  22691. }
  22692. copy(keyedMap, sortedSet) {
  22693. const newSet = new DocumentSet();
  22694. newSet.comparator = this.comparator;
  22695. newSet.keyedMap = keyedMap;
  22696. newSet.sortedSet = sortedSet;
  22697. return newSet;
  22698. }
  22699. }
  22700. /**
  22701. * @license
  22702. * Copyright 2017 Google LLC
  22703. *
  22704. * Licensed under the Apache License, Version 2.0 (the "License");
  22705. * you may not use this file except in compliance with the License.
  22706. * You may obtain a copy of the License at
  22707. *
  22708. * http://www.apache.org/licenses/LICENSE-2.0
  22709. *
  22710. * Unless required by applicable law or agreed to in writing, software
  22711. * distributed under the License is distributed on an "AS IS" BASIS,
  22712. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  22713. * See the License for the specific language governing permissions and
  22714. * limitations under the License.
  22715. */
  22716. /**
  22717. * DocumentChangeSet keeps track of a set of changes to docs in a query, merging
  22718. * duplicate events for the same doc.
  22719. */
/**
 * DocumentChangeSet keeps track of a set of changes to docs in a query,
 * merging duplicate events for the same doc. Only the net effect of a
 * sequence of changes per key is retained (e.g. Added followed by Removed
 * cancels out entirely).
 *
 * Change type codes used below: 0=Added, 1=Removed, 2=Modified, 3=Metadata.
 */
class DocumentChangeSet {
    constructor() {
        // Keyed by DocumentKey; holds at most one pending change per document.
        this.changeMap = new SortedMap(DocumentKey.comparator);
    }
    /**
     * Records `change`, collapsing it with any previously tracked change for
     * the same document key.
     */
    track(change) {
        const key = change.doc.key;
        const oldChange = this.changeMap.get(key);
        if (!oldChange) {
            // First change seen for this key: store it as-is.
            this.changeMap = this.changeMap.insert(key, change);
            return;
        }
        // Merge the new change with the existing change.
        // Metadata->(Removed|Modified|Metadata): the later, stronger change wins.
        if (change.type !== 0 /* ChangeType.Added */ &&
            oldChange.type === 3 /* ChangeType.Metadata */) {
            this.changeMap = this.changeMap.insert(key, change);
        }
        // (Added|Modified|Metadata)->Metadata: keep the earlier change type but
        // carry the newer document contents.
        else if (change.type === 3 /* ChangeType.Metadata */ &&
            oldChange.type !== 1 /* ChangeType.Removed */) {
            this.changeMap = this.changeMap.insert(key, {
                type: oldChange.type,
                doc: change.doc
            });
        }
        // Modified->Modified: still a single Modified with the latest doc.
        else if (change.type === 2 /* ChangeType.Modified */ &&
            oldChange.type === 2 /* ChangeType.Modified */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 2 /* ChangeType.Modified */,
                doc: change.doc
            });
        }
        // Added->Modified: the listener never saw the doc before, so the net
        // effect is still an Added (with the modified contents).
        else if (change.type === 2 /* ChangeType.Modified */ &&
            oldChange.type === 0 /* ChangeType.Added */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 0 /* ChangeType.Added */,
                doc: change.doc
            });
        }
        // Added->Removed: the two cancel out; drop the entry entirely.
        else if (change.type === 1 /* ChangeType.Removed */ &&
            oldChange.type === 0 /* ChangeType.Added */) {
            this.changeMap = this.changeMap.remove(key);
        }
        // Modified->Removed: net effect is a Removed of the pre-removal doc.
        else if (change.type === 1 /* ChangeType.Removed */ &&
            oldChange.type === 2 /* ChangeType.Modified */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 1 /* ChangeType.Removed */,
                doc: oldChange.doc
            });
        }
        // Removed->Added: the doc left and came back, which the listener
        // observes as a Modified.
        else if (change.type === 0 /* ChangeType.Added */ &&
            oldChange.type === 1 /* ChangeType.Removed */) {
            this.changeMap = this.changeMap.insert(key, {
                type: 2 /* ChangeType.Modified */,
                doc: change.doc
            });
        }
        else {
            // This includes these cases, which don't make sense:
            // Added->Added
            // Removed->Removed
            // Modified->Added
            // Removed->Modified
            // Metadata->Added
            // Removed->Metadata
            fail();
        }
    }
    /** Returns the collapsed changes in document-key order. */
    getChanges() {
        const changes = [];
        this.changeMap.inorderTraversal((key, change) => {
            changes.push(change);
        });
        return changes;
    }
}
  22794. class ViewSnapshot {
  22795. constructor(query, docs, oldDocs, docChanges, mutatedKeys, fromCache, syncStateChanged, excludesMetadataChanges, hasCachedResults) {
  22796. this.query = query;
  22797. this.docs = docs;
  22798. this.oldDocs = oldDocs;
  22799. this.docChanges = docChanges;
  22800. this.mutatedKeys = mutatedKeys;
  22801. this.fromCache = fromCache;
  22802. this.syncStateChanged = syncStateChanged;
  22803. this.excludesMetadataChanges = excludesMetadataChanges;
  22804. this.hasCachedResults = hasCachedResults;
  22805. }
  22806. /** Returns a view snapshot as if all documents in the snapshot were added. */
  22807. static fromInitialDocuments(query, documents, mutatedKeys, fromCache, hasCachedResults) {
  22808. const changes = [];
  22809. documents.forEach(doc => {
  22810. changes.push({ type: 0 /* ChangeType.Added */, doc });
  22811. });
  22812. return new ViewSnapshot(query, documents, DocumentSet.emptySet(documents), changes, mutatedKeys, fromCache,
  22813. /* syncStateChanged= */ true,
  22814. /* excludesMetadataChanges= */ false, hasCachedResults);
  22815. }
  22816. get hasPendingWrites() {
  22817. return !this.mutatedKeys.isEmpty();
  22818. }
  22819. isEqual(other) {
  22820. if (this.fromCache !== other.fromCache ||
  22821. this.hasCachedResults !== other.hasCachedResults ||
  22822. this.syncStateChanged !== other.syncStateChanged ||
  22823. !this.mutatedKeys.isEqual(other.mutatedKeys) ||
  22824. !queryEquals(this.query, other.query) ||
  22825. !this.docs.isEqual(other.docs) ||
  22826. !this.oldDocs.isEqual(other.oldDocs)) {
  22827. return false;
  22828. }
  22829. const changes = this.docChanges;
  22830. const otherChanges = other.docChanges;
  22831. if (changes.length !== otherChanges.length) {
  22832. return false;
  22833. }
  22834. for (let i = 0; i < changes.length; i++) {
  22835. if (changes[i].type !== otherChanges[i].type ||
  22836. !changes[i].doc.isEqual(otherChanges[i].doc)) {
  22837. return false;
  22838. }
  22839. }
  22840. return true;
  22841. }
  22842. }
  22843. /**
  22844. * @license
  22845. * Copyright 2017 Google LLC
  22846. *
  22847. * Licensed under the Apache License, Version 2.0 (the "License");
  22848. * you may not use this file except in compliance with the License.
  22849. * You may obtain a copy of the License at
  22850. *
  22851. * http://www.apache.org/licenses/LICENSE-2.0
  22852. *
  22853. * Unless required by applicable law or agreed to in writing, software
  22854. * distributed under the License is distributed on an "AS IS" BASIS,
  22855. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  22856. * See the License for the specific language governing permissions and
  22857. * limitations under the License.
  22858. */
  22859. /**
  22860. * Holds the listeners and the last received ViewSnapshot for a query being
  22861. * tracked by EventManager.
  22862. */
  22863. class QueryListenersInfo {
  22864. constructor() {
  22865. this.viewSnap = undefined;
  22866. this.listeners = [];
  22867. }
  22868. }
  22869. function newEventManager() {
  22870. return new EventManagerImpl();
  22871. }
  22872. class EventManagerImpl {
  22873. constructor() {
  22874. this.queries = new ObjectMap(q => canonifyQuery(q), queryEquals);
  22875. this.onlineState = "Unknown" /* OnlineState.Unknown */;
  22876. this.snapshotsInSyncListeners = new Set();
  22877. }
  22878. }
/**
 * Registers a QueryListener with the event manager. For the first listener on
 * a query, asks the sync layer (via `onListen`) for an initial view snapshot
 * before registering; later listeners reuse the cached snapshot. Raises an
 * initial event on the new listener if a snapshot is already available.
 */
async function eventManagerListen(eventManager, listener) {
    const eventManagerImpl = debugCast(eventManager);
    const query = listener.query;
    let firstListen = false;
    let queryInfo = eventManagerImpl.queries.get(query);
    if (!queryInfo) {
        firstListen = true;
        queryInfo = new QueryListenersInfo();
    }
    if (firstListen) {
        try {
            // Await the initial snapshot before publishing the query entry; on
            // failure the query is never registered in `queries`.
            queryInfo.viewSnap = await eventManagerImpl.onListen(query);
        }
        catch (e) {
            const firestoreError = wrapInUserErrorIfRecoverable(e, `Initialization of query '${stringifyQuery(listener.query)}' failed`);
            listener.onError(firestoreError);
            return;
        }
    }
    // Registration happens only after a successful (or unnecessary) onListen.
    eventManagerImpl.queries.set(query, queryInfo);
    queryInfo.listeners.push(listener);
    // Run global snapshot listeners if a consistent snapshot has been emitted.
    listener.applyOnlineStateChange(eventManagerImpl.onlineState);
    if (queryInfo.viewSnap) {
        // Replay the latest snapshot to the newly added listener.
        const raisedEvent = listener.onViewSnapshot(queryInfo.viewSnap);
        if (raisedEvent) {
            raiseSnapshotsInSyncEvent(eventManagerImpl);
        }
    }
}
  22909. async function eventManagerUnlisten(eventManager, listener) {
  22910. const eventManagerImpl = debugCast(eventManager);
  22911. const query = listener.query;
  22912. let lastListen = false;
  22913. const queryInfo = eventManagerImpl.queries.get(query);
  22914. if (queryInfo) {
  22915. const i = queryInfo.listeners.indexOf(listener);
  22916. if (i >= 0) {
  22917. queryInfo.listeners.splice(i, 1);
  22918. lastListen = queryInfo.listeners.length === 0;
  22919. }
  22920. }
  22921. if (lastListen) {
  22922. eventManagerImpl.queries.delete(query);
  22923. return eventManagerImpl.onUnlisten(query);
  22924. }
  22925. }
  22926. function eventManagerOnWatchChange(eventManager, viewSnaps) {
  22927. const eventManagerImpl = debugCast(eventManager);
  22928. let raisedEvent = false;
  22929. for (const viewSnap of viewSnaps) {
  22930. const query = viewSnap.query;
  22931. const queryInfo = eventManagerImpl.queries.get(query);
  22932. if (queryInfo) {
  22933. for (const listener of queryInfo.listeners) {
  22934. if (listener.onViewSnapshot(viewSnap)) {
  22935. raisedEvent = true;
  22936. }
  22937. }
  22938. queryInfo.viewSnap = viewSnap;
  22939. }
  22940. }
  22941. if (raisedEvent) {
  22942. raiseSnapshotsInSyncEvent(eventManagerImpl);
  22943. }
  22944. }
  22945. function eventManagerOnWatchError(eventManager, query, error) {
  22946. const eventManagerImpl = debugCast(eventManager);
  22947. const queryInfo = eventManagerImpl.queries.get(query);
  22948. if (queryInfo) {
  22949. for (const listener of queryInfo.listeners) {
  22950. listener.onError(error);
  22951. }
  22952. }
  22953. // Remove all listeners. NOTE: We don't need to call syncEngine.unlisten()
  22954. // after an error.
  22955. eventManagerImpl.queries.delete(query);
  22956. }
  22957. function eventManagerOnOnlineStateChange(eventManager, onlineState) {
  22958. const eventManagerImpl = debugCast(eventManager);
  22959. eventManagerImpl.onlineState = onlineState;
  22960. let raisedEvent = false;
  22961. eventManagerImpl.queries.forEach((_, queryInfo) => {
  22962. for (const listener of queryInfo.listeners) {
  22963. // Run global snapshot listeners if a consistent snapshot has been emitted.
  22964. if (listener.applyOnlineStateChange(onlineState)) {
  22965. raisedEvent = true;
  22966. }
  22967. }
  22968. });
  22969. if (raisedEvent) {
  22970. raiseSnapshotsInSyncEvent(eventManagerImpl);
  22971. }
  22972. }
  22973. function addSnapshotsInSyncListener(eventManager, observer) {
  22974. const eventManagerImpl = debugCast(eventManager);
  22975. eventManagerImpl.snapshotsInSyncListeners.add(observer);
  22976. // Immediately fire an initial event, indicating all existing listeners
  22977. // are in-sync.
  22978. observer.next();
  22979. }
  22980. function removeSnapshotsInSyncListener(eventManager, observer) {
  22981. const eventManagerImpl = debugCast(eventManager);
  22982. eventManagerImpl.snapshotsInSyncListeners.delete(observer);
  22983. }
  22984. // Call all global snapshot listeners that have been set.
  22985. function raiseSnapshotsInSyncEvent(eventManagerImpl) {
  22986. eventManagerImpl.snapshotsInSyncListeners.forEach(observer => {
  22987. observer.next();
  22988. });
  22989. }
  22990. /**
  22991. * QueryListener takes a series of internal view snapshots and determines
  22992. * when to raise the event.
  22993. *
  22994. * It uses an Observer to dispatch events.
  22995. */
/**
 * QueryListener takes a series of internal view snapshots and decides when to
 * raise a user-facing event, honoring the listener options
 * (`includeMetadataChanges`, `waitForSyncWhenOnline`). Dispatches events via
 * the wrapped Observer.
 */
class QueryListener {
    constructor(query, queryObserver, options) {
        this.query = query;
        this.queryObserver = queryObserver;
        /**
         * Initial snapshots (e.g. from cache) may not be propagated to the wrapped
         * observer. This flag is set to true once we've actually raised an event.
         */
        this.raisedInitialEvent = false;
        // Last snapshot seen, whether or not it was raised to the observer.
        this.snap = null;
        this.onlineState = "Unknown" /* OnlineState.Unknown */;
        this.options = options || {};
    }
    /**
     * Applies the new ViewSnapshot to this listener, raising a user-facing event
     * if applicable (depending on what changed, whether the user has opted into
     * metadata-only changes, etc.). Returns true if a user-facing event was
     * indeed raised.
     */
    onViewSnapshot(snap) {
        if (!this.options.includeMetadataChanges) {
            // Remove the metadata only changes.
            const docChanges = [];
            for (const docChange of snap.docChanges) {
                if (docChange.type !== 3 /* ChangeType.Metadata */) {
                    docChanges.push(docChange);
                }
            }
            // Rebuild the snapshot with metadata changes stripped and the
            // excludesMetadataChanges flag set.
            snap = new ViewSnapshot(snap.query, snap.docs, snap.oldDocs, docChanges, snap.mutatedKeys, snap.fromCache, snap.syncStateChanged,
            /* excludesMetadataChanges= */ true, snap.hasCachedResults);
        }
        let raisedEvent = false;
        if (!this.raisedInitialEvent) {
            // Before the first event, only raise when the initial-event
            // conditions (sync/cache/online state) are met.
            if (this.shouldRaiseInitialEvent(snap, this.onlineState)) {
                this.raiseInitialEvent(snap);
                raisedEvent = true;
            }
        }
        else if (this.shouldRaiseEvent(snap)) {
            this.queryObserver.next(snap);
            raisedEvent = true;
        }
        // Always remember the latest snapshot, even when no event was raised.
        this.snap = snap;
        return raisedEvent;
    }
    onError(error) {
        this.queryObserver.error(error);
    }
    /** Returns whether a snapshot was raised. */
    applyOnlineStateChange(onlineState) {
        this.onlineState = onlineState;
        let raisedEvent = false;
        // A buffered snapshot may become eligible for the initial event once
        // the online state changes (e.g. we learn we are offline).
        if (this.snap &&
            !this.raisedInitialEvent &&
            this.shouldRaiseInitialEvent(this.snap, onlineState)) {
            this.raiseInitialEvent(this.snap);
            raisedEvent = true;
        }
        return raisedEvent;
    }
    /**
     * Decides whether a not-yet-raised listener may raise its first event for
     * the given snapshot under the given online state.
     */
    shouldRaiseInitialEvent(snap, onlineState) {
        // Always raise the first event when we're synced
        if (!snap.fromCache) {
            return true;
        }
        // NOTE: We consider OnlineState.Unknown as online (it should become Offline
        // or Online if we wait long enough).
        const maybeOnline = onlineState !== "Offline" /* OnlineState.Offline */;
        // Don't raise the event if we're online, aren't synced yet (checked
        // above) and are waiting for a sync.
        if (this.options.waitForSyncWhenOnline && maybeOnline) {
            return false;
        }
        // Raise data from cache if we have any documents, have cached results before,
        // or we are offline.
        return (!snap.docs.isEmpty() ||
            snap.hasCachedResults ||
            onlineState === "Offline" /* OnlineState.Offline */);
    }
    /** Decides whether a subsequent (non-initial) event should be raised. */
    shouldRaiseEvent(snap) {
        // We don't need to handle includeDocumentMetadataChanges here because
        // the Metadata only changes have already been stripped out if needed.
        // At this point the only changes we will see are the ones we should
        // propagate.
        if (snap.docChanges.length > 0) {
            return true;
        }
        const hasPendingWritesChanged = this.snap && this.snap.hasPendingWrites !== snap.hasPendingWrites;
        if (snap.syncStateChanged || hasPendingWritesChanged) {
            // Metadata-only differences are surfaced only when opted in.
            return this.options.includeMetadataChanges === true;
        }
        // Generally we should have hit one of the cases above, but it's possible
        // to get here if there were only metadata docChanges and they got
        // stripped out.
        return false;
    }
    /** Raises the first event, presenting every document as newly added. */
    raiseInitialEvent(snap) {
        snap = ViewSnapshot.fromInitialDocuments(snap.query, snap.docs, snap.mutatedKeys, snap.fromCache, snap.hasCachedResults);
        this.raisedInitialEvent = true;
        this.queryObserver.next(snap);
    }
}
  23098. /**
  23099. * @license
  23100. * Copyright 2017 Google LLC
  23101. *
  23102. * Licensed under the Apache License, Version 2.0 (the "License");
  23103. * you may not use this file except in compliance with the License.
  23104. * You may obtain a copy of the License at
  23105. *
  23106. * http://www.apache.org/licenses/LICENSE-2.0
  23107. *
  23108. * Unless required by applicable law or agreed to in writing, software
  23109. * distributed under the License is distributed on an "AS IS" BASIS,
  23110. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  23111. * See the License for the specific language governing permissions and
  23112. * limitations under the License.
  23113. */
  23114. /**
  23115. * A set of changes to what documents are currently in view and out of view for
  23116. * a given query. These changes are sent to the LocalStore by the View (via
  23117. * the SyncEngine) and are used to pin / unpin documents as appropriate.
  23118. */
  23119. class LocalViewChanges {
  23120. constructor(targetId, fromCache, addedKeys, removedKeys) {
  23121. this.targetId = targetId;
  23122. this.fromCache = fromCache;
  23123. this.addedKeys = addedKeys;
  23124. this.removedKeys = removedKeys;
  23125. }
  23126. static fromSnapshot(targetId, viewSnapshot) {
  23127. let addedKeys = documentKeySet();
  23128. let removedKeys = documentKeySet();
  23129. for (const docChange of viewSnapshot.docChanges) {
  23130. switch (docChange.type) {
  23131. case 0 /* ChangeType.Added */:
  23132. addedKeys = addedKeys.add(docChange.doc.key);
  23133. break;
  23134. case 1 /* ChangeType.Removed */:
  23135. removedKeys = removedKeys.add(docChange.doc.key);
  23136. break;
  23137. // do nothing
  23138. }
  23139. }
  23140. return new LocalViewChanges(targetId, viewSnapshot.fromCache, addedKeys, removedKeys);
  23141. }
  23142. }
  23143. /**
  23144. * @license
  23145. * Copyright 2020 Google LLC
  23146. *
  23147. * Licensed under the Apache License, Version 2.0 (the "License");
  23148. * you may not use this file except in compliance with the License.
  23149. * You may obtain a copy of the License at
  23150. *
  23151. * http://www.apache.org/licenses/LICENSE-2.0
  23152. *
  23153. * Unless required by applicable law or agreed to in writing, software
  23154. * distributed under the License is distributed on an "AS IS" BASIS,
  23155. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  23156. * See the License for the specific language governing permissions and
  23157. * limitations under the License.
  23158. */
  23159. /**
  23160. * Helper to convert objects from bundles to model objects in the SDK.
  23161. */
  23162. class BundleConverterImpl {
  23163. constructor(serializer) {
  23164. this.serializer = serializer;
  23165. }
  23166. toDocumentKey(name) {
  23167. return fromName(this.serializer, name);
  23168. }
  23169. /**
  23170. * Converts a BundleDocument to a MutableDocument.
  23171. */
  23172. toMutableDocument(bundledDoc) {
  23173. if (bundledDoc.metadata.exists) {
  23174. return fromDocument(this.serializer, bundledDoc.document, false);
  23175. }
  23176. else {
  23177. return MutableDocument.newNoDocument(this.toDocumentKey(bundledDoc.metadata.name), this.toSnapshotVersion(bundledDoc.metadata.readTime));
  23178. }
  23179. }
  23180. toSnapshotVersion(time) {
  23181. return fromVersion(time);
  23182. }
  23183. }
  23184. /**
  23185. * A class to process the elements from a bundle, load them into local
  23186. * storage and provide progress update while loading.
  23187. */
/**
 * A class to process the elements from a bundle, load them into local
 * storage and provide progress update while loading. Elements are fed in
 * bundle order via `addSizedElement` and committed in `complete`.
 */
class BundleLoader {
    constructor(bundleMetadata, localStore, serializer) {
        this.bundleMetadata = bundleMetadata;
        this.localStore = localStore;
        this.serializer = serializer;
        /** Batched queries to be saved into storage */
        this.queries = [];
        /** Batched documents to be saved into storage */
        this.documents = [];
        /** The collection groups affected by this bundle. */
        this.collectionGroups = new Set();
        // Running progress, seeded from the bundle's declared totals.
        this.progress = bundleInitialProgress(bundleMetadata);
    }
    /**
     * Adds an element from the bundle to the loader.
     *
     * Returns a new progress if adding the element leads to a new progress,
     * otherwise returns null.
     */
    addSizedElement(element) {
        this.progress.bytesLoaded += element.byteLength;
        let documentsLoaded = this.progress.documentsLoaded;
        if (element.payload.namedQuery) {
            this.queries.push(element.payload.namedQuery);
        }
        else if (element.payload.documentMetadata) {
            this.documents.push({ metadata: element.payload.documentMetadata });
            // A non-existent document has no content element following it, so
            // it counts as fully loaded right away.
            if (!element.payload.documentMetadata.exists) {
                ++documentsLoaded;
            }
            const path = ResourcePath.fromString(element.payload.documentMetadata.name);
            // The second-to-last path segment is the collection id.
            this.collectionGroups.add(path.get(path.length - 2));
        }
        else if (element.payload.document) {
            // Bundles emit metadata immediately before its document, so the
            // content attaches to the most recently pushed metadata entry.
            this.documents[this.documents.length - 1].document =
                element.payload.document;
            ++documentsLoaded;
        }
        if (documentsLoaded !== this.progress.documentsLoaded) {
            this.progress.documentsLoaded = documentsLoaded;
            // Hand out a snapshot copy so callers can't mutate live progress.
            return Object.assign({}, this.progress);
        }
        return null;
    }
    /**
     * Builds a map from named-query name to the set of document keys that the
     * bundle associates with that query.
     */
    getQueryDocumentMapping(documents) {
        const queryDocumentMap = new Map();
        const bundleConverter = new BundleConverterImpl(this.serializer);
        for (const bundleDoc of documents) {
            if (bundleDoc.metadata.queries) {
                const documentKey = bundleConverter.toDocumentKey(bundleDoc.metadata.name);
                for (const queryName of bundleDoc.metadata.queries) {
                    const documentKeys = (queryDocumentMap.get(queryName) || documentKeySet()).add(documentKey);
                    queryDocumentMap.set(queryName, documentKeys);
                }
            }
        }
        return queryDocumentMap;
    }
    /**
     * Update the progress to 'Success' and return the updated progress.
     * Applies all batched documents and named queries to the local store.
     */
    async complete() {
        const changedDocs = await localStoreApplyBundledDocuments(this.localStore, new BundleConverterImpl(this.serializer), this.documents, this.bundleMetadata.id);
        const queryDocumentMap = this.getQueryDocumentMapping(this.documents);
        for (const q of this.queries) {
            await localStoreSaveNamedQuery(this.localStore, q, queryDocumentMap.get(q.name));
        }
        this.progress.taskState = 'Success';
        return {
            progress: this.progress,
            changedCollectionGroups: this.collectionGroups,
            changedDocs
        };
    }
}
  23263. /**
  23264. * Returns a `LoadBundleTaskProgress` representing the initial progress of
  23265. * loading a bundle.
  23266. */
  23267. function bundleInitialProgress(metadata) {
  23268. return {
  23269. taskState: 'Running',
  23270. documentsLoaded: 0,
  23271. bytesLoaded: 0,
  23272. totalDocuments: metadata.totalDocuments,
  23273. totalBytes: metadata.totalBytes
  23274. };
  23275. }
  23276. /**
  23277. * Returns a `LoadBundleTaskProgress` representing the progress that the loading
  23278. * has succeeded.
  23279. */
  23280. function bundleSuccessProgress(metadata) {
  23281. return {
  23282. taskState: 'Success',
  23283. documentsLoaded: metadata.totalDocuments,
  23284. bytesLoaded: metadata.totalBytes,
  23285. totalDocuments: metadata.totalDocuments,
  23286. totalBytes: metadata.totalBytes
  23287. };
  23288. }
  23289. /**
  23290. * @license
  23291. * Copyright 2017 Google LLC
  23292. *
  23293. * Licensed under the Apache License, Version 2.0 (the "License");
  23294. * you may not use this file except in compliance with the License.
  23295. * You may obtain a copy of the License at
  23296. *
  23297. * http://www.apache.org/licenses/LICENSE-2.0
  23298. *
  23299. * Unless required by applicable law or agreed to in writing, software
  23300. * distributed under the License is distributed on an "AS IS" BASIS,
  23301. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  23302. * See the License for the specific language governing permissions and
  23303. * limitations under the License.
  23304. */
  23305. class AddedLimboDocument {
  23306. constructor(key) {
  23307. this.key = key;
  23308. }
  23309. }
  23310. class RemovedLimboDocument {
  23311. constructor(key) {
  23312. this.key = key;
  23313. }
  23314. }
  23315. /**
  23316. * View is responsible for computing the final merged truth of what docs are in
  23317. * a query. It gets notified of local and remote changes to docs, and applies
  23318. * the query filters and limits to determine the most correct possible results.
  23319. */
  23320. class View {
  23321. constructor(query,
  23322. /** Documents included in the remote target */
  23323. _syncedDocuments) {
  23324. this.query = query;
  23325. this._syncedDocuments = _syncedDocuments;
  23326. this.syncState = null;
  23327. this.hasCachedResults = false;
  23328. /**
  23329. * A flag whether the view is current with the backend. A view is considered
  23330. * current after it has seen the current flag from the backend and did not
  23331. * lose consistency within the watch stream (e.g. because of an existence
  23332. * filter mismatch).
  23333. */
  23334. this.current = false;
  23335. /** Documents in the view but not in the remote target */
  23336. this.limboDocuments = documentKeySet();
  23337. /** Document Keys that have local changes */
  23338. this.mutatedKeys = documentKeySet();
  23339. this.docComparator = newQueryComparator(query);
  23340. this.documentSet = new DocumentSet(this.docComparator);
  23341. }
  23342. /**
  23343. * The set of remote documents that the server has told us belongs to the target associated with
  23344. * this view.
  23345. */
  23346. get syncedDocuments() {
  23347. return this._syncedDocuments;
  23348. }
  23349. /**
  23350. * Iterates over a set of doc changes, applies the query limit, and computes
  23351. * what the new results should be, what the changes were, and whether we may
  23352. * need to go back to the local cache for more results. Does not make any
  23353. * changes to the view.
  23354. * @param docChanges - The doc changes to apply to this view.
  23355. * @param previousChanges - If this is being called with a refill, then start
  23356. * with this set of docs and changes instead of the current view.
  23357. * @returns a new set of docs, changes, and refill flag.
  23358. */
  23359. computeDocChanges(docChanges, previousChanges) {
  23360. const changeSet = previousChanges
  23361. ? previousChanges.changeSet
  23362. : new DocumentChangeSet();
  23363. const oldDocumentSet = previousChanges
  23364. ? previousChanges.documentSet
  23365. : this.documentSet;
  23366. let newMutatedKeys = previousChanges
  23367. ? previousChanges.mutatedKeys
  23368. : this.mutatedKeys;
  23369. let newDocumentSet = oldDocumentSet;
  23370. let needsRefill = false;
  23371. // Track the last doc in a (full) limit. This is necessary, because some
  23372. // update (a delete, or an update moving a doc past the old limit) might
  23373. // mean there is some other document in the local cache that either should
  23374. // come (1) between the old last limit doc and the new last document, in the
  23375. // case of updates, or (2) after the new last document, in the case of
  23376. // deletes. So we keep this doc at the old limit to compare the updates to.
  23377. //
  23378. // Note that this should never get used in a refill (when previousChanges is
  23379. // set), because there will only be adds -- no deletes or updates.
  23380. const lastDocInLimit = this.query.limitType === "F" /* LimitType.First */ &&
  23381. oldDocumentSet.size === this.query.limit
  23382. ? oldDocumentSet.last()
  23383. : null;
  23384. const firstDocInLimit = this.query.limitType === "L" /* LimitType.Last */ &&
  23385. oldDocumentSet.size === this.query.limit
  23386. ? oldDocumentSet.first()
  23387. : null;
  23388. docChanges.inorderTraversal((key, entry) => {
  23389. const oldDoc = oldDocumentSet.get(key);
  23390. const newDoc = queryMatches(this.query, entry) ? entry : null;
  23391. const oldDocHadPendingMutations = oldDoc
  23392. ? this.mutatedKeys.has(oldDoc.key)
  23393. : false;
  23394. const newDocHasPendingMutations = newDoc
  23395. ? newDoc.hasLocalMutations ||
  23396. // We only consider committed mutations for documents that were
  23397. // mutated during the lifetime of the view.
  23398. (this.mutatedKeys.has(newDoc.key) && newDoc.hasCommittedMutations)
  23399. : false;
  23400. let changeApplied = false;
  23401. // Calculate change
  23402. if (oldDoc && newDoc) {
  23403. const docsEqual = oldDoc.data.isEqual(newDoc.data);
  23404. if (!docsEqual) {
  23405. if (!this.shouldWaitForSyncedDocument(oldDoc, newDoc)) {
  23406. changeSet.track({
  23407. type: 2 /* ChangeType.Modified */,
  23408. doc: newDoc
  23409. });
  23410. changeApplied = true;
  23411. if ((lastDocInLimit &&
  23412. this.docComparator(newDoc, lastDocInLimit) > 0) ||
  23413. (firstDocInLimit &&
  23414. this.docComparator(newDoc, firstDocInLimit) < 0)) {
  23415. // This doc moved from inside the limit to outside the limit.
  23416. // That means there may be some other doc in the local cache
  23417. // that should be included instead.
  23418. needsRefill = true;
  23419. }
  23420. }
  23421. }
  23422. else if (oldDocHadPendingMutations !== newDocHasPendingMutations) {
  23423. changeSet.track({ type: 3 /* ChangeType.Metadata */, doc: newDoc });
  23424. changeApplied = true;
  23425. }
  23426. }
  23427. else if (!oldDoc && newDoc) {
  23428. changeSet.track({ type: 0 /* ChangeType.Added */, doc: newDoc });
  23429. changeApplied = true;
  23430. }
  23431. else if (oldDoc && !newDoc) {
  23432. changeSet.track({ type: 1 /* ChangeType.Removed */, doc: oldDoc });
  23433. changeApplied = true;
  23434. if (lastDocInLimit || firstDocInLimit) {
  23435. // A doc was removed from a full limit query. We'll need to
  23436. // requery from the local cache to see if we know about some other
  23437. // doc that should be in the results.
  23438. needsRefill = true;
  23439. }
  23440. }
  23441. if (changeApplied) {
  23442. if (newDoc) {
  23443. newDocumentSet = newDocumentSet.add(newDoc);
  23444. if (newDocHasPendingMutations) {
  23445. newMutatedKeys = newMutatedKeys.add(key);
  23446. }
  23447. else {
  23448. newMutatedKeys = newMutatedKeys.delete(key);
  23449. }
  23450. }
  23451. else {
  23452. newDocumentSet = newDocumentSet.delete(key);
  23453. newMutatedKeys = newMutatedKeys.delete(key);
  23454. }
  23455. }
  23456. });
  23457. // Drop documents out to meet limit/limitToLast requirement.
  23458. if (this.query.limit !== null) {
  23459. while (newDocumentSet.size > this.query.limit) {
  23460. const oldDoc = this.query.limitType === "F" /* LimitType.First */
  23461. ? newDocumentSet.last()
  23462. : newDocumentSet.first();
  23463. newDocumentSet = newDocumentSet.delete(oldDoc.key);
  23464. newMutatedKeys = newMutatedKeys.delete(oldDoc.key);
  23465. changeSet.track({ type: 1 /* ChangeType.Removed */, doc: oldDoc });
  23466. }
  23467. }
  23468. return {
  23469. documentSet: newDocumentSet,
  23470. changeSet,
  23471. needsRefill,
  23472. mutatedKeys: newMutatedKeys
  23473. };
  23474. }
  23475. shouldWaitForSyncedDocument(oldDoc, newDoc) {
  23476. // We suppress the initial change event for documents that were modified as
  23477. // part of a write acknowledgment (e.g. when the value of a server transform
  23478. // is applied) as Watch will send us the same document again.
  23479. // By suppressing the event, we only raise two user visible events (one with
  23480. // `hasPendingWrites` and the final state of the document) instead of three
  23481. // (one with `hasPendingWrites`, the modified document with
  23482. // `hasPendingWrites` and the final state of the document).
  23483. return (oldDoc.hasLocalMutations &&
  23484. newDoc.hasCommittedMutations &&
  23485. !newDoc.hasLocalMutations);
  23486. }
    /**
     * Updates the view with the given ViewDocumentChanges and optionally updates
     * limbo docs and sync state from the provided target change.
     * @param docChanges - The set of changes to make to the view's docs.
     * @param updateLimboDocuments - Whether to update limbo documents based on
     * this change.
     * @param targetChange - A target change to apply for computing limbo docs and
     * sync state.
     * @returns A new ViewChange with the given docs, changes, and sync state.
     */
    // PORTING NOTE: The iOS/Android clients always compute limbo document changes.
    applyChanges(docChanges, updateLimboDocuments, targetChange) {
        const oldDocs = this.documentSet;
        this.documentSet = docChanges.documentSet;
        this.mutatedKeys = docChanges.mutatedKeys;
        // Sort changes based on type and query comparator
        const changes = docChanges.changeSet.getChanges();
        changes.sort((c1, c2) => {
            return (compareChangeType(c1.type, c2.type) ||
                this.docComparator(c1.doc, c2.doc));
        });
        // Fold the target change into syncedDocuments/current first, since limbo
        // computation below depends on both.
        this.applyTargetChange(targetChange);
        const limboChanges = updateLimboDocuments
            ? this.updateLimboDocuments()
            : [];
        // The view is "synced" once the server marked the target current and no
        // documents remain in limbo.
        const synced = this.limboDocuments.size === 0 && this.current;
        const newSyncState = synced ? 1 /* SyncState.Synced */ : 0 /* SyncState.Local */;
        const syncStateChanged = newSyncState !== this.syncState;
        this.syncState = newSyncState;
        if (changes.length === 0 && !syncStateChanged) {
            // no changes
            return { limboChanges };
        }
        else {
            const snap = new ViewSnapshot(this.query, docChanges.documentSet, oldDocs, changes, docChanges.mutatedKeys, newSyncState === 0 /* SyncState.Local */, syncStateChanged,
            /* excludesMetadataChanges= */ false, targetChange
                ? targetChange.resumeToken.approximateByteSize() > 0
                : false);
            return {
                snapshot: snap,
                limboChanges
            };
        }
    }
  23531. /**
  23532. * Applies an OnlineState change to the view, potentially generating a
  23533. * ViewChange if the view's syncState changes as a result.
  23534. */
  23535. applyOnlineStateChange(onlineState) {
  23536. if (this.current && onlineState === "Offline" /* OnlineState.Offline */) {
  23537. // If we're offline, set `current` to false and then call applyChanges()
  23538. // to refresh our syncState and generate a ViewChange as appropriate. We
  23539. // are guaranteed to get a new TargetChange that sets `current` back to
  23540. // true once the client is back online.
  23541. this.current = false;
  23542. return this.applyChanges({
  23543. documentSet: this.documentSet,
  23544. changeSet: new DocumentChangeSet(),
  23545. mutatedKeys: this.mutatedKeys,
  23546. needsRefill: false
  23547. },
  23548. /* updateLimboDocuments= */ false);
  23549. }
  23550. else {
  23551. // No effect, just return a no-op ViewChange.
  23552. return { limboChanges: [] };
  23553. }
  23554. }
  23555. /**
  23556. * Returns whether the doc for the given key should be in limbo.
  23557. */
  23558. shouldBeInLimbo(key) {
  23559. // If the remote end says it's part of this query, it's not in limbo.
  23560. if (this._syncedDocuments.has(key)) {
  23561. return false;
  23562. }
  23563. // The local store doesn't think it's a result, so it shouldn't be in limbo.
  23564. if (!this.documentSet.has(key)) {
  23565. return false;
  23566. }
  23567. // If there are local changes to the doc, they might explain why the server
  23568. // doesn't know that it's part of the query. So don't put it in limbo.
  23569. // TODO(klimt): Ideally, we would only consider changes that might actually
  23570. // affect this specific query.
  23571. if (this.documentSet.get(key).hasLocalMutations) {
  23572. return false;
  23573. }
  23574. // Everything else is in limbo.
  23575. return true;
  23576. }
  23577. /**
  23578. * Updates syncedDocuments, current, and limbo docs based on the given change.
  23579. * Returns the list of changes to which docs are in limbo.
  23580. */
  23581. applyTargetChange(targetChange) {
  23582. if (targetChange) {
  23583. targetChange.addedDocuments.forEach(key => (this._syncedDocuments = this._syncedDocuments.add(key)));
  23584. targetChange.modifiedDocuments.forEach(key => {
  23585. });
  23586. targetChange.removedDocuments.forEach(key => (this._syncedDocuments = this._syncedDocuments.delete(key)));
  23587. this.current = targetChange.current;
  23588. }
  23589. }
    /**
     * Recomputes the set of limbo documents from the current result set and
     * returns the Added/Removed limbo-document changes relative to the previous
     * limbo set.
     */
    updateLimboDocuments() {
        // We can only determine limbo documents when we're in-sync with the server.
        if (!this.current) {
            return [];
        }
        // TODO(klimt): Do this incrementally so that it's not quadratic when
        // updating many documents.
        // Rebuild the limbo set from scratch by testing every current result.
        const oldLimboDocuments = this.limboDocuments;
        this.limboDocuments = documentKeySet();
        this.documentSet.forEach(doc => {
            if (this.shouldBeInLimbo(doc.key)) {
                this.limboDocuments = this.limboDocuments.add(doc.key);
            }
        });
        // Diff the new limbo docs with the old limbo docs.
        const changes = [];
        oldLimboDocuments.forEach(key => {
            if (!this.limboDocuments.has(key)) {
                changes.push(new RemovedLimboDocument(key));
            }
        });
        this.limboDocuments.forEach(key => {
            if (!oldLimboDocuments.has(key)) {
                changes.push(new AddedLimboDocument(key));
            }
        });
        return changes;
    }
  23618. /**
  23619. * Update the in-memory state of the current view with the state read from
  23620. * persistence.
  23621. *
  23622. * We update the query view whenever a client's primary status changes:
  23623. * - When a client transitions from primary to secondary, it can miss
  23624. * LocalStorage updates and its query views may temporarily not be
  23625. * synchronized with the state on disk.
  23626. * - For secondary to primary transitions, the client needs to update the list
  23627. * of `syncedDocuments` since secondary clients update their query views
  23628. * based purely on synthesized RemoteEvents.
  23629. *
  23630. * @param queryResult.documents - The documents that match the query according
  23631. * to the LocalStore.
  23632. * @param queryResult.remoteKeys - The keys of the documents that match the
  23633. * query according to the backend.
  23634. *
  23635. * @returns The ViewChange that resulted from this synchronization.
  23636. */
  23637. // PORTING NOTE: Multi-tab only.
  23638. synchronizeWithPersistedState(queryResult) {
  23639. this._syncedDocuments = queryResult.remoteKeys;
  23640. this.limboDocuments = documentKeySet();
  23641. const docChanges = this.computeDocChanges(queryResult.documents);
  23642. return this.applyChanges(docChanges, /*updateLimboDocuments=*/ true);
  23643. }
  23644. /**
  23645. * Returns a view snapshot as if this query was just listened to. Contains
  23646. * a document add for every existing document and the `fromCache` and
  23647. * `hasPendingWrites` status of the already established view.
  23648. */
  23649. // PORTING NOTE: Multi-tab only.
  23650. computeInitialSnapshot() {
  23651. return ViewSnapshot.fromInitialDocuments(this.query, this.documentSet, this.mutatedKeys, this.syncState === 0 /* SyncState.Local */, this.hasCachedResults);
  23652. }
  23653. }
  23654. function compareChangeType(c1, c2) {
  23655. const order = (change) => {
  23656. switch (change) {
  23657. case 0 /* ChangeType.Added */:
  23658. return 1;
  23659. case 2 /* ChangeType.Modified */:
  23660. return 2;
  23661. case 3 /* ChangeType.Metadata */:
  23662. // A metadata change is converted to a modified change at the public
  23663. // api layer. Since we sort by document key and then change type,
  23664. // metadata and modified changes must be sorted equivalently.
  23665. return 2;
  23666. case 1 /* ChangeType.Removed */:
  23667. return 0;
  23668. default:
  23669. return fail();
  23670. }
  23671. };
  23672. return order(c1) - order(c2);
  23673. }
  23674. /**
  23675. * @license
  23676. * Copyright 2020 Google LLC
  23677. *
  23678. * Licensed under the Apache License, Version 2.0 (the "License");
  23679. * you may not use this file except in compliance with the License.
  23680. * You may obtain a copy of the License at
  23681. *
  23682. * http://www.apache.org/licenses/LICENSE-2.0
  23683. *
  23684. * Unless required by applicable law or agreed to in writing, software
  23685. * distributed under the License is distributed on an "AS IS" BASIS,
  23686. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  23687. * See the License for the specific language governing permissions and
  23688. * limitations under the License.
  23689. */
/** Tag used to attribute SyncEngine output in debug logs. */
const LOG_TAG$3 = 'SyncEngine';
  23691. /**
  23692. * QueryView contains all of the data that SyncEngine needs to keep track of for
  23693. * a particular query.
  23694. */
  23695. class QueryView {
  23696. constructor(
  23697. /**
  23698. * The query itself.
  23699. */
  23700. query,
  23701. /**
  23702. * The target number created by the client that is used in the watch
  23703. * stream to identify this query.
  23704. */
  23705. targetId,
  23706. /**
  23707. * The view is responsible for computing the final merged truth of what
  23708. * docs are in the query. It gets notified of local and remote changes,
  23709. * and applies the query filters and limits to determine the most correct
  23710. * possible results.
  23711. */
  23712. view) {
  23713. this.query = query;
  23714. this.targetId = targetId;
  23715. this.view = view;
  23716. }
  23717. }
  23718. /** Tracks a limbo resolution. */
  23719. class LimboResolution {
  23720. constructor(key) {
  23721. this.key = key;
  23722. /**
  23723. * Set to true once we've received a document. This is used in
  23724. * getRemoteKeysForTarget() and ultimately used by WatchChangeAggregator to
  23725. * decide whether it needs to manufacture a delete event for the target once
  23726. * the target is CURRENT.
  23727. */
  23728. this.receivedDocument = false;
  23729. }
  23730. }
/**
 * An implementation of `SyncEngine` coordinating with other parts of SDK.
 *
 * The parts of SyncEngine that act as a callback to RemoteStore need to be
 * registered individually. This is done in `syncEngineWrite()` and
 * `syncEngineListen()` (as well as `applyPrimaryState()`) as these methods
 * serve as entry points to RemoteStore's functionality.
 *
 * Note: some field defined in this class might have public access level, but
 * the class is not exported so they are only accessible from this module.
 * This is useful to implement optional features (like bundles) in free
 * functions, such that they are tree-shakeable.
 */
class SyncEngineImpl {
    constructor(localStore, remoteStore, eventManager,
    // PORTING NOTE: Manages state synchronization in multi-tab environments.
    sharedClientState, currentUser, maxConcurrentLimboResolutions) {
        this.localStore = localStore;
        this.remoteStore = remoteStore;
        this.eventManager = eventManager;
        this.sharedClientState = sharedClientState;
        this.currentUser = currentUser;
        this.maxConcurrentLimboResolutions = maxConcurrentLimboResolutions;
        // Populated lazily (e.g. via ensureWatchCallbacks) before events flow.
        this.syncEngineListener = {};
        // Maps a query to its QueryView, keyed by canonical query string.
        this.queryViewsByQuery = new ObjectMap(q => canonifyQuery(q), queryEquals);
        // Reverse mapping: target ID -> list of queries served by that target.
        this.queriesByTarget = new Map();
        /**
         * The keys of documents that are in limbo for which we haven't yet started a
         * limbo resolution query. The strings in this set are the result of calling
         * `key.path.canonicalString()` where `key` is a `DocumentKey` object.
         *
         * The `Set` type was chosen because it provides efficient lookup and removal
         * of arbitrary elements and it also maintains insertion order, providing the
         * desired queue-like FIFO semantics.
         */
        this.enqueuedLimboResolutions = new Set();
        /**
         * Keeps track of the target ID for each document that is in limbo with an
         * active target.
         */
        this.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
        /**
         * Keeps track of the information about an active limbo resolution for each
         * active target ID that was started for the purpose of limbo resolution.
         */
        this.activeLimboResolutionsByTarget = new Map();
        this.limboDocumentRefs = new ReferenceSet();
        /** Stores user completion handlers, indexed by User and BatchId. */
        this.mutationUserCallbacks = {};
        /** Stores user callbacks waiting for all pending writes to be acknowledged. */
        this.pendingWritesCallbacks = new Map();
        // Generator for target IDs used by limbo resolution queries.
        this.limboTargetIdGenerator = TargetIdGenerator.forSyncEngine();
        this.onlineState = "Unknown" /* OnlineState.Unknown */;
        // The primary state is set to `true` or `false` immediately after Firestore
        // startup. In the interim, a client should only be considered primary if
        // `isPrimary` is true.
        this._isPrimaryClient = undefined;
    }
    /** Whether this client has been confirmed as the primary (multi-tab) client. */
    get isPrimaryClient() {
        return this._isPrimaryClient === true;
    }
}
  23793. function newSyncEngine(localStore, remoteStore, eventManager,
  23794. // PORTING NOTE: Manages state synchronization in multi-tab environments.
  23795. sharedClientState, currentUser, maxConcurrentLimboResolutions, isPrimary) {
  23796. const syncEngine = new SyncEngineImpl(localStore, remoteStore, eventManager, sharedClientState, currentUser, maxConcurrentLimboResolutions);
  23797. if (isPrimary) {
  23798. syncEngine._isPrimaryClient = true;
  23799. }
  23800. return syncEngine;
  23801. }
/**
 * Initiates the new listen, resolves promise when listen enqueued to the
 * server. All the subsequent view snapshots or errors are sent to the
 * subscribed handlers. Returns the initial snapshot.
 */
async function syncEngineListen(syncEngine, query) {
    const syncEngineImpl = ensureWatchCallbacks(syncEngine);
    let targetId;
    let viewSnapshot;
    const queryView = syncEngineImpl.queryViewsByQuery.get(query);
    if (queryView) {
        // PORTING NOTE: With Multi-Tab Web, it is possible that a query view
        // already exists when EventManager calls us for the first time. This
        // happens when the primary tab is already listening to this query on
        // behalf of another tab and the user of the primary also starts listening
        // to the query. EventManager will not have an assigned target ID in this
        // case and calls `listen` to obtain this ID.
        targetId = queryView.targetId;
        syncEngineImpl.sharedClientState.addLocalQueryTarget(targetId);
        viewSnapshot = queryView.view.computeInitialSnapshot();
    }
    else {
        // First listener for this query: allocate (or reuse) a target in the
        // local store and build a fresh view for it.
        const targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(query));
        if (syncEngineImpl.isPrimaryClient) {
            // Only the primary client forwards the listen to the backend.
            remoteStoreListen(syncEngineImpl.remoteStore, targetData);
        }
        const status = syncEngineImpl.sharedClientState.addLocalQueryTarget(targetData.targetId);
        targetId = targetData.targetId;
        viewSnapshot = await initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, status === 'current', targetData.resumeToken);
    }
    return viewSnapshot;
}
/**
 * Registers a view for a previously unknown query and computes its initial
 * snapshot.
 */
async function initializeViewAndComputeSnapshot(syncEngineImpl, query, targetId, current, resumeToken) {
    // PORTING NOTE: On Web only, we inject the code that registers new Limbo
    // targets based on view changes. This allows us to only depend on Limbo
    // changes when user code includes queries.
    syncEngineImpl.applyDocChanges = (queryView, changes, remoteEvent) => applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent);
    const queryResult = await localStoreExecuteQuery(syncEngineImpl.localStore, query,
    /* usePreviousResults= */ true);
    const view = new View(query, queryResult.remoteKeys);
    const viewDocChanges = view.computeDocChanges(queryResult.documents);
    // Synthesize a target change so the view can compute its initial sync
    // state; the target counts as current only while the client is not offline.
    const synthesizedTargetChange = TargetChange.createSynthesizedTargetChangeForCurrentChange(targetId, current && syncEngineImpl.onlineState !== "Offline" /* OnlineState.Offline */, resumeToken);
    // Only the primary client computes limbo document changes.
    const viewChange = view.applyChanges(viewDocChanges,
    /* updateLimboDocuments= */ syncEngineImpl.isPrimaryClient, synthesizedTargetChange);
    updateTrackedLimbos(syncEngineImpl, targetId, viewChange.limboChanges);
    const data = new QueryView(query, targetId, view);
    syncEngineImpl.queryViewsByQuery.set(query, data);
    // A single target may serve multiple queries; maintain the reverse map.
    if (syncEngineImpl.queriesByTarget.has(targetId)) {
        syncEngineImpl.queriesByTarget.get(targetId).push(query);
    }
    else {
        syncEngineImpl.queriesByTarget.set(targetId, [query]);
    }
    return viewChange.snapshot;
}
/**
 * Stops listening to the query. Releases the underlying target only when this
 * was the last query mapped to it, and — on the primary client — only when no
 * other tab is still interested in the target.
 */
async function syncEngineUnlisten(syncEngine, query) {
    const syncEngineImpl = debugCast(syncEngine);
    const queryView = syncEngineImpl.queryViewsByQuery.get(query);
    // Only clean up the query view and target if this is the only query mapped
    // to the target.
    const queries = syncEngineImpl.queriesByTarget.get(queryView.targetId);
    if (queries.length > 1) {
        // Other queries share this target; just drop this query's bookkeeping.
        syncEngineImpl.queriesByTarget.set(queryView.targetId, queries.filter(q => !queryEquals(q, query)));
        syncEngineImpl.queryViewsByQuery.delete(query);
        return;
    }
    // No other queries are mapped to the target, clean up the query and the target.
    if (syncEngineImpl.isPrimaryClient) {
        // We need to remove the local query target first to allow us to verify
        // whether any other client is still interested in this target.
        syncEngineImpl.sharedClientState.removeLocalQueryTarget(queryView.targetId);
        const targetRemainsActive = syncEngineImpl.sharedClientState.isActiveQueryTarget(queryView.targetId);
        if (!targetRemainsActive) {
            // Last interested client: release the target fully and stop the
            // backend listen.
            await localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
            /*keepPersistedTargetData=*/ false)
                .then(() => {
                syncEngineImpl.sharedClientState.clearQueryState(queryView.targetId);
                remoteStoreUnlisten(syncEngineImpl.remoteStore, queryView.targetId);
                removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
            })
                .catch(ignoreIfPrimaryLeaseLoss);
        }
    }
    else {
        // Secondary clients keep the persisted target data; the primary client
        // owns the backend listen.
        removeAndCleanupTarget(syncEngineImpl, queryView.targetId);
        await localStoreReleaseTarget(syncEngineImpl.localStore, queryView.targetId,
        /*keepPersistedTargetData=*/ true);
    }
}
/**
 * Initiates the write of local mutation batch which involves adding the
 * writes to the mutation queue, notifying the remote store about new
 * mutations and raising events for any changes this write caused.
 *
 * The promise returned by this call is resolved when the above steps
 * have completed, *not* when the write was acked by the backend. The
 * userCallback is resolved once the write was acked/rejected by the
 * backend (or failed locally for any other reason).
 */
async function syncEngineWrite(syncEngine, batch, userCallback) {
    const syncEngineImpl = syncEngineEnsureWriteCallbacks(syncEngine);
    try {
        // Persist the batch locally first; this assigns the batchId.
        const result = await localStoreWriteLocally(syncEngineImpl.localStore, batch);
        // Announce the pending mutation to other tabs (multi-tab).
        syncEngineImpl.sharedClientState.addPendingMutation(result.batchId);
        // Remember the user's callback so it can be resolved on ack/reject.
        addMutationCallback(syncEngineImpl, result.batchId, userCallback);
        // Raise snapshots for the locally-applied changes, then push the
        // mutation into the write pipeline toward the backend.
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.changes);
        await fillWritePipeline(syncEngineImpl.remoteStore);
    }
    catch (e) {
        // If we can't persist the mutation, we reject the user callback and
        // don't send the mutation. The user can then retry the write.
        const error = wrapInUserErrorIfRecoverable(e, `Failed to persist write`);
        userCallback.reject(error);
    }
}
/**
 * Applies one remote event to the sync engine, notifying any views of the
 * changes, and releasing any pending mutation batches that would become
 * visible because of the snapshot version the remote event contains.
 */
async function syncEngineApplyRemoteEvent(syncEngine, remoteEvent) {
    const syncEngineImpl = debugCast(syncEngine);
    try {
        const changes = await localStoreApplyRemoteEventToLocalCache(syncEngineImpl.localStore, remoteEvent);
        // Update `receivedDocument` as appropriate for any limbo targets.
        remoteEvent.targetChanges.forEach((targetChange, targetId) => {
            const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
            if (limboResolution) {
                // Since this is a limbo resolution lookup, it's for a single document
                // and it could be added, modified, or removed, but not a combination.
                hardAssert(targetChange.addedDocuments.size +
                    targetChange.modifiedDocuments.size +
                    targetChange.removedDocuments.size <=
                    1);
                if (targetChange.addedDocuments.size > 0) {
                    limboResolution.receivedDocument = true;
                }
                else if (targetChange.modifiedDocuments.size > 0) {
                    // A modification implies we already received the document.
                    hardAssert(limboResolution.receivedDocument);
                }
                else if (targetChange.removedDocuments.size > 0) {
                    // A removal implies we previously received the document.
                    hardAssert(limboResolution.receivedDocument);
                    limboResolution.receivedDocument = false;
                }
                else {
                    // This was probably just a CURRENT targetChange or similar.
                }
            }
        });
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, remoteEvent);
    }
    catch (error) {
        // Losing the primary lease is an expected condition in multi-tab; any
        // other error is rethrown by this helper.
        await ignoreIfPrimaryLeaseLoss(error);
    }
}
  23962. /**
  23963. * Applies an OnlineState change to the sync engine and notifies any views of
  23964. * the change.
  23965. */
  23966. function syncEngineApplyOnlineStateChange(syncEngine, onlineState, source) {
  23967. const syncEngineImpl = debugCast(syncEngine);
  23968. // If we are the secondary client, we explicitly ignore the remote store's
  23969. // online state (the local client may go offline, even though the primary
  23970. // tab remains online) and only apply the primary tab's online state from
  23971. // SharedClientState.
  23972. if ((syncEngineImpl.isPrimaryClient &&
  23973. source === 0 /* OnlineStateSource.RemoteStore */) ||
  23974. (!syncEngineImpl.isPrimaryClient &&
  23975. source === 1 /* OnlineStateSource.SharedClientState */)) {
  23976. const newViewSnapshots = [];
  23977. syncEngineImpl.queryViewsByQuery.forEach((query, queryView) => {
  23978. const viewChange = queryView.view.applyOnlineStateChange(onlineState);
  23979. if (viewChange.snapshot) {
  23980. newViewSnapshots.push(viewChange.snapshot);
  23981. }
  23982. });
  23983. eventManagerOnOnlineStateChange(syncEngineImpl.eventManager, onlineState);
  23984. if (newViewSnapshots.length) {
  23985. syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots);
  23986. }
  23987. syncEngineImpl.onlineState = onlineState;
  23988. if (syncEngineImpl.isPrimaryClient) {
  23989. syncEngineImpl.sharedClientState.setOnlineState(onlineState);
  23990. }
  23991. }
  23992. }
/**
 * Rejects the listen for the given targetID. This can be triggered by the
 * backend for any active target.
 *
 * @param syncEngine - The sync engine implementation.
 * @param targetId - The targetID corresponds to one previously initiated by the
 * user as part of TargetData passed to listen() on RemoteStore.
 * @param err - A description of the condition that has forced the rejection.
 * Nearly always this will be an indication that the user is no longer
 * authorized to see the data matching the target.
 */
async function syncEngineRejectListen(syncEngine, targetId, err) {
    const syncEngineImpl = debugCast(syncEngine);
    // PORTING NOTE: Multi-tab only.
    syncEngineImpl.sharedClientState.updateQueryState(targetId, 'rejected', err);
    const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
    const limboKey = limboResolution && limboResolution.key;
    if (limboKey) {
        // TODO(klimt): We really only should do the following on permission
        // denied errors, but we don't have the cause code here.
        // It's a limbo doc. Create a synthetic event saying it was deleted.
        // This is kind of a hack. Ideally, we would have a method in the local
        // store to purge a document. However, it would be tricky to keep all of
        // the local store's invariants with another method.
        let documentUpdates = new SortedMap(DocumentKey.comparator);
        // TODO(b/217189216): This limbo document should ideally have a read time,
        // so that it is picked up by any read-time based scans. The backend,
        // however, does not send a read time for target removals.
        documentUpdates = documentUpdates.insert(limboKey, MutableDocument.newNoDocument(limboKey, SnapshotVersion.min()));
        const resolvedLimboDocuments = documentKeySet().add(limboKey);
        const event = new RemoteEvent(SnapshotVersion.min(),
        /* targetChanges= */ new Map(),
        /* targetMismatches= */ new SortedSet(primitiveComparator), documentUpdates, resolvedLimboDocuments);
        await syncEngineApplyRemoteEvent(syncEngineImpl, event);
        // Since this query failed, we won't want to manually unlisten to it.
        // We only remove it from bookkeeping after we successfully applied the
        // RemoteEvent. If `applyRemoteEvent()` throws, we want to re-listen to
        // this query when the RemoteStore restarts the Watch stream, which should
        // re-trigger the target failure.
        syncEngineImpl.activeLimboTargetsByKey =
            syncEngineImpl.activeLimboTargetsByKey.remove(limboKey);
        syncEngineImpl.activeLimboResolutionsByTarget.delete(targetId);
        // A limbo slot was freed; start the next queued limbo resolution, if any.
        pumpEnqueuedLimboResolutions(syncEngineImpl);
    }
    else {
        // Not a limbo target: release it and clean up its views, tolerating a
        // lost primary lease.
        await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
        /* keepPersistedTargetData */ false)
            .then(() => removeAndCleanupTarget(syncEngineImpl, targetId, err))
            .catch(ignoreIfPrimaryLeaseLoss);
    }
}
/**
 * Applies a successful (acknowledged) mutation batch: commits the write result
 * to the local store, resolves the associated user callbacks and raises
 * snapshots for the resulting changes.
 */
async function syncEngineApplySuccessfulWrite(syncEngine, mutationBatchResult) {
    const syncEngineImpl = debugCast(syncEngine);
    const batchId = mutationBatchResult.batch.batchId;
    try {
        const changes = await localStoreAcknowledgeBatch(syncEngineImpl.localStore, mutationBatchResult);
        // The local store may or may not be able to apply the write result and
        // raise events immediately (depending on whether the watcher is caught
        // up), so we raise user callbacks first so that they consistently happen
        // before listen events.
        processUserCallback(syncEngineImpl, batchId, /*error=*/ null);
        triggerPendingWritesCallbacks(syncEngineImpl, batchId);
        // Tell other tabs the batch was acknowledged (multi-tab).
        syncEngineImpl.sharedClientState.updateMutationState(batchId, 'acknowledged');
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes);
    }
    catch (error) {
        // Losing the primary lease is expected in multi-tab; other errors rethrow.
        await ignoreIfPrimaryLeaseLoss(error);
    }
}
/**
 * Applies a write rejection from the backend: removes the batch from the
 * LocalStore, rejects the matching user callback with `error`, broadcasts
 * the rejected state to other tabs, and raises the resulting snapshots.
 *
 * @param syncEngine - The SyncEngine implementation (narrowed via debugCast).
 * @param batchId - The id of the rejected mutation batch.
 * @param error - The backend error to surface to the user callback.
 */
async function syncEngineRejectFailedWrite(syncEngine, batchId, error) {
    const syncEngineImpl = debugCast(syncEngine);
    try {
        const changes = await localStoreRejectBatch(syncEngineImpl.localStore, batchId);
        // The local store may or may not be able to apply the write result and
        // raise events immediately (depending on whether the watcher is caught up),
        // so we raise user callbacks first so that they consistently happen before
        // listen events.
        processUserCallback(syncEngineImpl, batchId, error);
        triggerPendingWritesCallbacks(syncEngineImpl, batchId);
        syncEngineImpl.sharedClientState.updateMutationState(batchId, 'rejected', error);
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes);
    }
    catch (error) {
        // NOTE: this `error` shadows the rejected-write error above on purpose:
        // only the failure of the local cleanup is checked for lease loss here.
        await ignoreIfPrimaryLeaseLoss(error);
    }
}
  24079. /**
  24080. * Registers a user callback that resolves when all pending mutations at the moment of calling
  24081. * are acknowledged .
  24082. */
  24083. async function syncEngineRegisterPendingWritesCallback(syncEngine, callback) {
  24084. const syncEngineImpl = debugCast(syncEngine);
  24085. if (!canUseNetwork(syncEngineImpl.remoteStore)) {
  24086. logDebug(LOG_TAG$3, 'The network is disabled. The task returned by ' +
  24087. "'awaitPendingWrites()' will not complete until the network is enabled.");
  24088. }
  24089. try {
  24090. const highestBatchId = await localStoreGetHighestUnacknowledgedBatchId(syncEngineImpl.localStore);
  24091. if (highestBatchId === BATCHID_UNKNOWN) {
  24092. // Trigger the callback right away if there is no pending writes at the moment.
  24093. callback.resolve();
  24094. return;
  24095. }
  24096. const callbacks = syncEngineImpl.pendingWritesCallbacks.get(highestBatchId) || [];
  24097. callbacks.push(callback);
  24098. syncEngineImpl.pendingWritesCallbacks.set(highestBatchId, callbacks);
  24099. }
  24100. catch (e) {
  24101. const firestoreError = wrapInUserErrorIfRecoverable(e, 'Initialization of waitForPendingWrites() operation failed');
  24102. callback.reject(firestoreError);
  24103. }
  24104. }
  24105. /**
  24106. * Triggers the callbacks that are waiting for this batch id to get acknowledged by server,
  24107. * if there are any.
  24108. */
  24109. function triggerPendingWritesCallbacks(syncEngineImpl, batchId) {
  24110. (syncEngineImpl.pendingWritesCallbacks.get(batchId) || []).forEach(callback => {
  24111. callback.resolve();
  24112. });
  24113. syncEngineImpl.pendingWritesCallbacks.delete(batchId);
  24114. }
  24115. /** Reject all outstanding callbacks waiting for pending writes to complete. */
  24116. function rejectOutstandingPendingWritesCallbacks(syncEngineImpl, errorMessage) {
  24117. syncEngineImpl.pendingWritesCallbacks.forEach(callbacks => {
  24118. callbacks.forEach(callback => {
  24119. callback.reject(new FirestoreError(Code.CANCELLED, errorMessage));
  24120. });
  24121. });
  24122. syncEngineImpl.pendingWritesCallbacks.clear();
  24123. }
  24124. function addMutationCallback(syncEngineImpl, batchId, callback) {
  24125. let newCallbacks = syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()];
  24126. if (!newCallbacks) {
  24127. newCallbacks = new SortedMap(primitiveComparator);
  24128. }
  24129. newCallbacks = newCallbacks.insert(batchId, callback);
  24130. syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()] =
  24131. newCallbacks;
  24132. }
  24133. /**
  24134. * Resolves or rejects the user callback for the given batch and then discards
  24135. * it.
  24136. */
  24137. function processUserCallback(syncEngine, batchId, error) {
  24138. const syncEngineImpl = debugCast(syncEngine);
  24139. let newCallbacks = syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()];
  24140. // NOTE: Mutations restored from persistence won't have callbacks, so it's
  24141. // okay for there to be no callback for this ID.
  24142. if (newCallbacks) {
  24143. const callback = newCallbacks.get(batchId);
  24144. if (callback) {
  24145. if (error) {
  24146. callback.reject(error);
  24147. }
  24148. else {
  24149. callback.resolve();
  24150. }
  24151. newCallbacks = newCallbacks.remove(batchId);
  24152. }
  24153. syncEngineImpl.mutationUserCallbacks[syncEngineImpl.currentUser.toKey()] =
  24154. newCallbacks;
  24155. }
  24156. }
/**
 * Tears down local state for a target that is no longer listened to:
 * unregisters it from shared client state, drops every query view mapped to
 * the target (notifying listeners of `error`, if any), and — on the primary
 * client — releases limbo documents that were referenced only by this target.
 *
 * NOTE(review): `queriesByTarget.get(targetId)` is iterated without a null
 * check, so callers apparently guarantee the target still has registered
 * queries — confirm before calling with arbitrary ids.
 */
function removeAndCleanupTarget(syncEngineImpl, targetId, error = null) {
    syncEngineImpl.sharedClientState.removeLocalQueryTarget(targetId);
    for (const query of syncEngineImpl.queriesByTarget.get(targetId)) {
        syncEngineImpl.queryViewsByQuery.delete(query);
        if (error) {
            syncEngineImpl.syncEngineListener.onWatchError(query, error);
        }
    }
    syncEngineImpl.queriesByTarget.delete(targetId);
    if (syncEngineImpl.isPrimaryClient) {
        // Only the primary client tracks limbo documents.
        const limboKeys = syncEngineImpl.limboDocumentRefs.removeReferencesForId(targetId);
        limboKeys.forEach(limboKey => {
            const isReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboKey);
            if (!isReferenced) {
                // We removed the last reference for this key
                removeLimboTarget(syncEngineImpl, limboKey);
            }
        });
    }
}
  24177. function removeLimboTarget(syncEngineImpl, key) {
  24178. syncEngineImpl.enqueuedLimboResolutions.delete(key.path.canonicalString());
  24179. // It's possible that the target already got removed because the query failed. In that case,
  24180. // the key won't exist in `limboTargetsByKey`. Only do the cleanup if we still have the target.
  24181. const limboTargetId = syncEngineImpl.activeLimboTargetsByKey.get(key);
  24182. if (limboTargetId === null) {
  24183. // This target already got removed, because the query failed.
  24184. return;
  24185. }
  24186. remoteStoreUnlisten(syncEngineImpl.remoteStore, limboTargetId);
  24187. syncEngineImpl.activeLimboTargetsByKey =
  24188. syncEngineImpl.activeLimboTargetsByKey.remove(key);
  24189. syncEngineImpl.activeLimboResolutionsByTarget.delete(limboTargetId);
  24190. pumpEnqueuedLimboResolutions(syncEngineImpl);
  24191. }
  24192. function updateTrackedLimbos(syncEngineImpl, targetId, limboChanges) {
  24193. for (const limboChange of limboChanges) {
  24194. if (limboChange instanceof AddedLimboDocument) {
  24195. syncEngineImpl.limboDocumentRefs.addReference(limboChange.key, targetId);
  24196. trackLimboChange(syncEngineImpl, limboChange);
  24197. }
  24198. else if (limboChange instanceof RemovedLimboDocument) {
  24199. logDebug(LOG_TAG$3, 'Document no longer in limbo: ' + limboChange.key);
  24200. syncEngineImpl.limboDocumentRefs.removeReference(limboChange.key, targetId);
  24201. const isReferenced = syncEngineImpl.limboDocumentRefs.containsKey(limboChange.key);
  24202. if (!isReferenced) {
  24203. // We removed the last reference for this key
  24204. removeLimboTarget(syncEngineImpl, limboChange.key);
  24205. }
  24206. }
  24207. else {
  24208. fail();
  24209. }
  24210. }
  24211. }
  24212. function trackLimboChange(syncEngineImpl, limboChange) {
  24213. const key = limboChange.key;
  24214. const keyString = key.path.canonicalString();
  24215. if (!syncEngineImpl.activeLimboTargetsByKey.get(key) &&
  24216. !syncEngineImpl.enqueuedLimboResolutions.has(keyString)) {
  24217. logDebug(LOG_TAG$3, 'New document in limbo: ' + key);
  24218. syncEngineImpl.enqueuedLimboResolutions.add(keyString);
  24219. pumpEnqueuedLimboResolutions(syncEngineImpl);
  24220. }
  24221. }
/**
 * Starts listens for documents in limbo that are enqueued for resolution,
 * subject to a maximum number of concurrent resolutions.
 *
 * Without bounding the number of concurrent resolutions, the server can fail
 * with "resource exhausted" errors which can lead to pathological client
 * behavior as seen in https://github.com/firebase/firebase-js-sdk/issues/2683.
 */
function pumpEnqueuedLimboResolutions(syncEngineImpl) {
    // Drain the queue while there is work and a free slot.
    while (syncEngineImpl.enqueuedLimboResolutions.size > 0 &&
        syncEngineImpl.activeLimboTargetsByKey.size <
            syncEngineImpl.maxConcurrentLimboResolutions) {
        // Sets iterate in insertion order, so this pops the oldest queued key.
        const keyString = syncEngineImpl.enqueuedLimboResolutions
            .values()
            .next().value;
        syncEngineImpl.enqueuedLimboResolutions.delete(keyString);
        const key = new DocumentKey(ResourcePath.fromString(keyString));
        // Allocate a fresh target id and register the active resolution on
        // both lookup directions (by target id and by document key).
        const limboTargetId = syncEngineImpl.limboTargetIdGenerator.next();
        syncEngineImpl.activeLimboResolutionsByTarget.set(limboTargetId, new LimboResolution(key));
        syncEngineImpl.activeLimboTargetsByKey =
            syncEngineImpl.activeLimboTargetsByKey.insert(key, limboTargetId);
        // Listen to the single document at `key` with no resume token.
        remoteStoreListen(syncEngineImpl.remoteStore, new TargetData(queryToTarget(newQueryForPath(key.path)), limboTargetId, 2 /* TargetPurpose.LimboResolution */, ListenSequence.INVALID));
    }
}
/**
 * Re-computes every registered query view against `changes` (and the optional
 * `remoteEvent`), notifies secondary clients of updated query state, emits
 * all changed snapshots to the sync engine listener in one batch, and finally
 * reports the per-view document changes back to the LocalStore.
 *
 * @param syncEngine - The SyncEngine implementation (narrowed via debugCast).
 * @param changes - Changed documents to fold into each view.
 * @param remoteEvent - Optional remote event that produced the changes.
 */
async function syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, changes, remoteEvent) {
    const syncEngineImpl = debugCast(syncEngine);
    const newSnaps = [];
    const docChangesInAllViews = [];
    const queriesProcessed = [];
    if (syncEngineImpl.queryViewsByQuery.isEmpty()) {
        // Return early since `onWatchChange()` might not have been assigned yet.
        return;
    }
    syncEngineImpl.queryViewsByQuery.forEach((_, queryView) => {
        // Each view is recomputed asynchronously; the promises are collected so
        // all views finish before any snapshot is emitted.
        queriesProcessed.push(syncEngineImpl
            .applyDocChanges(queryView, changes, remoteEvent)
            .then(viewSnapshot => {
            // If there are changes, or we are handling a global snapshot, notify
            // secondary clients to update query state.
            if (viewSnapshot || remoteEvent) {
                if (syncEngineImpl.isPrimaryClient) {
                    syncEngineImpl.sharedClientState.updateQueryState(queryView.targetId, (viewSnapshot === null || viewSnapshot === void 0 ? void 0 : viewSnapshot.fromCache) ? 'not-current' : 'current');
                }
            }
            // Update views if there are actual changes.
            if (!!viewSnapshot) {
                newSnaps.push(viewSnapshot);
                const docChanges = LocalViewChanges.fromSnapshot(queryView.targetId, viewSnapshot);
                docChangesInAllViews.push(docChanges);
            }
        }));
    });
    await Promise.all(queriesProcessed);
    // Emit all snapshots in a single listener callback, then persist the
    // aggregated view changes.
    syncEngineImpl.syncEngineListener.onWatchChange(newSnaps);
    await localStoreNotifyLocalViewChanges(syncEngineImpl.localStore, docChangesInAllViews);
}
/**
 * Computes the document changes for a single query view, re-running the query
 * against the LocalStore when a limit query lost documents (refill), applies
 * the changes to the view, updates limbo tracking, and returns the resulting
 * snapshot (which may be undefined/null when nothing changed — see callers).
 *
 * @param syncEngineImpl - The SyncEngine implementation.
 * @param queryView - The view to update.
 * @param changes - Changed documents to fold into the view.
 * @param remoteEvent - Optional remote event supplying the target change.
 */
async function applyDocChanges(syncEngineImpl, queryView, changes, remoteEvent) {
    let viewDocChanges = queryView.view.computeDocChanges(changes);
    if (viewDocChanges.needsRefill) {
        // The query has a limit and some docs were removed, so we need
        // to re-run the query against the local store to make sure we
        // didn't lose any good docs that had been past the limit.
        viewDocChanges = await localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
        /* usePreviousResults= */ false).then(({ documents }) => {
            return queryView.view.computeDocChanges(documents, viewDocChanges);
        });
    }
    // Only the remote event for this view's own target is relevant here.
    const targetChange = remoteEvent && remoteEvent.targetChanges.get(queryView.targetId);
    // Limbo documents are only maintained on the primary client.
    const viewChange = queryView.view.applyChanges(viewDocChanges,
    /* updateLimboDocuments= */ syncEngineImpl.isPrimaryClient, targetChange);
    updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewChange.limboChanges);
    return viewChange.snapshot;
}
/**
 * Reacts to an auth credential change. When the user actually changed, the
 * LocalStore swaps mutation queues, outstanding `waitForPendingWrites()`
 * promises of the previous user are rejected, other tabs are notified of the
 * batch-id diff, and snapshots are re-raised for the affected documents.
 *
 * @param syncEngine - The SyncEngine implementation (narrowed via debugCast).
 * @param user - The newly signed-in (or signed-out) user.
 */
async function syncEngineHandleCredentialChange(syncEngine, user) {
    const syncEngineImpl = debugCast(syncEngine);
    const userChanged = !syncEngineImpl.currentUser.isEqual(user);
    if (userChanged) {
        logDebug(LOG_TAG$3, 'User change. New user:', user.toKey());
        const result = await localStoreHandleUserChange(syncEngineImpl.localStore, user);
        syncEngineImpl.currentUser = user;
        // Fails tasks waiting for pending writes requested by previous user.
        rejectOutstandingPendingWritesCallbacks(syncEngineImpl, "'waitForPendingWrites' promise is rejected due to a user change.");
        // TODO(b/114226417): Consider calling this only in the primary tab.
        syncEngineImpl.sharedClientState.handleUserChange(user, result.removedBatchIds, result.addedBatchIds);
        await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, result.affectedDocuments);
    }
}
  24309. function syncEngineGetRemoteKeysForTarget(syncEngine, targetId) {
  24310. const syncEngineImpl = debugCast(syncEngine);
  24311. const limboResolution = syncEngineImpl.activeLimboResolutionsByTarget.get(targetId);
  24312. if (limboResolution && limboResolution.receivedDocument) {
  24313. return documentKeySet().add(limboResolution.key);
  24314. }
  24315. else {
  24316. let keySet = documentKeySet();
  24317. const queries = syncEngineImpl.queriesByTarget.get(targetId);
  24318. if (!queries) {
  24319. return keySet;
  24320. }
  24321. for (const query of queries) {
  24322. const queryView = syncEngineImpl.queryViewsByQuery.get(query);
  24323. keySet = keySet.unionWith(queryView.view.syncedDocuments);
  24324. }
  24325. return keySet;
  24326. }
  24327. }
  24328. /**
  24329. * Reconcile the list of synced documents in an existing view with those
  24330. * from persistence.
  24331. */
  24332. async function synchronizeViewAndComputeSnapshot(syncEngine, queryView) {
  24333. const syncEngineImpl = debugCast(syncEngine);
  24334. const queryResult = await localStoreExecuteQuery(syncEngineImpl.localStore, queryView.query,
  24335. /* usePreviousResults= */ true);
  24336. const viewSnapshot = queryView.view.synchronizeWithPersistedState(queryResult);
  24337. if (syncEngineImpl.isPrimaryClient) {
  24338. updateTrackedLimbos(syncEngineImpl, queryView.targetId, viewSnapshot.limboChanges);
  24339. }
  24340. return viewSnapshot;
  24341. }
  24342. /**
  24343. * Retrieves newly changed documents from remote document cache and raises
  24344. * snapshots if needed.
  24345. */
  24346. // PORTING NOTE: Multi-Tab only.
  24347. async function syncEngineSynchronizeWithChangedDocuments(syncEngine, collectionGroup) {
  24348. const syncEngineImpl = debugCast(syncEngine);
  24349. return localStoreGetNewDocumentChanges(syncEngineImpl.localStore, collectionGroup).then(changes => syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes));
  24350. }
/**
 * Applies a mutation state to an existing batch.
 *
 * - 'pending': the write still has to reach the backend, so the write
 *   pipeline is filled (secondary clients ignore this since their remote
 *   connection is disabled).
 * - 'acknowledged' / 'rejected': resolves or rejects the local user callback
 *   for the batch and drops the cached mutation metadata.
 *
 * Any resulting document changes are then re-emitted as snapshots.
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyBatchState(syncEngine, batchId, batchState, error) {
    const syncEngineImpl = debugCast(syncEngine);
    const documents = await localStoreLookupMutationDocuments(syncEngineImpl.localStore, batchId);
    if (documents === null) {
        // A throttled tab may not have seen the mutation before it was completed
        // and removed from the mutation queue, in which case we won't have cached
        // the affected documents. In this case we can safely ignore the update
        // since that means we didn't apply the mutation locally at all (if we
        // had, we would have cached the affected documents), and so we will just
        // see any resulting document changes via normal remote document updates
        // as applicable.
        logDebug(LOG_TAG$3, 'Cannot apply mutation batch with id: ' + batchId);
        return;
    }
    if (batchState === 'pending') {
        // If we are the primary client, we need to send this write to the
        // backend. Secondary clients will ignore these writes since their remote
        // connection is disabled.
        await fillWritePipeline(syncEngineImpl.remoteStore);
    }
    else if (batchState === 'acknowledged' || batchState === 'rejected') {
        // NOTE: Both these methods are no-ops for batches that originated from
        // other clients.
        processUserCallback(syncEngineImpl, batchId, error ? error : null);
        triggerPendingWritesCallbacks(syncEngineImpl, batchId);
        localStoreRemoveCachedMutationBatchMetadata(syncEngineImpl.localStore, batchId);
    }
    else {
        // Unknown batch state: hard assertion failure.
        fail();
    }
    await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, documents);
}
/**
 * Transitions this tab between primary and secondary roles.
 *
 * Becoming primary: reconciles every active target's view with persisted
 * state, enables the remote store's primary behavior, then starts Watch
 * listens for all active targets. Becoming secondary: releases targets that
 * are not locally listened to, re-synchronizes the remaining views, resets
 * limbo tracking, and disables primary behavior on the remote store.
 * No-ops when the requested state matches the current one.
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyPrimaryState(syncEngine, isPrimary) {
    const syncEngineImpl = debugCast(syncEngine);
    // Make sure watch/write callbacks are wired before any state change.
    ensureWatchCallbacks(syncEngineImpl);
    syncEngineEnsureWriteCallbacks(syncEngineImpl);
    if (isPrimary === true && syncEngineImpl._isPrimaryClient !== true) {
        // Secondary tabs only maintain Views for their local listeners and the
        // Views internal state may not be 100% populated (in particular
        // secondary tabs don't track syncedDocuments, the set of documents the
        // server considers to be in the target). So when a secondary becomes
        // primary, we need to make sure that all views for all targets
        // match the state on disk.
        const activeTargets = syncEngineImpl.sharedClientState.getAllActiveQueryTargets();
        const activeQueries = await synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets.toArray());
        syncEngineImpl._isPrimaryClient = true;
        await remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, true);
        for (const targetData of activeQueries) {
            remoteStoreListen(syncEngineImpl.remoteStore, targetData);
        }
    }
    else if (isPrimary === false && syncEngineImpl._isPrimaryClient !== false) {
        const activeTargets = [];
        // Sequentially release targets that no local listener cares about;
        // targets with local listeners stay active.
        let p = Promise.resolve();
        syncEngineImpl.queriesByTarget.forEach((_, targetId) => {
            if (syncEngineImpl.sharedClientState.isLocalQueryTarget(targetId)) {
                activeTargets.push(targetId);
            }
            else {
                p = p.then(() => {
                    removeAndCleanupTarget(syncEngineImpl, targetId);
                    return localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
                    /*keepPersistedTargetData=*/ true);
                });
            }
            // All targets stop their Watch listens; a new primary will re-listen.
            remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
        });
        await p;
        await synchronizeQueryViewsAndRaiseSnapshots(syncEngineImpl, activeTargets);
        resetLimboDocuments(syncEngineImpl);
        syncEngineImpl._isPrimaryClient = false;
        await remoteStoreApplyPrimaryState(syncEngineImpl.remoteStore, false);
    }
}
  24429. // PORTING NOTE: Multi-Tab only.
  24430. function resetLimboDocuments(syncEngine) {
  24431. const syncEngineImpl = debugCast(syncEngine);
  24432. syncEngineImpl.activeLimboResolutionsByTarget.forEach((_, targetId) => {
  24433. remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
  24434. });
  24435. syncEngineImpl.limboDocumentRefs.removeAllReferences();
  24436. syncEngineImpl.activeLimboResolutionsByTarget = new Map();
  24437. syncEngineImpl.activeLimboTargetsByKey = new SortedMap(DocumentKey.comparator);
  24438. }
/**
 * Reconcile the query views of the provided query targets with the state from
 * persistence. Raises snapshots for any changes that affect the local
 * client and returns the updated state of all target's query data.
 *
 * @param syncEngine - The sync engine implementation
 * @param targets - the list of targets with views that need to be recomputed
 * @param transitionToPrimary - `true` iff the tab transitions from a secondary
 * tab to a primary tab
 *
 * NOTE(review): `transitionToPrimary` is documented but never read in this
 * body — callers here pass only two arguments; confirm whether it is vestigial.
 */
// PORTING NOTE: Multi-Tab only.
async function synchronizeQueryViewsAndRaiseSnapshots(syncEngine, targets, transitionToPrimary) {
    const syncEngineImpl = debugCast(syncEngine);
    const activeQueries = [];
    const newViewSnapshots = [];
    for (const targetId of targets) {
        let targetData;
        const queries = syncEngineImpl.queriesByTarget.get(targetId);
        if (queries && queries.length !== 0) {
            // For queries that have a local View, we fetch their current state
            // from LocalStore (as the resume token and the snapshot version
            // might have changed) and reconcile their views with the persisted
            // state (the list of syncedDocuments may have gotten out of sync).
            targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, queryToTarget(queries[0]));
            for (const query of queries) {
                const queryView = syncEngineImpl.queryViewsByQuery.get(query);
                const viewChange = await synchronizeViewAndComputeSnapshot(syncEngineImpl, queryView);
                if (viewChange.snapshot) {
                    newViewSnapshots.push(viewChange.snapshot);
                }
            }
        }
        else {
            // For queries that never executed on this client, we need to
            // allocate the target in LocalStore and initialize a new View.
            const target = await localStoreGetCachedTarget(syncEngineImpl.localStore, targetId);
            targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, target);
            await initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetId,
            /*current=*/ false, targetData.resumeToken);
        }
        activeQueries.push(targetData);
    }
    // Emit all reconciled snapshots in a single listener callback.
    syncEngineImpl.syncEngineListener.onWatchChange(newViewSnapshots);
    return activeQueries;
}
  24484. /**
  24485. * Creates a `Query` object from the specified `Target`. There is no way to
  24486. * obtain the original `Query`, so we synthesize a `Query` from the `Target`
  24487. * object.
  24488. *
  24489. * The synthesized result might be different from the original `Query`, but
  24490. * since the synthesized `Query` should return the same results as the
  24491. * original one (only the presentation of results might differ), the potential
  24492. * difference will not cause issues.
  24493. */
  24494. // PORTING NOTE: Multi-Tab only.
  24495. function synthesizeTargetToQuery(target) {
  24496. return newQuery(target.path, target.collectionGroup, target.orderBy, target.filters, target.limit, "F" /* LimitType.First */, target.startAt, target.endAt);
  24497. }
/** Returns the IDs of the clients that are currently active. */
// PORTING NOTE: Multi-Tab only.
function syncEngineGetActiveClients(syncEngine) {
    const syncEngineImpl = debugCast(syncEngine);
    // Thin delegate to the LocalStore; returns whatever it resolves with.
    return localStoreGetActiveClients(syncEngineImpl.localStore);
}
/**
 * Applies a query target change received from a different tab (via shared
 * client state). Only secondary clients process these: 'current'/'not-current'
 * re-raises snapshots with a synthesized remote event, 'rejected' releases
 * and cleans up the target. Unknown states fail hard.
 *
 * @param syncEngine - The SyncEngine implementation (narrowed via debugCast).
 * @param targetId - The target the notification refers to.
 * @param state - 'current' | 'not-current' | 'rejected'.
 * @param error - The error accompanying a 'rejected' state, if any.
 */
// PORTING NOTE: Multi-Tab only.
async function syncEngineApplyTargetState(syncEngine, targetId, state, error) {
    const syncEngineImpl = debugCast(syncEngine);
    if (syncEngineImpl._isPrimaryClient) {
        // If we receive a target state notification via WebStorage, we are
        // either already secondary or another tab has taken the primary lease.
        logDebug(LOG_TAG$3, 'Ignoring unexpected query state notification.');
        return;
    }
    const query = syncEngineImpl.queriesByTarget.get(targetId);
    if (query && query.length > 0) {
        switch (state) {
            case 'current':
            case 'not-current': {
                const changes = await localStoreGetNewDocumentChanges(syncEngineImpl.localStore, queryCollectionGroup(query[0]));
                const synthesizedRemoteEvent = RemoteEvent.createSynthesizedRemoteEventForCurrentChange(targetId, state === 'current', ByteString.EMPTY_BYTE_STRING);
                await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngineImpl, changes, synthesizedRemoteEvent);
                break;
            }
            case 'rejected': {
                await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
                /* keepPersistedTargetData */ true);
                removeAndCleanupTarget(syncEngineImpl, targetId, error);
                break;
            }
            default:
                fail();
        }
    }
}
/**
 * Adds or removes Watch targets for queries from different tabs. Only the
 * primary client acts on these notifications: added targets are allocated in
 * the LocalStore, given a view, and listened to; removed targets are released
 * from the LocalStore, unlistened, and cleaned up.
 *
 * @param syncEngine - The SyncEngine implementation.
 * @param added - Target ids newly activated by other tabs.
 * @param removed - Target ids deactivated by other tabs.
 */
async function syncEngineApplyActiveTargetsChange(syncEngine, added, removed) {
    const syncEngineImpl = ensureWatchCallbacks(syncEngine);
    if (!syncEngineImpl._isPrimaryClient) {
        return;
    }
    for (const targetId of added) {
        if (syncEngineImpl.queriesByTarget.has(targetId)) {
            // A target might have been added in a previous attempt
            logDebug(LOG_TAG$3, 'Adding an already active target ' + targetId);
            continue;
        }
        const target = await localStoreGetCachedTarget(syncEngineImpl.localStore, targetId);
        const targetData = await localStoreAllocateTarget(syncEngineImpl.localStore, target);
        await initializeViewAndComputeSnapshot(syncEngineImpl, synthesizeTargetToQuery(target), targetData.targetId,
        /*current=*/ false, targetData.resumeToken);
        remoteStoreListen(syncEngineImpl.remoteStore, targetData);
    }
    for (const targetId of removed) {
        // Check that the target is still active since the target might have been
        // removed if it has been rejected by the backend.
        if (!syncEngineImpl.queriesByTarget.has(targetId)) {
            continue;
        }
        // Release queries that are still active.
        await localStoreReleaseTarget(syncEngineImpl.localStore, targetId,
        /* keepPersistedTargetData */ false)
            .then(() => {
            remoteStoreUnlisten(syncEngineImpl.remoteStore, targetId);
            removeAndCleanupTarget(syncEngineImpl, targetId);
        })
            .catch(ignoreIfPrimaryLeaseLoss);
    }
}
  24569. function ensureWatchCallbacks(syncEngine) {
  24570. const syncEngineImpl = debugCast(syncEngine);
  24571. syncEngineImpl.remoteStore.remoteSyncer.applyRemoteEvent =
  24572. syncEngineApplyRemoteEvent.bind(null, syncEngineImpl);
  24573. syncEngineImpl.remoteStore.remoteSyncer.getRemoteKeysForTarget =
  24574. syncEngineGetRemoteKeysForTarget.bind(null, syncEngineImpl);
  24575. syncEngineImpl.remoteStore.remoteSyncer.rejectListen =
  24576. syncEngineRejectListen.bind(null, syncEngineImpl);
  24577. syncEngineImpl.syncEngineListener.onWatchChange =
  24578. eventManagerOnWatchChange.bind(null, syncEngineImpl.eventManager);
  24579. syncEngineImpl.syncEngineListener.onWatchError =
  24580. eventManagerOnWatchError.bind(null, syncEngineImpl.eventManager);
  24581. return syncEngineImpl;
  24582. }
  24583. function syncEngineEnsureWriteCallbacks(syncEngine) {
  24584. const syncEngineImpl = debugCast(syncEngine);
  24585. syncEngineImpl.remoteStore.remoteSyncer.applySuccessfulWrite =
  24586. syncEngineApplySuccessfulWrite.bind(null, syncEngineImpl);
  24587. syncEngineImpl.remoteStore.remoteSyncer.rejectFailedWrite =
  24588. syncEngineRejectFailedWrite.bind(null, syncEngineImpl);
  24589. return syncEngineImpl;
  24590. }
/**
 * Loads a Firestore bundle into the SDK. The returned promise resolves when
 * the bundle finished loading.
 *
 * @param syncEngine - SyncEngine to use.
 * @param bundleReader - Bundle to load into the SDK.
 * @param task - LoadBundleTask used to update the loading progress to public API.
 */
function syncEngineLoadBundle(syncEngine, bundleReader, task) {
    const syncEngineImpl = debugCast(syncEngine);
    // Intentionally fire-and-forget: progress/errors are reported through
    // `task`, and loadBundleImpl never rejects (it resolves with an empty set
    // on failure), so the dangling promise cannot produce an unhandled
    // rejection.
    // eslint-disable-next-line @typescript-eslint/no-floating-promises
    loadBundleImpl(syncEngineImpl, bundleReader, task).then(collectionGroups => {
        syncEngineImpl.sharedClientState.notifyBundleLoaded(collectionGroups);
    });
}
  24606. /** Loads a bundle and returns the list of affected collection groups. */
  24607. async function loadBundleImpl(syncEngine, reader, task) {
  24608. try {
  24609. const metadata = await reader.getMetadata();
  24610. const skip = await localStoreHasNewerBundle(syncEngine.localStore, metadata);
  24611. if (skip) {
  24612. await reader.close();
  24613. task._completeWith(bundleSuccessProgress(metadata));
  24614. return Promise.resolve(new Set());
  24615. }
  24616. task._updateProgress(bundleInitialProgress(metadata));
  24617. const loader = new BundleLoader(metadata, syncEngine.localStore, reader.serializer);
  24618. let element = await reader.nextElement();
  24619. while (element) {
  24620. ;
  24621. const progress = await loader.addSizedElement(element);
  24622. if (progress) {
  24623. task._updateProgress(progress);
  24624. }
  24625. element = await reader.nextElement();
  24626. }
  24627. const result = await loader.complete();
  24628. await syncEngineEmitNewSnapsAndNotifyLocalStore(syncEngine, result.changedDocs,
  24629. /* remoteEvent */ undefined);
  24630. // Save metadata, so loading the same bundle will skip.
  24631. await localStoreSaveBundle(syncEngine.localStore, metadata);
  24632. task._completeWith(result.progress);
  24633. return Promise.resolve(result.changedCollectionGroups);
  24634. }
  24635. catch (e) {
  24636. logWarn(LOG_TAG$3, `Loading bundle failed with ${e}`);
  24637. task._failWith(e);
  24638. return Promise.resolve(new Set());
  24639. }
  24640. }
  24641. /**
  24642. * @license
  24643. * Copyright 2020 Google LLC
  24644. *
  24645. * Licensed under the Apache License, Version 2.0 (the "License");
  24646. * you may not use this file except in compliance with the License.
  24647. * You may obtain a copy of the License at
  24648. *
  24649. * http://www.apache.org/licenses/LICENSE-2.0
  24650. *
  24651. * Unless required by applicable law or agreed to in writing, software
  24652. * distributed under the License is distributed on an "AS IS" BASIS,
  24653. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  24654. * See the License for the specific language governing permissions and
  24655. * limitations under the License.
  24656. */
/**
 * Provides all components needed for Firestore with in-memory persistence.
 * Uses EagerGC garbage collection.
 */
class MemoryOfflineComponentProvider {
    constructor() {
        // Memory persistence never coordinates across tabs.
        this.synchronizeTabs = false;
    }
    /**
     * Creates and starts all offline components in dependency order:
     * serializer → shared client state → persistence (started) → local store
     * → GC / index-backfill schedulers. Order matters: the local store is
     * created only after persistence has started.
     */
    async initialize(cfg) {
        this.serializer = newSerializer(cfg.databaseInfo.databaseId);
        this.sharedClientState = this.createSharedClientState(cfg);
        this.persistence = this.createPersistence(cfg);
        await this.persistence.start();
        this.localStore = this.createLocalStore(cfg);
        this.gcScheduler = this.createGarbageCollectionScheduler(cfg, this.localStore);
        this.indexBackfillerScheduler = this.createIndexBackfillerScheduler(cfg, this.localStore);
    }
    // No GC scheduler for memory persistence (eager GC needs none).
    createGarbageCollectionScheduler(cfg, localStore) {
        return null;
    }
    // No index backfiller for memory persistence.
    createIndexBackfillerScheduler(cfg, localStore) {
        return null;
    }
    createLocalStore(cfg) {
        return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
    }
    createPersistence(cfg) {
        return new MemoryPersistence(MemoryEagerDelegate.factory, this.serializer);
    }
    createSharedClientState(cfg) {
        return new MemorySharedClientState();
    }
    /**
     * Stops the GC scheduler (if any) and shuts down shared client state and
     * persistence. NOTE(review): the index backfiller scheduler is not stopped
     * here — confirm whether subclasses handle that elsewhere.
     */
    async terminate() {
        if (this.gcScheduler) {
            this.gcScheduler.stop();
        }
        await this.sharedClientState.shutdown();
        await this.persistence.shutdown();
    }
}
/**
 * Provides all components needed for Firestore with IndexedDB persistence.
 */
class IndexedDbOfflineComponentProvider extends MemoryOfflineComponentProvider {
    /**
     * @param onlineComponentProvider - Provider whose online components (sync
     *   engine, remote store) are initialized alongside this one.
     * @param cacheSizeBytes - Optional LRU cache size override.
     * @param forceOwnership - When truthy, forcibly takes the persistence lease.
     */
    constructor(onlineComponentProvider, cacheSizeBytes, forceOwnership) {
        super();
        this.onlineComponentProvider = onlineComponentProvider;
        this.cacheSizeBytes = cacheSizeBytes;
        this.forceOwnership = forceOwnership;
        // Single-tab variant; presumably overridden by the multi-tab provider
        // declared after this class — confirm there.
        this.synchronizeTabs = false;
    }
    async initialize(cfg) {
        await super.initialize(cfg);
        await this.onlineComponentProvider.initialize(this, cfg);
        // Enqueue writes from a previous session
        await syncEngineEnsureWriteCallbacks(this.onlineComponentProvider.syncEngine);
        await fillWritePipeline(this.onlineComponentProvider.remoteStore);
        // NOTE: This will immediately call the listener, so we make sure to
        // set it after localStore / remoteStore are started.
        await this.persistence.setPrimaryStateListener(() => {
            // Start background schedulers once (primary-state callback fires on
            // lease changes; `started` guards against double-starts).
            if (this.gcScheduler && !this.gcScheduler.started) {
                this.gcScheduler.start();
            }
            if (this.indexBackfillerScheduler &&
                !this.indexBackfillerScheduler.started) {
                this.indexBackfillerScheduler.start();
            }
            return Promise.resolve();
        });
    }
    createLocalStore(cfg) {
        return newLocalStore(this.persistence, new QueryEngine(), cfg.initialUser, this.serializer);
    }
    // LRU garbage collection replaces the memory provider's eager GC.
    createGarbageCollectionScheduler(cfg, localStore) {
        const garbageCollector = this.persistence.referenceDelegate.garbageCollector;
        return new LruScheduler(garbageCollector, cfg.asyncQueue, localStore);
    }
    createIndexBackfillerScheduler(cfg, localStore) {
        const indexBackfiller = new IndexBackfiller(localStore, this.persistence);
        return new IndexBackfillerScheduler(cfg.asyncQueue, indexBackfiller);
    }
    createPersistence(cfg) {
        const persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
        // Fall back to the default LRU parameters unless a cache size was given.
        const lruParams = this.cacheSizeBytes !== undefined
            ? LruParams.withCacheSize(this.cacheSizeBytes)
            : LruParams.DEFAULT;
        return new IndexedDbPersistence(this.synchronizeTabs, persistenceKey, cfg.clientId, lruParams, cfg.asyncQueue, getWindow(), getDocument(), this.serializer, this.sharedClientState, !!this.forceOwnership);
    }
    createSharedClientState(cfg) {
        // Single-tab: no WebStorage-backed state sharing.
        return new MemorySharedClientState();
    }
}
  24749. /**
  24750. * Provides all components needed for Firestore with multi-tab IndexedDB
  24751. * persistence.
  24752. *
  24753. * In the legacy client, this provider is used to provide both multi-tab and
  24754. * non-multi-tab persistence since we cannot tell at build time whether
  24755. * `synchronizeTabs` will be enabled.
  24756. */
class MultiTabOfflineComponentProvider extends IndexedDbOfflineComponentProvider {
    constructor(onlineComponentProvider, cacheSizeBytes) {
        super(onlineComponentProvider, cacheSizeBytes, /* forceOwnership= */ false);
        this.onlineComponentProvider = onlineComponentProvider;
        this.cacheSizeBytes = cacheSizeBytes;
        // Multi-tab mode: state is coordinated across browser tabs.
        this.synchronizeTabs = true;
    }
    async initialize(cfg) {
        await super.initialize(cfg);
        const syncEngine = this.onlineComponentProvider.syncEngine;
        // Wire cross-tab shared state into the sync engine so batch/target/
        // client changes made by other tabs are applied to this one.
        if (this.sharedClientState instanceof WebStorageSharedClientState) {
            this.sharedClientState.syncEngine = {
                applyBatchState: syncEngineApplyBatchState.bind(null, syncEngine),
                applyTargetState: syncEngineApplyTargetState.bind(null, syncEngine),
                applyActiveTargetsChange: syncEngineApplyActiveTargetsChange.bind(null, syncEngine),
                getActiveClients: syncEngineGetActiveClients.bind(null, syncEngine),
                synchronizeWithChangedDocuments: syncEngineSynchronizeWithChangedDocuments.bind(null, syncEngine)
            };
            await this.sharedClientState.start();
        }
        // NOTE: This will immediately call the listener, so we make sure to
        // set it after localStore / remoteStore are started.
        await this.persistence.setPrimaryStateListener(async (isPrimary) => {
            await syncEngineApplyPrimaryState(this.onlineComponentProvider.syncEngine, isPrimary);
            // Background schedulers only run in the primary tab; losing
            // primary lease stops them.
            if (this.gcScheduler) {
                if (isPrimary && !this.gcScheduler.started) {
                    this.gcScheduler.start();
                }
                else if (!isPrimary) {
                    this.gcScheduler.stop();
                }
            }
            if (this.indexBackfillerScheduler) {
                if (isPrimary && !this.indexBackfillerScheduler.started) {
                    this.indexBackfillerScheduler.start();
                }
                else if (!isPrimary) {
                    this.indexBackfillerScheduler.stop();
                }
            }
        });
    }
    createSharedClientState(cfg) {
        const window = getWindow();
        // Multi-tab coordination is implemented on top of LocalStorage.
        if (!WebStorageSharedClientState.isAvailable(window)) {
            throw new FirestoreError(Code.UNIMPLEMENTED, 'IndexedDB persistence is only available on platforms that support LocalStorage.');
        }
        const persistenceKey = indexedDbStoragePrefix(cfg.databaseInfo.databaseId, cfg.databaseInfo.persistenceKey);
        return new WebStorageSharedClientState(window, cfg.asyncQueue, persistenceKey, cfg.clientId, cfg.initialUser);
    }
}
  24808. /**
  24809. * Initializes and wires the components that are needed to interface with the
  24810. * network.
  24811. */
class OnlineComponentProvider {
    async initialize(offlineComponentProvider, cfg) {
        if (this.localStore) {
            // OnlineComponentProvider may get initialized multiple times if
            // multi-tab persistence is used.
            return;
        }
        // Borrow the offline provider's components, then build the network
        // stack on top of them: datastore -> remoteStore -> syncEngine.
        this.localStore = offlineComponentProvider.localStore;
        this.sharedClientState = offlineComponentProvider.sharedClientState;
        this.datastore = this.createDatastore(cfg);
        this.remoteStore = this.createRemoteStore(cfg);
        this.eventManager = this.createEventManager(cfg);
        this.syncEngine = this.createSyncEngine(cfg,
        /* startAsPrimary=*/ !offlineComponentProvider.synchronizeTabs);
        // Online-state changes reported by other tabs flow into the sync engine.
        this.sharedClientState.onlineStateHandler = onlineState => syncEngineApplyOnlineStateChange(this.syncEngine, onlineState, 1 /* OnlineStateSource.SharedClientState */);
        this.remoteStore.remoteSyncer.handleCredentialChange =
            syncEngineHandleCredentialChange.bind(null, this.syncEngine);
        await remoteStoreApplyPrimaryState(this.remoteStore, this.syncEngine.isPrimaryClient);
    }
    createEventManager(cfg) {
        return newEventManager();
    }
    // Serializer + connection for the configured database backend.
    createDatastore(cfg) {
        const serializer = newSerializer(cfg.databaseInfo.databaseId);
        const connection = newConnection(cfg.databaseInfo);
        return newDatastore(cfg.authCredentials, cfg.appCheckCredentials, connection, serializer);
    }
    createRemoteStore(cfg) {
        return newRemoteStore(this.localStore, this.datastore, cfg.asyncQueue, onlineState => syncEngineApplyOnlineStateChange(this.syncEngine, onlineState, 0 /* OnlineStateSource.RemoteStore */), newConnectivityMonitor());
    }
    createSyncEngine(cfg, startAsPrimary) {
        return newSyncEngine(this.localStore, this.remoteStore, this.eventManager, this.sharedClientState, cfg.initialUser, cfg.maxConcurrentLimboResolutions, startAsPrimary);
    }
    // Tears down only the network layer; offline components have their own
    // terminate() on the offline provider.
    terminate() {
        return remoteStoreShutdown(this.remoteStore);
    }
}
  24849. /**
  24850. * @license
  24851. * Copyright 2020 Google LLC
  24852. *
  24853. * Licensed under the Apache License, Version 2.0 (the "License");
  24854. * you may not use this file except in compliance with the License.
  24855. * You may obtain a copy of the License at
  24856. *
  24857. * http://www.apache.org/licenses/LICENSE-2.0
  24858. *
  24859. * Unless required by applicable law or agreed to in writing, software
  24860. * distributed under the License is distributed on an "AS IS" BASIS,
  24861. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  24862. * See the License for the specific language governing permissions and
  24863. * limitations under the License.
  24864. */
  24865. /**
  24866. * How many bytes to read each time when `ReadableStreamReader.read()` is
  24867. * called. Only applicable for byte streams that we control (e.g. those backed
  24868. * by an UInt8Array).
  24869. */
const DEFAULT_BYTES_PER_READ = 10240; // 10 KiB per read() call
  24871. /**
  24872. * Builds a `ByteStreamReader` from a UInt8Array.
  24873. * @param source - The data source to use.
  24874. * @param bytesPerRead - How many bytes each `read()` from the returned reader
  24875. * will read.
  24876. */
  24877. function toByteStreamReaderHelper(source, bytesPerRead = DEFAULT_BYTES_PER_READ) {
  24878. let readFrom = 0;
  24879. // The TypeScript definition for ReadableStreamReader changed. We use
  24880. // `any` here to allow this code to compile with different versions.
  24881. // See https://github.com/microsoft/TypeScript/issues/42970
  24882. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  24883. const reader = {
  24884. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  24885. async read() {
  24886. if (readFrom < source.byteLength) {
  24887. const result = {
  24888. value: source.slice(readFrom, readFrom + bytesPerRead),
  24889. done: false
  24890. };
  24891. readFrom += bytesPerRead;
  24892. return result;
  24893. }
  24894. return { done: true };
  24895. },
  24896. async cancel() { },
  24897. releaseLock() { },
  24898. closed: Promise.reject('unimplemented')
  24899. };
  24900. return reader;
  24901. }
  24902. /**
  24903. * @license
  24904. * Copyright 2017 Google LLC
  24905. *
  24906. * Licensed under the Apache License, Version 2.0 (the "License");
  24907. * you may not use this file except in compliance with the License.
  24908. * You may obtain a copy of the License at
  24909. *
  24910. * http://www.apache.org/licenses/LICENSE-2.0
  24911. *
  24912. * Unless required by applicable law or agreed to in writing, software
  24913. * distributed under the License is distributed on an "AS IS" BASIS,
  24914. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  24915. * See the License for the specific language governing permissions and
  24916. * limitations under the License.
  24917. */
  24918. function validateNonEmptyArgument(functionName, argumentName, argument) {
  24919. if (!argument) {
  24920. throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() cannot be called with an empty ${argumentName}.`);
  24921. }
  24922. }
  24923. /**
  24924. * Validates that two boolean options are not set at the same time.
  24925. * @internal
  24926. */
  24927. function validateIsNotUsedTogether(optionName1, argument1, optionName2, argument2) {
  24928. if (argument1 === true && argument2 === true) {
  24929. throw new FirestoreError(Code.INVALID_ARGUMENT, `${optionName1} and ${optionName2} cannot be used together.`);
  24930. }
  24931. }
  24932. /**
  24933. * Validates that `path` refers to a document (indicated by the fact it contains
  24934. * an even numbers of segments).
  24935. */
  24936. function validateDocumentPath(path) {
  24937. if (!DocumentKey.isDocumentKey(path)) {
  24938. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid document reference. Document references must have an even number of segments, but ${path} has ${path.length}.`);
  24939. }
  24940. }
  24941. /**
  24942. * Validates that `path` refers to a collection (indicated by the fact it
  24943. * contains an odd numbers of segments).
  24944. */
  24945. function validateCollectionPath(path) {
  24946. if (DocumentKey.isDocumentKey(path)) {
  24947. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid collection reference. Collection references must have an odd number of segments, but ${path} has ${path.length}.`);
  24948. }
  24949. }
  24950. /**
  24951. * Returns true if it's a non-null object without a custom prototype
  24952. * (i.e. excludes Array, Date, etc.).
  24953. */
  24954. function isPlainObject(input) {
  24955. return (typeof input === 'object' &&
  24956. input !== null &&
  24957. (Object.getPrototypeOf(input) === Object.prototype ||
  24958. Object.getPrototypeOf(input) === null));
  24959. }
  24960. /** Returns a string describing the type / value of the provided input. */
  24961. function valueDescription(input) {
  24962. if (input === undefined) {
  24963. return 'undefined';
  24964. }
  24965. else if (input === null) {
  24966. return 'null';
  24967. }
  24968. else if (typeof input === 'string') {
  24969. if (input.length > 20) {
  24970. input = `${input.substring(0, 20)}...`;
  24971. }
  24972. return JSON.stringify(input);
  24973. }
  24974. else if (typeof input === 'number' || typeof input === 'boolean') {
  24975. return '' + input;
  24976. }
  24977. else if (typeof input === 'object') {
  24978. if (input instanceof Array) {
  24979. return 'an array';
  24980. }
  24981. else {
  24982. const customObjectName = tryGetCustomObjectType(input);
  24983. if (customObjectName) {
  24984. return `a custom ${customObjectName} object`;
  24985. }
  24986. else {
  24987. return 'an object';
  24988. }
  24989. }
  24990. }
  24991. else if (typeof input === 'function') {
  24992. return 'a function';
  24993. }
  24994. else {
  24995. return fail();
  24996. }
  24997. }
  24998. /** try to get the constructor name for an object. */
  24999. function tryGetCustomObjectType(input) {
  25000. if (input.constructor) {
  25001. return input.constructor.name;
  25002. }
  25003. return null;
  25004. }
  25005. /**
  25006. * Casts `obj` to `T`, optionally unwrapping Compat types to expose the
  25007. * underlying instance. Throws if `obj` is not an instance of `T`.
  25008. *
  25009. * This cast is used in the Lite and Full SDK to verify instance types for
  25010. * arguments passed to the public API.
  25011. * @internal
  25012. */
  25013. function cast(obj,
  25014. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  25015. constructor) {
  25016. if ('_delegate' in obj) {
  25017. // Unwrap Compat types
  25018. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  25019. obj = obj._delegate;
  25020. }
  25021. if (!(obj instanceof constructor)) {
  25022. if (constructor.name === obj.constructor.name) {
  25023. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Type does not match the expected instance. Did you pass a ' +
  25024. `reference from a different Firestore SDK?`);
  25025. }
  25026. else {
  25027. const description = valueDescription(obj);
  25028. throw new FirestoreError(Code.INVALID_ARGUMENT, `Expected type '${constructor.name}', but it was: ${description}`);
  25029. }
  25030. }
  25031. return obj;
  25032. }
// Throws INVALID_ARGUMENT unless n > 0.
// NOTE(review): NaN fails `n <= 0` and therefore passes silently —
// presumably intentional upstream; confirm before tightening.
function validatePositiveNumber(functionName, n) {
    if (n <= 0) {
        throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() requires a positive number, but it was: ${n}.`);
    }
}
  25038. /**
  25039. * @license
  25040. * Copyright 2020 Google LLC
  25041. *
  25042. * Licensed under the Apache License, Version 2.0 (the "License");
  25043. * you may not use this file except in compliance with the License.
  25044. * You may obtain a copy of the License at
  25045. *
  25046. * http://www.apache.org/licenses/LICENSE-2.0
  25047. *
  25048. * Unless required by applicable law or agreed to in writing, software
  25049. * distributed under the License is distributed on an "AS IS" BASIS,
  25050. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25051. * See the License for the specific language governing permissions and
  25052. * limitations under the License.
  25053. */
  25054. /**
  25055. * On Node, only supported data source is a `Uint8Array` for now.
  25056. */
  25057. function toByteStreamReader(source, bytesPerRead) {
  25058. if (!(source instanceof Uint8Array)) {
  25059. throw new FirestoreError(Code.INVALID_ARGUMENT, `NodePlatform.toByteStreamReader expects source to be Uint8Array, got ${valueDescription(source)}`);
  25060. }
  25061. return toByteStreamReaderHelper(source, bytesPerRead);
  25062. }
  25063. /**
  25064. * @license
  25065. * Copyright 2017 Google LLC
  25066. *
  25067. * Licensed under the Apache License, Version 2.0 (the "License");
  25068. * you may not use this file except in compliance with the License.
  25069. * You may obtain a copy of the License at
  25070. *
  25071. * http://www.apache.org/licenses/LICENSE-2.0
  25072. *
  25073. * Unless required by applicable law or agreed to in writing, software
  25074. * distributed under the License is distributed on an "AS IS" BASIS,
  25075. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25076. * See the License for the specific language governing permissions and
  25077. * limitations under the License.
  25078. */
  25079. /*
  25080. * A wrapper implementation of Observer<T> that will dispatch events
  25081. * asynchronously. To allow immediate silencing, a mute call is added which
  25082. * causes events scheduled to no longer be raised.
  25083. */
  25084. class AsyncObserver {
  25085. constructor(observer) {
  25086. this.observer = observer;
  25087. /**
  25088. * When set to true, will not raise future events. Necessary to deal with
  25089. * async detachment of listener.
  25090. */
  25091. this.muted = false;
  25092. }
  25093. next(value) {
  25094. if (this.observer.next) {
  25095. this.scheduleEvent(this.observer.next, value);
  25096. }
  25097. }
  25098. error(error) {
  25099. if (this.observer.error) {
  25100. this.scheduleEvent(this.observer.error, error);
  25101. }
  25102. else {
  25103. logError('Uncaught Error in snapshot listener:', error.toString());
  25104. }
  25105. }
  25106. mute() {
  25107. this.muted = true;
  25108. }
  25109. scheduleEvent(eventHandler, event) {
  25110. if (!this.muted) {
  25111. setTimeout(() => {
  25112. if (!this.muted) {
  25113. eventHandler(event);
  25114. }
  25115. }, 0);
  25116. }
  25117. }
  25118. }
  25119. /**
  25120. * @license
  25121. * Copyright 2020 Google LLC
  25122. *
  25123. * Licensed under the Apache License, Version 2.0 (the "License");
  25124. * you may not use this file except in compliance with the License.
  25125. * You may obtain a copy of the License at
  25126. *
  25127. * http://www.apache.org/licenses/LICENSE-2.0
  25128. *
  25129. * Unless required by applicable law or agreed to in writing, software
  25130. * distributed under the License is distributed on an "AS IS" BASIS,
  25131. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25132. * See the License for the specific language governing permissions and
  25133. * limitations under the License.
  25134. */
  25135. /**
  25136. * A complete element in the bundle stream, together with the byte length it
  25137. * occupies in the stream.
  25138. */
  25139. class SizedBundleElement {
  25140. constructor(payload,
  25141. // How many bytes this element takes to store in the bundle.
  25142. byteLength) {
  25143. this.payload = payload;
  25144. this.byteLength = byteLength;
  25145. }
  25146. isBundleMetadata() {
  25147. return 'metadata' in this.payload;
  25148. }
  25149. }
  25150. /**
  25151. * @license
  25152. * Copyright 2020 Google LLC
  25153. *
  25154. * Licensed under the Apache License, Version 2.0 (the "License");
  25155. * you may not use this file except in compliance with the License.
  25156. * You may obtain a copy of the License at
  25157. *
  25158. * http://www.apache.org/licenses/LICENSE-2.0
  25159. *
  25160. * Unless required by applicable law or agreed to in writing, software
  25161. * distributed under the License is distributed on an "AS IS" BASIS,
  25162. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25163. * See the License for the specific language governing permissions and
  25164. * limitations under the License.
  25165. */
  25166. /**
  25167. * A class representing a bundle.
  25168. *
  25169. * Takes a bundle stream or buffer, and presents abstractions to read bundled
  25170. * elements out of the underlying content.
  25171. */
class BundleReaderImpl {
    constructor(
    /** The reader to read from underlying binary bundle data source. */
    reader, serializer) {
        this.reader = reader;
        this.serializer = serializer;
        /** Cached bundle metadata. */
        this.metadata = new Deferred();
        /**
         * Internal buffer to hold bundle content, accumulating incomplete element
         * content.
         */
        this.buffer = new Uint8Array();
        this.textDecoder = newTextDecoder();
        // Read the metadata (which is the first element). The Deferred is
        // resolved/rejected asynchronously; getMetadata() awaits it.
        this.nextElementImpl().then(element => {
            if (element && element.isBundleMetadata()) {
                this.metadata.resolve(element.payload.metadata);
            }
            else {
                this.metadata.reject(new Error(`The first element of the bundle is not a metadata, it is
             ${JSON.stringify(element === null || element === void 0 ? void 0 : element.payload)}`));
            }
        }, error => this.metadata.reject(error));
    }
    // Cancels the underlying stream reader.
    close() {
        return this.reader.cancel();
    }
    async getMetadata() {
        return this.metadata.promise;
    }
    async nextElement() {
        // Makes sure metadata is read before proceeding.
        await this.getMetadata();
        return this.nextElementImpl();
    }
    /**
     * Reads from the head of internal buffer, and pulling more data from
     * underlying stream if a complete element cannot be found, until an
     * element(including the prefixed length and the JSON string) is found.
     *
     * Once a complete element is read, it is dropped from internal buffer.
     *
     * Returns either the bundled element, or null if we have reached the end of
     * the stream.
     */
    async nextElementImpl() {
        // Bundle format: <decimal length string>{json}<decimal length>{json}...
        const lengthBuffer = await this.readLength();
        if (lengthBuffer === null) {
            return null;
        }
        const lengthString = this.textDecoder.decode(lengthBuffer);
        const length = Number(lengthString);
        if (isNaN(length)) {
            this.raiseError(`length string (${lengthString}) is not valid number`);
        }
        const jsonString = await this.readJsonString(length);
        // Total size = length prefix bytes + JSON payload bytes.
        return new SizedBundleElement(JSON.parse(jsonString), lengthBuffer.length + length);
    }
    /** First index of '{' from the underlying buffer. */
    indexOfOpenBracket() {
        return this.buffer.findIndex(v => v === '{'.charCodeAt(0));
    }
    /**
     * Reads from the beginning of the internal buffer, until the first '{', and
     * return the content.
     *
     * If reached end of the stream, returns a null.
     */
    async readLength() {
        // Keep pulling until the buffer contains a '{' or the stream ends.
        while (this.indexOfOpenBracket() < 0) {
            const done = await this.pullMoreDataToBuffer();
            if (done) {
                break;
            }
        }
        // Broke out of the loop because underlying stream is closed, and there
        // happens to be no more data to process.
        if (this.buffer.length === 0) {
            return null;
        }
        const position = this.indexOfOpenBracket();
        // Broke out of the loop because underlying stream is closed, but still
        // cannot find an open bracket.
        if (position < 0) {
            this.raiseError('Reached the end of bundle when a length string is expected.');
        }
        const result = this.buffer.slice(0, position);
        // Update the internal buffer to drop the read length.
        this.buffer = this.buffer.slice(position);
        return result;
    }
    /**
     * Reads from a specified position from the internal buffer, for a specified
     * number of bytes, pulling more data from the underlying stream if needed.
     *
     * Returns a string decoded from the read bytes.
     */
    async readJsonString(length) {
        while (this.buffer.length < length) {
            const done = await this.pullMoreDataToBuffer();
            if (done) {
                this.raiseError('Reached the end of bundle when more is expected.');
            }
        }
        const result = this.textDecoder.decode(this.buffer.slice(0, length));
        // Update the internal buffer to drop the read json string.
        this.buffer = this.buffer.slice(length);
        return result;
    }
    // Cancels the stream and throws; never returns normally.
    raiseError(message) {
        // eslint-disable-next-line @typescript-eslint/no-floating-promises
        this.reader.cancel();
        throw new Error(`Invalid bundle format: ${message}`);
    }
    /**
     * Pulls more data from underlying stream to internal buffer.
     * Returns a boolean indicating whether the stream is finished.
     */
    async pullMoreDataToBuffer() {
        const result = await this.reader.read();
        if (!result.done) {
            // Grow the buffer by concatenating the newly read chunk.
            const newBuffer = new Uint8Array(this.buffer.length + result.value.length);
            newBuffer.set(this.buffer);
            newBuffer.set(result.value, this.buffer.length);
            this.buffer = newBuffer;
        }
        return result.done;
    }
}
  25302. function newBundleReader(reader, serializer) {
  25303. return new BundleReaderImpl(reader, serializer);
  25304. }
  25305. /**
  25306. * @license
  25307. * Copyright 2022 Google LLC
  25308. *
  25309. * Licensed under the Apache License, Version 2.0 (the "License");
  25310. * you may not use this file except in compliance with the License.
  25311. * You may obtain a copy of the License at
  25312. *
  25313. * http://www.apache.org/licenses/LICENSE-2.0
  25314. *
  25315. * Unless required by applicable law or agreed to in writing, software
  25316. * distributed under the License is distributed on an "AS IS" BASIS,
  25317. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25318. * See the License for the specific language governing permissions and
  25319. * limitations under the License.
  25320. */
  25321. /**
  25322. * Represents an aggregation that can be performed by Firestore.
  25323. */
  25324. // eslint-disable-next-line @typescript-eslint/no-unused-vars
  25325. class AggregateField {
  25326. constructor() {
  25327. /** A type string to uniquely identify instances of this class. */
  25328. this.type = 'AggregateField';
  25329. }
  25330. }
  25331. /**
  25332. * The results of executing an aggregation query.
  25333. */
  25334. class AggregateQuerySnapshot {
  25335. /** @hideconstructor */
  25336. constructor(query, _data) {
  25337. this._data = _data;
  25338. /** A type string to uniquely identify instances of this class. */
  25339. this.type = 'AggregateQuerySnapshot';
  25340. this.query = query;
  25341. }
  25342. /**
  25343. * Returns the results of the aggregations performed over the underlying
  25344. * query.
  25345. *
  25346. * The keys of the returned object will be the same as those of the
  25347. * `AggregateSpec` object specified to the aggregation method, and the values
  25348. * will be the corresponding aggregation result.
  25349. *
  25350. * @returns The results of the aggregations performed over the underlying
  25351. * query.
  25352. */
  25353. data() {
  25354. return this._data;
  25355. }
  25356. }
  25357. /**
  25358. * @license
  25359. * Copyright 2022 Google LLC
  25360. *
  25361. * Licensed under the Apache License, Version 2.0 (the "License");
  25362. * you may not use this file except in compliance with the License.
  25363. * You may obtain a copy of the License at
  25364. *
  25365. * http://www.apache.org/licenses/LICENSE-2.0
  25366. *
  25367. * Unless required by applicable law or agreed to in writing, software
  25368. * distributed under the License is distributed on an "AS IS" BASIS,
  25369. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25370. * See the License for the specific language governing permissions and
  25371. * limitations under the License.
  25372. */
  25373. /**
  25374. * CountQueryRunner encapsulates the logic needed to run the count aggregation
  25375. * queries.
  25376. */
  25377. class CountQueryRunner {
  25378. constructor(query, datastore, userDataWriter) {
  25379. this.query = query;
  25380. this.datastore = datastore;
  25381. this.userDataWriter = userDataWriter;
  25382. }
  25383. run() {
  25384. return invokeRunAggregationQueryRpc(this.datastore, this.query._query).then(result => {
  25385. hardAssert(result[0] !== undefined);
  25386. const counts = Object.entries(result[0])
  25387. .filter(([key, value]) => key === 'count_alias')
  25388. .map(([key, value]) => this.userDataWriter.convertValue(value));
  25389. const countValue = counts[0];
  25390. hardAssert(typeof countValue === 'number');
  25391. return Promise.resolve(new AggregateQuerySnapshot(this.query, {
  25392. count: countValue
  25393. }));
  25394. });
  25395. }
  25396. }
  25397. /**
  25398. * @license
  25399. * Copyright 2017 Google LLC
  25400. *
  25401. * Licensed under the Apache License, Version 2.0 (the "License");
  25402. * you may not use this file except in compliance with the License.
  25403. * You may obtain a copy of the License at
  25404. *
  25405. * http://www.apache.org/licenses/LICENSE-2.0
  25406. *
  25407. * Unless required by applicable law or agreed to in writing, software
  25408. * distributed under the License is distributed on an "AS IS" BASIS,
  25409. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25410. * See the License for the specific language governing permissions and
  25411. * limitations under the License.
  25412. */
  25413. /**
  25414. * Internal transaction object responsible for accumulating the mutations to
  25415. * perform and the base versions for any documents read.
  25416. */
  25417. class Transaction$2 {
    constructor(datastore) {
        this.datastore = datastore;
        // The version of each document that was read during this transaction.
        this.readVersions = new Map();
        // Mutations staged for the final commit, in application order.
        this.mutations = [];
        this.committed = false;
        /**
         * A deferred usage error that occurred previously in this transaction that
         * will cause the transaction to fail once it actually commits.
         */
        this.lastWriteError = null;
        /**
         * Set of documents that have been written in the transaction.
         *
         * When there's more than one write to the same key in a transaction, any
         * writes after the first are handled differently.
         */
        this.writtenDocs = new Set();
    }
    async lookup(keys) {
        this.ensureCommitNotCalled();
        // Reads are only allowed before any write has been staged.
        if (this.mutations.length > 0) {
            throw new FirestoreError(Code.INVALID_ARGUMENT, 'Firestore transactions require all reads to be executed before all writes.');
        }
        const docs = await invokeBatchGetDocumentsRpc(this.datastore, keys);
        // Remember each document's version so commit() can add preconditions.
        docs.forEach(doc => this.recordVersion(doc));
        return docs;
    }
    // Stages a set mutation guarded by the version read earlier (if any).
    set(key, data) {
        this.write(data.toMutation(key, this.precondition(key)));
        this.writtenDocs.add(key.toString());
    }
    update(key, data) {
        try {
            this.write(data.toMutation(key, this.preconditionForUpdate(key)));
        }
        catch (e) {
            // Defer the error; the transaction fails when commit() is called.
            this.lastWriteError = e;
        }
        // The key is marked written even when staging failed (matches the
        // first-write-wins precondition handling in precondition()).
        this.writtenDocs.add(key.toString());
    }
    // Stages a delete mutation guarded by the version read earlier (if any).
    delete(key) {
        this.write(new DeleteMutation(key, this.precondition(key)));
        this.writtenDocs.add(key.toString());
    }
    async commit() {
        this.ensureCommitNotCalled();
        if (this.lastWriteError) {
            throw this.lastWriteError;
        }
        // NOTE: `unwritten` aliases `readVersions`; the map is mutated in
        // place to leave only keys that were read but never written.
        const unwritten = this.readVersions;
        // For each mutation, note that the doc was written.
        this.mutations.forEach(mutation => {
            unwritten.delete(mutation.key.toString());
        });
        // For each document that was read but not written to, we want to perform
        // a `verify` operation.
        unwritten.forEach((_, path) => {
            const key = DocumentKey.fromPath(path);
            this.mutations.push(new VerifyMutation(key, this.precondition(key)));
        });
        await invokeCommitRpc(this.datastore, this.mutations);
        this.committed = true;
    }
  25482. recordVersion(doc) {
  25483. let docVersion;
  25484. if (doc.isFoundDocument()) {
  25485. docVersion = doc.version;
  25486. }
  25487. else if (doc.isNoDocument()) {
  25488. // Represent a deleted doc using SnapshotVersion.min().
  25489. docVersion = SnapshotVersion.min();
  25490. }
  25491. else {
  25492. throw fail();
  25493. }
  25494. const existingVersion = this.readVersions.get(doc.key.toString());
  25495. if (existingVersion) {
  25496. if (!docVersion.isEqual(existingVersion)) {
  25497. // This transaction will fail no matter what.
  25498. throw new FirestoreError(Code.ABORTED, 'Document version changed between two reads.');
  25499. }
  25500. }
  25501. else {
  25502. this.readVersions.set(doc.key.toString(), docVersion);
  25503. }
  25504. }
  25505. /**
  25506. * Returns the version of this document when it was read in this transaction,
  25507. * as a precondition, or no precondition if it was not read.
  25508. */
  25509. precondition(key) {
  25510. const version = this.readVersions.get(key.toString());
  25511. if (!this.writtenDocs.has(key.toString()) && version) {
  25512. if (version.isEqual(SnapshotVersion.min())) {
  25513. return Precondition.exists(false);
  25514. }
  25515. else {
  25516. return Precondition.updateTime(version);
  25517. }
  25518. }
  25519. else {
  25520. return Precondition.none();
  25521. }
  25522. }
  25523. /**
  25524. * Returns the precondition for a document if the operation is an update.
  25525. */
  25526. preconditionForUpdate(key) {
  25527. const version = this.readVersions.get(key.toString());
  25528. // The first time a document is written, we want to take into account the
  25529. // read time and existence
  25530. if (!this.writtenDocs.has(key.toString()) && version) {
  25531. if (version.isEqual(SnapshotVersion.min())) {
  25532. // The document doesn't exist, so fail the transaction.
  25533. // This has to be validated locally because you can't send a
  25534. // precondition that a document does not exist without changing the
  25535. // semantics of the backend write to be an insert. This is the reverse
  25536. // of what we want, since we want to assert that the document doesn't
  25537. // exist but then send the update and have it fail. Since we can't
  25538. // express that to the backend, we have to validate locally.
  25539. // Note: this can change once we can send separate verify writes in the
  25540. // transaction.
  25541. throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't update a document that doesn't exist.");
  25542. }
  25543. // Document exists, base precondition on document update time.
  25544. return Precondition.updateTime(version);
  25545. }
  25546. else {
  25547. // Document was not read, so we just use the preconditions for a blind
  25548. // update.
  25549. return Precondition.exists(true);
  25550. }
  25551. }
/** Appends `mutation` to the staged mutations sent at commit time. */
write(mutation) {
    this.ensureCommitNotCalled();
    this.mutations.push(mutation);
}
// Guard invoked before every read/write/commit. The body is empty in this
// production build — presumably a debug assertion (that commit() has not
// already run) was stripped by the bundler; TODO confirm against upstream SDK.
ensureCommitNotCalled() {
}
  25558. }
  25559. /**
  25560. * @license
  25561. * Copyright 2019 Google LLC
  25562. *
  25563. * Licensed under the Apache License, Version 2.0 (the "License");
  25564. * you may not use this file except in compliance with the License.
  25565. * You may obtain a copy of the License at
  25566. *
  25567. * http://www.apache.org/licenses/LICENSE-2.0
  25568. *
  25569. * Unless required by applicable law or agreed to in writing, software
  25570. * distributed under the License is distributed on an "AS IS" BASIS,
  25571. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25572. * See the License for the specific language governing permissions and
  25573. * limitations under the License.
  25574. */
  25575. /**
  25576. * TransactionRunner encapsulates the logic needed to run and retry transactions
  25577. * with backoff.
  25578. */
class TransactionRunner {
    /**
     * @param asyncQueue - Queue on which commits and retries are scheduled.
     * @param datastore - Datastore each transaction attempt runs against.
     * @param options - Carries `maxAttempts`, the total attempt budget.
     * @param updateFunction - User callback performing the reads/writes.
     * @param deferred - Resolved/rejected with the final transaction outcome.
     */
    constructor(asyncQueue, datastore, options, updateFunction, deferred) {
        this.asyncQueue = asyncQueue;
        this.datastore = datastore;
        this.options = options;
        this.updateFunction = updateFunction;
        this.deferred = deferred;
        // Decremented on the initial run and on every retryable failure.
        this.attemptsRemaining = options.maxAttempts;
        this.backoff = new ExponentialBackoff(this.asyncQueue, "transaction_retry" /* TimerId.TransactionRetry */);
    }
    /** Runs the transaction and sets the result on deferred. */
    run() {
        this.attemptsRemaining -= 1;
        this.runWithBackOff();
    }
    // Performs one attempt after the current backoff delay: creates a fresh
    // Transaction, runs the user function, then commits on the async queue.
    runWithBackOff() {
        this.backoff.backoffAndRun(async () => {
            const transaction = new Transaction$2(this.datastore);
            const userPromise = this.tryRunUpdateFunction(transaction);
            if (userPromise) {
                userPromise
                    .then(result => {
                    this.asyncQueue.enqueueAndForget(() => {
                        return transaction
                            .commit()
                            .then(() => {
                            this.deferred.resolve(result);
                        })
                            .catch(commitError => {
                            this.handleTransactionError(commitError);
                        });
                    });
                })
                    .catch(userPromiseError => {
                    this.handleTransactionError(userPromiseError);
                });
            }
        });
    }
    // Invokes the user-supplied update function. Rejects `deferred` and
    // returns null if it throws synchronously or does not return a thenable.
    tryRunUpdateFunction(transaction) {
        try {
            const userPromise = this.updateFunction(transaction);
            if (isNullOrUndefined(userPromise) ||
                !userPromise.catch ||
                !userPromise.then) {
                this.deferred.reject(Error('Transaction callback must return a Promise'));
                return null;
            }
            return userPromise;
        }
        catch (error) {
            // Do not retry errors thrown by user provided updateFunction.
            this.deferred.reject(error);
            return null;
        }
    }
    // Retries with backoff while budget remains and the error is retryable;
    // otherwise settles `deferred` with the error.
    handleTransactionError(error) {
        if (this.attemptsRemaining > 0 && this.isRetryableTransactionError(error)) {
            this.attemptsRemaining -= 1;
            this.asyncQueue.enqueueAndForget(() => {
                this.runWithBackOff();
                return Promise.resolve();
            });
        }
        else {
            this.deferred.reject(error);
        }
    }
    // Only FirebaseErrors with transient codes are retried; plain JS errors
    // (programming bugs in the update function) are not.
    isRetryableTransactionError(error) {
        if (error.name === 'FirebaseError') {
            // In transactions, the backend will fail outdated reads with FAILED_PRECONDITION and
            // non-matching document versions with ABORTED. These errors should be retried.
            const code = error.code;
            return (code === 'aborted' ||
                code === 'failed-precondition' ||
                code === 'already-exists' ||
                !isPermanentError(code));
        }
        return false;
    }
}
  25660. /**
  25661. * @license
  25662. * Copyright 2017 Google LLC
  25663. *
  25664. * Licensed under the Apache License, Version 2.0 (the "License");
  25665. * you may not use this file except in compliance with the License.
  25666. * You may obtain a copy of the License at
  25667. *
  25668. * http://www.apache.org/licenses/LICENSE-2.0
  25669. *
  25670. * Unless required by applicable law or agreed to in writing, software
  25671. * distributed under the License is distributed on an "AS IS" BASIS,
  25672. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  25673. * See the License for the specific language governing permissions and
  25674. * limitations under the License.
  25675. */
// Tag for debug log lines emitted by FirestoreClient and its helpers below.
const LOG_TAG$2 = 'FirestoreClient';
// Cap on concurrent limbo-document resolutions, exposed via getConfiguration().
const MAX_CONCURRENT_LIMBO_RESOLUTIONS = 100;
  25678. /**
  25679. * FirestoreClient is a top-level class that constructs and owns all of the
  25680. * pieces of the client SDK architecture. It is responsible for creating the
  25681. * async queue that is shared by all of the other components in the system.
  25682. */
  25683. class FirestoreClient {
  25684. constructor(authCredentials, appCheckCredentials,
  25685. /**
  25686. * Asynchronous queue responsible for all of our internal processing. When
  25687. * we get incoming work from the user (via public API) or the network
  25688. * (incoming GRPC messages), we should always schedule onto this queue.
  25689. * This ensures all of our work is properly serialized (e.g. we don't
  25690. * start processing a new operation while the previous one is waiting for
  25691. * an async I/O to complete).
  25692. */
  25693. asyncQueue, databaseInfo) {
  25694. this.authCredentials = authCredentials;
  25695. this.appCheckCredentials = appCheckCredentials;
  25696. this.asyncQueue = asyncQueue;
  25697. this.databaseInfo = databaseInfo;
  25698. this.user = User.UNAUTHENTICATED;
  25699. this.clientId = AutoId.newId();
  25700. this.authCredentialListener = () => Promise.resolve();
  25701. this.appCheckCredentialListener = () => Promise.resolve();
  25702. this.authCredentials.start(asyncQueue, async (user) => {
  25703. logDebug(LOG_TAG$2, 'Received user=', user.uid);
  25704. await this.authCredentialListener(user);
  25705. this.user = user;
  25706. });
  25707. this.appCheckCredentials.start(asyncQueue, newAppCheckToken => {
  25708. logDebug(LOG_TAG$2, 'Received new app check token=', newAppCheckToken);
  25709. return this.appCheckCredentialListener(newAppCheckToken, this.user);
  25710. });
  25711. }
  25712. async getConfiguration() {
  25713. return {
  25714. asyncQueue: this.asyncQueue,
  25715. databaseInfo: this.databaseInfo,
  25716. clientId: this.clientId,
  25717. authCredentials: this.authCredentials,
  25718. appCheckCredentials: this.appCheckCredentials,
  25719. initialUser: this.user,
  25720. maxConcurrentLimboResolutions: MAX_CONCURRENT_LIMBO_RESOLUTIONS
  25721. };
  25722. }
  25723. setCredentialChangeListener(listener) {
  25724. this.authCredentialListener = listener;
  25725. }
  25726. setAppCheckTokenChangeListener(listener) {
  25727. this.appCheckCredentialListener = listener;
  25728. }
  25729. /**
  25730. * Checks that the client has not been terminated. Ensures that other methods on
  25731. * this class cannot be called after the client is terminated.
  25732. */
  25733. verifyNotTerminated() {
  25734. if (this.asyncQueue.isShuttingDown) {
  25735. throw new FirestoreError(Code.FAILED_PRECONDITION, 'The client has already been terminated.');
  25736. }
  25737. }
  25738. terminate() {
  25739. this.asyncQueue.enterRestrictedMode();
  25740. const deferred = new Deferred();
  25741. this.asyncQueue.enqueueAndForgetEvenWhileRestricted(async () => {
  25742. try {
  25743. if (this.onlineComponents) {
  25744. await this.onlineComponents.terminate();
  25745. }
  25746. if (this.offlineComponents) {
  25747. await this.offlineComponents.terminate();
  25748. }
  25749. // The credentials provider must be terminated after shutting down the
  25750. // RemoteStore as it will prevent the RemoteStore from retrieving auth
  25751. // tokens.
  25752. this.authCredentials.shutdown();
  25753. this.appCheckCredentials.shutdown();
  25754. deferred.resolve();
  25755. }
  25756. catch (e) {
  25757. const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to shutdown persistence`);
  25758. deferred.reject(firestoreError);
  25759. }
  25760. });
  25761. return deferred.promise;
  25762. }
  25763. }
  25764. async function setOfflineComponentProvider(client, offlineComponentProvider) {
  25765. client.asyncQueue.verifyOperationInProgress();
  25766. logDebug(LOG_TAG$2, 'Initializing OfflineComponentProvider');
  25767. const configuration = await client.getConfiguration();
  25768. await offlineComponentProvider.initialize(configuration);
  25769. let currentUser = configuration.initialUser;
  25770. client.setCredentialChangeListener(async (user) => {
  25771. if (!currentUser.isEqual(user)) {
  25772. await localStoreHandleUserChange(offlineComponentProvider.localStore, user);
  25773. currentUser = user;
  25774. }
  25775. });
  25776. // When a user calls clearPersistence() in one client, all other clients
  25777. // need to be terminated to allow the delete to succeed.
  25778. offlineComponentProvider.persistence.setDatabaseDeletedListener(() => client.terminate());
  25779. client.offlineComponents = offlineComponentProvider;
  25780. }
  25781. async function setOnlineComponentProvider(client, onlineComponentProvider) {
  25782. client.asyncQueue.verifyOperationInProgress();
  25783. const offlineComponentProvider = await ensureOfflineComponents(client);
  25784. logDebug(LOG_TAG$2, 'Initializing OnlineComponentProvider');
  25785. const configuration = await client.getConfiguration();
  25786. await onlineComponentProvider.initialize(offlineComponentProvider, configuration);
  25787. // The CredentialChangeListener of the online component provider takes
  25788. // precedence over the offline component provider.
  25789. client.setCredentialChangeListener(user => remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user));
  25790. client.setAppCheckTokenChangeListener((_, user) => remoteStoreHandleCredentialChange(onlineComponentProvider.remoteStore, user));
  25791. client.onlineComponents = onlineComponentProvider;
  25792. }
  25793. async function ensureOfflineComponents(client) {
  25794. if (!client.offlineComponents) {
  25795. logDebug(LOG_TAG$2, 'Using default OfflineComponentProvider');
  25796. await setOfflineComponentProvider(client, new MemoryOfflineComponentProvider());
  25797. }
  25798. return client.offlineComponents;
  25799. }
  25800. async function ensureOnlineComponents(client) {
  25801. if (!client.onlineComponents) {
  25802. logDebug(LOG_TAG$2, 'Using default OnlineComponentProvider');
  25803. await setOnlineComponentProvider(client, new OnlineComponentProvider());
  25804. }
  25805. return client.onlineComponents;
  25806. }
  25807. function getPersistence(client) {
  25808. return ensureOfflineComponents(client).then(c => c.persistence);
  25809. }
  25810. function getLocalStore(client) {
  25811. return ensureOfflineComponents(client).then(c => c.localStore);
  25812. }
  25813. function getRemoteStore(client) {
  25814. return ensureOnlineComponents(client).then(c => c.remoteStore);
  25815. }
  25816. function getSyncEngine(client) {
  25817. return ensureOnlineComponents(client).then(c => c.syncEngine);
  25818. }
  25819. function getDatastore(client) {
  25820. return ensureOnlineComponents(client).then(c => c.datastore);
  25821. }
  25822. async function getEventManager(client) {
  25823. const onlineComponentProvider = await ensureOnlineComponents(client);
  25824. const eventManager = onlineComponentProvider.eventManager;
  25825. eventManager.onListen = syncEngineListen.bind(null, onlineComponentProvider.syncEngine);
  25826. eventManager.onUnlisten = syncEngineUnlisten.bind(null, onlineComponentProvider.syncEngine);
  25827. return eventManager;
  25828. }
  25829. /** Enables the network connection and re-enqueues all pending operations. */
  25830. function firestoreClientEnableNetwork(client) {
  25831. return client.asyncQueue.enqueue(async () => {
  25832. const persistence = await getPersistence(client);
  25833. const remoteStore = await getRemoteStore(client);
  25834. persistence.setNetworkEnabled(true);
  25835. return remoteStoreEnableNetwork(remoteStore);
  25836. });
  25837. }
  25838. /** Disables the network connection. Pending operations will not complete. */
  25839. function firestoreClientDisableNetwork(client) {
  25840. return client.asyncQueue.enqueue(async () => {
  25841. const persistence = await getPersistence(client);
  25842. const remoteStore = await getRemoteStore(client);
  25843. persistence.setNetworkEnabled(false);
  25844. return remoteStoreDisableNetwork(remoteStore);
  25845. });
  25846. }
  25847. /**
  25848. * Returns a Promise that resolves when all writes that were pending at the time
  25849. * this method was called received server acknowledgement. An acknowledgement
  25850. * can be either acceptance or rejection.
  25851. */
  25852. function firestoreClientWaitForPendingWrites(client) {
  25853. const deferred = new Deferred();
  25854. client.asyncQueue.enqueueAndForget(async () => {
  25855. const syncEngine = await getSyncEngine(client);
  25856. return syncEngineRegisterPendingWritesCallback(syncEngine, deferred);
  25857. });
  25858. return deferred.promise;
  25859. }
  25860. function firestoreClientListen(client, query, options, observer) {
  25861. const wrappedObserver = new AsyncObserver(observer);
  25862. const listener = new QueryListener(query, wrappedObserver, options);
  25863. client.asyncQueue.enqueueAndForget(async () => {
  25864. const eventManager = await getEventManager(client);
  25865. return eventManagerListen(eventManager, listener);
  25866. });
  25867. return () => {
  25868. wrappedObserver.mute();
  25869. client.asyncQueue.enqueueAndForget(async () => {
  25870. const eventManager = await getEventManager(client);
  25871. return eventManagerUnlisten(eventManager, listener);
  25872. });
  25873. };
  25874. }
  25875. function firestoreClientGetDocumentFromLocalCache(client, docKey) {
  25876. const deferred = new Deferred();
  25877. client.asyncQueue.enqueueAndForget(async () => {
  25878. const localStore = await getLocalStore(client);
  25879. return readDocumentFromCache(localStore, docKey, deferred);
  25880. });
  25881. return deferred.promise;
  25882. }
  25883. function firestoreClientGetDocumentViaSnapshotListener(client, key, options = {}) {
  25884. const deferred = new Deferred();
  25885. client.asyncQueue.enqueueAndForget(async () => {
  25886. const eventManager = await getEventManager(client);
  25887. return readDocumentViaSnapshotListener(eventManager, client.asyncQueue, key, options, deferred);
  25888. });
  25889. return deferred.promise;
  25890. }
  25891. function firestoreClientGetDocumentsFromLocalCache(client, query) {
  25892. const deferred = new Deferred();
  25893. client.asyncQueue.enqueueAndForget(async () => {
  25894. const localStore = await getLocalStore(client);
  25895. return executeQueryFromCache(localStore, query, deferred);
  25896. });
  25897. return deferred.promise;
  25898. }
  25899. function firestoreClientGetDocumentsViaSnapshotListener(client, query, options = {}) {
  25900. const deferred = new Deferred();
  25901. client.asyncQueue.enqueueAndForget(async () => {
  25902. const eventManager = await getEventManager(client);
  25903. return executeQueryViaSnapshotListener(eventManager, client.asyncQueue, query, options, deferred);
  25904. });
  25905. return deferred.promise;
  25906. }
  25907. function firestoreClientWrite(client, mutations) {
  25908. const deferred = new Deferred();
  25909. client.asyncQueue.enqueueAndForget(async () => {
  25910. const syncEngine = await getSyncEngine(client);
  25911. return syncEngineWrite(syncEngine, mutations, deferred);
  25912. });
  25913. return deferred.promise;
  25914. }
  25915. function firestoreClientAddSnapshotsInSyncListener(client, observer) {
  25916. const wrappedObserver = new AsyncObserver(observer);
  25917. client.asyncQueue.enqueueAndForget(async () => {
  25918. const eventManager = await getEventManager(client);
  25919. return addSnapshotsInSyncListener(eventManager, wrappedObserver);
  25920. });
  25921. return () => {
  25922. wrappedObserver.mute();
  25923. client.asyncQueue.enqueueAndForget(async () => {
  25924. const eventManager = await getEventManager(client);
  25925. return removeSnapshotsInSyncListener(eventManager, wrappedObserver);
  25926. });
  25927. };
  25928. }
  25929. /**
  25930. * Takes an updateFunction in which a set of reads and writes can be performed
  25931. * atomically. In the updateFunction, the client can read and write values
  25932. * using the supplied transaction object. After the updateFunction, all
  25933. * changes will be committed. If a retryable error occurs (ex: some other
  25934. * client has changed any of the data referenced), then the updateFunction
  25935. * will be called again after a backoff. If the updateFunction still fails
  25936. * after all retries, then the transaction will be rejected.
  25937. *
  25938. * The transaction object passed to the updateFunction contains methods for
  25939. * accessing documents and collections. Unlike other datastore access, data
  25940. * accessed with the transaction will not reflect local changes that have not
  25941. * been committed. For this reason, it is required that all reads are
  25942. * performed before any writes. Transactions must be performed while online.
  25943. */
  25944. function firestoreClientTransaction(client, updateFunction, options) {
  25945. const deferred = new Deferred();
  25946. client.asyncQueue.enqueueAndForget(async () => {
  25947. const datastore = await getDatastore(client);
  25948. new TransactionRunner(client.asyncQueue, datastore, options, updateFunction, deferred).run();
  25949. });
  25950. return deferred.promise;
  25951. }
  25952. function firestoreClientRunCountQuery(client, query, userDataWriter) {
  25953. const deferred = new Deferred();
  25954. client.asyncQueue.enqueueAndForget(async () => {
  25955. try {
  25956. const remoteStore = await getRemoteStore(client);
  25957. if (!canUseNetwork(remoteStore)) {
  25958. deferred.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get count result because the client is offline.'));
  25959. }
  25960. else {
  25961. const datastore = await getDatastore(client);
  25962. const result = new CountQueryRunner(query, datastore, userDataWriter).run();
  25963. deferred.resolve(result);
  25964. }
  25965. }
  25966. catch (e) {
  25967. deferred.reject(e);
  25968. }
  25969. });
  25970. return deferred.promise;
  25971. }
  25972. async function readDocumentFromCache(localStore, docKey, result) {
  25973. try {
  25974. const document = await localStoreReadDocument(localStore, docKey);
  25975. if (document.isFoundDocument()) {
  25976. result.resolve(document);
  25977. }
  25978. else if (document.isNoDocument()) {
  25979. result.resolve(null);
  25980. }
  25981. else {
  25982. result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from cache. (However, this document may ' +
  25983. "exist on the server. Run again without setting 'source' in " +
  25984. 'the GetOptions to attempt to retrieve the document from the ' +
  25985. 'server.)'));
  25986. }
  25987. }
  25988. catch (e) {
  25989. const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to get document '${docKey} from cache`);
  25990. result.reject(firestoreError);
  25991. }
  25992. }
  25993. /**
  25994. * Retrieves a latency-compensated document from the backend via a
  25995. * SnapshotListener.
  25996. */
  25997. function readDocumentViaSnapshotListener(eventManager, asyncQueue, key, options, result) {
  25998. const wrappedObserver = new AsyncObserver({
  25999. next: (snap) => {
  26000. // Remove query first before passing event to user to avoid
  26001. // user actions affecting the now stale query.
  26002. asyncQueue.enqueueAndForget(() => eventManagerUnlisten(eventManager, listener));
  26003. const exists = snap.docs.has(key);
  26004. if (!exists && snap.fromCache) {
  26005. // TODO(dimond): If we're online and the document doesn't
  26006. // exist then we resolve with a doc.exists set to false. If
  26007. // we're offline however, we reject the Promise in this
  26008. // case. Two options: 1) Cache the negative response from
  26009. // the server so we can deliver that even when you're
  26010. // offline 2) Actually reject the Promise in the online case
  26011. // if the document doesn't exist.
  26012. result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document because the client is offline.'));
  26013. }
  26014. else if (exists &&
  26015. snap.fromCache &&
  26016. options &&
  26017. options.source === 'server') {
  26018. result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get document from server. (However, this ' +
  26019. 'document does exist in the local cache. Run again ' +
  26020. 'without setting source to "server" to ' +
  26021. 'retrieve the cached document.)'));
  26022. }
  26023. else {
  26024. result.resolve(snap);
  26025. }
  26026. },
  26027. error: e => result.reject(e)
  26028. });
  26029. const listener = new QueryListener(newQueryForPath(key.path), wrappedObserver, {
  26030. includeMetadataChanges: true,
  26031. waitForSyncWhenOnline: true
  26032. });
  26033. return eventManagerListen(eventManager, listener);
  26034. }
  26035. async function executeQueryFromCache(localStore, query, result) {
  26036. try {
  26037. const queryResult = await localStoreExecuteQuery(localStore, query,
  26038. /* usePreviousResults= */ true);
  26039. const view = new View(query, queryResult.remoteKeys);
  26040. const viewDocChanges = view.computeDocChanges(queryResult.documents);
  26041. const viewChange = view.applyChanges(viewDocChanges,
  26042. /* updateLimboDocuments= */ false);
  26043. result.resolve(viewChange.snapshot);
  26044. }
  26045. catch (e) {
  26046. const firestoreError = wrapInUserErrorIfRecoverable(e, `Failed to execute query '${query} against cache`);
  26047. result.reject(firestoreError);
  26048. }
  26049. }
  26050. /**
  26051. * Retrieves a latency-compensated query snapshot from the backend via a
  26052. * SnapshotListener.
  26053. */
  26054. function executeQueryViaSnapshotListener(eventManager, asyncQueue, query, options, result) {
  26055. const wrappedObserver = new AsyncObserver({
  26056. next: snapshot => {
  26057. // Remove query first before passing event to user to avoid
  26058. // user actions affecting the now stale query.
  26059. asyncQueue.enqueueAndForget(() => eventManagerUnlisten(eventManager, listener));
  26060. if (snapshot.fromCache && options.source === 'server') {
  26061. result.reject(new FirestoreError(Code.UNAVAILABLE, 'Failed to get documents from server. (However, these ' +
  26062. 'documents may exist in the local cache. Run again ' +
  26063. 'without setting source to "server" to ' +
  26064. 'retrieve the cached documents.)'));
  26065. }
  26066. else {
  26067. result.resolve(snapshot);
  26068. }
  26069. },
  26070. error: e => result.reject(e)
  26071. });
  26072. const listener = new QueryListener(query, wrappedObserver, {
  26073. includeMetadataChanges: true,
  26074. waitForSyncWhenOnline: true
  26075. });
  26076. return eventManagerListen(eventManager, listener);
  26077. }
  26078. function firestoreClientLoadBundle(client, databaseId, data, resultTask) {
  26079. const reader = createBundleReader(data, newSerializer(databaseId));
  26080. client.asyncQueue.enqueueAndForget(async () => {
  26081. syncEngineLoadBundle(await getSyncEngine(client), reader, resultTask);
  26082. });
  26083. }
  26084. function firestoreClientGetNamedQuery(client, queryName) {
  26085. return client.asyncQueue.enqueue(async () => localStoreGetNamedQuery(await getLocalStore(client), queryName));
  26086. }
  26087. function createBundleReader(data, serializer) {
  26088. let content;
  26089. if (typeof data === 'string') {
  26090. content = newTextEncoder().encode(data);
  26091. }
  26092. else {
  26093. content = data;
  26094. }
  26095. return newBundleReader(toByteStreamReader(content), serializer);
  26096. }
  26097. /**
  26098. * @license
  26099. * Copyright 2020 Google LLC
  26100. *
  26101. * Licensed under the Apache License, Version 2.0 (the "License");
  26102. * you may not use this file except in compliance with the License.
  26103. * You may obtain a copy of the License at
  26104. *
  26105. * http://www.apache.org/licenses/LICENSE-2.0
  26106. *
  26107. * Unless required by applicable law or agreed to in writing, software
  26108. * distributed under the License is distributed on an "AS IS" BASIS,
  26109. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26110. * See the License for the specific language governing permissions and
  26111. * limitations under the License.
  26112. */
// Tag for debug log lines emitted by the component-provider helpers below.
const LOG_TAG$1 = 'ComponentProvider';
/**
 * An instance map that ensures only one Datastore exists per Firestore
 * instance.
 */
const datastoreInstances = new Map();
  26119. /**
  26120. * Removes all components associated with the provided instance. Must be called
  26121. * when the `Firestore` instance is terminated.
  26122. */
  26123. function removeComponents(firestore) {
  26124. const datastore = datastoreInstances.get(firestore);
  26125. if (datastore) {
  26126. logDebug(LOG_TAG$1, 'Removing Datastore');
  26127. datastoreInstances.delete(firestore);
  26128. datastore.terminate();
  26129. }
  26130. }
/**
 * Builds a DatabaseInfo from the identifiers and the (already normalized)
 * FirestoreSettingsImpl, forwarding the transport-related flags.
 */
function makeDatabaseInfo(databaseId, appId, persistenceKey, settings) {
    return new DatabaseInfo(databaseId, appId, persistenceKey, settings.host, settings.ssl, settings.experimentalForceLongPolling, settings.experimentalAutoDetectLongPolling, settings.useFetchStreams);
}
  26134. /**
  26135. * @license
  26136. * Copyright 2020 Google LLC
  26137. *
  26138. * Licensed under the Apache License, Version 2.0 (the "License");
  26139. * you may not use this file except in compliance with the License.
  26140. * You may obtain a copy of the License at
  26141. *
  26142. * http://www.apache.org/licenses/LICENSE-2.0
  26143. *
  26144. * Unless required by applicable law or agreed to in writing, software
  26145. * distributed under the License is distributed on an "AS IS" BASIS,
  26146. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26147. * See the License for the specific language governing permissions and
  26148. * limitations under the License.
  26149. */
// settings() defaults:
// Production Firestore endpoint, used when no custom host is configured.
const DEFAULT_HOST = 'firestore.googleapis.com';
const DEFAULT_SSL = true;
  26153. /**
  26154. * A concrete type describing all the values that can be applied via a
  26155. * user-supplied `FirestoreSettings` object. This is a separate type so that
  26156. * defaults can be supplied and the value can be checked for equality.
  26157. */
class FirestoreSettingsImpl {
    /**
     * Normalizes and validates a user-supplied settings object, applying
     * defaults for host, ssl, and cache size.
     * Throws INVALID_ARGUMENT if `ssl` is given without `host`, if
     * `cacheSizeBytes` is below the minimum, or if both long-polling
     * options are set together.
     */
    constructor(settings) {
        var _a;
        if (settings.host === undefined) {
            // `ssl` only makes sense together with an explicit custom host.
            if (settings.ssl !== undefined) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, "Can't provide ssl option if host option is not set");
            }
            this.host = DEFAULT_HOST;
            this.ssl = DEFAULT_SSL;
        }
        else {
            this.host = settings.host;
            this.ssl = (_a = settings.ssl) !== null && _a !== void 0 ? _a : DEFAULT_SSL;
        }
        this.credentials = settings.credentials;
        this.ignoreUndefinedProperties = !!settings.ignoreUndefinedProperties;
        if (settings.cacheSizeBytes === undefined) {
            this.cacheSizeBytes = LRU_DEFAULT_CACHE_SIZE_BYTES;
        }
        else {
            // LRU_COLLECTION_DISABLED is a sentinel that bypasses the minimum check.
            if (settings.cacheSizeBytes !== LRU_COLLECTION_DISABLED &&
                settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `cacheSizeBytes must be at least ${LRU_MINIMUM_CACHE_SIZE_BYTES}`);
            }
            else {
                this.cacheSizeBytes = settings.cacheSizeBytes;
            }
        }
        this.experimentalForceLongPolling = !!settings.experimentalForceLongPolling;
        this.experimentalAutoDetectLongPolling =
            !!settings.experimentalAutoDetectLongPolling;
        this.useFetchStreams = !!settings.useFetchStreams;
        // The two long-polling modes are mutually exclusive.
        validateIsNotUsedTogether('experimentalForceLongPolling', settings.experimentalForceLongPolling, 'experimentalAutoDetectLongPolling', settings.experimentalAutoDetectLongPolling);
    }
    /** Field-by-field equality; note `credentials` is compared by reference. */
    isEqual(other) {
        return (this.host === other.host &&
            this.ssl === other.ssl &&
            this.credentials === other.credentials &&
            this.cacheSizeBytes === other.cacheSizeBytes &&
            this.experimentalForceLongPolling ===
                other.experimentalForceLongPolling &&
            this.experimentalAutoDetectLongPolling ===
                other.experimentalAutoDetectLongPolling &&
            this.ignoreUndefinedProperties === other.ignoreUndefinedProperties &&
            this.useFetchStreams === other.useFetchStreams);
    }
}
  26205. /**
  26206. * @license
  26207. * Copyright 2020 Google LLC
  26208. *
  26209. * Licensed under the Apache License, Version 2.0 (the "License");
  26210. * you may not use this file except in compliance with the License.
  26211. * You may obtain a copy of the License at
  26212. *
  26213. * http://www.apache.org/licenses/LICENSE-2.0
  26214. *
  26215. * Unless required by applicable law or agreed to in writing, software
  26216. * distributed under the License is distributed on an "AS IS" BASIS,
  26217. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26218. * See the License for the specific language governing permissions and
  26219. * limitations under the License.
  26220. */
  26221. /**
  26222. * The Cloud Firestore service interface.
  26223. *
  26224. * Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
  26225. */
  26226. class Firestore$1 {
  26227. /** @hideconstructor */
  26228. constructor(_authCredentials, _appCheckCredentials, _databaseId, _app) {
  26229. this._authCredentials = _authCredentials;
  26230. this._appCheckCredentials = _appCheckCredentials;
  26231. this._databaseId = _databaseId;
  26232. this._app = _app;
  26233. /**
  26234. * Whether it's a Firestore or Firestore Lite instance.
  26235. */
  26236. this.type = 'firestore-lite';
  26237. this._persistenceKey = '(lite)';
  26238. this._settings = new FirestoreSettingsImpl({});
  26239. this._settingsFrozen = false;
  26240. }
  26241. /**
  26242. * The {@link @firebase/app#FirebaseApp} associated with this `Firestore` service
  26243. * instance.
  26244. */
  26245. get app() {
  26246. if (!this._app) {
  26247. throw new FirestoreError(Code.FAILED_PRECONDITION, "Firestore was not initialized using the Firebase SDK. 'app' is " +
  26248. 'not available');
  26249. }
  26250. return this._app;
  26251. }
  26252. get _initialized() {
  26253. return this._settingsFrozen;
  26254. }
  26255. get _terminated() {
  26256. return this._terminateTask !== undefined;
  26257. }
  26258. _setSettings(settings) {
  26259. if (this._settingsFrozen) {
  26260. throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and its settings can no longer ' +
  26261. 'be changed. You can only modify settings before calling any other ' +
  26262. 'methods on a Firestore object.');
  26263. }
  26264. this._settings = new FirestoreSettingsImpl(settings);
  26265. if (settings.credentials !== undefined) {
  26266. this._authCredentials = makeAuthCredentialsProvider(settings.credentials);
  26267. }
  26268. }
  26269. _getSettings() {
  26270. return this._settings;
  26271. }
  26272. _freezeSettings() {
  26273. this._settingsFrozen = true;
  26274. return this._settings;
  26275. }
  26276. _delete() {
  26277. if (!this._terminateTask) {
  26278. this._terminateTask = this._terminate();
  26279. }
  26280. return this._terminateTask;
  26281. }
  26282. /** Returns a JSON-serializable representation of this `Firestore` instance. */
  26283. toJSON() {
  26284. return {
  26285. app: this._app,
  26286. databaseId: this._databaseId,
  26287. settings: this._settings
  26288. };
  26289. }
  26290. /**
  26291. * Terminates all components used by this client. Subclasses can override
  26292. * this method to clean up their own dependencies, but must also call this
  26293. * method.
  26294. *
  26295. * Only ever called once.
  26296. */
  26297. _terminate() {
  26298. removeComponents(this);
  26299. return Promise.resolve();
  26300. }
  26301. }
  26302. /**
  26303. * Modify this instance to communicate with the Cloud Firestore emulator.
  26304. *
  26305. * Note: This must be called before this instance has been used to do any
  26306. * operations.
  26307. *
  26308. * @param firestore - The `Firestore` instance to configure to connect to the
  26309. * emulator.
  26310. * @param host - the emulator host (ex: localhost).
  26311. * @param port - the emulator port (ex: 9000).
  26312. * @param options.mockUserToken - the mock auth token to use for unit testing
  26313. * Security Rules.
  26314. */
  26315. function connectFirestoreEmulator(firestore, host, port, options = {}) {
  26316. var _a;
  26317. firestore = cast(firestore, Firestore$1);
  26318. const settings = firestore._getSettings();
  26319. if (settings.host !== DEFAULT_HOST && settings.host !== host) {
  26320. logWarn('Host has been set in both settings() and useEmulator(), emulator host ' +
  26321. 'will be used');
  26322. }
  26323. firestore._setSettings(Object.assign(Object.assign({}, settings), { host: `${host}:${port}`, ssl: false }));
  26324. if (options.mockUserToken) {
  26325. let token;
  26326. let user;
  26327. if (typeof options.mockUserToken === 'string') {
  26328. token = options.mockUserToken;
  26329. user = User.MOCK_USER;
  26330. }
  26331. else {
  26332. // Let createMockUserToken validate first (catches common mistakes like
  26333. // invalid field "uid" and missing field "sub" / "user_id".)
  26334. token = createMockUserToken(options.mockUserToken, (_a = firestore._app) === null || _a === void 0 ? void 0 : _a.options.projectId);
  26335. const uid = options.mockUserToken.sub || options.mockUserToken.user_id;
  26336. if (!uid) {
  26337. throw new FirestoreError(Code.INVALID_ARGUMENT, "mockUserToken must contain 'sub' or 'user_id' field!");
  26338. }
  26339. user = new User(uid);
  26340. }
  26341. firestore._authCredentials = new EmulatorAuthCredentialsProvider(new OAuthToken(token, user));
  26342. }
  26343. }
  26344. /**
  26345. * @license
  26346. * Copyright 2020 Google LLC
  26347. *
  26348. * Licensed under the Apache License, Version 2.0 (the "License");
  26349. * you may not use this file except in compliance with the License.
  26350. * You may obtain a copy of the License at
  26351. *
  26352. * http://www.apache.org/licenses/LICENSE-2.0
  26353. *
  26354. * Unless required by applicable law or agreed to in writing, software
  26355. * distributed under the License is distributed on an "AS IS" BASIS,
  26356. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26357. * See the License for the specific language governing permissions and
  26358. * limitations under the License.
  26359. */
  26360. /**
  26361. * A `DocumentReference` refers to a document location in a Firestore database
  26362. * and can be used to write, read, or listen to the location. The document at
  26363. * the referenced location may or may not exist.
  26364. */
  26365. class DocumentReference {
  26366. /** @hideconstructor */
  26367. constructor(firestore,
  26368. /**
  26369. * If provided, the `FirestoreDataConverter` associated with this instance.
  26370. */
  26371. converter, _key) {
  26372. this.converter = converter;
  26373. this._key = _key;
  26374. /** The type of this Firestore reference. */
  26375. this.type = 'document';
  26376. this.firestore = firestore;
  26377. }
  26378. get _path() {
  26379. return this._key.path;
  26380. }
  26381. /**
  26382. * The document's identifier within its collection.
  26383. */
  26384. get id() {
  26385. return this._key.path.lastSegment();
  26386. }
  26387. /**
  26388. * A string representing the path of the referenced document (relative
  26389. * to the root of the database).
  26390. */
  26391. get path() {
  26392. return this._key.path.canonicalString();
  26393. }
  26394. /**
  26395. * The collection this `DocumentReference` belongs to.
  26396. */
  26397. get parent() {
  26398. return new CollectionReference(this.firestore, this.converter, this._key.path.popLast());
  26399. }
  26400. withConverter(converter) {
  26401. return new DocumentReference(this.firestore, converter, this._key);
  26402. }
  26403. }
  26404. /**
  26405. * A `Query` refers to a query which you can read or listen to. You can also
  26406. * construct refined `Query` objects by adding filters and ordering.
  26407. */
  26408. class Query {
  26409. // This is the lite version of the Query class in the main SDK.
  26410. /** @hideconstructor protected */
  26411. constructor(firestore,
  26412. /**
  26413. * If provided, the `FirestoreDataConverter` associated with this instance.
  26414. */
  26415. converter, _query) {
  26416. this.converter = converter;
  26417. this._query = _query;
  26418. /** The type of this Firestore reference. */
  26419. this.type = 'query';
  26420. this.firestore = firestore;
  26421. }
  26422. withConverter(converter) {
  26423. return new Query(this.firestore, converter, this._query);
  26424. }
  26425. }
  26426. /**
  26427. * A `CollectionReference` object can be used for adding documents, getting
  26428. * document references, and querying for documents (using {@link query}).
  26429. */
  26430. class CollectionReference extends Query {
  26431. /** @hideconstructor */
  26432. constructor(firestore, converter, _path) {
  26433. super(firestore, converter, newQueryForPath(_path));
  26434. this._path = _path;
  26435. /** The type of this Firestore reference. */
  26436. this.type = 'collection';
  26437. }
  26438. /** The collection's identifier. */
  26439. get id() {
  26440. return this._query.path.lastSegment();
  26441. }
  26442. /**
  26443. * A string representing the path of the referenced collection (relative
  26444. * to the root of the database).
  26445. */
  26446. get path() {
  26447. return this._query.path.canonicalString();
  26448. }
  26449. /**
  26450. * A reference to the containing `DocumentReference` if this is a
  26451. * subcollection. If this isn't a subcollection, the reference is null.
  26452. */
  26453. get parent() {
  26454. const parentPath = this._path.popLast();
  26455. if (parentPath.isEmpty()) {
  26456. return null;
  26457. }
  26458. else {
  26459. return new DocumentReference(this.firestore,
  26460. /* converter= */ null, new DocumentKey(parentPath));
  26461. }
  26462. }
  26463. withConverter(converter) {
  26464. return new CollectionReference(this.firestore, converter, this._path);
  26465. }
  26466. }
  26467. function collection(parent, path, ...pathSegments) {
  26468. parent = getModularInstance(parent);
  26469. validateNonEmptyArgument('collection', 'path', path);
  26470. if (parent instanceof Firestore$1) {
  26471. const absolutePath = ResourcePath.fromString(path, ...pathSegments);
  26472. validateCollectionPath(absolutePath);
  26473. return new CollectionReference(parent, /* converter= */ null, absolutePath);
  26474. }
  26475. else {
  26476. if (!(parent instanceof DocumentReference) &&
  26477. !(parent instanceof CollectionReference)) {
  26478. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to collection() to be a CollectionReference, ' +
  26479. 'a DocumentReference or FirebaseFirestore');
  26480. }
  26481. const absolutePath = parent._path.child(ResourcePath.fromString(path, ...pathSegments));
  26482. validateCollectionPath(absolutePath);
  26483. return new CollectionReference(parent.firestore,
  26484. /* converter= */ null, absolutePath);
  26485. }
  26486. }
  26487. // TODO(firestorelite): Consider using ErrorFactory -
  26488. // https://github.com/firebase/firebase-js-sdk/blob/0131e1f/packages/util/src/errors.ts#L106
  26489. /**
  26490. * Creates and returns a new `Query` instance that includes all documents in the
  26491. * database that are contained in a collection or subcollection with the
  26492. * given `collectionId`.
  26493. *
  26494. * @param firestore - A reference to the root `Firestore` instance.
  26495. * @param collectionId - Identifies the collections to query over. Every
  26496. * collection or subcollection with this ID as the last segment of its path
  26497. * will be included. Cannot contain a slash.
  26498. * @returns The created `Query`.
  26499. */
  26500. function collectionGroup(firestore, collectionId) {
  26501. firestore = cast(firestore, Firestore$1);
  26502. validateNonEmptyArgument('collectionGroup', 'collection id', collectionId);
  26503. if (collectionId.indexOf('/') >= 0) {
  26504. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid collection ID '${collectionId}' passed to function ` +
  26505. `collectionGroup(). Collection IDs must not contain '/'.`);
  26506. }
  26507. return new Query(firestore,
  26508. /* converter= */ null, newQueryForCollectionGroup(collectionId));
  26509. }
  26510. function doc(parent, path, ...pathSegments) {
  26511. parent = getModularInstance(parent);
  26512. // We allow omission of 'pathString' but explicitly prohibit passing in both
  26513. // 'undefined' and 'null'.
  26514. if (arguments.length === 1) {
  26515. path = AutoId.newId();
  26516. }
  26517. validateNonEmptyArgument('doc', 'path', path);
  26518. if (parent instanceof Firestore$1) {
  26519. const absolutePath = ResourcePath.fromString(path, ...pathSegments);
  26520. validateDocumentPath(absolutePath);
  26521. return new DocumentReference(parent,
  26522. /* converter= */ null, new DocumentKey(absolutePath));
  26523. }
  26524. else {
  26525. if (!(parent instanceof DocumentReference) &&
  26526. !(parent instanceof CollectionReference)) {
  26527. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Expected first argument to collection() to be a CollectionReference, ' +
  26528. 'a DocumentReference or FirebaseFirestore');
  26529. }
  26530. const absolutePath = parent._path.child(ResourcePath.fromString(path, ...pathSegments));
  26531. validateDocumentPath(absolutePath);
  26532. return new DocumentReference(parent.firestore, parent instanceof CollectionReference ? parent.converter : null, new DocumentKey(absolutePath));
  26533. }
  26534. }
  26535. /**
  26536. * Returns true if the provided references are equal.
  26537. *
  26538. * @param left - A reference to compare.
  26539. * @param right - A reference to compare.
  26540. * @returns true if the references point to the same location in the same
  26541. * Firestore database.
  26542. */
  26543. function refEqual(left, right) {
  26544. left = getModularInstance(left);
  26545. right = getModularInstance(right);
  26546. if ((left instanceof DocumentReference ||
  26547. left instanceof CollectionReference) &&
  26548. (right instanceof DocumentReference || right instanceof CollectionReference)) {
  26549. return (left.firestore === right.firestore &&
  26550. left.path === right.path &&
  26551. left.converter === right.converter);
  26552. }
  26553. return false;
  26554. }
  26555. /**
  26556. * Returns true if the provided queries point to the same collection and apply
  26557. * the same constraints.
  26558. *
  26559. * @param left - A `Query` to compare.
  26560. * @param right - A `Query` to compare.
  26561. * @returns true if the references point to the same location in the same
  26562. * Firestore database.
  26563. */
  26564. function queryEqual(left, right) {
  26565. left = getModularInstance(left);
  26566. right = getModularInstance(right);
  26567. if (left instanceof Query && right instanceof Query) {
  26568. return (left.firestore === right.firestore &&
  26569. queryEquals(left._query, right._query) &&
  26570. left.converter === right.converter);
  26571. }
  26572. return false;
  26573. }
  26574. /**
  26575. * @license
  26576. * Copyright 2020 Google LLC
  26577. *
  26578. * Licensed under the Apache License, Version 2.0 (the "License");
  26579. * you may not use this file except in compliance with the License.
  26580. * You may obtain a copy of the License at
  26581. *
  26582. * http://www.apache.org/licenses/LICENSE-2.0
  26583. *
  26584. * Unless required by applicable law or agreed to in writing, software
  26585. * distributed under the License is distributed on an "AS IS" BASIS,
  26586. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26587. * See the License for the specific language governing permissions and
  26588. * limitations under the License.
  26589. */
const LOG_TAG = 'AsyncQueue';
/**
 * Serial execution queue for the Firestore client. Every enqueued operation
 * runs after the previous one settles; the first failure is stored in
 * `this.failure` and poisons the queue so subsequent enqueues fail fast.
 * Also manages retryable operations (retried with exponential backoff) and
 * delayed (timer-scheduled) operations.
 */
class AsyncQueueImpl {
    constructor() {
        // The last promise in the queue.
        this.tail = Promise.resolve();
        // A list of retryable operations. Retryable operations are run in order and
        // retried with backoff.
        this.retryableOps = [];
        // Is this AsyncQueue being shut down? Once it is set to true, it will not
        // be changed again.
        this._isShuttingDown = false;
        // Operations scheduled to be queued in the future. Operations are
        // automatically removed after they are run or canceled.
        this.delayedOperations = [];
        // visible for testing
        this.failure = null;
        // Flag set while there's an outstanding AsyncQueue operation, used for
        // assertion sanity-checks.
        this.operationInProgress = false;
        // Enabled during shutdown on Safari to prevent future access to IndexedDB.
        this.skipNonRestrictedTasks = false;
        // List of TimerIds to fast-forward delays for.
        this.timerIdsToSkip = [];
        // Backoff timer used to schedule retries for retryable operations
        this.backoff = new ExponentialBackoff(this, "async_queue_retry" /* TimerId.AsyncQueueRetry */);
        // Visibility handler that triggers an immediate retry of all retryable
        // operations. Meant to speed up recovery when we regain file system access
        // after page comes into foreground.
        this.visibilityHandler = () => {
            this.backoff.skipBackoff();
        };
    }
    /** Whether `enterRestrictedMode()` has been called (shutdown started). */
    get isShuttingDown() {
        return this._isShuttingDown;
    }
    /**
     * Adds a new operation to the queue without waiting for it to complete (i.e.
     * we ignore the Promise result).
     */
    enqueueAndForget(op) {
        // eslint-disable-next-line @typescript-eslint/no-floating-promises
        this.enqueue(op);
    }
    /**
     * Like `enqueueAndForget`, but bypasses the shutdown check so cleanup
     * work can still run while the queue is in restricted mode.
     */
    enqueueAndForgetEvenWhileRestricted(op) {
        this.verifyNotFailed();
        // eslint-disable-next-line @typescript-eslint/no-floating-promises
        this.enqueueInternal(op);
    }
    /**
     * Puts the queue into shutdown ("restricted") mode. Idempotent: once
     * shutting down, `skipNonRestrictedTasks` is never changed again.
     */
    enterRestrictedMode(purgeExistingTasks) {
        if (!this._isShuttingDown) {
            this._isShuttingDown = true;
            this.skipNonRestrictedTasks = purgeExistingTasks || false;
        }
    }
    /**
     * Enqueues `op` and returns a promise for its result. During shutdown the
     * returned promise intentionally never settles so callers cannot observe
     * results from a terminating client.
     */
    enqueue(op) {
        this.verifyNotFailed();
        if (this._isShuttingDown) {
            // Return a Promise which never resolves.
            return new Promise(() => { });
        }
        // Create a deferred Promise that we can return to the callee. This
        // allows us to return a "hanging Promise" only to the callee and still
        // advance the queue even when the operation is not run.
        const task = new Deferred();
        return this.enqueueInternal(() => {
            if (this._isShuttingDown && this.skipNonRestrictedTasks) {
                // We do not resolve 'task'
                return Promise.resolve();
            }
            op().then(task.resolve, task.reject);
            return task.promise;
        }).then(() => task.promise);
    }
    /** Appends `op` to the retryable list and kicks off draining of it. */
    enqueueRetryable(op) {
        this.enqueueAndForget(() => {
            this.retryableOps.push(op);
            return this.retryNextOp();
        });
    }
    /**
     * Runs the next operation from the retryable queue. If the operation fails,
     * reschedules with backoff.
     */
    async retryNextOp() {
        if (this.retryableOps.length === 0) {
            return;
        }
        try {
            await this.retryableOps[0]();
            this.retryableOps.shift();
            this.backoff.reset();
        }
        catch (e) {
            if (isIndexedDbTransactionError(e)) {
                logDebug(LOG_TAG, 'Operation failed with retryable error: ' + e);
            }
            else {
                throw e; // Failure will be handled by AsyncQueue
            }
        }
        if (this.retryableOps.length > 0) {
            // If there are additional operations, we re-schedule `retryNextOp()`.
            // This is necessary to run retryable operations that failed during
            // their initial attempt since we don't know whether they are already
            // enqueued. If, for example, `op1`, `op2`, `op3` are enqueued and `op1`
            // needs to be re-run, we will run `op1`, `op1`, `op2` using the
            // already enqueued calls to `retryNextOp()`. `op3()` will then run in the
            // call scheduled here.
            // Since `backoffAndRun()` cancels an existing backoff and schedules a
            // new backoff on every call, there is only ever a single additional
            // operation in the queue.
            this.backoff.backoffAndRun(() => this.retryNextOp());
        }
    }
    /**
     * Chains `op` onto `tail` so operations execute strictly in FIFO order.
     * A failing op records `this.failure` and rethrows, leaving `tail`
     * rejected so all later chained work short-circuits.
     */
    enqueueInternal(op) {
        const newTail = this.tail.then(() => {
            this.operationInProgress = true;
            return op()
                .catch((error) => {
                this.failure = error;
                this.operationInProgress = false;
                const message = getMessageOrStack(error);
                logError('INTERNAL UNHANDLED ERROR: ', message);
                // Re-throw the error so that this.tail becomes a rejected Promise and
                // all further attempts to chain (via .then) will just short-circuit
                // and return the rejected Promise.
                throw error;
            })
                .then(result => {
                this.operationInProgress = false;
                return result;
            });
        });
        this.tail = newTail;
        return newTail;
    }
    /**
     * Schedules `op` to run after `delayMs` (subject to test-time skipping)
     * and returns the created `DelayedOperation` handle.
     */
    enqueueAfterDelay(timerId, delayMs, op) {
        this.verifyNotFailed();
        // Fast-forward delays for timerIds that have been overriden.
        if (this.timerIdsToSkip.indexOf(timerId) > -1) {
            delayMs = 0;
        }
        const delayedOp = DelayedOperation.createAndSchedule(this, timerId, delayMs, op, removedOp => this.removeDelayedOperation(removedOp));
        this.delayedOperations.push(delayedOp);
        return delayedOp;
    }
    /** Asserts the queue has not already failed; `fail()` throws if it has. */
    verifyNotFailed() {
        if (this.failure) {
            fail();
        }
    }
    // Intentionally a no-op in this build; kept for interface compatibility
    // with builds that assert `operationInProgress` here.
    verifyOperationInProgress() {
    }
    /**
     * Waits until all currently queued tasks are finished executing. Delayed
     * operations are not run.
     */
    async drain() {
        // Operations in the queue prior to draining may have enqueued additional
        // operations. Keep draining the queue until the tail is no longer advanced,
        // which indicates that no more new operations were enqueued and that all
        // operations were executed.
        let currentTail;
        do {
            currentTail = this.tail;
            await currentTail;
        } while (currentTail !== this.tail);
    }
    /**
     * For Tests: Determine if a delayed operation with a particular TimerId
     * exists.
     */
    containsDelayedOperation(timerId) {
        for (const op of this.delayedOperations) {
            if (op.timerId === timerId) {
                return true;
            }
        }
        return false;
    }
    /**
     * For Tests: Runs some or all delayed operations early.
     *
     * @param lastTimerId - Delayed operations up to and including this TimerId
     * will be drained. Pass TimerId.All to run all delayed operations.
     * @returns a Promise that resolves once all operations have been run.
     */
    runAllDelayedOperationsUntil(lastTimerId) {
        // Note that draining may generate more delayed ops, so we do that first.
        return this.drain().then(() => {
            // Run ops in the same order they'd run if they ran naturally.
            this.delayedOperations.sort((a, b) => a.targetTimeMs - b.targetTimeMs);
            for (const op of this.delayedOperations) {
                op.skipDelay();
                if (lastTimerId !== "all" /* TimerId.All */ && op.timerId === lastTimerId) {
                    break;
                }
            }
            return this.drain();
        });
    }
    /**
     * For Tests: Skip all subsequent delays for a timer id.
     */
    skipDelaysForTimerId(timerId) {
        this.timerIdsToSkip.push(timerId);
    }
    /** Called once a DelayedOperation is run or canceled. */
    removeDelayedOperation(op) {
        // NOTE: indexOf / splice are O(n), but delayedOperations is expected to be small.
        const index = this.delayedOperations.indexOf(op);
        this.delayedOperations.splice(index, 1);
    }
}
/** Factory for the default `AsyncQueue` implementation. */
function newAsyncQueue() {
    return new AsyncQueueImpl();
}
  26807. /**
  26808. * Chrome includes Error.message in Error.stack. Other browsers do not.
  26809. * This returns expected output of message + stack when available.
  26810. * @param error - Error or FirestoreError
  26811. */
  26812. function getMessageOrStack(error) {
  26813. let message = error.message || '';
  26814. if (error.stack) {
  26815. if (error.stack.includes(error.message)) {
  26816. message = error.stack;
  26817. }
  26818. else {
  26819. message = error.message + '\n' + error.stack;
  26820. }
  26821. }
  26822. return message;
  26823. }
  26824. /**
  26825. * @license
  26826. * Copyright 2020 Google LLC
  26827. *
  26828. * Licensed under the Apache License, Version 2.0 (the "License");
  26829. * you may not use this file except in compliance with the License.
  26830. * You may obtain a copy of the License at
  26831. *
  26832. * http://www.apache.org/licenses/LICENSE-2.0
  26833. *
  26834. * Unless required by applicable law or agreed to in writing, software
  26835. * distributed under the License is distributed on an "AS IS" BASIS,
  26836. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26837. * See the License for the specific language governing permissions and
  26838. * limitations under the License.
  26839. */
  26840. /**
  26841. * Represents the task of loading a Firestore bundle. It provides progress of bundle
  26842. * loading, as well as task completion and error events.
  26843. *
  26844. * The API is compatible with `Promise<LoadBundleTaskProgress>`.
  26845. */
  26846. class LoadBundleTask {
  26847. constructor() {
  26848. this._progressObserver = {};
  26849. this._taskCompletionResolver = new Deferred();
  26850. this._lastProgress = {
  26851. taskState: 'Running',
  26852. totalBytes: 0,
  26853. totalDocuments: 0,
  26854. bytesLoaded: 0,
  26855. documentsLoaded: 0
  26856. };
  26857. }
  26858. /**
  26859. * Registers functions to listen to bundle loading progress events.
  26860. * @param next - Called when there is a progress update from bundle loading. Typically `next` calls occur
  26861. * each time a Firestore document is loaded from the bundle.
  26862. * @param error - Called when an error occurs during bundle loading. The task aborts after reporting the
  26863. * error, and there should be no more updates after this.
  26864. * @param complete - Called when the loading task is complete.
  26865. */
  26866. onProgress(next, error, complete) {
  26867. this._progressObserver = {
  26868. next,
  26869. error,
  26870. complete
  26871. };
  26872. }
  26873. /**
  26874. * Implements the `Promise<LoadBundleTaskProgress>.catch` interface.
  26875. *
  26876. * @param onRejected - Called when an error occurs during bundle loading.
  26877. */
  26878. catch(onRejected) {
  26879. return this._taskCompletionResolver.promise.catch(onRejected);
  26880. }
  26881. /**
  26882. * Implements the `Promise<LoadBundleTaskProgress>.then` interface.
  26883. *
  26884. * @param onFulfilled - Called on the completion of the loading task with a final `LoadBundleTaskProgress` update.
  26885. * The update will always have its `taskState` set to `"Success"`.
  26886. * @param onRejected - Called when an error occurs during bundle loading.
  26887. */
  26888. then(onFulfilled, onRejected) {
  26889. return this._taskCompletionResolver.promise.then(onFulfilled, onRejected);
  26890. }
  26891. /**
  26892. * Notifies all observers that bundle loading has completed, with a provided
  26893. * `LoadBundleTaskProgress` object.
  26894. *
  26895. * @private
  26896. */
  26897. _completeWith(progress) {
  26898. this._updateProgress(progress);
  26899. if (this._progressObserver.complete) {
  26900. this._progressObserver.complete();
  26901. }
  26902. this._taskCompletionResolver.resolve(progress);
  26903. }
  26904. /**
  26905. * Notifies all observers that bundle loading has failed, with a provided
  26906. * `Error` as the reason.
  26907. *
  26908. * @private
  26909. */
  26910. _failWith(error) {
  26911. this._lastProgress.taskState = 'Error';
  26912. if (this._progressObserver.next) {
  26913. this._progressObserver.next(this._lastProgress);
  26914. }
  26915. if (this._progressObserver.error) {
  26916. this._progressObserver.error(error);
  26917. }
  26918. this._taskCompletionResolver.reject(error);
  26919. }
  26920. /**
  26921. * Notifies a progress update of loading a bundle.
  26922. * @param progress - The new progress.
  26923. *
  26924. * @private
  26925. */
  26926. _updateProgress(progress) {
  26927. this._lastProgress = progress;
  26928. if (this._progressObserver.next) {
  26929. this._progressObserver.next(progress);
  26930. }
  26931. }
  26932. }
  26933. /**
  26934. * @license
  26935. * Copyright 2020 Google LLC
  26936. *
  26937. * Licensed under the Apache License, Version 2.0 (the "License");
  26938. * you may not use this file except in compliance with the License.
  26939. * You may obtain a copy of the License at
  26940. *
  26941. * http://www.apache.org/licenses/LICENSE-2.0
  26942. *
  26943. * Unless required by applicable law or agreed to in writing, software
  26944. * distributed under the License is distributed on an "AS IS" BASIS,
  26945. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  26946. * See the License for the specific language governing permissions and
  26947. * limitations under the License.
  26948. */
/** DOMException error code constants. */
// Legacy numeric DOMException `code` values; modern runtimes also expose a
// string `name`, but the numeric code remains the portable way to match.
const DOM_EXCEPTION_INVALID_STATE = 11;
const DOM_EXCEPTION_ABORTED = 20;
const DOM_EXCEPTION_QUOTA_EXCEEDED = 22;
/**
 * Constant used to indicate the LRU garbage collection should be disabled.
 * Set this value as the `cacheSizeBytes` on the settings passed to the
 * {@link Firestore} instance.
 */
const CACHE_SIZE_UNLIMITED = LRU_COLLECTION_DISABLED;
  26959. /**
  26960. * The Cloud Firestore service interface.
  26961. *
  26962. * Do not call this constructor directly. Instead, use {@link (getFirestore:1)}.
  26963. */
  26964. class Firestore extends Firestore$1 {
  26965. /** @hideconstructor */
  26966. constructor(authCredentialsProvider, appCheckCredentialsProvider, databaseId, app) {
  26967. super(authCredentialsProvider, appCheckCredentialsProvider, databaseId, app);
  26968. /**
  26969. * Whether it's a {@link Firestore} or Firestore Lite instance.
  26970. */
  26971. this.type = 'firestore';
  26972. this._queue = newAsyncQueue();
  26973. this._persistenceKey = (app === null || app === void 0 ? void 0 : app.name) || '[DEFAULT]';
  26974. }
  26975. _terminate() {
  26976. if (!this._firestoreClient) {
  26977. // The client must be initialized to ensure that all subsequent API
  26978. // usage throws an exception.
  26979. configureFirestore(this);
  26980. }
  26981. return this._firestoreClient.terminate();
  26982. }
  26983. }
  26984. /**
  26985. * Initializes a new instance of {@link Firestore} with the provided settings.
  26986. * Can only be called before any other function, including
  26987. * {@link (getFirestore:1)}. If the custom settings are empty, this function is
  26988. * equivalent to calling {@link (getFirestore:1)}.
  26989. *
  26990. * @param app - The {@link @firebase/app#FirebaseApp} with which the {@link Firestore} instance will
  26991. * be associated.
  26992. * @param settings - A settings object to configure the {@link Firestore} instance.
  26993. * @param databaseId - The name of database.
  26994. * @returns A newly initialized {@link Firestore} instance.
  26995. */
  26996. function initializeFirestore(app, settings, databaseId) {
  26997. if (!databaseId) {
  26998. databaseId = DEFAULT_DATABASE_NAME;
  26999. }
  27000. const provider = _getProvider(app, 'firestore');
  27001. if (provider.isInitialized(databaseId)) {
  27002. const existingInstance = provider.getImmediate({
  27003. identifier: databaseId
  27004. });
  27005. const initialSettings = provider.getOptions(databaseId);
  27006. if (deepEqual(initialSettings, settings)) {
  27007. return existingInstance;
  27008. }
  27009. else {
  27010. throw new FirestoreError(Code.FAILED_PRECONDITION, 'initializeFirestore() has already been called with ' +
  27011. 'different options. To avoid this error, call initializeFirestore() with the ' +
  27012. 'same options as when it was originally called, or call getFirestore() to return the' +
  27013. ' already initialized instance.');
  27014. }
  27015. }
  27016. if (settings.cacheSizeBytes !== undefined &&
  27017. settings.cacheSizeBytes !== CACHE_SIZE_UNLIMITED &&
  27018. settings.cacheSizeBytes < LRU_MINIMUM_CACHE_SIZE_BYTES) {
  27019. throw new FirestoreError(Code.INVALID_ARGUMENT, `cacheSizeBytes must be at least ${LRU_MINIMUM_CACHE_SIZE_BYTES}`);
  27020. }
  27021. return provider.initialize({
  27022. options: settings,
  27023. instanceIdentifier: databaseId
  27024. });
  27025. }
  27026. function getFirestore(appOrDatabaseId, optionalDatabaseId) {
  27027. const app = typeof appOrDatabaseId === 'object' ? appOrDatabaseId : getApp();
  27028. const databaseId = typeof appOrDatabaseId === 'string'
  27029. ? appOrDatabaseId
  27030. : optionalDatabaseId || DEFAULT_DATABASE_NAME;
  27031. const db = _getProvider(app, 'firestore').getImmediate({
  27032. identifier: databaseId
  27033. });
  27034. if (!db._initialized) {
  27035. const emulator = getDefaultEmulatorHostnameAndPort('firestore');
  27036. if (emulator) {
  27037. connectFirestoreEmulator(db, ...emulator);
  27038. }
  27039. }
  27040. return db;
  27041. }
  27042. /**
  27043. * @internal
  27044. */
  27045. function ensureFirestoreConfigured(firestore) {
  27046. if (!firestore._firestoreClient) {
  27047. configureFirestore(firestore);
  27048. }
  27049. firestore._firestoreClient.verifyNotTerminated();
  27050. return firestore._firestoreClient;
  27051. }
  27052. function configureFirestore(firestore) {
  27053. var _a;
  27054. const settings = firestore._freezeSettings();
  27055. const databaseInfo = makeDatabaseInfo(firestore._databaseId, ((_a = firestore._app) === null || _a === void 0 ? void 0 : _a.options.appId) || '', firestore._persistenceKey, settings);
  27056. firestore._firestoreClient = new FirestoreClient(firestore._authCredentials, firestore._appCheckCredentials, firestore._queue, databaseInfo);
  27057. }
  27058. /**
  27059. * Attempts to enable persistent storage, if possible.
  27060. *
  27061. * Must be called before any other functions (other than
  27062. * {@link initializeFirestore}, {@link (getFirestore:1)} or
  27063. * {@link clearIndexedDbPersistence}.
  27064. *
  27065. * If this fails, `enableIndexedDbPersistence()` will reject the promise it
  27066. * returns. Note that even after this failure, the {@link Firestore} instance will
  27067. * remain usable, however offline persistence will be disabled.
  27068. *
  27069. * There are several reasons why this can fail, which can be identified by
  27070. * the `code` on the error.
  27071. *
  27072. * * failed-precondition: The app is already open in another browser tab.
  27073. * * unimplemented: The browser is incompatible with the offline
  27074. * persistence implementation.
  27075. *
  27076. * @param firestore - The {@link Firestore} instance to enable persistence for.
  27077. * @param persistenceSettings - Optional settings object to configure
  27078. * persistence.
  27079. * @returns A `Promise` that represents successfully enabling persistent storage.
  27080. */
  27081. function enableIndexedDbPersistence(firestore, persistenceSettings) {
  27082. firestore = cast(firestore, Firestore);
  27083. verifyNotInitialized(firestore);
  27084. const client = ensureFirestoreConfigured(firestore);
  27085. const settings = firestore._freezeSettings();
  27086. const onlineComponentProvider = new OnlineComponentProvider();
  27087. const offlineComponentProvider = new IndexedDbOfflineComponentProvider(onlineComponentProvider, settings.cacheSizeBytes, persistenceSettings === null || persistenceSettings === void 0 ? void 0 : persistenceSettings.forceOwnership);
  27088. return setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider);
  27089. }
  27090. /**
  27091. * Attempts to enable multi-tab persistent storage, if possible. If enabled
  27092. * across all tabs, all operations share access to local persistence, including
  27093. * shared execution of queries and latency-compensated local document updates
  27094. * across all connected instances.
  27095. *
  27096. * If this fails, `enableMultiTabIndexedDbPersistence()` will reject the promise
  27097. * it returns. Note that even after this failure, the {@link Firestore} instance will
  27098. * remain usable, however offline persistence will be disabled.
  27099. *
  27100. * There are several reasons why this can fail, which can be identified by
  27101. * the `code` on the error.
  27102. *
  27103. * * failed-precondition: The app is already open in another browser tab and
  27104. * multi-tab is not enabled.
  27105. * * unimplemented: The browser is incompatible with the offline
  27106. * persistence implementation.
  27107. *
  27108. * @param firestore - The {@link Firestore} instance to enable persistence for.
  27109. * @returns A `Promise` that represents successfully enabling persistent
  27110. * storage.
  27111. */
  27112. function enableMultiTabIndexedDbPersistence(firestore) {
  27113. firestore = cast(firestore, Firestore);
  27114. verifyNotInitialized(firestore);
  27115. const client = ensureFirestoreConfigured(firestore);
  27116. const settings = firestore._freezeSettings();
  27117. const onlineComponentProvider = new OnlineComponentProvider();
  27118. const offlineComponentProvider = new MultiTabOfflineComponentProvider(onlineComponentProvider, settings.cacheSizeBytes);
  27119. return setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider);
  27120. }
  27121. /**
  27122. * Registers both the `OfflineComponentProvider` and `OnlineComponentProvider`.
  27123. * If the operation fails with a recoverable error (see
  27124. * `canRecoverFromIndexedDbError()` below), the returned Promise is rejected
  27125. * but the client remains usable.
  27126. */
  27127. function setPersistenceProviders(client, onlineComponentProvider, offlineComponentProvider) {
  27128. const persistenceResult = new Deferred();
  27129. return client.asyncQueue
  27130. .enqueue(async () => {
  27131. try {
  27132. await setOfflineComponentProvider(client, offlineComponentProvider);
  27133. await setOnlineComponentProvider(client, onlineComponentProvider);
  27134. persistenceResult.resolve();
  27135. }
  27136. catch (e) {
  27137. const error = e;
  27138. if (!canFallbackFromIndexedDbError(error)) {
  27139. throw error;
  27140. }
  27141. logWarn('Error enabling offline persistence. Falling back to ' +
  27142. 'persistence disabled: ' +
  27143. error);
  27144. persistenceResult.reject(error);
  27145. }
  27146. })
  27147. .then(() => persistenceResult.promise);
  27148. }
  27149. /**
  27150. * Decides whether the provided error allows us to gracefully disable
  27151. * persistence (as opposed to crashing the client).
  27152. */
  27153. function canFallbackFromIndexedDbError(error) {
  27154. if (error.name === 'FirebaseError') {
  27155. return (error.code === Code.FAILED_PRECONDITION ||
  27156. error.code === Code.UNIMPLEMENTED);
  27157. }
  27158. else if (typeof DOMException !== 'undefined' &&
  27159. error instanceof DOMException) {
  27160. // There are a few known circumstances where we can open IndexedDb but
  27161. // trying to read/write will fail (e.g. quota exceeded). For
  27162. // well-understood cases, we attempt to detect these and then gracefully
  27163. // fall back to memory persistence.
  27164. // NOTE: Rather than continue to add to this list, we could decide to
  27165. // always fall back, with the risk that we might accidentally hide errors
  27166. // representing actual SDK bugs.
  27167. return (
  27168. // When the browser is out of quota we could get either quota exceeded
  27169. // or an aborted error depending on whether the error happened during
  27170. // schema migration.
  27171. error.code === DOM_EXCEPTION_QUOTA_EXCEEDED ||
  27172. error.code === DOM_EXCEPTION_ABORTED ||
  27173. // Firefox Private Browsing mode disables IndexedDb and returns
  27174. // INVALID_STATE for any usage.
  27175. error.code === DOM_EXCEPTION_INVALID_STATE);
  27176. }
  27177. return true;
  27178. }
  27179. /**
  27180. * Clears the persistent storage. This includes pending writes and cached
  27181. * documents.
  27182. *
  27183. * Must be called while the {@link Firestore} instance is not started (after the app is
  27184. * terminated or when the app is first initialized). On startup, this function
  27185. * must be called before other functions (other than {@link
  27186. * initializeFirestore} or {@link (getFirestore:1)})). If the {@link Firestore}
  27187. * instance is still running, the promise will be rejected with the error code
  27188. * of `failed-precondition`.
  27189. *
  27190. * Note: `clearIndexedDbPersistence()` is primarily intended to help write
  27191. * reliable tests that use Cloud Firestore. It uses an efficient mechanism for
  27192. * dropping existing data but does not attempt to securely overwrite or
  27193. * otherwise make cached data unrecoverable. For applications that are sensitive
  27194. * to the disclosure of cached data in between user sessions, we strongly
  27195. * recommend not enabling persistence at all.
  27196. *
  27197. * @param firestore - The {@link Firestore} instance to clear persistence for.
  27198. * @returns A `Promise` that is resolved when the persistent storage is
  27199. * cleared. Otherwise, the promise is rejected with an error.
  27200. */
  27201. function clearIndexedDbPersistence(firestore) {
  27202. if (firestore._initialized && !firestore._terminated) {
  27203. throw new FirestoreError(Code.FAILED_PRECONDITION, 'Persistence can only be cleared before a Firestore instance is ' +
  27204. 'initialized or after it is terminated.');
  27205. }
  27206. const deferred = new Deferred();
  27207. firestore._queue.enqueueAndForgetEvenWhileRestricted(async () => {
  27208. try {
  27209. await indexedDbClearPersistence(indexedDbStoragePrefix(firestore._databaseId, firestore._persistenceKey));
  27210. deferred.resolve();
  27211. }
  27212. catch (e) {
  27213. deferred.reject(e);
  27214. }
  27215. });
  27216. return deferred.promise;
  27217. }
  27218. /**
  27219. * Waits until all currently pending writes for the active user have been
  27220. * acknowledged by the backend.
  27221. *
  27222. * The returned promise resolves immediately if there are no outstanding writes.
  27223. * Otherwise, the promise waits for all previously issued writes (including
  27224. * those written in a previous app session), but it does not wait for writes
  27225. * that were added after the function is called. If you want to wait for
  27226. * additional writes, call `waitForPendingWrites()` again.
  27227. *
  27228. * Any outstanding `waitForPendingWrites()` promises are rejected during user
  27229. * changes.
  27230. *
  27231. * @returns A `Promise` which resolves when all currently pending writes have been
  27232. * acknowledged by the backend.
  27233. */
  27234. function waitForPendingWrites(firestore) {
  27235. firestore = cast(firestore, Firestore);
  27236. const client = ensureFirestoreConfigured(firestore);
  27237. return firestoreClientWaitForPendingWrites(client);
  27238. }
  27239. /**
  27240. * Re-enables use of the network for this {@link Firestore} instance after a prior
  27241. * call to {@link disableNetwork}.
  27242. *
  27243. * @returns A `Promise` that is resolved once the network has been enabled.
  27244. */
  27245. function enableNetwork(firestore) {
  27246. firestore = cast(firestore, Firestore);
  27247. const client = ensureFirestoreConfigured(firestore);
  27248. return firestoreClientEnableNetwork(client);
  27249. }
  27250. /**
  27251. * Disables network usage for this instance. It can be re-enabled via {@link
  27252. * enableNetwork}. While the network is disabled, any snapshot listeners,
  27253. * `getDoc()` or `getDocs()` calls will return results from cache, and any write
  27254. * operations will be queued until the network is restored.
  27255. *
  27256. * @returns A `Promise` that is resolved once the network has been disabled.
  27257. */
  27258. function disableNetwork(firestore) {
  27259. firestore = cast(firestore, Firestore);
  27260. const client = ensureFirestoreConfigured(firestore);
  27261. return firestoreClientDisableNetwork(client);
  27262. }
  27263. /**
  27264. * Terminates the provided {@link Firestore} instance.
  27265. *
  27266. * After calling `terminate()` only the `clearIndexedDbPersistence()` function
  27267. * may be used. Any other function will throw a `FirestoreError`.
  27268. *
  27269. * To restart after termination, create a new instance of FirebaseFirestore with
  27270. * {@link (getFirestore:1)}.
  27271. *
  27272. * Termination does not cancel any pending writes, and any promises that are
  27273. * awaiting a response from the server will not be resolved. If you have
  27274. * persistence enabled, the next time you start this instance, it will resume
  27275. * sending these writes to the server.
  27276. *
  27277. * Note: Under normal circumstances, calling `terminate()` is not required. This
  27278. * function is useful only when you want to force this instance to release all
  27279. * of its resources or in combination with `clearIndexedDbPersistence()` to
  27280. * ensure that all local state is destroyed between test runs.
  27281. *
  27282. * @returns A `Promise` that is resolved when the instance has been successfully
  27283. * terminated.
  27284. */
  27285. function terminate(firestore) {
  27286. _removeServiceInstance(firestore.app, 'firestore', firestore._databaseId.database);
  27287. return firestore._delete();
  27288. }
  27289. /**
  27290. * Loads a Firestore bundle into the local cache.
  27291. *
  27292. * @param firestore - The {@link Firestore} instance to load bundles for.
  27293. * @param bundleData - An object representing the bundle to be loaded. Valid
  27294. * objects are `ArrayBuffer`, `ReadableStream<Uint8Array>` or `string`.
  27295. *
  27296. * @returns A `LoadBundleTask` object, which notifies callers with progress
  27297. * updates, and completion or error events. It can be used as a
  27298. * `Promise<LoadBundleTaskProgress>`.
  27299. */
  27300. function loadBundle(firestore, bundleData) {
  27301. firestore = cast(firestore, Firestore);
  27302. const client = ensureFirestoreConfigured(firestore);
  27303. const resultTask = new LoadBundleTask();
  27304. firestoreClientLoadBundle(client, firestore._databaseId, bundleData, resultTask);
  27305. return resultTask;
  27306. }
  27307. /**
  27308. * Reads a Firestore {@link Query} from local cache, identified by the given
  27309. * name.
  27310. *
  27311. * The named queries are packaged into bundles on the server side (along
  27312. * with resulting documents), and loaded to local cache using `loadBundle`. Once
  27313. * in local cache, use this method to extract a {@link Query} by name.
  27314. *
  27315. * @param firestore - The {@link Firestore} instance to read the query from.
  27316. * @param name - The name of the query.
  27317. * @returns A `Promise` that is resolved with the Query or `null`.
  27318. */
  27319. function namedQuery(firestore, name) {
  27320. firestore = cast(firestore, Firestore);
  27321. const client = ensureFirestoreConfigured(firestore);
  27322. return firestoreClientGetNamedQuery(client, name).then(namedQuery => {
  27323. if (!namedQuery) {
  27324. return null;
  27325. }
  27326. return new Query(firestore, null, namedQuery.query);
  27327. });
  27328. }
  27329. function verifyNotInitialized(firestore) {
  27330. if (firestore._initialized || firestore._terminated) {
  27331. throw new FirestoreError(Code.FAILED_PRECONDITION, 'Firestore has already been started and persistence can no longer be ' +
  27332. 'enabled. You can only enable persistence before calling any other ' +
  27333. 'methods on a Firestore object.');
  27334. }
  27335. }
  27336. /**
  27337. * @license
  27338. * Copyright 2020 Google LLC
  27339. *
  27340. * Licensed under the Apache License, Version 2.0 (the "License");
  27341. * you may not use this file except in compliance with the License.
  27342. * You may obtain a copy of the License at
  27343. *
  27344. * http://www.apache.org/licenses/LICENSE-2.0
  27345. *
  27346. * Unless required by applicable law or agreed to in writing, software
  27347. * distributed under the License is distributed on an "AS IS" BASIS,
  27348. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27349. * See the License for the specific language governing permissions and
  27350. * limitations under the License.
  27351. */
  27352. function registerFirestore(variant, useFetchStreams = true) {
  27353. setSDKVersion(SDK_VERSION$1);
  27354. _registerComponent(new Component('firestore', (container, { instanceIdentifier: databaseId, options: settings }) => {
  27355. const app = container.getProvider('app').getImmediate();
  27356. const firestoreInstance = new Firestore(new FirebaseAuthCredentialsProvider(container.getProvider('auth-internal')), new FirebaseAppCheckTokenProvider(container.getProvider('app-check-internal')), databaseIdFromApp(app, databaseId), app);
  27357. settings = Object.assign({ useFetchStreams }, settings);
  27358. firestoreInstance._setSettings(settings);
  27359. return firestoreInstance;
  27360. }, 'PUBLIC').setMultipleInstances(true));
  27361. registerVersion(name, version$1, variant);
  27362. // BUILD_TARGET will be replaced by values like esm5, esm2017, cjs5, etc during the compilation
  27363. registerVersion(name, version$1, 'esm2017');
  27364. }
  27365. /**
  27366. * @license
  27367. * Copyright 2017 Google LLC
  27368. *
  27369. * Licensed under the Apache License, Version 2.0 (the "License");
  27370. * you may not use this file except in compliance with the License.
  27371. * You may obtain a copy of the License at
  27372. *
  27373. * http://www.apache.org/licenses/LICENSE-2.0
  27374. *
  27375. * Unless required by applicable law or agreed to in writing, software
  27376. * distributed under the License is distributed on an "AS IS" BASIS,
  27377. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27378. * See the License for the specific language governing permissions and
  27379. * limitations under the License.
  27380. */
  27381. function isPartialObserver(obj) {
  27382. return implementsAnyMethods(obj, ['next', 'error', 'complete']);
  27383. }
  27384. /**
  27385. * Returns true if obj is an object and contains at least one of the specified
  27386. * methods.
  27387. */
  27388. function implementsAnyMethods(obj, methods) {
  27389. if (typeof obj !== 'object' || obj === null) {
  27390. return false;
  27391. }
  27392. const object = obj;
  27393. for (const method of methods) {
  27394. if (method in object && typeof object[method] === 'function') {
  27395. return true;
  27396. }
  27397. }
  27398. return false;
  27399. }
  27400. /**
  27401. * @license
  27402. * Copyright 2020 Google LLC
  27403. *
  27404. * Licensed under the Apache License, Version 2.0 (the "License");
  27405. * you may not use this file except in compliance with the License.
  27406. * You may obtain a copy of the License at
  27407. *
  27408. * http://www.apache.org/licenses/LICENSE-2.0
  27409. *
  27410. * Unless required by applicable law or agreed to in writing, software
  27411. * distributed under the License is distributed on an "AS IS" BASIS,
  27412. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27413. * See the License for the specific language governing permissions and
  27414. * limitations under the License.
  27415. */
  27416. /**
  27417. * An immutable object representing an array of bytes.
  27418. */
  27419. class Bytes {
  27420. /** @hideconstructor */
  27421. constructor(byteString) {
  27422. this._byteString = byteString;
  27423. }
  27424. /**
  27425. * Creates a new `Bytes` object from the given Base64 string, converting it to
  27426. * bytes.
  27427. *
  27428. * @param base64 - The Base64 string used to create the `Bytes` object.
  27429. */
  27430. static fromBase64String(base64) {
  27431. try {
  27432. return new Bytes(ByteString.fromBase64String(base64));
  27433. }
  27434. catch (e) {
  27435. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to construct data from Base64 string: ' + e);
  27436. }
  27437. }
  27438. /**
  27439. * Creates a new `Bytes` object from the given Uint8Array.
  27440. *
  27441. * @param array - The Uint8Array used to create the `Bytes` object.
  27442. */
  27443. static fromUint8Array(array) {
  27444. return new Bytes(ByteString.fromUint8Array(array));
  27445. }
  27446. /**
  27447. * Returns the underlying bytes as a Base64-encoded string.
  27448. *
  27449. * @returns The Base64-encoded string created from the `Bytes` object.
  27450. */
  27451. toBase64() {
  27452. return this._byteString.toBase64();
  27453. }
  27454. /**
  27455. * Returns the underlying bytes in a new `Uint8Array`.
  27456. *
  27457. * @returns The Uint8Array created from the `Bytes` object.
  27458. */
  27459. toUint8Array() {
  27460. return this._byteString.toUint8Array();
  27461. }
  27462. /**
  27463. * Returns a string representation of the `Bytes` object.
  27464. *
  27465. * @returns A string representation of the `Bytes` object.
  27466. */
  27467. toString() {
  27468. return 'Bytes(base64: ' + this.toBase64() + ')';
  27469. }
  27470. /**
  27471. * Returns true if this `Bytes` object is equal to the provided one.
  27472. *
  27473. * @param other - The `Bytes` object to compare against.
  27474. * @returns true if this `Bytes` object is equal to the provided one.
  27475. */
  27476. isEqual(other) {
  27477. return this._byteString.isEqual(other._byteString);
  27478. }
  27479. }
  27480. /**
  27481. * @license
  27482. * Copyright 2020 Google LLC
  27483. *
  27484. * Licensed under the Apache License, Version 2.0 (the "License");
  27485. * you may not use this file except in compliance with the License.
  27486. * You may obtain a copy of the License at
  27487. *
  27488. * http://www.apache.org/licenses/LICENSE-2.0
  27489. *
  27490. * Unless required by applicable law or agreed to in writing, software
  27491. * distributed under the License is distributed on an "AS IS" BASIS,
  27492. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27493. * See the License for the specific language governing permissions and
  27494. * limitations under the License.
  27495. */
  27496. /**
  27497. * A `FieldPath` refers to a field in a document. The path may consist of a
  27498. * single field name (referring to a top-level field in the document), or a
  27499. * list of field names (referring to a nested field in the document).
  27500. *
  27501. * Create a `FieldPath` by providing field names. If more than one field
  27502. * name is provided, the path will point to a nested field in a document.
  27503. */
  27504. class FieldPath {
  27505. /**
  27506. * Creates a `FieldPath` from the provided field names. If more than one field
  27507. * name is provided, the path will point to a nested field in a document.
  27508. *
  27509. * @param fieldNames - A list of field names.
  27510. */
  27511. constructor(...fieldNames) {
  27512. for (let i = 0; i < fieldNames.length; ++i) {
  27513. if (fieldNames[i].length === 0) {
  27514. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid field name at argument $(i + 1). ` +
  27515. 'Field names must not be empty.');
  27516. }
  27517. }
  27518. this._internalPath = new FieldPath$1(fieldNames);
  27519. }
  27520. /**
  27521. * Returns true if this `FieldPath` is equal to the provided one.
  27522. *
  27523. * @param other - The `FieldPath` to compare against.
  27524. * @returns true if this `FieldPath` is equal to the provided one.
  27525. */
  27526. isEqual(other) {
  27527. return this._internalPath.isEqual(other._internalPath);
  27528. }
  27529. }
  27530. /**
  27531. * Returns a special sentinel `FieldPath` to refer to the ID of a document.
  27532. * It can be used in queries to sort or filter by the document ID.
  27533. */
  27534. function documentId() {
  27535. return new FieldPath(DOCUMENT_KEY_NAME);
  27536. }
  27537. /**
  27538. * @license
  27539. * Copyright 2020 Google LLC
  27540. *
  27541. * Licensed under the Apache License, Version 2.0 (the "License");
  27542. * you may not use this file except in compliance with the License.
  27543. * You may obtain a copy of the License at
  27544. *
  27545. * http://www.apache.org/licenses/LICENSE-2.0
  27546. *
  27547. * Unless required by applicable law or agreed to in writing, software
  27548. * distributed under the License is distributed on an "AS IS" BASIS,
  27549. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27550. * See the License for the specific language governing permissions and
  27551. * limitations under the License.
  27552. */
  27553. /**
  27554. * Sentinel values that can be used when writing document fields with `set()`
  27555. * or `update()`.
  27556. */
  27557. class FieldValue {
  27558. /**
  27559. * @param _methodName - The public API endpoint that returns this class.
  27560. * @hideconstructor
  27561. */
  27562. constructor(_methodName) {
  27563. this._methodName = _methodName;
  27564. }
  27565. }
  27566. /**
  27567. * @license
  27568. * Copyright 2017 Google LLC
  27569. *
  27570. * Licensed under the Apache License, Version 2.0 (the "License");
  27571. * you may not use this file except in compliance with the License.
  27572. * You may obtain a copy of the License at
  27573. *
  27574. * http://www.apache.org/licenses/LICENSE-2.0
  27575. *
  27576. * Unless required by applicable law or agreed to in writing, software
  27577. * distributed under the License is distributed on an "AS IS" BASIS,
  27578. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27579. * See the License for the specific language governing permissions and
  27580. * limitations under the License.
  27581. */
  27582. /**
  27583. * An immutable object representing a geographic location in Firestore. The
  27584. * location is represented as latitude/longitude pair.
  27585. *
  27586. * Latitude values are in the range of [-90, 90].
  27587. * Longitude values are in the range of [-180, 180].
  27588. */
  27589. class GeoPoint {
  27590. /**
  27591. * Creates a new immutable `GeoPoint` object with the provided latitude and
  27592. * longitude values.
  27593. * @param latitude - The latitude as number between -90 and 90.
  27594. * @param longitude - The longitude as number between -180 and 180.
  27595. */
  27596. constructor(latitude, longitude) {
  27597. if (!isFinite(latitude) || latitude < -90 || latitude > 90) {
  27598. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Latitude must be a number between -90 and 90, but was: ' + latitude);
  27599. }
  27600. if (!isFinite(longitude) || longitude < -180 || longitude > 180) {
  27601. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Longitude must be a number between -180 and 180, but was: ' + longitude);
  27602. }
  27603. this._lat = latitude;
  27604. this._long = longitude;
  27605. }
  27606. /**
  27607. * The latitude of this `GeoPoint` instance.
  27608. */
  27609. get latitude() {
  27610. return this._lat;
  27611. }
  27612. /**
  27613. * The longitude of this `GeoPoint` instance.
  27614. */
  27615. get longitude() {
  27616. return this._long;
  27617. }
  27618. /**
  27619. * Returns true if this `GeoPoint` is equal to the provided one.
  27620. *
  27621. * @param other - The `GeoPoint` to compare against.
  27622. * @returns true if this `GeoPoint` is equal to the provided one.
  27623. */
  27624. isEqual(other) {
  27625. return this._lat === other._lat && this._long === other._long;
  27626. }
  27627. /** Returns a JSON-serializable representation of this GeoPoint. */
  27628. toJSON() {
  27629. return { latitude: this._lat, longitude: this._long };
  27630. }
  27631. /**
  27632. * Actually private to JS consumers of our API, so this function is prefixed
  27633. * with an underscore.
  27634. */
  27635. _compareTo(other) {
  27636. return (primitiveComparator(this._lat, other._lat) ||
  27637. primitiveComparator(this._long, other._long));
  27638. }
  27639. }
  27640. /**
  27641. * @license
  27642. * Copyright 2017 Google LLC
  27643. *
  27644. * Licensed under the Apache License, Version 2.0 (the "License");
  27645. * you may not use this file except in compliance with the License.
  27646. * You may obtain a copy of the License at
  27647. *
  27648. * http://www.apache.org/licenses/LICENSE-2.0
  27649. *
  27650. * Unless required by applicable law or agreed to in writing, software
  27651. * distributed under the License is distributed on an "AS IS" BASIS,
  27652. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  27653. * See the License for the specific language governing permissions and
  27654. * limitations under the License.
  27655. */
// Matches field names of the form `__name__` that are reserved for internal use.
const RESERVED_FIELD_REGEX = /^__.*__$/;
  27657. /** The result of parsing document data (e.g. for a setData call). */
  27658. class ParsedSetData {
  27659. constructor(data, fieldMask, fieldTransforms) {
  27660. this.data = data;
  27661. this.fieldMask = fieldMask;
  27662. this.fieldTransforms = fieldTransforms;
  27663. }
  27664. toMutation(key, precondition) {
  27665. if (this.fieldMask !== null) {
  27666. return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
  27667. }
  27668. else {
  27669. return new SetMutation(key, this.data, precondition, this.fieldTransforms);
  27670. }
  27671. }
  27672. }
/** The result of parsing "update" data (i.e. for an updateData call). */
class ParsedUpdateData {
    /**
     * @param data - The parsed field values, as an ObjectValue.
     * @param fieldMask - The fields touched by the update.
     * @param fieldTransforms - Server-side transforms extracted while parsing.
     */
    constructor(data,
    // The fieldMask does not include document transforms.
    fieldMask, fieldTransforms) {
        this.data = data;
        this.fieldMask = fieldMask;
        this.fieldTransforms = fieldTransforms;
    }
    // Updates always become patch mutations (never full-document sets).
    toMutation(key, precondition) {
        return new PatchMutation(key, this.data, this.fieldMask, precondition, this.fieldTransforms);
    }
}
  27686. function isWrite(dataSource) {
  27687. switch (dataSource) {
  27688. case 0 /* UserDataSource.Set */: // fall through
  27689. case 2 /* UserDataSource.MergeSet */: // fall through
  27690. case 1 /* UserDataSource.Update */:
  27691. return true;
  27692. case 3 /* UserDataSource.Argument */:
  27693. case 4 /* UserDataSource.ArrayArgument */:
  27694. return false;
  27695. default:
  27696. throw fail();
  27697. }
  27698. }
/** A "context" object passed around while parsing user data. */
class ParseContextImpl {
    /**
     * Initializes a ParseContext with the given source and path.
     *
     * @param settings - The settings for the parser.
     * @param databaseId - The database ID of the Firestore instance.
     * @param serializer - The serializer to use to generate the Value proto.
     * @param ignoreUndefinedProperties - Whether to ignore undefined properties
     * rather than throw.
     * @param fieldTransforms - A mutable list of field transforms encountered
     * while parsing the data.
     * @param fieldMask - A mutable list of field paths encountered while parsing
     * the data.
     *
     * TODO(b/34871131): We don't support array paths right now, so path can be
     * null to indicate the context represents any location within an array (in
     * which case certain features will not work and errors will be somewhat
     * compromised).
     */
    constructor(settings, databaseId, serializer, ignoreUndefinedProperties, fieldTransforms, fieldMask) {
        this.settings = settings;
        this.databaseId = databaseId;
        this.serializer = serializer;
        this.ignoreUndefinedProperties = ignoreUndefinedProperties;
        // Minor hack: If fieldTransforms is undefined, we assume this is an
        // external call and we need to validate the entire path.
        if (fieldTransforms === undefined) {
            this.validatePath();
        }
        this.fieldTransforms = fieldTransforms || [];
        this.fieldMask = fieldMask || [];
    }
    // The field path currently being parsed; undefined while inside an array.
    get path() {
        return this.settings.path;
    }
    // The UserDataSource (Set/Update/MergeSet/Argument/ArrayArgument).
    get dataSource() {
        return this.settings.dataSource;
    }
    /** Returns a new context with the specified settings overwritten. */
    contextWith(configuration) {
        // Child contexts share this context's mutable fieldTransforms/fieldMask
        // arrays, so nested parses accumulate into the same result.
        return new ParseContextImpl(Object.assign(Object.assign({}, this.settings), configuration), this.databaseId, this.serializer, this.ignoreUndefinedProperties, this.fieldTransforms, this.fieldMask);
    }
    // Child context for one (already-split) field name segment; validates only
    // the new segment — the prefix was validated when its context was created.
    childContextForField(field) {
        var _a;
        const childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
        const context = this.contextWith({ path: childPath, arrayElement: false });
        context.validatePathSegment(field);
        return context;
    }
    // Child context for a complete FieldPath; validates every segment.
    childContextForFieldPath(field) {
        var _a;
        const childPath = (_a = this.path) === null || _a === void 0 ? void 0 : _a.child(field);
        const context = this.contextWith({ path: childPath, arrayElement: false });
        context.validatePath();
        return context;
    }
    childContextForArray(index) {
        // TODO(b/34871131): We don't support array paths right now; so make path
        // undefined.
        return this.contextWith({ path: undefined, arrayElement: true });
    }
    // Builds a validation error annotated with method name, path and target doc.
    createError(reason) {
        return createError(reason, this.settings.methodName, this.settings.hasConverter || false, this.path, this.settings.targetDoc);
    }
    /** Returns 'true' if 'fieldPath' was traversed when creating this context. */
    contains(fieldPath) {
        return (this.fieldMask.find(field => fieldPath.isPrefixOf(field)) !== undefined ||
            this.fieldTransforms.find(transform => fieldPath.isPrefixOf(transform.field)) !== undefined);
    }
    validatePath() {
        // TODO(b/34871131): Remove null check once we have proper paths for fields
        // within arrays.
        if (!this.path) {
            return;
        }
        for (let i = 0; i < this.path.length; i++) {
            this.validatePathSegment(this.path.get(i));
        }
    }
    // Rejects empty segments and, for writes, reserved `__name__`-style segments.
    validatePathSegment(segment) {
        if (segment.length === 0) {
            throw this.createError('Document fields must not be empty');
        }
        if (isWrite(this.dataSource) && RESERVED_FIELD_REGEX.test(segment)) {
            throw this.createError('Document fields cannot begin and end with "__"');
        }
    }
}
  27788. /**
  27789. * Helper for parsing raw user input (provided via the API) into internal model
  27790. * classes.
  27791. */
  27792. class UserDataReader {
  27793. constructor(databaseId, ignoreUndefinedProperties, serializer) {
  27794. this.databaseId = databaseId;
  27795. this.ignoreUndefinedProperties = ignoreUndefinedProperties;
  27796. this.serializer = serializer || newSerializer(databaseId);
  27797. }
  27798. /** Creates a new top-level parse context. */
  27799. createContext(dataSource, methodName, targetDoc, hasConverter = false) {
  27800. return new ParseContextImpl({
  27801. dataSource,
  27802. methodName,
  27803. targetDoc,
  27804. path: FieldPath$1.emptyPath(),
  27805. arrayElement: false,
  27806. hasConverter
  27807. }, this.databaseId, this.serializer, this.ignoreUndefinedProperties);
  27808. }
  27809. }
  27810. function newUserDataReader(firestore) {
  27811. const settings = firestore._freezeSettings();
  27812. const serializer = newSerializer(firestore._databaseId);
  27813. return new UserDataReader(firestore._databaseId, !!settings.ignoreUndefinedProperties, serializer);
  27814. }
/** Parse document data from a set() call. */
function parseSetData(userDataReader, methodName, targetDoc, input, hasConverter, options = {}) {
    // Merge writes parse against the MergeSet source, which changes how
    // sentinels (e.g. deleteField()) and the field mask are handled.
    const context = userDataReader.createContext(options.merge || options.mergeFields
        ? 2 /* UserDataSource.MergeSet */
        : 0 /* UserDataSource.Set */, methodName, targetDoc, hasConverter);
    validatePlainObject('Data must be an object, but it was:', context, input);
    const updateData = parseObject(input, context);
    let fieldMask;
    let fieldTransforms;
    if (options.merge) {
        // merge:true — the mask is everything encountered while parsing.
        fieldMask = new FieldMask(context.fieldMask);
        fieldTransforms = context.fieldTransforms;
    }
    else if (options.mergeFields) {
        // mergeFields — the mask is the user-supplied list, validated against
        // the parsed data and de-duplicated.
        const validatedFieldPaths = [];
        for (const stringOrFieldPath of options.mergeFields) {
            const fieldPath = fieldPathFromArgument$1(methodName, stringOrFieldPath, targetDoc);
            if (!context.contains(fieldPath)) {
                throw new FirestoreError(Code.INVALID_ARGUMENT, `Field '${fieldPath}' is specified in your field mask but missing from your input data.`);
            }
            if (!fieldMaskContains(validatedFieldPaths, fieldPath)) {
                validatedFieldPaths.push(fieldPath);
            }
        }
        fieldMask = new FieldMask(validatedFieldPaths);
        // Transforms outside the mask must not be applied.
        fieldTransforms = context.fieldTransforms.filter(transform => fieldMask.covers(transform.field));
    }
    else {
        // Plain set(): a null mask signals "replace the entire document".
        fieldMask = null;
        fieldTransforms = context.fieldTransforms;
    }
    return new ParsedSetData(new ObjectValue(updateData), fieldMask, fieldTransforms);
}
  27848. class DeleteFieldValueImpl extends FieldValue {
  27849. _toFieldTransform(context) {
  27850. if (context.dataSource === 2 /* UserDataSource.MergeSet */) {
  27851. // No transform to add for a delete, but we need to add it to our
  27852. // fieldMask so it gets deleted.
  27853. context.fieldMask.push(context.path);
  27854. }
  27855. else if (context.dataSource === 1 /* UserDataSource.Update */) {
  27856. throw context.createError(`${this._methodName}() can only appear at the top level ` +
  27857. 'of your update data');
  27858. }
  27859. else {
  27860. // We shouldn't encounter delete sentinels for queries or non-merge set() calls.
  27861. throw context.createError(`${this._methodName}() cannot be used with set() unless you pass ` +
  27862. '{merge:true}');
  27863. }
  27864. return null;
  27865. }
  27866. isEqual(other) {
  27867. return other instanceof DeleteFieldValueImpl;
  27868. }
  27869. }
  27870. /**
  27871. * Creates a child context for parsing SerializableFieldValues.
  27872. *
  27873. * This is different than calling `ParseContext.contextWith` because it keeps
  27874. * the fieldTransforms and fieldMask separate.
  27875. *
  27876. * The created context has its `dataSource` set to `UserDataSource.Argument`.
  27877. * Although these values are used with writes, any elements in these FieldValues
  27878. * are not considered writes since they cannot contain any FieldValue sentinels,
  27879. * etc.
  27880. *
  27881. * @param fieldValue - The sentinel FieldValue for which to create a child
  27882. * context.
  27883. * @param context - The parent context.
  27884. * @param arrayElement - Whether or not the FieldValue has an array.
  27885. */
  27886. function createSentinelChildContext(fieldValue, context, arrayElement) {
  27887. return new ParseContextImpl({
  27888. dataSource: 3 /* UserDataSource.Argument */,
  27889. targetDoc: context.settings.targetDoc,
  27890. methodName: fieldValue._methodName,
  27891. arrayElement
  27892. }, context.databaseId, context.serializer, context.ignoreUndefinedProperties);
  27893. }
  27894. class ServerTimestampFieldValueImpl extends FieldValue {
  27895. _toFieldTransform(context) {
  27896. return new FieldTransform(context.path, new ServerTimestampTransform());
  27897. }
  27898. isEqual(other) {
  27899. return other instanceof ServerTimestampFieldValueImpl;
  27900. }
  27901. }
  27902. class ArrayUnionFieldValueImpl extends FieldValue {
  27903. constructor(methodName, _elements) {
  27904. super(methodName);
  27905. this._elements = _elements;
  27906. }
  27907. _toFieldTransform(context) {
  27908. const parseContext = createSentinelChildContext(this, context,
  27909. /*array=*/ true);
  27910. const parsedElements = this._elements.map(element => parseData(element, parseContext));
  27911. const arrayUnion = new ArrayUnionTransformOperation(parsedElements);
  27912. return new FieldTransform(context.path, arrayUnion);
  27913. }
  27914. isEqual(other) {
  27915. // TODO(mrschmidt): Implement isEquals
  27916. return this === other;
  27917. }
  27918. }
  27919. class ArrayRemoveFieldValueImpl extends FieldValue {
  27920. constructor(methodName, _elements) {
  27921. super(methodName);
  27922. this._elements = _elements;
  27923. }
  27924. _toFieldTransform(context) {
  27925. const parseContext = createSentinelChildContext(this, context,
  27926. /*array=*/ true);
  27927. const parsedElements = this._elements.map(element => parseData(element, parseContext));
  27928. const arrayUnion = new ArrayRemoveTransformOperation(parsedElements);
  27929. return new FieldTransform(context.path, arrayUnion);
  27930. }
  27931. isEqual(other) {
  27932. // TODO(mrschmidt): Implement isEquals
  27933. return this === other;
  27934. }
  27935. }
  27936. class NumericIncrementFieldValueImpl extends FieldValue {
  27937. constructor(methodName, _operand) {
  27938. super(methodName);
  27939. this._operand = _operand;
  27940. }
  27941. _toFieldTransform(context) {
  27942. const numericIncrement = new NumericIncrementTransformOperation(context.serializer, toNumber(context.serializer, this._operand));
  27943. return new FieldTransform(context.path, numericIncrement);
  27944. }
  27945. isEqual(other) {
  27946. // TODO(mrschmidt): Implement isEquals
  27947. return this === other;
  27948. }
  27949. }
/** Parse update data from an update() call. */
function parseUpdateData(userDataReader, methodName, targetDoc, input) {
    const context = userDataReader.createContext(1 /* UserDataSource.Update */, methodName, targetDoc);
    validatePlainObject('Data must be an object, but it was:', context, input);
    const fieldMaskPaths = [];
    const updateData = ObjectValue.empty();
    // Each top-level key of `input` is a dot-separated field path; its value is
    // parsed relative to that path.
    forEach(input, (key, value) => {
        const path = fieldPathFromDotSeparatedString(methodName, key, targetDoc);
        // For Compat types, we have to "extract" the underlying types before
        // performing validation.
        value = getModularInstance(value);
        const childContext = context.childContextForFieldPath(path);
        if (value instanceof DeleteFieldValueImpl) {
            // Add it to the field mask, but don't add anything to updateData.
            fieldMaskPaths.push(path);
        }
        else {
            // parseData() returns null for sentinels (e.g. serverTimestamp())
            // that become transforms instead of literal values.
            const parsedValue = parseData(value, childContext);
            if (parsedValue != null) {
                fieldMaskPaths.push(path);
                updateData.set(path, parsedValue);
            }
        }
    });
    const mask = new FieldMask(fieldMaskPaths);
    return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
}
  27977. /** Parse update data from a list of field/value arguments. */
  27978. function parseUpdateVarargs(userDataReader, methodName, targetDoc, field, value, moreFieldsAndValues) {
  27979. const context = userDataReader.createContext(1 /* UserDataSource.Update */, methodName, targetDoc);
  27980. const keys = [fieldPathFromArgument$1(methodName, field, targetDoc)];
  27981. const values = [value];
  27982. if (moreFieldsAndValues.length % 2 !== 0) {
  27983. throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${methodName}() needs to be called with an even number ` +
  27984. 'of arguments that alternate between field names and values.');
  27985. }
  27986. for (let i = 0; i < moreFieldsAndValues.length; i += 2) {
  27987. keys.push(fieldPathFromArgument$1(methodName, moreFieldsAndValues[i]));
  27988. values.push(moreFieldsAndValues[i + 1]);
  27989. }
  27990. const fieldMaskPaths = [];
  27991. const updateData = ObjectValue.empty();
  27992. // We iterate in reverse order to pick the last value for a field if the
  27993. // user specified the field multiple times.
  27994. for (let i = keys.length - 1; i >= 0; --i) {
  27995. if (!fieldMaskContains(fieldMaskPaths, keys[i])) {
  27996. const path = keys[i];
  27997. let value = values[i];
  27998. // For Compat types, we have to "extract" the underlying types before
  27999. // performing validation.
  28000. value = getModularInstance(value);
  28001. const childContext = context.childContextForFieldPath(path);
  28002. if (value instanceof DeleteFieldValueImpl) {
  28003. // Add it to the field mask, but don't add anything to updateData.
  28004. fieldMaskPaths.push(path);
  28005. }
  28006. else {
  28007. const parsedValue = parseData(value, childContext);
  28008. if (parsedValue != null) {
  28009. fieldMaskPaths.push(path);
  28010. updateData.set(path, parsedValue);
  28011. }
  28012. }
  28013. }
  28014. }
  28015. const mask = new FieldMask(fieldMaskPaths);
  28016. return new ParsedUpdateData(updateData, mask, context.fieldTransforms);
  28017. }
  28018. /**
  28019. * Parse a "query value" (e.g. value in a where filter or a value in a cursor
  28020. * bound).
  28021. *
  28022. * @param allowArrays - Whether the query value is an array that may directly
  28023. * contain additional arrays (e.g. the operand of an `in` query).
  28024. */
  28025. function parseQueryValue(userDataReader, methodName, input, allowArrays = false) {
  28026. const context = userDataReader.createContext(allowArrays ? 4 /* UserDataSource.ArrayArgument */ : 3 /* UserDataSource.Argument */, methodName);
  28027. const parsed = parseData(input, context);
  28028. return parsed;
  28029. }
/**
 * Parses user data to Protobuf Values.
 *
 * @param input - Data to be parsed.
 * @param context - A context object representing the current path being parsed,
 * the source of the data being parsed, etc.
 * @returns The parsed value, or null if the value was a FieldValue sentinel
 * that should not be included in the resulting parsed data.
 */
function parseData(input, context) {
    // Unwrap the API type from the Compat SDK. This will return the API type
    // from firestore-exp.
    input = getModularInstance(input);
    if (looksLikeJsonObject(input)) {
        validatePlainObject('Unsupported field value:', context, input);
        return parseObject(input, context);
    }
    else if (input instanceof FieldValue) {
        // FieldValues usually parse into transforms (except deleteField())
        // in which case we do not want to include this field in our parsed data
        // (as doing so will overwrite the field directly prior to the transform
        // trying to transform it). So we don't add this location to
        // context.fieldMask and we return null as our parsing result.
        parseSentinelFieldValue(input, context);
        return null;
    }
    else if (input === undefined && context.ignoreUndefinedProperties) {
        // If the input is undefined it can never participate in the fieldMask, so
        // don't handle this below. If `ignoreUndefinedProperties` is false,
        // `parseScalarValue` will reject an undefined value.
        return null;
    }
    else {
        // If context.path is null we are inside an array and we don't support
        // field mask paths more granular than the top-level array.
        if (context.path) {
            context.fieldMask.push(context.path);
        }
        if (input instanceof Array) {
            // TODO(b/34871131): Include the path containing the array in the error
            // message.
            // In the case of IN queries, the parsed data is an array (representing
            // the set of values to be included for the IN query) that may directly
            // contain additional arrays (each representing an individual field
            // value), so we disable this validation.
            if (context.settings.arrayElement &&
                context.dataSource !== 4 /* UserDataSource.ArrayArgument */) {
                throw context.createError('Nested arrays are not supported');
            }
            return parseArray(input, context);
        }
        else {
            // Everything else (null, boolean, number, string, Date, Timestamp,
            // GeoPoint, Bytes, DocumentReference) is handled per-type.
            return parseScalarValue(input, context);
        }
    }
}
  28086. function parseObject(obj, context) {
  28087. const fields = {};
  28088. if (isEmpty(obj)) {
  28089. // If we encounter an empty object, we explicitly add it to the update
  28090. // mask to ensure that the server creates a map entry.
  28091. if (context.path && context.path.length > 0) {
  28092. context.fieldMask.push(context.path);
  28093. }
  28094. }
  28095. else {
  28096. forEach(obj, (key, val) => {
  28097. const parsedValue = parseData(val, context.childContextForField(key));
  28098. if (parsedValue != null) {
  28099. fields[key] = parsedValue;
  28100. }
  28101. });
  28102. }
  28103. return { mapValue: { fields } };
  28104. }
  28105. function parseArray(array, context) {
  28106. const values = [];
  28107. let entryIndex = 0;
  28108. for (const entry of array) {
  28109. let parsedEntry = parseData(entry, context.childContextForArray(entryIndex));
  28110. if (parsedEntry == null) {
  28111. // Just include nulls in the array for fields being replaced with a
  28112. // sentinel.
  28113. parsedEntry = { nullValue: 'NULL_VALUE' };
  28114. }
  28115. values.push(parsedEntry);
  28116. entryIndex++;
  28117. }
  28118. return { arrayValue: { values } };
  28119. }
  28120. /**
  28121. * "Parses" the provided FieldValueImpl, adding any necessary transforms to
  28122. * context.fieldTransforms.
  28123. */
  28124. function parseSentinelFieldValue(value, context) {
  28125. // Sentinels are only supported with writes, and not within arrays.
  28126. if (!isWrite(context.dataSource)) {
  28127. throw context.createError(`${value._methodName}() can only be used with update() and set()`);
  28128. }
  28129. if (!context.path) {
  28130. throw context.createError(`${value._methodName}() is not currently supported inside arrays`);
  28131. }
  28132. const fieldTransform = value._toFieldTransform(context);
  28133. if (fieldTransform) {
  28134. context.fieldTransforms.push(fieldTransform);
  28135. }
  28136. }
/**
 * Helper to parse a scalar value (i.e. not an Object, Array, or FieldValue)
 *
 * @returns The parsed value
 */
function parseScalarValue(value, context) {
    value = getModularInstance(value);
    if (value === null) {
        return { nullValue: 'NULL_VALUE' };
    }
    else if (typeof value === 'number') {
        // Serialized as integerValue or doubleValue depending on the value.
        return toNumber(context.serializer, value);
    }
    else if (typeof value === 'boolean') {
        return { booleanValue: value };
    }
    else if (typeof value === 'string') {
        return { stringValue: value };
    }
    else if (value instanceof Date) {
        const timestamp = Timestamp.fromDate(value);
        return {
            timestampValue: toTimestamp(context.serializer, timestamp)
        };
    }
    else if (value instanceof Timestamp) {
        // Firestore backend truncates precision down to microseconds. To ensure
        // offline mode works the same with regards to truncation, perform the
        // truncation immediately without waiting for the backend to do that.
        const timestamp = new Timestamp(value.seconds, Math.floor(value.nanoseconds / 1000) * 1000);
        return {
            timestampValue: toTimestamp(context.serializer, timestamp)
        };
    }
    else if (value instanceof GeoPoint) {
        return {
            geoPointValue: {
                latitude: value.latitude,
                longitude: value.longitude
            }
        };
    }
    else if (value instanceof Bytes) {
        return { bytesValue: toBytes(context.serializer, value._byteString) };
    }
    else if (value instanceof DocumentReference) {
        // Document references must point at the same database as the write.
        const thisDb = context.databaseId;
        const otherDb = value.firestore._databaseId;
        if (!otherDb.isEqual(thisDb)) {
            throw context.createError('Document reference is for database ' +
                `${otherDb.projectId}/${otherDb.database} but should be ` +
                `for database ${thisDb.projectId}/${thisDb.database}`);
        }
        return {
            referenceValue: toResourceName(value.firestore._databaseId || context.databaseId, value._key.path)
        };
    }
    else {
        // undefined (when ignoreUndefinedProperties is false), functions,
        // symbols and unknown classes all land here.
        throw context.createError(`Unsupported field value: ${valueDescription(value)}`);
    }
}
  28198. /**
  28199. * Checks whether an object looks like a JSON object that should be converted
  28200. * into a struct. Normal class/prototype instances are considered to look like
  28201. * JSON objects since they should be converted to a struct value. Arrays, Dates,
  28202. * GeoPoints, etc. are not considered to look like JSON objects since they map
  28203. * to specific FieldValue types other than ObjectValue.
  28204. */
  28205. function looksLikeJsonObject(input) {
  28206. return (typeof input === 'object' &&
  28207. input !== null &&
  28208. !(input instanceof Array) &&
  28209. !(input instanceof Date) &&
  28210. !(input instanceof Timestamp) &&
  28211. !(input instanceof GeoPoint) &&
  28212. !(input instanceof Bytes) &&
  28213. !(input instanceof DocumentReference) &&
  28214. !(input instanceof FieldValue));
  28215. }
  28216. function validatePlainObject(message, context, input) {
  28217. if (!looksLikeJsonObject(input) || !isPlainObject(input)) {
  28218. const description = valueDescription(input);
  28219. if (description === 'an object') {
  28220. // Massage the error if it was an object.
  28221. throw context.createError(message + ' a custom object');
  28222. }
  28223. else {
  28224. throw context.createError(message + ' ' + description);
  28225. }
  28226. }
  28227. }
  28228. /**
  28229. * Helper that calls fromDotSeparatedString() but wraps any error thrown.
  28230. */
  28231. function fieldPathFromArgument$1(methodName, path, targetDoc) {
  28232. // If required, replace the FieldPath Compat class with with the firestore-exp
  28233. // FieldPath.
  28234. path = getModularInstance(path);
  28235. if (path instanceof FieldPath) {
  28236. return path._internalPath;
  28237. }
  28238. else if (typeof path === 'string') {
  28239. return fieldPathFromDotSeparatedString(methodName, path);
  28240. }
  28241. else {
  28242. const message = 'Field path arguments must be of type string or ';
  28243. throw createError(message, methodName,
  28244. /* hasConverter= */ false,
  28245. /* path= */ undefined, targetDoc);
  28246. }
  28247. }
  28248. /**
  28249. * Matches any characters in a field path string that are reserved.
  28250. */
  28251. const FIELD_PATH_RESERVED = new RegExp('[~\\*/\\[\\]]');
  28252. /**
  28253. * Wraps fromDotSeparatedString with an error message about the method that
  28254. * was thrown.
  28255. * @param methodName - The publicly visible method name
  28256. * @param path - The dot-separated string form of a field path which will be
  28257. * split on dots.
  28258. * @param targetDoc - The document against which the field path will be
  28259. * evaluated.
  28260. */
  28261. function fieldPathFromDotSeparatedString(methodName, path, targetDoc) {
  28262. const found = path.search(FIELD_PATH_RESERVED);
  28263. if (found >= 0) {
  28264. throw createError(`Invalid field path (${path}). Paths must not contain ` +
  28265. `'~', '*', '/', '[', or ']'`, methodName,
  28266. /* hasConverter= */ false,
  28267. /* path= */ undefined, targetDoc);
  28268. }
  28269. try {
  28270. return new FieldPath(...path.split('.'))._internalPath;
  28271. }
  28272. catch (e) {
  28273. throw createError(`Invalid field path (${path}). Paths must not be empty, ` +
  28274. `begin with '.', end with '.', or contain '..'`, methodName,
  28275. /* hasConverter= */ false,
  28276. /* path= */ undefined, targetDoc);
  28277. }
  28278. }
  28279. function createError(reason, methodName, hasConverter, path, targetDoc) {
  28280. const hasPath = path && !path.isEmpty();
  28281. const hasDocument = targetDoc !== undefined;
  28282. let message = `Function ${methodName}() called with invalid data`;
  28283. if (hasConverter) {
  28284. message += ' (via `toFirestore()`)';
  28285. }
  28286. message += '. ';
  28287. let description = '';
  28288. if (hasPath || hasDocument) {
  28289. description += ' (found';
  28290. if (hasPath) {
  28291. description += ` in field ${path}`;
  28292. }
  28293. if (hasDocument) {
  28294. description += ` in document ${targetDoc}`;
  28295. }
  28296. description += ')';
  28297. }
  28298. return new FirestoreError(Code.INVALID_ARGUMENT, message + reason + description);
  28299. }
  28300. /** Checks `haystack` if FieldPath `needle` is present. Runs in O(n). */
  28301. function fieldMaskContains(haystack, needle) {
  28302. return haystack.some(v => v.isEqual(needle));
  28303. }
  28304. /**
  28305. * @license
  28306. * Copyright 2020 Google LLC
  28307. *
  28308. * Licensed under the Apache License, Version 2.0 (the "License");
  28309. * you may not use this file except in compliance with the License.
  28310. * You may obtain a copy of the License at
  28311. *
  28312. * http://www.apache.org/licenses/LICENSE-2.0
  28313. *
  28314. * Unless required by applicable law or agreed to in writing, software
  28315. * distributed under the License is distributed on an "AS IS" BASIS,
  28316. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  28317. * See the License for the specific language governing permissions and
  28318. * limitations under the License.
  28319. */
  28320. /**
  28321. * A `DocumentSnapshot` contains data read from a document in your Firestore
  28322. * database. The data can be extracted with `.data()` or `.get(<field>)` to
  28323. * get a specific field.
  28324. *
  28325. * For a `DocumentSnapshot` that points to a non-existing document, any data
  28326. * access will return 'undefined'. You can use the `exists()` method to
  28327. * explicitly verify a document's existence.
  28328. */
  28329. class DocumentSnapshot$1 {
  28330. // Note: This class is stripped down version of the DocumentSnapshot in
  28331. // the legacy SDK. The changes are:
  28332. // - No support for SnapshotMetadata.
  28333. // - No support for SnapshotOptions.
  28334. /** @hideconstructor protected */
  28335. constructor(_firestore, _userDataWriter, _key, _document, _converter) {
  28336. this._firestore = _firestore;
  28337. this._userDataWriter = _userDataWriter;
  28338. this._key = _key;
  28339. this._document = _document;
  28340. this._converter = _converter;
  28341. }
  28342. /** Property of the `DocumentSnapshot` that provides the document's ID. */
  28343. get id() {
  28344. return this._key.path.lastSegment();
  28345. }
  28346. /**
  28347. * The `DocumentReference` for the document included in the `DocumentSnapshot`.
  28348. */
  28349. get ref() {
  28350. return new DocumentReference(this._firestore, this._converter, this._key);
  28351. }
  28352. /**
  28353. * Signals whether or not the document at the snapshot's location exists.
  28354. *
  28355. * @returns true if the document exists.
  28356. */
  28357. exists() {
  28358. return this._document !== null;
  28359. }
  28360. /**
  28361. * Retrieves all fields in the document as an `Object`. Returns `undefined` if
  28362. * the document doesn't exist.
  28363. *
  28364. * @returns An `Object` containing all fields in the document or `undefined`
  28365. * if the document doesn't exist.
  28366. */
  28367. data() {
  28368. if (!this._document) {
  28369. return undefined;
  28370. }
  28371. else if (this._converter) {
  28372. // We only want to use the converter and create a new DocumentSnapshot
  28373. // if a converter has been provided.
  28374. const snapshot = new QueryDocumentSnapshot$1(this._firestore, this._userDataWriter, this._key, this._document,
  28375. /* converter= */ null);
  28376. return this._converter.fromFirestore(snapshot);
  28377. }
  28378. else {
  28379. return this._userDataWriter.convertValue(this._document.data.value);
  28380. }
  28381. }
  28382. /**
  28383. * Retrieves the field specified by `fieldPath`. Returns `undefined` if the
  28384. * document or field doesn't exist.
  28385. *
  28386. * @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
  28387. * field.
  28388. * @returns The data at the specified field location or undefined if no such
  28389. * field exists in the document.
  28390. */
  28391. // We are using `any` here to avoid an explicit cast by our users.
  28392. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  28393. get(fieldPath) {
  28394. if (this._document) {
  28395. const value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
  28396. if (value !== null) {
  28397. return this._userDataWriter.convertValue(value);
  28398. }
  28399. }
  28400. return undefined;
  28401. }
  28402. }
  28403. /**
  28404. * A `QueryDocumentSnapshot` contains data read from a document in your
  28405. * Firestore database as part of a query. The document is guaranteed to exist
  28406. * and its data can be extracted with `.data()` or `.get(<field>)` to get a
  28407. * specific field.
  28408. *
  28409. * A `QueryDocumentSnapshot` offers the same API surface as a
  28410. * `DocumentSnapshot`. Since query results contain only existing documents, the
  28411. * `exists` property will always be true and `data()` will never return
  28412. * 'undefined'.
  28413. */
class QueryDocumentSnapshot$1 extends DocumentSnapshot$1 {
/**
 * Retrieves all fields in the document as an `Object`.
 *
 * @override
 * @returns An `Object` containing all fields in the document.
 */
data() {
// Delegates to the base implementation; query snapshots always wrap an
// existing document, so the result is never `undefined`.
return super.data();
}
}
  28425. /**
  28426. * Helper that calls `fromDotSeparatedString()` but wraps any error thrown.
  28427. */
  28428. function fieldPathFromArgument(methodName, arg) {
  28429. if (typeof arg === 'string') {
  28430. return fieldPathFromDotSeparatedString(methodName, arg);
  28431. }
  28432. else if (arg instanceof FieldPath) {
  28433. return arg._internalPath;
  28434. }
  28435. else {
  28436. return arg._delegate._internalPath;
  28437. }
  28438. }
  28439. /**
  28440. * @license
  28441. * Copyright 2020 Google LLC
  28442. *
  28443. * Licensed under the Apache License, Version 2.0 (the "License");
  28444. * you may not use this file except in compliance with the License.
  28445. * You may obtain a copy of the License at
  28446. *
  28447. * http://www.apache.org/licenses/LICENSE-2.0
  28448. *
  28449. * Unless required by applicable law or agreed to in writing, software
  28450. * distributed under the License is distributed on an "AS IS" BASIS,
  28451. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  28452. * See the License for the specific language governing permissions and
  28453. * limitations under the License.
  28454. */
  28455. function validateHasExplicitOrderByForLimitToLast(query) {
  28456. if (query.limitType === "L" /* LimitType.Last */ &&
  28457. query.explicitOrderBy.length === 0) {
  28458. throw new FirestoreError(Code.UNIMPLEMENTED, 'limitToLast() queries require specifying at least one orderBy() clause');
  28459. }
  28460. }
  28461. /**
  28462. * An `AppliableConstraint` is an abstraction of a constraint that can be applied
  28463. * to a Firestore query.
  28464. */
class AppliableConstraint {
// Intentionally empty: serves as the common base type checked by `query()`;
// concrete constraints implement `_apply(query)`.
}
  28467. /**
  28468. * A `QueryConstraint` is used to narrow the set of documents returned by a
  28469. * Firestore query. `QueryConstraint`s are created by invoking {@link where},
  28470. * {@link orderBy}, {@link startAt}, {@link startAfter}, {@link
  28471. * endBefore}, {@link endAt}, {@link limit}, {@link limitToLast} and
  28472. * can then be passed to {@link query} to create a new query instance that
  28473. * also contains this `QueryConstraint`.
  28474. */
class QueryConstraint extends AppliableConstraint {
// Intentionally empty: marker subtype of AppliableConstraint extended by the
// where/orderBy/limit/cursor constraint classes below.
}
  28477. function query(query, queryConstraint, ...additionalQueryConstraints) {
  28478. let queryConstraints = [];
  28479. if (queryConstraint instanceof AppliableConstraint) {
  28480. queryConstraints.push(queryConstraint);
  28481. }
  28482. queryConstraints = queryConstraints.concat(additionalQueryConstraints);
  28483. validateQueryConstraintArray(queryConstraints);
  28484. for (const constraint of queryConstraints) {
  28485. query = constraint._apply(query);
  28486. }
  28487. return query;
  28488. }
  28489. /**
  28490. * A `QueryFieldFilterConstraint` is used to narrow the set of documents returned by
  28491. * a Firestore query by filtering on one or more document fields.
  28492. * `QueryFieldFilterConstraint`s are created by invoking {@link where} and can then
  28493. * be passed to {@link query} to create a new query instance that also contains
  28494. * this `QueryFieldFilterConstraint`.
  28495. */
  28496. class QueryFieldFilterConstraint extends QueryConstraint {
  28497. /**
  28498. * @internal
  28499. */
  28500. constructor(_field, _op, _value) {
  28501. super();
  28502. this._field = _field;
  28503. this._op = _op;
  28504. this._value = _value;
  28505. /** The type of this query constraint */
  28506. this.type = 'where';
  28507. }
  28508. static _create(_field, _op, _value) {
  28509. return new QueryFieldFilterConstraint(_field, _op, _value);
  28510. }
  28511. _apply(query) {
  28512. const filter = this._parse(query);
  28513. validateNewFieldFilter(query._query, filter);
  28514. return new Query(query.firestore, query.converter, queryWithAddedFilter(query._query, filter));
  28515. }
  28516. _parse(query) {
  28517. const reader = newUserDataReader(query.firestore);
  28518. const filter = newQueryFilter(query._query, 'where', reader, query.firestore._databaseId, this._field, this._op, this._value);
  28519. return filter;
  28520. }
  28521. }
  28522. /**
  28523. * Creates a {@link QueryFieldFilterConstraint} that enforces that documents
  28524. * must contain the specified field and that the value should satisfy the
  28525. * relation constraint provided.
  28526. *
  28527. * @param fieldPath - The path to compare
* @param opStr - The operation string (e.g "&lt;", "&lt;=", "==", "&gt;",
* "&gt;=", "!=").
  28530. * @param value - The value for comparison
  28531. * @returns The created {@link QueryFieldFilterConstraint}.
  28532. */
  28533. function where(fieldPath, opStr, value) {
  28534. const op = opStr;
  28535. const field = fieldPathFromArgument('where', fieldPath);
  28536. return QueryFieldFilterConstraint._create(field, op, value);
  28537. }
  28538. /**
  28539. * A `QueryCompositeFilterConstraint` is used to narrow the set of documents
  28540. * returned by a Firestore query by performing the logical OR or AND of multiple
  28541. * {@link QueryFieldFilterConstraint}s or {@link QueryCompositeFilterConstraint}s.
  28542. * `QueryCompositeFilterConstraint`s are created by invoking {@link or} or
  28543. * {@link and} and can then be passed to {@link query} to create a new query
  28544. * instance that also contains the `QueryCompositeFilterConstraint`.
  28545. * @internal TODO remove this internal tag with OR Query support in the server
  28546. */
  28547. class QueryCompositeFilterConstraint extends AppliableConstraint {
  28548. /**
  28549. * @internal
  28550. */
  28551. constructor(
  28552. /** The type of this query constraint */
  28553. type, _queryConstraints) {
  28554. super();
  28555. this.type = type;
  28556. this._queryConstraints = _queryConstraints;
  28557. }
  28558. static _create(type, _queryConstraints) {
  28559. return new QueryCompositeFilterConstraint(type, _queryConstraints);
  28560. }
  28561. _parse(query) {
  28562. const parsedFilters = this._queryConstraints
  28563. .map(queryConstraint => {
  28564. return queryConstraint._parse(query);
  28565. })
  28566. .filter(parsedFilter => parsedFilter.getFilters().length > 0);
  28567. if (parsedFilters.length === 1) {
  28568. return parsedFilters[0];
  28569. }
  28570. return CompositeFilter.create(parsedFilters, this._getOperator());
  28571. }
  28572. _apply(query) {
  28573. const parsedFilter = this._parse(query);
  28574. if (parsedFilter.getFilters().length === 0) {
  28575. // Return the existing query if not adding any more filters (e.g. an empty
  28576. // composite filter).
  28577. return query;
  28578. }
  28579. validateNewFilter(query._query, parsedFilter);
  28580. return new Query(query.firestore, query.converter, queryWithAddedFilter(query._query, parsedFilter));
  28581. }
  28582. _getQueryConstraints() {
  28583. return this._queryConstraints;
  28584. }
  28585. _getOperator() {
  28586. return this.type === 'and' ? "and" /* CompositeOperator.AND */ : "or" /* CompositeOperator.OR */;
  28587. }
  28588. }
  28589. /**
  28590. * Creates a new {@link QueryCompositeFilterConstraint} that is a disjunction of
  28591. * the given filter constraints. A disjunction filter includes a document if it
  28592. * satisfies any of the given filters.
  28593. *
  28594. * @param queryConstraints - Optional. The list of
  28595. * {@link QueryFilterConstraint}s to perform a disjunction for. These must be
  28596. * created with calls to {@link where}, {@link or}, or {@link and}.
  28597. * @returns The newly created {@link QueryCompositeFilterConstraint}.
  28598. * @internal TODO remove this internal tag with OR Query support in the server
  28599. */
  28600. function or(...queryConstraints) {
  28601. // Only support QueryFilterConstraints
  28602. queryConstraints.forEach(queryConstraint => validateQueryFilterConstraint('or', queryConstraint));
  28603. return QueryCompositeFilterConstraint._create("or" /* CompositeOperator.OR */, queryConstraints);
  28604. }
  28605. /**
  28606. * Creates a new {@link QueryCompositeFilterConstraint} that is a conjunction of
  28607. * the given filter constraints. A conjunction filter includes a document if it
  28608. * satisfies all of the given filters.
  28609. *
  28610. * @param queryConstraints - Optional. The list of
  28611. * {@link QueryFilterConstraint}s to perform a conjunction for. These must be
  28612. * created with calls to {@link where}, {@link or}, or {@link and}.
  28613. * @returns The newly created {@link QueryCompositeFilterConstraint}.
  28614. * @internal TODO remove this internal tag with OR Query support in the server
  28615. */
  28616. function and(...queryConstraints) {
  28617. // Only support QueryFilterConstraints
  28618. queryConstraints.forEach(queryConstraint => validateQueryFilterConstraint('and', queryConstraint));
  28619. return QueryCompositeFilterConstraint._create("and" /* CompositeOperator.AND */, queryConstraints);
  28620. }
  28621. /**
  28622. * A `QueryOrderByConstraint` is used to sort the set of documents returned by a
  28623. * Firestore query. `QueryOrderByConstraint`s are created by invoking
  28624. * {@link orderBy} and can then be passed to {@link query} to create a new query
  28625. * instance that also contains this `QueryOrderByConstraint`.
  28626. *
  28627. * Note: Documents that do not contain the orderBy field will not be present in
  28628. * the query result.
  28629. */
  28630. class QueryOrderByConstraint extends QueryConstraint {
  28631. /**
  28632. * @internal
  28633. */
  28634. constructor(_field, _direction) {
  28635. super();
  28636. this._field = _field;
  28637. this._direction = _direction;
  28638. /** The type of this query constraint */
  28639. this.type = 'orderBy';
  28640. }
  28641. static _create(_field, _direction) {
  28642. return new QueryOrderByConstraint(_field, _direction);
  28643. }
  28644. _apply(query) {
  28645. const orderBy = newQueryOrderBy(query._query, this._field, this._direction);
  28646. return new Query(query.firestore, query.converter, queryWithAddedOrderBy(query._query, orderBy));
  28647. }
  28648. }
  28649. /**
  28650. * Creates a {@link QueryOrderByConstraint} that sorts the query result by the
  28651. * specified field, optionally in descending order instead of ascending.
  28652. *
  28653. * Note: Documents that do not contain the specified field will not be present
  28654. * in the query result.
  28655. *
  28656. * @param fieldPath - The field to sort by.
  28657. * @param directionStr - Optional direction to sort by ('asc' or 'desc'). If
  28658. * not specified, order will be ascending.
  28659. * @returns The created {@link QueryOrderByConstraint}.
  28660. */
  28661. function orderBy(fieldPath, directionStr = 'asc') {
  28662. const direction = directionStr;
  28663. const path = fieldPathFromArgument('orderBy', fieldPath);
  28664. return QueryOrderByConstraint._create(path, direction);
  28665. }
  28666. /**
  28667. * A `QueryLimitConstraint` is used to limit the number of documents returned by
  28668. * a Firestore query.
  28669. * `QueryLimitConstraint`s are created by invoking {@link limit} or
  28670. * {@link limitToLast} and can then be passed to {@link query} to create a new
  28671. * query instance that also contains this `QueryLimitConstraint`.
  28672. */
  28673. class QueryLimitConstraint extends QueryConstraint {
  28674. /**
  28675. * @internal
  28676. */
  28677. constructor(
  28678. /** The type of this query constraint */
  28679. type, _limit, _limitType) {
  28680. super();
  28681. this.type = type;
  28682. this._limit = _limit;
  28683. this._limitType = _limitType;
  28684. }
  28685. static _create(type, _limit, _limitType) {
  28686. return new QueryLimitConstraint(type, _limit, _limitType);
  28687. }
  28688. _apply(query) {
  28689. return new Query(query.firestore, query.converter, queryWithLimit(query._query, this._limit, this._limitType));
  28690. }
  28691. }
  28692. /**
  28693. * Creates a {@link QueryLimitConstraint} that only returns the first matching
  28694. * documents.
  28695. *
  28696. * @param limit - The maximum number of items to return.
  28697. * @returns The created {@link QueryLimitConstraint}.
  28698. */
function limit(limit) {
// Validate the limit argument (see validatePositiveNumber) before building
// the constraint.
validatePositiveNumber('limit', limit);
// "F" marks the limit as applying to the first matching documents.
return QueryLimitConstraint._create('limit', limit, "F" /* LimitType.First */);
}
  28703. /**
  28704. * Creates a {@link QueryLimitConstraint} that only returns the last matching
  28705. * documents.
  28706. *
  28707. * You must specify at least one `orderBy` clause for `limitToLast` queries,
  28708. * otherwise an exception will be thrown during execution.
  28709. *
  28710. * @param limit - The maximum number of items to return.
  28711. * @returns The created {@link QueryLimitConstraint}.
  28712. */
function limitToLast(limit) {
// Validate the limit argument (see validatePositiveNumber) before building
// the constraint.
validatePositiveNumber('limitToLast', limit);
// "L" marks the limit as applying to the last matching documents; such
// queries additionally require an explicit orderBy() (enforced elsewhere by
// validateHasExplicitOrderByForLimitToLast).
return QueryLimitConstraint._create('limitToLast', limit, "L" /* LimitType.Last */);
}
  28717. /**
  28718. * A `QueryStartAtConstraint` is used to exclude documents from the start of a
  28719. * result set returned by a Firestore query.
  28720. * `QueryStartAtConstraint`s are created by invoking {@link (startAt:1)} or
  28721. * {@link (startAfter:1)} and can then be passed to {@link query} to create a
  28722. * new query instance that also contains this `QueryStartAtConstraint`.
  28723. */
  28724. class QueryStartAtConstraint extends QueryConstraint {
  28725. /**
  28726. * @internal
  28727. */
  28728. constructor(
  28729. /** The type of this query constraint */
  28730. type, _docOrFields, _inclusive) {
  28731. super();
  28732. this.type = type;
  28733. this._docOrFields = _docOrFields;
  28734. this._inclusive = _inclusive;
  28735. }
  28736. static _create(type, _docOrFields, _inclusive) {
  28737. return new QueryStartAtConstraint(type, _docOrFields, _inclusive);
  28738. }
  28739. _apply(query) {
  28740. const bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._inclusive);
  28741. return new Query(query.firestore, query.converter, queryWithStartAt(query._query, bound));
  28742. }
  28743. }
/**
 * Creates a {@link QueryStartAtConstraint} that modifies the result set to
 * start at the provided document or field values (inclusive), relative to the
 * order of the query.
 */
function startAt(...docOrFields) {
return QueryStartAtConstraint._create('startAt', docOrFields,
/*inclusive=*/ true);
}
/**
 * Creates a {@link QueryStartAtConstraint} that modifies the result set to
 * start after the provided document or field values (exclusive), relative to
 * the order of the query.
 */
function startAfter(...docOrFields) {
return QueryStartAtConstraint._create('startAfter', docOrFields,
/*inclusive=*/ false);
}
  28752. /**
  28753. * A `QueryEndAtConstraint` is used to exclude documents from the end of a
  28754. * result set returned by a Firestore query.
  28755. * `QueryEndAtConstraint`s are created by invoking {@link (endAt:1)} or
  28756. * {@link (endBefore:1)} and can then be passed to {@link query} to create a new
  28757. * query instance that also contains this `QueryEndAtConstraint`.
  28758. */
  28759. class QueryEndAtConstraint extends QueryConstraint {
  28760. /**
  28761. * @internal
  28762. */
  28763. constructor(
  28764. /** The type of this query constraint */
  28765. type, _docOrFields, _inclusive) {
  28766. super();
  28767. this.type = type;
  28768. this._docOrFields = _docOrFields;
  28769. this._inclusive = _inclusive;
  28770. }
  28771. static _create(type, _docOrFields, _inclusive) {
  28772. return new QueryEndAtConstraint(type, _docOrFields, _inclusive);
  28773. }
  28774. _apply(query) {
  28775. const bound = newQueryBoundFromDocOrFields(query, this.type, this._docOrFields, this._inclusive);
  28776. return new Query(query.firestore, query.converter, queryWithEndAt(query._query, bound));
  28777. }
  28778. }
/**
 * Creates a {@link QueryEndAtConstraint} that modifies the result set to end
 * before the provided document or field values (exclusive), relative to the
 * order of the query.
 */
function endBefore(...docOrFields) {
return QueryEndAtConstraint._create('endBefore', docOrFields,
/*inclusive=*/ false);
}
/**
 * Creates a {@link QueryEndAtConstraint} that modifies the result set to end
 * at the provided document or field values (inclusive), relative to the order
 * of the query.
 */
function endAt(...docOrFields) {
return QueryEndAtConstraint._create('endAt', docOrFields,
/*inclusive=*/ true);
}
  28787. /** Helper function to create a bound from a document or fields */
  28788. function newQueryBoundFromDocOrFields(query, methodName, docOrFields, inclusive) {
  28789. docOrFields[0] = getModularInstance(docOrFields[0]);
  28790. if (docOrFields[0] instanceof DocumentSnapshot$1) {
  28791. return newQueryBoundFromDocument(query._query, query.firestore._databaseId, methodName, docOrFields[0]._document, inclusive);
  28792. }
  28793. else {
  28794. const reader = newUserDataReader(query.firestore);
  28795. return newQueryBoundFromFields(query._query, query.firestore._databaseId, reader, methodName, docOrFields, inclusive);
  28796. }
  28797. }
/**
 * Builds a `FieldFilter` for a where()-style constraint, converting the raw
 * user-supplied `value` into its wire representation.
 *
 * Filters on documentId() (a key field) require reference values, so string
 * or DocumentReference inputs are parsed via `parseDocumentIdValue`; all
 * other fields go through `parseQueryValue`.
 */
function newQueryFilter(query, methodName, dataReader, databaseId, fieldPath, op, value) {
let fieldValue;
if (fieldPath.isKeyField()) {
// Array-membership operators are meaningless on the document key.
if (op === "array-contains" /* Operator.ARRAY_CONTAINS */ || op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid Query. You can't perform '${op}' queries on documentId().`);
}
else if (op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */) {
// Disjunctive filter on the key field: each array element must parse to a
// valid document reference.
validateDisjunctiveFilterElements(value, op);
const referenceList = [];
for (const arrayValue of value) {
referenceList.push(parseDocumentIdValue(databaseId, query, arrayValue));
}
fieldValue = { arrayValue: { values: referenceList } };
}
else {
fieldValue = parseDocumentIdValue(databaseId, query, value);
}
}
else {
// Ordinary field: disjunctive operators still constrain the value array.
if (op === "in" /* Operator.IN */ ||
op === "not-in" /* Operator.NOT_IN */ ||
op === "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */) {
validateDisjunctiveFilterElements(value, op);
}
// in/not-in accept an array value; other operators do not.
fieldValue = parseQueryValue(dataReader, methodName, value,
/* allowArrays= */ op === "in" /* Operator.IN */ || op === "not-in" /* Operator.NOT_IN */);
}
const filter = FieldFilter.create(fieldPath, op, fieldValue);
return filter;
}
  28828. function newQueryOrderBy(query, fieldPath, direction) {
  28829. if (query.startAt !== null) {
  28830. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call startAt() or startAfter() before ' +
  28831. 'calling orderBy().');
  28832. }
  28833. if (query.endAt !== null) {
  28834. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You must not call endAt() or endBefore() before ' +
  28835. 'calling orderBy().');
  28836. }
  28837. const orderBy = new OrderBy(fieldPath, direction);
  28838. validateNewOrderBy(query, orderBy);
  28839. return orderBy;
  28840. }
  28841. /**
  28842. * Create a `Bound` from a query and a document.
  28843. *
  28844. * Note that the `Bound` will always include the key of the document
  28845. * and so only the provided document will compare equal to the returned
  28846. * position.
  28847. *
  28848. * Will throw if the document does not contain all fields of the order by
  28849. * of the query or if any of the fields in the order by are an uncommitted
  28850. * server timestamp.
  28851. */
  28852. function newQueryBoundFromDocument(query, databaseId, methodName, doc, inclusive) {
  28853. if (!doc) {
  28854. throw new FirestoreError(Code.NOT_FOUND, `Can't use a DocumentSnapshot that doesn't exist for ` +
  28855. `${methodName}().`);
  28856. }
  28857. const components = [];
  28858. // Because people expect to continue/end a query at the exact document
  28859. // provided, we need to use the implicit sort order rather than the explicit
  28860. // sort order, because it's guaranteed to contain the document key. That way
  28861. // the position becomes unambiguous and the query continues/ends exactly at
  28862. // the provided document. Without the key (by using the explicit sort
  28863. // orders), multiple documents could match the position, yielding duplicate
  28864. // results.
  28865. for (const orderBy of queryOrderBy(query)) {
  28866. if (orderBy.field.isKeyField()) {
  28867. components.push(refValue(databaseId, doc.key));
  28868. }
  28869. else {
  28870. const value = doc.data.field(orderBy.field);
  28871. if (isServerTimestamp(value)) {
  28872. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You are trying to start or end a query using a ' +
  28873. 'document for which the field "' +
  28874. orderBy.field +
  28875. '" is an uncommitted server timestamp. (Since the value of ' +
  28876. 'this field is unknown, you cannot start/end a query with it.)');
  28877. }
  28878. else if (value !== null) {
  28879. components.push(value);
  28880. }
  28881. else {
  28882. const field = orderBy.field.canonicalString();
  28883. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You are trying to start or end a query using a ` +
  28884. `document for which the field '${field}' (used as the ` +
  28885. `orderBy) does not exist.`);
  28886. }
  28887. }
  28888. }
  28889. return new Bound(components, inclusive);
  28890. }
  28891. /**
  28892. * Converts a list of field values to a `Bound` for the given query.
  28893. */
  28894. function newQueryBoundFromFields(query, databaseId, dataReader, methodName, values, inclusive) {
  28895. // Use explicit order by's because it has to match the query the user made
  28896. const orderBy = query.explicitOrderBy;
  28897. if (values.length > orderBy.length) {
  28898. throw new FirestoreError(Code.INVALID_ARGUMENT, `Too many arguments provided to ${methodName}(). ` +
  28899. `The number of arguments must be less than or equal to the ` +
  28900. `number of orderBy() clauses`);
  28901. }
  28902. const components = [];
  28903. for (let i = 0; i < values.length; i++) {
  28904. const rawValue = values[i];
  28905. const orderByComponent = orderBy[i];
  28906. if (orderByComponent.field.isKeyField()) {
  28907. if (typeof rawValue !== 'string') {
  28908. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. Expected a string for document ID in ` +
  28909. `${methodName}(), but got a ${typeof rawValue}`);
  28910. }
  28911. if (!isCollectionGroupQuery(query) && rawValue.indexOf('/') !== -1) {
  28912. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection and ordering by documentId(), ` +
  28913. `the value passed to ${methodName}() must be a plain document ID, but ` +
  28914. `'${rawValue}' contains a slash.`);
  28915. }
  28916. const path = query.path.child(ResourcePath.fromString(rawValue));
  28917. if (!DocumentKey.isDocumentKey(path)) {
  28918. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection group and ordering by ` +
  28919. `documentId(), the value passed to ${methodName}() must result in a ` +
  28920. `valid document path, but '${path}' is not because it contains an odd number ` +
  28921. `of segments.`);
  28922. }
  28923. const key = new DocumentKey(path);
  28924. components.push(refValue(databaseId, key));
  28925. }
  28926. else {
  28927. const wrapped = parseQueryValue(dataReader, methodName, rawValue);
  28928. components.push(wrapped);
  28929. }
  28930. }
  28931. return new Bound(components, inclusive);
  28932. }
  28933. /**
  28934. * Parses the given `documentIdValue` into a `ReferenceValue`, throwing
  28935. * appropriate errors if the value is anything other than a `DocumentReference`
  28936. * or `string`, or if the string is malformed.
  28937. */
  28938. function parseDocumentIdValue(databaseId, query, documentIdValue) {
  28939. documentIdValue = getModularInstance(documentIdValue);
  28940. if (typeof documentIdValue === 'string') {
  28941. if (documentIdValue === '') {
  28942. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. When querying with documentId(), you ' +
  28943. 'must provide a valid document ID, but it was an empty string.');
  28944. }
  28945. if (!isCollectionGroupQuery(query) && documentIdValue.indexOf('/') !== -1) {
  28946. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection by ` +
  28947. `documentId(), you must provide a plain document ID, but ` +
  28948. `'${documentIdValue}' contains a '/' character.`);
  28949. }
  28950. const path = query.path.child(ResourcePath.fromString(documentIdValue));
  28951. if (!DocumentKey.isDocumentKey(path)) {
  28952. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying a collection group by ` +
  28953. `documentId(), the value provided must result in a valid document path, ` +
  28954. `but '${path}' is not because it has an odd number of segments (${path.length}).`);
  28955. }
  28956. return refValue(databaseId, new DocumentKey(path));
  28957. }
  28958. else if (documentIdValue instanceof DocumentReference) {
  28959. return refValue(databaseId, documentIdValue._key);
  28960. }
  28961. else {
  28962. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. When querying with documentId(), you must provide a valid ` +
  28963. `string or a DocumentReference, but it was: ` +
  28964. `${valueDescription(documentIdValue)}.`);
  28965. }
  28966. }
  28967. /**
  28968. * Validates that the value passed into a disjunctive filter satisfies all
  28969. * array requirements.
  28970. */
  28971. function validateDisjunctiveFilterElements(value, operator) {
  28972. if (!Array.isArray(value) || value.length === 0) {
  28973. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid Query. A non-empty array is required for ' +
  28974. `'${operator.toString()}' filters.`);
  28975. }
  28976. if (value.length > 10) {
  28977. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid Query. '${operator.toString()}' filters support a ` +
  28978. 'maximum of 10 elements in the value array.');
  28979. }
  28980. }
  28981. /**
  28982. * Given an operator, returns the set of operators that cannot be used with it.
  28983. *
  28984. * Operators in a query must adhere to the following set of rules:
  28985. * 1. Only one array operator is allowed.
  28986. * 2. Only one disjunctive operator is allowed.
  28987. * 3. `NOT_EQUAL` cannot be used with another `NOT_EQUAL` operator.
  28988. * 4. `NOT_IN` cannot be used with array, disjunctive, or `NOT_EQUAL` operators.
  28989. *
  28990. * Array operators: `ARRAY_CONTAINS`, `ARRAY_CONTAINS_ANY`
  28991. * Disjunctive operators: `IN`, `ARRAY_CONTAINS_ANY`, `NOT_IN`
  28992. */
  28993. function conflictingOps(op) {
  28994. switch (op) {
  28995. case "!=" /* Operator.NOT_EQUAL */:
  28996. return ["!=" /* Operator.NOT_EQUAL */, "not-in" /* Operator.NOT_IN */];
  28997. case "array-contains" /* Operator.ARRAY_CONTAINS */:
  28998. return [
  28999. "array-contains" /* Operator.ARRAY_CONTAINS */,
  29000. "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */,
  29001. "not-in" /* Operator.NOT_IN */
  29002. ];
  29003. case "in" /* Operator.IN */:
  29004. return ["array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */, "in" /* Operator.IN */, "not-in" /* Operator.NOT_IN */];
  29005. case "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */:
  29006. return [
  29007. "array-contains" /* Operator.ARRAY_CONTAINS */,
  29008. "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */,
  29009. "in" /* Operator.IN */,
  29010. "not-in" /* Operator.NOT_IN */
  29011. ];
  29012. case "not-in" /* Operator.NOT_IN */:
  29013. return [
  29014. "array-contains" /* Operator.ARRAY_CONTAINS */,
  29015. "array-contains-any" /* Operator.ARRAY_CONTAINS_ANY */,
  29016. "in" /* Operator.IN */,
  29017. "not-in" /* Operator.NOT_IN */,
  29018. "!=" /* Operator.NOT_EQUAL */
  29019. ];
  29020. default:
  29021. return [];
  29022. }
  29023. }
/**
 * Validates that `fieldFilter` may be added to `query`, enforcing the
 * single-inequality-field rule, the orderBy/inequality-match rule, and the
 * operator-conflict rules defined by `conflictingOps()`. Throws a
 * FirestoreError (INVALID_ARGUMENT) on violation.
 */
function validateNewFieldFilter(query, fieldFilter) {
if (fieldFilter.isInequality()) {
// All inequality filters in a query must target the same field.
const existingInequality = getInequalityFilterField(query);
const newInequality = fieldFilter.field;
if (existingInequality !== null &&
!existingInequality.isEqual(newInequality)) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. All where filters with an inequality' +
' (<, <=, !=, not-in, >, or >=) must be on the same field. But you have' +
` inequality filters on '${existingInequality.toString()}'` +
` and '${newInequality.toString()}'`);
}
// If an orderBy already exists, its first field must match the inequality.
const firstOrderByField = getFirstOrderByField(query);
if (firstOrderByField !== null) {
validateOrderByAndInequalityMatch(query, newInequality, firstOrderByField);
}
}
const conflictingOp = findOpInsideFilters(query.filters, conflictingOps(fieldFilter.op));
if (conflictingOp !== null) {
// Special case when it's a duplicate op to give a slightly clearer error message.
if (conflictingOp === fieldFilter.op) {
throw new FirestoreError(Code.INVALID_ARGUMENT, 'Invalid query. You cannot use more than one ' +
`'${fieldFilter.op.toString()}' filter.`);
}
else {
throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You cannot use '${fieldFilter.op.toString()}' filters ` +
`with '${conflictingOp.toString()}' filters.`);
}
}
}
  29053. function validateNewFilter(query, filter) {
  29054. let testQuery = query;
  29055. const subFilters = filter.getFlattenedFilters();
  29056. for (const subFilter of subFilters) {
  29057. validateNewFieldFilter(testQuery, subFilter);
  29058. testQuery = queryWithAddedFilter(testQuery, subFilter);
  29059. }
  29060. }
  29061. // Checks if any of the provided filter operators are included in the given list of filters and
  29062. // returns the first one that is, or null if none are.
  29063. function findOpInsideFilters(filters, operators) {
  29064. for (const filter of filters) {
  29065. for (const fieldFilter of filter.getFlattenedFilters()) {
  29066. if (operators.indexOf(fieldFilter.op) >= 0) {
  29067. return fieldFilter.op;
  29068. }
  29069. }
  29070. }
  29071. return null;
  29072. }
  29073. function validateNewOrderBy(query, orderBy) {
  29074. if (getFirstOrderByField(query) === null) {
  29075. // This is the first order by. It must match any inequality.
  29076. const inequalityField = getInequalityFilterField(query);
  29077. if (inequalityField !== null) {
  29078. validateOrderByAndInequalityMatch(query, inequalityField, orderBy.field);
  29079. }
  29080. }
  29081. }
  29082. function validateOrderByAndInequalityMatch(baseQuery, inequality, orderBy) {
  29083. if (!orderBy.isEqual(inequality)) {
  29084. throw new FirestoreError(Code.INVALID_ARGUMENT, `Invalid query. You have a where filter with an inequality ` +
  29085. `(<, <=, !=, not-in, >, or >=) on field '${inequality.toString()}' ` +
  29086. `and so you must also use '${inequality.toString()}' ` +
  29087. `as your first argument to orderBy(), but your first orderBy() ` +
  29088. `is on field '${orderBy.toString()}' instead.`);
  29089. }
  29090. }
  29091. function validateQueryFilterConstraint(functionName, queryConstraint) {
  29092. if (!(queryConstraint instanceof QueryFieldFilterConstraint) &&
  29093. !(queryConstraint instanceof QueryCompositeFilterConstraint)) {
  29094. throw new FirestoreError(Code.INVALID_ARGUMENT, `Function ${functionName}() requires AppliableConstraints created with a call to 'where(...)', 'or(...)', or 'and(...)'.`);
  29095. }
  29096. }
  29097. function validateQueryConstraintArray(queryConstraint) {
  29098. const compositeFilterCount = queryConstraint.filter(filter => filter instanceof QueryCompositeFilterConstraint).length;
  29099. const fieldFilterCount = queryConstraint.filter(filter => filter instanceof QueryFieldFilterConstraint).length;
  29100. if (compositeFilterCount > 1 ||
  29101. (compositeFilterCount > 0 && fieldFilterCount > 0)) {
  29102. throw new FirestoreError(Code.INVALID_ARGUMENT, 'InvalidQuery. When using composite filters, you cannot use ' +
  29103. 'more than one filter at the top level. Consider nesting the multiple ' +
  29104. 'filters within an `and(...)` statement. For example: ' +
  29105. 'change `query(query, where(...), or(...))` to ' +
  29106. '`query(query, and(where(...), or(...)))`.');
  29107. }
  29108. }
  29109. /**
  29110. * @license
  29111. * Copyright 2020 Google LLC
  29112. *
  29113. * Licensed under the Apache License, Version 2.0 (the "License");
  29114. * you may not use this file except in compliance with the License.
  29115. * You may obtain a copy of the License at
  29116. *
  29117. * http://www.apache.org/licenses/LICENSE-2.0
  29118. *
  29119. * Unless required by applicable law or agreed to in writing, software
  29120. * distributed under the License is distributed on an "AS IS" BASIS,
  29121. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29122. * See the License for the specific language governing permissions and
  29123. * limitations under the License.
  29124. */
  29125. /**
  29126. * Converts Firestore's internal types to the JavaScript types that we expose
  29127. * to the user.
  29128. *
  29129. * @internal
  29130. */
class AbstractUserDataWriter {
    /**
     * Converts a Firestore proto `Value` into the JavaScript value exposed to
     * users, dispatching on the value's internal type order.
     *
     * @param value - The proto `Value` to convert.
     * @param serverTimestampBehavior - How unresolved server timestamps are
     *   surfaced: 'none' (return null), 'previous', or 'estimate'.
     */
    convertValue(value, serverTimestampBehavior = 'none') {
        switch (typeOrder(value)) {
            case 0 /* TypeOrder.NullValue */:
                return null;
            case 1 /* TypeOrder.BooleanValue */:
                return value.booleanValue;
            case 2 /* TypeOrder.NumberValue */:
                // Exactly one of integerValue/doubleValue is set on the proto,
                // so `||` selects whichever is present.
                return normalizeNumber(value.integerValue || value.doubleValue);
            case 3 /* TypeOrder.TimestampValue */:
                return this.convertTimestamp(value.timestampValue);
            case 4 /* TypeOrder.ServerTimestampValue */:
                return this.convertServerTimestamp(value, serverTimestampBehavior);
            case 5 /* TypeOrder.StringValue */:
                return value.stringValue;
            case 6 /* TypeOrder.BlobValue */:
                // convertBytes is supplied by subclasses (SDK-specific Bytes type).
                return this.convertBytes(normalizeByteString(value.bytesValue));
            case 7 /* TypeOrder.RefValue */:
                // convertReference is supplied by subclasses.
                return this.convertReference(value.referenceValue);
            case 8 /* TypeOrder.GeoPointValue */:
                return this.convertGeoPoint(value.geoPointValue);
            case 9 /* TypeOrder.ArrayValue */:
                return this.convertArray(value.arrayValue, serverTimestampBehavior);
            case 10 /* TypeOrder.ObjectValue */:
                return this.convertObject(value.mapValue, serverTimestampBehavior);
            default:
                // Unknown type order: internal invariant violation.
                throw fail();
        }
    }
    // Recursively converts a proto mapValue into a plain JS object.
    convertObject(mapValue, serverTimestampBehavior) {
        const result = {};
        forEach(mapValue.fields, (key, value) => {
            result[key] = this.convertValue(value, serverTimestampBehavior);
        });
        return result;
    }
    // Converts a proto LatLng into the public GeoPoint type.
    convertGeoPoint(value) {
        return new GeoPoint(normalizeNumber(value.latitude), normalizeNumber(value.longitude));
    }
    // Recursively converts a proto arrayValue; an absent `values` field means
    // an empty array.
    convertArray(arrayValue, serverTimestampBehavior) {
        return (arrayValue.values || []).map(value => this.convertValue(value, serverTimestampBehavior));
    }
    // Resolves a locally-pending server timestamp per the requested behavior.
    convertServerTimestamp(value, serverTimestampBehavior) {
        switch (serverTimestampBehavior) {
            case 'previous':
                // Use the value the field held before this write; that value may
                // itself be a pending server timestamp, hence the recursion.
                const previousValue = getPreviousValue(value);
                if (previousValue == null) {
                    return null;
                }
                return this.convertValue(previousValue, serverTimestampBehavior);
            case 'estimate':
                // Approximate with the client's local write time.
                return this.convertTimestamp(getLocalWriteTime(value));
            default:
                // 'none': pending server timestamps read as null.
                return null;
        }
    }
    // Converts a proto timestamp (string or {seconds, nanos}) to Timestamp.
    convertTimestamp(value) {
        const normalizedValue = normalizeTimestamp(value);
        return new Timestamp(normalizedValue.seconds, normalizedValue.nanos);
    }
    /**
     * Parses a fully-qualified resource name into a DocumentKey.
     *
     * Resource names look like
     * `projects/<project>/databases/<db>/documents/<path...>`; segments 1 and 3
     * carry the project and database ids, and segments 5+ the document path.
     */
    convertDocumentKey(name, expectedDatabaseId) {
        const resourcePath = ResourcePath.fromString(name);
        hardAssert(isValidResourceName(resourcePath));
        const databaseId = new DatabaseId(resourcePath.get(1), resourcePath.get(3));
        const key = new DocumentKey(resourcePath.popFirst(5));
        if (!databaseId.isEqual(expectedDatabaseId)) {
            // TODO(b/64130202): Somehow support foreign references.
            // Foreign-database references are not supported; log and reinterpret
            // the key in the current database rather than failing the read.
            logError(`Document ${key} contains a document ` +
                `reference within a different database (` +
                `${databaseId.projectId}/${databaseId.database}) which is not ` +
                `supported. It will be treated as a reference in the current ` +
                `database (${expectedDatabaseId.projectId}/${expectedDatabaseId.database}) ` +
                `instead.`);
        }
        return key;
    }
}
  29208. /**
  29209. * @license
  29210. * Copyright 2020 Google LLC
  29211. *
  29212. * Licensed under the Apache License, Version 2.0 (the "License");
  29213. * you may not use this file except in compliance with the License.
  29214. * You may obtain a copy of the License at
  29215. *
  29216. * http://www.apache.org/licenses/LICENSE-2.0
  29217. *
  29218. * Unless required by applicable law or agreed to in writing, software
  29219. * distributed under the License is distributed on an "AS IS" BASIS,
  29220. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29221. * See the License for the specific language governing permissions and
  29222. * limitations under the License.
  29223. */
  29224. /**
  29225. * Converts custom model object of type T into `DocumentData` by applying the
  29226. * converter if it exists.
  29227. *
  29228. * This function is used when converting user objects to `DocumentData`
  29229. * because we want to provide the user with a more specific error message if
  29230. * their `set()` or fails due to invalid data originating from a `toFirestore()`
  29231. * call.
  29232. */
  29233. function applyFirestoreDataConverter(converter, value, options) {
  29234. let convertedValue;
  29235. if (converter) {
  29236. if (options && (options.merge || options.mergeFields)) {
  29237. // Cast to `any` in order to satisfy the union type constraint on
  29238. // toFirestore().
  29239. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  29240. convertedValue = converter.toFirestore(value, options);
  29241. }
  29242. else {
  29243. convertedValue = converter.toFirestore(value);
  29244. }
  29245. }
  29246. else {
  29247. convertedValue = value;
  29248. }
  29249. return convertedValue;
  29250. }
  29251. class LiteUserDataWriter extends AbstractUserDataWriter {
  29252. constructor(firestore) {
  29253. super();
  29254. this.firestore = firestore;
  29255. }
  29256. convertBytes(bytes) {
  29257. return new Bytes(bytes);
  29258. }
  29259. convertReference(name) {
  29260. const key = this.convertDocumentKey(name, this.firestore._databaseId);
  29261. return new DocumentReference(this.firestore, /* converter= */ null, key);
  29262. }
  29263. }
  29264. /**
  29265. * @license
  29266. * Copyright 2020 Google LLC
  29267. *
  29268. * Licensed under the Apache License, Version 2.0 (the "License");
  29269. * you may not use this file except in compliance with the License.
  29270. * You may obtain a copy of the License at
  29271. *
  29272. * http://www.apache.org/licenses/LICENSE-2.0
  29273. *
  29274. * Unless required by applicable law or agreed to in writing, software
  29275. * distributed under the License is distributed on an "AS IS" BASIS,
  29276. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29277. * See the License for the specific language governing permissions and
  29278. * limitations under the License.
  29279. */
  29280. /**
  29281. * Metadata about a snapshot, describing the state of the snapshot.
  29282. */
  29283. class SnapshotMetadata {
  29284. /** @hideconstructor */
  29285. constructor(hasPendingWrites, fromCache) {
  29286. this.hasPendingWrites = hasPendingWrites;
  29287. this.fromCache = fromCache;
  29288. }
  29289. /**
  29290. * Returns true if this `SnapshotMetadata` is equal to the provided one.
  29291. *
  29292. * @param other - The `SnapshotMetadata` to compare against.
  29293. * @returns true if this `SnapshotMetadata` is equal to the provided one.
  29294. */
  29295. isEqual(other) {
  29296. return (this.hasPendingWrites === other.hasPendingWrites &&
  29297. this.fromCache === other.fromCache);
  29298. }
  29299. }
  29300. /**
  29301. * A `DocumentSnapshot` contains data read from a document in your Firestore
  29302. * database. The data can be extracted with `.data()` or `.get(<field>)` to
  29303. * get a specific field.
  29304. *
  29305. * For a `DocumentSnapshot` that points to a non-existing document, any data
  29306. * access will return 'undefined'. You can use the `exists()` method to
  29307. * explicitly verify a document's existence.
  29308. */
  29309. class DocumentSnapshot extends DocumentSnapshot$1 {
  29310. /** @hideconstructor protected */
  29311. constructor(_firestore, userDataWriter, key, document, metadata, converter) {
  29312. super(_firestore, userDataWriter, key, document, converter);
  29313. this._firestore = _firestore;
  29314. this._firestoreImpl = _firestore;
  29315. this.metadata = metadata;
  29316. }
  29317. /**
  29318. * Returns whether or not the data exists. True if the document exists.
  29319. */
  29320. exists() {
  29321. return super.exists();
  29322. }
  29323. /**
  29324. * Retrieves all fields in the document as an `Object`. Returns `undefined` if
  29325. * the document doesn't exist.
  29326. *
  29327. * By default, `serverTimestamp()` values that have not yet been
  29328. * set to their final value will be returned as `null`. You can override
  29329. * this by passing an options object.
  29330. *
  29331. * @param options - An options object to configure how data is retrieved from
  29332. * the snapshot (for example the desired behavior for server timestamps that
  29333. * have not yet been set to their final value).
  29334. * @returns An `Object` containing all fields in the document or `undefined` if
  29335. * the document doesn't exist.
  29336. */
  29337. data(options = {}) {
  29338. if (!this._document) {
  29339. return undefined;
  29340. }
  29341. else if (this._converter) {
  29342. // We only want to use the converter and create a new DocumentSnapshot
  29343. // if a converter has been provided.
  29344. const snapshot = new QueryDocumentSnapshot(this._firestore, this._userDataWriter, this._key, this._document, this.metadata,
  29345. /* converter= */ null);
  29346. return this._converter.fromFirestore(snapshot, options);
  29347. }
  29348. else {
  29349. return this._userDataWriter.convertValue(this._document.data.value, options.serverTimestamps);
  29350. }
  29351. }
  29352. /**
  29353. * Retrieves the field specified by `fieldPath`. Returns `undefined` if the
  29354. * document or field doesn't exist.
  29355. *
  29356. * By default, a `serverTimestamp()` that has not yet been set to
  29357. * its final value will be returned as `null`. You can override this by
  29358. * passing an options object.
  29359. *
  29360. * @param fieldPath - The path (for example 'foo' or 'foo.bar') to a specific
  29361. * field.
  29362. * @param options - An options object to configure how the field is retrieved
  29363. * from the snapshot (for example the desired behavior for server timestamps
  29364. * that have not yet been set to their final value).
  29365. * @returns The data at the specified field location or undefined if no such
  29366. * field exists in the document.
  29367. */
  29368. // We are using `any` here to avoid an explicit cast by our users.
  29369. // eslint-disable-next-line @typescript-eslint/no-explicit-any
  29370. get(fieldPath, options = {}) {
  29371. if (this._document) {
  29372. const value = this._document.data.field(fieldPathFromArgument('DocumentSnapshot.get', fieldPath));
  29373. if (value !== null) {
  29374. return this._userDataWriter.convertValue(value, options.serverTimestamps);
  29375. }
  29376. }
  29377. return undefined;
  29378. }
  29379. }
  29380. /**
  29381. * A `QueryDocumentSnapshot` contains data read from a document in your
  29382. * Firestore database as part of a query. The document is guaranteed to exist
  29383. * and its data can be extracted with `.data()` or `.get(<field>)` to get a
  29384. * specific field.
  29385. *
  29386. * A `QueryDocumentSnapshot` offers the same API surface as a
  29387. * `DocumentSnapshot`. Since query results contain only existing documents, the
  29388. * `exists` property will always be true and `data()` will never return
  29389. * 'undefined'.
  29390. */
  29391. class QueryDocumentSnapshot extends DocumentSnapshot {
  29392. /**
  29393. * Retrieves all fields in the document as an `Object`.
  29394. *
  29395. * By default, `serverTimestamp()` values that have not yet been
  29396. * set to their final value will be returned as `null`. You can override
  29397. * this by passing an options object.
  29398. *
  29399. * @override
  29400. * @param options - An options object to configure how data is retrieved from
  29401. * the snapshot (for example the desired behavior for server timestamps that
  29402. * have not yet been set to their final value).
  29403. * @returns An `Object` containing all fields in the document.
  29404. */
  29405. data(options = {}) {
  29406. return super.data(options);
  29407. }
  29408. }
  29409. /**
  29410. * A `QuerySnapshot` contains zero or more `DocumentSnapshot` objects
  29411. * representing the results of a query. The documents can be accessed as an
  29412. * array via the `docs` property or enumerated using the `forEach` method. The
  29413. * number of documents can be determined via the `empty` and `size`
  29414. * properties.
  29415. */
  29416. class QuerySnapshot {
  29417. /** @hideconstructor */
  29418. constructor(_firestore, _userDataWriter, query, _snapshot) {
  29419. this._firestore = _firestore;
  29420. this._userDataWriter = _userDataWriter;
  29421. this._snapshot = _snapshot;
  29422. this.metadata = new SnapshotMetadata(_snapshot.hasPendingWrites, _snapshot.fromCache);
  29423. this.query = query;
  29424. }
  29425. /** An array of all the documents in the `QuerySnapshot`. */
  29426. get docs() {
  29427. const result = [];
  29428. this.forEach(doc => result.push(doc));
  29429. return result;
  29430. }
  29431. /** The number of documents in the `QuerySnapshot`. */
  29432. get size() {
  29433. return this._snapshot.docs.size;
  29434. }
  29435. /** True if there are no documents in the `QuerySnapshot`. */
  29436. get empty() {
  29437. return this.size === 0;
  29438. }
  29439. /**
  29440. * Enumerates all of the documents in the `QuerySnapshot`.
  29441. *
  29442. * @param callback - A callback to be called with a `QueryDocumentSnapshot` for
  29443. * each document in the snapshot.
  29444. * @param thisArg - The `this` binding for the callback.
  29445. */
  29446. forEach(callback, thisArg) {
  29447. this._snapshot.docs.forEach(doc => {
  29448. callback.call(thisArg, new QueryDocumentSnapshot(this._firestore, this._userDataWriter, doc.key, doc, new SnapshotMetadata(this._snapshot.mutatedKeys.has(doc.key), this._snapshot.fromCache), this.query.converter));
  29449. });
  29450. }
  29451. /**
  29452. * Returns an array of the documents changes since the last snapshot. If this
  29453. * is the first snapshot, all documents will be in the list as 'added'
  29454. * changes.
  29455. *
  29456. * @param options - `SnapshotListenOptions` that control whether metadata-only
  29457. * changes (i.e. only `DocumentSnapshot.metadata` changed) should trigger
  29458. * snapshot events.
  29459. */
  29460. docChanges(options = {}) {
  29461. const includeMetadataChanges = !!options.includeMetadataChanges;
  29462. if (includeMetadataChanges && this._snapshot.excludesMetadataChanges) {
  29463. throw new FirestoreError(Code.INVALID_ARGUMENT, 'To include metadata changes with your document changes, you must ' +
  29464. 'also pass { includeMetadataChanges:true } to onSnapshot().');
  29465. }
  29466. if (!this._cachedChanges ||
  29467. this._cachedChangesIncludeMetadataChanges !== includeMetadataChanges) {
  29468. this._cachedChanges = changesFromSnapshot(this, includeMetadataChanges);
  29469. this._cachedChangesIncludeMetadataChanges = includeMetadataChanges;
  29470. }
  29471. return this._cachedChanges;
  29472. }
  29473. }
/** Calculates the array of `DocumentChange`s for a given `ViewSnapshot`. */
function changesFromSnapshot(querySnapshot, includeMetadataChanges) {
    if (querySnapshot._snapshot.oldDocs.isEmpty()) {
        // Special case: the very first snapshot. Every document is reported as
        // an 'added' change in query order, with no previous index.
        let index = 0;
        return querySnapshot._snapshot.docChanges.map(change => {
            const doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
            // NOTE(review): no-op expression statement (compiler artifact); has
            // no runtime effect.
            change.doc;
            return {
                type: 'added',
                doc,
                oldIndex: -1,
                newIndex: index++
            };
        });
    }
    else {
        // A `DocumentSet` that is updated incrementally as changes are applied to use
        // to lookup the index of a document.
        let indexTracker = querySnapshot._snapshot.oldDocs;
        return querySnapshot._snapshot.docChanges
            .filter(change => includeMetadataChanges || change.type !== 3 /* ChangeType.Metadata */)
            .map(change => {
            const doc = new QueryDocumentSnapshot(querySnapshot._firestore, querySnapshot._userDataWriter, change.doc.key, change.doc, new SnapshotMetadata(querySnapshot._snapshot.mutatedKeys.has(change.doc.key), querySnapshot._snapshot.fromCache), querySnapshot.query.converter);
            let oldIndex = -1;
            let newIndex = -1;
            if (change.type !== 0 /* ChangeType.Added */) {
                // The document existed before: record its old position, then
                // remove it so subsequent index lookups reflect the updated set.
                oldIndex = indexTracker.indexOf(change.doc.key);
                indexTracker = indexTracker.delete(change.doc.key);
            }
            if (change.type !== 1 /* ChangeType.Removed */) {
                // The document still exists: re-insert it and record where it
                // now sorts within the incrementally updated set.
                indexTracker = indexTracker.add(change.doc);
                newIndex = indexTracker.indexOf(change.doc.key);
            }
            return {
                type: resultChangeType(change.type),
                doc,
                oldIndex,
                newIndex
            };
        });
    }
}
  29516. function resultChangeType(type) {
  29517. switch (type) {
  29518. case 0 /* ChangeType.Added */:
  29519. return 'added';
  29520. case 2 /* ChangeType.Modified */:
  29521. case 3 /* ChangeType.Metadata */:
  29522. return 'modified';
  29523. case 1 /* ChangeType.Removed */:
  29524. return 'removed';
  29525. default:
  29526. return fail();
  29527. }
  29528. }
  29529. // TODO(firestoreexp): Add tests for snapshotEqual with different snapshot
  29530. // metadata
  29531. /**
  29532. * Returns true if the provided snapshots are equal.
  29533. *
  29534. * @param left - A snapshot to compare.
  29535. * @param right - A snapshot to compare.
  29536. * @returns true if the snapshots are equal.
  29537. */
  29538. function snapshotEqual(left, right) {
  29539. if (left instanceof DocumentSnapshot && right instanceof DocumentSnapshot) {
  29540. return (left._firestore === right._firestore &&
  29541. left._key.isEqual(right._key) &&
  29542. (left._document === null
  29543. ? right._document === null
  29544. : left._document.isEqual(right._document)) &&
  29545. left._converter === right._converter);
  29546. }
  29547. else if (left instanceof QuerySnapshot && right instanceof QuerySnapshot) {
  29548. return (left._firestore === right._firestore &&
  29549. queryEqual(left.query, right.query) &&
  29550. left.metadata.isEqual(right.metadata) &&
  29551. left._snapshot.isEqual(right._snapshot));
  29552. }
  29553. return false;
  29554. }
  29555. /**
  29556. * @license
  29557. * Copyright 2020 Google LLC
  29558. *
  29559. * Licensed under the Apache License, Version 2.0 (the "License");
  29560. * you may not use this file except in compliance with the License.
  29561. * You may obtain a copy of the License at
  29562. *
  29563. * http://www.apache.org/licenses/LICENSE-2.0
  29564. *
  29565. * Unless required by applicable law or agreed to in writing, software
  29566. * distributed under the License is distributed on an "AS IS" BASIS,
  29567. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29568. * See the License for the specific language governing permissions and
  29569. * limitations under the License.
  29570. */
  29571. /**
  29572. * Reads the document referred to by this `DocumentReference`.
  29573. *
  29574. * Note: `getDoc()` attempts to provide up-to-date data when possible by waiting
  29575. * for data from the server, but it may return cached data or fail if you are
  29576. * offline and the server cannot be reached. To specify this behavior, invoke
  29577. * {@link getDocFromCache} or {@link getDocFromServer}.
  29578. *
  29579. * @param reference - The reference of the document to fetch.
  29580. * @returns A Promise resolved with a `DocumentSnapshot` containing the
  29581. * current document contents.
  29582. */
  29583. function getDoc(reference) {
  29584. reference = cast(reference, DocumentReference);
  29585. const firestore = cast(reference.firestore, Firestore);
  29586. const client = ensureFirestoreConfigured(firestore);
  29587. return firestoreClientGetDocumentViaSnapshotListener(client, reference._key).then(snapshot => convertToDocSnapshot(firestore, reference, snapshot));
  29588. }
  29589. class ExpUserDataWriter extends AbstractUserDataWriter {
  29590. constructor(firestore) {
  29591. super();
  29592. this.firestore = firestore;
  29593. }
  29594. convertBytes(bytes) {
  29595. return new Bytes(bytes);
  29596. }
  29597. convertReference(name) {
  29598. const key = this.convertDocumentKey(name, this.firestore._databaseId);
  29599. return new DocumentReference(this.firestore, /* converter= */ null, key);
  29600. }
  29601. }
  29602. /**
  29603. * Reads the document referred to by this `DocumentReference` from cache.
  29604. * Returns an error if the document is not currently cached.
  29605. *
  29606. * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
  29607. * current document contents.
  29608. */
  29609. function getDocFromCache(reference) {
  29610. reference = cast(reference, DocumentReference);
  29611. const firestore = cast(reference.firestore, Firestore);
  29612. const client = ensureFirestoreConfigured(firestore);
  29613. const userDataWriter = new ExpUserDataWriter(firestore);
  29614. return firestoreClientGetDocumentFromLocalCache(client, reference._key).then(doc => new DocumentSnapshot(firestore, userDataWriter, reference._key, doc, new SnapshotMetadata(doc !== null && doc.hasLocalMutations,
  29615. /* fromCache= */ true), reference.converter));
  29616. }
  29617. /**
  29618. * Reads the document referred to by this `DocumentReference` from the server.
  29619. * Returns an error if the network is not available.
  29620. *
  29621. * @returns A `Promise` resolved with a `DocumentSnapshot` containing the
  29622. * current document contents.
  29623. */
  29624. function getDocFromServer(reference) {
  29625. reference = cast(reference, DocumentReference);
  29626. const firestore = cast(reference.firestore, Firestore);
  29627. const client = ensureFirestoreConfigured(firestore);
  29628. return firestoreClientGetDocumentViaSnapshotListener(client, reference._key, {
  29629. source: 'server'
  29630. }).then(snapshot => convertToDocSnapshot(firestore, reference, snapshot));
  29631. }
  29632. /**
  29633. * Executes the query and returns the results as a `QuerySnapshot`.
  29634. *
  29635. * Note: `getDocs()` attempts to provide up-to-date data when possible by
  29636. * waiting for data from the server, but it may return cached data or fail if
  29637. * you are offline and the server cannot be reached. To specify this behavior,
  29638. * invoke {@link getDocsFromCache} or {@link getDocsFromServer}.
  29639. *
  29640. * @returns A `Promise` that will be resolved with the results of the query.
  29641. */
  29642. function getDocs(query) {
  29643. query = cast(query, Query);
  29644. const firestore = cast(query.firestore, Firestore);
  29645. const client = ensureFirestoreConfigured(firestore);
  29646. const userDataWriter = new ExpUserDataWriter(firestore);
  29647. validateHasExplicitOrderByForLimitToLast(query._query);
  29648. return firestoreClientGetDocumentsViaSnapshotListener(client, query._query).then(snapshot => new QuerySnapshot(firestore, userDataWriter, query, snapshot));
  29649. }
  29650. /**
  29651. * Executes the query and returns the results as a `QuerySnapshot` from cache.
  29652. * Returns an empty result set if no documents matching the query are currently
  29653. * cached.
  29654. *
  29655. * @returns A `Promise` that will be resolved with the results of the query.
  29656. */
  29657. function getDocsFromCache(query) {
  29658. query = cast(query, Query);
  29659. const firestore = cast(query.firestore, Firestore);
  29660. const client = ensureFirestoreConfigured(firestore);
  29661. const userDataWriter = new ExpUserDataWriter(firestore);
  29662. return firestoreClientGetDocumentsFromLocalCache(client, query._query).then(snapshot => new QuerySnapshot(firestore, userDataWriter, query, snapshot));
  29663. }
  29664. /**
  29665. * Executes the query and returns the results as a `QuerySnapshot` from the
  29666. * server. Returns an error if the network is not available.
  29667. *
  29668. * @returns A `Promise` that will be resolved with the results of the query.
  29669. */
  29670. function getDocsFromServer(query) {
  29671. query = cast(query, Query);
  29672. const firestore = cast(query.firestore, Firestore);
  29673. const client = ensureFirestoreConfigured(firestore);
  29674. const userDataWriter = new ExpUserDataWriter(firestore);
  29675. return firestoreClientGetDocumentsViaSnapshotListener(client, query._query, {
  29676. source: 'server'
  29677. }).then(snapshot => new QuerySnapshot(firestore, userDataWriter, query, snapshot));
  29678. }
  29679. function setDoc(reference, data, options) {
  29680. reference = cast(reference, DocumentReference);
  29681. const firestore = cast(reference.firestore, Firestore);
  29682. const convertedValue = applyFirestoreDataConverter(reference.converter, data, options);
  29683. const dataReader = newUserDataReader(firestore);
  29684. const parsed = parseSetData(dataReader, 'setDoc', reference._key, convertedValue, reference.converter !== null, options);
  29685. const mutation = parsed.toMutation(reference._key, Precondition.none());
  29686. return executeWrite(firestore, [mutation]);
  29687. }
  29688. function updateDoc(reference, fieldOrUpdateData, value, ...moreFieldsAndValues) {
  29689. reference = cast(reference, DocumentReference);
  29690. const firestore = cast(reference.firestore, Firestore);
  29691. const dataReader = newUserDataReader(firestore);
  29692. // For Compat types, we have to "extract" the underlying types before
  29693. // performing validation.
  29694. fieldOrUpdateData = getModularInstance(fieldOrUpdateData);
  29695. let parsed;
  29696. if (typeof fieldOrUpdateData === 'string' ||
  29697. fieldOrUpdateData instanceof FieldPath) {
  29698. parsed = parseUpdateVarargs(dataReader, 'updateDoc', reference._key, fieldOrUpdateData, value, moreFieldsAndValues);
  29699. }
  29700. else {
  29701. parsed = parseUpdateData(dataReader, 'updateDoc', reference._key, fieldOrUpdateData);
  29702. }
  29703. const mutation = parsed.toMutation(reference._key, Precondition.exists(true));
  29704. return executeWrite(firestore, [mutation]);
  29705. }
  29706. /**
  29707. * Deletes the document referred to by the specified `DocumentReference`.
  29708. *
  29709. * @param reference - A reference to the document to delete.
  29710. * @returns A Promise resolved once the document has been successfully
  29711. * deleted from the backend (note that it won't resolve while you're offline).
  29712. */
  29713. function deleteDoc(reference) {
  29714. const firestore = cast(reference.firestore, Firestore);
  29715. const mutations = [new DeleteMutation(reference._key, Precondition.none())];
  29716. return executeWrite(firestore, mutations);
  29717. }
  29718. /**
  29719. * Add a new document to specified `CollectionReference` with the given data,
  29720. * assigning it a document ID automatically.
  29721. *
  29722. * @param reference - A reference to the collection to add this document to.
  29723. * @param data - An Object containing the data for the new document.
  29724. * @returns A `Promise` resolved with a `DocumentReference` pointing to the
  29725. * newly created document after it has been written to the backend (Note that it
  29726. * won't resolve while you're offline).
  29727. */
  29728. function addDoc(reference, data) {
  29729. const firestore = cast(reference.firestore, Firestore);
  29730. const docRef = doc(reference);
  29731. const convertedValue = applyFirestoreDataConverter(reference.converter, data);
  29732. const dataReader = newUserDataReader(reference.firestore);
  29733. const parsed = parseSetData(dataReader, 'addDoc', docRef._key, convertedValue, reference.converter !== null, {});
  29734. const mutation = parsed.toMutation(docRef._key, Precondition.exists(false));
  29735. return executeWrite(firestore, [mutation]).then(() => docRef);
  29736. }
/**
 * Attaches a snapshot listener to a `DocumentReference` or `Query`.
 *
 * Overloaded: after the reference, callers may pass an optional
 * `SnapshotListenOptions` object, then either an observer object
 * ({ next, error, complete }) or separate next/error/complete callbacks.
 * The positional `args` array is normalized in place so that
 * args[currArg..currArg+2] always hold next/error/complete functions.
 *
 * @returns An unsubscribe function that removes the listener.
 */
function onSnapshot(reference, ...args) {
    var _a, _b, _c;
    reference = getModularInstance(reference);
    let options = {
        includeMetadataChanges: false
    };
    // `currArg` tracks where the callback arguments start: if the first
    // vararg is an options object (not an observer), consume it first.
    let currArg = 0;
    if (typeof args[currArg] === 'object' && !isPartialObserver(args[currArg])) {
        options = args[currArg];
        currArg++;
    }
    const internalOptions = {
        includeMetadataChanges: options.includeMetadataChanges
    };
    // Observer form: unpack next/error/complete into positional slots,
    // binding each to the observer so its `this` is preserved.
    if (isPartialObserver(args[currArg])) {
        const userObserver = args[currArg];
        args[currArg] = (_a = userObserver.next) === null || _a === void 0 ? void 0 : _a.bind(userObserver);
        args[currArg + 1] = (_b = userObserver.error) === null || _b === void 0 ? void 0 : _b.bind(userObserver);
        args[currArg + 2] = (_c = userObserver.complete) === null || _c === void 0 ? void 0 : _c.bind(userObserver);
    }
    let observer;
    let firestore;
    let internalQuery;
    if (reference instanceof DocumentReference) {
        // Document listener: listen to a single-document query over the key's
        // path and convert each view snapshot to a DocumentSnapshot.
        firestore = cast(reference.firestore, Firestore);
        internalQuery = newQueryForPath(reference._key.path);
        observer = {
            next: snapshot => {
                if (args[currArg]) {
                    args[currArg](convertToDocSnapshot(firestore, reference, snapshot));
                }
            },
            error: args[currArg + 1],
            complete: args[currArg + 2]
        };
    }
    else {
        // Query listener: wrap each view snapshot in a QuerySnapshot.
        const query = cast(reference, Query);
        firestore = cast(query.firestore, Firestore);
        internalQuery = query._query;
        const userDataWriter = new ExpUserDataWriter(firestore);
        observer = {
            next: snapshot => {
                if (args[currArg]) {
                    args[currArg](new QuerySnapshot(firestore, userDataWriter, query, snapshot));
                }
            },
            error: args[currArg + 1],
            complete: args[currArg + 2]
        };
        // limitToLast() queries require an explicit orderBy to be reversible.
        validateHasExplicitOrderByForLimitToLast(reference._query);
    }
    const client = ensureFirestoreConfigured(firestore);
    return firestoreClientListen(client, internalQuery, internalOptions, observer);
}
  29792. function onSnapshotsInSync(firestore, arg) {
  29793. firestore = cast(firestore, Firestore);
  29794. const client = ensureFirestoreConfigured(firestore);
  29795. const observer = isPartialObserver(arg)
  29796. ? arg
  29797. : {
  29798. next: arg
  29799. };
  29800. return firestoreClientAddSnapshotsInSyncListener(client, observer);
  29801. }
  29802. /**
  29803. * Locally writes `mutations` on the async queue.
  29804. * @internal
  29805. */
  29806. function executeWrite(firestore, mutations) {
  29807. const client = ensureFirestoreConfigured(firestore);
  29808. return firestoreClientWrite(client, mutations);
  29809. }
  29810. /**
  29811. * Converts a {@link ViewSnapshot} that contains the single document specified by `ref`
  29812. * to a {@link DocumentSnapshot}.
  29813. */
  29814. function convertToDocSnapshot(firestore, ref, snapshot) {
  29815. const doc = snapshot.docs.get(ref._key);
  29816. const userDataWriter = new ExpUserDataWriter(firestore);
  29817. return new DocumentSnapshot(firestore, userDataWriter, ref._key, doc, new SnapshotMetadata(snapshot.hasPendingWrites, snapshot.fromCache), ref.converter);
  29818. }
  29819. /**
  29820. * @license
  29821. * Copyright 2022 Google LLC
  29822. *
  29823. * Licensed under the Apache License, Version 2.0 (the "License");
  29824. * you may not use this file except in compliance with the License.
  29825. * You may obtain a copy of the License at
  29826. *
  29827. * http://www.apache.org/licenses/LICENSE-2.0
  29828. *
  29829. * Unless required by applicable law or agreed to in writing, software
  29830. * distributed under the License is distributed on an "AS IS" BASIS,
  29831. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29832. * See the License for the specific language governing permissions and
  29833. * limitations under the License.
  29834. */
  29835. /**
  29836. * Compares two `AggregateQuerySnapshot` instances for equality.
  29837. *
  29838. * Two `AggregateQuerySnapshot` instances are considered "equal" if they have
  29839. * underlying queries that compare equal, and the same data.
  29840. *
  29841. * @param left - The first `AggregateQuerySnapshot` to compare.
  29842. * @param right - The second `AggregateQuerySnapshot` to compare.
  29843. *
  29844. * @returns `true` if the objects are "equal", as defined above, or `false`
  29845. * otherwise.
  29846. */
  29847. function aggregateQuerySnapshotEqual(left, right) {
  29848. return (queryEqual(left.query, right.query) && deepEqual(left.data(), right.data()));
  29849. }
  29850. /**
  29851. * @license
  29852. * Copyright 2022 Google LLC
  29853. *
  29854. * Licensed under the Apache License, Version 2.0 (the "License");
  29855. * you may not use this file except in compliance with the License.
  29856. * You may obtain a copy of the License at
  29857. *
  29858. * http://www.apache.org/licenses/LICENSE-2.0
  29859. *
  29860. * Unless required by applicable law or agreed to in writing, software
  29861. * distributed under the License is distributed on an "AS IS" BASIS,
  29862. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29863. * See the License for the specific language governing permissions and
  29864. * limitations under the License.
  29865. */
  29866. /**
  29867. * Calculates the number of documents in the result set of the given query,
  29868. * without actually downloading the documents.
  29869. *
  29870. * Using this function to count the documents is efficient because only the
  29871. * final count, not the documents' data, is downloaded. This function can even
  29872. * count the documents if the result set would be prohibitively large to
  29873. * download entirely (e.g. thousands of documents).
  29874. *
  29875. * The result received from the server is presented, unaltered, without
  29876. * considering any local state. That is, documents in the local cache are not
  29877. * taken into consideration, neither are local modifications not yet
  29878. * synchronized with the server. Previously-downloaded results, if any, are not
  29879. * used: every request using this source necessarily involves a round trip to
  29880. * the server.
  29881. *
  29882. * @param query - The query whose result set size to calculate.
  29883. * @returns A Promise that will be resolved with the count; the count can be
  29884. * retrieved from `snapshot.data().count`, where `snapshot` is the
  29885. * `AggregateQuerySnapshot` to which the returned Promise resolves.
  29886. */
  29887. function getCountFromServer(query) {
  29888. const firestore = cast(query.firestore, Firestore);
  29889. const client = ensureFirestoreConfigured(firestore);
  29890. const userDataWriter = new ExpUserDataWriter(firestore);
  29891. return firestoreClientRunCountQuery(client, query, userDataWriter);
  29892. }
  29893. /**
  29894. * @license
  29895. * Copyright 2022 Google LLC
  29896. *
  29897. * Licensed under the Apache License, Version 2.0 (the "License");
  29898. * you may not use this file except in compliance with the License.
  29899. * You may obtain a copy of the License at
  29900. *
  29901. * http://www.apache.org/licenses/LICENSE-2.0
  29902. *
  29903. * Unless required by applicable law or agreed to in writing, software
  29904. * distributed under the License is distributed on an "AS IS" BASIS,
  29905. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29906. * See the License for the specific language governing permissions and
  29907. * limitations under the License.
  29908. */
// Defaults merged into the options passed to runTransaction(); a transaction
// is attempted at most `maxAttempts` times before failing.
const DEFAULT_TRANSACTION_OPTIONS = {
    maxAttempts: 5
};
  29912. function validateTransactionOptions(options) {
  29913. if (options.maxAttempts < 1) {
  29914. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Max attempts must be at least 1');
  29915. }
  29916. }
  29917. /**
  29918. * @license
  29919. * Copyright 2020 Google LLC
  29920. *
  29921. * Licensed under the Apache License, Version 2.0 (the "License");
  29922. * you may not use this file except in compliance with the License.
  29923. * You may obtain a copy of the License at
  29924. *
  29925. * http://www.apache.org/licenses/LICENSE-2.0
  29926. *
  29927. * Unless required by applicable law or agreed to in writing, software
  29928. * distributed under the License is distributed on an "AS IS" BASIS,
  29929. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  29930. * See the License for the specific language governing permissions and
  29931. * limitations under the License.
  29932. */
  29933. /**
  29934. * A write batch, used to perform multiple writes as a single atomic unit.
  29935. *
  29936. * A `WriteBatch` object can be acquired by calling {@link writeBatch}. It
  29937. * provides methods for adding writes to the write batch. None of the writes
  29938. * will be committed (or visible locally) until {@link WriteBatch.commit} is
  29939. * called.
  29940. */
  29941. class WriteBatch {
  29942. /** @hideconstructor */
  29943. constructor(_firestore, _commitHandler) {
  29944. this._firestore = _firestore;
  29945. this._commitHandler = _commitHandler;
  29946. this._mutations = [];
  29947. this._committed = false;
  29948. this._dataReader = newUserDataReader(_firestore);
  29949. }
  29950. set(documentRef, data, options) {
  29951. this._verifyNotCommitted();
  29952. const ref = validateReference(documentRef, this._firestore);
  29953. const convertedValue = applyFirestoreDataConverter(ref.converter, data, options);
  29954. const parsed = parseSetData(this._dataReader, 'WriteBatch.set', ref._key, convertedValue, ref.converter !== null, options);
  29955. this._mutations.push(parsed.toMutation(ref._key, Precondition.none()));
  29956. return this;
  29957. }
  29958. update(documentRef, fieldOrUpdateData, value, ...moreFieldsAndValues) {
  29959. this._verifyNotCommitted();
  29960. const ref = validateReference(documentRef, this._firestore);
  29961. // For Compat types, we have to "extract" the underlying types before
  29962. // performing validation.
  29963. fieldOrUpdateData = getModularInstance(fieldOrUpdateData);
  29964. let parsed;
  29965. if (typeof fieldOrUpdateData === 'string' ||
  29966. fieldOrUpdateData instanceof FieldPath) {
  29967. parsed = parseUpdateVarargs(this._dataReader, 'WriteBatch.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
  29968. }
  29969. else {
  29970. parsed = parseUpdateData(this._dataReader, 'WriteBatch.update', ref._key, fieldOrUpdateData);
  29971. }
  29972. this._mutations.push(parsed.toMutation(ref._key, Precondition.exists(true)));
  29973. return this;
  29974. }
  29975. /**
  29976. * Deletes the document referred to by the provided {@link DocumentReference}.
  29977. *
  29978. * @param documentRef - A reference to the document to be deleted.
  29979. * @returns This `WriteBatch` instance. Used for chaining method calls.
  29980. */
  29981. delete(documentRef) {
  29982. this._verifyNotCommitted();
  29983. const ref = validateReference(documentRef, this._firestore);
  29984. this._mutations = this._mutations.concat(new DeleteMutation(ref._key, Precondition.none()));
  29985. return this;
  29986. }
  29987. /**
  29988. * Commits all of the writes in this write batch as a single atomic unit.
  29989. *
  29990. * The result of these writes will only be reflected in document reads that
  29991. * occur after the returned promise resolves. If the client is offline, the
  29992. * write fails. If you would like to see local modifications or buffer writes
  29993. * until the client is online, use the full Firestore SDK.
  29994. *
  29995. * @returns A `Promise` resolved once all of the writes in the batch have been
  29996. * successfully written to the backend as an atomic unit (note that it won't
  29997. * resolve while you're offline).
  29998. */
  29999. commit() {
  30000. this._verifyNotCommitted();
  30001. this._committed = true;
  30002. if (this._mutations.length > 0) {
  30003. return this._commitHandler(this._mutations);
  30004. }
  30005. return Promise.resolve();
  30006. }
  30007. _verifyNotCommitted() {
  30008. if (this._committed) {
  30009. throw new FirestoreError(Code.FAILED_PRECONDITION, 'A write batch can no longer be used after commit() ' +
  30010. 'has been called.');
  30011. }
  30012. }
  30013. }
  30014. function validateReference(documentRef, firestore) {
  30015. documentRef = getModularInstance(documentRef);
  30016. if (documentRef.firestore !== firestore) {
  30017. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Provided document reference is from a different Firestore instance.');
  30018. }
  30019. else {
  30020. return documentRef;
  30021. }
  30022. }
  30023. /**
  30024. * @license
  30025. * Copyright 2020 Google LLC
  30026. *
  30027. * Licensed under the Apache License, Version 2.0 (the "License");
  30028. * you may not use this file except in compliance with the License.
  30029. * You may obtain a copy of the License at
  30030. *
  30031. * http://www.apache.org/licenses/LICENSE-2.0
  30032. *
  30033. * Unless required by applicable law or agreed to in writing, software
  30034. * distributed under the License is distributed on an "AS IS" BASIS,
  30035. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  30036. * See the License for the specific language governing permissions and
  30037. * limitations under the License.
  30038. */
  30039. // TODO(mrschmidt) Consider using `BaseTransaction` as the base class in the
  30040. // legacy SDK.
  30041. /**
  30042. * A reference to a transaction.
  30043. *
  30044. * The `Transaction` object passed to a transaction's `updateFunction` provides
  30045. * the methods to read and write data within the transaction context. See
  30046. * {@link runTransaction}.
  30047. */
  30048. class Transaction$1 {
  30049. /** @hideconstructor */
  30050. constructor(_firestore, _transaction) {
  30051. this._firestore = _firestore;
  30052. this._transaction = _transaction;
  30053. this._dataReader = newUserDataReader(_firestore);
  30054. }
  30055. /**
  30056. * Reads the document referenced by the provided {@link DocumentReference}.
  30057. *
  30058. * @param documentRef - A reference to the document to be read.
  30059. * @returns A `DocumentSnapshot` with the read data.
  30060. */
  30061. get(documentRef) {
  30062. const ref = validateReference(documentRef, this._firestore);
  30063. const userDataWriter = new LiteUserDataWriter(this._firestore);
  30064. return this._transaction.lookup([ref._key]).then(docs => {
  30065. if (!docs || docs.length !== 1) {
  30066. return fail();
  30067. }
  30068. const doc = docs[0];
  30069. if (doc.isFoundDocument()) {
  30070. return new DocumentSnapshot$1(this._firestore, userDataWriter, doc.key, doc, ref.converter);
  30071. }
  30072. else if (doc.isNoDocument()) {
  30073. return new DocumentSnapshot$1(this._firestore, userDataWriter, ref._key, null, ref.converter);
  30074. }
  30075. else {
  30076. throw fail();
  30077. }
  30078. });
  30079. }
  30080. set(documentRef, value, options) {
  30081. const ref = validateReference(documentRef, this._firestore);
  30082. const convertedValue = applyFirestoreDataConverter(ref.converter, value, options);
  30083. const parsed = parseSetData(this._dataReader, 'Transaction.set', ref._key, convertedValue, ref.converter !== null, options);
  30084. this._transaction.set(ref._key, parsed);
  30085. return this;
  30086. }
  30087. update(documentRef, fieldOrUpdateData, value, ...moreFieldsAndValues) {
  30088. const ref = validateReference(documentRef, this._firestore);
  30089. // For Compat types, we have to "extract" the underlying types before
  30090. // performing validation.
  30091. fieldOrUpdateData = getModularInstance(fieldOrUpdateData);
  30092. let parsed;
  30093. if (typeof fieldOrUpdateData === 'string' ||
  30094. fieldOrUpdateData instanceof FieldPath) {
  30095. parsed = parseUpdateVarargs(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData, value, moreFieldsAndValues);
  30096. }
  30097. else {
  30098. parsed = parseUpdateData(this._dataReader, 'Transaction.update', ref._key, fieldOrUpdateData);
  30099. }
  30100. this._transaction.update(ref._key, parsed);
  30101. return this;
  30102. }
  30103. /**
  30104. * Deletes the document referred to by the provided {@link DocumentReference}.
  30105. *
  30106. * @param documentRef - A reference to the document to be deleted.
  30107. * @returns This `Transaction` instance. Used for chaining method calls.
  30108. */
  30109. delete(documentRef) {
  30110. const ref = validateReference(documentRef, this._firestore);
  30111. this._transaction.delete(ref._key);
  30112. return this;
  30113. }
  30114. }
  30115. /**
  30116. * @license
  30117. * Copyright 2020 Google LLC
  30118. *
  30119. * Licensed under the Apache License, Version 2.0 (the "License");
  30120. * you may not use this file except in compliance with the License.
  30121. * You may obtain a copy of the License at
  30122. *
  30123. * http://www.apache.org/licenses/LICENSE-2.0
  30124. *
  30125. * Unless required by applicable law or agreed to in writing, software
  30126. * distributed under the License is distributed on an "AS IS" BASIS,
  30127. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  30128. * See the License for the specific language governing permissions and
  30129. * limitations under the License.
  30130. */
  30131. /**
  30132. * A reference to a transaction.
  30133. *
  30134. * The `Transaction` object passed to a transaction's `updateFunction` provides
  30135. * the methods to read and write data within the transaction context. See
  30136. * {@link runTransaction}.
  30137. */
  30138. class Transaction extends Transaction$1 {
  30139. // This class implements the same logic as the Transaction API in the Lite SDK
  30140. // but is subclassed in order to return its own DocumentSnapshot types.
  30141. /** @hideconstructor */
  30142. constructor(_firestore, _transaction) {
  30143. super(_firestore, _transaction);
  30144. this._firestore = _firestore;
  30145. }
  30146. /**
  30147. * Reads the document referenced by the provided {@link DocumentReference}.
  30148. *
  30149. * @param documentRef - A reference to the document to be read.
  30150. * @returns A `DocumentSnapshot` with the read data.
  30151. */
  30152. get(documentRef) {
  30153. const ref = validateReference(documentRef, this._firestore);
  30154. const userDataWriter = new ExpUserDataWriter(this._firestore);
  30155. return super
  30156. .get(documentRef)
  30157. .then(liteDocumentSnapshot => new DocumentSnapshot(this._firestore, userDataWriter, ref._key, liteDocumentSnapshot._document, new SnapshotMetadata(
  30158. /* hasPendingWrites= */ false,
  30159. /* fromCache= */ false), ref.converter));
  30160. }
  30161. }
  30162. /**
  30163. * Executes the given `updateFunction` and then attempts to commit the changes
  30164. * applied within the transaction. If any document read within the transaction
  30165. * has changed, Cloud Firestore retries the `updateFunction`. If it fails to
  30166. * commit after 5 attempts, the transaction fails.
  30167. *
  30168. * The maximum number of writes allowed in a single transaction is 500.
  30169. *
  30170. * @param firestore - A reference to the Firestore database to run this
  30171. * transaction against.
  30172. * @param updateFunction - The function to execute within the transaction
  30173. * context.
  30174. * @param options - An options object to configure maximum number of attempts to
  30175. * commit.
  30176. * @returns If the transaction completed successfully or was explicitly aborted
  30177. * (the `updateFunction` returned a failed promise), the promise returned by the
  30178. * `updateFunction `is returned here. Otherwise, if the transaction failed, a
  30179. * rejected promise with the corresponding failure error is returned.
  30180. */
  30181. function runTransaction(firestore, updateFunction, options) {
  30182. firestore = cast(firestore, Firestore);
  30183. const optionsWithDefaults = Object.assign(Object.assign({}, DEFAULT_TRANSACTION_OPTIONS), options);
  30184. validateTransactionOptions(optionsWithDefaults);
  30185. const client = ensureFirestoreConfigured(firestore);
  30186. return firestoreClientTransaction(client, internalTransaction => updateFunction(new Transaction(firestore, internalTransaction)), optionsWithDefaults);
  30187. }
  30188. /**
  30189. * @license
  30190. * Copyright 2020 Google LLC
  30191. *
  30192. * Licensed under the Apache License, Version 2.0 (the "License");
  30193. * you may not use this file except in compliance with the License.
  30194. * You may obtain a copy of the License at
  30195. *
  30196. * http://www.apache.org/licenses/LICENSE-2.0
  30197. *
  30198. * Unless required by applicable law or agreed to in writing, software
  30199. * distributed under the License is distributed on an "AS IS" BASIS,
  30200. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  30201. * See the License for the specific language governing permissions and
  30202. * limitations under the License.
  30203. */
  30204. /**
  30205. * Returns a sentinel for use with {@link @firebase/firestore/lite#(updateDoc:1)} or
  30206. * {@link @firebase/firestore/lite#(setDoc:1)} with `{merge: true}` to mark a field for deletion.
  30207. */
  30208. function deleteField() {
  30209. return new DeleteFieldValueImpl('deleteField');
  30210. }
  30211. /**
  30212. * Returns a sentinel used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link @firebase/firestore/lite#(updateDoc:1)} to
  30213. * include a server-generated timestamp in the written data.
  30214. */
  30215. function serverTimestamp() {
  30216. return new ServerTimestampFieldValueImpl('serverTimestamp');
  30217. }
  30218. /**
  30219. * Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
  30220. * @firebase/firestore/lite#(updateDoc:1)} that tells the server to union the given elements with any array
  30221. * value that already exists on the server. Each specified element that doesn't
  30222. * already exist in the array will be added to the end. If the field being
  30223. * modified is not already an array it will be overwritten with an array
  30224. * containing exactly the specified elements.
  30225. *
  30226. * @param elements - The elements to union into the array.
  30227. * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
  30228. * `updateDoc()`.
  30229. */
  30230. function arrayUnion(...elements) {
  30231. // NOTE: We don't actually parse the data until it's used in set() or
  30232. // update() since we'd need the Firestore instance to do this.
  30233. return new ArrayUnionFieldValueImpl('arrayUnion', elements);
  30234. }
  30235. /**
  30236. * Returns a special value that can be used with {@link (setDoc:1)} or {@link
  30237. * updateDoc:1} that tells the server to remove the given elements from any
  30238. * array value that already exists on the server. All instances of each element
  30239. * specified will be removed from the array. If the field being modified is not
  30240. * already an array it will be overwritten with an empty array.
  30241. *
  30242. * @param elements - The elements to remove from the array.
  30243. * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
  30244. * `updateDoc()`
  30245. */
  30246. function arrayRemove(...elements) {
  30247. // NOTE: We don't actually parse the data until it's used in set() or
  30248. // update() since we'd need the Firestore instance to do this.
  30249. return new ArrayRemoveFieldValueImpl('arrayRemove', elements);
  30250. }
  30251. /**
  30252. * Returns a special value that can be used with {@link @firebase/firestore/lite#(setDoc:1)} or {@link
  30253. * @firebase/firestore/lite#(updateDoc:1)} that tells the server to increment the field's current value by
  30254. * the given value.
  30255. *
  30256. * If either the operand or the current field value uses floating point
  30257. * precision, all arithmetic follows IEEE 754 semantics. If both values are
  30258. * integers, values outside of JavaScript's safe number range
  30259. * (`Number.MIN_SAFE_INTEGER` to `Number.MAX_SAFE_INTEGER`) are also subject to
  30260. * precision loss. Furthermore, once processed by the Firestore backend, all
  30261. * integer operations are capped between -2^63 and 2^63-1.
  30262. *
  30263. * If the current field value is not of type `number`, or if the field does not
  30264. * yet exist, the transformation sets the field to the given value.
  30265. *
  30266. * @param n - The value to increment by.
  30267. * @returns The `FieldValue` sentinel for use in a call to `setDoc()` or
  30268. * `updateDoc()`
  30269. */
  30270. function increment(n) {
  30271. return new NumericIncrementFieldValueImpl('increment', n);
  30272. }
  30273. /**
  30274. * @license
  30275. * Copyright 2020 Google LLC
  30276. *
  30277. * Licensed under the Apache License, Version 2.0 (the "License");
  30278. * you may not use this file except in compliance with the License.
  30279. * You may obtain a copy of the License at
  30280. *
  30281. * http://www.apache.org/licenses/LICENSE-2.0
  30282. *
  30283. * Unless required by applicable law or agreed to in writing, software
  30284. * distributed under the License is distributed on an "AS IS" BASIS,
  30285. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  30286. * See the License for the specific language governing permissions and
  30287. * limitations under the License.
  30288. */
  30289. /**
  30290. * Creates a write batch, used for performing multiple writes as a single
  30291. * atomic operation. The maximum number of writes allowed in a single {@link WriteBatch}
  30292. * is 500.
  30293. *
  30294. * Unlike transactions, write batches are persisted offline and therefore are
  30295. * preferable when you don't need to condition your writes on read data.
  30296. *
  30297. * @returns A {@link WriteBatch} that can be used to atomically execute multiple
  30298. * writes.
  30299. */
  30300. function writeBatch(firestore) {
  30301. firestore = cast(firestore, Firestore);
  30302. ensureFirestoreConfigured(firestore);
  30303. return new WriteBatch(firestore, mutations => executeWrite(firestore, mutations));
  30304. }
  30305. /**
  30306. * @license
  30307. * Copyright 2021 Google LLC
  30308. *
  30309. * Licensed under the Apache License, Version 2.0 (the "License");
  30310. * you may not use this file except in compliance with the License.
  30311. * You may obtain a copy of the License at
  30312. *
  30313. * http://www.apache.org/licenses/LICENSE-2.0
  30314. *
  30315. * Unless required by applicable law or agreed to in writing, software
  30316. * distributed under the License is distributed on an "AS IS" BASIS,
  30317. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  30318. * See the License for the specific language governing permissions and
  30319. * limitations under the License.
  30320. */
  30321. function setIndexConfiguration(firestore, jsonOrConfiguration) {
  30322. var _a;
  30323. firestore = cast(firestore, Firestore);
  30324. const client = ensureFirestoreConfigured(firestore);
  30325. // PORTING NOTE: We don't return an error if the user has not enabled
  30326. // persistence since `enableIndexeddbPersistence()` can fail on the Web.
  30327. if (!((_a = client.offlineComponents) === null || _a === void 0 ? void 0 : _a.indexBackfillerScheduler)) {
  30328. logWarn('Cannot enable indexes when persistence is disabled');
  30329. return Promise.resolve();
  30330. }
  30331. const parsedIndexes = parseIndexes(jsonOrConfiguration);
  30332. return getLocalStore(client).then(localStore => localStoreConfigureFieldIndexes(localStore, parsedIndexes));
  30333. }
  30334. function parseIndexes(jsonOrConfiguration) {
  30335. const indexConfiguration = typeof jsonOrConfiguration === 'string'
  30336. ? tryParseJson(jsonOrConfiguration)
  30337. : jsonOrConfiguration;
  30338. const parsedIndexes = [];
  30339. if (Array.isArray(indexConfiguration.indexes)) {
  30340. for (const index of indexConfiguration.indexes) {
  30341. const collectionGroup = tryGetString(index, 'collectionGroup');
  30342. const segments = [];
  30343. if (Array.isArray(index.fields)) {
  30344. for (const field of index.fields) {
  30345. const fieldPathString = tryGetString(field, 'fieldPath');
  30346. const fieldPath = fieldPathFromDotSeparatedString('setIndexConfiguration', fieldPathString);
  30347. if (field.arrayConfig === 'CONTAINS') {
  30348. segments.push(new IndexSegment(fieldPath, 2 /* IndexKind.CONTAINS */));
  30349. }
  30350. else if (field.order === 'ASCENDING') {
  30351. segments.push(new IndexSegment(fieldPath, 0 /* IndexKind.ASCENDING */));
  30352. }
  30353. else if (field.order === 'DESCENDING') {
  30354. segments.push(new IndexSegment(fieldPath, 1 /* IndexKind.DESCENDING */));
  30355. }
  30356. }
  30357. }
  30358. parsedIndexes.push(new FieldIndex(FieldIndex.UNKNOWN_ID, collectionGroup, segments, IndexState.empty()));
  30359. }
  30360. }
  30361. return parsedIndexes;
  30362. }
  30363. function tryParseJson(json) {
  30364. try {
  30365. return JSON.parse(json);
  30366. }
  30367. catch (e) {
  30368. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Failed to parse JSON: ' + (e === null || e === void 0 ? void 0 : e.message));
  30369. }
  30370. }
  30371. function tryGetString(data, property) {
  30372. if (typeof data[property] !== 'string') {
  30373. throw new FirestoreError(Code.INVALID_ARGUMENT, 'Missing string value for: ' + property);
  30374. }
  30375. return data[property];
  30376. }
  30377. /**
  30378. * @license
  30379. * Copyright 2021 Google LLC
  30380. *
  30381. * Licensed under the Apache License, Version 2.0 (the "License");
  30382. * you may not use this file except in compliance with the License.
  30383. * You may obtain a copy of the License at
  30384. *
  30385. * http://www.apache.org/licenses/LICENSE-2.0
  30386. *
  30387. * Unless required by applicable law or agreed to in writing, software
  30388. * distributed under the License is distributed on an "AS IS" BASIS,
  30389. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  30390. * See the License for the specific language governing permissions and
  30391. * limitations under the License.
  30392. */
// Module side effect: registers the Firestore component for the 'node'
// platform variant of this bundle (see index.node.mjs source map below).
registerFirestore('node');
  30394. export { AbstractUserDataWriter, AggregateField, AggregateQuerySnapshot, Bytes, CACHE_SIZE_UNLIMITED, CollectionReference, DocumentReference, DocumentSnapshot, FieldPath, FieldValue, Firestore, FirestoreError, GeoPoint, LoadBundleTask, Query, QueryCompositeFilterConstraint, QueryConstraint, QueryDocumentSnapshot, QueryEndAtConstraint, QueryFieldFilterConstraint, QueryLimitConstraint, QueryOrderByConstraint, QuerySnapshot, QueryStartAtConstraint, SnapshotMetadata, Timestamp, Transaction, WriteBatch, DatabaseId as _DatabaseId, DocumentKey as _DocumentKey, EmptyAppCheckTokenProvider as _EmptyAppCheckTokenProvider, EmptyAuthCredentialsProvider as _EmptyAuthCredentialsProvider, FieldPath$1 as _FieldPath, cast as _cast, debugAssert as _debugAssert, isBase64Available as _isBase64Available, logWarn as _logWarn, validateIsNotUsedTogether as _validateIsNotUsedTogether, addDoc, aggregateQuerySnapshotEqual, and, arrayRemove, arrayUnion, clearIndexedDbPersistence, collection, collectionGroup, connectFirestoreEmulator, deleteDoc, deleteField, disableNetwork, doc, documentId, enableIndexedDbPersistence, enableMultiTabIndexedDbPersistence, enableNetwork, endAt, endBefore, ensureFirestoreConfigured, executeWrite, getCountFromServer, getDoc, getDocFromCache, getDocFromServer, getDocs, getDocsFromCache, getDocsFromServer, getFirestore, increment, initializeFirestore, limit, limitToLast, loadBundle, namedQuery, onSnapshot, onSnapshotsInSync, or, orderBy, query, queryEqual, refEqual, runTransaction, serverTimestamp, setDoc, setIndexConfiguration, setLogLevel, snapshotEqual, startAfter, startAt, terminate, updateDoc, waitForPendingWrites, where, writeBatch };
  30395. //# sourceMappingURL=index.node.mjs.map