[Fizz] Expose callbacks in options for when various stages of the content are done (#21056)

* Report errors to a global handler

This allows you to log errors on the server or do things like set the response status code.
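As a rough sketch of the intended use (the `handleRequest` handler, the `App` component, and the `react-dom/unstable-fizz.node` entry point are illustrative assumptions, not part of this commit), a Node server could combine `onError` with `onReadyToStream` to log the error and pick the status code before any HTML has been flushed:

// Hypothetical request handler; `res` is a Node http.ServerResponse.
// The Fizz entry point name is an assumption and may differ between builds.
const ReactDOMFizzServer = require('react-dom/unstable-fizz.node');

function handleRequest(req, res) {
  let didError = false;
  const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(<App />, res, {
    onError(error) {
      didError = true;
      console.error(error); // forward to your logging service
    },
    onReadyToStream() {
      // Nothing has been flushed yet, so the status code can still change.
      res.statusCode = didError ? 500 : 200;
      res.setHeader('Content-Type', 'text/html');
      startWriting();
    },
  });
}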

* Add complete callback

* onReadyToStream callback

This is typically not needed: if you want to stream as soon as the root is ready, you can just start writing immediately.
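That said, a minimal sketch of where it does help (writing a custom document shell before letting React stream; `writable`, `App`, `Spinner`, and the markup are illustrative):

const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
  <Suspense fallback={<Spinner />}>
    <App />
  </Suspense>,
  writable,
  {
    onReadyToStream() {
      // At least the root fallback is ready; emit the shell, then let React stream.
      writable.write('<!doctype html><html><head><title>My page</title></head><body>');
      startWriting();
    },
  },
);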

* Rename onComplete -> onCompleteAll
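For reference, a sketch of the renamed callback in use (`App` and `writable` are illustrative): per the comments added in this commit, onCompleteAll fires once all pending work is done, even if nothing has flushed yet, so it can be used to emit the whole document as a single unit:

const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(<App />, writable, {
  onCompleteAll() {
    // All Suspense boundaries have resolved; nothing more will stream in later,
    // so writing now emits the complete HTML in one pass.
    startWriting();
  },
});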
Sebastian Markbåge 2021-03-23 14:39:38 -04:00 committed by GitHub
parent 25bfa287f6
commit 435cff9866
7 changed files with 255 additions and 60 deletions

View File

@ -336,7 +336,6 @@ describe('ReactDOMFizzServer', () => {
writable.write(chunk, encoding, next);
};
writable.write('<div id="container-A">');
await act(async () => {
const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
<Suspense fallback={<Text text="Loading A..." />}>
@ -346,13 +345,17 @@ describe('ReactDOMFizzServer', () => {
</div>
</Suspense>,
writableA,
{identifierPrefix: 'A_'},
{
identifierPrefix: 'A_',
onReadyToStream() {
writableA.write('<div id="container-A">');
startWriting();
writableA.write('</div>');
},
},
);
startWriting();
});
writable.write('</div>');
writable.write('<div id="container-B">');
await act(async () => {
const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
<Suspense fallback={<Text text="Loading B..." />}>
@ -362,11 +365,16 @@ describe('ReactDOMFizzServer', () => {
</div>
</Suspense>,
writableB,
{identifierPrefix: 'B_'},
{
identifierPrefix: 'B_',
onReadyToStream() {
writableB.write('<div id="container-B">');
startWriting();
writableB.write('</div>');
},
},
);
startWriting();
});
writable.write('</div>');
expect(getVisibleChildren(container)).toEqual([
<div id="container-A">Loading A...</div>,

View File

@ -58,12 +58,56 @@ describe('ReactDOMFizzServer', () => {
expect(result).toBe('<div>hello world</div>');
});
// @gate experimental
it('emits all HTML as one unit if we wait until the end to start', async () => {
let hasLoaded = false;
let resolve;
const promise = new Promise(r => (resolve = r));
function Wait() {
if (!hasLoaded) {
throw promise;
}
return 'Done';
}
let isComplete = false;
const stream = ReactDOMFizzServer.renderToReadableStream(
<div>
<Suspense fallback="Loading">
<Wait />
</Suspense>
</div>,
{
onCompleteAll() {
isComplete = true;
},
},
);
await jest.runAllTimers();
expect(isComplete).toBe(false);
// Resolve the loading.
hasLoaded = true;
await resolve();
await jest.runAllTimers();
expect(isComplete).toBe(true);
const result = await readResult(stream);
expect(result).toBe('<div><!--$-->Done<!--/$--></div>');
});
// @gate experimental
it('should error the stream when an error is thrown at the root', async () => {
const reportedErrors = [];
const stream = ReactDOMFizzServer.renderToReadableStream(
<div>
<Throw />
</div>,
{
onError(x) {
reportedErrors.push(x);
},
},
);
let caughtError = null;
@ -75,16 +119,23 @@ describe('ReactDOMFizzServer', () => {
}
expect(caughtError).toBe(theError);
expect(result).toBe('');
expect(reportedErrors).toEqual([theError]);
});
// @gate experimental
it('should error the stream when an error is thrown inside a fallback', async () => {
const reportedErrors = [];
const stream = ReactDOMFizzServer.renderToReadableStream(
<div>
<Suspense fallback={<Throw />}>
<InfiniteSuspend />
</Suspense>
</div>,
{
onError(x) {
reportedErrors.push(x);
},
},
);
let caughtError = null;
@ -96,20 +147,28 @@ describe('ReactDOMFizzServer', () => {
}
expect(caughtError).toBe(theError);
expect(result).toBe('');
expect(reportedErrors).toEqual([theError]);
});
// @gate experimental
it('should not error the stream when an error is thrown inside suspense boundary', async () => {
const reportedErrors = [];
const stream = ReactDOMFizzServer.renderToReadableStream(
<div>
<Suspense fallback={<div>Loading</div>}>
<Throw />
</Suspense>
</div>,
{
onError(x) {
reportedErrors.push(x);
},
},
);
const result = await readResult(stream);
expect(result).toContain('Loading');
expect(reportedErrors).toEqual([theError]);
});
// @gate experimental

View File

@ -86,14 +86,68 @@ describe('ReactDOMFizzServer', () => {
);
});
// @gate experimental
it('emits all HTML as one unit if we wait until the end to start', async () => {
let hasLoaded = false;
let resolve;
const promise = new Promise(r => (resolve = r));
function Wait() {
if (!hasLoaded) {
throw promise;
}
return 'Done';
}
let isComplete = false;
const {writable, output} = getTestWritable();
const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
<div>
<Suspense fallback="Loading">
<Wait />
</Suspense>
</div>,
writable,
{
onCompleteAll() {
isComplete = true;
},
},
);
await jest.runAllTimers();
expect(output.result).toBe('');
expect(isComplete).toBe(false);
// Resolve the loading.
hasLoaded = true;
await resolve();
await jest.runAllTimers();
expect(output.result).toBe('');
expect(isComplete).toBe(true);
// First we write our header.
output.result +=
'<!doctype html><html><head><title>test</title></head><body>';
// Then React starts writing.
startWriting();
expect(output.result).toBe(
'<!doctype html><html><head><title>test</title></head><body><div><!--$-->Done<!--/$--></div>',
);
});
// @gate experimental
it('should error the stream when an error is thrown at the root', async () => {
const reportedErrors = [];
const {writable, output, completed} = getTestWritable();
ReactDOMFizzServer.pipeToNodeWritable(
<div>
<Throw />
</div>,
writable,
{
onError(x) {
reportedErrors.push(x);
},
},
);
// The stream is errored even if we haven't started writing.
@ -102,10 +156,13 @@ describe('ReactDOMFizzServer', () => {
expect(output.error).toBe(theError);
expect(output.result).toBe('');
// This type of error is reported to the error callback too.
expect(reportedErrors).toEqual([theError]);
});
// @gate experimental
it('should error the stream when an error is thrown inside a fallback', async () => {
const reportedErrors = [];
const {writable, output, completed} = getTestWritable();
const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
<div>
@ -114,6 +171,11 @@ describe('ReactDOMFizzServer', () => {
</Suspense>
</div>,
writable,
{
onError(x) {
reportedErrors.push(x);
},
},
);
startWriting();
@ -121,10 +183,12 @@ describe('ReactDOMFizzServer', () => {
expect(output.error).toBe(theError);
expect(output.result).toBe('');
expect(reportedErrors).toEqual([theError]);
});
// @gate experimental
it('should not error the stream when an error is thrown inside suspense boundary', async () => {
const reportedErrors = [];
const {writable, output, completed} = getTestWritable();
const {startWriting} = ReactDOMFizzServer.pipeToNodeWritable(
<div>
@ -133,6 +197,11 @@ describe('ReactDOMFizzServer', () => {
</Suspense>
</div>,
writable,
{
onError(x) {
reportedErrors.push(x);
},
},
);
startWriting();
@ -140,6 +209,8 @@ describe('ReactDOMFizzServer', () => {
expect(output.error).toBe(undefined);
expect(output.result).toContain('Loading');
// While no error is reported to the stream, the error is reported to the callback.
expect(reportedErrors).toEqual([theError]);
});
// @gate experimental

View File

@ -22,6 +22,9 @@ type Options = {
identifierPrefix?: string,
progressiveChunkSize?: number,
signal?: AbortSignal,
onReadyToStream?: () => void,
onCompleteAll?: () => void,
onError?: (error: mixed) => void,
};
function renderToReadableStream(
@ -37,21 +40,31 @@ function renderToReadableStream(
};
signal.addEventListener('abort', listener);
}
return new ReadableStream({
const stream = new ReadableStream({
start(controller) {
request = createRequest(
children,
controller,
createResponseState(options ? options.identifierPrefix : undefined),
options ? options.progressiveChunkSize : undefined,
options ? options.onError : undefined,
options ? options.onCompleteAll : undefined,
options ? options.onReadyToStream : undefined,
);
startWork(request);
},
pull(controller) {
startFlowing(request);
// Pull is called immediately even if the stream is not passed to anything.
// That's buffering too early. We want to start buffering once the stream
// is actually used by something so we can give it the best result possible
// at that point.
if (stream.locked) {
startFlowing(request);
}
},
cancel(reason) {},
});
return stream;
}
export {renderToReadableStream};

View File

@ -26,6 +26,9 @@ function createDrainHandler(destination, request) {
type Options = {
identifierPrefix?: string,
progressiveChunkSize?: number,
onReadyToStream?: () => void,
onCompleteAll?: () => void,
onError?: (error: mixed) => void,
};
type Controls = {
@ -44,6 +47,9 @@ function pipeToNodeWritable(
destination,
createResponseState(options ? options.identifierPrefix : undefined),
options ? options.progressiveChunkSize : undefined,
options ? options.onError : undefined,
options ? options.onCompleteAll : undefined,
options ? options.onReadyToStream : undefined,
);
let hasStartedFlowing = false;
startWork(request);

View File

@ -217,6 +217,9 @@ const ReactNoopServer = ReactFizzServer({
type Options = {
progressiveChunkSize?: number,
onReadyToStream?: () => void,
onCompleteAll?: () => void,
onError?: (error: mixed) => void,
};
function render(children: React$Element<any>, options?: Options): Destination {
@ -234,6 +237,9 @@ function render(children: React$Element<any>, options?: Options): Destination {
destination,
null,
options ? options.progressiveChunkSize : undefined,
options ? options.onError : undefined,
options ? options.onCompleteAll : undefined,
options ? options.onReadyToStream : undefined,
);
ReactNoopServer.startWork(request);
ReactNoopServer.startFlowing(request);

View File

@ -110,6 +110,15 @@ type Request = {
clientRenderedBoundaries: Array<SuspenseBoundary>, // Errored or client rendered but not yet flushed.
completedBoundaries: Array<SuspenseBoundary>, // Completed but not yet fully flushed boundaries to show.
partialBoundaries: Array<SuspenseBoundary>, // Partially completed boundaries that can flush its segments early.
// onError is called when an error happens anywhere in the tree. It might recover.
onError: (error: mixed) => void,
// onCompleteAll is called when all pending work is done but it may not have flushed yet.
// This is a good time to start writing if you want only HTML and no intermediate steps.
onCompleteAll: () => void,
// onReadyToStream is called when there is at least a root fallback ready to show.
// Typically you don't need this callback because it's best practice to always have a
// root fallback ready so there's no need to wait.
onReadyToStream: () => void,
};
// This is a default heuristic for how to split up the HTML content into progressive
@ -134,6 +143,9 @@ export function createRequest(
destination: Destination,
responseState: ResponseState,
progressiveChunkSize: number = DEFAULT_PROGRESSIVE_CHUNK_SIZE,
onError: (error: mixed) => void = noop,
onCompleteAll: () => void = noop,
onReadyToStream: () => void = noop,
): Request {
const pingedWork = [];
const abortSet: Set<SuspendedWork> = new Set();
@ -151,6 +163,9 @@ export function createRequest(
clientRenderedBoundaries: [],
completedBoundaries: [],
partialBoundaries: [],
onError,
onCompleteAll,
onReadyToStream,
};
// This segment represents the root fallback.
const rootSegment = createPendingSegment(request, 0, null);
@ -235,7 +250,9 @@ function createPendingSegment(
}
function reportError(request: Request, error: mixed): void {
// TODO: Report errors on the server.
// If this callback errors, we intentionally let that error bubble up to become a fatal error
// so that someone fixes the error reporting instead of hiding it.
request.onError(error);
}
function fatalError(request: Request, error: mixed): void {
@ -389,28 +406,31 @@ function erroredWork(
segment: Segment,
error: mixed,
) {
request.allPendingWork--;
if (boundary !== null) {
boundary.pendingWork--;
}
// Report the error to a global handler.
reportError(request, error);
if (boundary === null) {
fatalError(request, error);
} else if (!boundary.forceClientRender) {
boundary.forceClientRender = true;
} else {
boundary.pendingWork--;
if (!boundary.forceClientRender) {
boundary.forceClientRender = true;
// Regardless of what happens next, this boundary won't be displayed,
// so we can flush it, if the parent already flushed.
if (boundary.parentFlushed) {
// We don't have a preference where in the queue this goes since it's likely
// to error on the client anyway. However, intentionally client-rendered
// boundaries should be flushed earlier so that they can start on the client.
// We reuse the same queue for errors.
request.clientRenderedBoundaries.push(boundary);
// Regardless of what happens next, this boundary won't be displayed,
// so we can flush it, if the parent already flushed.
if (boundary.parentFlushed) {
// We don't have a preference where in the queue this goes since it's likely
// to error on the client anyway. However, intentionally client-rendered
// boundaries should be flushed earlier so that they can start on the client.
// We reuse the same queue for errors.
request.clientRenderedBoundaries.push(boundary);
}
}
}
request.allPendingWork--;
if (request.allPendingWork === 0) {
request.onCompleteAll();
}
}
function abortWorkSoft(suspendedWork: SuspendedWork): void {
@ -454,6 +474,10 @@ function abortWork(suspendedWork: SuspendedWork): void {
request.clientRenderedBoundaries.push(boundary);
}
}
if (request.allPendingWork === 0) {
request.onCompleteAll();
}
}
}
@ -462,10 +486,7 @@ function finishedWork(
boundary: Root | SuspenseBoundary,
segment: Segment,
) {
request.allPendingWork--;
if (boundary === null) {
request.pendingRootWork--;
if (segment.parentFlushed) {
invariant(
request.completedRootSegment === null,
@ -473,43 +494,51 @@ function finishedWork(
);
request.completedRootSegment = segment;
}
return;
}
boundary.pendingWork--;
if (boundary.forceClientRender) {
// This already errored.
return;
}
if (boundary.pendingWork === 0) {
// This must have been the last segment we were waiting on. This boundary is now complete.
// We can now cancel any pending work on the fallback since we won't need to show it anymore.
boundary.fallbackAbortableWork.forEach(abortWorkSoft, request);
boundary.fallbackAbortableWork.clear();
if (segment.parentFlushed) {
// Our parent segment already flushed, so we need to schedule this segment to be emitted.
boundary.completedSegments.push(segment);
}
if (boundary.parentFlushed) {
// The segment might be part of a segment that didn't flush yet, but if the boundary's
// parent flushed, we need to schedule the boundary to be emitted.
request.completedBoundaries.push(boundary);
request.pendingRootWork--;
if (request.pendingRootWork === 0) {
request.onReadyToStream();
}
} else {
if (segment.parentFlushed) {
// Our parent already flushed, so we need to schedule this segment to be emitted.
const completedSegments = boundary.completedSegments;
completedSegments.push(segment);
if (completedSegments.length === 1) {
// This is the first time since we last flushed that we completed anything.
// We can schedule this boundary to emit its partially completed segments early
// in case the parent has already been flushed.
if (boundary.parentFlushed) {
request.partialBoundaries.push(boundary);
boundary.pendingWork--;
if (boundary.forceClientRender) {
// This already errored.
} else if (boundary.pendingWork === 0) {
// This must have been the last segment we were waiting on. This boundary is now complete.
// We can now cancel any pending work on the fallback since we won't need to show it anymore.
boundary.fallbackAbortableWork.forEach(abortWorkSoft, request);
boundary.fallbackAbortableWork.clear();
if (segment.parentFlushed) {
// Our parent segment already flushed, so we need to schedule this segment to be emitted.
boundary.completedSegments.push(segment);
}
if (boundary.parentFlushed) {
// The segment might be part of a segment that didn't flush yet, but if the boundary's
// parent flushed, we need to schedule the boundary to be emitted.
request.completedBoundaries.push(boundary);
}
} else {
if (segment.parentFlushed) {
// Our parent already flushed, so we need to schedule this segment to be emitted.
const completedSegments = boundary.completedSegments;
completedSegments.push(segment);
if (completedSegments.length === 1) {
// This is the first time since we last flushed that we completed anything.
// We can schedule this boundary to emit its partially completed segments early
// in case the parent has already been flushed.
if (boundary.parentFlushed) {
request.partialBoundaries.push(boundary);
}
}
}
}
}
request.allPendingWork--;
if (request.allPendingWork === 0) {
// This needs to be called at the very end so that we can synchronously write the result
// in the callback if needed.
request.onCompleteAll();
}
}
function retryWork(request: Request, work: SuspendedWork): void {
@ -573,6 +602,7 @@ function performWork(request: Request): void {
flushCompletedQueues(request);
}
} catch (error) {
reportError(request, error);
fatalError(request, error);
} finally {
ReactCurrentDispatcher.current = prevDispatcher;
@ -920,6 +950,7 @@ export function startFlowing(request: Request): void {
try {
flushCompletedQueues(request);
} catch (error) {
reportError(request, error);
fatalError(request, error);
}
}
@ -934,6 +965,7 @@ export function abort(request: Request): void {
flushCompletedQueues(request);
}
} catch (error) {
reportError(request, error);
fatalError(request, error);
}
}