/* -*- Mode: C++; tab-width: 2; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SourceBuffer.h"

#include <algorithm>
#include <cmath>
#include <cstring>
#include "mozilla/Likely.h"
#include "nsIInputStream.h"
#include "MainThreadUtils.h"
#include "SurfaceCache.h"

using std::max;
using std::min;

namespace mozilla {
namespace image {

//////////////////////////////////////////////////////////////////////////////
// SourceBufferIterator implementation.
//////////////////////////////////////////////////////////////////////////////

SourceBufferIterator::~SourceBufferIterator()
{
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }
}

SourceBufferIterator&
SourceBufferIterator::operator=(SourceBufferIterator&& aOther)
{
  if (mOwner) {
    mOwner->OnIteratorRelease();
  }

  mOwner = std::move(aOther.mOwner);
  mState = aOther.mState;
  mData = aOther.mData;
  mChunkCount = aOther.mChunkCount;
  mByteCount = aOther.mByteCount;
  mRemainderToRead = aOther.mRemainderToRead;

  return *this;
}

SourceBufferIterator::State
SourceBufferIterator::AdvanceOrScheduleResume(size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MOZ_ASSERT(mOwner);

  if (MOZ_UNLIKELY(!HasMore())) {
    MOZ_ASSERT_UNREACHABLE("Should not advance a completed iterator");
    return COMPLETE;
  }

  // The range of data [mOffset, mOffset + mNextReadLength) has just been read
  // by the caller (or at least they don't have any interest in it), so consume
  // that data.
  MOZ_ASSERT(mData.mIterating.mNextReadLength <= mData.mIterating.mAvailableLength);
  mData.mIterating.mOffset += mData.mIterating.mNextReadLength;
  mData.mIterating.mAvailableLength -= mData.mIterating.mNextReadLength;

  // An iterator can have a limit imposed on it to read only a subset of a
  // source buffer. If it is present, we need to mimic the same behaviour as
  // the owning SourceBuffer.
  if (MOZ_UNLIKELY(mRemainderToRead != SIZE_MAX)) {
    MOZ_ASSERT(mData.mIterating.mNextReadLength <= mRemainderToRead);
    mRemainderToRead -= mData.mIterating.mNextReadLength;

    if (MOZ_UNLIKELY(mRemainderToRead == 0)) {
      mData.mIterating.mNextReadLength = 0;
      SetComplete(NS_OK);
      return COMPLETE;
    }

    if (MOZ_UNLIKELY(aRequestedBytes > mRemainderToRead)) {
      aRequestedBytes = mRemainderToRead;
    }
  }

  mData.mIterating.mNextReadLength = 0;

  if (MOZ_LIKELY(mState == READY)) {
    // If the caller wants zero bytes of data, that's easy enough; we just
    // configured ourselves for a zero-byte read above! In theory we could do
    // this even in the START state, but it's not important for performance and
    // breaking the ability of callers to assert that the pointer returned by
    // Data() is non-null doesn't seem worth it.
    if (aRequestedBytes == 0) {
      MOZ_ASSERT(mData.mIterating.mNextReadLength == 0);
      return READY;
    }

    // Try to satisfy the request out of our local buffer. This is potentially
    // much faster than requesting data from our owning SourceBuffer because we
    // don't have to take the lock. Note that if we have anything at all in our
    // local buffer, we use it to satisfy the request; @aRequestedBytes is just
    // the *maximum* number of bytes we can return.
    if (mData.mIterating.mAvailableLength > 0) {
      return AdvanceFromLocalBuffer(aRequestedBytes);
    }
  }

  // Our local buffer is empty, so we'll have to request data from our owning
  // SourceBuffer.
  return mOwner->AdvanceIteratorOrScheduleResume(*this,
                                                 aRequestedBytes,
                                                 aConsumer);
}
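
// An illustrative sketch of how a consumer might drive the state machine
// above. `ProcessChunk`, `FinishDecode`, and `mResumable` are hypothetical
// names; the READY/WAITING/COMPLETE states and the Data()/Length()/
// CompletionStatus() accessors are assumed from SourceBuffer.h.
//
//   SourceBufferIterator iterator = sourceBuffer->Iterator(SIZE_MAX);
//   while (true) {
//     switch (iterator.AdvanceOrScheduleResume(SIZE_MAX, mResumable)) {
//       case SourceBufferIterator::READY:
//         ProcessChunk(iterator.Data(), iterator.Length());
//         continue;  // Loop around for the next span of data.
//       case SourceBufferIterator::WAITING:
//         return;    // mResumable->Resume() fires when more data arrives.
//       case SourceBufferIterator::COMPLETE:
//         FinishDecode(iterator.CompletionStatus());
//         return;
//     }
//   }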

bool
SourceBufferIterator::RemainingBytesIsNoMoreThan(size_t aBytes) const
{
  MOZ_ASSERT(mOwner);
  return mOwner->RemainingBytesIsNoMoreThan(*this, aBytes);
}


//////////////////////////////////////////////////////////////////////////////
// SourceBuffer implementation.
//////////////////////////////////////////////////////////////////////////////

const size_t SourceBuffer::MIN_CHUNK_CAPACITY;
const size_t SourceBuffer::MAX_CHUNK_CAPACITY;

SourceBuffer::SourceBuffer()
  : mMutex("image::SourceBuffer")
  , mConsumerCount(0)
  , mCompacted(false)
{ }

SourceBuffer::~SourceBuffer()
{
  MOZ_ASSERT(mConsumerCount == 0,
             "SourceBuffer destroyed with active consumers");
}

nsresult
SourceBuffer::AppendChunk(Maybe<Chunk>&& aChunk)
{
  mMutex.AssertCurrentThreadOwns();

#ifdef DEBUG
  if (mChunks.Length() > 0) {
    NS_WARNING("Appending an extra chunk for SourceBuffer");
  }
#endif

  if (MOZ_UNLIKELY(!aChunk)) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(aChunk->AllocationFailed())) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  if (MOZ_UNLIKELY(!mChunks.AppendElement(std::move(*aChunk), fallible))) {
    return NS_ERROR_OUT_OF_MEMORY;
  }

  return NS_OK;
}

Maybe<SourceBuffer::Chunk>
SourceBuffer::CreateChunk(size_t aCapacity,
                          size_t aExistingCapacity /* = 0 */,
                          bool aRoundUp /* = true */)
{
  if (MOZ_UNLIKELY(aCapacity == 0)) {
    MOZ_ASSERT_UNREACHABLE("Appending a chunk of zero size?");
    return Nothing();
  }

  // Round up if requested.
  size_t finalCapacity = aRoundUp ? RoundedUpCapacity(aCapacity)
                                  : aCapacity;

  // Use the size of the SurfaceCache as an additional heuristic to avoid
  // allocating huge buffers. Generally images do not get smaller when decoded,
  // so if we can't store the source data in the SurfaceCache, we assume that
  // there's no way we'll be able to store the decoded version.
  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(finalCapacity + aExistingCapacity))) {
    NS_WARNING("SourceBuffer refused to create chunk too large for SurfaceCache");
    return Nothing();
  }

  return Some(Chunk(finalCapacity));
}

nsresult
SourceBuffer::Compact()
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(mConsumerCount == 0, "Should have no consumers here");
  MOZ_ASSERT(mWaitingConsumers.Length() == 0, "Shouldn't have waiters");
  MOZ_ASSERT(mStatus, "Should be complete here");

  // If we've tried to compact once, don't attempt again.
  if (mCompacted) {
    return NS_OK;
  }

  mCompacted = true;

  // Compact our waiting consumers list, since we're complete and no future
  // consumer will ever have to wait.
  mWaitingConsumers.Compact();

  // If we have no chunks, then there's nothing to compact.
  if (mChunks.Length() < 1) {
    return NS_OK;
  }

  // If we have exactly one chunk and it has no excess capacity, then there's
  // nothing to compact.
  if (mChunks.Length() == 1 && mChunks[0].Length() == mChunks[0].Capacity()) {
    return NS_OK;
  }

  // If the last chunk has the maximum capacity, then we know the total size
  // will be quite large and not worth consolidating. We can, however, cheaply
  // trim the last chunk if it has excess capacity.
  size_t capacity = mChunks.LastElement().Capacity();
  if (capacity == MAX_CHUNK_CAPACITY) {
    size_t lastLength = mChunks.LastElement().Length();
    if (lastLength != capacity) {
      mChunks.LastElement().SetCapacity(lastLength);
    }
    return NS_OK;
  }

  // We can compact our buffer. Determine the total length.
  size_t length = 0;
  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    length += mChunks[i].Length();
  }

  // If our total length is zero (which means ExpectLength() got called, but no
  // data ever actually got written) then just empty our chunk list.
  if (MOZ_UNLIKELY(length == 0)) {
    mChunks.Clear();
    return NS_OK;
  }

  Chunk& mergeChunk = mChunks[0];
  if (MOZ_UNLIKELY(!mergeChunk.SetCapacity(length))) {
    NS_WARNING("Failed to reallocate chunk for SourceBuffer compacting - OOM?");
    return NS_OK;
  }

  // Copy our old chunks into the newly reallocated first chunk.
  for (uint32_t i = 1; i < mChunks.Length(); ++i) {
    size_t offset = mergeChunk.Length();
    MOZ_ASSERT(offset < mergeChunk.Capacity());
    MOZ_ASSERT(offset + mChunks[i].Length() <= mergeChunk.Capacity());

    memcpy(mergeChunk.Data() + offset, mChunks[i].Data(), mChunks[i].Length());
    mergeChunk.AddLength(mChunks[i].Length());
  }

  MOZ_ASSERT(mergeChunk.Length() == mergeChunk.Capacity(),
             "Compacted chunk has slack space");

  // Remove the redundant chunks.
  mChunks.RemoveElementsAt(1, mChunks.Length() - 1);
  mChunks.Compact();

  return NS_OK;
}
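
// Illustrative walkthrough of the merge path above, with hypothetical chunk
// sizes (the real MIN_CHUNK_CAPACITY/MAX_CHUNK_CAPACITY values live in
// SourceBuffer.h): given chunks with (capacity, length) of (4096, 4096) and
// (8192, 1000), the total length is 5096, so chunk 0 is reallocated via
// SetCapacity(5096), chunk 1's 1000 bytes are memcpy'd in at offset 4096,
// and chunk 1 is then removed, leaving a single exactly-sized chunk.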

/* static */ size_t
SourceBuffer::RoundedUpCapacity(size_t aCapacity)
{
  // Protect against overflow.
  if (MOZ_UNLIKELY(SIZE_MAX - aCapacity < MIN_CHUNK_CAPACITY)) {
    return aCapacity;
  }

  // Round up to the next multiple of MIN_CHUNK_CAPACITY (which should be the
  // size of a page).
  size_t roundedCapacity =
    (aCapacity + MIN_CHUNK_CAPACITY - 1) & ~(MIN_CHUNK_CAPACITY - 1);
  MOZ_ASSERT(roundedCapacity >= aCapacity, "Bad math?");
  MOZ_ASSERT(roundedCapacity - aCapacity < MIN_CHUNK_CAPACITY, "Bad math?");

  return roundedCapacity;
}
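
// Worked example of the rounding math above, assuming MIN_CHUNK_CAPACITY is
// 4096 (one page); the bitmask trick requires a power of two, which the
// header is assumed to guarantee:
//   RoundedUpCapacity(5000) == (5000 + 4095) & ~4095 == 8192
//   RoundedUpCapacity(4096) == (4096 + 4095) & ~4095 == 4096
//   RoundedUpCapacity(1)    == (1 + 4095)    & ~4095 == 4096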

size_t
SourceBuffer::FibonacciCapacityWithMinimum(size_t aMinCapacity)
{
  mMutex.AssertCurrentThreadOwns();

  // We grow the source buffer using a Fibonacci growth rate. It will be capped
  // at MAX_CHUNK_CAPACITY, unless the available data exceeds that.

  size_t length = mChunks.Length();

  if (length == 0 || aMinCapacity > MAX_CHUNK_CAPACITY) {
    return aMinCapacity;
  }

  if (length == 1) {
    return min(max(2 * mChunks[0].Capacity(), aMinCapacity),
               MAX_CHUNK_CAPACITY);
  }

  return min(max(mChunks[length - 1].Capacity() +
                   mChunks[length - 2].Capacity(),
                 aMinCapacity), MAX_CHUNK_CAPACITY);
}
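
// The capacities produced above follow a Fibonacci-style progression. With an
// illustrative initial capacity C and small requests, successive chunks run
// C, 2C, 3C, 5C, 8C, ... (each the sum of the previous two) until
// MAX_CHUNK_CAPACITY caps the sequence. This keeps the chunk count roughly
// logarithmic in the total data size while growing more gently than pure
// doubling would.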

void
SourceBuffer::AddWaitingConsumer(IResumable* aConsumer)
{
  mMutex.AssertCurrentThreadOwns();

  MOZ_ASSERT(!mStatus, "Waiting when we're complete?");

  if (aConsumer) {
    mWaitingConsumers.AppendElement(aConsumer);
  }
}

void
SourceBuffer::ResumeWaitingConsumers()
{
  mMutex.AssertCurrentThreadOwns();

  if (mWaitingConsumers.Length() == 0) {
    return;
  }

  for (uint32_t i = 0; i < mWaitingConsumers.Length(); ++i) {
    mWaitingConsumers[i]->Resume();
  }

  mWaitingConsumers.Clear();
}

nsresult
SourceBuffer::ExpectLength(size_t aExpectedLength)
{
  MOZ_ASSERT(aExpectedLength > 0, "Zero expected size?");

  MutexAutoLock lock(mMutex);

  if (MOZ_UNLIKELY(mStatus)) {
    MOZ_ASSERT_UNREACHABLE("ExpectLength after SourceBuffer is complete");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(mChunks.Length() > 0)) {
    MOZ_ASSERT_UNREACHABLE("Duplicate or post-Append call to ExpectLength");
    return NS_OK;
  }

  if (MOZ_UNLIKELY(!SurfaceCache::CanHold(aExpectedLength))) {
    NS_WARNING("SourceBuffer refused to store too large buffer");
    return HandleError(NS_ERROR_INVALID_ARG);
  }

  size_t length = min(aExpectedLength, MAX_CHUNK_CAPACITY);
  if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(length,
                                                     /* aExistingCapacity */ 0,
                                                     /* aRoundUp */ false))))) {
    return HandleError(NS_ERROR_OUT_OF_MEMORY);
  }

  return NS_OK;
}

nsresult
SourceBuffer::Append(const char* aData, size_t aLength)
{
  MOZ_ASSERT(aData, "Should have a buffer");
  MOZ_ASSERT(aLength > 0, "Writing a zero-sized chunk");

  size_t currentChunkCapacity = 0;
  size_t currentChunkLength = 0;
  char* currentChunkData = nullptr;
  size_t currentChunkRemaining = 0;
  size_t forCurrentChunk = 0;
  size_t forNextChunk = 0;
  size_t nextChunkCapacity = 0;
  size_t totalCapacity = 0;

  {
    MutexAutoLock lock(mMutex);

    if (MOZ_UNLIKELY(mStatus)) {
      // This SourceBuffer is already complete; ignore further data.
      return NS_ERROR_FAILURE;
    }

    if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(CreateChunk(aLength))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Copy out the current chunk's information so we can release the lock.
    // Note that this wouldn't be safe if multiple producers were allowed!
    Chunk& currentChunk = mChunks.LastElement();
    currentChunkCapacity = currentChunk.Capacity();
    currentChunkLength = currentChunk.Length();
    currentChunkData = currentChunk.Data();

    // Partition this data between the current chunk and the next chunk.
    // (Because we always allocate a chunk big enough to fit everything passed
    // to Append, we'll never need more than those two chunks to store
    // everything.)
    currentChunkRemaining = currentChunkCapacity - currentChunkLength;
    forCurrentChunk = min(aLength, currentChunkRemaining);
    forNextChunk = aLength - forCurrentChunk;

    // If we'll need another chunk, determine what its capacity should be while
    // we still hold the lock.
    nextChunkCapacity = forNextChunk > 0
                      ? FibonacciCapacityWithMinimum(forNextChunk)
                      : 0;

    for (uint32_t i = 0; i < mChunks.Length(); ++i) {
      totalCapacity += mChunks[i].Capacity();
    }
  }

  // Write everything we can fit into the current chunk.
  MOZ_ASSERT(currentChunkLength + forCurrentChunk <= currentChunkCapacity);
  memcpy(currentChunkData + currentChunkLength, aData, forCurrentChunk);

  // If there's something left, create a new chunk and write it there.
  Maybe<Chunk> nextChunk;
  if (forNextChunk > 0) {
    MOZ_ASSERT(nextChunkCapacity >= forNextChunk, "Next chunk too small?");
    nextChunk = CreateChunk(nextChunkCapacity, totalCapacity);
    if (MOZ_LIKELY(nextChunk && !nextChunk->AllocationFailed())) {
      memcpy(nextChunk->Data(), aData + forCurrentChunk, forNextChunk);
      nextChunk->AddLength(forNextChunk);
    }
  }

  // Update shared data structures.
  {
    MutexAutoLock lock(mMutex);

    // Update the length of the current chunk.
    Chunk& currentChunk = mChunks.LastElement();
    MOZ_ASSERT(currentChunk.Data() == currentChunkData, "Multiple producers?");
    MOZ_ASSERT(currentChunk.Length() == currentChunkLength,
               "Multiple producers?");

    currentChunk.AddLength(forCurrentChunk);

    // If we created a new chunk, add it to the series.
    if (forNextChunk > 0) {
      if (MOZ_UNLIKELY(!nextChunk)) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }

      if (MOZ_UNLIKELY(NS_FAILED(AppendChunk(std::move(nextChunk))))) {
        return HandleError(NS_ERROR_OUT_OF_MEMORY);
      }
    }

    // Resume any waiting readers now that there's new data.
    ResumeWaitingConsumers();
  }

  return NS_OK;
}
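
// Worked example of the two-chunk partition in Append(), with illustrative
// numbers: if the current chunk has capacity 4096 and length 4000, appending
// 200 bytes gives currentChunkRemaining = 96, forCurrentChunk = 96, and
// forNextChunk = 104. The new chunk's capacity then comes from
// FibonacciCapacityWithMinimum(104), so a single Append() call never needs
// more than two chunks.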

static nsresult
AppendToSourceBuffer(nsIInputStream*,
                     void* aClosure,
                     const char* aFromRawSegment,
                     uint32_t,
                     uint32_t aCount,
                     uint32_t* aWriteCount)
{
  SourceBuffer* sourceBuffer = static_cast<SourceBuffer*>(aClosure);

  // Copy the source data. Unless we hit OOM, we squelch the return value here,
  // because returning an error means that ReadSegments stops reading data, and
  // we want to ensure that we read everything we get. If we hit OOM then we
  // return a failed status to the caller.
  nsresult rv = sourceBuffer->Append(aFromRawSegment, aCount);
  if (rv == NS_ERROR_OUT_OF_MEMORY) {
    return rv;
  }

  // Report that we wrote everything we got.
  *aWriteCount = aCount;

  return NS_OK;
}

nsresult
SourceBuffer::AppendFromInputStream(nsIInputStream* aInputStream,
                                    uint32_t aCount)
{
  uint32_t bytesRead;
  nsresult rv = aInputStream->ReadSegments(AppendToSourceBuffer, this,
                                           aCount, &bytesRead);
  if (NS_WARN_IF(NS_FAILED(rv))) {
    return rv;
  }

  if (bytesRead == 0) {
    // The loading of the image has been canceled.
    return NS_ERROR_FAILURE;
  }

  if (bytesRead != aCount) {
    // Only some of the given data was read. We may have failed in
    // SourceBuffer::Append but ReadSegments swallowed the error. Otherwise the
    // stream itself failed to yield the data.
    MutexAutoLock lock(mMutex);
    if (mStatus) {
      MOZ_ASSERT(NS_FAILED(*mStatus));
      return *mStatus;
    }

    MOZ_ASSERT_UNREACHABLE("AppendToSourceBuffer should consume everything");
  }

  return rv;
}

void
SourceBuffer::Complete(nsresult aStatus)
{
  MutexAutoLock lock(mMutex);

  // When an error occurs internally (e.g. due to an OOM), we save the status.
  // This will indirectly trigger a failure higher up and that will call
  // SourceBuffer::Complete. Since it doesn't necessarily know we are already
  // complete, it is safe to ignore.
  if (mStatus && (MOZ_UNLIKELY(NS_SUCCEEDED(*mStatus) ||
                               aStatus != NS_IMAGELIB_ERROR_FAILURE))) {
    MOZ_ASSERT_UNREACHABLE("Called Complete more than once");
    return;
  }

  if (MOZ_UNLIKELY(NS_SUCCEEDED(aStatus) && IsEmpty())) {
    // It's illegal to succeed without writing anything.
    aStatus = NS_ERROR_FAILURE;
  }

  mStatus = Some(aStatus);

  // Resume any waiting consumers now that we're complete.
  ResumeWaitingConsumers();

  // If we still have active consumers, just return.
  if (mConsumerCount > 0) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool
SourceBuffer::IsComplete()
{
  MutexAutoLock lock(mMutex);
  return bool(mStatus);
}

size_t
SourceBuffer::SizeOfIncludingThisWithComputedFallback(MallocSizeOf
                                                        aMallocSizeOf) const
{
  MutexAutoLock lock(mMutex);

  size_t n = aMallocSizeOf(this);
  n += mChunks.ShallowSizeOfExcludingThis(aMallocSizeOf);

  for (uint32_t i = 0; i < mChunks.Length(); ++i) {
    size_t chunkSize = aMallocSizeOf(mChunks[i].Data());

    if (chunkSize == 0) {
      // We're on a platform where moz_malloc_size_of always returns 0.
      chunkSize = mChunks[i].Capacity();
    }

    n += chunkSize;
  }

  return n;
}

SourceBufferIterator
SourceBuffer::Iterator(size_t aReadLength)
{
  {
    MutexAutoLock lock(mMutex);
    mConsumerCount++;
  }

  return SourceBufferIterator(this, aReadLength);
}

void
SourceBuffer::OnIteratorRelease()
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(mConsumerCount > 0, "Consumer count doesn't add up");
  mConsumerCount--;

  // If we still have active consumers, or we're not complete yet, then return.
  if (mConsumerCount > 0 || !mStatus) {
    return;
  }

  // Attempt to compact our buffer down to a single chunk.
  Compact();
}

bool
SourceBuffer::RemainingBytesIsNoMoreThan(const SourceBufferIterator& aIterator,
                                         size_t aBytes) const
{
  MutexAutoLock lock(mMutex);

  // If we're not complete, we always say no.
  if (!mStatus) {
    return false;
  }

  // If the iterator's at the end, the answer is trivial.
  if (!aIterator.HasMore()) {
    return true;
  }

  uint32_t iteratorChunk = aIterator.mData.mIterating.mChunk;
  size_t iteratorOffset = aIterator.mData.mIterating.mOffset;
  size_t iteratorLength = aIterator.mData.mIterating.mAvailableLength;

  // Include the bytes the iterator is currently pointing to in the limit, so
  // that the current chunk doesn't have to be a special case.
  size_t bytes = aBytes + iteratorOffset + iteratorLength;

  // Count the length over all of our chunks, starting with the one that the
  // iterator is currently pointing to. (This is O(N), but N is expected to be
  // ~1, so it doesn't seem worth caching the length separately.)
  size_t lengthSoFar = 0;
  for (uint32_t i = iteratorChunk; i < mChunks.Length(); ++i) {
    lengthSoFar += mChunks[i].Length();
    if (lengthSoFar > bytes) {
      return false;
    }
  }

  return true;
}

SourceBufferIterator::State
SourceBuffer::AdvanceIteratorOrScheduleResume(SourceBufferIterator& aIterator,
                                              size_t aRequestedBytes,
                                              IResumable* aConsumer)
{
  MutexAutoLock lock(mMutex);

  MOZ_ASSERT(aIterator.HasMore(), "Advancing a completed iterator and "
                                  "AdvanceOrScheduleResume didn't catch it");

  if (MOZ_UNLIKELY(mStatus && NS_FAILED(*mStatus))) {
    // This SourceBuffer is complete due to an error; all reads fail.
    return aIterator.SetComplete(*mStatus);
  }

  if (MOZ_UNLIKELY(mChunks.Length() == 0)) {
    // We haven't gotten an initial chunk yet.
    AddWaitingConsumer(aConsumer);
    return aIterator.SetWaiting(!!aConsumer);
  }

  uint32_t iteratorChunkIdx = aIterator.mData.mIterating.mChunk;
  MOZ_ASSERT(iteratorChunkIdx < mChunks.Length());

  const Chunk& currentChunk = mChunks[iteratorChunkIdx];
  size_t iteratorEnd = aIterator.mData.mIterating.mOffset +
                       aIterator.mData.mIterating.mAvailableLength;
  MOZ_ASSERT(iteratorEnd <= currentChunk.Length());
  MOZ_ASSERT(iteratorEnd <= currentChunk.Capacity());

  if (iteratorEnd < currentChunk.Length()) {
    // There's more data in the current chunk.
    return aIterator.SetReady(iteratorChunkIdx, currentChunk.Data(),
                              iteratorEnd, currentChunk.Length() - iteratorEnd,
                              aRequestedBytes);
  }

  if (iteratorEnd == currentChunk.Capacity() &&
      !IsLastChunk(iteratorChunkIdx)) {
    // Advance to the next chunk.
    const Chunk& nextChunk = mChunks[iteratorChunkIdx + 1];
    return aIterator.SetReady(iteratorChunkIdx + 1, nextChunk.Data(), 0,
                              nextChunk.Length(), aRequestedBytes);
  }

  MOZ_ASSERT(IsLastChunk(iteratorChunkIdx), "Should've advanced");

  if (mStatus) {
    // There's no more data and this SourceBuffer completed successfully.
    MOZ_ASSERT(NS_SUCCEEDED(*mStatus), "Handled failures earlier");
    return aIterator.SetComplete(*mStatus);
  }

  // We're not complete, but there's no more data right now. Arrange to wake up
  // the consumer when we get more data.
  AddWaitingConsumer(aConsumer);
  return aIterator.SetWaiting(!!aConsumer);
}

nsresult
SourceBuffer::HandleError(nsresult aError)
{
  MOZ_ASSERT(NS_FAILED(aError), "Should have an error here");
  MOZ_ASSERT(aError == NS_ERROR_OUT_OF_MEMORY ||
             aError == NS_ERROR_INVALID_ARG,
             "Unexpected error; may want to notify waiting readers, which "
             "HandleError currently doesn't do");

  mMutex.AssertCurrentThreadOwns();

  NS_WARNING("SourceBuffer encountered an unrecoverable error");

  // Record the error.
  mStatus = Some(aError);

  // Drop our references to waiting readers.
  mWaitingConsumers.Clear();

  return *mStatus;
}

bool
SourceBuffer::IsEmpty()
{
  mMutex.AssertCurrentThreadOwns();
  return mChunks.Length() == 0 ||
         mChunks[0].Length() == 0;
}

bool
SourceBuffer::IsLastChunk(uint32_t aChunk)
{
  mMutex.AssertCurrentThreadOwns();
  return aChunk + 1 == mChunks.Length();
}

} // namespace image
} // namespace mozilla