fix: Update PageCache to handle non-chronological ordering (#633)

The PageCache implementation wasn't properly dealing with timelines that
could return statuses in non-chronological order.

For example, suppose you bookmark a recent status, then go back in the
timeline and bookmark an older status. The bookmarks timeline is ordered
by the time of the bookmark event, not the creation time of the status
that was bookmarked.

If a sufficiently old status was bookmarked so it straddled a page
boundary you could have a situation where the range of status IDs in two
different cached pages overlapped.

E.g., this log extract:

```
0: k: 110912736679636090, prev: 3521487, next: 3057175, size: 40, range: 112219564107059218..110912736679636090
1: k: 111651744569170291, prev: 3049659, next: 2710596, size: 40, range: 111926741634665808..111651744569170291
```

The range of IDs in page 0 overlaps with the range of IDs in page 1.

The previous `PageCache` assumed this couldn't happen, and broke in
various interesting ways when it did.

E.g., you can't find the page that contains a given status by looking
for the largest key less than the needle's status id. Given the pages
above looking for ID 112219564107059218 (first status in page 0) would
suggest page 1 as having the greatest key less than that ID. This
manifested as the correct page briefly appearing in the UI (page 0),
then being completely replaced with page 1.

Rewrite PageCache to fix this. The previous implementation used a single
`TreeMap` assuming items were always sorted by ID. The new code keeps an
unordered map from status IDs to the page that contains that status, and
a separate `LinkedList` that contains the pages in order they're
provided by the API. This decouples the ordering of pages in the cache
from the overall ordering of items within the pages.
This commit is contained in:
Nik Clayton 2024-04-22 20:34:16 +02:00 committed by GitHub
parent b8939dd2cf
commit b0d63e4243
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
6 changed files with 224 additions and 212 deletions

View File

@ -132,7 +132,7 @@ class NetworkTimelineRepository @Inject constructor(
fun removeStatusWithId(statusId: String) {
synchronized(pageCache) {
pageCache.floorEntry(statusId)?.value?.data?.removeAll { status ->
pageCache.getPageById(statusId)?.data?.removeAll { status ->
status.id == statusId || status.reblog?.id == statusId
}
}
@ -141,7 +141,7 @@ class NetworkTimelineRepository @Inject constructor(
fun updateStatusById(statusId: String, updater: (Status) -> Status) {
synchronized(pageCache) {
pageCache.floorEntry(statusId)?.value?.let { page ->
pageCache.getPageById(statusId)?.let { page ->
val index = page.data.indexOfFirst { it.id == statusId }
if (index != -1) {
page.data[index] = updater(page.data[index])
@ -153,7 +153,7 @@ class NetworkTimelineRepository @Inject constructor(
fun updateActionableStatusById(statusId: String, updater: (Status) -> Status) {
synchronized(pageCache) {
pageCache.floorEntry(statusId)?.value?.let { page ->
pageCache.getPageById(statusId)?.let { page ->
val index = page.data.indexOfFirst { it.id == statusId }
if (index != -1) {
val status = page.data[index]

View File

@ -26,7 +26,11 @@ import timber.log.Timber
private val INVALID = LoadResult.Invalid<String, Status>()
/** [PagingSource] for Mastodon Status, identified by the Status ID */
/**
* [PagingSource] for Mastodon Status, identified by the Status ID
*
* @param pageCache The [PageCache] backing this source
*/
class NetworkTimelinePagingSource @Inject constructor(
private val pageCache: PageCache,
) : PagingSource<String, Status>() {
@ -36,68 +40,17 @@ class NetworkTimelinePagingSource @Inject constructor(
pageCache.debug()
val page = synchronized(pageCache) {
if (pageCache.isEmpty()) {
return@synchronized null
}
if (pageCache.isEmpty()) return@synchronized null
when (params) {
is LoadParams.Refresh -> {
// Return the page that contains the given key, or the most recent page if
// the key isn't in the cache.
params.key?.let { key ->
pageCache.floorEntry(key)?.value
} ?: pageCache.lastEntry()?.value
pageCache.getPageById(params.key) ?: pageCache.firstPage
}
// Loading previous / next pages (`Prepend` or `Append`) is a little complicated.
//
// Append and Prepend requests have a `params.key` that corresponds to the previous
// or next page. For some timeline types those keys have the same form as the
// item keys, and are valid item keys.
//
// But for some timeline types they are completely different.
//
// For example, bookmarks might have item keys that look like 110542553707722778
// but prevKey / nextKey values that look like 1480606 / 1229303.
//
// There's no guarantee that the `nextKey` value for one page matches the `prevKey`
// value of the page immediately before it.
//
// E.g., suppose `pages` has the following entries (older entries have lower page
// indices).
//
// .--- page index
// | .-- ID of last item (key in `pageCache`)
// v V
// 0: k: 109934818460629189, prevKey: 995916, nextKey: 941865
// 1: k: 110033940961955385, prevKey: 1073324, nextKey: 997376
// 2: k: 110239564017438509, prevKey: 1224838, nextKey: 1073352
// 3: k: 110542553707722778, prevKey: 1480606, nextKey: 1229303
//
// And the request is `LoadParams.Append` with `params.key` == 1073352. This means
// "fetch the page *before* the page that has `nextKey` == 1073352".
//
// The desired page has index 1. But that can't be found directly, because although
// the page after it (index 2) points back to it with the `nextKey` value, the page
// at index 1 **does not** have a `prevKey` value of 1073352. There can be gaps in
// the `prevKey` / `nextKey` chain -- I assume this is a Mastodon implementation
// detail.
//
// Further, we can't assume anything about the structure of the keys.
//
// To find the correct page for Append we must:
//
// 1. Find the page that has a `nextKey` value that matches `params.key` (page 2)
// 2. Get that page's key ("110239564017438509")
// 3. Return the page with the key that is immediately lower than the key from step 2
//
// The approach for Prepend is the same, except it is `prevKey` that is checked.
is LoadParams.Append -> {
pageCache.firstNotNullOfOrNull { entry -> entry.takeIf { it.value.nextKey == params.key }?.value }
?.let { page -> pageCache.lowerEntry(page.data.last().id)?.value }
pageCache.getNextPage(params.key)
}
is LoadParams.Prepend -> {
pageCache.firstNotNullOfOrNull { entry -> entry.takeIf { it.value.prevKey == params.key }?.value }
?.let { page -> pageCache.higherEntry(page.data.last().id)?.value }
pageCache.getPrevPage(params.key)
}
}
}
@ -117,22 +70,14 @@ class NetworkTimelinePagingSource @Inject constructor(
return INVALID
}
// Calculate itemsBefore and itemsAfter values to include in the returned Page.
// Set itemsBefore and itemsAfter values to include in the returned Page.
// If you do not do this (and this is not documented anywhere) then the anchorPosition
// in the PagingState (used in getRefreshKey) is bogus, and refreshing the list can
// result in large jumps in the user's position.
//
// The items are calculated relative to the local cache, not the remote data source.
val itemsBefore = page?.let {
it.prevKey?.let { key ->
pageCache.tailMap(key).values.fold(0) { sum, p -> sum + p.data.size }
}
} ?: 0
val itemsAfter = page?.let {
// Note: headMap and tailMap have different behaviour, tailMap is greater-or-equal,
// headMap is strictly less than, so `it.nextKey` does not work here.
pageCache.headMap(it.data.first().id).values.fold(0) { sum, p -> sum + p.data.size }
} ?: 0
val itemsBefore = pageCache.itemsBefore(page?.prevKey)
val itemsAfter = pageCache.itemsAfter(page?.nextKey)
return LoadResult.Page(
page?.data ?: emptyList(),
@ -146,7 +91,7 @@ class NetworkTimelinePagingSource @Inject constructor(
override fun getRefreshKey(state: PagingState<String, Status>): String? {
val refreshKey = state.anchorPosition?.let {
state.closestItemToPosition(it)?.id
} ?: pageCache.firstEntry()?.value?.data?.let {
} ?: pageCache.firstPage?.data?.let {
it.getOrNull(it.size / 2)?.id
}

View File

@ -58,45 +58,24 @@ class NetworkTimelineRemoteMediator(
val itemKey = state.anchorPosition?.let { state.closestItemToPosition(it) }?.id
itemKey?.let { ik ->
// Find the page that contains the item, so the remote key can be determined
// Most Mastodon timelines are ordered by ID, greatest ID first. But not all
// (https://github.com/mastodon/documentation/issues/1292 explains that
// trends/statuses isn't). This makes finding the relevant page a little
// more complicated.
// First, assume that they are ordered by ID, and find the page that should
// contain this item.
var pageContainingItem = pageCache.floorEntry(ik)?.value
// Second, if no page was found it means the statuses are not sorted, and
// the entire cache must be searched.
if (pageContainingItem == null) {
for (page in pageCache.values) {
val s = page.data.find { it.id == ik }
if (s != null) {
pageContainingItem = page
break
}
}
pageContainingItem ?: throw java.lang.IllegalStateException("$itemKey not found in the pageCache page")
}
val pageContainingItem = pageCache.getPageById(ik)
// Double check the item appears in the page
if (BuildConfig.DEBUG) {
pageContainingItem ?: throw java.lang.IllegalStateException("page with $itemKey not found")
pageContainingItem.data.find { it.id == itemKey }
?: throw java.lang.IllegalStateException("$itemKey not found in returned page")
}
// The desired key is the prevKey of the page immediately before this one
pageCache.lowerEntry(pageContainingItem.data.last().id)?.value?.prevKey
pageCache.getPrevPage(pageContainingItem?.prevKey)?.prevKey
}
}
LoadType.APPEND -> {
pageCache.firstEntry()?.value?.nextKey ?: return MediatorResult.Success(endOfPaginationReached = true)
pageCache.lastPage?.nextKey ?: return MediatorResult.Success(endOfPaginationReached = true)
}
LoadType.PREPEND -> {
pageCache.lastEntry()?.value?.prevKey ?: return MediatorResult.Success(endOfPaginationReached = true)
pageCache.firstPage?.prevKey ?: return MediatorResult.Success(endOfPaginationReached = true)
}
}
@ -108,11 +87,7 @@ class NetworkTimelineRemoteMediator(
val endOfPaginationReached = page.data.isEmpty()
if (!endOfPaginationReached) {
synchronized(pageCache) {
if (loadType == LoadType.REFRESH) {
pageCache.clear()
}
pageCache.upsert(page)
pageCache.add(page, loadType)
Timber.d(
" Page %s complete for %s, now got %d pages",
loadType,

View File

@ -17,11 +17,12 @@
package app.pachli.components.timeline.viewmodel
import androidx.annotation.VisibleForTesting
import androidx.paging.LoadType
import app.pachli.BuildConfig
import app.pachli.core.common.string.isLessThan
import app.pachli.core.network.model.Links
import app.pachli.core.network.model.Status
import java.util.TreeMap
import java.util.LinkedList
import kotlin.Result.Companion.failure
import kotlin.Result.Companion.success
import retrofit2.HttpException
@ -43,53 +44,7 @@ data class Page(
*/
val nextKey: String? = null,
) {
override fun toString() = "k: ${data.lastOrNull()?.id}, prev: $prevKey, next: $nextKey, size: ${"%2d".format(data.size)}, range: ${data.firstOrNull()?.id}..${data.lastOrNull()?.id}"
/**
* Return a new page consisting of this page, plus the data from [pages].
*/
fun merge(vararg pages: Page?): Page {
val d = data
var next = nextKey
var prev = prevKey
pages.filterNotNull().forEach {
d.addAll(it.data)
if (next != null) {
if (it.nextKey == null || it.nextKey.isLessThan(next!!)) next = it.nextKey
}
if (prev != null) {
if (prev!!.isLessThan(it.prevKey ?: "")) prev = it.prevKey
}
}
d.sortWith(compareBy({ it.id.length }, { it.id }))
d.reverse()
if (nextKey?.isLessThan(next ?: "") == true) throw java.lang.IllegalStateException("New next $next is greater than old nextKey $nextKey")
if (prev?.isLessThan(prevKey ?: "") == true) throw java.lang.IllegalStateException("New prev $prev is less than old $prevKey")
// Debug assertions
if (BuildConfig.DEBUG) {
// There should never be duplicate items across all the pages.
val ids = d.map { it.id }
val groups = ids.groupingBy { it }.eachCount().filter { it.value > 1 }
if (groups.isNotEmpty()) {
throw IllegalStateException("Duplicate item IDs in results!: $groups")
}
// Data should always be sorted newest first
if (d.first().id.isLessThan(d.last().id)) {
throw IllegalStateException("Items in data are *not* sorted newest first")
}
}
return Page(
data = d,
nextKey = next,
prevKey = prev,
)
}
override fun toString() = "size: ${"%2d".format(data.size)}, range: ${data.firstOrNull()?.id}..${data.lastOrNull()?.id}, prevKey: $prevKey, nextKey: $nextKey"
companion object {
fun tryFrom(response: Response<List<Status>>): Result<Page> {
@ -114,56 +69,185 @@ data class Page(
}
/**
* Cache of pages from Mastodon API calls.
* Cache of pages from the Mastodon API.
*
* Cache pages are identified by the ID of the **last** (smallest, oldest) key in the page.
* Add pages to the cache with [add].
*
* It's the last item, and not the first because a page may be incomplete. E.g.,
* a prepend operation completes, and instead of loading pageSize items it loads
* (pageSize - 10) items, because only (pageSize - 10) items were available at the
* time of the API call.
* To get a page from the cache you can either:
*
* - Get the first page (contains newest items) with [firstPage]
* - Get the last page (contains oldest items) with [lastPage]
* - Get the page that contains an item with a given ID with [getPageById]
*
* If you have a page and want to get the immediately previous (newer) page use
* [getPrevPage] passing the `prevKey` of the original page.
*
* ```kotlin
* val page = pageCache.getPageById("some_id")
* val previousPage = pageCache.getPrevPage(page.prevKey)
* ```
*
* If you have a page and want to get the immediately next (older) page use
* [getNextPage] passing the `nextKey` of the original page.
*
* ```kotlin
* val page = pageCache.getPageById("some_id")
* val nextPage = pageCache.getNextPage(page.nextKey)
* ```
*
* If the page was subsequently refreshed, *and* the ID of the first (newest) item
* was used as the key then you might have two pages that contain overlapping
* items.
*/
class PageCache : TreeMap<String, Page>(compareBy({ it.length }, { it })) {
// This is more complicated than I'd like.
//
// A naive approach would model a cache of pages as an ordered map, where the map key is an
// ID of the oldest status in the cache.
//
// This does not work because not all timelines of statuses are ordered by status ID. E.g.,
//
// - Trending statuses is ordered by server-determined popularity
// - Bookmarks and favourites are ordered by the time the user performed the bookmark or
// favourite operation
//
// So a page of data returned from the Mastodon API does not have an intrinsic ID that can
// be used as a cache key.
//
// In addition, we generally want to find a page using one of three identifiers:
//
// - The item ID of an item that is in the page (e.g., status ID)
// - The `prevKey` value of another page
// - The `nextKey` value of another page
//
// So a single map with a single key doesn't work either.
//
// The three identifiers (status ID, `prevKey`, and `nextKey`) can be in different
// namespaces. For some timelines (e.g., the local timeline) they are status IDs. But in
// other timelines, like bookmarks or favourites, they are opaque tokens.
//
// For example, bookmarks might have item keys that look like 110542553707722778
// but prevKey / nextKey values that look like 1480606 / 1229303.
//
// `prevKey` and `nextKey` values are not guaranteed to match on either side. So if you
// have three pages like this
//
// <-- newer older -->
// Page1 Page2 Page3
//
// Page1 might point back to Page2 with `nextKey = xxx`. But Page3 **does not** have to
// point back to Page2 with `prevKey = xxx`, it can use a different value. And if all
// you have is Page2 you can't ask "What prevKey value does Page3 use to point back?"
class PageCache {
/** Map from item identifier (e.g., status ID) to the page that contains this item */
@VisibleForTesting
val idToPage = mutableMapOf<String, Page>()
/**
* Adds a new page to the cache or updates the existing page with the given key
* List of all pages, in display order. Pages at the front of the list are
* newer results from the API and are displayed first.
*/
fun upsert(page: Page) {
val key = page.data.last().id
private val pages = LinkedList<Page>()
Timber.d("Inserting new page:")
Timber.d(" %s", page)
/** The first page in the cache (i.e., the top / newest entry in the timeline) */
val firstPage: Page?
get() = pages.firstOrNull()
this[key] = page
/** The last page in the cache (i.e., the bottom / oldest entry in the timeline) */
val lastPage: Page?
get() = pages.lastOrNull()
// There should never be duplicate items across all the pages. Enforce this in debug mode.
if (BuildConfig.DEBUG) {
val ids = buildList {
this.addAll(this@PageCache.map { entry -> entry.value.data.map { it.id } }.flatten())
/** The size of the cache, in pages */
val size
get() = pages.size
/** The values in the cache */
val values
get() = idToPage.values
/** Adds [page] to the cache with the given [loadType] */
fun add(page: Page, loadType: LoadType) {
// Refreshing clears the cache then adds the page. Prepend and Append
// only have to add the page at the appropriate position
when (loadType) {
LoadType.REFRESH -> {
clear()
pages.add(page)
}
val groups = ids.groupingBy { it }.eachCount().filter { it.value > 1 }
if (groups.isNotEmpty()) {
throw IllegalStateException("Duplicate item IDs in results!: $groups")
LoadType.PREPEND -> pages.addFirst(page)
LoadType.APPEND -> pages.addLast(page)
}
// Insert the items from the page in to the cache
page.data.forEach { status ->
// There should never be duplicate items across all pages. Enforce this in debug mode
if (BuildConfig.DEBUG) {
if (idToPage.containsKey(status.id)) {
debug()
throw IllegalStateException("Duplicate item ID ${status.id} in pagesById")
}
}
idToPage[status.id] = page
}
}
/** @return page that contains [statusId], null if that [statusId] is not in the cache */
fun getPageById(statusId: String?) = idToPage[statusId]
/** @return page after the page that has the given [nextKey] value */
fun getNextPage(nextKey: String?): Page? {
return synchronized(pages) {
val index = pages.indexOfFirst { it.nextKey == nextKey }.takeIf { it != -1 } ?: return null
pages.getOrNull(index + 1)
}
}
/** @return page before the page that has the given [prevKey] value */
fun getPrevPage(prevKey: String?): Page? {
return synchronized(pages) {
val index = pages.indexOfFirst { it.prevKey == prevKey }.takeIf { it != -1 } ?: return null
pages.getOrNull(index - 1)
}
}
/** @return true if the page cache is empty */
fun isEmpty() = idToPage.isEmpty()
/** Clear the cache */
fun clear() {
idToPage.clear()
pages.clear()
}
/** @return the number of **items** in the pages **before** the page identified by [prevKey] */
fun itemsBefore(prevKey: String?): Int {
prevKey ?: return 0
val index = pages.indexOfFirst { it.prevKey == prevKey }
if (index <= 0) return 0
return pages.subList(0, index).fold(0) { sum, page -> sum + page.data.size }
}
/**
* Logs the current state of the cache
* @return the number of **items** in the pages **after** the page identified by [nextKey]
*/
fun itemsAfter(nextKey: String?): Int {
nextKey ?: return 0
val index = pages.indexOfFirst { it.nextKey == nextKey }
if (index == -1 || index == pages.size) return 0
return pages.subList(index + 1, pages.size).fold(0) { sum, page -> sum + page.data.size }
}
/** Logs debug information when [BuildConfig.DEBUG] is true */
fun debug() {
if (BuildConfig.DEBUG) { // Makes it easier for Proguard to optimise this out
Timber.d("Page cache state:")
if (this.isEmpty()) {
Timber.d(" ** empty **")
} else {
this.onEachIndexed { index, entry ->
Timber.d(" %d: %s", index, entry.value)
}
}
if (!BuildConfig.DEBUG) return
Timber.d("Page cache state:")
if (idToPage.isEmpty()) {
Timber.d(" ** empty **")
return
}
idToPage.values.groupBy { it.prevKey }.values.forEachIndexed { index, pages ->
Timber.d(" %d: %s", index, pages.first())
}
}
}

View File

@ -17,6 +17,7 @@
package app.pachli.components.timeline
import androidx.paging.LoadType
import androidx.paging.PagingSource
import androidx.paging.PagingSource.LoadResult
import androidx.test.ext.junit.runners.AndroidJUnit4
@ -58,9 +59,9 @@ class NetworkTimelinePagingSourceTest {
fun `load() for an item in a page returns the page containing that item and next, prev keys`() = runTest {
// Given
val pages = PageCache().apply {
upsert(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"))
upsert(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"))
upsert(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"))
add(Page(data = mutableListOf(mockStatus(id = "3")), nextKey = "1", prevKey = "4"), LoadType.REFRESH)
add(Page(data = mutableListOf(mockStatus(id = "1"), mockStatus(id = "2")), nextKey = "0", prevKey = "3"), LoadType.APPEND)
add(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"), LoadType.APPEND)
}
val pagingSource = NetworkTimelinePagingSource(pages)
@ -72,8 +73,8 @@ class NetworkTimelinePagingSourceTest {
assertThat((loadResult as? LoadResult.Page))
.isEqualTo(
LoadResult.Page(
data = listOf(mockStatus(id = "1")),
prevKey = "2",
data = listOf(mockStatus(id = "1"), mockStatus(id = "2")),
prevKey = "3",
nextKey = "0",
itemsBefore = 1,
itemsAfter = 1,
@ -85,9 +86,9 @@ class NetworkTimelinePagingSourceTest {
fun `append returns the page after`() = runTest {
// Given
val pages = PageCache().apply {
upsert(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"))
upsert(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"))
upsert(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"))
add(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"), LoadType.REFRESH)
add(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"), LoadType.APPEND)
add(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"), LoadType.APPEND)
}
val pagingSource = NetworkTimelinePagingSource(pages)
@ -112,9 +113,9 @@ class NetworkTimelinePagingSourceTest {
fun `prepend returns the page before`() = runTest {
// Given
val pages = PageCache().apply {
upsert(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"))
upsert(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"))
upsert(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"))
add(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"), LoadType.REFRESH)
add(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"), LoadType.APPEND)
add(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"), LoadType.APPEND)
}
val pagingSource = NetworkTimelinePagingSource(pages)
@ -139,9 +140,9 @@ class NetworkTimelinePagingSourceTest {
fun `Refresh with null key returns the latest page`() = runTest {
// Given
val pages = PageCache().apply {
upsert(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"))
upsert(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"))
upsert(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"))
add(Page(data = mutableListOf(mockStatus(id = "2")), nextKey = "1"), LoadType.REFRESH)
add(Page(data = mutableListOf(mockStatus(id = "1")), nextKey = "0", prevKey = "2"), LoadType.APPEND)
add(Page(data = mutableListOf(mockStatus(id = "0")), prevKey = "1"), LoadType.APPEND)
}
val pagingSource = NetworkTimelinePagingSource(pages)
@ -166,8 +167,8 @@ class NetworkTimelinePagingSourceTest {
fun `Append with a too-old key returns empty list`() = runTest {
// Given
val pages = PageCache().apply {
upsert(Page(data = mutableListOf(mockStatus(id = "20")), nextKey = "10"))
upsert(Page(data = mutableListOf(mockStatus(id = "10")), prevKey = "20"))
add(Page(data = mutableListOf(mockStatus(id = "20")), nextKey = "10"), LoadType.REFRESH)
add(Page(data = mutableListOf(mockStatus(id = "10")), prevKey = "20"), LoadType.APPEND)
}
val pagingSource = NetworkTimelinePagingSource(pages)
@ -193,8 +194,8 @@ class NetworkTimelinePagingSourceTest {
fun `Prepend with a too-new key returns empty list`() = runTest {
// Given
val pages = PageCache().apply {
upsert(Page(data = mutableListOf(mockStatus(id = "20")), nextKey = "10"))
upsert(Page(data = mutableListOf(mockStatus(id = "10")), prevKey = "20"))
add(Page(data = mutableListOf(mockStatus(id = "20")), nextKey = "10"), LoadType.REFRESH)
add(Page(data = mutableListOf(mockStatus(id = "10")), prevKey = "20"), LoadType.APPEND)
}
val pagingSource = NetworkTimelinePagingSource(pages)

View File

@ -149,18 +149,19 @@ class NetworkTimelineRemoteMediatorTest {
// Then
val expectedPages = PageCache().apply {
upsert(
add(
Page(
data = mutableListOf(mockStatus("7"), mockStatus("6"), mockStatus("5")),
prevKey = "7",
nextKey = "5",
),
LoadType.REFRESH,
)
}
assertThat(result).isInstanceOf(RemoteMediator.MediatorResult.Success::class.java)
assertThat((result as RemoteMediator.MediatorResult.Success).endOfPaginationReached).isFalse()
assertThat(pages).containsExactlyEntriesIn(expectedPages)
assertThat(pages.idToPage).containsExactlyEntriesIn(expectedPages.idToPage)
// Page cache was modified, so the pager should have been invalidated
verify(pagingSourceFactory).invalidate()
@ -171,12 +172,13 @@ class NetworkTimelineRemoteMediatorTest {
fun `should prepend statuses`() = runTest {
// Given
val pages = PageCache().apply {
upsert(
add(
Page(
data = mutableListOf(mockStatus("7"), mockStatus("6"), mockStatus("5")),
prevKey = "7",
nextKey = "5",
),
LoadType.REFRESH,
)
}
@ -212,25 +214,27 @@ class NetworkTimelineRemoteMediatorTest {
// Then
val expectedPages = PageCache().apply {
upsert(
add(
Page(
data = mutableListOf(mockStatus("7"), mockStatus("6"), mockStatus("5")),
prevKey = "7",
nextKey = "5",
),
LoadType.REFRESH,
)
upsert(
add(
Page(
data = mutableListOf(mockStatus("10"), mockStatus("9"), mockStatus("8")),
prevKey = "10",
nextKey = "8",
),
LoadType.PREPEND,
)
}
assertThat(result).isInstanceOf(RemoteMediator.MediatorResult.Success::class.java)
assertThat((result as RemoteMediator.MediatorResult.Success).endOfPaginationReached).isFalse()
assertThat(pages).containsExactlyEntriesIn(expectedPages)
assertThat(pages.idToPage).containsExactlyEntriesIn(expectedPages.idToPage)
// Page cache was modified, so the pager should have been invalidated
verify(pagingSourceFactory).invalidate()
@ -241,12 +245,13 @@ class NetworkTimelineRemoteMediatorTest {
fun `should append statuses`() = runTest {
// Given
val pages = PageCache().apply {
upsert(
add(
Page(
data = mutableListOf(mockStatus("7"), mockStatus("6"), mockStatus("5")),
prevKey = "7",
nextKey = "5",
),
LoadType.REFRESH,
)
}
@ -282,25 +287,27 @@ class NetworkTimelineRemoteMediatorTest {
// Then
val expectedPages = PageCache().apply {
upsert(
add(
Page(
data = mutableListOf(mockStatus("7"), mockStatus("6"), mockStatus("5")),
prevKey = "7",
nextKey = "5",
),
LoadType.REFRESH,
)
upsert(
add(
Page(
data = mutableListOf(mockStatus("4"), mockStatus("3"), mockStatus("2")),
prevKey = "4",
nextKey = "2",
),
LoadType.APPEND,
)
}
assertThat(result).isInstanceOf(RemoteMediator.MediatorResult.Success::class.java)
assertThat((result as RemoteMediator.MediatorResult.Success).endOfPaginationReached).isFalse()
assertThat(pages).containsExactlyEntriesIn(expectedPages)
assertThat(pages.idToPage).containsExactlyEntriesIn(expectedPages.idToPage)
// Page cache was modified, so the pager should have been invalidated
verify(pagingSourceFactory).invalidate()