Initial work on syncing cached message state before any feed is updated. Also added sample code for marking messages as starred in Inoreader.

This commit is contained in:
Martin Rotter 2017-10-10 11:55:25 +02:00
parent 3ac6361829
commit 0814528e03
15 changed files with 178 additions and 47 deletions
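Summary of the approach: before the downloader starts updating feeds, it gathers the CacheForServiceRoot of every service that owns a queued feed and flushes the cached read/starred state synchronously via saveAllCachedData(false); the service roots and network factories gain an async flag so the same calls can run either blocking or non-blocking. A condensed sketch of the pre-update step (mirroring the feeddownloader.cpp hunk below, simplified):

QList<CacheForServiceRoot*> caches;

foreach (const Feed* feed, m_feeds) {
  // Each feed belongs to a service root; only roots which cache message state are of interest.
  auto* cache = dynamic_cast<CacheForServiceRoot*>(feed->getParentServiceRoot());

  // Collect each cache only once, no matter how many of its feeds are queued.
  if (cache != nullptr && !caches.contains(cache)) {
    caches.append(cache);
  }
}

// Push cached read/starred changes to the remote service synchronously,
// so the flush completes before any feed download begins.
foreach (CacheForServiceRoot* cache, caches) {
  cache->saveAllCachedData(false);
}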

View File

@@ -19,6 +19,7 @@
#include "core/feeddownloader.h"
#include "definitions/definitions.h"
#include "services/abstract/cacheforserviceroot.h"
#include "services/abstract/feed.h"
#include <QDebug>
@@ -47,6 +48,21 @@ bool FeedDownloader::isUpdateRunning() const {
}
void FeedDownloader::updateAvailableFeeds() {
QList<CacheForServiceRoot*> caches;
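// Collect the cache of each service whose feeds are queued for update, at most once per service.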
foreach (const Feed* feed, m_feeds) {
CacheForServiceRoot* cache = dynamic_cast<CacheForServiceRoot*>(feed->getParentServiceRoot());
if (cache != nullptr && !caches.contains(cache)) {
caches.append(cache);
}
}
// Now, we synchronously save cached data.
foreach (CacheForServiceRoot* cache, caches) {
cache->saveAllCachedData(false);
}
while (!m_feeds.isEmpty()) {
connect(m_feeds.first(), &Feed::messagesObtained, this, &FeedDownloader::oneFeedUpdateFinished,
(Qt::ConnectionType)(Qt::UniqueConnection | Qt::AutoConnection));

View File

@@ -40,7 +40,7 @@ class CacheForServiceRoot {
void saveCacheToFile(int acc_id);
void loadCacheFromFile(int acc_id);
virtual void saveAllCachedData() = 0;
virtual void saveAllCachedData(bool async = true) = 0;
protected:
QPair<QMap<RootItem::ReadStatus, QStringList>, QMap<RootItem::Importance, QList<Message>>> takeMessageCache();

View File

@@ -183,13 +183,6 @@ void Feed::run() {
<< customId() << " in thread: \'"
<< QThread::currentThreadId() << "\'.";
// Save all cached data first.
auto cache = dynamic_cast<CacheForServiceRoot*>(getParentServiceRoot());
if (cache != nullptr) {
cache->saveAllCachedData();
}
bool error_during_obtaining;
QList<Message> msgs = obtainNewMessages(&error_during_obtaining);

View File

@@ -165,7 +165,11 @@ void InoreaderServiceRoot::addNewFeed(const QString& url) {
void InoreaderServiceRoot::addNewCategory() {}
void InoreaderServiceRoot::saveAllCachedData() {
void InoreaderServiceRoot::saveAllCachedData(bool async) {
Q_UNUSED(async)
// TODO: Implement this so that data can also be saved synchronously.
QPair<QMap<RootItem::ReadStatus, QStringList>, QMap<RootItem::Importance, QList<Message>>> msgCache = takeMessageCache();
QMapIterator<RootItem::ReadStatus, QStringList> i(msgCache.first);
@@ -176,7 +180,7 @@ void InoreaderServiceRoot::saveAllCachedData() {
QStringList ids = i.value();
if (!ids.isEmpty()) {
network()->markMessagesRead(key, ids);
network()->markMessagesRead(key, ids, async);
}
}
@@ -196,7 +200,7 @@ void InoreaderServiceRoot::saveAllCachedData() {
custom_ids.append(msg.m_customId);
}
network()->markMessagesStarred(key, custom_ids);
network()->markMessagesStarred(key, custom_ids, async);
}
}
}

View File

@@ -49,7 +49,7 @@ class InoreaderServiceRoot : public ServiceRoot, public CacheForServiceRoot {
RootItem* obtainNewTreeForSyncIn() const;
void saveAllCachedData();
void saveAllCachedData(bool async = true);
public slots:
void addNewFeed(const QString& url);

View File

@@ -153,7 +153,7 @@ QList<Message> InoreaderNetworkFactory::messages(const QString& stream_id, Feed:
}
}
void InoreaderNetworkFactory::markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids) {
void InoreaderNetworkFactory::markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids, bool async) {
QString target_url = INOREADER_API_EDIT_TAG;
if (status == RootItem::ReadStatus::Read) {
@@ -197,18 +197,97 @@ void InoreaderNetworkFactory::markMessagesRead(RootItem::ReadStatus status, cons
QString batch_final_url = target_url + working_subset.join(QL1C('&'));
// We send this batch.
NetworkFactory::performAsyncNetworkOperation(batch_final_url,
timeout,
QByteArray(),
QNetworkAccessManager::Operation::GetOperation,
headers);
if (async) {
NetworkFactory::performAsyncNetworkOperation(batch_final_url,
timeout,
QByteArray(),
QNetworkAccessManager::Operation::GetOperation,
headers);
}
else {
QByteArray output;
NetworkFactory::performNetworkOperation(batch_final_url,
timeout,
QByteArray(),
output,
QNetworkAccessManager::Operation::GetOperation,
headers);
}
// Cleanup for next batch.
working_subset.clear();
}
}
void InoreaderNetworkFactory::markMessagesStarred(RootItem::Importance importance, const QStringList& custom_ids) {}
void InoreaderNetworkFactory::markMessagesStarred(RootItem::Importance importance, const QStringList& custom_ids, bool async) {
QString target_url = INOREADER_API_EDIT_TAG;
if (importance == RootItem::Importance::Important) {
target_url += QString("?a=user/-/") + INOREADER_STATE_IMPORTANT + "&";
}
else {
target_url += QString("?r=user/-/") + INOREADER_STATE_IMPORTANT + "&";
}
QString bearer = m_oauth2->bearer().toLocal8Bit();
if (bearer.isEmpty()) {
return;
}
QList<QPair<QByteArray, QByteArray>> headers;
headers.append(QPair<QByteArray, QByteArray>(QString(HTTP_HEADERS_AUTHORIZATION).toLocal8Bit(),
m_oauth2->bearer().toLocal8Bit()));
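// Keep only the trailing alphanumeric part of each custom ID (its short form) for the i= parameters built below.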
QStringList trimmed_ids;
QRegularExpression regex_short_id(QSL("[0-9a-zA-Z]+$"));
foreach (const QString& id, custom_ids) {
QString simplified_id = regex_short_id.match(id).captured();
trimmed_ids.append(QString("i=") + simplified_id);
}
QStringList working_subset;
int timeout = qApp->settings()->value(GROUP(Feeds), SETTING(Feeds::UpdateTimeout)).toInt();
working_subset.reserve(trimmed_ids.size() > 200 ? 200 : trimmed_ids.size());
// Now, we perform messages update in batches (max 200 messages per batch).
while (!trimmed_ids.isEmpty()) {
// We take 200 IDs.
for (int i = 0; i < 200 && !trimmed_ids.isEmpty(); i++) {
working_subset.append(trimmed_ids.takeFirst());
}
QString batch_final_url = target_url + working_subset.join(QL1C('&'));
// We send this batch.
if (async) {
NetworkFactory::performAsyncNetworkOperation(batch_final_url,
timeout,
QByteArray(),
QNetworkAccessManager::Operation::GetOperation,
headers);
}
else {
QByteArray output;
NetworkFactory::performNetworkOperation(batch_final_url,
timeout,
QByteArray(),
output,
QNetworkAccessManager::Operation::GetOperation,
headers);
}
// Cleanup for next batch.
working_subset.clear();
}
}
void InoreaderNetworkFactory::onTokensError(const QString& error, const QString& error_description) {
Q_UNUSED(error)

View File

@@ -55,8 +55,8 @@ class InoreaderNetworkFactory : public QObject {
RootItem* feedsCategories(bool obtain_icons);
QList<Message> messages(const QString& stream_id, Feed::Status& error);
void markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids);
void markMessagesStarred(RootItem::Importance importance, const QStringList& custom_ids);
void markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids, bool async = true);
void markMessagesStarred(RootItem::Importance importance, const QStringList& custom_ids, bool async = true);
private slots:
void onTokensError(const QString& error, const QString& error_description);

View File

@@ -337,7 +337,7 @@ QNetworkReply::NetworkError OwnCloudNetworkFactory::triggerFeedUpdate(int feed_i
return (m_lastError = network_reply.first);
}
void OwnCloudNetworkFactory::markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids) {
void OwnCloudNetworkFactory::markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids, bool async) {
QJsonObject json;
QJsonArray ids;
QString final_url;
@@ -359,17 +359,30 @@ void OwnCloudNetworkFactory::markMessagesRead(RootItem::ReadStatus status, const
headers << QPair<QByteArray, QByteArray>(HTTP_HEADERS_CONTENT_TYPE, OWNCLOUD_CONTENT_TYPE_JSON);
headers << NetworkFactory::generateBasicAuthHeader(m_authUsername, m_authPassword);
NetworkFactory::performAsyncNetworkOperation(final_url,
qApp->settings()->value(GROUP(Feeds),
SETTING(Feeds::UpdateTimeout)).toInt(),
QJsonDocument(json).toJson(QJsonDocument::Compact),
QNetworkAccessManager::PutOperation,
headers);
if (async) {
NetworkFactory::performAsyncNetworkOperation(final_url,
qApp->settings()->value(GROUP(Feeds),
SETTING(Feeds::UpdateTimeout)).toInt(),
QJsonDocument(json).toJson(QJsonDocument::Compact),
QNetworkAccessManager::PutOperation,
headers);
}
else {
QByteArray output;
NetworkFactory::performNetworkOperation(final_url,
qApp->settings()->value(GROUP(Feeds),
SETTING(Feeds::UpdateTimeout)).toInt(),
QJsonDocument(json).toJson(QJsonDocument::Compact),
output,
QNetworkAccessManager::PutOperation,
headers);
}
}
void OwnCloudNetworkFactory::markMessagesStarred(RootItem::Importance importance,
const QStringList& feed_ids,
const QStringList& guid_hashes) {
const QStringList& guid_hashes, bool async) {
QJsonObject json;
QJsonArray ids;
QString final_url;
@@ -395,12 +408,25 @@ void OwnCloudNetworkFactory::markMessagesStarred(RootItem::Importance importance
headers << QPair<QByteArray, QByteArray>(HTTP_HEADERS_CONTENT_TYPE, OWNCLOUD_CONTENT_TYPE_JSON);
headers << NetworkFactory::generateBasicAuthHeader(m_authUsername, m_authPassword);
NetworkFactory::performAsyncNetworkOperation(final_url,
qApp->settings()->value(GROUP(Feeds),
SETTING(Feeds::UpdateTimeout)).toInt(),
QJsonDocument(json).toJson(QJsonDocument::Compact),
QNetworkAccessManager::PutOperation,
headers);
if (async) {
NetworkFactory::performAsyncNetworkOperation(final_url,
qApp->settings()->value(GROUP(Feeds),
SETTING(Feeds::UpdateTimeout)).toInt(),
QJsonDocument(json).toJson(QJsonDocument::Compact),
QNetworkAccessManager::PutOperation,
headers);
}
else {
QByteArray output;
NetworkFactory::performNetworkOperation(final_url,
qApp->settings()->value(GROUP(Feeds),
SETTING(Feeds::UpdateTimeout)).toInt(),
QJsonDocument(json).toJson(QJsonDocument::Compact),
output,
QNetworkAccessManager::PutOperation,
headers);
}
}
int OwnCloudNetworkFactory::batchSize() const {

View File

@@ -129,8 +129,9 @@ class OwnCloudNetworkFactory {
// Misc methods.
QNetworkReply::NetworkError triggerFeedUpdate(int feed_id);
void markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids);
void markMessagesStarred(RootItem::Importance importance, const QStringList& feed_ids, const QStringList& guid_hashes);
void markMessagesRead(RootItem::ReadStatus status, const QStringList& custom_ids, bool async = true);
void markMessagesStarred(RootItem::Importance importance, const QStringList& feed_ids,
const QStringList& guid_hashes, bool async = true);
// Gets/sets the amount of messages to obtain during single feed update.
int batchSize() const;

View File

@@ -106,7 +106,11 @@ OwnCloudNetworkFactory* OwnCloudServiceRoot::network() const {
return m_network;
}
void OwnCloudServiceRoot::saveAllCachedData() {
void OwnCloudServiceRoot::saveAllCachedData(bool async) {
Q_UNUSED(async)
// TODO: Implement this so that data can also be saved synchronously.
QPair<QMap<RootItem::ReadStatus, QStringList>, QMap<RootItem::Importance, QList<Message>>> msgCache = takeMessageCache();
QMapIterator<RootItem::ReadStatus, QStringList> i(msgCache.first);
@@ -117,7 +121,7 @@ void OwnCloudServiceRoot::saveAllCachedData() {
QStringList ids = i.value();
if (!ids.isEmpty()) {
network()->markMessagesRead(key, ids);
network()->markMessagesRead(key, ids, async);
}
}
@@ -138,7 +142,7 @@ void OwnCloudServiceRoot::saveAllCachedData() {
guid_hashes.append(msg.m_customHash);
}
network()->markMessagesStarred(key, feed_ids, guid_hashes);
network()->markMessagesStarred(key, feed_ids, guid_hashes, async);
}
}
}

View File

@@ -50,7 +50,7 @@ class OwnCloudServiceRoot : public ServiceRoot, public CacheForServiceRoot {
void updateTitle();
void saveAccountDataToDatabase();
void saveAllCachedData();
void saveAllCachedData(bool async = true);
public slots:
void addNewFeed(const QString& url);

View File

@@ -253,7 +253,9 @@ TtRssGetHeadlinesResponse TtRssNetworkFactory::getHeadlines(int feed_id, int lim
TtRssUpdateArticleResponse TtRssNetworkFactory::updateArticles(const QStringList& ids,
UpdateArticle::OperatingField field,
UpdateArticle::Mode mode) {
UpdateArticle::Mode mode, bool async) {
Q_UNUSED(async)
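// The TT-RSS request below is performed synchronously in either case (the parsed response is returned directly), so the async flag is currently ignored.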
QJsonObject json;
json["op"] = QSL("updateArticle");

View File

@@ -160,7 +160,7 @@ class TtRssNetworkFactory {
bool sanitize);
TtRssUpdateArticleResponse updateArticles(const QStringList& ids, UpdateArticle::OperatingField field,
UpdateArticle::Mode mode);
UpdateArticle::Mode mode, bool async = true);
TtRssSubscribeToFeedResponse subscribeToFeed(const QString& url, int category_id, bool protectd = false,
const QString& username = QString(), const QString& password = QString());

View File

@@ -125,7 +125,11 @@ bool TtRssServiceRoot::canBeDeleted() const {
return true;
}
void TtRssServiceRoot::saveAllCachedData() {
void TtRssServiceRoot::saveAllCachedData(bool async) {
Q_UNUSED(async)
// TODO: Implement this so that data can also be saved synchronously.
QPair<QMap<RootItem::ReadStatus, QStringList>, QMap<RootItem::Importance, QList<Message>>> msgCache = takeMessageCache();
QMapIterator<RootItem::ReadStatus, QStringList> i(msgCache.first);
@@ -138,7 +142,8 @@ void TtRssServiceRoot::saveAllCachedData() {
if (!ids.isEmpty()) {
network()->updateArticles(ids,
UpdateArticle::Unread,
key == RootItem::Unread ? UpdateArticle::SetToTrue : UpdateArticle::SetToFalse);
key == RootItem::Unread ? UpdateArticle::SetToTrue : UpdateArticle::SetToFalse,
async);
}
}
@@ -156,7 +161,8 @@ void TtRssServiceRoot::saveAllCachedData() {
network()->updateArticles(ids,
UpdateArticle::Starred,
key == RootItem::Important ? UpdateArticle::SetToTrue : UpdateArticle::SetToFalse);
key == RootItem::Important ? UpdateArticle::SetToTrue : UpdateArticle::SetToFalse,
async);
}
}
}

View File

@@ -48,7 +48,7 @@ class TtRssServiceRoot : public ServiceRoot, public CacheForServiceRoot {
QString additionalTooltip() const;
void saveAllCachedData();
void saveAllCachedData(bool async = true);
// Access to network.
TtRssNetworkFactory* network() const;