Correctly exit on scraping error instead of looping endlessly

This commit is contained in:
akaessens 2021-08-10 14:25:43 +02:00
parent 2b035b6975
commit b4d37fbc3f
2 changed files with 4 additions and 2 deletions

View File

@@ -19,7 +19,7 @@ public class DocumentReceiver {
         // use default android user agent
         String user_agent = "Mozilla/5.0 (X11; Linux x86_64)";
-        Log.d("scraperLog", "DocumentReceiver: "+url);
+        Log.d("scraperLog", "DocumentReceiver: " + url);
         Connection connection = Jsoup.connect(url).userAgent(user_agent).followRedirects(true);
@@ -27,7 +27,7 @@ public class DocumentReceiver {
             document = response.parse();
-            Log.d("scraperLog", "Document title: "+document.title());
+            Log.d("scraperLog", "Document title: " + document.title());
             try {
                 // accept cookies needed?

View File

@@ -99,9 +99,11 @@ public class FbPageScraper extends AsyncTask<Void, Void, Void> {
             } catch (IOException e) {
                 e.printStackTrace();
                 this.error = R.string.error_connection;
+                return null;
             } catch (Exception e) {
                 e.printStackTrace();
                 this.error = R.string.error_unknown;
+                return null;
             }
         } while (url != null);