level 1 of making loading more content work

parent 6dc5350c43
commit 53059bcb91
@@ -32,7 +32,7 @@
             android:theme="@style/AppTheme">
             <meta-data
                 android:name="android.support.PARENT_ACTIVITY"
-                android:value=".VideoItemListActivity" />
+                android:value=".MainActivity" />

             <intent-filter>
                 <action android:name="android.intent.action.VIEW" />

@@ -11,10 +11,8 @@ import android.support.v7.widget.LinearLayoutManager;
 import android.support.v7.widget.RecyclerView;
 import android.support.v7.widget.Toolbar;
 import android.util.Log;
 import android.view.LayoutInflater;
 import android.view.View;
 import android.widget.ImageView;
 import android.widget.LinearLayout;
 import android.widget.ProgressBar;
 import android.widget.Toast;

@@ -27,12 +25,10 @@ import org.schabi.newpipe.extractor.ChannelInfo;
 import org.schabi.newpipe.extractor.ExtractionException;
 import org.schabi.newpipe.extractor.ParsingException;
 import org.schabi.newpipe.extractor.ServiceList;
 import org.schabi.newpipe.extractor.StreamPreviewInfo;
 import org.schabi.newpipe.extractor.StreamingService;
 import org.schabi.newpipe.info_list.InfoListAdapter;

 import java.io.IOException;
 import java.util.ArrayList;

 public class ChannelActivity extends AppCompatActivity {

@@ -47,6 +43,7 @@ public class ChannelActivity extends AppCompatActivity {
     private int serviceId = -1;
     private String channelUrl = "";
     private int pageNumber = 0;
+    private boolean hasNextPage = true;
     private boolean isLoading = false;

     private ImageLoader imageLoader = ImageLoader.getInstance();
@@ -91,22 +88,23 @@
                     totalItemCount = layoutManager.getItemCount();
                     pastVisiblesItems = layoutManager.findFirstVisibleItemPosition();

-                    if ( (visibleItemCount + pastVisiblesItems) >= totalItemCount && !isLoading)
+                    if ( (visibleItemCount + pastVisiblesItems) >= totalItemCount
+                            && !isLoading
+                            && hasNextPage)
                     {
                         pageNumber++;
                         Log.d(TAG, "bottomn");
+                        requestData(true);
                     }
                 }
             }
         });

-        requestData(pageNumber);
+        requestData(false);
     }



     private void updateUi(final ChannelInfo info) {
         isLoading = false;
         CollapsingToolbarLayout ctl = (CollapsingToolbarLayout) findViewById(R.id.channel_toolbar_layout);
         ProgressBar progressBar = (ProgressBar) findViewById(R.id.progressBar);
         ImageView channelBanner = (ImageView) findViewById(R.id.channel_banner_image);
@@ -144,11 +142,9 @@ public class ChannelActivity extends AppCompatActivity {
         } else {
             feedButton.setVisibility(View.GONE);
         }
-
-        initVideos(info);
     }

-    private void initVideos(final ChannelInfo info) {
+    private void addVideos(final ChannelInfo info) {
         infoListAdapter.addStreamItemList(info.related_streams);
     }

@@ -162,7 +158,7 @@ public class ChannelActivity extends AppCompatActivity {
         });
     }

-    private void requestData(int page) {
+    private void requestData(final boolean onlyVideos) {
         // start processing
         isLoading = true;
         Thread channelExtractorThread = new Thread(new Runnable() {
@@ -173,7 +169,7 @@
                 try {
                     StreamingService service = ServiceList.getService(serviceId);
                     ChannelExtractor extractor = service.getChannelExtractorInstance(
-                            channelUrl, new Downloader());
+                            channelUrl, pageNumber, new Downloader());

                     final ChannelInfo info = ChannelInfo.getInfo(extractor, new Downloader());

@@ -181,8 +177,13 @@
                     h.post(new Runnable() {
                         @Override
                         public void run() {
+                            isLoading = false;
+                            if(!onlyVideos) {
                                 updateUi(info);
+                            }
+                            hasNextPage = info.hasNextPage;
+                            addVideos(info);
                         }
                     });

                     // look for non critical errors during extraction

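The scroll handling patched above is the usual RecyclerView endless-scrolling pattern: when the items already scrolled past plus the currently visible ones reach the adapter's total, and nothing is in flight, the next page is requested. A minimal sketch of that wiring, assuming the recyclerView and layoutManager created in onCreate() and the field names used in the diff (the helper name setupEndlessScrolling is made up for illustration):

    // Fields as in the diff: current page, whether the extractor reported another page,
    // and whether a request is already in flight.
    private int pageNumber = 0;
    private boolean hasNextPage = true;
    private boolean isLoading = false;

    private void setupEndlessScrolling(final RecyclerView recyclerView,
                                       final LinearLayoutManager layoutManager) {
        recyclerView.setOnScrollListener(new RecyclerView.OnScrollListener() {
            @Override
            public void onScrolled(RecyclerView view, int dx, int dy) {
                if (dy <= 0) return; // only react to downward scrolling
                int visibleItemCount = layoutManager.getChildCount();
                int totalItemCount = layoutManager.getItemCount();
                int pastVisibleItems = layoutManager.findFirstVisibleItemPosition();
                // bottom reached, no request in flight, and another page is available
                if (visibleItemCount + pastVisibleItems >= totalItemCount
                        && !isLoading
                        && hasNextPage) {
                    pageNumber++;
                    requestData(true); // true = only append videos, keep the header UI as is
                }
            }
        });
    }
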
@@ -5,10 +5,12 @@ import java.io.IOException;
 import java.io.InputStreamReader;
 import java.net.URL;
 import java.net.UnknownHostException;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.Map;

 import javax.net.ssl.HttpsURLConnection;

 import info.guardianproject.netcipher.NetCipher;

 /**
  * Created by Christian Schabesberger on 28.01.16.
@@ -40,10 +42,26 @@ public class Downloader implements org.schabi.newpipe.extractor.Downloader {
      * @param language the language (usually a 2-character code) to set as the preferred language
      * @return the contents of the specified text file*/
     public String download(String siteUrl, String language) throws IOException {
+        Map<String, String> requestProperties = new HashMap<>();
+        requestProperties.put("Accept-Language", language);
+        return download(siteUrl, requestProperties);
+    }
+
+
+    /**Download the text file at the supplied URL as in download(String),
+     * but set the HTTP header field "Accept-Language" to the supplied string.
+     * @param siteUrl the URL of the text file to return the contents of
+     * @param customProperties set request header properties
+     * @return the contents of the specified text file
+     * @throws IOException*/
+    public String download(String siteUrl, Map<String, String> customProperties) throws IOException {
         URL url = new URL(siteUrl);
         HttpsURLConnection con = (HttpsURLConnection) url.openConnection();
         //HttpsURLConnection con = NetCipher.getHttpsURLConnection(url);
-        con.setRequestProperty("Accept-Language", language);
+        Iterator it = customProperties.entrySet().iterator();
+        while(it.hasNext()) {
+            Map.Entry pair = (Map.Entry)it.next();
+            con.setRequestProperty((String)pair.getKey(), (String)pair.getValue());
+        }
         return dl(con);
     }

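With the second overload, callers can attach arbitrary request headers; the Accept-Language variant above now simply builds a one-entry map and delegates to it. A usage sketch, assuming a Downloader instance of the class patched above (class name DownloadExample, URL and header values are only examples):

    import java.io.IOException;
    import java.util.HashMap;
    import java.util.Map;

    // Hypothetical caller of the patched Downloader class.
    public final class DownloadExample {
        public static String fetchWithHeaders(Downloader downloader) throws IOException {
            Map<String, String> headers = new HashMap<>();
            headers.put("Accept-Language", "en");
            // the channel extractor sets a Referer header like this for its ajax requests
            headers.put("Referer", "https://www.youtube.com/");
            return downloader.download("https://www.youtube.com/example", headers);
        }
    }
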
@@ -29,7 +29,7 @@ public abstract class ChannelExtractor {
     private Downloader downloader;
     private StreamPreviewInfoCollector previewInfoCollector;

-    public ChannelExtractor(UrlIdHandler urlIdHandler, String url, Downloader dl, int serviceId)
+    public ChannelExtractor(UrlIdHandler urlIdHandler, String url, int page, Downloader dl, int serviceId)
             throws ExtractionException, IOException {
         this.serviceId = serviceId;
         this.urlIdHandler = urlIdHandler;
@@ -48,6 +48,7 @@ public abstract class ChannelExtractor {
     public abstract String getBannerUrl() throws ParsingException;
     public abstract String getFeedUrl() throws ParsingException;
     public abstract StreamPreviewInfoCollector getStreams() throws ParsingException;
+    public abstract boolean hasNextPage() throws ParsingException;
     public int getServiceId() {
         return serviceId;
     }

@@ -39,6 +39,7 @@ public class ChannelInfo {
         // importand data
         info.service_id = extractor.getServiceId();
         info.channel_name = extractor.getChannelName();
+        info.hasNextPage = extractor.hasNextPage();

         try {
             info.avatar_url = extractor.getAvatarUrl();
@@ -72,6 +73,7 @@ public class ChannelInfo {
     public String banner_url = "";
     public String feed_url = "";
     public List<StreamPreviewInfo> related_streams = null;
+    public boolean hasNextPage = false;

     public List<Throwable> errors = new Vector<>();
 }

@@ -1,6 +1,7 @@
 package org.schabi.newpipe.extractor;

 import java.io.IOException;
+import java.util.Map;

 /**
  * Created by Christian Schabesberger on 28.01.16.
@@ -32,6 +33,14 @@ public interface Downloader {
      * @throws IOException*/
     String download(String siteUrl, String language) throws IOException;

+    /**Download the text file at the supplied URL as in download(String),
+     * but set the HTTP header field "Accept-Language" to the supplied string.
+     * @param siteUrl the URL of the text file to return the contents of
+     * @param customProperties set request header properties
+     * @return the contents of the specified text file
+     * @throws IOException*/
+    String download(String siteUrl, Map<String, String> customProperties) throws IOException;
+
     /**Download (via HTTP) the text file located at the supplied URL, and return its contents.
      * Primarily intended for downloading web pages.
      * @param siteUrl the URL of the text file to download

@@ -40,7 +40,7 @@ public abstract class StreamingService {
     public abstract SearchEngine getSearchEngineInstance(Downloader downloader);
     public abstract UrlIdHandler getUrlIdHandlerInstance();
     public abstract UrlIdHandler getChannelUrlIdHandlerInstance();
-    public abstract ChannelExtractor getChannelExtractorInstance(String url, Downloader downloader)
+    public abstract ChannelExtractor getChannelExtractorInstance(String url, int page, Downloader downloader)
             throws ExtractionException, IOException;

     public final int getServiceId() {

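Taken together with the ChannelActivity changes, the extractor API is now paged: a client asks the service for one page at a time and checks ChannelInfo.hasNextPage before asking for the next. A sketch of that calling pattern, mirroring what ChannelActivity does above (loadAllPages is a made-up helper; in the app this runs on a worker thread and is triggered by scrolling rather than a loop, and error collection is omitted):

    // Fetch a channel page by page until the extractor reports that no further page exists.
    void loadAllPages(int serviceId, String channelUrl, InfoListAdapter infoListAdapter)
            throws ExtractionException, IOException {
        int pageNumber = 0;
        boolean hasNextPage = true;
        while (hasNextPage) {
            StreamingService service = ServiceList.getService(serviceId);
            ChannelExtractor extractor =
                    service.getChannelExtractorInstance(channelUrl, pageNumber, new Downloader());
            ChannelInfo info = ChannelInfo.getInfo(extractor, new Downloader());

            infoListAdapter.addStreamItemList(info.related_streams); // append this page's streams
            hasNextPage = info.hasNextPage; // false once no "load more" URL was found
            pageNumber++;
        }
    }
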
@@ -1,20 +1,9 @@
 package org.schabi.newpipe.extractor.services.youtube;

-import android.util.Log;

-/*
-import com.steadystate.css.dom.CSSStyleDeclarationImpl;
-import com.steadystate.css.dom.CSSStyleSheetImpl;
-import com.steadystate.css.parser.CSSOMParser;
-import com.steadystate.css.parser.SACParserCSS3;
-import org.w3c.css.sac.CSSParseException;
-import org.w3c.css.sac.InputSource;
-import org.w3c.dom.css.CSSRule;
-import org.w3c.dom.css.CSSRuleList;
-import org.w3c.dom.css.CSSStyleSheet;
-import java.io.StringReader;
-*/

+import org.json.JSONException;
+import org.json.JSONObject;
 import org.jsoup.Jsoup;
 import org.jsoup.nodes.Document;
 import org.jsoup.nodes.Element;
@@ -30,6 +19,8 @@ import org.schabi.newpipe.extractor.UrlIdHandler;


 import java.io.IOException;
+import java.util.HashMap;
+import java.util.Map;

 /**
  * Created by Christian Schabesberger on 25.07.16.
@@ -58,32 +49,66 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
     // private CSSOMParser cssParser = new CSSOMParser(new SACParserCSS3());

     private Downloader downloader;
-    private final Document doc;
-    private final String channelUrl;
-    private String vUrl ="";
+    private Document doc = null;

+    private boolean isAjaxPage = false;
+    private static String userUrl = "";
+    private static String channelName = "";
+    private static String avatarUrl = "";
+    private static String bannerUrl = "";
+    private static String feedUrl = "";
+    // the fist page is html all other pages are ajax. Every new page can be requested by sending
+    // this request url.
+    private static String nextPageUrl = "";

-    public YoutubeChannelExtractor(UrlIdHandler urlIdHandler, String url, Downloader dl, int serviceId)
+    public YoutubeChannelExtractor(UrlIdHandler urlIdHandler, String url, int page, Downloader dl, int serviceId)
             throws ExtractionException, IOException {
-        super(urlIdHandler, url, dl, serviceId);
+        super(urlIdHandler, url, page, dl, serviceId);

-        channelUrl = urlIdHandler.cleanUrl(url) ; //+ "/video?veiw=0&flow=list&sort=dd";
+        url = urlIdHandler.cleanUrl(url) ; //+ "/video?veiw=0&flow=list&sort=dd";
         downloader = dl;
-        // we first need to get the user url. Otherwise we can't find videos
-        String channelPageContent = downloader.download(channelUrl);
-        Document channelDoc = Jsoup.parse(channelPageContent, channelUrl);
-        String userUrl = getUserUrl(channelDoc);

-        vUrl = userUrl + "/videos?veiw=0&flow=list&sort=dd";
-        String pageContent = downloader.download(vUrl);
-        doc = Jsoup.parse(pageContent, vUrl);
+        if(page == 0) {
+            if (isUserUrl(url)) {
+                userUrl = url;
+            } else {
+                // we first need to get the user url. Otherwise we can't find videos
+                String channelPageContent = downloader.download(url);
+                Document channelDoc = Jsoup.parse(channelPageContent, url);
+                userUrl = getUserUrl(channelDoc);
+            }
+
+            userUrl = userUrl + "/videos?veiw=0&flow=list&sort=dd&live_view=500";
+            String pageContent = downloader.download(userUrl);
+            doc = Jsoup.parse(pageContent, userUrl);
+            nextPageUrl = getNextPageUrl(doc);
+            isAjaxPage = false;
+        } else {
+            Map<String, String> userProperties = new HashMap<>();
+            userProperties.put("Referer", userUrl);
+            String ajaxDataRaw = downloader.download(nextPageUrl, userProperties);
+            JSONObject ajaxData;
+            String htmlDataRaw;
+            try {
+                ajaxData = new JSONObject(ajaxDataRaw);
+                htmlDataRaw = ajaxData.getString("content_html");
+            } catch (JSONException e) {
+                throw new ParsingException("Could not parse json data for next page", e);
+            }
+            doc = Jsoup.parse(htmlDataRaw, nextPageUrl);
+            nextPageUrl = getNextPageUrl(ajaxData);
+            isAjaxPage = true;
+        }
     }

     @Override
     public String getChannelName() throws ParsingException {
         try {
-            return doc.select("span[class=\"qualified-channel-title-text\"]").first()
+            if(!isAjaxPage) {
+                channelName = doc.select("span[class=\"qualified-channel-title-text\"]").first()
                         .select("a").first().text();
+            }
+            return channelName;
         } catch(Exception e) {
             throw new ParsingException("Could not get channel name");
         }
@@ -92,8 +117,11 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
     @Override
     public String getAvatarUrl() throws ParsingException {
         try {
-            return doc.select("img[class=\"channel-header-profile-image\"]")
+            if(!isAjaxPage) {
+                avatarUrl = doc.select("img[class=\"channel-header-profile-image\"]")
                         .first().attr("abs:src");
+            }
+            return avatarUrl;
         } catch(Exception e) {
             throw new ParsingException("Could not get avatar", e);
         }
@@ -101,16 +129,18 @@ public class YoutubeChannelExtractor extends ChannelExtractor {

     @Override
     public String getBannerUrl() throws ParsingException {

         try {
+            if(!isAjaxPage) {
                 Element el = doc.select("div[id=\"gh-banner\"]").first().select("style").first();
                 String cssContent = el.html();
                 String url = "https:" + Parser.matchGroup1("url\\((.*)\\)", cssContent);
                 if (url.contains("s.ytimg.com")) {
-                    return null;
+                    bannerUrl = null;
                 } else {
-                    return url;
+                    bannerUrl = url;
                 }
+            }
+            return bannerUrl;
         } catch(Exception e) {
             throw new ParsingException("Could not get Banner", e);
         }
@@ -119,7 +149,12 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
     @Override
     public StreamPreviewInfoCollector getStreams() throws ParsingException {
         StreamPreviewInfoCollector collector = getStreamPreviewInfoCollector();
-        Element ul = doc.select("ul[id=\"browse-items-primary\"]").first();
+        Element ul = null;
+        if(isAjaxPage) {
+            ul = doc.select("body").first();
+        } else {
+            ul = doc.select("ul[id=\"browse-items-primary\"]").first();
+        }

         for(final Element li : ul.children()) {
             if (li.select("div[class=\"feed-item-dismissable\"]").first() != null) {
@@ -235,26 +270,6 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
                             throw new ParsingException("Could not get thumbnail url", e);
                         }
                     }
-                });
-            }
-        }

-        return collector;
-    }
-
-    @Override
-    public String getFeedUrl() throws ParsingException {
-        try {
-            return doc.select("link[title=\"RSS\"]").first().attr("abs:href");
-        } catch(Exception e) {
-            throw new ParsingException("Could not get feed url", e);
-        }
-    }
-
-    private String getUserUrl(Document d) throws ParsingException {
-        return d.select("span[class=\"qualified-channel-title-text\"]").first()
-                .select("a").first().attr("abs:href");
-    }
-
     private boolean isLiveStream(Element item) {
         Element bla = item.select("span[class*=\"yt-badge-live\"]").first();
@@ -268,4 +283,59 @@ public class YoutubeChannelExtractor extends ChannelExtractor {
         }
         return bla != null;
     }
+                });
+            }
+        }
+
+        return collector;
+    }
+
+    @Override
+    public String getFeedUrl() throws ParsingException {
+        try {
+            if(!isAjaxPage) {
+                feedUrl = doc.select("link[title=\"RSS\"]").first().attr("abs:href");
+            }
+            return feedUrl;
+        } catch(Exception e) {
+            throw new ParsingException("Could not get feed url", e);
+        }
+    }
+
+    @Override
+    public boolean hasNextPage() throws ParsingException {
+        return !nextPageUrl.isEmpty();
+    }
+
+    private String getUserUrl(Document d) throws ParsingException {
+        return d.select("span[class=\"qualified-channel-title-text\"]").first()
+                .select("a").first().attr("abs:href");
+    }
+
+    private boolean isUserUrl(String url) throws ParsingException {
+        return url.contains("/user/");
+    }
+
+    private String getNextPageUrl(Document d) throws ParsingException {
+        try {
+            Element button = d.select("button[class*=\"yt-uix-load-more\"]").first();
+            return button.attr("abs:data-uix-load-more-href");
+        } catch(Exception e) {
+            throw new ParsingException("could not load next page url", e);
+        }
+    }
+
+    private String getNextPageUrl(JSONObject ajaxData) throws ParsingException {
+        Document doc = null;
+        try {
+            String docRaw = ajaxData.getString("load_more_widget_html");
+            if(docRaw.isEmpty()) {
+                return "";
+            }
+            doc = Jsoup.parse(docRaw);
+        } catch(JSONException je) {
+            throw new ParsingException("Could not get load_more_widget from ajax response", je);
+        }
+        return getNextPageUrl(doc);
+    }
 }

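For every page after the first, the constructor above fetches nextPageUrl and reads exactly two fields out of the JSON it gets back: content_html (the next batch of items as plain HTML) and load_more_widget_html (the widget carrying the URL of the page after that). A sketch of that response shape and of extracting the follow-up URL, using the same keys and selectors as the code above; the surrounding payload is abbreviated and only assumed, and JSONException handling is omitted:

    // Assumed shape of the ajax "load more" response (only the two keys read above are shown):
    // {
    //   "content_html": "<li> ...next batch of video items... </li> ...",
    //   "load_more_widget_html": "<button class=\"yt-uix-load-more ...\" data-uix-load-more-href=\"/browse_ajax?...\"> ..."
    // }
    JSONObject ajaxData = new JSONObject(ajaxDataRaw);

    // the items are plain HTML and can be handed to Jsoup just like the first page
    Document items = Jsoup.parse(ajaxData.getString("content_html"), nextPageUrl);

    // the widget carries the URL of the following page; an empty widget means there is no further page
    Document widget = Jsoup.parse(ajaxData.getString("load_more_widget_html"));
    Element button = widget.select("button[class*=\"yt-uix-load-more\"]").first();
    String nextUrl = (button == null) ? "" : button.attr("abs:data-uix-load-more-href");
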
@@ -70,8 +70,8 @@ public class YoutubeService extends StreamingService {
     }

     @Override
-    public ChannelExtractor getChannelExtractorInstance(String url, Downloader downloader)
+    public ChannelExtractor getChannelExtractorInstance(String url, int page, Downloader downloader)
             throws ExtractionException, IOException {
-        return new YoutubeChannelExtractor(getChannelUrlIdHandlerInstance(), url, downloader, getServiceId());
+        return new YoutubeChannelExtractor(getChannelUrlIdHandlerInstance(), url, page, downloader, getServiceId());
     }
 }

@@ -46,9 +46,11 @@ public class InfoListAdapter extends RecyclerView.Adapter<InfoItemHolder> {
     }

     public void addStreamItemList(List<StreamPreviewInfo> videos) {
+        if(videos!= null) {
             streamList.addAll(videos);
             notifyDataSetChanged();
+        }
     }

     public void clearSteamItemList() {
         streamList = new Vector<>();

@@ -237,7 +237,7 @@ public class SearchInfoItemFragment extends Fragment {
         searchView.setSuggestionsAdapter(suggestionListAdapter);
         searchView.setOnSuggestionListener(new SearchSuggestionListener(searchView, suggestionListAdapter));
         searchView.setOnQueryTextListener(new SearchQueryListener());
-        if(!searchQuery.isEmpty()) {
+        if(searchQuery != null && !searchQuery.isEmpty()) {
             searchView.setQuery(searchQuery, false);
             searchView.setIconifiedByDefault(false);
         }