diff --git a/README.md b/README.md
index 137cbab8..bd2f02bd 100644
--- a/README.md
+++ b/README.md
@@ -414,6 +414,7 @@ Use `--url`/`-u` to get a list of downloadable resource URLs extracted from the
| 酷我音乐 | | | |✓|
| 乐视网 | |✓| | |
| 荔枝FM | | | |✓|
+| 懒人听书 | | | |✓|
| 秒拍 | |✓| | |
| MioMio弹幕网 | |✓| | |
| MissEvan<br/>猫耳FM | | | |✓|
diff --git a/src/you_get/common.py b/src/you_get/common.py
index 224249b4..268448d8 100755
--- a/src/you_get/common.py
+++ b/src/you_get/common.py
@@ -76,6 +76,7 @@ SITES = {
'letv' : 'le',
'lizhi' : 'lizhi',
'longzhu' : 'longzhu',
+ 'lrts' : 'lrts',
'magisto' : 'magisto',
'metacafe' : 'metacafe',
'mgtv' : 'mgtv',
@@ -1555,6 +1556,21 @@ def script_main(download, download_playlist, **kwargs):
'-l', '--playlist', action='store_true',
help='Prefer to download a playlist'
)
+
+ playlist_grp = parser.add_argument_group('Playlist optional options')
+ playlist_grp.add_argument(
+ '-first', '--first', metavar='FIRST',
+ help='the first number'
+ )
+ playlist_grp.add_argument(
+ '-last', '--last', metavar='LAST',
+ help='the last number'
+ )
+ playlist_grp.add_argument(
+ '-size', '--page-size', metavar='PAGE_SIZE',
+ help='the page size number'
+ )
+
download_grp.add_argument(
'-a', '--auto-rename', action='store_true', default=False,
help='Auto rename same name different files'
@@ -1672,7 +1688,7 @@ def script_main(download, download_playlist, **kwargs):
socket.setdefaulttimeout(args.timeout)
try:
- extra = {}
+ extra = {'args': args}
if extractor_proxy:
extra['extractor_proxy'] = extractor_proxy
if stream_id:
diff --git a/src/you_get/extractors/lrts.py b/src/you_get/extractors/lrts.py
new file mode 100644
index 00000000..94d12a25
--- /dev/null
+++ b/src/you_get/extractors/lrts.py
@@ -0,0 +1,89 @@
+#!/usr/bin/env python
+
+__all__ = ['lrts_download']
+
+import logging
+from ..common import *
+from ..util import log, term
+
+def lrts_download(url, output_dir='.', merge=True, info_only=False, **kwargs):
+    """Download audiobook tracks from lrts.me (懒人听书).
+
+    Paging is controlled by the optional CLI arguments
+    --first / --last / --page-size carried in kwargs['args'].
+    """
+    html = get_html(url)
+    args = kwargs.get('args')
+    if not args:
+        args = {}
+    matched = re.search(r"/book/(\d+)", url)
+    if not matched:
+        raise AssertionError("not found book number: %s" % url)
+    book_no = matched.group(1)
+    book_title = book_no
+    # Page title is "<title>NAME-...,..."; fall back to the book id if absent.
+    matched = re.search(r"<title>([^-]*)[-](.*)[,](.*)", html)
+    if matched:
+        book_title = matched.group(1)
+
+    matched = re.search(r"var totalCount='(\d+)'", html)
+    if not matched:
+        raise AssertionError("not found total count in html")
+    total_count = int(matched.group(1))
+    log.i('%s total: %s' % (book_title, total_count))
+
+    first_page = 0
+    if 'first' in args and args.first is not None:
+        first_page = int(args.first)
+
+    page_size = 10
+    if 'page_size' in args and args.page_size is not None:
+        page_size = int(args.page_size)
+
+    last_page = (total_count // page_size) + 1
+    if 'last' in args and args.last is not None:
+        last_page = int(args.last)
+
+    log.i('page size is %s, page from %s to %s' % (page_size, first_page, last_page))
+    headers = {
+        'Referer': url
+    }
+    items = []
+    # Collect track metadata page by page until the requested range or an
+    # empty page is reached.
+    for page in range(first_page, last_page):
+        page_url = 'http://www.lrts.me/ajax/book/%s/%s/%s' % (book_no, page, page_size)
+        response_content = json.loads(post_content(page_url, headers))
+        if response_content['status'] != 'success':
+            raise AssertionError("got the page failed: %s" % (page_url))
+        data = response_content['data']['data']
+        if not data:
+            break
+        for i in data:
+            i['resName'] = parse.unquote(i['resName'])
+        items.extend(data)
+
+    headers = {
+        'Referer': 'http://www.lrts.me/playlist'
+    }
+    # Resolve the real media URL of every track; entries the API rejects are
+    # left without 'ok' and filtered out below.
+    for item in items:
+        i_url = 'http://www.lrts.me/ajax/path/4/%s/%s' % (item['fatherResId'], item['resId'])
+        response_content = json.loads(post_content(i_url, headers))
+        if response_content['status'] == 'success' and response_content['data']:
+            item['ok'] = True
+            item['url'] = response_content['data']
+            logging.debug('ok')
+
+    items = list(filter(lambda i: 'ok' in i and i['ok'], items))
+    log.i('Downloading %s: %s count ...' % (book_title, len(items)))
+
+    for item in items:
+        title = item['resName']
+        file_url = item['url']
+        _, _, size = url_info(file_url)
+        print_info(site_info, title, 'mp3', size)
+        if not info_only:
+            download_urls([file_url], title, 'mp3', size, output_dir, merge=merge)
+
+site_info = "lrts.me"
+download = lrts_download
+download_playlist = lrts_download