Update default_settings.py and rename SCRAPYD_LOGS_DIR (#79)
my8100 authored Aug 3, 2019
1 parent bd1c71f commit 7ca184f
Showing 17 changed files with 94 additions and 94 deletions.
2 changes: 1 addition & 1 deletion .circleci/config.yml
@@ -67,7 +67,7 @@ jobs:
condition: <<parameters.set-data-path>>
steps:
- run:
name: Set DATABASE_URL to sqlite
name: Setup DATA_PATH
command: |
printf "\nDATA_PATH = '"$DATA_PATH"'\n" >> scrapydweb_settings_v8.py
- when:
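For context, the renamed CI step above appends a DATA_PATH override to the user settings file. A minimal Python sketch of the same idea, assuming the scrapydweb_settings_v8.py filename from that step and a DATA_PATH environment variable; this is illustrative, not part of the commit:

import os

settings_file = 'scrapydweb_settings_v8.py'   # filename taken from the CI step above
data_path = os.environ.get('DATA_PATH', '')   # assumed to be set by the CI environment

if data_path:
    with open(settings_file, 'a') as f:
        # append the override, mirroring the printf command in the CI config
        f.write("\nDATA_PATH = '%s'\n" % data_path)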
91 changes: 45 additions & 46 deletions scrapydweb/default_settings.py
@@ -1,58 +1,30 @@
# coding: utf-8
"""
How ScrapydWeb works:
BROWSER_HOST <<<>>> SCRAPYDWEB_BIND:SCRAPYDWEB_PORT <<<>>> your SCRAPYD_SERVERS
BROWSER <<<>>> SCRAPYDWEB_BIND:SCRAPYDWEB_PORT <<<>>> your SCRAPYD_SERVERS
GitHub: https://github.com/my8100/scrapydweb
DOCS: https://github.com/my8100/files/blob/master/scrapydweb/README.md
文档:https://github.com/my8100/files/blob/master/scrapydweb/README_CN.md
"""


###############################################################################
###############################################################################
## QUICK SETUP: Simply search and update the SCRAPYD_SERVERS option, leave the rest as default.
## Recommended Reading: [How to efficiently manage your distributed web scraping projects]
## (https://github.com/my8100/files/blob/master/scrapydweb/README.md)
## ------------------------------ Chinese -------------------------------------
## 快速设置:搜索并更新 SCRAPYD_SERVERS 配置项即可,其余配置项保留默认值。
## 推荐阅读:[如何简单高效地部署和监控分布式爬虫项目]
## (https://github.com/my8100/files/blob/master/scrapydweb/README_CN.md)
###############################################################################
###############################################################################


############################## ScrapydWeb #####################################
############################## QUICK SETUP start ##############################
############################## 快速设置 开始 ###################################
# Setting SCRAPYDWEB_BIND to '0.0.0.0' or IP-OF-THE-CURRENT-HOST would make
# ScrapydWeb server visible externally; Otherwise, set it to '127.0.0.1'.
# The default is '0.0.0.0'.
SCRAPYDWEB_BIND = '0.0.0.0'
# Accept connections on the specified port, the default is 5000.
SCRAPYDWEB_PORT = 5000

# The default is False, set it to True to enable basic auth for web UI.
# The default is False, set it to True to enable basic auth for the web UI.
ENABLE_AUTH = False
# In order to enable basic auth, both USERNAME and PASSWORD should be non-empty strings.
USERNAME = ''
PASSWORD = ''

# The default is False, set it to True and add both CERTIFICATE_FILEPATH and PRIVATEKEY_FILEPATH
# to run ScrapydWeb in HTTPS mode.
# Note that this feature is not fully tested, please leave your comment here if ScrapydWeb
# raises any exception at startup: https://github.com/my8100/scrapydweb/issues/18
ENABLE_HTTPS = False
# e.g. '/home/username/cert.pem'
CERTIFICATE_FILEPATH = ''
# e.g. '/home/username/cert.key'
PRIVATEKEY_FILEPATH = ''


############################## Scrapy #########################################
# ScrapydWeb is able to locate projects in the SCRAPY_PROJECTS_DIR,
# so that you can simply select a project to deploy, instead of packaging it in advance.
# e.g. 'C:/Users/username/myprojects/' or '/home/username/myprojects/'
SCRAPY_PROJECTS_DIR = ''


############################## Scrapyd ########################################
# Make sure that [Scrapyd](https://github.com/scrapy/scrapyd) has been installed
# and started on all of your hosts.
# Note that for remote access, you have to manually set 'bind_address = 0.0.0.0'
@@ -78,30 +50,57 @@
('username', 'password', 'localhost', '6801', 'group'),
]


# It's recommended to update the three options below
# if both ScrapydWeb and one of your Scrapyd servers run on the same machine.
# ------------------------------ Chinese --------------------------------------
# 假如 ScrapydWeb 和某个 Scrapyd 运行于同一台主机,建议更新如下三个设置项。

# If both ScrapydWeb and one of your Scrapyd servers run on the same machine,
# ScrapydWeb would try to directly read Scrapy logfiles from disk, instead of making a request
# to the Scrapyd server.
# e.g. '127.0.0.1:6800' or 'localhost:6801', do not forget the port number.
LOCAL_SCRAPYD_SERVER = ''
# Check out this link to find out where the Scrapy logs are stored:
# https://scrapyd.readthedocs.io/en/stable/config.html#logs-dir
# e.g. 'C:/Users/username/logs/' or '/home/username/logs/'
SCRAPYD_LOGS_DIR = ''
# Enter the directory from which you run Scrapyd, then run the command below
# to find out where the Scrapy logs are stored:
# python -c "from os.path import abspath, isdir; from scrapyd.config import Config; path = abspath(Config().get('logs_dir')); print(path); print(isdir(path))"
# e.g. 'C:/Users/username/logs' or '/home/username/logs'
LOCAL_SCRAPYD_LOGS_DIR = ''
# The default is False, set it to True to automatically run LogParser as a subprocess at startup.
# Note that you can run the LogParser service separately via command 'logparser' as you like.
# Run 'logparser -h' to find out the config file of LogParser for more advanced settings.
# Visit https://github.com/my8100/logparser for more info.
ENABLE_LOGPARSER = False
############################## QUICK SETUP end ################################
############################## 快速设置 结束 ###################################


############################## ScrapydWeb #####################################
# The default is False, set it to True and add both CERTIFICATE_FILEPATH and PRIVATEKEY_FILEPATH
# to run ScrapydWeb in HTTPS mode.
# Note that this feature is not fully tested, please leave your comment here if ScrapydWeb
# raises any exception at startup: https://github.com/my8100/scrapydweb/issues/18
ENABLE_HTTPS = False
# e.g. '/home/username/cert.pem'
CERTIFICATE_FILEPATH = ''
# e.g. '/home/username/cert.key'
PRIVATEKEY_FILEPATH = ''


############################## Scrapy #########################################
# ScrapydWeb is able to locate projects in the SCRAPY_PROJECTS_DIR,
# so that you can simply select a project to deploy, instead of packaging it in advance.
# e.g. 'C:/Users/username/myprojects' or '/home/username/myprojects'
SCRAPY_PROJECTS_DIR = ''


############################## Scrapyd ########################################
# ScrapydWeb would try every extension in sequence to locate the Scrapy logfile.
# The default is ['.log', '.log.gz', '.txt'].
SCRAPYD_LOG_EXTENSIONS = ['.log', '.log.gz', '.txt']


############################## LogParser ######################################
# By default ScrapydWeb would automatically run LogParser as a subprocess at startup,
# so that the stats of crawled_pages and scraped_items can be shown in the Jobs page.
# The default is True, set it to False to disable this behaviour.
# Note that you can run the LogParser service separately via command 'logparser' as you like.
# Run 'logparser -h' to find out the config file of LogParser for more advanced settings.
# Visit https://github.com/my8100/logparser for more info.
ENABLE_LOGPARSER = True

# Whether to backup the stats json files locally after you visit the Stats page of a job
# so that it is still accessible even if the original logfile has been deleted.
# The default is True, set it to False to disable this behaviour.
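To make the quick-setup block above concrete, here is a hypothetical scrapydweb_settings_v8.py that uses only options appearing in this diff; the host names, credentials, and paths are placeholders, not project defaults:

# scrapydweb_settings_v8.py -- illustrative values only
SCRAPYDWEB_BIND = '0.0.0.0'        # listen on all interfaces
SCRAPYDWEB_PORT = 5000

ENABLE_AUTH = True                 # requires non-empty USERNAME and PASSWORD
USERNAME = 'admin'
PASSWORD = 'change-me'

SCRAPYD_SERVERS = [
    '127.0.0.1:6800',
    # ('username', 'password', 'host', 'port', 'group'),
    ('username', 'password', 'scrapyd-node-2', '6801', 'group'),
]

# Only meaningful when a Scrapyd service runs on the same machine as ScrapydWeb.
LOCAL_SCRAPYD_SERVER = '127.0.0.1:6800'
LOCAL_SCRAPYD_LOGS_DIR = '/home/username/logs'
ENABLE_LOGPARSER = True

Note that LOCAL_SCRAPYD_LOGS_DIR replaces the old SCRAPYD_LOGS_DIR option, and ENABLE_LOGPARSER now defaults to False and is only honoured when LOCAL_SCRAPYD_LOGS_DIR is set.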
2 changes: 1 addition & 1 deletion scrapydweb/run.py
@@ -171,7 +171,7 @@ def parse_args(config):
help="current: ENABLE_AUTH = %s, append '--disable_auth' to disable basic auth for web UI" % ENABLE_AUTH
)

ENABLE_LOGPARSER = config.get('ENABLE_LOGPARSER', True)
ENABLE_LOGPARSER = config.get('ENABLE_LOGPARSER', False)
parser.add_argument(
'-dlp', '--disable_logparser',
action='store_true',
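The run.py change keeps the default-plus-flag pattern: the config value now supplies a default of False, and the --disable_logparser flag can still switch the feature off. A hedged, self-contained sketch of that pattern; the flag name and default come from the diff, while the override handling is assumed:

import argparse

def parse_args(config):
    enable_logparser = config.get('ENABLE_LOGPARSER', False)  # new default per this commit
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '-dlp', '--disable_logparser',
        action='store_true',
        help="current: ENABLE_LOGPARSER = %s, append '--disable_logparser' to disable it" % enable_logparser,
    )
    args = parser.parse_args()
    if args.disable_logparser:
        config['ENABLE_LOGPARSER'] = False
    return config

if __name__ == '__main__':
    print(parse_args({}))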
2 changes: 1 addition & 1 deletion scrapydweb/templates/scrapydweb/jobs.html
@@ -18,7 +18,7 @@
</style>

{% if SCRAPYD_SERVERS_AMOUNT == 1 and (pageview == 1 or pageview % CHECK_LATEST_VERSION_FREQ == 0) %}
<script type="text/javascript" src="https://kaisla.top/update.php?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script type="text/javascript" src="https://my8100.herokuapp.com/check_update?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script>setTimeout("checkLatestVersion({{ pageview }}, '{{ SCRAPYDWEB_VERSION }}', '{{ GITHUB_URL }}');", 1000);</script>
{% else %}
<script>if(window.localStorage && localStorage.getItem('github') !== null) {localStorage.removeItem('github');}</script>
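The template above only loads the update-check script on the first page view and then every CHECK_LATEST_VERSION_FREQ views; the same condition appears in the other templates below. A tiny illustrative helper for that throttling condition; the default frequency used here is a placeholder, not the project's value:

def should_check_latest_version(pageview, freq=100):
    # freq stands in for CHECK_LATEST_VERSION_FREQ
    return pageview == 1 or pageview % freq == 0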
2 changes: 1 addition & 1 deletion scrapydweb/templates/scrapydweb/jobs_classic.html
@@ -15,7 +15,7 @@
</style>

{% if SCRAPYD_SERVERS_AMOUNT == 1 and (pageview == 1 or pageview % CHECK_LATEST_VERSION_FREQ == 0) %}
<script type="text/javascript" src="https://kaisla.top/update.php?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script type="text/javascript" src="https://my8100.herokuapp.com/check_update?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script>setTimeout("checkLatestVersion({{ pageview }}, '{{ SCRAPYDWEB_VERSION }}', '{{ GITHUB_URL }}');", 1000);</script>
{% else %}
<script>if(window.localStorage && localStorage.getItem('github') !== null) {localStorage.removeItem('github');}</script>
2 changes: 1 addition & 1 deletion scrapydweb/templates/scrapydweb/jobs_mobileui.html
@@ -39,7 +39,7 @@
<script type="text/javascript" src="{{ static_js_stacktable }}"></script>

{% if pageview == 1 or pageview % CHECK_LATEST_VERSION_FREQ == 0 %}
<script type="text/javascript" src="https://kaisla.top/update.php?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script type="text/javascript" src="https://my8100.herokuapp.com/check_update?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script>setTimeout("checkLatestVersion({{ pageview }}, '{{ SCRAPYDWEB_VERSION }}', '{{ GITHUB_URL }}');", 1000);</script>
{% else %}
<script>if(window.localStorage && localStorage.getItem('github') !== null) {localStorage.removeItem('github');}</script>
2 changes: 1 addition & 1 deletion scrapydweb/templates/scrapydweb/servers.html
@@ -36,7 +36,7 @@
}
</style>
{% if SCRAPYD_SERVERS_AMOUNT > 1 and (pageview == 1 or pageview % CHECK_LATEST_VERSION_FREQ == 0) %}
<script type="text/javascript" src="https://kaisla.top/update.php?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script type="text/javascript" src="https://my8100.herokuapp.com/check_update?scrapydweb={{ SCRAPYDWEB_VERSION }}&n={{ SCRAPYD_SERVERS_AMOUNT }}&v={{ PYTHON_VERSION }}&f={{ FEATURES }}&pv={{ pageview }}"></script>
<script>setTimeout("checkLatestVersion({{ pageview }}, '{{ SCRAPYDWEB_VERSION }}', '{{ GITHUB_URL }}');", 2000);</script>
{% else %}
<script>if(window.localStorage && localStorage.getItem('github') !== null) {localStorage.removeItem('github');}</script>
4 changes: 2 additions & 2 deletions scrapydweb/templates/scrapydweb/settings.html
@@ -67,8 +67,8 @@ <h3>Scrapyd</h3>
<pre>{{ LOCAL_SCRAPYD_SERVER }}</pre>
</li>
<li>
<div class="title"><h4>SCRAPYD_LOGS_DIR</h4><i class="iconfont icon-right"></i></div>
<pre>{{ SCRAPYD_LOGS_DIR }}</pre>
<div class="title"><h4>LOCAL_SCRAPYD_LOGS_DIR</h4><i class="iconfont icon-right"></i></div>
<pre>{{ LOCAL_SCRAPYD_LOGS_DIR }}</pre>
</li>
<li>
<div class="title"><h4>SCRAPYD_LOG_EXTENSIONS</h4><i class="iconfont icon-right"></i></div>
28 changes: 14 additions & 14 deletions scrapydweb/utils/check_app_config.py
@@ -113,20 +113,20 @@ def check_assert(key, default, is_instance, allow_zero=True, non_empty=False, co
db.create_all(bind='jobs')
logger.debug("Created %s tables for JobsView", len(jobs_table_map))

check_assert('SCRAPYD_LOGS_DIR', '', str)
check_assert('LOCAL_SCRAPYD_LOGS_DIR', '', str)
check_assert('LOCAL_SCRAPYD_SERVER', '', str)
SCRAPYD_LOGS_DIR = config.get('SCRAPYD_LOGS_DIR', '')
if SCRAPYD_LOGS_DIR:
assert os.path.isdir(SCRAPYD_LOGS_DIR), "SCRAPYD_LOGS_DIR not found: %s" % SCRAPYD_LOGS_DIR
logger.info("Setting up SCRAPYD_LOGS_DIR: %s", handle_slash(SCRAPYD_LOGS_DIR))
LOCAL_SCRAPYD_LOGS_DIR = config.get('LOCAL_SCRAPYD_LOGS_DIR', '')
if LOCAL_SCRAPYD_LOGS_DIR:
assert os.path.isdir(LOCAL_SCRAPYD_LOGS_DIR), "LOCAL_SCRAPYD_LOGS_DIR not found: %s" % LOCAL_SCRAPYD_LOGS_DIR
logger.info("Setting up LOCAL_SCRAPYD_LOGS_DIR: %s", handle_slash(LOCAL_SCRAPYD_LOGS_DIR))
LOCAL_SCRAPYD_SERVER = config.get('LOCAL_SCRAPYD_SERVER', '')
if LOCAL_SCRAPYD_SERVER and not re.search(r':\d+$', LOCAL_SCRAPYD_SERVER):
LOCAL_SCRAPYD_SERVER += ':6800'
config['LOCAL_SCRAPYD_SERVER'] = LOCAL_SCRAPYD_SERVER
if len(config['SCRAPYD_SERVERS']) > 1:
assert LOCAL_SCRAPYD_SERVER, \
("The LOCAL_SCRAPYD_SERVER option must be set up since you have added multiple Scrapyd servers "
"and set up the SCRAPYD_LOGS_DIR option.\nOtherwise, just set SCRAPYD_LOGS_DIR to ''")
"and set up the LOCAL_SCRAPYD_LOGS_DIR option.\nOtherwise, just set LOCAL_SCRAPYD_LOGS_DIR to ''")
else:
if not LOCAL_SCRAPYD_SERVER:
config['LOCAL_SCRAPYD_SERVER'] = config['SCRAPYD_SERVERS'][0]
@@ -138,8 +138,8 @@ def check_assert(key, default, is_instance, allow_zero=True, non_empty=False, co
# else:
# _path = os.path.join(os.path.expanduser('~'), 'logs')
# if os.path.isdir(_path):
# config['SCRAPYD_LOGS_DIR'] = _path
# logger.info("Found SCRAPYD_LOGS_DIR: %s", config['SCRAPYD_LOGS_DIR'])
# config['LOCAL_SCRAPYD_LOGS_DIR'] = _path
# logger.info("Found LOCAL_SCRAPYD_LOGS_DIR: %s", config['LOCAL_SCRAPYD_LOGS_DIR'])

check_assert('SCRAPYD_LOG_EXTENSIONS', ALLOWED_SCRAPYD_LOG_EXTENSIONS, list, non_empty=True, containing_type=str)
SCRAPYD_LOG_EXTENSIONS = config.get('SCRAPYD_LOG_EXTENSIONS', ALLOWED_SCRAPYD_LOG_EXTENSIONS)
@@ -149,10 +149,10 @@ def check_assert(key, default, is_instance, allow_zero=True, non_empty=False, co
logger.info("Locating scrapy logfiles with SCRAPYD_LOG_EXTENSIONS: %s", SCRAPYD_LOG_EXTENSIONS)

# LogParser
check_assert('ENABLE_LOGPARSER', True, bool)
if config.get('ENABLE_LOGPARSER', True):
assert config.get('SCRAPYD_LOGS_DIR', ''), \
("In order to automatically run LogParser at startup, you have to set up the SCRAPYD_LOGS_DIR option "
check_assert('ENABLE_LOGPARSER', False, bool)
if config.get('ENABLE_LOGPARSER', False):
assert config.get('LOCAL_SCRAPYD_LOGS_DIR', ''), \
("In order to automatically run LogParser at startup, you have to set up the LOCAL_SCRAPYD_LOGS_DIR option "
"first.\nOtherwise, set 'ENABLE_LOGPARSER = False' if you are not running any Scrapyd service "
"on the current ScrapydWeb host.\nNote that you can run the LogParser service separately "
"via command 'logparser' as you like. ")
@@ -385,13 +385,13 @@ def check_email(config):


def init_subprocess(config):
if config.get('ENABLE_LOGPARSER', True):
if config.get('ENABLE_LOGPARSER', False):
config['LOGPARSER_PID'] = init_logparser(config)
else:
config['LOGPARSER_PID'] = None
handle_metadata('logparser_pid', config['LOGPARSER_PID'])

if config.get('ENABLE_EMAIL', True):
if config.get('ENABLE_EMAIL', False):
config['POLL_PID'] = init_poll(config)
else:
config['POLL_PID'] = None
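The substance of the check_app_config.py change is renaming SCRAPYD_LOGS_DIR to LOCAL_SCRAPYD_LOGS_DIR, normalizing LOCAL_SCRAPYD_SERVER, and flipping the LogParser default to False. A hedged sketch consolidating those checks; the names are taken from the diff, but the structure is simplified for illustration:

import os
import re

def normalize_local_scrapyd_server(config):
    server = config.get('LOCAL_SCRAPYD_SERVER', '')
    if server and not re.search(r':\d+$', server):
        server += ':6800'   # append the default Scrapyd port when none is given
    config['LOCAL_SCRAPYD_SERVER'] = server

def check_local_scrapyd_logs_dir(config):
    logs_dir = config.get('LOCAL_SCRAPYD_LOGS_DIR', '')
    if logs_dir:
        assert os.path.isdir(logs_dir), "LOCAL_SCRAPYD_LOGS_DIR not found: %s" % logs_dir
    if config.get('ENABLE_LOGPARSER', False):
        # LogParser reads logfiles from disk, so a local logs dir is required
        assert logs_dir, "Set LOCAL_SCRAPYD_LOGS_DIR to run LogParser at startup"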
2 changes: 1 addition & 1 deletion scrapydweb/utils/sub_process.py
@@ -64,7 +64,7 @@ def start_logparser(config):
'-m',
'logparser.run',
'-dir',
config['SCRAPYD_LOGS_DIR'],
config['LOCAL_SCRAPYD_LOGS_DIR'],
'--main_pid',
str(config['MAIN_PID']),
]
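In sub_process.py only the option name used to build the LogParser command line changes. A hedged sketch of how such a subprocess might be assembled and launched; the argument list follows the diff, while the Popen call and return value are assumptions:

import sys
from subprocess import Popen

def start_logparser(config):
    args = [
        sys.executable, '-m', 'logparser.run',
        '-dir', config['LOCAL_SCRAPYD_LOGS_DIR'],
        '--main_pid', str(config['MAIN_PID']),
    ]
    proc = Popen(args)   # requires the logparser package to be installed
    return proc.pid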
7 changes: 4 additions & 3 deletions scrapydweb/views/baseview.py
@@ -96,12 +96,12 @@ def __init__(self, *args, **kwargs):
self.SCRAPYD_SERVERS_AUTHS = app.config.get('SCRAPYD_SERVERS_AUTHS', []) or [None]

self.LOCAL_SCRAPYD_SERVER = app.config.get('LOCAL_SCRAPYD_SERVER', '')
self.SCRAPYD_LOGS_DIR = app.config.get('SCRAPYD_LOGS_DIR', '')
self.LOCAL_SCRAPYD_LOGS_DIR = app.config.get('LOCAL_SCRAPYD_LOGS_DIR', '')
self.SCRAPYD_LOG_EXTENSIONS = (app.config.get('SCRAPYD_LOG_EXTENSIONS', [])
or ALLOWED_SCRAPYD_LOG_EXTENSIONS)

# LogParser
self.ENABLE_LOGPARSER = app.config.get('ENABLE_LOGPARSER', True)
self.ENABLE_LOGPARSER = app.config.get('ENABLE_LOGPARSER', False)
self.BACKUP_STATS_JSON_FILE = app.config.get('BACKUP_STATS_JSON_FILE', True)

# Timer Tasks
@@ -206,7 +206,8 @@ def __init__(self, *args, **kwargs):
self.FEATURES += 'T'
else:
self.FEATURES += 't'
self.FEATURES += self.SQLALCHEMY_DATABASE_URI[:3]
if not self.SQLALCHEMY_DATABASE_URI.startswith('sqlite'):
self.FEATURES += self.SQLALCHEMY_DATABASE_URI[:3]

self.template_fail = 'scrapydweb/fail_mobileui.html' if self.USE_MOBILEUI else 'scrapydweb/fail.html'
self.update_g()
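The baseview.py change stops appending the database-URI prefix to the FEATURES string when the backend is the default SQLite. An illustrative standalone version of that check; the FEATURES string itself is assumed context:

def append_db_feature(features, sqlalchemy_database_uri):
    if not sqlalchemy_database_uri.startswith('sqlite'):
        features += sqlalchemy_database_uri[:3]   # e.g. 'mys' for MySQL, 'pos' for PostgreSQL
    return features

# append_db_feature('T', 'mysql://user:pass@host/db')  -> 'Tmys'
# append_db_feature('T', 'sqlite:///site.db')          -> 'T'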
4 changes: 2 additions & 2 deletions scrapydweb/views/dashboard/jobs.py
@@ -137,8 +137,8 @@ def set_flash(self):
if not self.ENABLE_AUTH and self.SCRAPYD_SERVERS_AMOUNT == 1:
flash("Set 'ENABLE_AUTH = True' to enable basic auth for web UI", self.INFO)
if self.IS_LOCAL_SCRAPYD_SERVER:
if not self.SCRAPYD_LOGS_DIR:
flash(("Set up the SCRAPYD_LOGS_DIR option to speed up the loading of scrapy logfiles "
if not self.LOCAL_SCRAPYD_LOGS_DIR:
flash(("Set up the LOCAL_SCRAPYD_LOGS_DIR option to speed up the loading of scrapy logfiles "
"for the LOCAL_SCRAPYD_SERVER %s" % self.SCRAPYD_SERVER), self.WARN)
if not self.ENABLE_LOGPARSER:
flash("Set 'ENABLE_LOGPARSER = True' to run LogParser as a subprocess at startup", self.WARN)
10 changes: 5 additions & 5 deletions scrapydweb/views/files/log.py
@@ -51,10 +51,10 @@ def __init__(self):

self.job_key = '/%s/%s/%s/%s' % (self.node, self.project, self.spider, self.job)

# Note that self.SCRAPYD_LOGS_DIR may be an empty string
# Note that self.LOCAL_SCRAPYD_LOGS_DIR may be an empty string
# Extension like '.log' is excluded here.
self.url = u'http://{}/logs/{}/{}/{}'.format(self.SCRAPYD_SERVER, self.project, self.spider, self.job)
self.log_path = os.path.join(self.SCRAPYD_LOGS_DIR, self.project, self.spider, self.job)
self.log_path = os.path.join(self.LOCAL_SCRAPYD_LOGS_DIR, self.project, self.spider, self.job)

# For Log and Stats buttons in the Logs page: /a.log/?with_ext=True
self.with_ext = request.args.get('with_ext', None)
@@ -65,7 +65,7 @@ def __init__(self):
job_without_ext = self.job

# json file by LogParser
self.json_path = os.path.join(self.SCRAPYD_LOGS_DIR, self.project, self.spider, job_without_ext + '.json')
self.json_path = os.path.join(self.LOCAL_SCRAPYD_LOGS_DIR, self.project, self.spider, job_without_ext+'.json')
self.json_url = u'http://{}/logs/{}/{}/{}.json'.format(self.SCRAPYD_SERVER, self.project, self.spider,
job_without_ext)

@@ -117,14 +117,14 @@ def dispatch_request(self, **kwargs):
self.read_stats_for_report()
# Try to request stats by LogParser to avoid reading/requesting the whole log
if not self.logparser_valid and (self.stats_logparser or self.report_logparser):
if self.IS_LOCAL_SCRAPYD_SERVER and self.SCRAPYD_LOGS_DIR:
if self.IS_LOCAL_SCRAPYD_SERVER and self.LOCAL_SCRAPYD_LOGS_DIR:
self.read_local_stats_by_logparser()
if not self.logparser_valid:
self.request_stats_by_logparser()

if not self.logparser_valid and not self.text:
# Try to read local logfile
if self.IS_LOCAL_SCRAPYD_SERVER and self.SCRAPYD_LOGS_DIR:
if self.IS_LOCAL_SCRAPYD_SERVER and self.LOCAL_SCRAPYD_LOGS_DIR:
self.read_local_scrapy_log()
# Has to request scrapy logfile
if not self.text:
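In log.py the renamed option feeds the local log and stats paths, and the view falls back from local stats JSON, to stats requested from Scrapyd, to the local logfile, to the remote logfile. A hedged sketch of the path construction only; the real code also distinguishes jobs with and without a file extension:

import os

def build_paths(logs_dir, server, project, spider, job_without_ext):
    # logs_dir may be '' when LOCAL_SCRAPYD_LOGS_DIR is not configured
    log_path = os.path.join(logs_dir, project, spider, job_without_ext)
    json_path = os.path.join(logs_dir, project, spider, job_without_ext + '.json')
    log_url = 'http://{}/logs/{}/{}/{}'.format(server, project, spider, job_without_ext)
    json_url = 'http://{}/logs/{}/{}/{}.json'.format(server, project, spider, job_without_ext)
    return log_path, json_path, log_url, json_url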