[flake8] Resolve E1?? errors (#3805)

John Bodley 2017-11-10 12:06:22 -08:00 committed by Maxime Beauchemin
parent 35810ce2bf
commit 690de862e8
18 changed files with 127 additions and 110 deletions
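The E1?? glob in the title covers pycodestyle's indentation checks, E111 through E131: hanging indents, visual indents, and closing-bracket placement. The recurring fix across all eighteen files has the same shape: visually aligned continuation lines and trailing ')) ' pile-ups become four-space hanging indents with one item per line and a dedented closing bracket. A minimal before/after sketch of that pattern (the 'register' function and its arguments are made up for illustration, not taken from this commit):

# Illustrative only: 'register' is a stand-in, not Superset code.
def register(name, description=None):
    return (name, description)


# Visual indent: legal while the alignment under the opening paren is
# exact, but any drift is flagged as E127/E128, and every rename of
# 'register' forces the continuation line to move.
result = register('example',
                  description='aligned under the opening paren')

# The style this commit converges on: four-space hanging indent, one
# argument per line, closing bracket dedented back to the opening line.
result = register(
    'example',
    description='hanging indent, dedented closing bracket',
)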

View File

@@ -141,13 +141,17 @@ def load_examples(load_test_data):
 @manager.option(
     '-d', '--datasource',
     help=(
-        "Specify which datasource name to load, if omitted, all "
-        "datasources will be refreshed"))
+        "Specify which datasource name to load, if omitted, all "
+        "datasources will be refreshed"
+    ),
+)
 @manager.option(
     '-m', '--merge',
     help=(
-        "Specify using 'merge' property during operation. "
-        "Default value is False "))
+        "Specify using 'merge' property during operation. "
+        "Default value is False "
+    ),
+)
 def refresh_druid(datasource, merge):
     """Refresh druid datasources"""
     session = db.session()

View File

@@ -47,8 +47,10 @@ class BaseDatasource(AuditMixinNullable, ImportMixin):
         return relationship(
             'Slice',
             primaryjoin=lambda: and_(
-                foreign(Slice.datasource_id) == self.id,
-                foreign(Slice.datasource_type) == self.type))
+                foreign(Slice.datasource_id) == self.id,
+                foreign(Slice.datasource_type) == self.type,
+            ),
+        )

     # placeholder for a relationship to a derivative of BaseColumn
     columns = []

View File

@@ -72,4 +72,4 @@ class ConnectorRegistry(object):
             cls, session, database, datasource_name, schema=None):
         datasource_class = ConnectorRegistry.sources[database.type]
         return datasource_class.query_datasources_by_name(
-                session, database, datasource_name, schema=None)
+            session, database, datasource_name, schema=None)

View File

@@ -1,4 +1,4 @@
-# pylint: disable=invalid-unary-operand-type
+# pylint: disable=invalid-unary-operand-type
 from collections import OrderedDict
 from copy import deepcopy
 from datetime import datetime, timedelta
@@ -919,8 +919,8 @@ class DruidDatasource(Model, BaseDatasource):
         columns_dict = {c.column_name: c for c in self.columns}

         all_metrics, post_aggs = self._metrics_and_post_aggs(
-                metrics,
-                metrics_dict)
+            metrics,
+            metrics_dict)

         aggregations = OrderedDict()
         for m in self.metrics:
@@ -996,16 +996,16 @@ class DruidDatasource(Model, BaseDatasource):
             client.topn(**pre_qry)
             query_str += "// Two phase query\n// Phase 1\n"
             query_str += json.dumps(
-                    client.query_builder.last_query.query_dict, indent=2)
+                client.query_builder.last_query.query_dict, indent=2)
             query_str += "\n"
             if phase == 1:
                 return query_str
             query_str += (
-                    "// Phase 2 (built based on phase one's results)\n")
+                "// Phase 2 (built based on phase one's results)\n")
             df = client.export_pandas()
             qry['filter'] = self._add_filter_from_pre_query_data(
-                    df,
-                    qry['dimensions'], filters)
+                df,
+                qry['dimensions'], filters)
             qry['threshold'] = timeseries_limit or 1000
             if row_limit and granularity == 'all':
                 qry['threshold'] = row_limit
@@ -1046,8 +1046,10 @@ class DruidDatasource(Model, BaseDatasource):
                 "// Phase 2 (built based on phase one's results)\n")
             df = client.export_pandas()
             qry['filter'] = self._add_filter_from_pre_query_data(
-                df,
-                qry['dimensions'], filters)
+                df,
+                qry['dimensions'],
+                filters,
+            )
             qry['limit_spec'] = None
             if row_limit:
                 qry['limit_spec'] = {

View File

@@ -461,7 +461,9 @@ class PrestoEngineSpec(BaseEngineSpec):
         result_set_df = db.get_df(
             """SELECT table_schema, table_name FROM INFORMATION_SCHEMA.{}S
                ORDER BY concat(table_schema, '.', table_name)""".format(
-                datasource_type.upper()), None)
+                datasource_type.upper(),
+            ),
+            None)
         result_sets = defaultdict(list)
         for unused, row in result_set_df.iterrows():
             result_sets[row['table_schema']].append(row['table_name'])
@@ -879,8 +881,8 @@ class HiveEngineSpec(PrestoEngineSpec):
         backend_name = url.get_backend_name()

         # Must be Hive connection, enable impersonation, and set param auth=LDAP|KERBEROS
-        if backend_name == "hive" and "auth" in url.query.keys() and \
-                impersonate_user is True and username is not None:
+        if (backend_name == "hive" and "auth" in url.query.keys() and
+                impersonate_user is True and username is not None):
             configuration["hive.server2.proxy.user"] = username
         return configuration
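
Besides re-wrapping call arguments, the HiveEngineSpec hunk above swaps a backslash continuation for a parenthesized condition, with the wrapped line indented one extra level so it cannot be mistaken for the first line of the if-body (the situation E125 polices). A standalone sketch with stand-in values, since the real code reads these from a SQLAlchemy URL:

# Stand-in values for illustration; the real inputs come from a
# SQLAlchemy URL object and Superset's config.
backend_name = 'hive'
query = {'auth': 'KERBEROS'}
impersonate_user = True
username = 'alice'
configuration = {}

# Parenthesized multi-line condition: the continuation line carries an
# extra four spaces so it is visually distinct from the body below it.
if (backend_name == 'hive' and 'auth' in query.keys() and
        impersonate_user is True and username is not None):
    configuration['hive.server2.proxy.user'] = username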

View File

@@ -156,7 +156,7 @@ def merge_perm(sm, permission_name, view_menu_name, connection):
             .values(
                 permission_id=permission.id,
                 view_menu_id=view_menu.id,
-                ),
+            ),
         )

View File

@@ -107,8 +107,10 @@ def is_admin_only(pvm):
     if (pvm.view_menu.name in READ_ONLY_MODEL_VIEWS and
             pvm.permission.name not in READ_ONLY_PERMISSION):
         return True
-    return (pvm.view_menu.name in ADMIN_ONLY_VIEW_MENUS or
-            pvm.permission.name in ADMIN_ONLY_PERMISSIONS)
+    return (
+        pvm.view_menu.name in ADMIN_ONLY_VIEW_MENUS or
+        pvm.permission.name in ADMIN_ONLY_PERMISSIONS
+    )


 def is_alpha_only(pvm):

View File

@@ -105,7 +105,8 @@ def get_sql_results(
 def execute_sql(
-        ctask, query_id, return_results=True, store_results=False, user_name=None):
+    ctask, query_id, return_results=True, store_results=False, user_name=None,
+):
     """Executes the sql query returns the results."""
     session = get_session(not ctask.request.called_directly)

View File

@@ -33,8 +33,9 @@ class DummyStatsLogger(BaseStatsLogger):
             Fore.CYAN + "[stats_logger] (incr) " + key + Style.RESET_ALL)

     def decr(self, key):
-        logging.debug(Fore.CYAN + "[stats_logger] (decr) " + key +
-                      Style.RESET_ALL)
+        logging.debug((
+            Fore.CYAN + "[stats_logger] (decr) " + key +
+            Style.RESET_ALL))

     def gauge(self, key, value):
         logging.debug((

View File

@@ -1208,9 +1208,9 @@ class DistributionBarViz(DistributionPieViz):
         d = super(DistributionBarViz, self).query_obj()  # noqa
         fd = self.form_data
         if (
-            len(d['groupby']) <
-            len(fd.get('groupby') or []) + len(fd.get('columns') or [])
-        ):
+                len(d['groupby']) <
+                len(fd.get('groupby') or []) + len(fd.get('columns') or [])
+        ):
             raise Exception(
                 _("Can't have overlap between Series and Breakdowns"))
         if not fd.get('metrics'):
@@ -1523,7 +1523,7 @@ class IFrameViz(BaseViz):
     is_timeseries = False

     def get_df(self):
-        return None
+        return None


 class ParallelCoordinatesViz(BaseViz):

View File

@@ -547,11 +547,11 @@ class RequestAccessTests(SupersetTestCase):
             '/superset/update_role/',
             data=json.dumps({
                 'users': [{
-                        'username': 'gamma',
-                        'first_name': 'Gamma',
-                        'last_name': 'Gamma',
-                        'email': 'gamma@superset.com',
-                    }],
+                    'username': 'gamma',
+                    'first_name': 'Gamma',
+                    'last_name': 'Gamma',
+                    'email': 'gamma@superset.com',
+                }],
                 'role_name': update_role_str,
             }),
             follow_redirects=True,

View File

@@ -28,9 +28,9 @@ class SupersetTestCase(unittest.TestCase):
     def __init__(self, *args, **kwargs):
         if (
-            self.requires_examples and
-            not os.environ.get('SOLO_TEST') and
-            not os.environ.get('examples_loaded')
+                self.requires_examples and
+                not os.environ.get('SOLO_TEST') and
+                not os.environ.get('examples_loaded')
         ):
             logging.info("Loading examples")
             cli.load_examples(load_test_data=True)
@@ -133,8 +133,8 @@ class SupersetTestCase(unittest.TestCase):
     def get_slice(self, slice_name, session):
         slc = (
             session.query(models.Slice)
-                .filter_by(slice_name=slice_name)
-                .one()
+            .filter_by(slice_name=slice_name)
+            .one()
         )
         session.expunge_all()
         return slc
@@ -169,20 +169,20 @@ class SupersetTestCase(unittest.TestCase):
     def get_main_database(self, session):
         return (
             db.session.query(models.Database)
-                .filter_by(database_name='main')
-                .first()
+            .filter_by(database_name='main')
+            .first()
         )

     def get_access_requests(self, username, ds_type, ds_id):
         DAR = models.DatasourceAccessRequest
         return (
             db.session.query(DAR)
-                .filter(
+            .filter(
                 DAR.created_by == sm.find_user(username=username),
                 DAR.datasource_type == ds_type,
                 DAR.datasource_id == ds_id,
             )
-                .first()
-            )
+            .first()
+        )

     def logout(self):

View File

@@ -365,7 +365,8 @@ class CoreTests(SupersetTestCase):
         resp = self.client.get('/kv/{}/'.format(kv.id))
         self.assertEqual(resp.status_code, 200)
-        self.assertEqual(json.loads(value),
+        self.assertEqual(
+            json.loads(value),
             json.loads(resp.data.decode('utf-8')))

         try:
@@ -436,8 +437,8 @@ class CoreTests(SupersetTestCase):
         self.login(username=username)
         dash = (
             db.session.query(models.Dashboard)
-                .filter_by(slug="births")
-                .first()
+            .filter_by(slug="births")
+            .first()
         )
         origin_title = dash.dashboard_title
         positions = []
@@ -459,8 +460,8 @@ class CoreTests(SupersetTestCase):
         self.get_resp(url, data=dict(data=json.dumps(data)))
         updatedDash = (
             db.session.query(models.Dashboard)
-                .filter_by(slug="births")
-                .first()
+            .filter_by(slug="births")
+            .first()
         )
         self.assertEqual(updatedDash.dashboard_title, 'new title')
         # # bring back dashboard original title

View File

@@ -21,49 +21,49 @@ class PickableMock(Mock):
         return (Mock, ())


 SEGMENT_METADATA = [{
-  "id": "some_id",
-  "intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
-  "columns": {
-    "__time": {
-      "type": "LONG", "hasMultipleValues": False,
-      "size": 407240380, "cardinality": None, "errorMessage": None},
-    "dim1": {
-      "type": "STRING", "hasMultipleValues": False,
-      "size": 100000, "cardinality": 1944, "errorMessage": None},
-    "dim2": {
-      "type": "STRING", "hasMultipleValues": True,
-      "size": 100000, "cardinality": 1504, "errorMessage": None},
-    "metric1": {
-      "type": "FLOAT", "hasMultipleValues": False,
-      "size": 100000, "cardinality": None, "errorMessage": None},
-  },
-  "aggregators": {
-    "metric1": {
-      "type": "longSum",
-      "name": "metric1",
-      "fieldName": "metric1"},
-  },
-  "size": 300000,
-  "numRows": 5000000,
+    "id": "some_id",
+    "intervals": ["2013-05-13T00:00:00.000Z/2013-05-14T00:00:00.000Z"],
+    "columns": {
+        "__time": {
+            "type": "LONG", "hasMultipleValues": False,
+            "size": 407240380, "cardinality": None, "errorMessage": None},
+        "dim1": {
+            "type": "STRING", "hasMultipleValues": False,
+            "size": 100000, "cardinality": 1944, "errorMessage": None},
+        "dim2": {
+            "type": "STRING", "hasMultipleValues": True,
+            "size": 100000, "cardinality": 1504, "errorMessage": None},
+        "metric1": {
+            "type": "FLOAT", "hasMultipleValues": False,
+            "size": 100000, "cardinality": None, "errorMessage": None},
+    },
+    "aggregators": {
+        "metric1": {
+            "type": "longSum",
+            "name": "metric1",
+            "fieldName": "metric1"},
+    },
+    "size": 300000,
+    "numRows": 5000000,
 }]

 GB_RESULT_SET = [
-  {
-    "version": "v1",
-    "timestamp": "2012-01-01T00:00:00.000Z",
-    "event": {
-      "dim1": 'Canada',
-      "metric1": 12345678,
-    },
-  },
-  {
-    "version": "v1",
-    "timestamp": "2012-01-01T00:00:00.000Z",
-    "event": {
-      "dim1": 'USA',
-      "metric1": 12345678 / 2,
-    },
-  },
+    {
+        "version": "v1",
+        "timestamp": "2012-01-01T00:00:00.000Z",
+        "event": {
+            "dim1": 'Canada',
+            "metric1": 12345678,
+        },
+    },
+    {
+        "version": "v1",
+        "timestamp": "2012-01-01T00:00:00.000Z",
+        "event": {
+            "dim1": 'USA',
+            "metric1": 12345678 / 2,
+        },
+    },
 ]
@@ -337,26 +337,30 @@ class DruidTests(SupersetTestCase):
                 metric_name='unused_count',
                 verbose_name='COUNT(*)',
                 metric_type='count',
-                json=json.dumps({'type': 'count', 'name': 'unused_count'})),
+                json=json.dumps({'type': 'count', 'name': 'unused_count'}),
+            ),
             'some_sum': DruidMetric(
                 metric_name='some_sum',
                 verbose_name='SUM(*)',
                 metric_type='sum',
-                json=json.dumps({'type': 'sum', 'name': 'sum'})),
+                json=json.dumps({'type': 'sum', 'name': 'sum'}),
+            ),
             'a_histogram': DruidMetric(
                 metric_name='a_histogram',
                 verbose_name='APPROXIMATE_HISTOGRAM(*)',
                 metric_type='approxHistogramFold',
                 json=json.dumps(
-                    {'type': 'approxHistogramFold', 'name': 'a_histogram'}),
+                    {'type': 'approxHistogramFold', 'name': 'a_histogram'},
+                ),
             ),
             'aCustomMetric': DruidMetric(
                 metric_name='aCustomMetric',
                 verbose_name='MY_AWESOME_METRIC(*)',
                 metric_type='aCustomType',
                 json=json.dumps(
-                    {'type': 'customMetric', 'name': 'aCustomMetric'}),
+                    {'type': 'customMetric', 'name': 'aCustomMetric'},
+                ),
             ),
             'quantile_p95': DruidMetric(
                 metric_name='quantile_p95',
                 verbose_name='P95(*)',
@@ -365,7 +369,9 @@ class DruidTests(SupersetTestCase):
                     'type': 'quantile',
                     'probability': 0.95,
                     'name': 'p95',
-                    'fieldName': 'a_histogram'})),
+                    'fieldName': 'a_histogram',
+                }),
+            ),
             'aCustomPostAgg': DruidMetric(
                 metric_name='aCustomPostAgg',
                 verbose_name='CUSTOM_POST_AGG(*)',
@@ -375,7 +381,10 @@ class DruidTests(SupersetTestCase):
                     'name': 'aCustomPostAgg',
                     'field': {
                         'type': 'fieldAccess',
-                        'fieldName': 'aCustomMetric'}})),
+                        'fieldName': 'aCustomMetric',
+                    },
+                }),
+            ),
         }

         metrics = ['some_sum']

View File

@@ -89,8 +89,8 @@ class EmailSmtpTest(unittest.TestCase):
             'from', 'to', MIMEMultipart(), app.config, dryrun=False)
         assert not mock_smtp.called
         mock_smtp_ssl.assert_called_with(
-                app.config.get('SMTP_HOST'),
-                app.config.get('SMTP_PORT'),
+            app.config.get('SMTP_HOST'),
+            app.config.get('SMTP_PORT'),
         )

     @mock.patch('smtplib.SMTP_SSL')
@@ -104,8 +104,8 @@ class EmailSmtpTest(unittest.TestCase):
             'from', 'to', MIMEMultipart(), app.config, dryrun=False)
         assert not mock_smtp_ssl.called
         mock_smtp.assert_called_with(
-                app.config.get('SMTP_HOST'),
-                app.config.get('SMTP_PORT'),
+            app.config.get('SMTP_HOST'),
+            app.config.get('SMTP_PORT'),
         )
         assert not mock_smtp.login.called

View File

@@ -110,8 +110,10 @@ class UtilsTestCase(unittest.TestCase):
         self.assertEquals(form_data, expected)

     def test_datetime_f(self):
-        self.assertEquals(datetime_f(datetime(1990, 9, 21, 19, 11, 19, 626096)),
-                          '<nobr>1990-09-21T19:11:19.626096</nobr>')
+        self.assertEquals(
+            datetime_f(datetime(1990, 9, 21, 19, 11, 19, 626096)),
+            '<nobr>1990-09-21T19:11:19.626096</nobr>',
+        )
         self.assertEquals(len(datetime_f(datetime.now())), 28)
         self.assertEquals(datetime_f(None), '<nobr>None</nobr>')
         iso = datetime.now().isoformat()[:10].split('-')

View File

@@ -545,7 +545,8 @@ class PartitionVizTestCase(unittest.TestCase):
         self.assertEqual(3, len(nest[0]['children']))
         self.assertEqual(3, len(nest[0]['children'][0]['children']))
         self.assertEqual(1, len(nest[0]['children'][0]['children'][0]['children']))
-        self.assertEqual(1,
+        self.assertEqual(
+            1,
             len(nest[0]['children']
                 [0]['children']
                 [0]['children']

tox.ini
View File

@@ -17,16 +17,6 @@ exclude =
     superset/migrations
     superset/templates
 ignore =
-    E111
-    E114
-    E116
-    E121
-    E123
-    E125
-    E126
-    E127
-    E128
-    E131
     E302
     E303
     E305
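
The tox.ini change is what arms everything above: flake8 reports any check not listed under ignore, so each deleted line re-enables one indentation check across the whole repository, while the E3xx blank-line codes further down stay suppressed. As a rough illustration (not code from the repo), a shallow continuation indent that is not a multiple of four is exactly what the re-enabled checks now reject:

# Illustrative only: with E121 no longer ignored, flake8 reports a
# two-space continuation indent as "continuation line under-indented
# for hanging indent".
BAD = [{
  "id": "some_id",       # E121 once the ignore entry is gone
  "numRows": 5000000,
}]

# The four-space hanging indent used throughout this commit passes.
GOOD = [{
    "id": "some_id",
    "numRows": 5000000,
}]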