chore(misc): Spelling (#19678)

Signed-off-by: Josh Soref <jsoref@users.noreply.github.com>
Co-authored-by: Josh Soref <jsoref@users.noreply.github.com>

parent 8c38878048
commit 02e5dcbbf2

@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \
 RUN apt-get update -y

-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
 RUN apt-get install -y apt-transport-https apt-utils

 # Install superset dependencies

@@ -24,7 +24,7 @@ ENV LANG=C.UTF-8 \
 RUN apt-get update -y

-# Install dependencies to fix `curl https support error` and `elaying package configuration warning`
+# Install dependencies to fix `curl https support error` and `delaying package configuration warning`
 RUN apt-get install -y apt-transport-https apt-utils

 # Install superset dependencies

@@ -746,7 +746,7 @@
         "type": "array"
       },
       "metrics": {
-        "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
+        "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. When metrics is undefined or null, the query is executed without a groupby. However, when metrics is an array (length >= 0), a groupby clause is added to the query.",
         "items": {},
         "nullable": true,
         "type": "array"

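The description fixed above distinguishes saved metric references (plain strings) from ad-hoc metrics defined inline in the query object. A minimal sketch of a query fragment mixing both forms, following the schema text; the column name and label are illustrative assumptions:

    # Sketch of the "metrics" array described by the schema above.
    # "count" references a saved datasource metric by name; the dict is an
    # ad-hoc metric per ChartDataAdhocMetricSchema. The column name and
    # label are made up for illustration.
    query = {
        "metrics": [
            "count",
            {
                "expressionType": "SIMPLE",
                "column": {"column_name": "num", "type": "BIGINT"},
                "aggregate": "SUM",
                "label": "sum__num",
            },
        ],
    }
    # Per the description: metrics=None runs the query without a GROUP BY,
    # while any list (even an empty one) adds a groupby clause.
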
@@ -1309,7 +1309,7 @@
         "type": "boolean"
       },
       "metrics": {
-        "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metricswhich are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
+        "description": "Aggregate expressions. Metrics can be passed as both references to datasource metrics (strings), or ad-hoc metrics which are defined only within the query object. See `ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics.",
         "items": {},
         "nullable": true,
         "type": "array"

@@ -1968,7 +1968,7 @@
         "type": "string"
       },
       "query_context_generation": {
-        "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+        "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
         "nullable": true,
         "type": "boolean"
       },

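For context, the field this description documents is a nullable boolean carried alongside the chart's query context. A rough sketch of where it might sit in a request body (the other keys are assumptions for illustration):

    # Hypothetical chart payload fragment. Only query_context_generation
    # comes from the schema above; the other keys are illustrative.
    payload = {
        "query_context": "<serialized query context JSON>",
        # True marks the query_context as system-generated, so saving it
        # should not flip the chart's user-modified state.
        "query_context_generation": True,
    }
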
@@ -2075,7 +2075,7 @@
         "type": "string"
       },
       "query_context_generation": {
-        "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+        "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
         "nullable": true,
         "type": "boolean"
       },

@@ -2760,7 +2760,7 @@
         "type": "string"
       },
       "query_context_generation": {
-        "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+        "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
         "nullable": true,
         "type": "boolean"
       },

@@ -2867,7 +2867,7 @@
         "type": "string"
       },
       "query_context_generation": {
-        "description": "The query context generation represents whether the query_contextis user generated or not so that it does not update user modfiedstate.",
+        "description": "The query context generation represents whether the query_contexts user generated or not so that it does not update user modfiedstate.",
         "nullable": true,
         "type": "boolean"
       },

@@ -143,7 +143,7 @@ Date: {date_str}
     "--include-last/--skip-last",
     default=False,
     show_default=True,
-    help="Whether to also cancel the lastest run.",
+    help="Whether to also cancel the latest run.",
 )
 @click.option(
     "--include-running/--skip-running",

@@ -24,7 +24,7 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("# of permission view menues is: {}".format(len(pvms)))
+    print("# of permission view menus is: {}".format(len(pvms)))
     pvms_dict = defaultdict(list)
     for pvm in pvms:
         pvms_dict[(pvm.permission, pvm.view_menu)].append(pvm)

@@ -43,9 +43,9 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("Stage 1: # of permission view menues is: {}".format(len(pvms)))
+    print("Stage 1: # of permission view menus is: {}".format(len(pvms)))

-    # 2. Clean up None permissions or view menues
+    # 2. Clean up None permissions or view menus
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()

@@ -57,15 +57,15 @@ def cleanup_permissions() -> None:
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()
-    print("Stage 2: # of permission view menues is: {}".format(len(pvms)))
+    print("Stage 2: # of permission view menus is: {}".format(len(pvms)))

-    # 3. Delete empty permission view menues from roles
+    # 3. Delete empty permission view menus from roles
     roles = security_manager.get_session.query(security_manager.role_model).all()
     for role in roles:
         role.permissions = [p for p in role.permissions if p]
     security_manager.get_session.commit()

-    # 4. Delete empty roles from permission view menues
+    # 4. Delete empty roles from permission view menus
     pvms = security_manager.get_session.query(
         security_manager.permissionview_model
     ).all()

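The stages above group duplicate permission/view-menu rows and keep one per pair. A self-contained sketch of that dedup pattern with a stand-in class instead of the security manager's model (Pvm and the sample rows are assumptions for illustration):

    from collections import defaultdict

    # Stand-in for security_manager.permissionview_model rows.
    class Pvm:
        def __init__(self, permission, view_menu):
            self.permission = permission
            self.view_menu = view_menu

    pvms = [Pvm("can_read", "Chart"), Pvm("can_read", "Chart"), Pvm("can_write", "Chart")]

    # Group rows by (permission, view_menu), exactly as the script does.
    pvms_dict = defaultdict(list)
    for pvm in pvms:
        pvms_dict[(pvm.permission, pvm.view_menu)].append(pvm)

    # Keep the first row in each group; the rest are duplicates to delete.
    keep = [group[0] for group in pvms_dict.values()]
    duplicates = [p for group in pvms_dict.values() for p in group[1:]]
    print("# of permission view menus is: {}".format(len(keep)))
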
@@ -19,7 +19,7 @@
 set -e

 # Temporary fix, probably related with https://bugs.launchpad.net/ubuntu/+source/opencv/+bug/1890170
-# MySQL was failling with:
+# MySQL was failing with:
 # from . import _mysql
 # ImportError: /lib/x86_64-linux-gnu/libstdc++.so.6: cannot allocate memory in static TLS block
 export LD_PRELOAD=/lib/x86_64-linux-gnu/libstdc++.so.6

@@ -24,7 +24,7 @@ set -e
 #
 function reset_db() {
   echo --------------------
-  echo Reseting test DB
+  echo Resetting test DB
   echo --------------------
   docker-compose stop superset-tests-worker superset || true
   RESET_DB_CMD="psql \"postgresql://${DB_USER}:${DB_PASSWORD}@127.0.0.1:5432\" <<-EOF

@@ -98,7 +98,7 @@ describe('server', () => {
     expect(endMock).toHaveBeenLastCalledWith('OK');
   });

-  test('reponds with a 404 when not found', () => {
+  test('responds with a 404 when not found', () => {
     const endMock = jest.fn();
     const writeHeadMock = jest.fn();

@@ -24,7 +24,7 @@ block content

   div Sockets connected:
     span#socket-count 0
-  div Messages recevied:
+  div Messages received:
     span#message-count 0
   div Last message received:
     code#message-debug

@@ -1026,7 +1026,7 @@ def send_mime_email(
     smtp_password = config["SMTP_PASSWORD"]
     smtp_starttls = config["SMTP_STARTTLS"]
     smtp_ssl = config["SMTP_SSL"]
-    smpt_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]
+    smtp_ssl_server_auth = config["SMTP_SSL_SERVER_AUTH"]

     if dryrun:
         logger.info("Dryrun enabled, email notification content is below:")

@@ -1035,7 +1035,7 @@ def send_mime_email(

     # Default ssl context is SERVER_AUTH using the default system
     # root CA certificates
-    ssl_context = ssl.create_default_context() if smpt_ssl_server_auth else None
+    ssl_context = ssl.create_default_context() if smtp_ssl_server_auth else None
     smtp = (
         smtplib.SMTP_SSL(smtp_host, smtp_port, context=ssl_context)
         if smtp_ssl

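The two hunks above correct the smpt_ssl_server_auth misspelling in the connection setup. A minimal sketch of the selection logic they touch, with placeholder config values (host, port, and flags are assumptions; a real SMTP server would be needed for the constructors to succeed):

    import smtplib
    import ssl

    # Placeholder values standing in for Superset's config dict.
    smtp_host, smtp_port = "localhost", 25
    smtp_ssl = False               # choose SMTP_SSL over plain SMTP
    smtp_ssl_server_auth = True    # verify the server certificate

    # Default ssl context is SERVER_AUTH using the system root CA
    # certificates, mirroring the hunk above.
    ssl_context = ssl.create_default_context() if smtp_ssl_server_auth else None
    smtp = (
        smtplib.SMTP_SSL(smtp_host, smtp_port, context=ssl_context)
        if smtp_ssl
        else smtplib.SMTP(smtp_host, smtp_port)
    )
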
@@ -451,7 +451,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):

     def test_with_invalid_where_parameter__400(self):
         self.query_context_payload["queries"][0]["filters"] = []
-        # erroneus WHERE-clause
+        # erroneous WHERE-clause
         self.query_context_payload["queries"][0]["extras"]["where"] = "(gender abc def)"

         rv = self.post_assert_metric(CHART_DATA_URI, self.query_context_payload, "data")

@@ -1619,7 +1619,7 @@ class TestCore(SupersetTestCase):
         Handle injected exceptions from the db mutator
         """

-        # Assert we can handle a custom excetion at the mutator level
+        # Assert we can handle a custom exception at the mutator level
         exception = SupersetException("Error message")
         mock_db_connection_mutator.side_effect = exception
         dash = db.session.query(Dashboard).first()

@@ -209,7 +209,7 @@ def mock_upload_to_s3(filename: str, upload_prefix: str, table: Table) -> str:
     container.exec_run(f"hdfs dfs -mkdir -p {dest_dir}")
     dest = os.path.join(dest_dir, os.path.basename(filename))
     container.exec_run(f"hdfs dfs -put {src} {dest}")
-    # hive external table expectes a directory for the location
+    # hive external table expects a directory for the location
     return dest_dir

@@ -1810,7 +1810,7 @@ class TestDatasetApi(SupersetTestCase):
             "datasource_access", dataset.perm
         )

-        # add perissions to allow export + access to query this dataset
+        # add permissions to allow export + access to query this dataset
         gamma_role = security_manager.find_role("Gamma")
         security_manager.add_permission_role(gamma_role, perm1)
         security_manager.add_permission_role(gamma_role, perm2)

@@ -72,7 +72,7 @@ class TestExportDatasetsCommand(SupersetTestCase):

         metadata = yaml.safe_load(contents["datasets/examples/energy_usage.yaml"])

-        # sort columns for deterministc comparison
+        # sort columns for deterministic comparison
         metadata["columns"] = sorted(metadata["columns"], key=itemgetter("column_name"))
         metadata["metrics"] = sorted(metadata["metrics"], key=itemgetter("metric_name"))

@@ -233,7 +233,7 @@ class TestDatasource(SupersetTestCase):
         resp = self.get_json_resp(url)
         self.assertEqual(resp["error"], "Only `SELECT` statements are allowed")

-    def test_external_metadata_for_mutistatement_virtual_table(self):
+    def test_external_metadata_for_multistatement_virtual_table(self):
         self.login(username="admin")
         table = SqlaTable(
             table_name="multistatement_sql_table",

@@ -476,15 +476,15 @@ class TestSqlaTableModel(SupersetTestCase):
             # TODO(bkyryliuk): make it work for presto.
             return

-        def cannonicalize_df(df):
+        def canonicalize_df(df):
             ret = df.sort_values(by=list(df.columns.values), inplace=False)
             ret.reset_index(inplace=True, drop=True)
             return ret

         df1 = self.query_with_expr_helper(is_timeseries=True, inner_join=True)
-        name_list1 = cannonicalize_df(df1).name.values.tolist()
+        name_list1 = canonicalize_df(df1).name.values.tolist()
         df2 = self.query_with_expr_helper(is_timeseries=True, inner_join=False)
-        name_list2 = cannonicalize_df(df1).name.values.tolist()
+        name_list2 = canonicalize_df(df1).name.values.tolist()
         self.assertFalse(df2.empty)

         assert name_list2 == name_list1

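The renamed helper makes DataFrame comparison order-insensitive by sorting on every column and resetting the index. A small pandas demonstration (the sample frames are made up):

    import pandas as pd

    def canonicalize_df(df):
        # Sort by every column, then drop the old index, so row order no
        # longer matters when comparing two frames.
        ret = df.sort_values(by=list(df.columns.values), inplace=False)
        ret.reset_index(inplace=True, drop=True)
        return ret

    a = pd.DataFrame({"name": ["bob", "ann"], "n": [2, 1]})
    b = pd.DataFrame({"name": ["ann", "bob"], "n": [1, 2]})
    assert canonicalize_df(a).equals(canonicalize_df(b))  # same rows, different order
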
@@ -74,7 +74,7 @@ class TestQueryContext(SupersetTestCase):
         for query_idx, query in enumerate(query_context.queries):
             payload_query = payload["queries"][query_idx]

-            # check basic properies
+            # check basic properties
             self.assertEqual(query.extras, payload_query["extras"])
             self.assertEqual(query.filter, payload_query["filters"])
             self.assertEqual(query.columns, payload_query["columns"])

@@ -571,7 +571,7 @@ class TestReportSchedulesApi(SupersetTestCase):
     @pytest.mark.usefixtures("create_report_schedules")
     def test_get_related_report_schedule(self):
         """
-        ReportSchedule Api: Test get releated report schedule
+        ReportSchedule Api: Test get related report schedule
         """
         self.login(username="admin")
         related_columns = ["created_by", "chart", "dashboard", "database"]

@@ -91,7 +91,7 @@ class TestSqlLab(SupersetTestCase):
         data = self.run_sql("SELECT * FROM birth_names LIMIT 10", "1")
         self.assertLess(0, len(data["data"]))

-        data = self.run_sql("SELECT * FROM unexistant_table", "2")
+        data = self.run_sql("SELECT * FROM nonexistent_table", "2")
         if backend() == "presto":
             assert (
                 data["errors"][0]["error_type"]

@@ -64,7 +64,7 @@ def test_pivot_df_no_cols_no_rows_single_metric():
         """.strip()
     )

-    # tranpose_pivot and combine_metrics do nothing in this case
+    # transpose_pivot and combine_metrics do nothing in this case
     pivoted = pivot_df(
         df,
         rows=[],

@@ -169,7 +169,7 @@ def test_pivot_df_no_cols_no_rows_two_metrics():
         """.strip()
     )

-    # tranpose_pivot and combine_metrics do nothing in this case
+    # transpose_pivot and combine_metrics do nothing in this case
     pivoted = pivot_df(
         df,
         rows=[],

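Both fixed comments note that transposing the pivot or combining metrics changes nothing when there are no row or column groupings. A rough pandas-only illustration of the transpose behavior (invented data; this is not Superset's pivot_df API):

    import pandas as pd

    df = pd.DataFrame({
        "state": ["CA", "CA", "NY"],
        "gender": ["girl", "boy", "girl"],
        "births": [5, 4, 6],
    })

    # With groupings, transposing swaps the row and column axes.
    pivoted = df.pivot_table(index="state", columns="gender", values="births", aggfunc="sum")
    transposed = pivoted.T  # genders become rows, states become columns

    # With no groupings there is only a single aggregated cell, so a
    # transpose is a no-op -- the situation the comments above describe.
    total = df[["births"]].sum().to_frame().T
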
@@ -77,11 +77,11 @@ def test_extract_errors() -> None:
         )
     ]

-    msg = "syntax error line 1 at position 10 unexpected 'limmmited'."
+    msg = "syntax error line 1 at position 10 unexpected 'limited'."
     result = SnowflakeEngineSpec.extract_errors(Exception(msg))
     assert result == [
         SupersetError(
-            message='Please check your query for syntax errors at or near "limmmited". Then, try running your query again.',
+            message='Please check your query for syntax errors at or near "limited". Then, try running your query again.',
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={

@@ -675,7 +675,7 @@ WHERE TABLE_SCHEMA like "%bi%"),0x7e)));
         """
 select (extractvalue(1,concat(0x7e,(select GROUP_CONCAT(COLUMN_NAME)
 from INFORMATION_SCHEMA.COLUMNS
-WHERE TABLE_NAME="bi_achivement_daily"),0x7e)));
+WHERE TABLE_NAME="bi_achievement_daily"),0x7e)));
         """
     )
     == {Table("COLUMNS", "INFORMATION_SCHEMA")}