diff --git a/RELEASING/changelog.py b/RELEASING/changelog.py
index 80ff98cba..c33a9b03c 100644
--- a/RELEASING/changelog.py
+++ b/RELEASING/changelog.py
@@ -272,14 +272,14 @@ class GitLogs:
     @staticmethod
     def _git_get_current_head() -> str:
-        output = os.popen("git status | head -1").read()
+        output = os.popen("git status | head -1").read()  # noqa: S605, S607
         match = re.match("(?:HEAD detached at|On branch) (.*)", output)
         if not match:
             return ""
         return match.group(1)
 
     def _git_checkout(self, git_ref: str) -> None:
-        os.popen(f"git checkout {git_ref}").read()
+        os.popen(f"git checkout {git_ref}").read()  # noqa: S605
         current_head = self._git_get_current_head()
         if current_head != git_ref:
             print(f"Could not checkout {git_ref}")
@@ -290,7 +290,7 @@ class GitLogs:
         current_git_ref = self._git_get_current_head()
         self._git_checkout(self._git_ref)
         output = (
-            os.popen('git --no-pager log --pretty=format:"%h|%an|%ae|%ad|%s|"')
+            os.popen('git --no-pager log --pretty=format:"%h|%an|%ae|%ad|%s|"')  # noqa: S605, S607
             .read()
             .split("\n")
         )
diff --git a/RELEASING/generate_email.py b/RELEASING/generate_email.py
index ac9ca4a27..2936635cf 100755
--- a/RELEASING/generate_email.py
+++ b/RELEASING/generate_email.py
@@ -31,7 +31,7 @@ except ModuleNotFoundError:
 RECEIVER_EMAIL = "dev@superset.apache.org"
 PROJECT_NAME = "Superset"
 PROJECT_MODULE = "superset"
-PROJECT_DESCRIPTION = "Apache Superset is a modern, enterprise-ready business intelligence web application."
+PROJECT_DESCRIPTION = "Apache Superset is a modern, enterprise-ready business intelligence web application."  # noqa: E501
 
 
 def string_comma_to_list(message: str) -> list[str]:
diff --git a/RELEASING/verify_release.py b/RELEASING/verify_release.py
index 350263610..61bbf073a 100755
--- a/RELEASING/verify_release.py
+++ b/RELEASING/verify_release.py
@@ -23,12 +23,12 @@
 from typing import Optional
 
 import requests
 
 
-# Part 1: Verify SHA512 hash - this is the same as running `shasum -a 512 {release}` and comparing it against `{release}.sha512`
+# Part 1: Verify SHA512 hash - this is the same as running `shasum -a 512 {release}` and comparing it against `{release}.sha512`  # noqa: E501
 def get_sha512_hash(filename: str) -> str:
     """Run the shasum command on the file and return the SHA512 hash."""
-    result = subprocess.run(["shasum", "-a", "512", filename], stdout=subprocess.PIPE)
+    result = subprocess.run(["shasum", "-a", "512", filename], stdout=subprocess.PIPE)  # noqa: S603, S607
     sha512_hash = result.stdout.decode().split()[0]
     return sha512_hash
@@ -43,7 +43,7 @@ def read_sha512_file(filename: str) -> str:
 
 
 def verify_sha512(filename: str) -> str:
-    """Verify if the SHA512 hash of the file matches with the hash in the .sha512 file."""
+    """Verify if the SHA512 hash of the file matches with the hash in the .sha512 file."""  # noqa: E501
     sha512_hash = get_sha512_hash(filename)
     sha512_file_content = read_sha512_file(filename)
@@ -53,14 +53,15 @@ def verify_sha512(filename: str) -> str:
     return "SHA failed"
 
 
-# Part 2: Verify RSA key - this is the same as running `gpg --verify {release}.asc {release}` and comparing the RSA key and email address against the KEYS file
+# Part 2: Verify RSA key - this is the same as running `gpg --verify {release}.asc {release}` and comparing the RSA key and email address against the KEYS file  # noqa: E501
 def get_gpg_info(filename: str) -> tuple[Optional[str], Optional[str]]:
     """Run the GPG verify command and extract RSA key and email address."""
     asc_filename = filename + ".asc"
-    result = subprocess.run(
-        ["gpg", "--verify", asc_filename, filename], capture_output=True
+    result = subprocess.run(  # noqa: S603
+        ["gpg", "--verify", asc_filename, filename],  # noqa: S607
+        capture_output=True,  # noqa: S607
     )
     output = result.stderr.decode()
@@ -90,7 +91,7 @@ def get_gpg_info(filename: str) -> tuple[Optional[str], Optional[str]]:
 def verify_key(key: str, email: Optional[str]) -> str:
     """Fetch the KEYS file and verify if the RSA/EDDSA key and email match."""
     url = "https://downloads.apache.org/superset/KEYS"
-    response = requests.get(url)
+    response = requests.get(url)  # noqa: S113
     if response.status_code == 200:
         if key not in response.text:
             return "RSA/EDDSA key not found on KEYS page"
diff --git a/docker/pythonpath_dev/superset_config.py b/docker/pythonpath_dev/superset_config.py
index e8223e535..5b14e6b18 100644
--- a/docker/pythonpath_dev/superset_config.py
+++ b/docker/pythonpath_dev/superset_config.py
@@ -99,7 +99,7 @@ CELERY_CONFIG = CeleryConfig
 FEATURE_FLAGS = {"ALERT_REPORTS": True}
 ALERT_REPORTS_NOTIFICATION_DRY_RUN = True
-WEBDRIVER_BASEURL = "http://superset:8088/"  # When using docker compose baseurl should be http://superset_app:8088/
+WEBDRIVER_BASEURL = "http://superset:8088/"  # When using docker compose baseurl should be http://superset_app:8088/ # noqa: E501
 # The base URL for the email report hyperlinks.
 WEBDRIVER_BASEURL_USER_FRIENDLY = WEBDRIVER_BASEURL
 SQLLAB_CTAS_NO_LIMIT = True
diff --git a/pyproject.toml b/pyproject.toml
index 510fd12c5..260110efe 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -24,7 +24,7 @@ name = "apache-superset"
 description = "A modern, enterprise-ready business intelligence web application"
 readme = "README.md"
 dynamic = ["version", "scripts", "entry-points"]
-requires-python = "~=3.9"
+requires-python = ">=3.9"
 license = { file="LICENSE.txt" }
 authors = [
     { name = "Apache Software Foundation", email = "dev@superset.apache.org" },
@@ -276,8 +276,8 @@ exclude = [
 line-length = 88
 indent-width = 4
 
-# Assume Python 3.8
-target-version = "py310"
+# Assume Python 3.9
+target-version = "py39"
 
 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
@@ -290,22 +290,24 @@ select = [
     "E4",
     "E7",
     "E9",
     "PT009",
     "TRY201",
-    # TODO add these rules in follow up PR
-    # "B",
-    # "C",
-    # "E",
-    # "F",
-    #"F",
-    # "I",
-    # "N",
-    # "PT",
-    # "Q",
-    # "S",
-    # "T",
-    #"W",
+    "B",
+    "C",
+    "E",
+    "F",
+    "I",
+    "N",
+    "PT",
+    "Q",
+    "S",
+    "T",
+    "W",
 ]
 ignore = [
     "S101",
+    "PT006",
+    "T201",
+    "N999",
 ]
 extend-select = ["I"]
diff --git a/scripts/benchmark_migration.py b/scripts/benchmark_migration.py
index 90d94853d..8da9a6860 100644
--- a/scripts/benchmark_migration.py
+++ b/scripts/benchmark_migration.py
@@ -70,7 +70,7 @@ def extract_modified_tables(module: ModuleType) -> set[str]:
     return tables
 
 
-def find_models(module: ModuleType) -> list[type[Model]]:
+def find_models(module: ModuleType) -> list[type[Model]]:  # noqa: C901
     """
     Find all models in a migration script.
""" @@ -94,7 +94,7 @@ def find_models(module: ModuleType) -> list[type[Model]]: # downgrade sqlalchemy_uri = current_app.config["SQLALCHEMY_DATABASE_URI"] engine = create_engine(sqlalchemy_uri) - Base = automap_base() + Base = automap_base() # noqa: N806 Base.prepare(engine, reflect=True) seen = set() while tables: @@ -138,7 +138,7 @@ def find_models(module: ModuleType) -> list[type[Model]]: @click.option("--limit", default=1000, help="Maximum number of entities.") @click.option("--force", is_flag=True, help="Do not prompt for confirmation.") @click.option("--no-auto-cleanup", is_flag=True, help="Do not remove created models.") -def main( +def main( # noqa: C901 filepath: str, limit: int = 1000, force: bool = False, no_auto_cleanup: bool = False ) -> None: auto_cleanup = not no_auto_cleanup diff --git a/scripts/cancel_github_workflows.py b/scripts/cancel_github_workflows.py index fcf3bc494..b8ae06541 100755 --- a/scripts/cancel_github_workflows.py +++ b/scripts/cancel_github_workflows.py @@ -49,7 +49,7 @@ github_repo = os.environ.get("GITHUB_REPOSITORY", "apache/superset") def request( method: Literal["GET", "POST", "DELETE", "PUT"], endpoint: str, **kwargs: Any ) -> dict[str, Any]: - resp = requests.request( + resp = requests.request( # noqa: S113 method, f"https://api.github.com/{endpoint.lstrip('/')}", headers={"Authorization": f"Bearer {github_token}"}, @@ -152,7 +152,7 @@ Date: {date_str} help="Whether to also cancel running workflows.", ) @click.argument("branch_or_pull", required=False) -def cancel_github_workflows( +def cancel_github_workflows( # noqa: C901 branch_or_pull: Optional[str], repo: str, event: list[str], diff --git a/scripts/change_detector.py b/scripts/change_detector.py index df46538f1..7394936d3 100755 --- a/scripts/change_detector.py +++ b/scripts/change_detector.py @@ -51,12 +51,12 @@ GITHUB_TOKEN = os.environ.get("GITHUB_TOKEN") def fetch_files_github_api(url: str): # type: ignore """Fetches data using GitHub API.""" - req = Request(url) + req = Request(url) # noqa: S310 req.add_header("Authorization", f"Bearer {GITHUB_TOKEN}") req.add_header("Accept", "application/vnd.github.v3+json") print(f"Fetching from {url}") - with urlopen(req) as response: + with urlopen(req) as response: # noqa: S310 body = response.read() return json.loads(body) @@ -130,7 +130,7 @@ def main(event_type: str, sha: str, repo: str) -> None: ) # Output results - output_path = os.getenv("GITHUB_OUTPUT") or "/tmp/GITHUB_OUTPUT.txt" + output_path = os.getenv("GITHUB_OUTPUT") or "/tmp/GITHUB_OUTPUT.txt" # noqa: S108 with open(output_path, "a") as f: for check, changed in changes_detected.items(): if changed: @@ -139,8 +139,8 @@ def main(event_type: str, sha: str, repo: str) -> None: def get_git_sha() -> str: - return os.getenv("GITHUB_SHA") or subprocess.check_output( - ["git", "rev-parse", "HEAD"] + return os.getenv("GITHUB_SHA") or subprocess.check_output( # noqa: S603 + ["git", "rev-parse", "HEAD"] # noqa: S607 ).strip().decode("utf-8") diff --git a/scripts/check-env.py b/scripts/check-env.py index 647aa1142..e7816fb58 100755 --- a/scripts/check-env.py +++ b/scripts/check-env.py @@ -47,7 +47,7 @@ class Requirement: def get_version(self) -> Optional[str]: try: - version = subprocess.check_output(self.command, shell=True).decode().strip() + version = subprocess.check_output(self.command, shell=True).decode().strip() # noqa: S602 if self.version_post_process: version = self.version_post_process(version) return version.split()[-1] @@ -76,7 +76,7 @@ class Requirement: def format_result(self) -> 
str: ideal_range_str = f"{self.ideal_range[0]} - {self.ideal_range[1]}" supported_range_str = f"{self.supported_range[0]} - {self.supported_range[1]}" - return f"{self.status.split()[0]} {self.name:<25} {self.version or 'N/A':<25} {ideal_range_str:<25} {supported_range_str:<25}" + return f"{self.status.split()[0]} {self.name:<25} {self.version or 'N/A':<25} {ideal_range_str:<25} {supported_range_str:<25}" # noqa: E501 def check_memory(min_gb: int) -> str: @@ -101,8 +101,9 @@ def get_cpu_info() -> str: def get_docker_platform() -> str: try: output = ( - subprocess.check_output( - "docker info --format '{{.OperatingSystem}}'", shell=True + subprocess.check_output( # noqa: S602 + "docker info --format '{{.OperatingSystem}}'", # noqa: S607 + shell=True, # noqa: S607 ) .decode() .strip() @@ -117,7 +118,7 @@ def get_docker_platform() -> str: @click.command( help=""" This script checks the local environment for various software versions and other requirements, providing feedback on whether they are ideal, supported, or unsupported. -""" +""" # noqa: E501 ) @click.option( "--docker", is_flag=True, help="Check Docker and Docker Compose requirements" @@ -128,7 +129,7 @@ This script checks the local environment for various software versions and other help="Check frontend requirements (npm, Node.js, memory)", ) @click.option("--backend", is_flag=True, help="Check backend requirements (Python)") -def main(docker: bool, frontend: bool, backend: bool) -> None: +def main(docker: bool, frontend: bool, backend: bool) -> None: # noqa: C901 requirements = [ Requirement( "python", diff --git a/scripts/cypress_run.py b/scripts/cypress_run.py index 4b9e00feb..ca8b68cd3 100644 --- a/scripts/cypress_run.py +++ b/scripts/cypress_run.py @@ -74,7 +74,7 @@ def run_cypress_for_test_file( print(f"DRY RUN: {cmd}") return 0 - process = subprocess.Popen( + process = subprocess.Popen( # noqa: S602 cmd, shell=True, stdout=subprocess.PIPE, diff --git a/scripts/erd/erd.py b/scripts/erd/erd.py index d49940feb..84f7fee29 100644 --- a/scripts/erd/erd.py +++ b/scripts/erd/erd.py @@ -171,7 +171,7 @@ def generate_erd(file_path: str) -> None: """ data = introspect_models() templates_path = os.path.dirname(__file__) - env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_path)) + env = jinja2.Environment(loader=jinja2.FileSystemLoader(templates_path)) # noqa: S701 # Load the template template = env.get_template("erd.template.puml") diff --git a/setup.py b/setup.py index 00b8d22e2..b89288f76 100644 --- a/setup.py +++ b/setup.py @@ -30,7 +30,7 @@ with open(PACKAGE_JSON) as package_file: def get_git_sha() -> str: try: - output = subprocess.check_output(["git", "rev-parse", "HEAD"]) + output = subprocess.check_output(["git", "rev-parse", "HEAD"]) # noqa: S603, S607 return output.decode().strip() except Exception: # pylint: disable=broad-except return "" @@ -58,7 +58,7 @@ setup( zip_safe=False, entry_points={ "console_scripts": ["superset=superset.cli.main:superset"], - # the `postgres` and `postgres+psycopg2://` schemes were removed in SQLAlchemy 1.4 + # the `postgres` and `postgres+psycopg2://` schemes were removed in SQLAlchemy 1.4 # noqa: E501 # add an alias here to prevent breaking existing databases "sqlalchemy.dialects": [ "postgres.psycopg2 = sqlalchemy.dialects.postgresql:dialect", diff --git a/superset/advanced_data_type/api.py b/superset/advanced_data_type/api.py index c96c54157..3659429ba 100644 --- a/superset/advanced_data_type/api.py +++ b/superset/advanced_data_type/api.py @@ -39,7 +39,7 @@ class 
AdvancedDataTypeRestApi(BaseSupersetApi): -Will return available AdvancedDataTypes when the /types endpoint is accessed -Will return a AdvancedDataTypeResponse object when the /convert endpoint is accessed and is passed in valid arguments - """ + """ # noqa: E501 allow_browser_login = True resource_name = "advanced_data_type" @@ -92,7 +92,7 @@ class AdvancedDataTypeRestApi(BaseSupersetApi): $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' - """ + """ # noqa: E501 item = kwargs["rison"] advanced_data_type = item["type"] values = item["values"] diff --git a/superset/advanced_data_type/plugins/internet_address.py b/superset/advanced_data_type/plugins/internet_address.py index f4a6faffe..582451fa4 100644 --- a/superset/advanced_data_type/plugins/internet_address.py +++ b/superset/advanced_data_type/plugins/internet_address.py @@ -65,7 +65,7 @@ def cidr_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeResponse: break else: resp["display_value"] = ", ".join( - map( + map( # noqa: C417 lambda x: f"{x['start']} - {x['end']}" if isinstance(x, dict) else str(x), @@ -76,7 +76,7 @@ def cidr_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeResponse: # Make this return a single clause -def cidr_translate_filter_func( +def cidr_translate_filter_func( # noqa: C901 col: Column, operator: FilterOperator, values: list[Any] ) -> Any: """ diff --git a/superset/advanced_data_type/plugins/internet_port.py b/superset/advanced_data_type/plugins/internet_port.py index 1af8e847a..3c03107ff 100644 --- a/superset/advanced_data_type/plugins/internet_port.py +++ b/superset/advanced_data_type/plugins/internet_port.py @@ -94,7 +94,7 @@ def port_translation_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeRespo break else: resp["display_value"] = ", ".join( - map( + map( # noqa: C417 lambda x: f"{x['start']} - {x['end']}" if isinstance(x, dict) else str(x), @@ -104,7 +104,7 @@ def port_translation_func(req: AdvancedDataTypeRequest) -> AdvancedDataTypeRespo return resp -def port_translate_filter_func( +def port_translate_filter_func( # noqa: C901 col: Column, operator: FilterOperator, values: list[Any] ) -> Any: """ diff --git a/superset/annotation_layers/annotations/api.py b/superset/annotation_layers/annotations/api.py index 0be6efbfa..d131b0f4f 100644 --- a/superset/annotation_layers/annotations/api.py +++ b/superset/annotation_layers/annotations/api.py @@ -181,7 +181,7 @@ class AnnotationRestApi(BaseSupersetModelRestApi): $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' - """ + """ # noqa: E501 self._apply_layered_relation_to_rison(pk, kwargs["rison"]) return self.get_list_headless(**kwargs) diff --git a/superset/async_events/async_query_manager.py b/superset/async_events/async_query_manager.py index b116c3cfc..0e59b2c3d 100644 --- a/superset/async_events/async_query_manager.py +++ b/superset/async_events/async_query_manager.py @@ -35,15 +35,15 @@ from superset.utils.core import get_user_id logger = logging.getLogger(__name__) -class CacheBackendNotInitialized(Exception): +class CacheBackendNotInitialized(Exception): # noqa: N818 pass -class AsyncQueryTokenException(Exception): +class AsyncQueryTokenException(Exception): # noqa: N818 pass -class AsyncQueryJobException(Exception): +class AsyncQueryJobException(Exception): # noqa: N818 pass @@ -88,7 +88,7 @@ def get_cache_backend( return RedisSentinelCacheBackend.from_config(cache_config) # TODO: Deprecate hardcoded plain Redis code and expand cache backend options. 
- # Maintain backward compatibility with 'GLOBAL_ASYNC_QUERIES_REDIS_CONFIG' until it is deprecated. + # Maintain backward compatibility with 'GLOBAL_ASYNC_QUERIES_REDIS_CONFIG' until it is deprecated. # noqa: E501 return redis.Redis( **config["GLOBAL_ASYNC_QUERIES_REDIS_CONFIG"], decode_responses=True ) @@ -265,7 +265,7 @@ class AsyncQueryManager: stream_name = f"{self._stream_prefix}{channel}" start_id = increment_id(last_id) if last_id else "-" results = self._cache.xrange(stream_name, start_id, "+", self.MAX_EVENT_COUNT) - # Decode bytes to strings, decode_responses is not supported at RedisCache and RedisSentinelCache + # Decode bytes to strings, decode_responses is not supported at RedisCache and RedisSentinelCache # noqa: E501 if isinstance(self._cache, (RedisSentinelCacheBackend, RedisCacheBackend)): decoded_results = [ ( diff --git a/superset/charts/api.py b/superset/charts/api.py index 64177950b..c7266866e 100644 --- a/superset/charts/api.py +++ b/superset/charts/api.py @@ -995,7 +995,7 @@ class ChartRestApi(BaseSupersetModelRestApi): $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' - """ + """ # noqa: E501 try: body = ChartCacheWarmUpRequestSchema().load(request.json) except ValidationError as error: diff --git a/superset/charts/data/api.py b/superset/charts/data/api.py index 0f6e605c7..a71171d29 100644 --- a/superset/charts/data/api.py +++ b/superset/charts/data/api.py @@ -306,7 +306,7 @@ class ChartDataRestApi(ChartRestApi): cached_data = self._load_query_context_form_from_cache(cache_key) # Set form_data in Flask Global as it is used as a fallback # for async queries with jinja context - setattr(g, "form_data", cached_data) + g.form_data = cached_data query_context = self._create_query_context_from_form(cached_data) command = ChartDataCommand(query_context) command.validate() @@ -343,7 +343,7 @@ class ChartDataRestApi(ChartRestApi): result = async_command.run(form_data, get_user_id()) return self.response(202, **result) - def _send_chart_response( + def _send_chart_response( # noqa: C901 self, result: dict[Any, Any], form_data: dict[str, Any] | None = None, diff --git a/superset/charts/post_processing.py b/superset/charts/post_processing.py index edc3c9447..807760cb6 100644 --- a/superset/charts/post_processing.py +++ b/superset/charts/post_processing.py @@ -59,7 +59,7 @@ def get_column_key(label: tuple[str, ...], metrics: list[str]) -> tuple[Any, ... 
return tuple(parts) -def pivot_df( # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches +def pivot_df( # pylint: disable=too-many-locals, too-many-arguments, too-many-statements, too-many-branches # noqa: C901 df: pd.DataFrame, rows: list[str], columns: list[str], @@ -173,7 +173,7 @@ def pivot_df( # pylint: disable=too-many-locals, too-many-arguments, too-many-s subtotal = pivot_v2_aggfunc_map[aggfunc](df.iloc[:, slice_], axis=1) depth = df.columns.nlevels - len(subgroup) - 1 total = metric_name if level == 0 else __("Subtotal") - subtotal_name = tuple([*subgroup, total, *([""] * depth)]) + subtotal_name = tuple([*subgroup, total, *([""] * depth)]) # noqa: C409 # insert column after subgroup df.insert(int(slice_.stop), subtotal_name, subtotal) @@ -190,7 +190,7 @@ def pivot_df( # pylint: disable=too-many-locals, too-many-arguments, too-many-s ) depth = df.index.nlevels - len(subgroup) - 1 total = metric_name if level == 0 else __("Subtotal") - subtotal.name = tuple([*subgroup, total, *([""] * depth)]) + subtotal.name = tuple([*subgroup, total, *([""] * depth)]) # noqa: C409 # insert row after subgroup df = pd.concat( [df[: slice_.stop], subtotal.to_frame().T, df[slice_.stop :]] @@ -284,7 +284,7 @@ def table( format_ = "{:" + config["d3NumberFormat"] + "}" try: df[column] = df[column].apply(format_.format) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: S110 # if we can't format the column for any reason, send as is pass @@ -298,7 +298,7 @@ post_processors = { @event_logger.log_this -def apply_post_process( +def apply_post_process( # noqa: C901 result: dict[Any, Any], form_data: Optional[dict[str, Any]] = None, datasource: Optional[Union["BaseDatasource", "Query"]] = None, diff --git a/superset/charts/schemas.py b/superset/charts/schemas.py index 89e47a9dc..5531e057c 100644 --- a/superset/charts/schemas.py +++ b/superset/charts/schemas.py @@ -320,7 +320,7 @@ class ChartDataAdhocMetricSchema(Schema): Ad-hoc metrics are used to define metrics outside the datasource. """ - expressionType = fields.String( + expressionType = fields.String( # noqa: N815 metadata={"description": "Simple or SQL metric", "example": "SQL"}, required=True, validate=validate.OneOf(choices=("SIMPLE", "SQL")), @@ -335,7 +335,7 @@ class ChartDataAdhocMetricSchema(Schema): ), ) column = fields.Nested(ChartDataColumnSchema) - sqlExpression = fields.String( + sqlExpression = fields.String( # noqa: N815 metadata={ "description": "The metric as defined by a SQL aggregate expression. " "Only required for SQL expression type.", @@ -349,14 +349,14 @@ class ChartDataAdhocMetricSchema(Schema): "example": "Weighted observations", }, ) - hasCustomLabel = fields.Boolean( + hasCustomLabel = fields.Boolean( # noqa: N815 metadata={ - "description": "When false, the label will be automatically generated based " - "on the aggregate expression. When true, a custom label has to be specified.", + "description": "When false, the label will be automatically generated based " # noqa: E501 + "on the aggregate expression. When true, a custom label has to be specified.", # noqa: E501 "example": True, }, ) - optionName = fields.String( + optionName = fields.String( # noqa: N815 metadata={ "description": "Unique identifier. Can be any string value, as long as all " "metrics have a unique identifier. 
If undefined, a random name" @@ -364,15 +364,15 @@ class ChartDataAdhocMetricSchema(Schema): "example": "metric_aec60732-fac0-4b17-b736-93f1a5c93e30", }, ) - timeGrain = fields.String( + timeGrain = fields.String( # noqa: N815 metadata={ "description": "Optional time grain for temporal filters", "example": "PT1M", }, ) - isExtra = fields.Boolean( + isExtra = fields.Boolean( # noqa: N815 metadata={ - "description": "Indicates if the filter has been added by a filter component " + "description": "Indicates if the filter has been added by a filter component " # noqa: E501 "as opposed to being a part of the original query." } ) @@ -437,8 +437,8 @@ class ChartDataRollingOptionsSchema(ChartDataPostProcessingOperationOptionsSchem metadata={ "description": "columns on which to perform rolling, mapping source " "column to target column. For instance, `{'y': 'y'}` will replace the " - "column `y` with the rolling value in `y`, while `{'y': 'y2'}` will add " - "a column `y2` based on rolling values calculated from `y`, leaving the " + "column `y` with the rolling value in `y`, while `{'y': 'y2'}` will add " # noqa: E501 + "a column `y2` based on rolling values calculated from `y`, leaving the " # noqa: E501 "original column `y` unchanged.", "example": {"weekly_rolling_sales": "sales"}, }, @@ -541,7 +541,7 @@ class ChartDataSelectOptionsSchema(ChartDataPostProcessingOperationOptionsSchema columns = fields.List( fields.String(), metadata={ - "description": "Columns which to select from the input data, in the desired " + "description": "Columns which to select from the input data, in the desired " # noqa: E501 "order. If columns are renamed, the original column name should be " "referenced here.", "example": ["country", "gender", "age"], @@ -691,8 +691,8 @@ class ChartDataBoxplotOptionsSchema(ChartDataPostProcessingOperationOptionsSchem "references to datasource metrics (strings), or ad-hoc metrics" "which are defined only within the query object. See " "`ChartDataAdhocMetricSchema` for the structure of ad-hoc metrics. " - "When metrics is undefined or null, the query is executed without a groupby. " - "However, when metrics is an array (length >= 0), a groupby clause is added " + "When metrics is undefined or null, the query is executed without a groupby. " # noqa: E501 + "However, when metrics is an array (length >= 0), a groupby clause is added " # noqa: E501 "to the query." }, allow_none=True, @@ -907,7 +907,7 @@ class ChartDataPostProcessingOperationSchema(Schema): class ChartDataFilterSchema(Schema): col = fields.Raw( metadata={ - "description": "The column to filter by. Can be either a string (physical or " + "description": "The column to filter by. Can be either a string (physical or " # noqa: E501 "saved expression) or an object (adhoc column)", "example": "country", }, @@ -934,7 +934,7 @@ class ChartDataFilterSchema(Schema): "example": "PT1M", }, ) - isExtra = fields.Boolean( + isExtra = fields.Boolean( # noqa: N815 metadata={ "description": "Indicates if the filter has been added by a filter " "component as opposed to being a part of the original query." 
@@ -995,7 +995,7 @@ class ChartDataExtrasSchema(Schema): class AnnotationLayerSchema(Schema): - annotationType = fields.String( + annotationType = fields.String( # noqa: N815 metadata={"description": "Type of annotation layer"}, validate=validate.OneOf(choices=[ann.value for ann in AnnotationType]), ) @@ -1003,20 +1003,20 @@ class AnnotationLayerSchema(Schema): metadata={"description": "Layer color"}, allow_none=True, ) - descriptionColumns = fields.List( + descriptionColumns = fields.List( # noqa: N815 fields.String(), metadata={ "description": "Columns to use as the description. If none are provided, " "all will be shown." }, ) - hideLine = fields.Boolean( + hideLine = fields.Boolean( # noqa: N815 metadata={ "description": "Should line be hidden. Only applies to line annotations" }, allow_none=True, ) - intervalEndColumn = fields.String( + intervalEndColumn = fields.String( # noqa: N815 metadata={ "description": "Column containing end of interval. " "Only applies to interval layers" @@ -1046,17 +1046,17 @@ class AnnotationLayerSchema(Schema): show = fields.Boolean( metadata={"description": "Should the layer be shown"}, required=True ) - showLabel = fields.Boolean( + showLabel = fields.Boolean( # noqa: N815 metadata={"description": "Should the label always be shown"}, allow_none=True, ) - showMarkers = fields.Boolean( + showMarkers = fields.Boolean( # noqa: N815 metadata={ "description": "Should markers be shown. Only applies to line annotations." }, required=True, ) - sourceType = fields.String( + sourceType = fields.String( # noqa: N815 metadata={"description": "Type of source for annotation data"}, validate=validate.OneOf( choices=( @@ -1078,11 +1078,11 @@ class AnnotationLayerSchema(Schema): ) ), ) - timeColumn = fields.String( + timeColumn = fields.String( # noqa: N815 metadata={"description": "Column with event date or interval start date"}, allow_none=True, ) - titleColumn = fields.String( + titleColumn = fields.String( # noqa: N815 metadata={"description": "Column with title"}, allow_none=True, ) @@ -1180,7 +1180,7 @@ class ChartDataQueryObjectSchema(Schema): fields.Nested(ChartDataPostProcessingOperationSchema, allow_none=True), allow_none=True, metadata={ - "description": "Post processing operations to be applied to the result set. " + "description": "Post processing operations to be applied to the result set. " # noqa: E501 "Operations are applied to the result set in sequential order." }, ) @@ -1420,7 +1420,7 @@ class ChartDataResponseResult(Schema): ) cache_timeout = fields.Integer( metadata={ - "description": "Cache timeout in following order: custom timeout, datasource " + "description": "Cache timeout in following order: custom timeout, datasource " # noqa: E501 "timeout, cache default timeout, config default cache timeout." 
}, required=True, @@ -1531,7 +1531,7 @@ class GetFavStarIdsSchema(Schema): result = fields.List( fields.Nested(ChartFavStarResponseResult), metadata={ - "description": "A list of results for each corresponding chart in the request" + "description": "A list of results for each corresponding chart in the request" # noqa: E501 }, ) @@ -1559,7 +1559,7 @@ class ChartCacheWarmUpRequestSchema(Schema): ) dashboard_id = fields.Integer( metadata={ - "description": "The ID of the dashboard to get filters for when warming cache" + "description": "The ID of the dashboard to get filters for when warming cache" # noqa: E501 } ) extra_filters = fields.String( diff --git a/superset/cli/lib.py b/superset/cli/lib.py index 68f6f0383..bf6d77ce3 100755 --- a/superset/cli/lib.py +++ b/superset/cli/lib.py @@ -29,7 +29,7 @@ if feature_flags_func: try: # pylint: disable=not-callable feature_flags = feature_flags_func(feature_flags) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: S110 # bypass any feature flags that depend on context # that's not available pass diff --git a/superset/cli/main.py b/superset/cli/main.py index ffe3278b1..42315fd90 100755 --- a/superset/cli/main.py +++ b/superset/cli/main.py @@ -47,7 +47,7 @@ def superset() -> None: # add sub-commands -for load, module_name, is_pkg in pkgutil.walk_packages( +for load, module_name, is_pkg in pkgutil.walk_packages( # noqa: B007 cli.__path__, cli.__name__ + "." ): module = importlib.import_module(module_name) diff --git a/superset/cli/test.py b/superset/cli/test.py index 60ea532cb..e8cc0925e 100755 --- a/superset/cli/test.py +++ b/superset/cli/test.py @@ -76,5 +76,5 @@ def load_test_users() -> None: "user", username + "@fab.org", sm.find_role(role), - password="general", + password="general", # noqa: S106 ) diff --git a/superset/cli/test_db.py b/superset/cli/test_db.py index 4b8aebf93..b453711d8 100644 --- a/superset/cli/test_db.py +++ b/superset/cli/test_db.py @@ -198,7 +198,7 @@ def collect_connection_info( return yaml.safe_load(raw_engine_kwargs) -def test_db_engine_spec( +def test_db_engine_spec( # noqa: C901 console: Console, sqlalchemy_uri: str, ) -> type[BaseEngineSpec] | None: diff --git a/superset/commands/chart/importers/v1/__init__.py b/superset/commands/chart/importers/v1/__init__.py index dc5a70796..8ba24035a 100644 --- a/superset/commands/chart/importers/v1/__init__.py +++ b/superset/commands/chart/importers/v1/__init__.py @@ -47,7 +47,7 @@ class ImportChartsCommand(ImportModelsCommand): import_error = ChartImportError @staticmethod - def _import(configs: dict[str, Any], overwrite: bool = False) -> None: + def _import(configs: dict[str, Any], overwrite: bool = False) -> None: # noqa: C901 # discover datasets associated with charts dataset_uuids: set[str] = set() for file_name, config in configs.items(): diff --git a/superset/commands/chart/update.py b/superset/commands/chart/update.py index d6b212d5c..b7d0a4dab 100644 --- a/superset/commands/chart/update.py +++ b/superset/commands/chart/update.py @@ -71,7 +71,7 @@ class UpdateChartCommand(UpdateMixin, BaseCommand): return ChartDAO.update(self._model, self._properties) - def validate(self) -> None: + def validate(self) -> None: # noqa: C901 exceptions: list[ValidationError] = [] dashboard_ids = self._properties.get("dashboards") owner_ids: Optional[list[int]] = self._properties.get("owners") diff --git a/superset/commands/dashboard/importers/v0.py b/superset/commands/dashboard/importers/v0.py index 99090e7d4..4c46c4041 100644 --- 
a/superset/commands/dashboard/importers/v0.py +++ b/superset/commands/dashboard/importers/v0.py @@ -81,7 +81,7 @@ def import_chart( return slc_to_import.id -def import_dashboard( +def import_dashboard( # noqa: C901 # pylint: disable=too-many-locals,too-many-statements dashboard_to_import: Dashboard, dataset_id_mapping: Optional[dict[int, int]] = None, diff --git a/superset/commands/dashboard/importers/v1/__init__.py b/superset/commands/dashboard/importers/v1/__init__.py index 18cbb7da8..17fb85fcf 100644 --- a/superset/commands/dashboard/importers/v1/__init__.py +++ b/superset/commands/dashboard/importers/v1/__init__.py @@ -60,7 +60,7 @@ class ImportDashboardsCommand(ImportModelsCommand): # TODO (betodealmeida): refactor to use code from other commands # pylint: disable=too-many-branches, too-many-locals @staticmethod - def _import(configs: dict[str, Any], overwrite: bool = False) -> None: + def _import(configs: dict[str, Any], overwrite: bool = False) -> None: # noqa: C901 # discover charts and datasets associated with dashboards chart_uuids: set[str] = set() dataset_uuids: set[str] = set() diff --git a/superset/commands/dashboard/importers/v1/utils.py b/superset/commands/dashboard/importers/v1/utils.py index 5e949093b..26cbeb038 100644 --- a/superset/commands/dashboard/importers/v1/utils.py +++ b/superset/commands/dashboard/importers/v1/utils.py @@ -57,7 +57,7 @@ def build_uuid_to_id_map(position: dict[str, Any]) -> dict[str, int]: } -def update_id_refs( # pylint: disable=too-many-locals +def update_id_refs( # pylint: disable=too-many-locals # noqa: C901 config: dict[str, Any], chart_ids: dict[str, int], dataset_info: dict[str, dict[str, Any]], @@ -143,7 +143,7 @@ def update_id_refs( # pylint: disable=too-many-locals return fixed -def import_dashboard( +def import_dashboard( # noqa: C901 config: dict[str, Any], overwrite: bool = False, ignore_permissions: bool = False, diff --git a/superset/commands/dashboard/update.py b/superset/commands/dashboard/update.py index 079e62cfe..0f67bc5f0 100644 --- a/superset/commands/dashboard/update.py +++ b/superset/commands/dashboard/update.py @@ -119,7 +119,7 @@ class UpdateDashboardCommand(UpdateMixin, BaseCommand): if exceptions: raise DashboardInvalidError(exceptions=exceptions) - def process_tab_diff(self) -> None: + def process_tab_diff(self) -> None: # noqa: C901 def find_deleted_tabs() -> list[str]: position_json = self._properties.get("position_json", "") current_tabs = self._model.tabs # type: ignore @@ -143,7 +143,7 @@ class UpdateDashboardCommand(UpdateMixin, BaseCommand): """ The dashboard tab used in this report has been deleted and your report has been deactivated. Please update your report settings to remove or change the tab used. 
- """ + """ # noqa: E501 ) html_content = textwrap.dedent( diff --git a/superset/commands/database/importers/v1/utils.py b/superset/commands/database/importers/v1/utils.py index 0098bfa26..a36c41138 100644 --- a/superset/commands/database/importers/v1/utils.py +++ b/superset/commands/database/importers/v1/utils.py @@ -48,7 +48,7 @@ def import_database( config["id"] = existing.id elif not can_write: raise ImportFailedError( - "Database doesn't exist and user doesn't have permission to create databases" + "Database doesn't exist and user doesn't have permission to create databases" # noqa: E501 ) # Check if this URI is allowed if app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]: diff --git a/superset/commands/database/ssh_tunnel/exceptions.py b/superset/commands/database/ssh_tunnel/exceptions.py index f74e8f397..765c88e68 100644 --- a/superset/commands/database/ssh_tunnel/exceptions.py +++ b/superset/commands/database/ssh_tunnel/exceptions.py @@ -69,9 +69,9 @@ class SSHTunnelRequiredFieldValidationError(ValidationError, SSHTunnelError): ) -class SSHTunnelMissingCredentials(CommandInvalidError, SSHTunnelError): +class SSHTunnelMissingCredentials(CommandInvalidError, SSHTunnelError): # noqa: N818 message = _("Must provide credentials for the SSH Tunnel") -class SSHTunnelInvalidCredentials(CommandInvalidError, SSHTunnelError): +class SSHTunnelInvalidCredentials(CommandInvalidError, SSHTunnelError): # noqa: N818 message = _("Cannot have multiple credentials for the SSH Tunnel") diff --git a/superset/commands/database/test_connection.py b/superset/commands/database/test_connection.py index 7330446d4..6988acc39 100644 --- a/superset/commands/database/test_connection.py +++ b/superset/commands/database/test_connection.py @@ -93,7 +93,7 @@ class TestConnectionDatabaseCommand(BaseCommand): self._context = context self._uri = uri - def run(self) -> None: # pylint: disable=too-many-statements,too-many-branches + def run(self) -> None: # pylint: disable=too-many-statements,too-many-branches # noqa: C901 self.validate() ex_str = "" ssh_tunnel = self._properties.get("ssh_tunnel") @@ -155,7 +155,7 @@ class TestConnectionDatabaseCommand(BaseCommand): raise SupersetTimeoutException( error_type=SupersetErrorType.CONNECTION_DATABASE_TIMEOUT, message=( - "Please check your connection details and database settings, " + "Please check your connection details and database settings, " # noqa: E501 "and ensure that your database is accepting connections, " "then try connecting again." 
), diff --git a/superset/commands/dataset/exceptions.py b/superset/commands/dataset/exceptions.py index 83b5436c2..04afc4fc9 100644 --- a/superset/commands/dataset/exceptions.py +++ b/superset/commands/dataset/exceptions.py @@ -143,7 +143,7 @@ class OwnersNotFoundValidationError(ValidationError): super().__init__([_("Owners are invalid")], field_name="owners") -class DatasetDataAccessIsNotAllowed(ValidationError): +class DatasetDataAccessIsNotAllowed(ValidationError): # noqa: N818 status = 422 def __init__(self, message: str) -> None: @@ -195,7 +195,7 @@ class DatasetDuplicateFailedError(CreateFailedError): message = _("Dataset could not be duplicated.") -class DatasetForbiddenDataURI(ImportFailedError): +class DatasetForbiddenDataURI(ImportFailedError): # noqa: N818 message = _("Data URI is not allowed.") diff --git a/superset/commands/dataset/importers/v1/utils.py b/superset/commands/dataset/importers/v1/utils.py index 589451084..f3611e00d 100644 --- a/superset/commands/dataset/importers/v1/utils.py +++ b/superset/commands/dataset/importers/v1/utils.py @@ -102,7 +102,7 @@ def validate_data_uri(data_uri: str) -> None: raise DatasetForbiddenDataURI() -def import_dataset( +def import_dataset( # noqa: C901 config: dict[str, Any], overwrite: bool = False, force_data: bool = False, @@ -151,7 +151,7 @@ def import_dataset( try: dataset = SqlaTable.import_from_dict(config, recursive=True, sync=sync) except MultipleResultsFound: - # Finding multiple results when importing a dataset only happens because initially + # Finding multiple results when importing a dataset only happens because initially # noqa: E501 # datasets were imported without schemas (eg, `examples.NULL.users`), and later # they were fixed to have the default schema (eg, `examples.public.users`). 
If a # user created `examples.public.users` during that time the second import will @@ -193,7 +193,7 @@ def load_data(data_uri: str, dataset: SqlaTable, database: Database) -> None: """ validate_data_uri(data_uri) logger.info("Downloading data from %s", data_uri) - data = request.urlopen(data_uri) # pylint: disable=consider-using-with + data = request.urlopen(data_uri) # pylint: disable=consider-using-with # noqa: S310 if data_uri.endswith(".gz"): data = gzip.open(data) df = pd.read_csv(data, encoding="utf-8") diff --git a/superset/commands/exceptions.py b/superset/commands/exceptions.py index aa5188679..b17dce334 100644 --- a/superset/commands/exceptions.py +++ b/superset/commands/exceptions.py @@ -71,7 +71,7 @@ class CommandInvalidError(CommandException): self._exceptions.extend(exceptions) def get_list_classnames(self) -> list[str]: - return list(sorted({ex.__class__.__name__ for ex in self._exceptions})) + return sorted({ex.__class__.__name__ for ex in self._exceptions}) def normalized_messages(self) -> dict[Any, Any]: errors: dict[Any, Any] = {} diff --git a/superset/commands/explore/get.py b/superset/commands/explore/get.py index 535ed8ee3..dc600bbf9 100644 --- a/superset/commands/explore/get.py +++ b/superset/commands/explore/get.py @@ -59,7 +59,7 @@ class GetExploreCommand(BaseCommand, ABC): self._slice_id = params.slice_id # pylint: disable=too-many-locals,too-many-branches,too-many-statements - def run(self) -> Optional[dict[str, Any]]: + def run(self) -> Optional[dict[str, Any]]: # noqa: C901 initial_form_data = {} if self._permalink_key is not None: command = GetExplorePermalinkCommand(self._permalink_key) diff --git a/superset/commands/importers/v1/__init__.py b/superset/commands/importers/v1/__init__.py index f90708acf..989c494b6 100644 --- a/superset/commands/importers/v1/__init__.py +++ b/superset/commands/importers/v1/__init__.py @@ -91,7 +91,7 @@ class ImportModelsCommand(BaseCommand): if self.dao.model_cls: validate_metadata_type(metadata, self.dao.model_cls.__name__, exceptions) - # load the configs and make sure we have confirmation to overwrite existing models + # load the configs and make sure we have confirmation to overwrite existing models # noqa: E501 self._configs = load_configs( self.contents, self.schemas, diff --git a/superset/commands/importers/v1/assets.py b/superset/commands/importers/v1/assets.py index c0be04a66..41895f84d 100644 --- a/superset/commands/importers/v1/assets.py +++ b/superset/commands/importers/v1/assets.py @@ -82,7 +82,7 @@ class ImportAssetsCommand(BaseCommand): # pylint: disable=too-many-locals @staticmethod - def _import(configs: dict[str, Any]) -> None: + def _import(configs: dict[str, Any]) -> None: # noqa: C901 # import databases first database_ids: dict[str, int] = {} for file_name, config in configs.items(): diff --git a/superset/commands/importers/v1/examples.py b/superset/commands/importers/v1/examples.py index bcf6b5062..1ffb40c8b 100644 --- a/superset/commands/importers/v1/examples.py +++ b/superset/commands/importers/v1/examples.py @@ -87,7 +87,7 @@ class ImportExamplesCommand(ImportModelsCommand): ) @staticmethod - def _import( # pylint: disable=too-many-locals, too-many-branches + def _import( # pylint: disable=too-many-locals, too-many-branches # noqa: C901 configs: dict[str, Any], overwrite: bool = False, force_data: bool = False, diff --git a/superset/commands/importers/v1/utils.py b/superset/commands/importers/v1/utils.py index 51ab99271..ec5387708 100644 --- a/superset/commands/importers/v1/utils.py +++ 
b/superset/commands/importers/v1/utils.py @@ -96,7 +96,7 @@ def validate_metadata_type( # pylint: disable=too-many-locals,too-many-arguments -def load_configs( +def load_configs( # noqa: C901 contents: dict[str, str], schemas: dict[str, Schema], passwords: dict[str, str], diff --git a/superset/commands/report/alert.py b/superset/commands/report/alert.py index 30861bdda..ea45853b2 100644 --- a/superset/commands/report/alert.py +++ b/superset/commands/report/alert.py @@ -96,7 +96,7 @@ class AlertCommand(BaseCommand): if len(rows) > 1: raise AlertQueryMultipleRowsError( message=_( - "Alert query returned more than one row. %(num_rows)s rows returned", + "Alert query returned more than one row. %(num_rows)s rows returned", # noqa: E501 num_rows=len(rows), ) ) diff --git a/superset/commands/report/create.py b/superset/commands/report/create.py index 9191e5a17..e238102f9 100644 --- a/superset/commands/report/create.py +++ b/superset/commands/report/create.py @@ -61,7 +61,7 @@ class CreateReportScheduleCommand(CreateMixin, BaseReportScheduleCommand): a list of `ValidationErrors` to be returned in the API response if any. Fields were loaded according to the `ReportSchedulePostSchema` schema. - """ + """ # noqa: E501 # Required fields cron_schedule = self._properties["crontab"] name = self._properties["name"] diff --git a/superset/commands/report/exceptions.py b/superset/commands/report/exceptions.py index 495e0bff9..868862781 100644 --- a/superset/commands/report/exceptions.py +++ b/superset/commands/report/exceptions.py @@ -95,7 +95,7 @@ class ReportScheduleEitherChartOrDashboardError(ValidationError): ) -class ReportScheduleFrequencyNotAllowed(ValidationError): +class ReportScheduleFrequencyNotAllowed(ValidationError): # noqa: N818 """ Marshmallow validation error for report schedule configured to run more frequently than allowed @@ -140,7 +140,7 @@ class DashboardNotSavedValidationError(ValidationError): def __init__(self) -> None: super().__init__( _( - "Please save your dashboard first, then try creating a new email report." + "Please save your dashboard first, then try creating a new email report." # noqa: E501 ), field_name="dashboard", ) diff --git a/superset/commands/report/execute.py b/superset/commands/report/execute.py index c808a993b..9293e967a 100644 --- a/superset/commands/report/execute.py +++ b/superset/commands/report/execute.py @@ -226,7 +226,7 @@ class BaseReportState: ) -> list[str]: """ Retrieve the URL for the dashboard tabs, or return the dashboard URL if no tabs are available. 
- """ + """ # noqa: E501 force = "true" if self._report_schedule.force_screenshot else "false" if ( dashboard_state := self._report_schedule.extra.get("dashboard") @@ -464,7 +464,7 @@ class BaseReportState: } return log_data - def _get_notification_content(self) -> NotificationContent: + def _get_notification_content(self) -> NotificationContent: # noqa: C901 """ Gets a notification content, this is composed by a title and a screenshot diff --git a/superset/commands/report/update.py b/superset/commands/report/update.py index 2aab3bd8c..abae62cad 100644 --- a/superset/commands/report/update.py +++ b/superset/commands/report/update.py @@ -53,14 +53,14 @@ class UpdateReportScheduleCommand(UpdateMixin, BaseReportScheduleCommand): self.validate() return ReportScheduleDAO.update(self._model, self._properties) - def validate(self) -> None: + def validate(self) -> None: # noqa: C901 """ Validates the properties of a report schedule configuration, including uniqueness of name and type, relations based on the report type, frequency, etc. Populates a list of `ValidationErrors` to be returned in the API response if any. Fields were loaded according to the `ReportSchedulePutSchema` schema. - """ + """ # noqa: E501 # Load existing report schedule config self._model = ReportScheduleDAO.find_by_id(self._model_id) if not self._model: diff --git a/superset/commands/sql_lab/query.py b/superset/commands/sql_lab/query.py index 87bc0d28a..466d5f99c 100644 --- a/superset/commands/sql_lab/query.py +++ b/superset/commands/sql_lab/query.py @@ -39,7 +39,7 @@ class QueryPruneCommand(BaseCommand): Attributes: retention_period_days (int): The number of days for which records should be retained. Records older than this period will be deleted. - """ + """ # noqa: E501 def __init__(self, retention_period_days: int): """ @@ -83,7 +83,7 @@ class QueryPruneCommand(BaseCommand): # Update the total number of deleted records total_deleted += result.rowcount - # Explicitly commit the transaction given that if an error occurs, we want to ensure that the + # Explicitly commit the transaction given that if an error occurs, we want to ensure that the # noqa: E501 # records that have been deleted so far are committed db.session.commit() @@ -91,7 +91,7 @@ class QueryPruneCommand(BaseCommand): percentage_complete = (total_deleted / total_rows) * 100 if percentage_complete >= next_logging_threshold: logger.info( - "Deleted %s rows from the query table older than %s days (%d%% complete)", + "Deleted %s rows from the query table older than %s days (%d%% complete)", # noqa: E501 total_deleted, self.retention_period_days, percentage_complete, diff --git a/superset/common/query_context_factory.py b/superset/common/query_context_factory.py index 5503f3cb0..3e3fe2913 100644 --- a/superset/common/query_context_factory.py +++ b/superset/common/query_context_factory.py @@ -112,7 +112,7 @@ class QueryContextFactory: # pylint: disable=too-few-public-methods self._apply_filters(query_object) return query_object - def _apply_granularity( + def _apply_granularity( # noqa: C901 self, query_object: QueryObject, form_data: dict[str, Any] | None, diff --git a/superset/common/query_context_processor.py b/superset/common/query_context_processor.py index 27478fe6d..edf06e9d8 100644 --- a/superset/common/query_context_processor.py +++ b/superset/common/query_context_processor.py @@ -394,15 +394,15 @@ class QueryContextProcessor: :returns: The time offset. 
""" if offset == "inherit": - # return the difference in days between the from and the to dttm formatted as a string with the " days ago" suffix + # return the difference in days between the from and the to dttm formatted as a string with the " days ago" suffix # noqa: E501 return f"{(outer_to_dttm - outer_from_dttm).days} days ago" if self.is_valid_date(offset): - # return the offset as the difference in days between the outer from dttm and the offset date (which is a YYYY-MM-DD string) formatted as a string with the " days ago" suffix + # return the offset as the difference in days between the outer from dttm and the offset date (which is a YYYY-MM-DD string) formatted as a string with the " days ago" suffix # noqa: E501 offset_date = datetime.strptime(offset, "%Y-%m-%d") return f"{(outer_from_dttm - offset_date).days} days ago" return "" - def processing_time_offsets( # pylint: disable=too-many-locals,too-many-statements + def processing_time_offsets( # pylint: disable=too-many-locals,too-many-statements # noqa: C901 self, df: pd.DataFrame, query_object: QueryObject, @@ -433,14 +433,14 @@ class QueryContextProcessor: for offset in query_object.time_offsets: try: # pylint: disable=line-too-long - # Since the x-axis is also a column name for the time filter, x_axis_label will be set as granularity + # Since the x-axis is also a column name for the time filter, x_axis_label will be set as granularity # noqa: E501 # these query object are equivalent: - # 1) { granularity: 'dttm_col', time_range: '2020 : 2021', time_offsets: ['1 year ago']} + # 1) { granularity: 'dttm_col', time_range: '2020 : 2021', time_offsets: ['1 year ago']} # noqa: E501 # 2) { columns: [ - # {label: 'dttm_col', sqlExpression: 'dttm_col', "columnType": "BASE_AXIS" } + # {label: 'dttm_col', sqlExpression: 'dttm_col', "columnType": "BASE_AXIS" } # noqa: E501 # ], # time_offsets: ['1 year ago'], - # filters: [{col: 'dttm_col', op: 'TEMPORAL_RANGE', val: '2020 : 2021'}], + # filters: [{col: 'dttm_col', op: 'TEMPORAL_RANGE', val: '2020 : 2021'}], # noqa: E501 # } original_offset = offset if self.is_valid_date(offset) or offset == "inherit": @@ -494,10 +494,10 @@ class QueryContextProcessor: if flt.get("col") != x_axis_label ] - # Inherit or custom start dates might compute the same offset but the response cannot be given - # using cached data unless you are using the same date of inherited range, that's why we - # set the cache cache using a custom key that includes the original offset and the computed offset - # for those two scenarios, the rest of the scenarios will use the original offset as cache key + # Inherit or custom start dates might compute the same offset but the response cannot be given # noqa: E501 + # using cached data unless you are using the same date of inherited range, that's why we # noqa: E501 + # set the cache cache using a custom key that includes the original offset and the computed offset # noqa: E501 + # for those two scenarios, the rest of the scenarios will use the original offset as cache key # noqa: E501 cached_time_offset_key = ( offset if offset == original_offset else f"{offset}_{original_offset}" ) @@ -824,7 +824,7 @@ class QueryContextProcessor: return annotation_data @staticmethod - def get_viz_annotation_data( + def get_viz_annotation_data( # noqa: C901 annotation_layer: dict[str, Any], force: bool ) -> dict[str, Any]: # pylint: disable=import-outside-toplevel diff --git a/superset/common/query_object.py b/superset/common/query_object.py index 299947d27..32ab93448 100644 --- 
a/superset/common/query_object.py +++ b/superset/common/query_object.py @@ -346,7 +346,7 @@ class QueryObject: # pylint: disable=too-many-instance-attributes default=str, ) - def cache_key(self, **extra: Any) -> str: + def cache_key(self, **extra: Any) -> str: # noqa: C901 """ The cache key is made out of the key/values from to_dict(), plus any other key/values in `extra` diff --git a/superset/common/utils/query_cache_manager.py b/superset/common/utils/query_cache_manager.py index d2e6e0743..8a4883711 100644 --- a/superset/common/utils/query_cache_manager.py +++ b/superset/common/utils/query_cache_manager.py @@ -51,7 +51,7 @@ class QueryCacheManager: # pylint: disable=too-many-instance-attributes,too-many-arguments def __init__( self, - df: DataFrame = DataFrame(), + df: DataFrame = DataFrame(), # noqa: B008 query: str = "", annotation_data: dict[str, Any] | None = None, applied_template_filters: list[str] | None = None, diff --git a/superset/config.py b/superset/config.py index 9246e0f03..0bce5a876 100644 --- a/superset/config.py +++ b/superset/config.py @@ -530,7 +530,7 @@ DEFAULT_FEATURE_FLAGS: dict[str, bool] = { "DRILL_BY": True, "DATAPANEL_CLOSED_BY_DEFAULT": False, "HORIZONTAL_FILTER_BAR": False, - # The feature is off by default, and currently only supported in Presto and Postgres, + # The feature is off by default, and currently only supported in Presto and Postgres, # noqa: E501 # and Bigquery. # It also needs to be enabled on a per-database basis, by adding the key/value pair # `cost_estimate_enabled: true` to the database `extra` attribute. @@ -554,7 +554,7 @@ DEFAULT_FEATURE_FLAGS: dict[str, bool] = { "PLAYWRIGHT_REPORTS_AND_THUMBNAILS": False, # Set to True to enable experimental chart plugins "CHART_PLUGINS_EXPERIMENTAL": False, - # Regardless of database configuration settings, force SQLLAB to run async using Celery + # Regardless of database configuration settings, force SQLLAB to run async using Celery # noqa: E501 "SQLLAB_FORCE_RUN_ASYNC": False, # Set to True to to enable factory resent CLI command "ENABLE_FACTORY_RESET_COMMAND": False, @@ -1076,7 +1076,7 @@ SQLLAB_QUERY_RESULT_TIMEOUT = 0 # your specific infrastructure. For example, you could analyze queries a posteriori by # running EXPLAIN on them, and compute a histogram of relative costs to present the # cost as a percentile, this step is optional as every db engine spec has its own -# query cost formatter, but it you wanna customize it you can define it inside the config: +# query cost formatter, but it you wanna customize it you can define it inside the config: # noqa: E501 # def postgres_query_cost_formatter( # result: List[Dict[str, Any]] @@ -1147,7 +1147,7 @@ CSV_TO_HIVE_UPLOAD_DIRECTORY = "EXTERNAL_HIVE_TABLES/" # Function that creates upload directory dynamically based on the # database used, user and schema provided. -def CSV_TO_HIVE_UPLOAD_DIRECTORY_FUNC( # pylint: disable=invalid-name +def CSV_TO_HIVE_UPLOAD_DIRECTORY_FUNC( # pylint: disable=invalid-name # noqa: N802 database: Database, user: models.User, # pylint: disable=unused-argument schema: str | None, @@ -1214,7 +1214,7 @@ SMTP_STARTTLS = True SMTP_SSL = False SMTP_USER = "superset" SMTP_PORT = 25 -SMTP_PASSWORD = "superset" +SMTP_PASSWORD = "superset" # noqa: S105 SMTP_MAIL_FROM = "superset@superset.com" # If True creates a default SSL context with ssl.Purpose.CLIENT_AUTH using the # default system root CA certificates. 
@@ -1265,7 +1265,7 @@ TRACKING_URL_TRANSFORMER = lambda url: url # noqa: E731 DB_POLL_INTERVAL_SECONDS: dict[str, int] = {} # Interval between consecutive polls when using Presto Engine -# See here: https://github.com/dropbox/PyHive/blob/8eb0aeab8ca300f3024655419b93dad926c1a351/pyhive/presto.py#L93 # pylint: disable=line-too-long,useless-suppression +# See here: https://github.com/dropbox/PyHive/blob/8eb0aeab8ca300f3024655419b93dad926c1a351/pyhive/presto.py#L93 # pylint: disable=line-too-long,useless-suppression # noqa: E501 PRESTO_POLL_INTERVAL = int(timedelta(seconds=1).total_seconds()) # Allow list of custom authentications for each DB engine. @@ -1361,17 +1361,17 @@ DISALLOWED_SQL_FUNCTIONS: dict[str, set[str]] = { # NOTE: For backward compatibility, you can unpack any of the above arguments in your # function definition, but keep the **kwargs as the last argument to allow new args # to be added later without any errors. -# NOTE: whatever you in this function DOES NOT affect the cache key, so ideally this function +# NOTE: whatever you in this function DOES NOT affect the cache key, so ideally this function # noqa: E501 # is "functional", as in deterministic from its input. -def SQL_QUERY_MUTATOR( # pylint: disable=invalid-name,unused-argument +def SQL_QUERY_MUTATOR( # pylint: disable=invalid-name,unused-argument # noqa: N802 sql: str, **kwargs: Any ) -> str: return sql -# A variable that chooses whether to apply the SQL_QUERY_MUTATOR before or after splitting the input query +# A variable that chooses whether to apply the SQL_QUERY_MUTATOR before or after splitting the input query # noqa: E501 # It allows for using the SQL_QUERY_MUTATOR function for more than comments -# Usage: If you want to apply a change to every statement to a given query, set MUTATE_AFTER_SPLIT = True +# Usage: If you want to apply a change to every statement to a given query, set MUTATE_AFTER_SPLIT = True # noqa: E501 # An example use case is if data has role based access controls, and you want to apply # a SET ROLE statement alongside every user query. Changing this variable maintains # functionality for both the SQL_Lab and Charts. @@ -1381,7 +1381,7 @@ MUTATE_AFTER_SPLIT = False # This allows for a user to add header data to any outgoing emails. For example, # if you need to include metadata in the header or you want to change the specifications # of the email title, header, or sender. -def EMAIL_HEADER_MUTATOR( # pylint: disable=invalid-name,unused-argument +def EMAIL_HEADER_MUTATOR( # pylint: disable=invalid-name,unused-argument # noqa: N802 msg: MIMEMultipart, **kwargs: Any ) -> MIMEMultipart: return msg @@ -1397,7 +1397,7 @@ EXCLUDE_USERS_FROM_LISTS: list[str] | None = None # list/dropdown if you do not want these dbs to show as available. # The available list is generated by driver installed, and some engines have multiple # drivers. -# e.g., DBS_AVAILABLE_DENYLIST: Dict[str, Set[str]] = {"databricks": {"pyhive", "pyodbc"}} +# e.g., DBS_AVAILABLE_DENYLIST: Dict[str, Set[str]] = {"databricks": {"pyhive", "pyodbc"}} # noqa: E501 DBS_AVAILABLE_DENYLIST: dict[str, set[str]] = {} # This auth provider is used by background (offline) tasks that need to access @@ -1545,7 +1545,7 @@ TEST_DATABASE_CONNECTION_TIMEOUT = timedelta(seconds=30) # Details needed for databases that allows user to authenticate using personal OAuth2 # tokens. See https://github.com/apache/superset/issues/20300 for more information. The # scope and URIs are usually optional. 
-# NOTE that if you change the id, scope, or URIs in this file, you probably need to purge +# NOTE that if you change the id, scope, or URIs in this file, you probably need to purge # noqa: E501 # the existing tokens from the database. This needs to be done by running a query to # delete the existing tokens. DATABASE_OAUTH2_CLIENTS: dict[str, dict[str, Any]] = { @@ -1594,7 +1594,7 @@ TALISMAN_CONFIG = { "data:", "https://apachesuperset.gateway.scarf.sh", "https://static.scarf.sh/", - # "https://avatars.slack-edge.com", # Uncomment when SLACK_ENABLE_AVATARS is True + # "https://avatars.slack-edge.com", # Uncomment when SLACK_ENABLE_AVATARS is True # noqa: E501 ], "worker-src": ["'self'", "blob:"], "connect-src": [ @@ -1729,7 +1729,7 @@ GLOBAL_ASYNC_QUERIES_JWT_COOKIE_SAMESITE: None | (Literal["None", "Lax", "Strict None ) GLOBAL_ASYNC_QUERIES_JWT_COOKIE_DOMAIN = None -GLOBAL_ASYNC_QUERIES_JWT_SECRET = "test-secret-change-me" +GLOBAL_ASYNC_QUERIES_JWT_SECRET = "test-secret-change-me" # noqa: S105 GLOBAL_ASYNC_QUERIES_TRANSPORT: Literal["polling", "ws"] = "polling" GLOBAL_ASYNC_QUERIES_POLLING_DELAY = int( timedelta(milliseconds=500).total_seconds() * 1000 @@ -1760,9 +1760,9 @@ GLOBAL_ASYNC_QUERIES_CACHE_BACKEND = { # Embedded config options GUEST_ROLE_NAME = "Public" -GUEST_TOKEN_JWT_SECRET = "test-guest-secret-change-me" -GUEST_TOKEN_JWT_ALGO = "HS256" -GUEST_TOKEN_HEADER_NAME = "X-GuestToken" +GUEST_TOKEN_JWT_SECRET = "test-guest-secret-change-me" # noqa: S105 +GUEST_TOKEN_JWT_ALGO = "HS256" # noqa: S105 +GUEST_TOKEN_HEADER_NAME = "X-GuestToken" # noqa: S105 GUEST_TOKEN_JWT_EXP_SECONDS = 300 # 5 minutes # Guest token audience for the embedded superset, either string or callable GUEST_TOKEN_JWT_AUDIENCE: Callable[[], str] | str | None = None @@ -1829,7 +1829,7 @@ ZIPPED_FILE_MAX_SIZE = 100 * 1024 * 1024 # 100MB # Max allowed compression ratio for a zipped file ZIP_FILE_MAX_COMPRESS_RATIO = 200.0 -# Configuration for environment tag shown on the navbar. Setting 'text' to '' will hide the tag. +# Configuration for environment tag shown on the navbar. Setting 'text' to '' will hide the tag. # noqa: E501 # 'color' can either be a hex color code, or a dot-indexed theme color (e.g. error.base) ENVIRONMENT_TAG_CONFIG = { "variable": "SUPERSET_ENV", diff --git a/superset/connectors/sqla/models.py b/superset/connectors/sqla/models.py index 01889cb2e..ac3a75481 100644 --- a/superset/connectors/sqla/models.py +++ b/superset/connectors/sqla/models.py @@ -404,7 +404,7 @@ class BaseDatasource(AuditMixinNullable, ImportExportMixin): # pylint: disable= "select_star": self.select_star, } - def data_for_slices( # pylint: disable=too-many-locals + def data_for_slices( # pylint: disable=too-many-locals # noqa: C901 self, slices: list[Slice] ) -> dict[str, Any]: """ @@ -507,7 +507,7 @@ class BaseDatasource(AuditMixinNullable, ImportExportMixin): # pylint: disable= return data @staticmethod - def filter_values_handler( # pylint: disable=too-many-arguments + def filter_values_handler( # pylint: disable=too-many-arguments # noqa: C901 values: FilterValues | None, operator: str, target_generic_type: utils.GenericDataType, @@ -727,7 +727,7 @@ class BaseDatasource(AuditMixinNullable, ImportExportMixin): # pylint: disable= :param template_processor: The template processor to apply to the filters. :returns: A list of SQL clauses to be ANDed together. 
- """ + """ # noqa: E501 template_processor = template_processor or self.get_template_processor() all_filters: list[TextClause] = [] @@ -1062,7 +1062,7 @@ class SqlMetric(AuditMixinNullable, ImportExportMixin, CertificationMixin, Model "extra", "warning_text", ] - update_from_object_fields = list(s for s in export_fields if s != "table_id") + update_from_object_fields = list(s for s in export_fields if s != "table_id") # noqa: C400 export_parent = "table" def __repr__(self) -> str: @@ -1749,7 +1749,7 @@ class SqlaTable( # errors. This is particularly important for database OAuth2, see SIP-85. raise except Exception as ex: # pylint: disable=broad-except - # TODO (betodealmeida): review exception handling while querying the external + # TODO (betodealmeida): review exception handling while querying the external # noqa: E501 # database. Ideally we'd expect and handle external database error, but # everything else / the default should be to let things bubble up. df = pd.DataFrame() @@ -1937,7 +1937,7 @@ class SqlaTable( def default_query(qry: Query) -> Query: return qry.filter_by(is_sqllab_view=False) - def has_extra_cache_key_calls(self, query_obj: QueryObjectDict) -> bool: + def has_extra_cache_key_calls(self, query_obj: QueryObjectDict) -> bool: # noqa: C901 """ Detects the presence of calls to `ExtraCache` methods in items in query_obj that can be templated. If any are present, the query must be evaluated to extract @@ -2033,7 +2033,7 @@ class SqlaTable( """ session = inspect(target).session # pylint: disable=disallowed-name - # Forces an update to the table's changed_on value when a metric or column on the + # Forces an update to the table's changed_on value when a metric or column on the # noqa: E501 # table is updated. This busts the cache key for all charts that use the table. session.execute(update(SqlaTable).where(SqlaTable.id == target.table.id)) diff --git a/superset/connectors/sqla/utils.py b/superset/connectors/sqla/utils.py index 84a6753f2..f810426df 100644 --- a/superset/connectors/sqla/utils.py +++ b/superset/connectors/sqla/utils.py @@ -23,7 +23,7 @@ from typing import Callable, TYPE_CHECKING, TypeVar from uuid import UUID from flask_babel import lazy_gettext as _ -from sqlalchemy.engine.url import URL as SqlaURL +from sqlalchemy.engine.url import URL as SqlaURL # noqa: N811 from sqlalchemy.exc import NoSuchTableError from sqlalchemy.ext.declarative import DeclarativeMeta from sqlalchemy.orm.exc import ObjectDeletedError diff --git a/superset/constants.py b/superset/constants.py index fce4be326..b80540631 100644 --- a/superset/constants.py +++ b/superset/constants.py @@ -27,7 +27,7 @@ USER_AGENT = "Apache Superset" NULL_STRING = "" EMPTY_STRING = "" -CHANGE_ME_SECRET_KEY = "CHANGE_ME_TO_A_COMPLEX_RANDOM_SECRET" +CHANGE_ME_SECRET_KEY = "CHANGE_ME_TO_A_COMPLEX_RANDOM_SECRET" # noqa: S105 # UUID for the examples database EXAMPLES_DB_UUID = "a2dc77af-e654-49bb-b321-40f6b559a1ee" diff --git a/superset/daos/database.py b/superset/daos/database.py index 06b429bb6..09b2fedf9 100644 --- a/superset/daos/database.py +++ b/superset/daos/database.py @@ -51,7 +51,7 @@ class DatabaseDAO(BaseDAO[Database]): of the credentials. The masked values should be unmasked before the database is updated. - """ + """ # noqa: E501 if item and attributes and "encrypted_extra" in attributes: attributes["encrypted_extra"] = item.db_engine_spec.unmask_encrypted_extra( @@ -181,7 +181,7 @@ class SSHTunnelDAO(BaseDAO[SSHTunnel]): the aforementioned fields. 
The masked values should be unmasked before the ssh tunnel is updated. - """ + """ # noqa: E501 # ID cannot be updated so we remove it if present in the payload if item and attributes: diff --git a/superset/daos/tag.py b/superset/daos/tag.py index b155cf15c..1af19bc1d 100644 --- a/superset/daos/tag.py +++ b/superset/daos/tag.py @@ -319,7 +319,7 @@ class TagDAO(BaseDAO[Tag]): Example: favorited_ids([tag1, tag2, tag3]) Output: [tag_id1, tag_id3] # if the current user has favorited tag1 and tag3 - """ + """ # noqa: E501 ids = [tag.id for tag in tags] return [ star.tag_id diff --git a/superset/dashboards/api.py b/superset/dashboards/api.py index c1f1e656f..3a3752830 100644 --- a/superset/dashboards/api.py +++ b/superset/dashboards/api.py @@ -481,7 +481,7 @@ class DashboardRestApi(BaseSupersetModelRestApi): $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' - """ + """ # noqa: E501 try: tabs = DashboardDAO.get_tabs_for_dashboard(id_or_slug) result = self.tab_schema.dump(tabs) diff --git a/superset/dashboards/permalink/schemas.py b/superset/dashboards/permalink/schemas.py index d21e6a433..c3b81632d 100644 --- a/superset/dashboards/permalink/schemas.py +++ b/superset/dashboards/permalink/schemas.py @@ -18,18 +18,18 @@ from marshmallow import fields, Schema class DashboardPermalinkStateSchema(Schema): - dataMask = fields.Dict( + dataMask = fields.Dict( # noqa: N815 required=False, allow_none=True, metadata={"description": "Data mask used for native filter state"}, ) - activeTabs = fields.List( + activeTabs = fields.List( # noqa: N815 fields.String(), required=False, allow_none=True, metadata={"description": "Current active dashboard tabs"}, ) - urlParams = fields.List( + urlParams = fields.List( # noqa: N815 fields.Tuple( ( fields.String( @@ -55,7 +55,7 @@ class DashboardPermalinkStateSchema(Schema): class DashboardPermalinkSchema(Schema): - dashboardId = fields.String( + dashboardId = fields.String( # noqa: N815 required=True, allow_none=False, metadata={"description": "The id or slug of the dashboard"}, diff --git a/superset/dashboards/schemas.py b/superset/dashboards/schemas.py index 4426a97e7..1295f0b20 100644 --- a/superset/dashboards/schemas.py +++ b/superset/dashboards/schemas.py @@ -180,7 +180,7 @@ class DashboardJSONMetadataSchema(Schema): This field was removed in https://github.com/apache/superset/pull/23228, but might be present in old exports. 
- """ + """ # noqa: E501 if "show_native_filters" in data: del data["show_native_filters"] @@ -438,18 +438,18 @@ class DashboardColorsConfigUpdateSchema(BaseDashboardSchema): class DashboardScreenshotPostSchema(Schema): - dataMask = fields.Dict( + dataMask = fields.Dict( # noqa: N815 keys=fields.Str(), values=fields.Raw(), metadata={"description": "An object representing the data mask."}, ) - activeTabs = fields.List( + activeTabs = fields.List( # noqa: N815 fields.Str(), metadata={"description": "A list representing active tabs."} ) anchor = fields.String( metadata={"description": "A string representing the anchor."} ) - urlParams = fields.List( + urlParams = fields.List( # noqa: N815 fields.Tuple( (fields.Str(), fields.Str()), ), @@ -466,7 +466,7 @@ class GetFavStarIdsSchema(Schema): result = fields.List( fields.Nested(ChartFavStarResponseResult), metadata={ - "description": "A list of results for each corresponding chart in the request" + "description": "A list of results for each corresponding chart in the request" # noqa: E501 }, ) @@ -510,9 +510,9 @@ class DashboardCacheScreenshotResponseSchema(Schema): class CacheScreenshotSchema(Schema): - dataMask = fields.Dict(keys=fields.Str(), values=fields.Raw(), required=False) - activeTabs = fields.List(fields.Str(), required=False) + dataMask = fields.Dict(keys=fields.Str(), values=fields.Raw(), required=False) # noqa: N815 + activeTabs = fields.List(fields.Str(), required=False) # noqa: N815 anchor = fields.Str(required=False) - urlParams = fields.List( + urlParams = fields.List( # noqa: N815 fields.List(fields.Str(), validate=lambda x: len(x) == 2), required=False ) diff --git a/superset/databases/api.py b/superset/databases/api.py index 34bf88976..dcebcc3f6 100644 --- a/superset/databases/api.py +++ b/superset/databases/api.py @@ -405,7 +405,7 @@ class DatabaseRestApi(BaseSupersetModelRestApi): log_to_statsd=False, ) @requires_json - def post(self) -> FlaskResponse: + def post(self) -> FlaskResponse: # noqa: C901 """Create a new database. 
--- post: @@ -2076,7 +2076,7 @@ class DatabaseRestApi(BaseSupersetModelRestApi): if ( hasattr(engine_spec, "parameters_json_schema") and hasattr(engine_spec, "sqlalchemy_uri_placeholder") - and getattr(engine_spec, "default_driver") in drivers + and engine_spec.default_driver in drivers ): payload["parameters"] = engine_spec.parameters_json_schema() payload["sqlalchemy_uri_placeholder"] = ( @@ -2260,7 +2260,7 @@ class DatabaseRestApi(BaseSupersetModelRestApi): $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' - """ + """ # noqa: E501 database = DatabaseDAO.find_by_id(pk) if not database: return self.response_404() diff --git a/superset/databases/schemas.py b/superset/databases/schemas.py index ed4e67d30..0c821a6f3 100644 --- a/superset/databases/schemas.py +++ b/superset/databases/schemas.py @@ -652,7 +652,7 @@ class TableMetadataOptionsResponseSchema(Schema): class TableMetadataColumnsResponseSchema(Schema): keys = fields.List(fields.String(), metadata={"description": ""}) - longType = fields.String( + longType = fields.String( # noqa: N815 metadata={"description": "The actual backend long type for the column"} ) name = fields.String(metadata={"description": "The column name"}) @@ -697,7 +697,7 @@ class TableMetadataResponseSchema(Schema): fields.Nested(TableMetadataColumnsResponseSchema), metadata={"description": "A list of columns and their metadata"}, ) - foreignKeys = fields.List( + foreignKeys = fields.List( # noqa: N815 fields.Nested(TableMetadataForeignKeysIndexesResponseSchema), metadata={"description": "A list of foreign keys and their metadata"}, ) @@ -705,11 +705,11 @@ class TableMetadataResponseSchema(Schema): fields.Nested(TableMetadataForeignKeysIndexesResponseSchema), metadata={"description": "A list of indexes and their metadata"}, ) - primaryKey = fields.Nested( + primaryKey = fields.Nested( # noqa: N815 TableMetadataPrimaryKeyResponseSchema, metadata={"description": "Primary keys metadata"}, ) - selectStar = fields.String(metadata={"description": "SQL select star"}) + selectStar = fields.String(metadata={"description": "SQL select star"}) # noqa: N815 class TableExtraMetadataResponseSchema(Schema): @@ -884,7 +884,7 @@ class ImportV1DatabaseSchema(Schema): raise ValidationError("Must provide a password for the database") @validates_schema - def validate_ssh_tunnel_credentials( + def validate_ssh_tunnel_credentials( # noqa: C901 self, data: dict[str, Any], **kwargs: Any ) -> None: """If ssh_tunnel has a masked credentials, credentials are required""" @@ -973,7 +973,7 @@ class EngineInformationSchema(Schema): ) supports_dynamic_catalog = fields.Boolean( metadata={ - "description": "The database supports multiple catalogs in a single connection" + "description": "The database supports multiple catalogs in a single connection" # noqa: E501 } ) supports_oauth2 = fields.Boolean( diff --git a/superset/datasets/api.py b/superset/datasets/api.py index 762727aaf..b9094f306 100644 --- a/superset/datasets/api.py +++ b/superset/datasets/api.py @@ -1052,7 +1052,7 @@ class DatasetRestApi(BaseSupersetModelRestApi): $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' - """ + """ # noqa: E501 try: body = DatasetCacheWarmUpRequestSchema().load(request.json) except ValidationError as error: diff --git a/superset/datasets/schemas.py b/superset/datasets/schemas.py index 4b7e92d7f..1d271d3da 100644 --- a/superset/datasets/schemas.py +++ b/superset/datasets/schemas.py @@ -300,7 +300,7 @@ class DatasetCacheWarmUpRequestSchema(Schema): ) dashboard_id 
= fields.Integer( metadata={ - "description": "The ID of the dashboard to get filters for when warming cache" + "description": "The ID of the dashboard to get filters for when warming cache" # noqa: E501 } ) extra_filters = fields.String( diff --git a/superset/db_engine_specs/__init__.py b/superset/db_engine_specs/__init__.py index 2b840087c..3c0911c20 100644 --- a/superset/db_engine_specs/__init__.py +++ b/superset/db_engine_specs/__init__.py @@ -94,7 +94,7 @@ def get_engine_spec(backend: str, driver: Optional[str] = None) -> type[BaseEngi supporting that driver exists then a backend-only match is done, in order to allow new drivers to work with Superset even if they are not listed in the DB engine spec drivers. - """ + """ # noqa: E501 engine_specs = load_engine_specs() if driver is not None: @@ -121,7 +121,7 @@ backend_replacements = { # pylint: disable=too-many-branches -def get_available_engine_specs() -> dict[type[BaseEngineSpec], set[str]]: +def get_available_engine_specs() -> dict[type[BaseEngineSpec], set[str]]: # noqa: C901 """ Return available engine specs and installed drivers for them. """ diff --git a/superset/db_engine_specs/base.py b/superset/db_engine_specs/base.py index e6c52d684..cd2c318ad 100644 --- a/superset/db_engine_specs/base.py +++ b/superset/db_engine_specs/base.py @@ -205,7 +205,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods engine_name: str | None = None # for user messages, overridden in child classes - # These attributes map the DB engine spec to one or more SQLAlchemy dialects/drivers; + # These attributes map the DB engine spec to one or more SQLAlchemy dialects/drivers; # noqa: E501 # see the ``supports_url`` and ``supports_backend`` methods below. engine = "base" # str as defined in sqlalchemy.engine.engine engine_aliases: set[str] = set() @@ -410,12 +410,12 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods # if True, database will be listed as option in the upload file form supports_file_upload = True - # Is the DB engine spec able to change the default schema? This requires implementing + # Is the DB engine spec able to change the default schema? This requires implementing # noqa: E501 # a custom `adjust_engine_params` method. supports_dynamic_schema = False # Does the DB support catalogs? A catalog here is a group of schemas, and has - # different names depending on the DB: BigQuery calles it a "project", Postgres calls + # different names depending on the DB: BigQuery calles it a "project", Postgres calls # noqa: E501 # it a "database", Trino calls it a "catalog", etc. # # When this is changed to true in a DB engine spec it MUST support the @@ -433,7 +433,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods oauth2_scope = "" oauth2_authorization_request_uri: str | None = None # pylint: disable=invalid-name oauth2_token_request_uri: str | None = None - oauth2_token_request_type = "data" + oauth2_token_request_type = "data" # noqa: S105 # Driver-specific exception that should be mapped to OAuth2RedirectError oauth2_exception = OAuth2RedirectError @@ -690,7 +690,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods ) -> str | None: """ Return the schema configured in a SQLALchemy URI and connection arguments, if any. 
- """ + """ # noqa: E501 return None @classmethod @@ -719,7 +719,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods Determining the correct schema is crucial for managing access to data, so please make sure you understand this logic when working on a new DB engine spec. - """ + """ # noqa: E501 # dynamic schema varies on a per-query basis if cls.supports_dynamic_schema: return query.schema @@ -808,7 +808,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods ... connection = engine.connect() ... connection.execute(sql) - """ + """ # noqa: E501 return database.get_sqla_engine(catalog=catalog, schema=schema, source=source) @classmethod @@ -1101,7 +1101,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods """ # old method that doesn't work with catalogs if hasattr(cls, "extra_table_metadata"): - warnings.warn( + warnings.warn( # noqa: B028 "The `extra_table_metadata` method is deprecated, please implement " "the `get_extra_table_metadata` method in the DB engine spec.", DeprecationWarning, @@ -1145,7 +1145,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods return sql @classmethod - def apply_top_to_sql(cls, sql: str, limit: int) -> str: + def apply_top_to_sql(cls, sql: str, limit: int) -> str: # noqa: C901 """ Alters the SQL statement to apply a TOP clause :param limit: Maximum number of rows to be returned by the query @@ -1419,7 +1419,7 @@ class BaseEngineSpec: # pylint: disable=too-many-public-methods that when catalog support is added to Superset the interface remains the same. This is important because DB engine specs can be installed from 3rd party packages, so we want to keep these methods as stable as possible. - """ + """ # noqa: E501 return uri, { **connect_args, **cls.enforce_uri_query_params.get(uri.get_driver_name(), {}), diff --git a/superset/db_engine_specs/clickhouse.py b/superset/db_engine_specs/clickhouse.py index aa5399929..437c0d25c 100644 --- a/superset/db_engine_specs/clickhouse.py +++ b/superset/db_engine_specs/clickhouse.py @@ -285,7 +285,7 @@ class ClickHouseConnectEngineSpec(BasicParametersMixin, ClickHouseEngineSpec): return cls._function_names try: names = database.get_df( - "SELECT name FROM system.functions UNION ALL " + "SELECT name FROM system.functions UNION ALL " # noqa: S608 + "SELECT name FROM system.table_functions LIMIT 10000" )["name"].tolist() cls._function_names = names diff --git a/superset/db_engine_specs/couchbase.py b/superset/db_engine_specs/couchbase.py index f42fb699b..ac48ef11f 100644 --- a/superset/db_engine_specs/couchbase.py +++ b/superset/db_engine_specs/couchbase.py @@ -177,7 +177,7 @@ class CouchbaseEngineSpec(BasicParametersMixin, BaseEngineSpec): ) -> list[SupersetError]: """ Couchbase local server needs hostname and port but on cloud we need only connection String along with credentials to connect. - """ + """ # noqa: E501 errors: list[SupersetError] = [] required = {"host", "username", "password", "database"} @@ -197,7 +197,7 @@ class CouchbaseEngineSpec(BasicParametersMixin, BaseEngineSpec): host = parameters.get("host", None) if not host: return errors - # host can be a connection string in case of couchbase cloud. So Connection Check is not required in that case. + # host can be a connection string in case of couchbase cloud. So Connection Check is not required in that case. 
# noqa: E501 if not is_hostname_valid(host): errors.append( SupersetError( diff --git a/superset/db_engine_specs/duckdb.py b/superset/db_engine_specs/duckdb.py index 89c45fb57..9a3ce7258 100644 --- a/superset/db_engine_specs/duckdb.py +++ b/superset/db_engine_specs/duckdb.py @@ -42,7 +42,7 @@ if TYPE_CHECKING: COLUMN_DOES_NOT_EXIST_REGEX = re.compile("no such column: (?P.+)") DEFAULT_ACCESS_TOKEN_URL = ( - "https://app.motherduck.com/token-request?appName=Superset&close=y" + "https://app.motherduck.com/token-request?appName=Superset&close=y" # noqa: S105 ) @@ -112,7 +112,7 @@ class DuckDBParametersMixin: """ Build SQLAlchemy URI for connecting to a DuckDB database. If an access token is specified, return a URI to connect to a MotherDuck database. - """ + """ # noqa: E501 if parameters is None: parameters = {} query = parameters.get("query", {}) diff --git a/superset/db_engine_specs/gsheets.py b/superset/db_engine_specs/gsheets.py index 070be5a92..65afd9aee 100644 --- a/superset/db_engine_specs/gsheets.py +++ b/superset/db_engine_specs/gsheets.py @@ -114,7 +114,7 @@ class GSheetsEngineSpec(ShillelaghEngineSpec): oauth2_authorization_request_uri = ( # pylint: disable=invalid-name "https://accounts.google.com/o/oauth2/v2/auth" ) - oauth2_token_request_uri = "https://oauth2.googleapis.com/token" + oauth2_token_request_uri = "https://oauth2.googleapis.com/token" # noqa: S105 oauth2_exception = UnauthenticatedError @classmethod @@ -271,7 +271,7 @@ class GSheetsEngineSpec(ShillelaghEngineSpec): return errors try: - results = conn.execute(f'SELECT * FROM "{url}" LIMIT 1') + results = conn.execute(f'SELECT * FROM "{url}" LIMIT 1') # noqa: S608 results.fetchall() except Exception: # pylint: disable=broad-except errors.append( diff --git a/superset/db_engine_specs/hive.py b/superset/db_engine_specs/hive.py index c2cde83c7..9491ff588 100644 --- a/superset/db_engine_specs/hive.py +++ b/superset/db_engine_specs/hive.py @@ -110,12 +110,12 @@ class HiveEngineSpec(PrestoEngineSpec): TimeGrain.MINUTE: "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:mm:00')", TimeGrain.HOUR: "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd HH:00:00')", TimeGrain.DAY: "from_unixtime(unix_timestamp({col}), 'yyyy-MM-dd 00:00:00')", - TimeGrain.WEEK: "date_format(date_sub({col}, CAST(7-from_unixtime(unix_timestamp({col}),'u') as int)), 'yyyy-MM-dd 00:00:00')", + TimeGrain.WEEK: "date_format(date_sub({col}, CAST(7-from_unixtime(unix_timestamp({col}),'u') as int)), 'yyyy-MM-dd 00:00:00')", # noqa: E501 TimeGrain.MONTH: "from_unixtime(unix_timestamp({col}), 'yyyy-MM-01 00:00:00')", - TimeGrain.QUARTER: "date_format(add_months(trunc({col}, 'MM'), -(month({col})-1)%3), 'yyyy-MM-dd 00:00:00')", + TimeGrain.QUARTER: "date_format(add_months(trunc({col}, 'MM'), -(month({col})-1)%3), 'yyyy-MM-dd 00:00:00')", # noqa: E501 TimeGrain.YEAR: "from_unixtime(unix_timestamp({col}), 'yyyy-01-01 00:00:00')", - TimeGrain.WEEK_ENDING_SATURDAY: "date_format(date_add({col}, INT(6-from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')", - TimeGrain.WEEK_STARTING_SUNDAY: "date_format(date_add({col}, -INT(from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')", + TimeGrain.WEEK_ENDING_SATURDAY: "date_format(date_add({col}, INT(6-from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')", # noqa: E501 + TimeGrain.WEEK_STARTING_SUNDAY: "date_format(date_add({col}, -INT(from_unixtime(unix_timestamp({col}), 'u'))), 'yyyy-MM-dd 00:00:00')", # noqa: E501 } # Scoping regex at class level to avoid recompiling @@ -338,7 
+338,7 @@ class HiveEngineSpec(PrestoEngineSpec): return None @classmethod - def handle_cursor( # pylint: disable=too-many-locals + def handle_cursor( # pylint: disable=too-many-locals # noqa: C901 cls, cursor: Any, query: Query ) -> None: """Updates progress information""" @@ -404,7 +404,7 @@ class HiveEngineSpec(PrestoEngineSpec): db.session.commit() # pylint: disable=consider-using-transaction if sleep_interval := current_app.config.get("HIVE_POLL_INTERVAL"): logger.warning( - "HIVE_POLL_INTERVAL is deprecated and will be removed in 3.0. Please use DB_POLL_INTERVAL_SECONDS instead" + "HIVE_POLL_INTERVAL is deprecated and will be removed in 3.0. Please use DB_POLL_INTERVAL_SECONDS instead" # noqa: E501 ) else: sleep_interval = current_app.config["DB_POLL_INTERVAL_SECONDS"].get( @@ -553,7 +553,7 @@ class HiveEngineSpec(PrestoEngineSpec): # Must be Hive connection, enable impersonation, and set optional param # auth=LDAP|KERBEROS - # this will set hive.server2.proxy.user=$effective_username on connect_args['configuration'] + # this will set hive.server2.proxy.user=$effective_username on connect_args['configuration'] # noqa: E501 if backend_name == "hive" and username is not None: configuration = connect_args.get("configuration", {}) configuration["hive.server2.proxy.user"] = username diff --git a/superset/db_engine_specs/impala.py b/superset/db_engine_specs/impala.py index ce34ae564..456f81c79 100644 --- a/superset/db_engine_specs/impala.py +++ b/superset/db_engine_specs/impala.py @@ -124,7 +124,7 @@ class ImpalaEngineSpec(BaseEngineSpec): while status in unfinished_states: db.session.refresh(query) query = db.session.query(Query).filter_by(id=query_id).one() - # if query cancelation was requested prior to the handle_cursor call, but + # if query cancelation was requested prior to the handle_cursor call, but # noqa: E501 # the query was still executed # modified in stop_query in views / core.py is reflected here. 
# stop query diff --git a/superset/db_engine_specs/kusto.py b/superset/db_engine_specs/kusto.py index 696faf74b..56b93b2ec 100644 --- a/superset/db_engine_specs/kusto.py +++ b/superset/db_engine_specs/kusto.py @@ -46,7 +46,7 @@ class KustoSqlEngineSpec(BaseEngineSpec): # pylint: disable=abstract-method TimeGrain.SECOND: "DATEADD(second, \ 'DATEDIFF(second, 2000-01-01', {col}), '2000-01-01')", TimeGrain.MINUTE: "DATEADD(minute, DATEDIFF(minute, 0, {col}), 0)", - TimeGrain.FIVE_MINUTES: "DATEADD(minute, DATEDIFF(minute, 0, {col}) / 5 * 5, 0)", + TimeGrain.FIVE_MINUTES: "DATEADD(minute, DATEDIFF(minute, 0, {col}) / 5 * 5, 0)", # noqa: E501 TimeGrain.TEN_MINUTES: "DATEADD(minute, \ DATEDIFF(minute, 0, {col}) / 10 * 10, 0)", TimeGrain.FIFTEEN_MINUTES: "DATEADD(minute, \ diff --git a/superset/db_engine_specs/kylin.py b/superset/db_engine_specs/kylin.py index 348515003..42f3aeab1 100644 --- a/superset/db_engine_specs/kylin.py +++ b/superset/db_engine_specs/kylin.py @@ -31,8 +31,8 @@ class KylinEngineSpec(BaseEngineSpec): # pylint: disable=abstract-method _time_grain_expressions = { None: "{col}", - TimeGrain.SECOND: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)", - TimeGrain.MINUTE: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)", + TimeGrain.SECOND: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO SECOND) AS TIMESTAMP)", # noqa: E501 + TimeGrain.MINUTE: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO MINUTE) AS TIMESTAMP)", # noqa: E501 TimeGrain.HOUR: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO HOUR) AS TIMESTAMP)", TimeGrain.DAY: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO DAY) AS DATE)", TimeGrain.WEEK: "CAST(FLOOR(CAST({col} AS TIMESTAMP) TO WEEK) AS DATE)", diff --git a/superset/db_engine_specs/lib.py b/superset/db_engine_specs/lib.py index 398d5af64..fc7a8168b 100644 --- a/superset/db_engine_specs/lib.py +++ b/superset/db_engine_specs/lib.py @@ -222,7 +222,7 @@ def generate_table() -> list[list[Any]]: rows = [] # pylint: disable=redefined-outer-name rows.append(["Feature"] + list(info)) # header row - rows.append(["Module"] + list(db_info["module"] for db_info in info.values())) + rows.append(["Module"] + list(db_info["module"] for db_info in info.values())) # noqa: C400 # descriptive keys = [ @@ -243,14 +243,14 @@ def generate_table() -> list[list[Any]]: ] for key in keys: rows.append( - [DATABASE_DETAILS[key]] + list(db_info[key] for db_info in info.values()) + [DATABASE_DETAILS[key]] + list(db_info[key] for db_info in info.values()) # noqa: C400 ) # basic for time_grain in TimeGrain: rows.append( [f"Has time grain {time_grain.name}"] - + list(db_info["time_grains"][time_grain.name] for db_info in info.values()) + + list(db_info["time_grains"][time_grain.name] for db_info in info.values()) # noqa: C400 ) keys = [ "masked_encrypted_extra", @@ -259,7 +259,7 @@ def generate_table() -> list[list[Any]]: ] for key in keys: rows.append( - [BASIC_FEATURES[key]] + list(db_info[key] for db_info in info.values()) + [BASIC_FEATURES[key]] + list(db_info[key] for db_info in info.values()) # noqa: C400 ) # nice to have @@ -280,7 +280,7 @@ def generate_table() -> list[list[Any]]: for key in keys: rows.append( [NICE_TO_HAVE_FEATURES[key]] - + list(db_info[key] for db_info in info.values()) + + list(db_info[key] for db_info in info.values()) # noqa: C400 ) # advanced @@ -291,10 +291,10 @@ def generate_table() -> list[list[Any]]: ] for key in keys: rows.append( - [ADVANCED_FEATURES[key]] + list(db_info[key] for db_info in info.values()) + [ADVANCED_FEATURES[key]] + list(db_info[key] for db_info in 
info.values()) # noqa: C400 ) - rows.append(["Score"] + list(db_info["score"] for db_info in info.values())) + rows.append(["Score"] + list(db_info["score"] for db_info in info.values())) # noqa: C400 return rows diff --git a/superset/db_engine_specs/mssql.py b/superset/db_engine_specs/mssql.py index 5b8ba457b..c1f7e295d 100644 --- a/superset/db_engine_specs/mssql.py +++ b/superset/db_engine_specs/mssql.py @@ -160,7 +160,7 @@ class MssqlEngineSpec(BaseEngineSpec): def extract_error_message(cls, ex: Exception) -> str: if str(ex).startswith("(8155,"): return ( - f"{cls.engine} error: All your SQL functions need to " + f"{cls.engine} error: All your SQL functions need to " # noqa: S608 "have an alias on MSSQL. For example: SELECT COUNT(*) AS C1 FROM TABLE1" ) return f"{cls.engine} error: {cls._extract_error_message(ex)}" diff --git a/superset/db_engine_specs/ocient.py b/superset/db_engine_specs/ocient.py index e5826097e..e740ca938 100644 --- a/superset/db_engine_specs/ocient.py +++ b/superset/db_engine_specs/ocient.py @@ -214,7 +214,7 @@ def _find_columns_to_sanitize(cursor: Any) -> list[PlacedSanitizeFunc]: :param cursor: the result set cursor :returns: the list of tuples consisting of the column index and sanitization function - """ + """ # noqa: E501 return [ PlacedSanitizeFunc(i, _sanitized_ocient_type_codes[cursor.description[i][1]]) for i in range(len(cursor.description)) @@ -317,9 +317,7 @@ class OcientEngineSpec(BaseEngineSpec): rows: list[tuple[Any, ...]] = super().fetch_data(cursor, limit) except Exception: with OcientEngineSpec.query_id_mapping_lock: - del OcientEngineSpec.query_id_mapping[ - getattr(cursor, "superset_query_id") - ] + del OcientEngineSpec.query_id_mapping[cursor.superset_query_id] raise # TODO: Unsure if we need to verify that we are receiving rows: @@ -376,7 +374,7 @@ class OcientEngineSpec(BaseEngineSpec): OcientEngineSpec.query_id_mapping[query.id] = cursor.query_id # Add the query id to the cursor - setattr(cursor, "superset_query_id", query.id) + cursor.superset_query_id = query.id return super().handle_cursor(cursor, query) @classmethod diff --git a/superset/db_engine_specs/pinot.py b/superset/db_engine_specs/pinot.py index f7798da9b..54fa72c29 100644 --- a/superset/db_engine_specs/pinot.py +++ b/superset/db_engine_specs/pinot.py @@ -44,14 +44,14 @@ class PinotEngineSpec(BaseEngineSpec): + "CAST({col} AS TIMESTAMP)), 900000) AS TIMESTAMP)", TimeGrain.THIRTY_MINUTES: "CAST(ROUND(DATE_TRUNC('minute', " + "CAST({col} AS TIMESTAMP)), 1800000) AS TIMESTAMP)", - TimeGrain.HOUR: "CAST(DATE_TRUNC('hour', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", + TimeGrain.HOUR: "CAST(DATE_TRUNC('hour', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", # noqa: E501 TimeGrain.DAY: "CAST(DATE_TRUNC('day', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", - TimeGrain.WEEK: "CAST(DATE_TRUNC('week', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", + TimeGrain.WEEK: "CAST(DATE_TRUNC('week', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", # noqa: E501 TimeGrain.MONTH: "CAST(DATE_TRUNC('month', " + "CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", TimeGrain.QUARTER: "CAST(DATE_TRUNC('quarter', " + "CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", - TimeGrain.YEAR: "CAST(DATE_TRUNC('year', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", + TimeGrain.YEAR: "CAST(DATE_TRUNC('year', CAST({col} AS TIMESTAMP)) AS TIMESTAMP)", # noqa: E501 } @classmethod diff --git a/superset/db_engine_specs/postgres.py b/superset/db_engine_specs/postgres.py index 6281c6b3b..a2e6a3fe1 100644 --- a/superset/db_engine_specs/postgres.py +++ 
b/superset/db_engine_specs/postgres.py @@ -103,13 +103,13 @@ class PostgresBaseEngineSpec(BaseEngineSpec): _time_grain_expressions = { None: "{col}", TimeGrain.SECOND: "DATE_TRUNC('second', {col})", - TimeGrain.FIVE_SECONDS: "DATE_TRUNC('minute', {col}) + INTERVAL '5 seconds' * FLOOR(EXTRACT(SECOND FROM {col}) / 5)", - TimeGrain.THIRTY_SECONDS: "DATE_TRUNC('minute', {col}) + INTERVAL '30 seconds' * FLOOR(EXTRACT(SECOND FROM {col}) / 30)", + TimeGrain.FIVE_SECONDS: "DATE_TRUNC('minute', {col}) + INTERVAL '5 seconds' * FLOOR(EXTRACT(SECOND FROM {col}) / 5)", # noqa: E501 + TimeGrain.THIRTY_SECONDS: "DATE_TRUNC('minute', {col}) + INTERVAL '30 seconds' * FLOOR(EXTRACT(SECOND FROM {col}) / 30)", # noqa: E501 TimeGrain.MINUTE: "DATE_TRUNC('minute', {col})", - TimeGrain.FIVE_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '5 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 5)", - TimeGrain.TEN_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '10 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 10)", - TimeGrain.FIFTEEN_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '15 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 15)", - TimeGrain.THIRTY_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '30 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 30)", + TimeGrain.FIVE_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '5 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 5)", # noqa: E501 + TimeGrain.TEN_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '10 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 10)", # noqa: E501 + TimeGrain.FIFTEEN_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '15 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 15)", # noqa: E501 + TimeGrain.THIRTY_MINUTES: "DATE_TRUNC('hour', {col}) + INTERVAL '30 minutes' * FLOOR(EXTRACT(MINUTE FROM {col}) / 30)", # noqa: E501 TimeGrain.HOUR: "DATE_TRUNC('hour', {col})", TimeGrain.DAY: "DATE_TRUNC('day', {col})", TimeGrain.WEEK: "DATE_TRUNC('week', {col})", @@ -294,7 +294,7 @@ class PostgresEngineSpec(BasicParametersMixin, PostgresBaseEngineSpec): SupersetError( error_type=SupersetErrorType.QUERY_SECURITY_ACCESS_ERROR, message=__( - "Users are not allowed to set a search path for security reasons." + "Users are not allowed to set a search path for security reasons." 
# noqa: E501 ), level=ErrorLevel.ERROR, ) @@ -471,7 +471,7 @@ WHERE datistemplate = false; """ try: cursor.execute( - "SELECT pg_terminate_backend(pid) " + "SELECT pg_terminate_backend(pid) " # noqa: S608 "FROM pg_stat_activity " f"WHERE pid='{cancel_query_id}'" ) diff --git a/superset/db_engine_specs/presto.py b/superset/db_engine_specs/presto.py index 61b4cff5c..3a3dadbbd 100644 --- a/superset/db_engine_specs/presto.py +++ b/superset/db_engine_specs/presto.py @@ -111,7 +111,7 @@ def get_children(column: ResultSetColumnType) -> list[ResultSetColumnType]: :param column: dictionary representing a Presto column :return: list of dictionaries representing children columns - """ + """ # noqa: E501 pattern = re.compile(r"(?P\w+)\((?P.*)\)") if not column["type"]: raise ValueError @@ -256,15 +256,15 @@ class PrestoBaseEngineSpec(BaseEngineSpec, metaclass=ABCMeta): _time_grain_expressions = { None: "{col}", TimeGrain.SECOND: "date_trunc('second', CAST({col} AS TIMESTAMP))", - TimeGrain.FIVE_SECONDS: "date_trunc('second', CAST({col} AS TIMESTAMP)) - interval '1' second * (second(CAST({col} AS TIMESTAMP)) % 5)", - TimeGrain.THIRTY_SECONDS: "date_trunc('second', CAST({col} AS TIMESTAMP)) - interval '1' second * (second(CAST({col} AS TIMESTAMP)) % 30)", + TimeGrain.FIVE_SECONDS: "date_trunc('second', CAST({col} AS TIMESTAMP)) - interval '1' second * (second(CAST({col} AS TIMESTAMP)) % 5)", # noqa: E501 + TimeGrain.THIRTY_SECONDS: "date_trunc('second', CAST({col} AS TIMESTAMP)) - interval '1' second * (second(CAST({col} AS TIMESTAMP)) % 30)", # noqa: E501 TimeGrain.MINUTE: "date_trunc('minute', CAST({col} AS TIMESTAMP))", - TimeGrain.FIVE_MINUTES: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 5)", - TimeGrain.TEN_MINUTES: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 10)", - TimeGrain.FIFTEEN_MINUTES: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 15)", - TimeGrain.HALF_HOUR: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 30)", + TimeGrain.FIVE_MINUTES: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 5)", # noqa: E501 + TimeGrain.TEN_MINUTES: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 10)", # noqa: E501 + TimeGrain.FIFTEEN_MINUTES: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 15)", # noqa: E501 + TimeGrain.HALF_HOUR: "date_trunc('minute', CAST({col} AS TIMESTAMP)) - interval '1' minute * (minute(CAST({col} AS TIMESTAMP)) % 30)", # noqa: E501 TimeGrain.HOUR: "date_trunc('hour', CAST({col} AS TIMESTAMP))", - TimeGrain.SIX_HOURS: "date_trunc('hour', CAST({col} AS TIMESTAMP)) - interval '1' hour * (hour(CAST({col} AS TIMESTAMP)) % 6)", + TimeGrain.SIX_HOURS: "date_trunc('hour', CAST({col} AS TIMESTAMP)) - interval '1' hour * (hour(CAST({col} AS TIMESTAMP)) % 6)", # noqa: E501 TimeGrain.DAY: "date_trunc('day', CAST({col} AS TIMESTAMP))", TimeGrain.WEEK: "date_trunc('week', CAST({col} AS TIMESTAMP))", TimeGrain.MONTH: "date_trunc('month', CAST({col} AS TIMESTAMP))", @@ -512,7 +512,7 @@ class PrestoBaseEngineSpec(BaseEngineSpec, metaclass=ABCMeta): if table.schema else system_table_name ) - partition_select_clause = f"SELECT * FROM {full_table_name}" + partition_select_clause = 
f"SELECT * FROM {full_table_name}" # noqa: S608 sql = dedent( f"""\ @@ -768,7 +768,7 @@ class PrestoBaseEngineSpec(BaseEngineSpec, metaclass=ABCMeta): return result @classmethod - def _parse_structural_column( # pylint: disable=too-many-locals + def _parse_structural_column( # pylint: disable=too-many-locals # noqa: C901 cls, parent_column_name: str, parent_data_type: str, @@ -1139,7 +1139,7 @@ class PrestoEngineSpec(PrestoBaseEngineSpec): ) @classmethod - def expand_data( # pylint: disable=too-many-locals + def expand_data( # pylint: disable=too-many-locals # noqa: C901 cls, columns: list[ResultSetColumnType], data: list[dict[Any, Any]] ) -> tuple[ list[ResultSetColumnType], list[dict[Any, Any]], list[ResultSetColumnType] diff --git a/superset/db_engine_specs/redshift.py b/superset/db_engine_specs/redshift.py index 44281d2c3..8b5a35759 100644 --- a/superset/db_engine_specs/redshift.py +++ b/superset/db_engine_specs/redshift.py @@ -176,7 +176,7 @@ class RedshiftEngineSpec(BasicParametersMixin, PostgresBaseEngineSpec): try: logger.info("Killing Redshift PID:%s", str(cancel_query_id)) cursor.execute( - "SELECT pg_cancel_backend(procpid) " + "SELECT pg_cancel_backend(procpid) " # noqa: S608 "FROM pg_stat_activity " f"WHERE procpid='{cancel_query_id}'" ) diff --git a/superset/db_engine_specs/trino.py b/superset/db_engine_specs/trino.py index 3caec31ac..e4567082e 100644 --- a/superset/db_engine_specs/trino.py +++ b/superset/db_engine_specs/trino.py @@ -79,7 +79,7 @@ class TrinoEngineSpec(PrestoBaseEngineSpec): # OAuth 2.0 supports_oauth2 = True oauth2_exception = TrinoAuthError - oauth2_token_request_type = "data" + oauth2_token_request_type = "data" # noqa: S105 @classmethod def get_extra_table_metadata( @@ -101,7 +101,7 @@ class TrinoEngineSpec(PrestoBaseEngineSpec): latest_parts = tuple([None] * len(col_names)) metadata["partitions"] = { - "cols": sorted( + "cols": sorted( # noqa: C414 list( { column_name diff --git a/superset/db_engine_specs/ydb.py b/superset/db_engine_specs/ydb.py index 50a1972a6..811c38fea 100755 --- a/superset/db_engine_specs/ydb.py +++ b/superset/db_engine_specs/ydb.py @@ -53,15 +53,15 @@ class YDBEngineSpec(BaseEngineSpec): _time_grain_expressions = { None: "{col}", - TimeGrain.SECOND: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT1S')))", - TimeGrain.THIRTY_SECONDS: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT30S')))", - TimeGrain.MINUTE: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT1M')))", - TimeGrain.FIVE_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT5M')))", - TimeGrain.TEN_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT10M')))", - TimeGrain.FIFTEEN_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT15M')))", - TimeGrain.THIRTY_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT30M')))", - TimeGrain.HOUR: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT1H')))", - TimeGrain.DAY: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('P1D')))", + TimeGrain.SECOND: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT1S')))", # noqa: E501 + TimeGrain.THIRTY_SECONDS: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT30S')))", # noqa: E501 + TimeGrain.MINUTE: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT1M')))", # noqa: E501 + TimeGrain.FIVE_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT5M')))", # noqa: E501 + TimeGrain.TEN_MINUTES: 
"DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT10M')))", # noqa: E501 + TimeGrain.FIFTEEN_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT15M')))", # noqa: E501 + TimeGrain.THIRTY_MINUTES: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT30M')))", # noqa: E501 + TimeGrain.HOUR: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('PT1H')))", # noqa: E501 + TimeGrain.DAY: "DateTime::MakeDatetime(DateTime::StartOf({col}, Interval('P1D')))", # noqa: E501 TimeGrain.WEEK: "DateTime::MakeDatetime(DateTime::StartOfWeek({col}))", TimeGrain.MONTH: "DateTime::MakeDatetime(DateTime::StartOfMonth({col}))", TimeGrain.QUARTER: "DateTime::MakeDatetime(DateTime::StartOfQuarter({col}))", @@ -79,9 +79,9 @@ class YDBEngineSpec(BaseEngineSpec): sqla_type = cls.get_sqla_column_type(target_type) if isinstance(sqla_type, types.Date): - return f"DateTime::MakeDate(DateTime::ParseIso8601('{dttm.date().isoformat()}'))" + return f"DateTime::MakeDate(DateTime::ParseIso8601('{dttm.date().isoformat()}'))" # noqa: E501 if isinstance(sqla_type, types.DateTime): - return f"""DateTime::MakeDatetime(DateTime::ParseIso8601('{dttm.isoformat(sep="T", timespec="seconds")}'))""" + return f"""DateTime::MakeDatetime(DateTime::ParseIso8601('{dttm.isoformat(sep="T", timespec="seconds")}'))""" # noqa: E501 return None @staticmethod diff --git a/superset/distributed_lock/__init__.py b/superset/distributed_lock/__init__.py index c4af73ac0..4374e85aa 100644 --- a/superset/distributed_lock/__init__.py +++ b/superset/distributed_lock/__init__.py @@ -36,7 +36,7 @@ RESOURCE = KeyValueResource.LOCK @contextmanager -def KeyValueDistributedLock( # pylint: disable=invalid-name +def KeyValueDistributedLock( # pylint: disable=invalid-name # noqa: N802 namespace: str, **kwargs: Any, ) -> Iterator[uuid.UUID]: diff --git a/superset/errors.py b/superset/errors.py index 4d9c08d14..f3530cfdc 100644 --- a/superset/errors.py +++ b/superset/errors.py @@ -27,7 +27,7 @@ class SupersetErrorType(StrEnum): Types of errors that can exist within Superset. Keep in sync with superset-frontend/packages/superset-ui-core/src/query/types/Query.ts - """ + """ # noqa: E501 # Frontend errors FRONTEND_CSRF_ERROR = "FRONTEND_CSRF_ERROR" @@ -40,7 +40,7 @@ class SupersetErrorType(StrEnum): TABLE_DOES_NOT_EXIST_ERROR = "TABLE_DOES_NOT_EXIST_ERROR" SCHEMA_DOES_NOT_EXIST_ERROR = "SCHEMA_DOES_NOT_EXIST_ERROR" CONNECTION_INVALID_USERNAME_ERROR = "CONNECTION_INVALID_USERNAME_ERROR" - CONNECTION_INVALID_PASSWORD_ERROR = "CONNECTION_INVALID_PASSWORD_ERROR" + CONNECTION_INVALID_PASSWORD_ERROR = "CONNECTION_INVALID_PASSWORD_ERROR" # noqa: S105 CONNECTION_INVALID_HOSTNAME_ERROR = "CONNECTION_INVALID_HOSTNAME_ERROR" CONNECTION_PORT_CLOSED_ERROR = "CONNECTION_PORT_CLOSED_ERROR" CONNECTION_INVALID_PORT_ERROR = "CONNECTION_INVALID_PORT_ERROR" @@ -201,7 +201,7 @@ class ErrorLevel(StrEnum): Levels of errors that can exist within Superset. 
Keep in sync with superset-frontend/packages/superset-ui-core/src/query/types/Query.ts - """ + """ # noqa: E501 INFO = "info" WARNING = "warning" diff --git a/superset/examples/big_data.py b/superset/examples/big_data.py index ed738d2c9..0f3fd10fd 100644 --- a/superset/examples/big_data.py +++ b/superset/examples/big_data.py @@ -71,5 +71,5 @@ def load_big_data() -> None: add_data(columns=columns, num_rows=10, table_name=f"small_table_{i}") print("Creating table with long name") - name = "".join(random.choices(string.ascii_letters + string.digits, k=60)) + name = "".join(random.choices(string.ascii_letters + string.digits, k=60)) # noqa: S311 add_data(columns=columns, num_rows=10, table_name=name) diff --git a/superset/examples/birth_names.py b/superset/examples/birth_names.py index b69cf0f4a..8d2b23e22 100644 --- a/superset/examples/birth_names.py +++ b/superset/examples/birth_names.py @@ -851,7 +851,7 @@ def create_dashboard(slices: list[Slice]) -> Dashboard: "type": "ROW" } } - """ + """ # noqa: E501 ) ) # pylint: enable=line-too-long diff --git a/superset/examples/long_lat.py b/superset/examples/long_lat.py index 5afb65f6f..c4d48ce25 100644 --- a/superset/examples/long_lat.py +++ b/superset/examples/long_lat.py @@ -54,8 +54,8 @@ def load_long_lat_data(only_metadata: bool = False, force: bool = False) -> None start + datetime.timedelta(hours=i * 24 / (len(pdf) - 1)) for i in range(len(pdf)) ] - pdf["occupancy"] = [random.randint(1, 6) for _ in range(len(pdf))] - pdf["radius_miles"] = [random.uniform(1, 3) for _ in range(len(pdf))] + pdf["occupancy"] = [random.randint(1, 6) for _ in range(len(pdf))] # noqa: S311 + pdf["radius_miles"] = [random.uniform(1, 3) for _ in range(len(pdf))] # noqa: S311 pdf["geohash"] = pdf[["LAT", "LON"]].apply( lambda x: geohash.encode(*x), axis=1 ) diff --git a/superset/examples/utils.py b/superset/examples/utils.py index 72df7a579..d5da9c7c5 100644 --- a/superset/examples/utils.py +++ b/superset/examples/utils.py @@ -88,7 +88,7 @@ def load_configs_from_directory( # removing "type" from the metadata allows us to import any exported model # from the unzipped directory directly - metadata = yaml.load(contents.get(METADATA_FILE_NAME, "{}"), Loader=yaml.Loader) + metadata = yaml.load(contents.get(METADATA_FILE_NAME, "{}"), Loader=yaml.Loader) # noqa: S506 if "type" in metadata: del metadata["type"] contents[METADATA_FILE_NAME] = yaml.dump(metadata) diff --git a/superset/exceptions.py b/superset/exceptions.py index 5afad6061..c6105b446 100644 --- a/superset/exceptions.py +++ b/superset/exceptions.py @@ -26,7 +26,7 @@ from marshmallow import ValidationError from superset.errors import ErrorLevel, SupersetError, SupersetErrorType -class SupersetException(Exception): +class SupersetException(Exception): # noqa: N818 status = 500 message = "" @@ -396,13 +396,13 @@ class DisallowedSQLFunction(SupersetErrorException): ) -class CreateKeyValueDistributedLockFailedException(Exception): +class CreateKeyValueDistributedLockFailedException(Exception): # noqa: N818 """ Exception to signalize failure to acquire lock. """ -class DeleteKeyValueDistributedLockFailedException(Exception): +class DeleteKeyValueDistributedLockFailedException(Exception): # noqa: N818 """ Exception to signalize failure to delete lock. 
""" diff --git a/superset/explore/permalink/schemas.py b/superset/explore/permalink/schemas.py index 6edaab4ed..63466b0c1 100644 --- a/superset/explore/permalink/schemas.py +++ b/superset/explore/permalink/schemas.py @@ -18,12 +18,12 @@ from marshmallow import fields, Schema class ExplorePermalinkStateSchema(Schema): - formData = fields.Dict( + formData = fields.Dict( # noqa: N815 required=True, allow_none=False, metadata={"description": "Chart form data"}, ) - urlParams = fields.List( + urlParams = fields.List( # noqa: N815 fields.Tuple( ( fields.String( @@ -44,17 +44,17 @@ class ExplorePermalinkStateSchema(Schema): class ExplorePermalinkSchema(Schema): - chartId = fields.Integer( + chartId = fields.Integer( # noqa: N815 required=False, allow_none=True, metadata={"description": "The id of the chart"}, ) - datasourceType = fields.String( + datasourceType = fields.String( # noqa: N815 required=True, allow_none=False, metadata={"description": "The type of the datasource"}, ) - datasourceId = fields.Integer( + datasourceId = fields.Integer( # noqa: N815 required=False, allow_none=True, metadata={"description": "The id of the datasource"}, diff --git a/superset/explore/schemas.py b/superset/explore/schemas.py index 75c3dcac2..5149f98ff 100644 --- a/superset/explore/schemas.py +++ b/superset/explore/schemas.py @@ -106,7 +106,7 @@ class DatasetSchema(Schema): class SliceSchema(Schema): cache_timeout = fields.Integer( metadata={ - "description": "Duration (in seconds) of the caching timeout for this chart." + "description": "Duration (in seconds) of the caching timeout for this chart." # noqa: E501 } ) certification_details = fields.String( diff --git a/superset/extensions/__init__.py b/superset/extensions/__init__.py index 3559f7ba3..ffe9c7996 100644 --- a/superset/extensions/__init__.py +++ b/superset/extensions/__init__.py @@ -97,7 +97,7 @@ class UIManifestProcessor: # templates full_manifest = json.load(f) self.manifest = full_manifest.get("entrypoints", {}) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: S110 pass def get_manifest_files(self, bundle: str, asset_type: str) -> list[str]: diff --git a/superset/extensions/metadb.py b/superset/extensions/metadb.py index 8424d8ee4..3a95ab5d7 100644 --- a/superset/extensions/metadb.py +++ b/superset/extensions/metadb.py @@ -33,7 +33,7 @@ and applying any filters (as well as sorting, limiting, and offsetting). Note that no aggregation is done on the database. Aggregations and other operations like joins and unions are done in memory, using the SQLite engine. -""" +""" # noqa: E501 from __future__ import annotations @@ -270,7 +270,7 @@ class SupersetShillelaghAdapter(Adapter): self.schema = parts.pop(-1) if parts else None self.catalog = parts.pop(-1) if parts else None - # If the table has a single integer primary key we use that as the row ID in order + # If the table has a single integer primary key we use that as the row ID in order # noqa: E501 # to perform updates and deletes. Otherwise we can only do inserts and selects. 
self._rowid: str | None = None diff --git a/superset/initialization/__init__.py b/superset/initialization/__init__.py index 1720c87af..ee7fcf9ef 100644 --- a/superset/initialization/__init__.py +++ b/superset/initialization/__init__.py @@ -581,7 +581,7 @@ class SupersetAppInitializer: # pylint: disable=too-many-public-methods self.superset_app.url_map.converters["regex"] = RegexConverter self.superset_app.url_map.converters["object_type"] = ObjectTypeConverter - def configure_middlewares(self) -> None: + def configure_middlewares(self) -> None: # noqa: C901 if self.config["ENABLE_CORS"]: # pylint: disable=import-outside-toplevel from flask_cors import CORS @@ -648,7 +648,7 @@ class SupersetAppInitializer: # pylint: disable=too-many-public-methods "We haven't found any Content Security Policy (CSP) defined in " "the configurations. Please make sure to configure CSP using the " "TALISMAN_ENABLED and TALISMAN_CONFIG keys or any other external " - "software. Failing to configure CSP have serious security implications. " + "software. Failing to configure CSP have serious security implications. " # noqa: E501 "Check https://developer.mozilla.org/en-US/docs/Web/HTTP/CSP for more " "information. You can disable this warning using the " "CONTENT_SECURITY_POLICY_WARNING key." diff --git a/superset/key_value/types.py b/superset/key_value/types.py index f6459c330..348697234 100644 --- a/superset/key_value/types.py +++ b/superset/key_value/types.py @@ -79,7 +79,7 @@ class PickleKeyValueCodec(KeyValueCodec): return pickle.dumps(value) def decode(self, value: bytes) -> dict[Any, Any]: - return pickle.loads(value) + return pickle.loads(value) # noqa: S301 class MarshmallowKeyValueCodec(JsonKeyValueCodec): diff --git a/superset/key_value/utils.py b/superset/key_value/utils.py index 0a4e63778..e0ccbd1f5 100644 --- a/superset/key_value/utils.py +++ b/superset/key_value/utils.py @@ -61,7 +61,7 @@ def decode_permalink_id(key: str, salt: str) -> int: def get_uuid_namespace(seed: str) -> UUID: - md5_obj = md5() + md5_obj = md5() # noqa: S324 md5_obj.update(seed.encode("utf-8")) return UUID(md5_obj.hexdigest()) diff --git a/superset/migrations/shared/catalogs.py b/superset/migrations/shared/catalogs.py index 45d84e303..2306e5849 100644 --- a/superset/migrations/shared/catalogs.py +++ b/superset/migrations/shared/catalogs.py @@ -150,10 +150,10 @@ def print_processed_batch( """ elapsed_time = datetime.now() - start_time elapsed_seconds = elapsed_time.total_seconds() - elapsed_formatted = f"{int(elapsed_seconds // 3600):02}:{int((elapsed_seconds % 3600) // 60):02}:{int(elapsed_seconds % 60):02}" + elapsed_formatted = f"{int(elapsed_seconds // 3600):02}:{int((elapsed_seconds % 3600) // 60):02}:{int(elapsed_seconds % 60):02}" # noqa: E501 rows_processed = min(offset + batch_size, total_rows) logger.info( - f"{elapsed_formatted} - {rows_processed:,} of {total_rows:,} {model.__tablename__} rows processed " + f"{elapsed_formatted} - {rows_processed:,} of {total_rows:,} {model.__tablename__} rows processed " # noqa: E501 f"({(rows_processed / total_rows) * 100:.2f}%)" ) @@ -252,7 +252,7 @@ def update_schema_catalog_perms( catalog (str): The new catalog to set. downgrade (bool, optional): If True, reset the `catalog` and `catalog_perm` fields to None. Defaults to False. 
- """ + """ # noqa: E501 # Mapping of table id to schema permission mapping = {} diff --git a/superset/migrations/shared/migrate_viz/base.py b/superset/migrations/shared/migrate_viz/base.py index b013aa0be..ee5372e3a 100644 --- a/superset/migrations/shared/migrate_viz/base.py +++ b/superset/migrations/shared/migrate_viz/base.py @@ -62,8 +62,8 @@ class MigrateViz: if "viz_type" in self.data: self.data["viz_type"] = self.target_viz_type - # Sometimes visualizations have same keys in the source form_data and rename_keys - # We need to remove them from data to allow the migration to work properly with rename_keys + # Sometimes visualizations have same keys in the source form_data and rename_keys # noqa: E501 + # We need to remove them from data to allow the migration to work properly with rename_keys # noqa: E501 for source_key, target_key in self.rename_keys.items(): if source_key in self.data and target_key in self.data: self.data.pop(target_key) diff --git a/superset/migrations/shared/native_filters.py b/superset/migrations/shared/native_filters.py index 6e04928fa..1fa097c23 100644 --- a/superset/migrations/shared/native_filters.py +++ b/superset/migrations/shared/native_filters.py @@ -26,7 +26,7 @@ from superset.utils import json from superset.utils.dashboard_filter_scopes_converter import convert_filter_scopes -def convert_filter_scopes_to_native_filters( # pylint: disable=invalid-name,too-many-branches,too-many-locals,too-many-nested-blocks,too-many-statements +def convert_filter_scopes_to_native_filters( # pylint: disable=invalid-name,too-many-branches,too-many-locals,too-many-nested-blocks,too-many-statements # noqa: C901 json_metadata: dict[str, Any], position_json: dict[str, Any], filter_boxes: list[Slice], @@ -274,7 +274,7 @@ def convert_filter_scopes_to_native_filters( # pylint: disable=invalid-name,too ) -def migrate_dashboard(dashboard: Dashboard) -> None: +def migrate_dashboard(dashboard: Dashboard) -> None: # noqa: C901 """ Convert the dashboard to use native filters. 
diff --git a/superset/migrations/shared/security_converge.py b/superset/migrations/shared/security_converge.py index 42a68acb2..b91e61c4a 100644 --- a/superset/migrations/shared/security_converge.py +++ b/superset/migrations/shared/security_converge.py @@ -189,7 +189,7 @@ def _delete_old_permissions( - Deletes the ViewMenu if it's an orphan now """ # Delete old permissions - for old_pvm, new_pvms in pvm_map.items(): + for old_pvm, new_pvms in pvm_map.items(): # noqa: B007 old_permission_name = old_pvm.permission.name old_view_name = old_pvm.view_menu.name logger.info(f"Going to delete pvm: {old_pvm}") @@ -212,7 +212,7 @@ def _delete_old_permissions( session.delete(old_pvm.view_menu) -def migrate_roles( +def migrate_roles( # noqa: C901 session: Session, pvm_key_map: PvmMigrationMapType, commit: bool = False, @@ -252,7 +252,7 @@ def migrate_roles( def get_reversed_new_pvms(pvm_map: PvmMigrationMapType) -> dict[str, tuple[str, ...]]: reversed_pvms: dict[str, tuple[str, ...]] = {} - for old_pvm, new_pvms in pvm_map.items(): + for old_pvm, new_pvms in pvm_map.items(): # noqa: B007 if old_pvm.view not in reversed_pvms: reversed_pvms[old_pvm.view] = (old_pvm.permission,) else: diff --git a/superset/migrations/shared/utils.py b/superset/migrations/shared/utils.py index 813162bcd..b0a460f58 100644 --- a/superset/migrations/shared/utils.py +++ b/superset/migrations/shared/utils.py @@ -120,7 +120,7 @@ def assign_uuids( for dialect, sql in uuid_by_dialect.items(): if isinstance(bind.dialect, dialect): op.execute( - f"UPDATE {dialect().identifier_preparer.quote(table_name)} SET uuid = {sql}" + f"UPDATE {dialect().identifier_preparer.quote(table_name)} SET uuid = {sql}" # noqa: S608, E501 ) print(f"Done. Assigned {count} uuids in {time.time() - start_time:.3f}s.\n") return @@ -210,7 +210,7 @@ def drop_fks_for_table(table_name: str) -> None: foreign_keys = inspector.get_foreign_keys(table_name) for fk in foreign_keys: logger.info( - f"Dropping foreign key {GREEN}{fk['name']}{RESET} from table {GREEN}{table_name}{RESET}..." + f"Dropping foreign key {GREEN}{fk['name']}{RESET} from table {GREEN}{table_name}{RESET}..." # noqa: E501 ) op.drop_constraint(fk["name"], table_name, type_="foreignkey") @@ -225,7 +225,7 @@ def create_table(table_name: str, *columns: SchemaItem) -> None: :param table_name: The name of the table to be created. :param columns: A variable number of arguments representing the schema just like when calling alembic's method create_table() - """ + """ # noqa: E501 if has_table(table_name=table_name): logger.info(f"Table {LRED}{table_name}{RESET} already exists. Skipping...") @@ -246,7 +246,7 @@ def drop_table(table_name: str) -> None: (handled by `drop_fks_for_table`) and then proceeds to drop the table. :param table_name: The name of the table to be dropped. - """ + """ # noqa: E501 if not has_table(table_name=table_name): logger.info(f"Table {GREEN}{table_name}{RESET} doesn't exist. Skipping...") @@ -274,10 +274,10 @@ def batch_operation( the start index and the end index of the current batch. :param count: The total number of items to process. :param batch_size: The number of items to process in each batch. - """ + """ # noqa: E501 if count <= 0: logger.info( - f"No records to process in batch {LRED}(count <= 0){RESET} for callable {LRED}other_callable_example{RESET}. Skipping..." + f"No records to process in batch {LRED}(count <= 0){RESET} for callable {LRED}other_callable_example{RESET}. Skipping..." 
# noqa: E501 ) return for offset in range(0, count, batch_size): @@ -287,7 +287,7 @@ def batch_operation( logger.info(f"Progress: {count:,}/{count:,} (100%)") logger.info( - f"End: {GREEN}{callable.__name__}{RESET} batch operation {GREEN}succesfully{RESET} executed." + f"End: {GREEN}{callable.__name__}{RESET} batch operation {GREEN}succesfully{RESET} executed." # noqa: E501 ) @@ -302,13 +302,13 @@ def add_columns(table_name: str, *columns: Column) -> None: :param table_name: The name of the table to which the columns will be added. :param columns: A list of SQLAlchemy Column objects that define the name, type, and other attributes of the columns to be added. - """ + """ # noqa: E501 cols_to_add = [] for col in columns: if table_has_column(table_name=table_name, column_name=col.name): logger.info( - f"Column {LRED}{col.name}{RESET} already present on table {LRED}{table_name}{RESET}. Skipping..." + f"Column {LRED}{col.name}{RESET} already present on table {LRED}{table_name}{RESET}. Skipping..." # noqa: E501 ) else: cols_to_add.append(col) @@ -316,7 +316,7 @@ def add_columns(table_name: str, *columns: Column) -> None: with op.batch_alter_table(table_name) as batch_op: for col in cols_to_add: logger.info( - f"Adding column {GREEN}{col.name}{RESET} to table {GREEN}{table_name}{RESET}..." + f"Adding column {GREEN}{col.name}{RESET} to table {GREEN}{table_name}{RESET}..." # noqa: E501 ) batch_op.add_column(col) @@ -332,13 +332,13 @@ def drop_columns(table_name: str, *columns: str) -> None: :param table_name: The name of the table from which the columns will be removed. :param columns: A list of column names to be dropped. - """ + """ # noqa: E501 cols_to_drop = [] for col in columns: if not table_has_column(table_name=table_name, column_name=col): logger.info( - f"Column {LRED}{col}{RESET} is not present on table {LRED}{table_name}{RESET}. Skipping..." + f"Column {LRED}{col}{RESET} is not present on table {LRED}{table_name}{RESET}. Skipping..." # noqa: E501 ) else: cols_to_drop.append(col) @@ -346,7 +346,7 @@ def drop_columns(table_name: str, *columns: str) -> None: with op.batch_alter_table(table_name) as batch_op: for col in cols_to_drop: logger.info( - f"Dropping column {GREEN}{col}{RESET} from table {GREEN}{table_name}{RESET}..." + f"Dropping column {GREEN}{col}{RESET} from table {GREEN}{table_name}{RESET}..." # noqa: E501 ) batch_op.drop_column(col) @@ -361,11 +361,11 @@ def create_index(table_name: str, index_name: str, *columns: str) -> None: :param table_name: The name of the table on which the index will be created. :param index_name: The name of the index to be created. :param columns: A list column names where the index will be created - """ + """ # noqa: E501 if table_has_index(table=table_name, index=index_name): logger.info( - f"Table {LRED}{table_name}{RESET} already has index {LRED}{index_name}{RESET}. Skipping..." + f"Table {LRED}{table_name}{RESET} already has index {LRED}{index_name}{RESET}. Skipping..." # noqa: E501 ) return @@ -385,16 +385,16 @@ def drop_index(table_name: str, index_name: str) -> None: :param table_name: The name of the table from which the index will be dropped. :param index_name: The name of the index to be dropped. - """ + """ # noqa: E501 if not table_has_index(table=table_name, index=index_name): logger.info( - f"Table {LRED}{table_name}{RESET} doesn't have index {LRED}{index_name}{RESET}. Skipping..." + f"Table {LRED}{table_name}{RESET} doesn't have index {LRED}{index_name}{RESET}. Skipping..." 
# noqa: E501 ) return logger.info( - f"Dropping index {GREEN}{index_name}{RESET} from table {GREEN}{table_name}{RESET}..." + f"Dropping index {GREEN}{index_name}{RESET} from table {GREEN}{table_name}{RESET}..." # noqa: E501 ) op.drop_index(table_name=table_name, index_name=index_name) diff --git a/superset/migrations/versions/2015-12-04_09-42_1a48a5411020_adding_slug_to_dash.py b/superset/migrations/versions/2015-12-04_09-42_1a48a5411020_adding_slug_to_dash.py index 1bee2ed14..d5b91862e 100644 --- a/superset/migrations/versions/2015-12-04_09-42_1a48a5411020_adding_slug_to_dash.py +++ b/superset/migrations/versions/2015-12-04_09-42_1a48a5411020_adding_slug_to_dash.py @@ -34,7 +34,7 @@ def upgrade(): op.add_column("dashboards", sa.Column("slug", sa.String(length=255), nullable=True)) try: op.create_unique_constraint("idx_unique_slug", "dashboards", ["slug"]) - except: # noqa: E722 + except: # noqa: E722, S110 pass diff --git a/superset/migrations/versions/2016-03-13_21-30_18e88e1cc004_making_audit_nullable.py b/superset/migrations/versions/2016-03-13_21-30_18e88e1cc004_making_audit_nullable.py index 4cd834b88..5a6eab718 100644 --- a/superset/migrations/versions/2016-03-13_21-30_18e88e1cc004_making_audit_nullable.py +++ b/superset/migrations/versions/2016-03-13_21-30_18e88e1cc004_making_audit_nullable.py @@ -97,7 +97,7 @@ def upgrade(): ) op.alter_column("url", "changed_on", existing_type=sa.DATETIME(), nullable=True) op.alter_column("url", "created_on", existing_type=sa.DATETIME(), nullable=True) - except Exception: + except Exception: # noqa: S110 pass diff --git a/superset/migrations/versions/2016-03-24_14-13_763d4b211ec9_fixing_audit_fk.py b/superset/migrations/versions/2016-03-24_14-13_763d4b211ec9_fixing_audit_fk.py index 94521d556..93dc87f7d 100644 --- a/superset/migrations/versions/2016-03-24_14-13_763d4b211ec9_fixing_audit_fk.py +++ b/superset/migrations/versions/2016-03-24_14-13_763d4b211ec9_fixing_audit_fk.py @@ -96,7 +96,7 @@ def upgrade(): op.alter_column("url", "created_on", existing_type=sa.DATETIME(), nullable=True) op.create_foreign_key(None, "metrics", "ab_user", ["changed_by_fk"], ["id"]) op.create_foreign_key(None, "metrics", "ab_user", ["created_by_fk"], ["id"]) - except: # noqa: E722 + except: # noqa: E722, S110 pass @@ -174,5 +174,5 @@ def downgrade(): op.alter_column( "columns", "changed_on", existing_type=sa.DATETIME(), nullable=False ) - except: # noqa: E722 + except: # noqa: E722, S110 pass diff --git a/superset/migrations/versions/2016-04-15_08-31_b4456560d4f3_change_table_unique_constraint.py b/superset/migrations/versions/2016-04-15_08-31_b4456560d4f3_change_table_unique_constraint.py index 429c44757..20d38f820 100644 --- a/superset/migrations/versions/2016-04-15_08-31_b4456560d4f3_change_table_unique_constraint.py +++ b/superset/migrations/versions/2016-04-15_08-31_b4456560d4f3_change_table_unique_constraint.py @@ -36,7 +36,7 @@ def upgrade(): op.create_unique_constraint( "_customer_location_uc", "tables", ["database_id", "schema", "table_name"] ) - except Exception: + except Exception: # noqa: S110 pass @@ -44,5 +44,5 @@ def downgrade(): try: # Trying since sqlite doesn't like constraints op.drop_constraint("_customer_location_uc", "tables", type_="unique") - except Exception: + except Exception: # noqa: S110 pass diff --git a/superset/migrations/versions/2016-06-27_08-43_27ae655e4247_make_creator_owners.py b/superset/migrations/versions/2016-06-27_08-43_27ae655e4247_make_creator_owners.py index 76a823041..0983095a7 100644 --- 
a/superset/migrations/versions/2016-06-27_08-43_27ae655e4247_make_creator_owners.py +++ b/superset/migrations/versions/2016-06-27_08-43_27ae655e4247_make_creator_owners.py @@ -63,13 +63,13 @@ dashboard_user = Table( class AuditMixin: @declared_attr - def created_by_fk(cls): + def created_by_fk(cls): # noqa: N805 return Column( Integer, ForeignKey("ab_user.id"), default=get_user_id, nullable=False ) @declared_attr - def created_by(cls): + def created_by(cls): # noqa: N805 return relationship( "User", primaryjoin=f"{cls.__name__}.created_by_fk == User.id", diff --git a/superset/migrations/versions/2016-09-12_23-33_4500485bde7d_allow_run_sync_async.py b/superset/migrations/versions/2016-09-12_23-33_4500485bde7d_allow_run_sync_async.py index a1e71a554..2c50d04df 100644 --- a/superset/migrations/versions/2016-09-12_23-33_4500485bde7d_allow_run_sync_async.py +++ b/superset/migrations/versions/2016-09-12_23-33_4500485bde7d_allow_run_sync_async.py @@ -39,5 +39,5 @@ def downgrade(): try: op.drop_column("dbs", "allow_run_sync") op.drop_column("dbs", "allow_run_async") - except Exception: + except Exception: # noqa: S110 pass diff --git a/superset/migrations/versions/2018-06-04_11-12_c5756bec8b47_time_grain_sqla.py b/superset/migrations/versions/2018-06-04_11-12_c5756bec8b47_time_grain_sqla.py index 36ad22526..3e2674c52 100644 --- a/superset/migrations/versions/2018-06-04_11-12_c5756bec8b47_time_grain_sqla.py +++ b/superset/migrations/versions/2018-06-04_11-12_c5756bec8b47_time_grain_sqla.py @@ -54,7 +54,7 @@ def upgrade(): if params.get("time_grain_sqla") == "Time Column": params["time_grain_sqla"] = None slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() @@ -72,7 +72,7 @@ def downgrade(): if params.get("time_grain_sqla") is None: params["time_grain_sqla"] = "Time Column" slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2018-06-07_09-52_afb7730f6a9c_remove_empty_filters.py b/superset/migrations/versions/2018-06-07_09-52_afb7730f6a9c_remove_empty_filters.py index c9ea7b9b3..ee846f37a 100644 --- a/superset/migrations/versions/2018-06-07_09-52_afb7730f6a9c_remove_empty_filters.py +++ b/superset/migrations/versions/2018-06-07_09-52_afb7730f6a9c_remove_empty_filters.py @@ -63,7 +63,7 @@ def upgrade(): ] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2018-06-13_10-20_4451805bbaa1_remove_double_percents.py b/superset/migrations/versions/2018-06-13_10-20_4451805bbaa1_remove_double_percents.py index 1786ae37e..eee62261e 100644 --- a/superset/migrations/versions/2018-06-13_10-20_4451805bbaa1_remove_double_percents.py +++ b/superset/migrations/versions/2018-06-13_10-20_4451805bbaa1_remove_double_percents.py @@ -94,7 +94,7 @@ def replace(source, target): ) slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2018-06-13_14-54_bddc498dd179_adhoc_filters.py b/superset/migrations/versions/2018-06-13_14-54_bddc498dd179_adhoc_filters.py index 8ebcc104e..bca1c4332 100644 --- a/superset/migrations/versions/2018-06-13_14-54_bddc498dd179_adhoc_filters.py +++ b/superset/migrations/versions/2018-06-13_14-54_bddc498dd179_adhoc_filters.py @@ -57,7 +57,7 @@ def upgrade(): params = json.loads(slc.params) 
convert_legacy_filters_into_adhoc(params) slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() @@ -77,7 +77,7 @@ def downgrade(): del params["adhoc_filters"] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2018-06-14_14-31_80a67c5192fa_single_pie_chart_metric.py b/superset/migrations/versions/2018-06-14_14-31_80a67c5192fa_single_pie_chart_metric.py index 302dda2a5..5db104f3b 100644 --- a/superset/migrations/versions/2018-06-14_14-31_80a67c5192fa_single_pie_chart_metric.py +++ b/superset/migrations/versions/2018-06-14_14-31_80a67c5192fa_single_pie_chart_metric.py @@ -59,7 +59,7 @@ def upgrade(): del params["metrics"] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() @@ -80,7 +80,7 @@ def downgrade(): del params["metric"] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2018-07-05_15-19_3dda56f1c4c6_migrate_num_period_compare_and_period_.py b/superset/migrations/versions/2018-07-05_15-19_3dda56f1c4c6_migrate_num_period_compare_and_period_.py index 18547f3cf..580c79ce3 100644 --- a/superset/migrations/versions/2018-07-05_15-19_3dda56f1c4c6_migrate_num_period_compare_and_period_.py +++ b/superset/migrations/versions/2018-07-05_15-19_3dda56f1c4c6_migrate_num_period_compare_and_period_.py @@ -99,7 +99,7 @@ def timedelta_to_string(obj): def format_seconds(value): periods = [("minute", 60), ("hour", 3600), ("day", 86400), ("week", 604800)] - for period, multiple in periods: + for period, multiple in periods: # noqa: B007 if value % multiple == 0: value //= multiple break diff --git a/superset/migrations/versions/2018-07-22_11-59_bebcf3fed1fe_convert_dashboard_v1_positions.py b/superset/migrations/versions/2018-07-22_11-59_bebcf3fed1fe_convert_dashboard_v1_positions.py index b57f4f3e9..8baa13fe6 100644 --- a/superset/migrations/versions/2018-07-22_11-59_bebcf3fed1fe_convert_dashboard_v1_positions.py +++ b/superset/migrations/versions/2018-07-22_11-59_bebcf3fed1fe_convert_dashboard_v1_positions.py @@ -120,7 +120,7 @@ def generate_id(): return uuid.uuid4().hex[:8] -def has_overlap(positions, xAxis=True): +def has_overlap(positions, xAxis=True): # noqa: N803 sorted_positions = ( sorted(positions[:], key=lambda pos: pos["col"]) if xAxis @@ -225,14 +225,14 @@ def get_children_max(children, attr, root): def get_children_sum(children, attr, root): - return reduce((lambda sum, childId: sum + root[childId]["meta"][attr]), children, 0) + return reduce((lambda sum, childId: sum + root[childId]["meta"][attr]), children, 0) # noqa: N803 # find column that: width > 2 and # each row has at least 1 chart can reduce width def get_wide_column_ids(children, root): return list( - filter(lambda childId: can_reduce_column_width(root[childId], root), children) + filter(lambda childId: can_reduce_column_width(root[childId], root), children) # noqa: N803 ) @@ -248,12 +248,12 @@ def can_reduce_column_width(column_component, root): column_component["type"] == COLUMN_TYPE and column_component["meta"]["width"] > GRID_MIN_COLUMN_COUNT and all( - [ + [ # noqa: C419 is_wide_leaf_component(root[childId]) or ( root[childId]["type"] == ROW_TYPE and all( - [ + [ # noqa: C419 is_wide_leaf_component(root[id]) for id in root[childId]["children"] ] @@ -268,7 +268,7 @@ 
def can_reduce_column_width(column_component, root): def reduce_row_width(row_component, root): wide_leaf_component_ids = list( filter( - lambda childId: is_wide_leaf_component(root[childId]), + lambda childId: is_wide_leaf_component(root[childId]), # noqa: N803 row_component["children"], ) ) @@ -292,7 +292,7 @@ def reduce_component_width(component): return component["meta"]["width"] -def convert(positions, level, parent, root): +def convert(positions, level, parent, root): # noqa: C901 if len(positions) == 0: return @@ -424,7 +424,7 @@ def convert(positions, level, parent, root): ) -def convert_to_layout(positions): +def convert_to_layout(positions): # noqa: C901 root = get_empty_layout() convert(positions, 0, root[DASHBOARD_GRID_ID], root) @@ -444,7 +444,7 @@ def convert_to_layout(positions): while current_width > GRID_COLUMN_COUNT and len( list( filter( - lambda childId: is_wide_leaf_component(root[childId]), + lambda childId: is_wide_leaf_component(root[childId]), # noqa: N803 item["children"], ) ) @@ -463,7 +463,7 @@ def convert_to_layout(positions): # need 2nd loop since same column may reduce multiple times while idx < len(col_ids) and current_width > GRID_COLUMN_COUNT: current_column = col_ids[idx] - for childId in root[current_column]["children"]: + for childId in root[current_column]["children"]: # noqa: N806 if root[childId]["type"] == ROW_TYPE: root[childId]["meta"]["width"] = reduce_row_width( root[childId], root diff --git a/superset/migrations/versions/2018-08-01_11-47_7fcdcde0761c_.py b/superset/migrations/versions/2018-08-01_11-47_7fcdcde0761c_.py index 6f5220197..bf03627b5 100644 --- a/superset/migrations/versions/2018-08-01_11-47_7fcdcde0761c_.py +++ b/superset/migrations/versions/2018-08-01_11-47_7fcdcde0761c_.py @@ -58,7 +58,7 @@ def upgrade(): session = db.Session(bind=bind) dashboards = session.query(Dashboard).all() - for i, dashboard in enumerate(dashboards): + for i, dashboard in enumerate(dashboards): # noqa: B007 original_text = dashboard.position_json or "" position_json = json.loads(original_text or "{}") if is_v2_dash(position_json): diff --git a/superset/migrations/versions/2018-11-12_13-31_4ce8df208545_migrate_time_range_for_default_filters.py b/superset/migrations/versions/2018-11-12_13-31_4ce8df208545_migrate_time_range_for_default_filters.py index d1f68979c..c96fed982 100644 --- a/superset/migrations/versions/2018-11-12_13-31_4ce8df208545_migrate_time_range_for_default_filters.py +++ b/superset/migrations/versions/2018-11-12_13-31_4ce8df208545_migrate_time_range_for_default_filters.py @@ -44,7 +44,7 @@ class Dashboard(Base): json_metadata = Column(Text) -def upgrade(): +def upgrade(): # noqa: C901 bind = op.get_bind() session = db.Session(bind=bind) @@ -76,7 +76,7 @@ def upgrade(): val["__time_range"] = f"{__from} : {__to}" json_metadata["default_filters"] = json.dumps(filters) has_update = True - except Exception: + except Exception: # noqa: S110 pass # filter_immune_slice_fields: diff --git a/superset/migrations/versions/2020-02-07_14-13_3325d4caccc8_dashboard_scoped_filters.py b/superset/migrations/versions/2020-02-07_14-13_3325d4caccc8_dashboard_scoped_filters.py index f37aa3e87..5f7ba2d1d 100644 --- a/superset/migrations/versions/2020-02-07_14-13_3325d4caccc8_dashboard_scoped_filters.py +++ b/superset/migrations/versions/2020-02-07_14-13_3325d4caccc8_dashboard_scoped_filters.py @@ -87,7 +87,7 @@ def upgrade(): filter_scopes = convert_filter_scopes(json_metadata, filters) json_metadata["filter_scopes"] = filter_scopes logging.info( - f"Adding 
filter_scopes for dashboard {dashboard.id}: {json.dumps(filter_scopes)}" + f"Adding filter_scopes for dashboard {dashboard.id}: {json.dumps(filter_scopes)}" # noqa: E501 ) json_metadata.pop("filter_immune_slices", None) diff --git a/superset/migrations/versions/2020-02-20_08-52_72428d1ea401_add_tmp_schema_name_to_the_query_object.py b/superset/migrations/versions/2020-02-20_08-52_72428d1ea401_add_tmp_schema_name_to_the_query_object.py index bb2198291..b854a7e08 100644 --- a/superset/migrations/versions/2020-02-20_08-52_72428d1ea401_add_tmp_schema_name_to_the_query_object.py +++ b/superset/migrations/versions/2020-02-20_08-52_72428d1ea401_add_tmp_schema_name_to_the_query_object.py @@ -40,5 +40,5 @@ def downgrade(): try: # sqlite doesn't like dropping the columns op.drop_column("query", "tmp_schema_name") - except Exception: + except Exception: # noqa: S110 pass diff --git a/superset/migrations/versions/2020-03-25_10-42_f9a30386bd74_cleanup_time_grainularity.py b/superset/migrations/versions/2020-03-25_10-42_f9a30386bd74_cleanup_time_grainularity.py index f85cf5db9..00b44d378 100644 --- a/superset/migrations/versions/2020-03-25_10-42_f9a30386bd74_cleanup_time_grainularity.py +++ b/superset/migrations/versions/2020-03-25_10-42_f9a30386bd74_cleanup_time_grainularity.py @@ -86,7 +86,7 @@ def upgrade(): del params[field] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2020-04-29_09-24_620241d1153f_update_time_grain_sqla.py b/superset/migrations/versions/2020-04-29_09-24_620241d1153f_update_time_grain_sqla.py index 2f9e7cc56..1e6926b34 100644 --- a/superset/migrations/versions/2020-04-29_09-24_620241d1153f_update_time_grain_sqla.py +++ b/superset/migrations/versions/2020-04-29_09-24_620241d1153f_update_time_grain_sqla.py @@ -92,7 +92,7 @@ def upgrade(): if granularity in duration_dict: params["time_grain_sqla"] = duration_dict[granularity] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2020-08-12_00-24_978245563a02_migrate_iframe_to_dash_markdown.py b/superset/migrations/versions/2020-08-12_00-24_978245563a02_migrate_iframe_to_dash_markdown.py index 404d8b351..6e1aaa6ec 100644 --- a/superset/migrations/versions/2020-08-12_00-24_978245563a02_migrate_iframe_to_dash_markdown.py +++ b/superset/migrations/versions/2020-08-12_00-24_978245563a02_migrate_iframe_to_dash_markdown.py @@ -121,7 +121,7 @@ def upgrade(): ) for i, dashboard in enumerate(dashboards): print( - f"scanning dashboard ({i + 1}/{len(dashboards)}) dashboard: {dashboard.id} >>>>" + f"scanning dashboard ({i + 1}/{len(dashboards)}) dashboard: {dashboard.id} >>>>" # noqa: E501 ) # remove iframe slices from dashboard diff --git a/superset/migrations/versions/2020-09-24_12-04_3fbbc6e8d654_fix_data_access_permissions_for_virtual_.py b/superset/migrations/versions/2020-09-24_12-04_3fbbc6e8d654_fix_data_access_permissions_for_virtual_.py index 47be42486..002d25d2f 100644 --- a/superset/migrations/versions/2020-09-24_12-04_3fbbc6e8d654_fix_data_access_permissions_for_virtual_.py +++ b/superset/migrations/versions/2020-09-24_12-04_3fbbc6e8d654_fix_data_access_permissions_for_virtual_.py @@ -142,7 +142,7 @@ class SqlaTable(Base): return f"[{self.database}].[{self.table_name}](id:{self.id})" -def upgrade(): +def upgrade(): # noqa: C901 """ Previous sqla_viz behaviour when creating a virtual dataset was faulty by 
creating an associated data access permission with [None] on the database name. diff --git a/superset/migrations/versions/2020-11-04_11-06_49b5a32daba5_add_report_schedules.py b/superset/migrations/versions/2020-11-04_11-06_49b5a32daba5_add_report_schedules.py index 7446b9a75..8e28c2995 100644 --- a/superset/migrations/versions/2020-11-04_11-06_49b5a32daba5_add_report_schedules.py +++ b/superset/migrations/versions/2020-11-04_11-06_49b5a32daba5_add_report_schedules.py @@ -69,7 +69,7 @@ def upgrade(): op.create_unique_constraint( "uq_report_schedule_name", "report_schedule", ["name"] ) - except Exception: + except Exception: # noqa: S110 # Expected to fail on SQLite pass op.create_index( diff --git a/superset/migrations/versions/2021-02-04_09-34_070c043f2fdb_add_granularity_to_charts_where_missing.py b/superset/migrations/versions/2021-02-04_09-34_070c043f2fdb_add_granularity_to_charts_where_missing.py index cda2a9b41..5976b527e 100644 --- a/superset/migrations/versions/2021-02-04_09-34_070c043f2fdb_add_granularity_to_charts_where_missing.py +++ b/superset/migrations/versions/2021-02-04_09-34_070c043f2fdb_add_granularity_to_charts_where_missing.py @@ -113,7 +113,7 @@ def upgrade(): params["granularity"] = table_columns[0].column_name slc.params = json.dumps(params, sort_keys=True) print( - f"Set granularity for slice {slc.id} to {table_columns[0].column_name}" + f"Set granularity for slice {slc.id} to {table_columns[0].column_name}" # noqa: E501 ) slices_changed += 1 except Exception as e: diff --git a/superset/migrations/versions/2021-04-09_16-14_085f06488938_country_map_use_lowercase_country_name.py b/superset/migrations/versions/2021-04-09_16-14_085f06488938_country_map_use_lowercase_country_name.py index a16caedeb..a5a8bfbe2 100644 --- a/superset/migrations/versions/2021-04-09_16-14_085f06488938_country_map_use_lowercase_country_name.py +++ b/superset/migrations/versions/2021-04-09_16-14_085f06488938_country_map_use_lowercase_country_name.py @@ -57,7 +57,7 @@ def upgrade(): if params.get("select_country"): params["select_country"] = params["select_country"].lower() slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() @@ -78,7 +78,7 @@ def downgrade(): country = params["select_country"].lower() params["select_country"] = country[0].upper() + country[1:] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2021-08-09_17-32_07071313dd52_change_fetch_values_predicate_to_text.py b/superset/migrations/versions/2021-08-09_17-32_07071313dd52_change_fetch_values_predicate_to_text.py index b5d9715da..614789ec6 100644 --- a/superset/migrations/versions/2021-08-09_17-32_07071313dd52_change_fetch_values_predicate_to_text.py +++ b/superset/migrations/versions/2021-08-09_17-32_07071313dd52_change_fetch_values_predicate_to_text.py @@ -50,7 +50,7 @@ def remove_value_if_too_long(): bind = op.get_bind() session = db.Session(bind=bind) - # it will be easier for users to notice that their field has been deleted rather than truncated + # it will be easier for users to notice that their field has been deleted rather than truncated # noqa: E501 # so just remove it if it won't fit back into the 1000 string length column try: rows = ( diff --git a/superset/migrations/versions/2021-08-31_11-37_021b81fe4fbb_add_type_to_native_filter_configuration.py 
b/superset/migrations/versions/2021-08-31_11-37_021b81fe4fbb_add_type_to_native_filter_configuration.py index 923714828..f6630f668 100644 --- a/superset/migrations/versions/2021-08-31_11-37_021b81fe4fbb_add_type_to_native_filter_configuration.py +++ b/superset/migrations/versions/2021-08-31_11-37_021b81fe4fbb_add_type_to_native_filter_configuration.py @@ -96,7 +96,7 @@ def downgrade(): ) if not dashboard.json_metadata: logger.info( - "[RemoveTypeToNativeFilter] Skipping Dashboard json_metadata is %s", + "[RemoveTypeToNativeFilter] Skipping Dashboard json_metadata is %s", # noqa: E501 dashboard.id, dashboard.json_metadata, ) diff --git a/superset/migrations/versions/2021-10-12_11-15_32646df09c64_update_time_grain_sqla.py b/superset/migrations/versions/2021-10-12_11-15_32646df09c64_update_time_grain_sqla.py index aa87f3036..b7b398ef9 100644 --- a/superset/migrations/versions/2021-10-12_11-15_32646df09c64_update_time_grain_sqla.py +++ b/superset/migrations/versions/2021-10-12_11-15_32646df09c64_update_time_grain_sqla.py @@ -55,7 +55,7 @@ def migrate(mapping: dict[str, str]) -> None: if time_grain_sqla in mapping: params["time_grain_sqla"] = mapping[time_grain_sqla] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2022-03-02_16-41_7293b0ca7944_change_adhoc_filter_b_from_none_to_.py b/superset/migrations/versions/2022-03-02_16-41_7293b0ca7944_change_adhoc_filter_b_from_none_to_.py index 9085ea05d..cd26b5701 100644 --- a/superset/migrations/versions/2022-03-02_16-41_7293b0ca7944_change_adhoc_filter_b_from_none_to_.py +++ b/superset/migrations/versions/2022-03-02_16-41_7293b0ca7944_change_adhoc_filter_b_from_none_to_.py @@ -57,7 +57,7 @@ def upgrade(): if not adhoc_filters_b: params["adhoc_filters_b"] = [] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() @@ -76,7 +76,7 @@ def downgrade(): if not adhoc_filters_b: del params["adhoc_filters_b"] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2022-04-01_14-38_a9422eeaae74_new_dataset_models_take_2.py b/superset/migrations/versions/2022-04-01_14-38_a9422eeaae74_new_dataset_models_take_2.py index 9618dd98f..2a76a9554 100644 --- a/superset/migrations/versions/2022-04-01_14-38_a9422eeaae74_new_dataset_models_take_2.py +++ b/superset/migrations/versions/2022-04-01_14-38_a9422eeaae74_new_dataset_models_take_2.py @@ -80,11 +80,11 @@ class AuxiliaryColumnsMixin(UUIDMixin): ) @declared_attr - def created_by_fk(cls): + def created_by_fk(cls): # noqa: N805 return sa.Column(sa.Integer, sa.ForeignKey("ab_user.id"), nullable=True) @declared_attr - def changed_by_fk(cls): + def changed_by_fk(cls): # noqa: N805 return sa.Column(sa.Integer, sa.ForeignKey("ab_user.id"), nullable=True) @@ -324,7 +324,7 @@ def copy_tables(session: Session) -> None: # Tables need different uuid than datasets, since they are different # entities. When INSERT FROM SELECT, we must provide a value for `uuid`, # otherwise it'd use the default generated on Python side, which - # will cause duplicate values. They will be replaced by `assign_uuids` later. + # will cause duplicate values. They will be replaced by `assign_uuids` later. 
# noqa: E501 SqlaTable.uuid, SqlaTable.id.label("sqlatable_id"), SqlaTable.created_on, @@ -502,7 +502,7 @@ def copy_metrics(session: Session) -> None: ) -def postprocess_datasets(session: Session) -> None: +def postprocess_datasets(session: Session) -> None: # noqa: C901 """ Postprocess datasets after insertion to - Quote table names for physical datasets (if needed) @@ -620,7 +620,7 @@ def postprocess_datasets(session: Session) -> None: print("") -def postprocess_columns(session: Session) -> None: +def postprocess_columns(session: Session) -> None: # noqa: C901 """ At this step, we will - Add engine specific quotes to `expression` of physical columns @@ -809,7 +809,7 @@ def postprocess_columns(session: Session) -> None: updates["expression"] = quoted_expression # duplicate physical columns for tables physical_columns.append( - dict( + dict( # noqa: C408 created_on=created_on, changed_on=changed_on, changed_by_fk=changed_by_fk, @@ -903,7 +903,7 @@ def upgrade() -> None: assign_uuids(NewTable, session) print(">> Drop intermediate columns...") - # These columns are are used during migration, as datasets are independent of tables once created, + # These columns are are used during migration, as datasets are independent of tables once created, # noqa: E501 # dataset columns also the same to table columns. with op.batch_alter_table(NewTable.__tablename__) as batch_op: batch_op.drop_column("sqlatable_id") diff --git a/superset/migrations/versions/2022-06-27_14-59_7fb8bca906d2_permalink_rename_filterstate.py b/superset/migrations/versions/2022-06-27_14-59_7fb8bca906d2_permalink_rename_filterstate.py index 194760d2f..56fda0a48 100644 --- a/superset/migrations/versions/2022-06-27_14-59_7fb8bca906d2_permalink_rename_filterstate.py +++ b/superset/migrations/versions/2022-06-27_14-59_7fb8bca906d2_permalink_rename_filterstate.py @@ -56,7 +56,7 @@ def upgrade(): KeyValueEntry.resource == DASHBOARD_PERMALINK_RESOURCE_TYPE ) ): - value = pickle.loads(entry.value) or {} + value = pickle.loads(entry.value) or {} # noqa: S301 state = value.get("state") if state: if "filterState" in state: @@ -76,7 +76,7 @@ def downgrade(): KeyValueEntry.resource == DASHBOARD_PERMALINK_RESOURCE_TYPE ), ): - value = pickle.loads(entry.value) or {} + value = pickle.loads(entry.value) or {} # noqa: S301 state = value.get("state") if state: if "dataMask" in state: diff --git a/superset/migrations/versions/2023-02-28_14-46_c0a3ea245b61_remove_show_native_filters.py b/superset/migrations/versions/2023-02-28_14-46_c0a3ea245b61_remove_show_native_filters.py index 7be7162d2..bb3a4ff6a 100644 --- a/superset/migrations/versions/2023-02-28_14-46_c0a3ea245b61_remove_show_native_filters.py +++ b/superset/migrations/versions/2023-02-28_14-46_c0a3ea245b61_remove_show_native_filters.py @@ -54,7 +54,7 @@ def upgrade(): if "show_native_filters" in json_metadata: del json_metadata["show_native_filters"] dashboard.json_metadata = json.dumps(json_metadata) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2023-03-27_12-30_7e67aecbf3f1_chart_ds_constraint.py b/superset/migrations/versions/2023-03-27_12-30_7e67aecbf3f1_chart_ds_constraint.py index 4507ea736..e55496c86 100644 --- a/superset/migrations/versions/2023-03-27_12-30_7e67aecbf3f1_chart_ds_constraint.py +++ b/superset/migrations/versions/2023-03-27_12-30_7e67aecbf3f1_chart_ds_constraint.py @@ -96,7 +96,7 @@ def upgrade(): slc.datasource_type, ) - # need commit the 
updated values for Slice.datasource_type before creating constraint + # need commit the updated values for Slice.datasource_type before creating constraint # noqa: E501 session.commit() with op.batch_alter_table("slices") as batch_op: diff --git a/superset/migrations/versions/2023-07-18_15-30_863adcf72773_delete_obsolete_druid_nosql_slice_parameters.py b/superset/migrations/versions/2023-07-18_15-30_863adcf72773_delete_obsolete_druid_nosql_slice_parameters.py index 1c6e9c241..cc1c31d42 100644 --- a/superset/migrations/versions/2023-07-18_15-30_863adcf72773_delete_obsolete_druid_nosql_slice_parameters.py +++ b/superset/migrations/versions/2023-07-18_15-30_863adcf72773_delete_obsolete_druid_nosql_slice_parameters.py @@ -46,7 +46,7 @@ class Slice(Base): query_context = Column(Text) -def upgrade(): +def upgrade(): # noqa: C901 bind = op.get_bind() session = db.Session(bind=bind) diff --git a/superset/migrations/versions/2024-03-01_10-47_be1b217cd8cd_big_number_kpi_single_metric.py b/superset/migrations/versions/2024-03-01_10-47_be1b217cd8cd_big_number_kpi_single_metric.py index e09e6fa64..9d8196946 100644 --- a/superset/migrations/versions/2024-03-01_10-47_be1b217cd8cd_big_number_kpi_single_metric.py +++ b/superset/migrations/versions/2024-03-01_10-47_be1b217cd8cd_big_number_kpi_single_metric.py @@ -62,7 +62,7 @@ def upgrade(): del params["metrics"] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() @@ -85,7 +85,7 @@ def downgrade(): del params["metric"] slc.params = json.dumps(params, sort_keys=True) - except Exception: + except Exception: # noqa: S110 pass session.commit() diff --git a/superset/migrations/versions/2024-05-10_18-02_f84fde59123a_update_charts_with_old_time_comparison.py b/superset/migrations/versions/2024-05-10_18-02_f84fde59123a_update_charts_with_old_time_comparison.py index 2116ac4ca..57acaee9e 100644 --- a/superset/migrations/versions/2024-05-10_18-02_f84fde59123a_update_charts_with_old_time_comparison.py +++ b/superset/migrations/versions/2024-05-10_18-02_f84fde59123a_update_charts_with_old_time_comparison.py @@ -165,10 +165,10 @@ def downgrade_comparison_params(slice_params: dict[str, Any]) -> dict[str, Any]: since, until = get_since_until(temporal_range_filter["comparator"]) delta_days = (until - since).days new_until_date = start_date_offset + timedelta(days=delta_days - 1) - comparator_str = f"{start_date_offset.strftime('%Y-%m-%d')} : {new_until_date.strftime('%Y-%m-%d')}" + comparator_str = f"{start_date_offset.strftime('%Y-%m-%d')} : {new_until_date.strftime('%Y-%m-%d')}" # noqa: E501 # Generate filterOptionName - random_string = md5(comparator_str.encode("utf-8")).hexdigest() + random_string = md5(comparator_str.encode("utf-8")).hexdigest() # noqa: S324 filter_option_name = f"filter_{random_string}" adhoc_custom[0] = { diff --git a/superset/models/core.py b/superset/models/core.py index 4ffbc6aaa..3f9bd634f 100755 --- a/superset/models/core.py +++ b/superset/models/core.py @@ -458,7 +458,7 @@ class Database(Model, AuditMixinNullable, ImportExportMixin): # pylint: disable sqlalchemy_uri=sqlalchemy_uri, ) - def _get_sqla_engine( # pylint: disable=too-many-locals + def _get_sqla_engine( # pylint: disable=too-many-locals # noqa: C901 self, catalog: str | None = None, schema: str | None = None, @@ -584,7 +584,7 @@ class Database(Model, AuditMixinNullable, ImportExportMixin): # pylint: disable ) as engine: try: with closing(engine.raw_connection()) as conn: - # pre-session queries are used to set 
the selected schema and, in the + # pre-session queries are used to set the selected schema and, in the # noqa: E501 # future, the selected catalog for prequery in self.db_engine_spec.get_prequeries( database=self, @@ -626,7 +626,7 @@ class Database(Model, AuditMixinNullable, ImportExportMixin): # pylint: disable can change the default schema on a per-query basis; in other DB engine specs the default schema is defined in the SQLAlchemy URI; and in others the default schema might be determined by the database itself (like `public` for Postgres). - """ + """ # noqa: E501 return self.db_engine_spec.get_default_schema_for_query(self, query) @staticmethod @@ -661,7 +661,7 @@ class Database(Model, AuditMixinNullable, ImportExportMixin): # pylint: disable sql is broken down into smaller queries. If False, the SQL query mutator applies on the group of queries as a whole. Here the called passes the context as to whether the SQL is split or already. - """ + """ # noqa: E501 sql_mutator = config["SQL_QUERY_MUTATOR"] if sql_mutator and (is_split == config["MUTATE_AFTER_SPLIT"]): return sql_mutator( @@ -1069,7 +1069,7 @@ class Database(Model, AuditMixinNullable, ImportExportMixin): # pylint: disable return f"[{self.database_name}].(id:{self.id})" @perm.expression # type: ignore - def perm(cls) -> str: # pylint: disable=no-self-argument + def perm(cls) -> str: # pylint: disable=no-self-argument # noqa: N805 return ( "[" + cls.database_name + "].(id:" + expression.cast(cls.id, String) + ")" ) diff --git a/superset/models/dashboard.py b/superset/models/dashboard.py index 28d8aacc7..3af3a63ab 100644 --- a/superset/models/dashboard.py +++ b/superset/models/dashboard.py @@ -155,7 +155,7 @@ class Dashboard(AuditMixinNullable, ImportExportMixin, Model): primaryjoin="and_(Dashboard.id == TaggedObject.object_id, " "TaggedObject.object_type == 'dashboard')", secondaryjoin="TaggedObject.tag_id == Tag.id", - viewonly=True, # cascading deletion already handled by superset.tags.models.ObjectUpdater.after_delete + viewonly=True, # cascading deletion already handled by superset.tags.models.ObjectUpdater.after_delete # noqa: E501 ) published = Column(Boolean, default=False) is_managed_externally = Column(Boolean, nullable=False, default=False) diff --git a/superset/models/helpers.py b/superset/models/helpers.py index feb05a401..784d86d72 100644 --- a/superset/models/helpers.py +++ b/superset/models/helpers.py @@ -253,7 +253,7 @@ class ImportExportMixin: return schema @classmethod - def import_from_dict( + def import_from_dict( # noqa: C901 # pylint: disable=too-many-arguments,too-many-branches,too-many-locals cls, dict_rep: dict[Any, Any], @@ -1132,7 +1132,7 @@ class ExploreMixin: # pylint: disable=too-many-public-methods return {} @staticmethod - def filter_values_handler( # pylint: disable=too-many-arguments + def filter_values_handler( # pylint: disable=too-many-arguments # noqa: C901 values: Optional[FilterValues], operator: str, target_generic_type: utils.GenericDataType, @@ -1419,7 +1419,7 @@ class ExploreMixin: # pylint: disable=too-many-public-methods col = self.make_sqla_column_compatible(col, label) return col - def get_sqla_query( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements + def get_sqla_query( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements # noqa: C901 self, apply_fetch_values_predicate: bool = False, columns: Optional[list[Column]] = None, @@ -1553,7 +1553,7 @@ class ExploreMixin: # pylint: 
disable=too-many-public-methods # Since orderby may use adhoc metrics, too; we need to process them first orderby_exprs: list[ColumnElement] = [] - for orig_col, ascending in orderby: + for orig_col, ascending in orderby: # noqa: B007 col: Union[AdhocMetric, ColumnElement] = orig_col if isinstance(col, dict): col = cast(AdhocMetric, col) @@ -1965,7 +1965,7 @@ class ExploreMixin: # pylint: disable=too-many-public-methods self.make_orderby_compatible(select_exprs, orderby_exprs) - for col, (orig_col, ascending) in zip(orderby_exprs, orderby): + for col, (orig_col, ascending) in zip(orderby_exprs, orderby): # noqa: B007 if not db_engine_spec.allows_alias_in_orderby and isinstance(col, Label): # if engine does not allow using SELECT alias in ORDER BY # revert to the underlying column diff --git a/superset/models/slice.py b/superset/models/slice.py index cf94a50f5..1e6daa832 100644 --- a/superset/models/slice.py +++ b/superset/models/slice.py @@ -107,7 +107,7 @@ class Slice( # pylint: disable=too-many-public-methods primaryjoin="and_(Slice.id == TaggedObject.object_id, " "TaggedObject.object_type == 'chart')", secondaryjoin="TaggedObject.tag_id == Tag.id", - viewonly=True, # cascading deletion already handled by superset.tags.models.ObjectUpdater.after_delete + viewonly=True, # cascading deletion already handled by superset.tags.models.ObjectUpdater.after_delete # noqa: E501 ) table = relationship( "SqlaTable", diff --git a/superset/models/sql_lab.py b/superset/models/sql_lab.py index 1702601d0..4d443423d 100644 --- a/superset/models/sql_lab.py +++ b/superset/models/sql_lab.py @@ -420,7 +420,7 @@ class SavedQuery( primaryjoin="and_(SavedQuery.id == TaggedObject.object_id, " "TaggedObject.object_type == 'query')", secondaryjoin="TaggedObject.tag_id == Tag.id", - viewonly=True, # cascading deletion already handled by superset.tags.models.ObjectUpdater.after_delete + viewonly=True, # cascading deletion already handled by superset.tags.models.ObjectUpdater.after_delete # noqa: E501 ) export_parent = "database" diff --git a/superset/reports/logs/api.py b/superset/reports/logs/api.py index db307039b..0bf0d4ab5 100644 --- a/superset/reports/logs/api.py +++ b/superset/reports/logs/api.py @@ -147,7 +147,7 @@ class ReportExecutionLogRestApi(BaseSupersetModelRestApi): $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' - """ + """ # noqa: E501 self._apply_layered_relation_to_rison(pk, kwargs["rison"]) return self.get_list_headless(**kwargs) diff --git a/superset/reports/notifications/email.py b/superset/reports/notifications/email.py index b4514d43a..181d4aa6a 100644 --- a/superset/reports/notifications/email.py +++ b/superset/reports/notifications/email.py @@ -91,7 +91,7 @@ class EmailNotification(BaseNotification): # pylint: disable=too-few-public-met

Your report/alert was unable to be generated because of the following error: %(text)s

Please check your dashboard/chart for errors.

%(call_to_action)s

- """, + """, # noqa: E501 text=text, url=self._content.url, call_to_action=call_to_action, diff --git a/superset/reports/notifications/slackv2.py b/superset/reports/notifications/slackv2.py index 8b864f414..824d4bd32 100644 --- a/superset/reports/notifications/slackv2.py +++ b/superset/reports/notifications/slackv2.py @@ -61,7 +61,7 @@ class SlackV2Notification(SlackMixin, BaseNotification): # pylint: disable=too- Get the recipient's channel(s). :returns: A list of channel ids: "EID676L" :raises NotificationParamException or SlackApiError: If the recipient is not found - """ + """ # noqa: E501 recipient_str = json.loads(self._recipient.recipient_config_json)["target"] return get_email_address_list(recipient_str) diff --git a/superset/reports/schemas.py b/superset/reports/schemas.py index f88a38405..7078970b3 100644 --- a/superset/reports/schemas.py +++ b/superset/reports/schemas.py @@ -123,8 +123,8 @@ class ValidatorConfigJSONSchema(Schema): class ReportRecipientConfigJSONSchema(Schema): # TODO if email check validity target = fields.String() - ccTarget = fields.String() - bccTarget = fields.String() + ccTarget = fields.String() # noqa: N815 + bccTarget = fields.String() # noqa: N815 class ReportRecipientSchema(Schema): diff --git a/superset/result_set.py b/superset/result_set.py index f4303a7f8..eca00de4f 100644 --- a/superset/result_set.py +++ b/superset/result_set.py @@ -100,7 +100,7 @@ def convert_to_string(value: Any) -> str: class SupersetResultSet: - def __init__( # pylint: disable=too-many-locals + def __init__( # pylint: disable=too-many-locals # noqa: C901 self, data: DbapiResult, cursor_description: DbapiDescription, @@ -122,7 +122,7 @@ class SupersetResultSet: # fix cursor descriptor with the deduped names deduped_cursor_desc = [ - tuple([column_name, *list(description)[1:]]) + tuple([column_name, *list(description)[1:]]) # noqa: C409 for column_name, description in zip(column_names, cursor_description) ] diff --git a/superset/row_level_security/schemas.py b/superset/row_level_security/schemas.py index f02767ec1..04d7d9b9d 100644 --- a/superset/row_level_security/schemas.py +++ b/superset/row_level_security/schemas.py @@ -27,14 +27,14 @@ id_description = "Unique if of rls filter" name_description = "Name of rls filter" description_description = "Detailed description" # pylint: disable=line-too-long -filter_type_description = "Regular filters add where clauses to queries if a user belongs to a role referenced in the filter, base filters apply filters to all queries except the roles defined in the filter, and can be used to define what users can see if no RLS filters within a filter group apply to them." +filter_type_description = "Regular filters add where clauses to queries if a user belongs to a role referenced in the filter, base filters apply filters to all queries except the roles defined in the filter, and can be used to define what users can see if no RLS filters within a filter group apply to them." # noqa: E501 tables_description = "These are the tables this filter will be applied to." # pylint: disable=line-too-long -roles_description = "For regular filters, these are the roles this filter will be applied to. For base filters, these are the roles that the filter DOES NOT apply to, e.g. Admin if admin should see all data." +roles_description = "For regular filters, these are the roles this filter will be applied to. For base filters, these are the roles that the filter DOES NOT apply to, e.g. Admin if admin should see all data." 
# noqa: E501 # pylint: disable=line-too-long -group_key_description = "Filters with the same group key will be ORed together within the group, while different filter groups will be ANDed together. Undefined group keys are treated as unique groups, i.e. are not grouped together. For example, if a table has three filters, of which two are for departments Finance and Marketing (group key = 'department'), and one refers to the region Europe (group key = 'region'), the filter clause would apply the filter (department = 'Finance' OR department = 'Marketing') AND (region = 'Europe')." +group_key_description = "Filters with the same group key will be ORed together within the group, while different filter groups will be ANDed together. Undefined group keys are treated as unique groups, i.e. are not grouped together. For example, if a table has three filters, of which two are for departments Finance and Marketing (group key = 'department'), and one refers to the region Europe (group key = 'region'), the filter clause would apply the filter (department = 'Finance' OR department = 'Marketing') AND (region = 'Europe')." # noqa: E501 # pylint: disable=line-too-long -clause_description = "This is the condition that will be added to the WHERE clause. For example, to only return rows for a particular client, you might define a regular filter with the clause `client_id = 9`. To display no rows unless a user belongs to a RLS filter role, a base filter can be created with the clause `1 = 0` (always false)." +clause_description = "This is the condition that will be added to the WHERE clause. For example, to only return rows for a particular client, you might define a regular filter with the clause `client_id = 9`. To display no rows unless a user belongs to a RLS filter role, a base filter can be created with the clause `1 = 0` (always false)." # noqa: E501 get_delete_ids_schema = {"type": "array", "items": {"type": "integer"}} diff --git a/superset/security/manager.py b/superset/security/manager.py index f2dd12e13..38fff7a3f 100644 --- a/superset/security/manager.py +++ b/superset/security/manager.py @@ -1410,7 +1410,7 @@ class SupersetSecurityManager( # pylint: disable=too-many-public-methods :param old_database_name: the old database name :param target: The database object :return: A list of changed view menus (permission resource names) - """ + """ # noqa: E501 view_menu_table = self.viewmenu_model.__table__ # pylint: disable=no-member new_database_name = target.database_name old_view_menu_name = self.get_database_perm(target.id, old_database_name) @@ -1466,7 +1466,7 @@ class SupersetSecurityManager( # pylint: disable=too-many-public-methods :param old_database_name: the old database name :param target: The database object :return: A list of changed view menus (permission resource names) - """ + """ # noqa: E501 from superset.connectors.sqla.models import ( # pylint: disable=import-outside-toplevel SqlaTable, ) @@ -2144,7 +2144,7 @@ class SupersetSecurityManager( # pylint: disable=too-many-public-methods """ return [] - def raise_for_access( + def raise_for_access( # noqa: C901 # pylint: disable=too-many-arguments,too-many-branches,too-many-locals,too-many-statements self, dashboard: Optional["Dashboard"] = None, @@ -2353,7 +2353,7 @@ class SupersetSecurityManager( # pylint: disable=too-many-public-methods if dashboard: if self.is_guest_user(): - # Guest user is currently used for embedded dashboards only. If the guest + # Guest user is currently used for embedded dashboards only. 
If the guest # noqa: E501 # user doesn't have access to the dashboard, ignore all other checks. if self.has_guest_access(dashboard): return diff --git a/superset/sql/parse.py b/superset/sql/parse.py index 38367faa7..91d7b5118 100644 --- a/superset/sql/parse.py +++ b/superset/sql/parse.py @@ -553,7 +553,7 @@ class KustoKQLStatement(BaseSQLStatement[str]): | join (PopulationData) on State | project State, Population, TotalInjuries = InjuriesDirect + InjuriesIndirect - """ + """ # noqa: E501 logger.warning( "Kusto KQL doesn't support table extraction. This means that data access " "roles will not be enforced by Superset in the database." diff --git a/superset/sql_lab.py b/superset/sql_lab.py index 88e1bc1aa..25ac9a5c5 100644 --- a/superset/sql_lab.py +++ b/superset/sql_lab.py @@ -82,7 +82,7 @@ logger = logging.getLogger(__name__) BYTES_IN_MB = 1024 * 1024 -class SqlLabException(Exception): +class SqlLabException(Exception): # noqa: N818 pass @@ -106,7 +106,7 @@ def handle_query_error( query.error_message = msg query.tmp_table_name = None query.status = QueryStatus.FAILED - # TODO: re-enable this after updating the frontend to properly display timeout status + # TODO: re-enable this after updating the frontend to properly display timeout status # noqa: E501 # if query.status != QueryStatus.TIMED_OUT: # query.status = QueryStatus.FAILED if not query.end_time: @@ -197,7 +197,7 @@ def get_sql_results( # pylint: disable=too-many-arguments return handle_query_error(ex, query) -def execute_sql_statement( # pylint: disable=too-many-statements, too-many-locals +def execute_sql_statement( # pylint: disable=too-many-statements, too-many-locals # noqa: C901 sql_statement: str, query: Query, cursor: Any, @@ -254,7 +254,7 @@ def execute_sql_statement( # pylint: disable=too-many-statements, too-many-loca SupersetError( message=__( "This database does not allow for DDL/DML, and the query " - "could not be parsed to confirm it is a read-only query. Please " + "could not be parsed to confirm it is a read-only query. Please " # noqa: E501 "contact your administrator for more assistance." 
), error_type=SupersetErrorType.DML_NOT_ALLOWED_ERROR, @@ -408,7 +408,7 @@ def _serialize_and_expand_data( return (data, selected_columns, all_columns, expanded_columns) -def execute_sql_statements( +def execute_sql_statements( # noqa: C901 # pylint: disable=too-many-arguments, too-many-locals, too-many-statements, too-many-branches query_id: int, rendered_query: str, @@ -614,7 +614,7 @@ def execute_sql_statements( logger.info("Result size exceeds the allowed limit.") raise SupersetErrorException( SupersetError( - message=f"Result size ({serialized_payload_size / BYTES_IN_MB:.2f} MB) exceeds the allowed limit of {sql_lab_payload_max_mb} MB.", + message=f"Result size ({serialized_payload_size / BYTES_IN_MB:.2f} MB) exceeds the allowed limit of {sql_lab_payload_max_mb} MB.", # noqa: E501 error_type=SupersetErrorType.RESULT_TOO_LARGE_ERROR, level=ErrorLevel.ERROR, ) @@ -666,7 +666,7 @@ def execute_sql_statements( logger.info("Result size exceeds the allowed limit.") raise SupersetErrorException( SupersetError( - message=f"Result size ({serialized_payload_size / BYTES_IN_MB:.2f} MB) exceeds the allowed limit of {sql_lab_payload_max_mb} MB.", + message=f"Result size ({serialized_payload_size / BYTES_IN_MB:.2f} MB) exceeds the allowed limit of {sql_lab_payload_max_mb} MB.", # noqa: E501 error_type=SupersetErrorType.RESULT_TOO_LARGE_ERROR, level=ErrorLevel.ERROR, ) diff --git a/superset/sql_parse.py b/superset/sql_parse.py index 377f09ec3..c8836f167 100644 --- a/superset/sql_parse.py +++ b/superset/sql_parse.py @@ -318,7 +318,7 @@ class ParsedQuery: return False return True - def is_select(self) -> bool: + def is_select(self) -> bool: # noqa: C901 # make sure we strip comments; prevents a bug with comments in the CTE parsed = sqlparse.parse(self.strip_comments()) seen_select = False @@ -333,7 +333,7 @@ class ParsedQuery: ): return False except ValueError: - # sqloxide was not able to parse the query, so let's continue with + # sqloxide was not able to parse the query, so let's continue with # noqa: E501 # sqlparse pass inner_cte = self.get_inner_cte_expression(statement.tokens) or [] @@ -753,7 +753,7 @@ def insert_rls_as_subquery( return token_list -def insert_rls_in_predicate( +def insert_rls_in_predicate( # noqa: C901 token_list: TokenList, database_id: int, default_schema: str | None, @@ -807,7 +807,7 @@ def insert_rls_in_predicate( ) state = InsertRLSState.SCANNING - # Found ON clause, insert RLS. The logic for ON is more complicated than the logic + # Found ON clause, insert RLS. The logic for ON is more complicated than the logic # noqa: E501 # for WHERE because in the former the comparisons are siblings, while on the # latter they are children. 
elif ( @@ -899,7 +899,7 @@ RE_JINJA_VAR = re.compile(r"\{\{[^\{\}]+\}\}") RE_JINJA_BLOCK = re.compile(r"\{[%#][^\{\}%#]+[%#]\}") -def extract_table_references( +def extract_table_references( # noqa: C901 sql_text: str, sqla_dialect: str, show_warning: bool = True ) -> set[Table]: """ @@ -909,7 +909,7 @@ def extract_table_references( tree = None if sqloxide_parse: - for dialect, sqla_dialects in SQLOXIDE_DIALECTS.items(): + for dialect, sqla_dialects in SQLOXIDE_DIALECTS.items(): # noqa: B007 if sqla_dialect in sqla_dialects: break sql_text = RE_JINJA_BLOCK.sub(" ", sql_text) diff --git a/superset/sqllab/schemas.py b/superset/sqllab/schemas.py index f441cdf04..1e1d492b7 100644 --- a/superset/sqllab/schemas.py +++ b/superset/sqllab/schemas.py @@ -54,47 +54,47 @@ class ExecutePayloadSchema(Schema): database_id = fields.Integer(required=True) sql = fields.String(required=True) client_id = fields.String(allow_none=True) - queryLimit = fields.Integer(allow_none=True) + queryLimit = fields.Integer(allow_none=True) # noqa: N815 sql_editor_id = fields.String(allow_none=True) catalog = fields.String(allow_none=True) schema = fields.String(allow_none=True) tab = fields.String(allow_none=True) ctas_method = fields.String(allow_none=True) - templateParams = fields.String(allow_none=True) + templateParams = fields.String(allow_none=True) # noqa: N815 tmp_table_name = fields.String(allow_none=True) select_as_cta = fields.Boolean(allow_none=True) json = fields.Boolean(allow_none=True) - runAsync = fields.Boolean(allow_none=True) + runAsync = fields.Boolean(allow_none=True) # noqa: N815 expand_data = fields.Boolean(allow_none=True) class QueryResultSchema(Schema): changed_on = fields.DateTime() - dbId = fields.Integer() + dbId = fields.Integer() # noqa: N815 db = fields.String() # pylint: disable=disallowed-name - endDttm = fields.Float() - errorMessage = fields.String(allow_none=True) - executedSql = fields.String() + endDttm = fields.Float() # noqa: N815 + errorMessage = fields.String(allow_none=True) # noqa: N815 + executedSql = fields.String() # noqa: N815 id = fields.String() - queryId = fields.Integer() + queryId = fields.Integer() # noqa: N815 limit = fields.Integer() - limitingFactor = fields.String() + limitingFactor = fields.String() # noqa: N815 progress = fields.Integer() rows = fields.Integer() schema = fields.String() ctas = fields.Boolean() - serverId = fields.Integer() + serverId = fields.Integer() # noqa: N815 sql = fields.String() - sqlEditorId = fields.String() - startDttm = fields.Float() + sqlEditorId = fields.String() # noqa: N815 + startDttm = fields.Float() # noqa: N815 state = fields.String() tab = fields.String() - tempSchema = fields.String(allow_none=True) - tempTable = fields.String(allow_none=True) - userId = fields.Integer() + tempSchema = fields.String(allow_none=True) # noqa: N815 + tempTable = fields.String(allow_none=True) # noqa: N815 + userId = fields.Integer() # noqa: N815 user = fields.String() - resultsKey = fields.String() - trackingUrl = fields.String(allow_none=True) + resultsKey = fields.String() # noqa: N815 + trackingUrl = fields.String(allow_none=True) # noqa: N815 extra = fields.Dict(keys=fields.String()) diff --git a/superset/stats_logger.py b/superset/stats_logger.py index fc223f752..4cf2a7354 100644 --- a/superset/stats_logger.py +++ b/superset/stats_logger.py @@ -105,5 +105,5 @@ try: def gauge(self, key: str, value: float) -> None: self.client.gauge(key, value) -except Exception: # pylint: disable=broad-except +except Exception: # pylint: 
disable=broad-except # noqa: S110 pass diff --git a/superset/tags/models.py b/superset/tags/models.py index 31975c3e8..9223e4ad2 100644 --- a/superset/tags/models.py +++ b/superset/tags/models.py @@ -65,7 +65,7 @@ class TagType(enum.Enum): Objects (queries, charts, dashboards, and datasets) will have with implicit tags based on metadata: types, owners and who favorited them. This way, user "alice" can find all their objects by querying for the tag `owner:alice`. - """ + """ # noqa: E501 # pylint: disable=invalid-name # explicit tags, added manually by the owner diff --git a/superset/tasks/cache.py b/superset/tasks/cache.py index 18ca7d716..b1eaff58b 100644 --- a/superset/tasks/cache.py +++ b/superset/tasks/cache.py @@ -230,10 +230,10 @@ def fetch_url(data: str, headers: dict[str, str]) -> dict[str, str]: headers.update(fetch_csrf_token(headers)) logger.info("Fetching %s with payload %s", url, data) - req = request.Request( + req = request.Request( # noqa: S310 url, data=bytes(data, "utf-8"), headers=headers, method="PUT" ) - response = request.urlopen( # pylint: disable=consider-using-with + response = request.urlopen( # pylint: disable=consider-using-with # noqa: S310 req, timeout=600 ) logger.info( diff --git a/superset/tasks/utils.py b/superset/tasks/utils.py index 5e3bc1480..4815b7034 100644 --- a/superset/tasks/utils.py +++ b/superset/tasks/utils.py @@ -41,7 +41,7 @@ logger.setLevel(logging.INFO) # pylint: disable=too-many-branches -def get_executor( +def get_executor( # noqa: C901 executor_types: list[ExecutorType], model: Dashboard | ReportSchedule | Slice, current_user: str | None = None, @@ -115,9 +115,9 @@ def fetch_csrf_token( """ url = get_url_path("SecurityRestApi.csrf_token") logger.info("Fetching %s", url) - req = request.Request(url, headers=headers, method="GET") + req = request.Request(url, headers=headers, method="GET") # noqa: S310 response: HTTPResponse - with request.urlopen(req, timeout=600) as response: + with request.urlopen(req, timeout=600) as response: # noqa: S310 body = response.read().decode("utf-8") session_cookie: Optional[str] = None cookie_headers = response.headers.get_all("set-cookie") diff --git a/superset/utils/cache.py b/superset/utils/cache.py index 15f334e12..87a74dd6e 100644 --- a/superset/utils/cache.py +++ b/superset/utils/cache.py @@ -115,7 +115,7 @@ def memoized_func(key: str, cache: Cache = cache_manager.cache) -> Callable[..., :param key: a callable function that takes function arguments and returns the caching key. :param cache: a FlaskCache instance that will store the cache. 
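N815 flags mixedCase names defined in class scope. The SQL Lab schemas keep fields such as queryLimit and runAsync because, without an explicit data_key, marshmallow uses the attribute name as the JSON key, so renaming the attributes would change the wire format the frontend relies on; each field therefore gets a per-line suppression instead. A small sketch of the trade-off, assuming marshmallow and using a made-up schema:

from marshmallow import Schema, fields


class DemoPayloadSchema(Schema):
    # Attribute name mirrors the JSON key exactly, so N815 is silenced per line.
    queryLimit = fields.Integer(allow_none=True)  # noqa: N815

    # The alternative: a snake_case attribute mapped to the camelCase key via data_key.
    run_async = fields.Boolean(allow_none=True, data_key="runAsync")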
- """ + """ # noqa: E501 def wrap(f: Callable[..., Any]) -> Callable[..., Any]: def wrapped_f(*args: Any, **kwargs: Any) -> Any: @@ -144,7 +144,7 @@ def memoized_func(key: str, cache: Cache = cache_manager.cache) -> Callable[..., return wrap -def etag_cache( +def etag_cache( # noqa: C901 cache: Cache = cache_manager.cache, get_last_modified: Callable[..., datetime] | None = None, max_age: int | float = app.config["CACHE_DEFAULT_TIMEOUT"], @@ -164,9 +164,9 @@ def etag_cache( """ - def decorator(f: Callable[..., Any]) -> Callable[..., Any]: + def decorator(f: Callable[..., Any]) -> Callable[..., Any]: # noqa: C901 @wraps(f) - def wrapper(*args: Any, **kwargs: Any) -> Response: + def wrapper(*args: Any, **kwargs: Any) -> Response: # noqa: C901 # Check if the user can access the resource if raise_for_access: try: diff --git a/superset/utils/core.py b/superset/utils/core.py index ada40053c..7dc2270a0 100644 --- a/superset/utils/core.py +++ b/superset/utils/core.py @@ -881,7 +881,7 @@ def form_data_to_adhoc(form_data: dict[str, Any], clause: str) -> AdhocFilterCla return result -def merge_extra_form_data(form_data: dict[str, Any]) -> None: +def merge_extra_form_data(form_data: dict[str, Any]) -> None: # noqa: C901 """ Merge extra form data (appends and overrides) into the main payload and add applied time extras to the payload. @@ -935,7 +935,7 @@ def merge_extra_form_data(form_data: dict[str, Any]) -> None: adhoc_filter["comparator"] = form_data["time_range"] -def merge_extra_filters(form_data: dict[str, Any]) -> None: +def merge_extra_filters(form_data: dict[str, Any]) -> None: # noqa: C901 # extra_filters are temporary/contextual filters (using the legacy constructs) # that are external to the slice definition. We use those for dynamic # interactive filters. @@ -1365,11 +1365,11 @@ def time_function( return (stop - start) * 1000.0, response -def MediumText() -> Variant: # pylint:disable=invalid-name +def MediumText() -> Variant: # pylint:disable=invalid-name # noqa: N802 return Text().with_variant(MEDIUMTEXT(), "mysql") -def LongText() -> Variant: # pylint:disable=invalid-name +def LongText() -> Variant: # pylint:disable=invalid-name # noqa: N802 return Text().with_variant(LONGTEXT(), "mysql") @@ -1658,7 +1658,7 @@ class DateColumn: def normalize_dttm_col( df: pd.DataFrame, - dttm_cols: tuple[DateColumn, ...] = tuple(), + dttm_cols: tuple[DateColumn, ...] 
= tuple(), # noqa: C408 ) -> None: for _col in dttm_cols: if _col.col_label not in df.columns: diff --git a/superset/utils/dashboard_filter_scopes_converter.py b/superset/utils/dashboard_filter_scopes_converter.py index f3c09bc83..d303b74a9 100644 --- a/superset/utils/dashboard_filter_scopes_converter.py +++ b/superset/utils/dashboard_filter_scopes_converter.py @@ -24,7 +24,7 @@ from superset.utils import json logger = logging.getLogger(__name__) -def convert_filter_scopes( +def convert_filter_scopes( # noqa: C901 json_metadata: dict[Any, Any], filter_boxes: list[Slice] ) -> dict[int, dict[str, dict[str, Any]]]: filter_scopes = {} diff --git a/superset/utils/date_parser.py b/superset/utils/date_parser.py index a3736b0ab..63589601a 100644 --- a/superset/utils/date_parser.py +++ b/superset/utils/date_parser.py @@ -77,7 +77,7 @@ def parse_human_datetime(human_readable: str) -> datetime: def normalize_time_delta(human_readable: str) -> dict[str, int]: - x_unit = r"^\s*([0-9]+)\s+(second|minute|hour|day|week|month|quarter|year)s?\s+(ago|later)*$" # pylint: disable=line-too-long,useless-suppression + x_unit = r"^\s*([0-9]+)\s+(second|minute|hour|day|week|month|quarter|year)s?\s+(ago|later)*$" # pylint: disable=line-too-long,useless-suppression # noqa: E501 matched = re.match(x_unit, human_readable, re.IGNORECASE) if not matched: raise TimeDeltaAmbiguousError(human_readable) @@ -143,7 +143,7 @@ def parse_past_timedelta( ) -def get_since_until( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements +def get_since_until( # pylint: disable=too-many-arguments,too-many-locals,too-many-branches,too-many-statements # noqa: C901 time_range: str | None = None, since: str | None = None, until: str | None = None, @@ -194,49 +194,49 @@ def get_since_until( # pylint: disable=too-many-arguments,too-many-locals,too-m and time_range.startswith("previous calendar week") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, WEEK), WEEK) : DATETRUNC(DATETIME('today'), WEEK)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, WEEK), WEEK) : DATETRUNC(DATETIME('today'), WEEK)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("previous calendar month") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, MONTH), MONTH) : DATETRUNC(DATETIME('today'), MONTH)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, MONTH), MONTH) : DATETRUNC(DATETIME('today'), MONTH)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("previous calendar year") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, YEAR), YEAR) : DATETRUNC(DATETIME('today'), YEAR)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), -1, YEAR), YEAR) : DATETRUNC(DATETIME('today'), YEAR)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("Current day") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, DAY), DAY) : DATETRUNC(DATEADD(DATETIME('today'), 1, DAY), DAY)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, DAY), DAY) : DATETRUNC(DATEADD(DATETIME('today'), 1, DAY), DAY)" # 
pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("Current week") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, WEEK), WEEK) : DATETRUNC(DATEADD(DATETIME('today'), 1, WEEK), WEEK)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, WEEK), WEEK) : DATETRUNC(DATEADD(DATETIME('today'), 1, WEEK), WEEK)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("Current month") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, MONTH), MONTH) : DATETRUNC(DATEADD(DATETIME('today'), 1, MONTH), MONTH)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, MONTH), MONTH) : DATETRUNC(DATEADD(DATETIME('today'), 1, MONTH), MONTH)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("Current quarter") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, QUARTER), QUARTER) : DATETRUNC(DATEADD(DATETIME('today'), 1, QUARTER), QUARTER)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, QUARTER), QUARTER) : DATETRUNC(DATEADD(DATETIME('today'), 1, QUARTER), QUARTER)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if ( time_range and time_range.startswith("Current year") and separator not in time_range ): - time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, YEAR), YEAR) : DATETRUNC(DATEADD(DATETIME('today'), 1, YEAR), YEAR)" # pylint: disable=line-too-long,useless-suppression + time_range = "DATETRUNC(DATEADD(DATETIME('today'), 0, YEAR), YEAR) : DATETRUNC(DATEADD(DATETIME('today'), 1, YEAR), YEAR)" # pylint: disable=line-too-long,useless-suppression # noqa: E501 if time_range and separator in time_range: time_range_lookup = [ @@ -492,20 +492,20 @@ class EvalHolidayFunc: # pylint: disable=too-few-public-methods @lru_cache(maxsize=LRU_CACHE_MAX_SIZE) def datetime_parser() -> ParseResults: # pylint: disable=too-many-locals ( # pylint: disable=invalid-name - DATETIME, - DATEADD, - DATEDIFF, - DATETRUNC, - LASTDAY, - HOLIDAY, - YEAR, - QUARTER, - MONTH, - WEEK, - DAY, - HOUR, - MINUTE, - SECOND, + DATETIME, # noqa: N806 + DATEADD, # noqa: N806 + DATEDIFF, # noqa: N806 + DATETRUNC, # noqa: N806 + LASTDAY, # noqa: N806 + HOLIDAY, # noqa: N806 + YEAR, # noqa: N806 + QUARTER, # noqa: N806 + MONTH, # noqa: N806 + WEEK, # noqa: N806 + DAY, # noqa: N806 + HOUR, # noqa: N806 + MINUTE, # noqa: N806 + SECOND, # noqa: N806 ) = map( CaselessKeyword, "datetime dateadd datediff datetrunc lastday holiday " diff --git a/superset/utils/encrypt.py b/superset/utils/encrypt.py index 899b3391a..85d45f13a 100644 --- a/superset/utils/encrypt.py +++ b/superset/utils/encrypt.py @@ -133,7 +133,7 @@ class SecretsMigrator: def _select_columns_from_table( conn: Connection, column_names: list[str], table_name: str ) -> Row: - return conn.execute(f"SELECT id, {','.join(column_names)} FROM {table_name}") + return conn.execute(f"SELECT id, {','.join(column_names)} FROM {table_name}") # noqa: S608 def _re_encrypt_row( self, @@ -183,7 +183,7 @@ class SecretsMigrator: ) logger.info("Processing table: %s", table_name) conn.execute( - text(f"UPDATE {table_name} SET {set_cols} WHERE id = :id"), + text(f"UPDATE {table_name} SET {set_cols} WHERE id = :id"), # noqa: S608 id=row["id"], 
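S608 flags SQL assembled with f-strings because string-built queries are a classic injection vector. In the secrets migrator the interpolated pieces appear to be table and column names taken from the application's own metadata rather than user input, and the row id still travels as a bound parameter, so the check is suppressed rather than rewritten. A simplified sketch of that split, with a hypothetical table assumed to exist and the identifier assumed trusted:

from sqlalchemy import create_engine, text

engine = create_engine("sqlite://")


def update_row(table_name: str, payload: str, row_id: int) -> None:
    # The identifier comes from internal metadata (hence the suppression);
    # the values are passed as bound parameters and are never interpolated.
    stmt = text(f"UPDATE {table_name} SET payload = :payload WHERE id = :id")  # noqa: S608
    with engine.begin() as conn:
        conn.execute(stmt, {"payload": payload, "id": row_id})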
**re_encrypted_columns, ) diff --git a/superset/utils/feature_flag_manager.py b/superset/utils/feature_flag_manager.py index ea295c776..ec8009b75 100644 --- a/superset/utils/feature_flag_manager.py +++ b/superset/utils/feature_flag_manager.py @@ -36,7 +36,7 @@ class FeatureFlagManager: if self._get_feature_flags_func: return self._get_feature_flags_func(deepcopy(self._feature_flags)) if callable(self._is_feature_enabled_func): - return dict( + return dict( # noqa: C417 map( lambda kv: (kv[0], self._is_feature_enabled_func(kv[0], kv[1])), self._feature_flags.items(), diff --git a/superset/utils/hashing.py b/superset/utils/hashing.py index 86c04657c..d5c6de68e 100644 --- a/superset/utils/hashing.py +++ b/superset/utils/hashing.py @@ -21,7 +21,7 @@ from superset.utils import json def md5_sha_from_str(val: str) -> str: - return hashlib.md5(val.encode("utf-8")).hexdigest() + return hashlib.md5(val.encode("utf-8")).hexdigest() # noqa: S324 def md5_sha_from_dict( diff --git a/superset/utils/json.py b/superset/utils/json.py index 0c62d7640..5bb92fead 100644 --- a/superset/utils/json.py +++ b/superset/utils/json.py @@ -69,7 +69,7 @@ def format_timedelta(time_delta: timedelta) -> str: return str(time_delta) -def base_json_conv(obj: Any) -> Any: +def base_json_conv(obj: Any) -> Any: # noqa: C901 """ Tries to convert additional types to JSON compatible forms. diff --git a/superset/utils/mock_data.py b/superset/utils/mock_data.py index cffa89719..88c9d5a57 100644 --- a/superset/utils/mock_data.py +++ b/superset/utils/mock_data.py @@ -64,54 +64,54 @@ MAXIMUM_DATE = date.today() days_range = (MAXIMUM_DATE - MINIMUM_DATE).days -def get_type_generator( # pylint: disable=too-many-return-statements,too-many-branches +def get_type_generator( # pylint: disable=too-many-return-statements,too-many-branches # noqa: C901 sqltype: sqlalchemy.sql.sqltypes, ) -> Callable[[], Any]: if isinstance(sqltype, sqlalchemy.dialects.mysql.types.TINYINT): - return lambda: random.choice([0, 1]) + return lambda: random.choice([0, 1]) # noqa: S311 if isinstance( sqltype, (sqlalchemy.sql.sqltypes.INTEGER, sqlalchemy.sql.sqltypes.Integer) ): - return lambda: random.randrange(2147483647) + return lambda: random.randrange(2147483647) # noqa: S311 if isinstance(sqltype, sqlalchemy.sql.sqltypes.BIGINT): - return lambda: random.randrange(sys.maxsize) + return lambda: random.randrange(sys.maxsize) # noqa: S311 if isinstance( sqltype, (sqlalchemy.sql.sqltypes.VARCHAR, sqlalchemy.sql.sqltypes.String) ): - length = random.randrange(sqltype.length or 255) + length = random.randrange(sqltype.length or 255) # noqa: S311 length = max(8, length) # for unique values length = min(100, length) # for FAB perms - return lambda: "".join(random.choices(string.ascii_letters, k=length)) + return lambda: "".join(random.choices(string.ascii_letters, k=length)) # noqa: S311 if isinstance( sqltype, (sqlalchemy.sql.sqltypes.TEXT, sqlalchemy.sql.sqltypes.Text) ): - length = random.randrange(65535) + length = random.randrange(65535) # noqa: S311 # "practicality beats purity" length = max(length, 2048) - return lambda: "".join(random.choices(string.ascii_letters, k=length)) + return lambda: "".join(random.choices(string.ascii_letters, k=length)) # noqa: S311 if isinstance( sqltype, (sqlalchemy.sql.sqltypes.BOOLEAN, sqlalchemy.sql.sqltypes.Boolean) ): - return lambda: random.choice([True, False]) + return lambda: random.choice([True, False]) # noqa: S311 if isinstance( sqltype, (sqlalchemy.sql.sqltypes.FLOAT, sqlalchemy.sql.sqltypes.REAL) ): - return lambda: 
random.uniform(-sys.maxsize, sys.maxsize) + return lambda: random.uniform(-sys.maxsize, sys.maxsize) # noqa: S311 if isinstance(sqltype, sqlalchemy.sql.sqltypes.DATE): - return lambda: MINIMUM_DATE + timedelta(days=random.randrange(days_range)) + return lambda: MINIMUM_DATE + timedelta(days=random.randrange(days_range)) # noqa: S311 if isinstance(sqltype, sqlalchemy.sql.sqltypes.TIME): return lambda: time( - random.randrange(24), - random.randrange(60), - random.randrange(60), + random.randrange(24), # noqa: S311 + random.randrange(60), # noqa: S311 + random.randrange(60), # noqa: S311 ) if isinstance( @@ -123,7 +123,7 @@ def get_type_generator( # pylint: disable=too-many-return-statements,too-many-b ), ): return lambda: datetime.fromordinal(MINIMUM_DATE.toordinal()) + timedelta( - seconds=random.randrange(days_range * 86400) + seconds=random.randrange(days_range * 86400) # noqa: S311 ) if isinstance(sqltype, sqlalchemy.sql.sqltypes.Numeric): @@ -133,7 +133,7 @@ def get_type_generator( # pylint: disable=too-many-return-statements,too-many-b if isinstance(sqltype, sqlalchemy.sql.sqltypes.JSON): return lambda: { - "".join(random.choices(string.ascii_letters, k=8)): random.randrange(65535) + "".join(random.choices(string.ascii_letters, k=8)): random.randrange(65535) # noqa: S311 for _ in range(10) } @@ -144,7 +144,7 @@ def get_type_generator( # pylint: disable=too-many-return-statements,too-many-b sqlalchemy_utils.types.encrypted.encrypted_type.EncryptedType, ), ): - length = random.randrange(sqltype.length or 255) + length = random.randrange(sqltype.length or 255) # noqa: S311 return lambda: os.urandom(length) if isinstance(sqltype, sqlalchemy_utils.types.uuid.UUIDType): @@ -154,7 +154,7 @@ def get_type_generator( # pylint: disable=too-many-return-statements,too-many-b return lambda: str(uuid4()) if isinstance(sqltype, sqlalchemy.sql.sqltypes.BLOB): - length = random.randrange(sqltype.length or 255) + length = random.randrange(sqltype.length or 255) # noqa: S311 return lambda: os.urandom(length) logger.warning( @@ -281,12 +281,12 @@ def add_sample_rows(model: type[Model], count: int) -> Iterator[Model]: def get_valid_foreign_key(column: Column) -> Any: foreign_key = list(column.foreign_keys)[0] table_name, column_name = foreign_key.target_fullname.split(".", 1) - return db.engine.execute(f"SELECT {column_name} FROM {table_name} LIMIT 1").scalar() + return db.engine.execute(f"SELECT {column_name} FROM {table_name} LIMIT 1").scalar() # noqa: S608 def generate_value(column: Column) -> Any: if hasattr(column.type, "enums"): - return random.choice(column.type.enums) + return random.choice(column.type.enums) # noqa: S311 json_as_string = "json" in column.name.lower() and isinstance( column.type, sqlalchemy.sql.sqltypes.Text diff --git a/superset/utils/network.py b/superset/utils/network.py index fea3cfc6b..8654fcf90 100644 --- a/superset/utils/network.py +++ b/superset/utils/network.py @@ -63,7 +63,7 @@ def is_host_up(host: str) -> bool: param = "-n" if platform.system().lower() == "windows" else "-c" command = ["ping", param, "1", host] try: - output = subprocess.call(command, timeout=PING_TIMEOUT) + output = subprocess.call(command, timeout=PING_TIMEOUT) # noqa: S603 except subprocess.TimeoutExpired: return False diff --git a/superset/utils/oauth2.py b/superset/utils/oauth2.py index 95db2921f..0918f0792 100644 --- a/superset/utils/oauth2.py +++ b/superset/utils/oauth2.py @@ -59,7 +59,7 @@ def get_oauth2_access_token( simultaneous requests for refreshing a stale token; in that case only the first 
process to acquire the lock will perform the refresh, and othe process should find a a valid token when they retry. - """ + """ # noqa: E501 # pylint: disable=import-outside-toplevel from superset.models.core import DatabaseUserOAuth2Tokens diff --git a/superset/utils/pandas_postprocessing/boxplot.py b/superset/utils/pandas_postprocessing/boxplot.py index f9fed40e5..eb93b2084 100644 --- a/superset/utils/pandas_postprocessing/boxplot.py +++ b/superset/utils/pandas_postprocessing/boxplot.py @@ -25,7 +25,7 @@ from superset.utils.core import PostProcessingBoxplotWhiskerType from superset.utils.pandas_postprocessing.aggregate import aggregate -def boxplot( +def boxplot( # noqa: C901 df: DataFrame, groupby: list[str], metrics: list[str], diff --git a/superset/utils/pandas_postprocessing/contribution.py b/superset/utils/pandas_postprocessing/contribution.py index ad8b07086..3c0ea04f1 100644 --- a/superset/utils/pandas_postprocessing/contribution.py +++ b/superset/utils/pandas_postprocessing/contribution.py @@ -75,7 +75,7 @@ def contribution( if len(rename_columns) != len(actual_columns): raise InvalidPostProcessingError( _( - "`rename_columns` must have the same length as `columns` + `time_shift_columns`." + "`rename_columns` must have the same length as `columns` + `time_shift_columns`." # noqa: E501 ) ) # limit to selected columns @@ -110,7 +110,7 @@ def get_column_groups( maps to a tuple of original and renamed columns without a time shift. 'time_shifts' maps to a dictionary where each key is a time shift and each value is a tuple of original and renamed columns with that time shift. - """ + """ # noqa: E501 result: dict[str, Any] = { "non_time_shift": ([], []), # take the form of ([A, B, C], [X, Y, Z]) "time_shifts": {}, # take the form of {A: ([X], [Y]), B: ([Z], [W])} @@ -146,7 +146,7 @@ def calculate_row_contribution( :param df: The DataFrame to calculate contributions for. :param columns: A list of column names to calculate contributions for. :param rename_columns: A list of new column names for the contribution columns. - """ + """ # noqa: E501 # calculate the row sum considering only the selected columns row_sum_except_selected = df.loc[:, columns].sum(axis=1) diff --git a/superset/utils/pandas_postprocessing/histogram.py b/superset/utils/pandas_postprocessing/histogram.py index dbe93ef32..74fc68e22 100644 --- a/superset/utils/pandas_postprocessing/histogram.py +++ b/superset/utils/pandas_postprocessing/histogram.py @@ -43,7 +43,7 @@ def histogram( Returns: DataFrame: A DataFrame where each row corresponds to a group (or the entire DataFrame if no grouping is performed), and each column corresponds to a histogram bin. The values are the counts in each bin. 
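S311 warns that the random module is not suitable for cryptographic or security purposes. The suppressions in mock_data.py and the test data generators assert that these values are synthetic fixtures, not secrets. Where unpredictability actually matters, the standard library's secrets module is the right tool; a short sketch of the distinction:

import random
import secrets

# Fine for fake rows and fixtures: fast, reproducible, not security sensitive.
sample_age = random.randint(1, 100)  # noqa: S311

# For tokens, passwords, or anything an attacker must not guess, use secrets.
api_token = secrets.token_urlsafe(32)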
- """ + """ # noqa: E501 if groupby is None: groupby = [] diff --git a/superset/utils/pandas_postprocessing/pivot.py b/superset/utils/pandas_postprocessing/pivot.py index 28e8ff380..aadde058b 100644 --- a/superset/utils/pandas_postprocessing/pivot.py +++ b/superset/utils/pandas_postprocessing/pivot.py @@ -87,7 +87,7 @@ def pivot( # pylint: disable=too-many-arguments if not drop_missing_columns and columns: for row in df[columns].itertuples(): for metric in aggfunc.keys(): - series_set.add(tuple([metric]) + tuple(row[1:])) + series_set.add(tuple([metric]) + tuple(row[1:])) # noqa: C409 df = df.pivot_table( values=aggfunc.keys(), diff --git a/superset/utils/public_interfaces.py b/superset/utils/public_interfaces.py index 85622d1c3..6a9fb6133 100644 --- a/superset/utils/public_interfaces.py +++ b/superset/utils/public_interfaces.py @@ -40,13 +40,13 @@ def compute_hash(obj: Callable[..., Any]) -> str: def compute_func_hash(function: Callable[..., Any]) -> str: - hashed = md5() + hashed = md5() # noqa: S324 hashed.update(str(signature(function)).encode()) return b85encode(hashed.digest()).decode("utf-8") def compute_class_hash(class_: Callable[..., Any]) -> str: - hashed = md5() + hashed = md5() # noqa: S324 public_methods = sorted( [ (name, method) diff --git a/superset/utils/webdriver.py b/superset/utils/webdriver.py index 6a334e1c5..04339fb97 100644 --- a/superset/utils/webdriver.py +++ b/superset/utils/webdriver.py @@ -32,7 +32,7 @@ from selenium.common.exceptions import ( from selenium.webdriver import chrome, firefox, FirefoxProfile from selenium.webdriver.common.by import By from selenium.webdriver.remote.webdriver import WebDriver -from selenium.webdriver.support import expected_conditions as EC +from selenium.webdriver.support import expected_conditions as EC # noqa: N812 from selenium.webdriver.support.ui import WebDriverWait from superset import feature_flag_manager @@ -133,7 +133,7 @@ class WebDriverPlaywright(WebDriverProxy): return error_messages - def get_screenshot( # pylint: disable=too-many-locals, too-many-statements + def get_screenshot( # pylint: disable=too-many-locals, too-many-statements # noqa: C901 self, url: str, element_name: str, user: User ) -> bytes | None: with sync_playwright() as playwright: @@ -162,7 +162,7 @@ class WebDriverPlaywright(WebDriverProxy): ) except PlaywrightTimeout: logger.exception( - "Web event %s not detected. Page %s might not have been fully loaded", + "Web event %s not detected. Page %s might not have been fully loaded", # noqa: E501 current_app.config["SCREENSHOT_PLAYWRIGHT_WAIT_EVENT"], url, ) @@ -226,7 +226,7 @@ class WebDriverPlaywright(WebDriverProxy): unexpected_errors = WebDriverPlaywright.find_unexpected_errors(page) if unexpected_errors: logger.warning( - "%i errors found in the screenshot. URL: %s. Errors are: %s", + "%i errors found in the screenshot. URL: %s. 
Errors are: %s", # noqa: E501 len(unexpected_errors), url, unexpected_errors, @@ -285,11 +285,11 @@ class WebDriverSelenium(WebDriverProxy): # and catch-all exceptions try: retry_call(driver.close, max_tries=tries) - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: S110 pass try: driver.quit() - except Exception: # pylint: disable=broad-except + except Exception: # pylint: disable=broad-except # noqa: S110 pass @staticmethod diff --git a/superset/views/core.py b/superset/views/core.py index e187c3312..8a56befe0 100755 --- a/superset/views/core.py +++ b/superset/views/core.py @@ -222,7 +222,7 @@ class Superset(BaseSupersetView): response_type = cached.get("response_type") # Set form_data in Flask Global as it is used as a fallback # for async queries with jinja context - setattr(g, "form_data", form_data) + g.form_data = form_data datasource_id, datasource_type = get_datasource_info(None, None, form_data) viz_obj = get_viz( @@ -394,7 +394,7 @@ class Superset(BaseSupersetView): ) @deprecated() # pylint: disable=too-many-locals,too-many-branches,too-many-statements - def explore( + def explore( # noqa: C901 self, datasource_type: str | None = None, datasource_id: int | None = None, @@ -441,7 +441,7 @@ class Superset(BaseSupersetView): if form_data_key: flash( _( - "Form data not found in cache, reverting to dataset metadata." + "Form data not found in cache, reverting to dataset metadata." # noqa: E501 ) ) @@ -473,10 +473,10 @@ class Superset(BaseSupersetView): if not viz_type and datasource and datasource.default_endpoint: return redirect(datasource.default_endpoint) - selectedColumns = [] + selectedColumns = [] # noqa: N806 if "selectedColumns" in form_data: - selectedColumns = form_data.pop("selectedColumns") + selectedColumns = form_data.pop("selectedColumns") # noqa: N806 if "viz_type" not in form_data: form_data["viz_type"] = app.config["DEFAULT_VIZ_TYPE"] @@ -582,7 +582,7 @@ class Superset(BaseSupersetView): ) @staticmethod - def save_or_overwrite_slice( + def save_or_overwrite_slice( # noqa: C901 # pylint: disable=too-many-arguments,too-many-locals slc: Slice | None, slice_add_perm: bool, @@ -792,7 +792,7 @@ class Superset(BaseSupersetView): try: dashboard.raise_for_access() except SupersetSecurityException as ex: - # anonymous users should get the login screen, others should go to dashboard list + # anonymous users should get the login screen, others should go to dashboard list # noqa: E501 if g.user is None or g.user.is_anonymous: redirect_url = f"{appbuilder.get_url_for_login}?next={request.url}" warn_msg = "Users must be logged in to view this dashboard." diff --git a/superset/views/database/mixins.py b/superset/views/database/mixins.py index a908dcba5..af3ba2782 100644 --- a/superset/views/database/mixins.py +++ b/superset/views/database/mixins.py @@ -148,7 +148,7 @@ class DatabaseMixin: "6. The ``disable_data_preview`` field is a boolean specifying whether or" "not data preview queries will be run when fetching table metadata in" "SQL Lab." - "7. The ``disable_drill_to_detail`` field is a boolean specifying whether or" + "7. The ``disable_drill_to_detail`` field is a boolean specifying whether or" # noqa: E501 "not drill to detail is disabled for the database." "8. 
The ``allow_multi_catalog`` indicates if the database allows changing " "the default catalog when running queries and creating datasets.", diff --git a/superset/views/error_handling.py b/superset/views/error_handling.py index 2c538b434..946142b0f 100644 --- a/superset/views/error_handling.py +++ b/superset/views/error_handling.py @@ -129,7 +129,7 @@ def handle_api_exception( return functools.update_wrapper(wraps, f) -def set_app_error_handlers(app: Flask) -> None: +def set_app_error_handlers(app: Flask) -> None: # noqa: C901 """ Set up error handlers for the Flask app Refer to SIP-40 and SIP-41 for more details on the error handling strategy diff --git a/superset/views/sql_lab/schemas.py b/superset/views/sql_lab/schemas.py index 399665afc..ea184cb39 100644 --- a/superset/views/sql_lab/schemas.py +++ b/superset/views/sql_lab/schemas.py @@ -22,14 +22,14 @@ class SqlJsonPayloadSchema(Schema): database_id = fields.Integer(required=True) sql = fields.String(required=True) client_id = fields.String(allow_none=True) - queryLimit = fields.Integer(allow_none=True) + queryLimit = fields.Integer(allow_none=True) # noqa: N815 sql_editor_id = fields.String(allow_none=True) schema = fields.String(allow_none=True) tab = fields.String(allow_none=True) ctas_method = fields.String(allow_none=True) - templateParams = fields.String(allow_none=True) + templateParams = fields.String(allow_none=True) # noqa: N815 tmp_table_name = fields.String(allow_none=True) select_as_cta = fields.Boolean(allow_none=True) json = fields.Boolean(allow_none=True) - runAsync = fields.Boolean(allow_none=True) + runAsync = fields.Boolean(allow_none=True) # noqa: N815 expand_data = fields.Boolean(allow_none=True) diff --git a/superset/views/utils.py b/superset/views/utils.py index 8c45b774a..f2d06e7a3 100644 --- a/superset/views/utils.py +++ b/superset/views/utils.py @@ -180,7 +180,7 @@ def get_form_data( # Fallback to using the Flask globals (used for cache warmup and async queries) if not form_data and hasattr(g, "form_data"): - form_data = getattr(g, "form_data") + form_data = g.form_data # chart data API requests are JSON json_data = form_data["queries"][0] if "queries" in form_data else {} form_data.update(json_data) @@ -329,7 +329,7 @@ def get_dashboard_extra_filters( return [] -def build_extra_filters( # pylint: disable=too-many-locals,too-many-nested-blocks +def build_extra_filters( # pylint: disable=too-many-locals,too-many-nested-blocks # noqa: C901 layout: dict[str, dict[str, Any]], filter_scopes: dict[str, dict[str, Any]], default_filters: dict[str, dict[str, list[Any]]], diff --git a/superset/viz.py b/superset/viz.py index f36a17a34..a8f9e7664 100644 --- a/superset/viz.py +++ b/superset/viz.py @@ -297,7 +297,7 @@ class BaseViz: # pylint: disable=too-many-public-methods if not df.empty: utils.normalize_dttm_col( df=df, - dttm_cols=tuple( + dttm_cols=tuple( # noqa: C409 [ DateColumn.get_legacy_time_column( timestamp_format=timestamp_format, @@ -514,7 +514,7 @@ class BaseViz: # pylint: disable=too-many-public-methods return payload @deprecated(deprecated_in="3.0") - def get_df_payload( # pylint: disable=too-many-statements + def get_df_payload( # pylint: disable=too-many-statements # noqa: C901 self, query_obj: QueryObjectDict | None = None, **kwargs: Any ) -> dict[str, Any]: """Handles caching around the df payload retrieval""" @@ -753,7 +753,7 @@ class CalHeatmapViz(BaseViz): is_timeseries = True @deprecated(deprecated_in="3.0") - def get_data(self, df: pd.DataFrame) -> VizData: # pylint: disable=too-many-locals + def 
get_data(self, df: pd.DataFrame) -> VizData: # pylint: disable=too-many-locals # noqa: C901 if df.empty: return None @@ -940,7 +940,7 @@ class NVD3TimeSeriesViz(NVD3Viz): return query_obj @deprecated(deprecated_in="3.0") - def to_series( # pylint: disable=too-many-branches + def to_series( # pylint: disable=too-many-branches # noqa: C901 self, df: pd.DataFrame, classed: str = "", title_suffix: str = "" ) -> list[dict[str, Any]]: cols = [] diff --git a/tests/common/logger_utils.py b/tests/common/logger_utils.py index b38f7a0b0..5a77faf36 100644 --- a/tests/common/logger_utils.py +++ b/tests/common/logger_utils.py @@ -74,7 +74,7 @@ def log( return decorator(decorated) -def _make_decorator( +def _make_decorator( # noqa: C901 prefix_enter_msg: str, suffix_enter_msg: str, with_arguments_msg_part, @@ -82,7 +82,7 @@ def _make_decorator( suffix_out_msg: str, return_value_msg_part, ) -> Decorated: - def decorator(decorated: Decorated): + def decorator(decorated: Decorated): # noqa: C901 decorated_logger = _get_logger(decorated) def decorator_class(clazz: type[Any]) -> type[Any]: @@ -96,7 +96,7 @@ def _make_decorator( for member_name, member in members: setattr(clazz, member_name, decorator_func(member, f"{clazz.__name__}")) - def decorator_func(func: Function, prefix_name: str = "") -> Function: + def decorator_func(func: Function, prefix_name: str = "") -> Function: # noqa: C901 func_name = func.__name__ func_signature: Signature = signature(func) is_fixture = hasattr(func, _FIXTURE_ATTRIBUTE) diff --git a/tests/conftest.py b/tests/conftest.py index 3a712ec58..7becf7d7a 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -28,7 +28,7 @@ from __future__ import annotations from typing import Callable, TYPE_CHECKING from unittest.mock import MagicMock, Mock, PropertyMock -from pytest import fixture +from pytest import fixture # noqa: PT013 from tests.example_data.data_loading.pandas.pandas_data_loader import PandasDataLoader from tests.example_data.data_loading.pandas.pands_data_loading_conf import ( diff --git a/tests/example_data/data_generator/birth_names/birth_names_generator.py b/tests/example_data/data_generator/birth_names/birth_names_generator.py index a8e8c45e2..70c7f0c68 100644 --- a/tests/example_data/data_generator/birth_names/birth_names_generator.py +++ b/tests/example_data/data_generator/birth_names/birth_names_generator.py @@ -69,14 +69,14 @@ class BirthNamesGenerator(ExampleDataGenerator): return datetime(year, 1, 1, 0, 0, 0) def generate_row(self, dt: datetime) -> dict[Any, Any]: - gender = choice([BOY, GIRL]) - num = randint(1, 100000) + gender = choice([BOY, GIRL]) # noqa: S311 + num = randint(1, 100000) # noqa: S311 return { DS: dt, GENDER: gender, NAME: self._names_generator.generate(), NUM: num, - STATE: choice(US_STATES), + STATE: choice(US_STATES), # noqa: S311 NUM_BOYS: num if gender == BOY else 0, NUM_GIRLS: num if gender == GIRL else 0, } diff --git a/tests/example_data/data_generator/string_generator.py b/tests/example_data/data_generator/string_generator.py index 103ceefcb..fee57fcf0 100644 --- a/tests/example_data/data_generator/string_generator.py +++ b/tests/example_data/data_generator/string_generator.py @@ -28,6 +28,6 @@ class StringGenerator: self._max_length = max_length def generate(self) -> str: - rv_string_length = randint(self._min_length, self._max_length) - randomized_letters = choices(self._seed_letters, k=rv_string_length) + rv_string_length = randint(self._min_length, self._max_length) # noqa: S311 + randomized_letters = choices(self._seed_letters, 
k=rv_string_length) # noqa: S311 return "".join(randomized_letters) diff --git a/tests/fixtures/birth_names.py b/tests/fixtures/birth_names.py index 5a0135b45..4fc4aa50a 100644 --- a/tests/fixtures/birth_names.py +++ b/tests/fixtures/birth_names.py @@ -18,9 +18,9 @@ from __future__ import annotations from typing import Callable, TYPE_CHECKING -from pytest import fixture +from pytest import fixture # noqa: PT013 -from tests.example_data.data_generator.birth_names.birth_names_generator_factory import ( +from tests.example_data.data_generator.birth_names.birth_names_generator_factory import ( # noqa: E501 BirthNamesGeneratorFactory, ) from tests.example_data.data_loading.data_definitions.birth_names import ( diff --git a/tests/integration_tests/advanced_data_type/api_tests.py b/tests/integration_tests/advanced_data_type/api_tests.py index 4e0ad45d8..1a1f74455 100644 --- a/tests/integration_tests/advanced_data_type/api_tests.py +++ b/tests/integration_tests/advanced_data_type/api_tests.py @@ -76,7 +76,7 @@ CHARTS_FIXTURE_COUNT = 10 def test_types_type_request(test_client, login_as_admin): """ Advanced Data Type API: Test to see if the API call returns all the valid advanced data types - """ + """ # noqa: E501 uri = "api/v1/advanced_data_type/types" # noqa: F541 response_value = test_client.get(uri) data = json.loads(response_value.data.decode("utf-8")) @@ -87,7 +87,7 @@ def test_types_type_request(test_client, login_as_admin): def test_types_convert_bad_request_no_vals(test_client, login_as_admin): """ Advanced Data Type API: Test request to see if it behaves as expected when no values are passed - """ + """ # noqa: E501 arguments = {"type": "type", "values": []} uri = f"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}" response_value = test_client.get(uri) @@ -97,7 +97,7 @@ def test_types_convert_bad_request_no_vals(test_client, login_as_admin): def test_types_convert_bad_request_no_type(test_client, login_as_admin): """ Advanced Data Type API: Test request to see if it behaves as expected when no type is passed - """ + """ # noqa: E501 arguments = {"type": "", "values": [1]} uri = f"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}" response_value = test_client.get(uri) @@ -112,7 +112,7 @@ def test_types_convert_bad_request_type_not_found(test_client, login_as_admin): """ Advanced Data Type API: Test request to see if it behaves as expected when passed in type is not found/not valid - """ + """ # noqa: E501 arguments = {"type": "not_found", "values": [1]} uri = f"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}" response_value = test_client.get(uri) @@ -127,7 +127,7 @@ def test_types_convert_request(test_client, login_as_admin): """ Advanced Data Type API: Test request to see if it behaves as expected when a valid type and valid values are passed in - """ + """ # noqa: E501 arguments = {"type": "type", "values": [1]} uri = f"api/v1/advanced_data_type/convert?q={prison.dumps(arguments)}" response_value = test_client.get(uri) diff --git a/tests/integration_tests/annotation_layers/api_tests.py b/tests/integration_tests/annotation_layers/api_tests.py index 49c88ae29..8dc639d96 100644 --- a/tests/integration_tests/annotation_layers/api_tests.py +++ b/tests/integration_tests/annotation_layers/api_tests.py @@ -474,7 +474,7 @@ class TestAnnotationLayerApi(SupersetTestCase): ] for order_column in order_columns: arguments = {"order_column": order_column, "order_direction": "asc"} - uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}" 
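PT013, from the flake8-pytest-style rules Ruff enforces here, prefers a plain `import pytest` so decorators read as pytest.fixture at the point of use; the existing `from pytest import fixture` imports in conftest.py and the fixtures modules are suppressed rather than churned. A minimal hypothetical test module written the way PT013 expects:

import pytest  # preferred over "from pytest import fixture", which trips PT013


@pytest.fixture
def sample_rows():
    # Throwaway fixture used only to show the decorator spelling PT013 expects.
    return [1, 2, 3]


def test_sample_rows(sample_rows):
    assert sample_rows == [1, 2, 3]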
+ uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}" # noqa: E501 rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 @@ -490,7 +490,7 @@ class TestAnnotationLayerApi(SupersetTestCase): {"col": "short_descr", "opr": "annotation_all_text", "value": "2"} ] } - uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}" + uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}" # noqa: E501 rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 @@ -502,7 +502,7 @@ class TestAnnotationLayerApi(SupersetTestCase): {"col": "short_descr", "opr": "annotation_all_text", "value": "descr3"} ] } - uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}" + uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(arguments)}" # noqa: E501 rv = self.get_assert_metric(uri, "get_list") assert rv.status_code == 200 @@ -719,7 +719,7 @@ class TestAnnotationLayerApi(SupersetTestCase): annotations = query_annotations.all() annotations_ids = [annotation.id for annotation in annotations] self.login(ADMIN_USERNAME) - uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(annotations_ids)}" + uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(annotations_ids)}" # noqa: E501 rv = self.client.delete(uri) assert rv.status_code == 200 deleted_annotations = query_annotations.all() @@ -745,6 +745,6 @@ class TestAnnotationLayerApi(SupersetTestCase): annotations_ids.append(max_id + 1) self.login(ADMIN_USERNAME) - uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(annotations_ids)}" + uri = f"api/v1/annotation_layer/{layer.id}/annotation/?q={prison.dumps(annotations_ids)}" # noqa: E501 rv = self.client.delete(uri) assert rv.status_code == 404 diff --git a/tests/integration_tests/annotation_layers/fixtures.py b/tests/integration_tests/annotation_layers/fixtures.py index a3fb6e184..e591e4947 100644 --- a/tests/integration_tests/annotation_layers/fixtures.py +++ b/tests/integration_tests/annotation_layers/fixtures.py @@ -66,7 +66,7 @@ def _insert_annotation( return annotation -@pytest.fixture() +@pytest.fixture def create_annotation_layers(app_context: AppContext): """ Creates ANNOTATION_LAYERS_COUNT-1 layers with no annotations diff --git a/tests/integration_tests/async_events/api_tests.py b/tests/integration_tests/async_events/api_tests.py index 8397b8cf9..80e9fa9b3 100644 --- a/tests/integration_tests/async_events/api_tests.py +++ b/tests/integration_tests/async_events/api_tests.py @@ -77,13 +77,13 @@ class TestAsyncEventApi(SupersetTestCase): ( "1607477697866-0", { - "data": '{"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f", "job_id": "10a0bd9a-03c8-4737-9345-f4234ba86512", "user_id": "1", "status": "done", "errors": [], "result_url": "/api/v1/chart/data/qc-ecd766dd461f294e1bcdaa321e0e8463"}' + "data": '{"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f", "job_id": "10a0bd9a-03c8-4737-9345-f4234ba86512", "user_id": "1", "status": "done", "errors": [], "result_url": "/api/v1/chart/data/qc-ecd766dd461f294e1bcdaa321e0e8463"}' # noqa: E501 }, ), ( "1607477697993-0", { - "data": '{"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f", "job_id": "027cbe49-26ce-4813-bb5a-0b95a626b84c", "user_id": "1", "status": "done", "errors": [], "result_url": "/api/v1/chart/data/qc-1bbc3a240e7039ba4791aefb3a7ee80d"}' + "data": '{"channel_id": "1095c1c9-b6b1-444d-aa83-8e323b32831f", "job_id": 
"027cbe49-26ce-4813-bb5a-0b95a626b84c", "user_id": "1", "status": "done", "errors": [], "result_url": "/api/v1/chart/data/qc-1bbc3a240e7039ba4791aefb3a7ee80d"}' # noqa: E501 }, ), ] @@ -100,7 +100,7 @@ class TestAsyncEventApi(SupersetTestCase): "errors": [], "id": "1607477697866-0", "job_id": "10a0bd9a-03c8-4737-9345-f4234ba86512", - "result_url": "/api/v1/chart/data/qc-ecd766dd461f294e1bcdaa321e0e8463", + "result_url": "/api/v1/chart/data/qc-ecd766dd461f294e1bcdaa321e0e8463", # noqa: E501 "status": "done", "user_id": "1", }, @@ -109,7 +109,7 @@ class TestAsyncEventApi(SupersetTestCase): "errors": [], "id": "1607477697993-0", "job_id": "027cbe49-26ce-4813-bb5a-0b95a626b84c", - "result_url": "/api/v1/chart/data/qc-1bbc3a240e7039ba4791aefb3a7ee80d", + "result_url": "/api/v1/chart/data/qc-1bbc3a240e7039ba4791aefb3a7ee80d", # noqa: E501 "status": "done", "user_id": "1", }, diff --git a/tests/integration_tests/base_tests.py b/tests/integration_tests/base_tests.py index 9cddba6bf..f922b3796 100644 --- a/tests/integration_tests/base_tests.py +++ b/tests/integration_tests/base_tests.py @@ -49,7 +49,7 @@ from superset.utils.database import get_example_database from superset.views.base_api import BaseSupersetModelRestApi FAKE_DB_NAME = "fake_db_100" -DEFAULT_PASSWORD = "general" +DEFAULT_PASSWORD = "general" # noqa: S105 test_client = app.test_client() @@ -555,7 +555,7 @@ class SupersetTestCase(TestCase): dashboard_title: str, slug: Optional[str], owners: list[int], - roles: list[int] = [], + roles: list[int] = [], # noqa: B006 created_by=None, slices: Optional[list[Slice]] = None, position_json: str = "", @@ -565,8 +565,8 @@ class SupersetTestCase(TestCase): certified_by: Optional[str] = None, certification_details: Optional[str] = None, ) -> Dashboard: - obj_owners = list() - obj_roles = list() + obj_owners = list() # noqa: C408 + obj_roles = list() # noqa: C408 slices = slices or [] for owner in owners: user = db.session.query(security_manager.user_model).get(owner) @@ -595,7 +595,7 @@ class SupersetTestCase(TestCase): def get_list( self, asset_type: str, - filter: dict[str, Any] = {}, + filter: dict[str, Any] = {}, # noqa: B006 username: str = ADMIN_USERNAME, ) -> Response: """ diff --git a/tests/integration_tests/celery_tests.py b/tests/integration_tests/celery_tests.py index 53690443d..2d060be90 100644 --- a/tests/integration_tests/celery_tests.py +++ b/tests/integration_tests/celery_tests.py @@ -97,13 +97,13 @@ def run_sql( db_id = get_example_database().id return test_client.post( "/api/v1/sqllab/execute/", - json=dict( + json=dict( # noqa: C408 database_id=db_id, sql=sql, runAsync=async_, select_as_cta=cta, tmp_table_name=tmp_table, - client_id="".join(random.choice(string.ascii_lowercase) for i in range(5)), + client_id="".join(random.choice(string.ascii_lowercase) for i in range(5)), # noqa: S311 ctas_method=ctas_method, ), ).json @@ -162,11 +162,11 @@ def test_run_sync_query_dont_exist(test_client, ctas_method): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. 
Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1005, - "message": "Issue 1005 - The table was deleted or renamed in the database.", + "message": "Issue 1005 - The table was deleted or renamed in the database.", # noqa: E501 }, ], } @@ -180,7 +180,7 @@ def test_run_sync_query_dont_exist(test_client, ctas_method): "issue_codes": [ { "code": 1002, - "message": "Issue 1002 - The database returned an unexpected error.", + "message": "Issue 1002 - The database returned an unexpected error.", # noqa: E501 } ], "engine_name": engine_name, @@ -219,7 +219,7 @@ def test_run_sync_query_cta_no_data(test_client): @pytest.mark.usefixtures("load_birth_names_data", "login_as_admin") @pytest.mark.parametrize("ctas_method", [CtasMethod.TABLE, CtasMethod.VIEW]) -@mock.patch( +@mock.patch( # noqa: PT008 "superset.sqllab.sqllab_execution_context.get_cta_schema_name", lambda d, u, s, sql: CTAS_SCHEMA_NAME, ) @@ -250,7 +250,7 @@ def test_run_sync_query_cta_config(test_client, ctas_method): @pytest.mark.usefixtures("load_birth_names_data", "login_as_admin") @pytest.mark.parametrize("ctas_method", [CtasMethod.TABLE, CtasMethod.VIEW]) -@mock.patch( +@mock.patch( # noqa: PT008 "superset.sqllab.sqllab_execution_context.get_cta_schema_name", lambda d, u, s, sql: CTAS_SCHEMA_NAME, ) diff --git a/tests/integration_tests/charts/api_tests.py b/tests/integration_tests/charts/api_tests.py index fc00dd3bc..78fc28a2a 100644 --- a/tests/integration_tests/charts/api_tests.py +++ b/tests/integration_tests/charts/api_tests.py @@ -93,7 +93,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): cache_manager.data_cache.clear() yield - @pytest.fixture() + @pytest.fixture def create_charts(self): with self.create_app().app_context(): charts = [] @@ -117,7 +117,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): db.session.delete(fav_chart) db.session.commit() - @pytest.fixture() + @pytest.fixture def create_charts_created_by_gamma(self): with self.create_app().app_context(): charts = [] @@ -130,7 +130,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): db.session.delete(chart) db.session.commit() - @pytest.fixture() + @pytest.fixture def create_certified_charts(self): with self.create_app().app_context(): certified_charts = [] @@ -153,7 +153,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): db.session.delete(chart) db.session.commit() - @pytest.fixture() + @pytest.fixture def create_chart_with_report(self): with self.create_app().app_context(): admin = self.get_user("admin") @@ -173,7 +173,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): db.session.delete(chart) db.session.commit() - @pytest.fixture() + @pytest.fixture def add_dashboard_to_chart(self): with self.create_app().app_context(): admin = self.get_user("admin") @@ -348,7 +348,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): """ admin = self.get_user("admin") chart_count = 4 - chart_ids = list() + chart_ids = list() # noqa: C408 for chart_name_index in range(chart_count): chart_ids.append( self.insert_chart(f"title{chart_name_index}", [admin.id], 1, admin).id @@ -462,7 +462,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase): """ gamma_id = self.get_user("gamma").id chart_count = 4 - chart_ids = list() + chart_ids = list() # noqa: C408 for chart_name_index in range(chart_count): chart_ids.append( 
                 self.insert_chart(f"title{chart_name_index}", [gamma_id], 1).id
@@ -492,7 +492,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
             "alpha2", "password", "Alpha", email="alpha2@superset.org"
         )
         chart = self.insert_chart("title", [user_alpha1.id], 1)
-        self.login(username="alpha2", password="password")
+        self.login(username="alpha2", password="password")  # noqa: S106
         uri = f"api/v1/chart/{chart.id}"
         rv = self.delete_assert_metric(uri, "delete")
         assert rv.status_code == 403
@@ -513,7 +513,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
         )

         chart_count = 4
-        charts = list()
+        charts = list()  # noqa: C408
         for chart_name_index in range(chart_count):
             charts.append(
                 self.insert_chart(f"title{chart_name_index}", [user_alpha1.id], 1)
@@ -521,7 +521,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):

         owned_chart = self.insert_chart("title_owned", [user_alpha2.id], 1)

-        self.login(username="alpha2", password="password")
+        self.login(username="alpha2", password="password")  # noqa: S106

         # verify we can't delete not owned charts
         arguments = [chart.id for chart in charts]
@@ -922,7 +922,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
         )
         chart = self.insert_chart("title", [user_alpha1.id], 1)

-        self.login(username="alpha2", password="password")
+        self.login(username="alpha2", password="password")  # noqa: S106
         chart_data = {"slice_name": "title1_changed"}
         uri = f"api/v1/chart/{chart.id}"
         rv = self.put_assert_metric(uri, chart_data, "put")
@@ -960,7 +960,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
         new_dashboard.published = False
         db.session.add(new_dashboard)

-        self.login(username="alpha1", password="password")
+        self.login(username="alpha1", password="password")  # noqa: S106
         chart_data_with_invalid_dashboard = {
             "slice_name": "title1_changed",
             "dashboards": [original_dashboard.id, 0],
@@ -1208,9 +1208,9 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):

         # Compare results
         assert data_by_id["count"] == data_by_name["count"], len(expected_charts)
-        assert set(chart["id"] for chart in data_by_id["result"]) == set(
+        assert set(chart["id"] for chart in data_by_id["result"]) == set(  # noqa: C401
             chart["id"] for chart in data_by_name["result"]
-        ), set(chart.id for chart in expected_charts)
+        ), set(chart.id for chart in expected_charts)  # noqa: C401

     def test_get_charts_changed_on(self):
         """
@@ -1255,7 +1255,7 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
         data = json.loads(rv.data.decode("utf-8"))
         assert data["count"] == 5

-    @pytest.fixture()
+    @pytest.fixture
     def load_energy_charts(self):
         with app.app_context():
             admin = self.get_user("admin")
@@ -1824,11 +1824,11 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
                     "error_type": "GENERIC_COMMAND_ERROR",
                     "level": "warning",
                     "extra": {
-                        "charts/imported_chart.yaml": "Chart already exists and `overwrite=true` was not passed",
+                        "charts/imported_chart.yaml": "Chart already exists and `overwrite=true` was not passed",  # noqa: E501
                         "issue_codes": [
                             {
                                 "code": 1010,
-                                "message": "Issue 1010 - Superset encountered an error while running a command.",
+                                "message": "Issue 1010 - Superset encountered an error while running a command.",  # noqa: E501
                             }
                         ],
                     },
@@ -2383,4 +2383,4 @@ class TestChartApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCase):
         data = json.loads(rv.data.decode("utf-8"))
         result = data["result"]
         excluded_key = "query"
-        assert all([excluded_key not in query for query in result])
+        assert all([excluded_key not in query for query in result])  # noqa: C419
diff --git a/tests/integration_tests/charts/commands_tests.py b/tests/integration_tests/charts/commands_tests.py
index d66980585..bcdd79b13 100644
--- a/tests/integration_tests/charts/commands_tests.py
+++ b/tests/integration_tests/charts/commands_tests.py
@@ -113,7 +113,7 @@ class TestExportChartsCommand(SupersetTestCase):
         example_chart = db.session.query(Slice).all()[0]
         command = ExportChartsCommand([example_chart.id])
         contents = command.run()
-        with self.assertRaises(ChartNotFoundError):
+        with self.assertRaises(ChartNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.security.manager.g")
@@ -122,7 +122,7 @@ class TestExportChartsCommand(SupersetTestCase):
         mock_g.user = security_manager.find_user("admin")
         command = ExportChartsCommand([-1])
         contents = command.run()
-        with self.assertRaises(ChartNotFoundError):
+        with self.assertRaises(ChartNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.security.manager.g")
@@ -200,7 +200,7 @@ class TestImportChartsCommand(SupersetTestCase):
             "color_picker": {"a": 1, "b": 135, "g": 122, "r": 0},
             "datasource": dataset.uid if dataset else None,
             "js_columns": ["color"],
-            "js_data_mutator": "data => data.map(d => ({\\n ...d,\\n color: colors.hexToRGB(d.extraProps.color)\\n}));",
+            "js_data_mutator": "data => data.map(d => ({\\n ...d,\\n color: colors.hexToRGB(d.extraProps.color)\\n}));",  # noqa: E501
             "js_onclick_href": "",
             "js_tooltip": "",
             "line_column": "path_json",
@@ -417,7 +417,7 @@ class TestChartsUpdateCommand(SupersetTestCase):

 class TestChartWarmUpCacheCommand(SupersetTestCase):
     def test_warm_up_cache_command_chart_not_found(self):
-        with self.assertRaises(WarmUpCacheChartNotFoundError):
+        with self.assertRaises(WarmUpCacheChartNotFoundError):  # noqa: PT027
             ChartWarmUpCacheCommand(99999, None, None).run()

     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
@@ -469,16 +469,16 @@ class TestFavoriteChartCommand(SupersetTestCase):
         example_chart_id = 1234

         with override_user(security_manager.find_user("admin")):
-            with self.assertRaises(ChartNotFoundError):
+            with self.assertRaises(ChartNotFoundError):  # noqa: PT027
                 AddFavoriteChartCommand(example_chart_id).run()

-            with self.assertRaises(ChartNotFoundError):
+            with self.assertRaises(ChartNotFoundError):  # noqa: PT027
                 DelFavoriteChartCommand(example_chart_id).run()

     @pytest.mark.usefixtures("load_energy_table_with_slice")
     @patch("superset.daos.base.BaseDAO.find_by_id")
     def test_fave_unfave_chart_command_forbidden(self, mock_find_by_id):
-        """Test that faving / unfaving raises an exception for a chart the user doesn't own"""
+        """Test that faving / unfaving raises an exception for a chart the user doesn't own"""  # noqa: E501
         with self.client.application.test_request_context():
             example_chart = db.session.query(Slice).all()[0]
             mock_find_by_id.return_value = example_chart
@@ -487,8 +487,8 @@ class TestFavoriteChartCommand(SupersetTestCase):
             assert example_chart is not None

             with override_user(security_manager.find_user("gamma")):
-                with self.assertRaises(ChartForbiddenError):
+                with self.assertRaises(ChartForbiddenError):  # noqa: PT027
                     AddFavoriteChartCommand(example_chart.id).run()

-                with self.assertRaises(ChartForbiddenError):
+                with self.assertRaises(ChartForbiddenError):  # noqa: PT027
                     DelFavoriteChartCommand(example_chart.id).run()
diff --git a/tests/integration_tests/charts/data/api_tests.py b/tests/integration_tests/charts/data/api_tests.py
index b922f16cb..dd9480c12 100644
--- a/tests/integration_tests/charts/data/api_tests.py
+++ b/tests/integration_tests/charts/data/api_tests.py
@@ -124,7 +124,7 @@ class BaseTestChartDataApi(SupersetTestCase):
             GROUP BY name
             ORDER BY sum__num DESC
             LIMIT 100) AS inner__query
-        """
+        """  # noqa: S608
         resp = self.run_sql(sql, client_id, raise_on_error=True)
         db.session.query(Query).delete()
         db.session.commit()
@@ -832,7 +832,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):

     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_with_series_limit(self):
-        SERIES_LIMIT = 5
+        SERIES_LIMIT = 5  # noqa: N806
         self.query_context_payload["queries"][0]["columns"] = ["state", "name"]
         self.query_context_payload["queries"][0]["series_columns"] = ["name"]
         self.query_context_payload["queries"][0]["series_limit"] = SERIES_LIMIT
@@ -844,7 +844,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):
         unique_names = {row["name"] for row in data}
         self.maxDiff = None
         assert len(unique_names) == SERIES_LIMIT
-        assert {column for column in data[0].keys()} == {"state", "name", "sum__num"}
+        assert {column for column in data[0].keys()} == {"state", "name", "sum__num"}  # noqa: C416

     @pytest.mark.usefixtures(
         "create_annotation_layers", "load_birth_names_dashboard_with_slices"
     )
@@ -935,7 +935,7 @@ class TestPostChartDataApi(BaseTestChartDataApi):
         assert rv.status_code == 200
         result = rv.json["result"][0]
         data = result["data"]
-        assert {col for col in data[0].keys()} == {"foo", "bar", "state", "count"}
+        assert {col for col in data[0].keys()} == {"foo", "bar", "state", "count"}  # noqa: C416
         # make sure results and query parameters are unescaped
         assert {row["foo"] for row in data} == {":foo"}
         assert {row["bar"] for row in data} == {":bar:"}
@@ -1255,7 +1255,7 @@ class TestGetChartDataApi(BaseTestChartDataApi):
         response_payload = json.loads(rv.data.decode("utf-8"))
         result = response_payload["result"][0]
         data = result["data"]
-        assert {column for column in data[0].keys()} == {"male_or_female", "sum__num"}
+        assert {column for column in data[0].keys()} == {"male_or_female", "sum__num"}  # noqa: C416
         unique_genders = {row["male_or_female"] for row in data}
         assert unique_genders == {"male", "female"}
         assert result["applied_filters"] == [{"column": "male_or_female"}]
@@ -1275,7 +1275,7 @@ class TestGetChartDataApi(BaseTestChartDataApi):
         response_payload = json.loads(rv.data.decode("utf-8"))
         result = response_payload["result"][0]
         data = result["data"]
-        assert {column for column in data[0].keys()} == {"male_or_female", "sum__num"}
+        assert {column for column in data[0].keys()} == {"male_or_female", "sum__num"}  # noqa: C416
         unique_genders = {row["male_or_female"] for row in data}
         assert unique_genders == {"male", "female"}
         assert result["applied_filters"] == [{"column": "male_or_female"}]
@@ -1287,7 +1287,7 @@ class TestGetChartDataApi(BaseTestChartDataApi):
     ]


-@pytest.fixture()
+@pytest.fixture
 def physical_query_context(physical_dataset) -> dict[str, Any]:
     return {
         "datasource": {
@@ -1342,7 +1342,7 @@ def test_time_filter_with_grain(test_client, login_as_admin, physical_query_cont
     backend = get_example_database().backend
     if backend == "sqlite":
         assert (
-            "DATETIME(col5, 'start of day', -strftime('%w', col5) || ' days') >="
+            "DATETIME(col5, 'start of day', -strftime('%w', col5) || ' days') >="  # noqa: E501
             in query
         )
     elif backend == "mysql":
diff --git a/tests/integration_tests/charts/schema_tests.py b/tests/integration_tests/charts/schema_tests.py
index 46ba792e5..9064674e4 100644
--- a/tests/integration_tests/charts/schema_tests.py
+++ b/tests/integration_tests/charts/schema_tests.py
@@ -44,7 +44,7 @@ class TestSchema(SupersetTestCase):
         # too low limit and offset
         payload["queries"][0]["row_limit"] = -1
         payload["queries"][0]["row_offset"] = -1
-        with self.assertRaises(ValidationError) as context:
+        with self.assertRaises(ValidationError) as context:  # noqa: PT027
             _ = ChartDataQueryContextSchema().load(payload)
         assert "row_limit" in context.exception.messages["queries"][0]
         assert "row_offset" in context.exception.messages["queries"][0]
diff --git a/tests/integration_tests/commands_test.py b/tests/integration_tests/commands_test.py
index c3015520b..d3cc01b4f 100644
--- a/tests/integration_tests/commands_test.py
+++ b/tests/integration_tests/commands_test.py
@@ -62,7 +62,7 @@ class TestImportAssetsCommand(SupersetTestCase):
     def setUp(self):
         user = self.get_user("admin")
         self.user = user
-        setattr(g, "user", user)
+        g.user = user

     @patch("superset.commands.database.importers.v1.utils.add_permissions")
     def test_import_assets(self, mock_add_permissions):
diff --git a/tests/integration_tests/conftest.py b/tests/integration_tests/conftest.py
index c0d960f49..08770c164 100644
--- a/tests/integration_tests/conftest.py
+++ b/tests/integration_tests/conftest.py
@@ -59,22 +59,22 @@ def test_client(app_context: AppContext):
 def login_as(test_client: FlaskClient[Any]):
     """Fixture with app context and logged in admin user."""

-    def _login_as(username: str, password: str = "general"):
+    def _login_as(username: str, password: str = "general"):  # noqa: S107
         login(test_client, username=username, password=password)

-    yield _login_as
+    return _login_as
     # no need to log out as both app_context and test_client are
     # function level fixtures anyway


 @pytest.fixture
 def login_as_admin(login_as: Callable[..., None]):
-    yield login_as("admin")  # type: ignore
+    return login_as("admin")


 @pytest.fixture
 def create_user(app_context: AppContext):
-    def _create_user(username: str, role: str = "Admin", password: str = "general"):
+    def _create_user(username: str, role: str = "Admin", password: str = "general"):  # noqa: S107
         security_manager.add_user(
             username,
             "firstname",
@@ -124,7 +124,7 @@ def setup_sample_data() -> Any:
     with app.app_context():
         try:
             setup_presto_if_needed()
-        except Exception:
+        except Exception:  # noqa: S110
            pass

        from superset.examples.css_templates import load_css_templates
@@ -139,7 +139,7 @@ def setup_sample_data() -> Any:
        from sqlalchemy.ext import declarative

        sqla_base = declarative.declarative_base()
-        # uses sorted_tables to drop in proper order without violating foreign constrains
+        # uses sorted_tables to drop in proper order without violating foreign constrains  # noqa: E501
        for table in sqla_base.metadata.sorted_tables:
            table.__table__.drop()
        db.session.commit()
@@ -157,8 +157,8 @@ def drop_from_schema(engine: Engine, schema_name: str):


 @pytest.fixture(scope="session")
-def example_db_provider() -> Callable[[], Database]:  # type: ignore
-    class _example_db_provider:
+def example_db_provider() -> Callable[[], Database]:
+    class _example_db_provider:  # noqa: N801
         _db: Database | None = None

         def __call__(self) -> Database:
@@ -171,7 +171,7 @@ def example_db_provider() -> Callable[[], Database]:  # type: ignore

         def _load_lazy_data_to_decouple_from_session(self) -> None:
             self._db._get_sqla_engine()  # type: ignore
-            self._db.backend  # type: ignore
+            self._db.backend  # type: ignore  # noqa: B018

         def remove(self) -> None:
             if self._db:
@@ -180,7 +180,7 @@ def example_db_provider() -> Callable[[], Database]:  # type: ignore

     _instance = _example_db_provider()

-    yield _instance
+    return _instance
     # TODO - can not use it until referenced objects will be deleted.
     # _instance.remove()

@@ -315,7 +315,7 @@ def virtual_dataset():
             SELECT 8 as col1, 'i' as col2, 1.8, NULL, '2000-01-09 00:00:00', 9
             UNION ALL
             SELECT 9 as col1, 'j' as col2, 1.9, NULL, '2000-01-10 00:00:00', 10
-            """)
+            """)  # noqa: E501
         ),
         database=get_example_database(),
     )
@@ -323,7 +323,7 @@ def virtual_dataset():
     TableColumn(column_name="col2", type="VARCHAR(255)", table=dataset)
     TableColumn(column_name="col3", type="DECIMAL(4,2)", table=dataset)
     TableColumn(column_name="col4", type="VARCHAR(255)", table=dataset)
-    # Different database dialect datetime type is not consistent, so temporarily use varchar
+    # Different database dialect datetime type is not consistent, so temporarily use varchar  # noqa: E501
     TableColumn(column_name="col5", type="VARCHAR(255)", table=dataset)
     TableColumn(column_name="col6", type="INTEGER", table=dataset)

@@ -355,7 +355,7 @@ def virtual_dataset_with_comments():
             UNION ALL/*COMMENT*/
             SELECT 1 as col1, 'f' as col2, 1.5, NULL, '2000-01-06 00:00:00', 6 --COMMENT
             UNION ALL--COMMENT
-            SELECT * FROM cte --COMMENT""")
+            SELECT * FROM cte --COMMENT""")  # noqa: E501
         ),
         database=get_example_database(),
     )
@@ -363,7 +363,7 @@ def virtual_dataset_with_comments():
     TableColumn(column_name="col2", type="VARCHAR(255)", table=dataset)
     TableColumn(column_name="col3", type="DECIMAL(4,2)", table=dataset)
     TableColumn(column_name="col4", type="VARCHAR(255)", table=dataset)
-    # Different database dialect datetime type is not consistent, so temporarily use varchar
+    # Different database dialect datetime type is not consistent, so temporarily use varchar  # noqa: E501
     TableColumn(column_name="col5", type="VARCHAR(255)", table=dataset)
     TableColumn(column_name="col6", type="INTEGER", table=dataset)

@@ -413,7 +413,7 @@ def physical_dataset():
         (7, 'h', 1.7, NULL, '2000-01-08 00:00:00', '2002-08-18 00:00:00', '2002-08-18 00:00:00'),
         (8, 'i', 1.8, NULL, '2000-01-09 00:00:00', '2002-09-20 00:00:00', '2002-09-20 00:00:00'),
         (9, 'j', 1.9, NULL, '2000-01-10 00:00:00', '2002-10-22 00:00:00', '2002-10-22 00:00:00');
-        """
+        """  # noqa: E501
     )

     dataset = SqlaTable(
diff --git a/tests/integration_tests/core_tests.py b/tests/integration_tests/core_tests.py
index e100d10da..38b899871 100644
--- a/tests/integration_tests/core_tests.py
+++ b/tests/integration_tests/core_tests.py
@@ -80,7 +80,7 @@ def cleanup():
     db.session.query(Query).delete()
     db.session.query(models.Log).delete()
     db.session.commit()
-    yield
+    return


 class TestCore(SupersetTestCase):
@@ -105,7 +105,7 @@ class TestCore(SupersetTestCase):
         )
         return dashboard

-    @pytest.fixture()
+    @pytest.fixture
     def insert_dashboard_created_by_gamma(self):
         dashboard = self.insert_dashboard_created_by("gamma")
         yield dashboard
@@ -113,14 +113,15 @@ class TestCore(SupersetTestCase):
         db.session.commit()

     def test_login(self):
-        resp = self.get_resp("/login/", data=dict(username="admin", password="general"))
+        resp = self.get_resp("/login/", data=dict(username="admin", password="general"))  # noqa: S106, C408
         assert "User confirmation needed" not in resp

         resp = self.get_resp("/logout/", follow_redirects=True)
         assert "User confirmation needed" in resp

         resp = self.get_resp(
-            "/login/", data=dict(username="admin", password="wrongPassword")
+            "/login/",
+            data=dict(username="admin", password="wrongPassword"),  # noqa: S106, C408
         )
         assert "User confirmation needed" in resp

@@ -177,9 +178,9 @@ class TestCore(SupersetTestCase):
         slice_name = f"Energy Sankey"  # noqa: F541
         slice_id = self.get_slice(slice_name).id
         copy_name_prefix = "Test Sankey"
-        copy_name = f"{copy_name_prefix}[save]{random.random()}"
+        copy_name = f"{copy_name_prefix}[save]{random.random()}"  # noqa: S311
         tbl_id = self.table_ids.get("energy_usage")
-        new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"
+        new_slice_name = f"{copy_name_prefix}[overwrite]{random.random()}"  # noqa: S311

         url = (
             "/superset/explore/table/{}/?slice_name={}&"
@@ -248,7 +249,7 @@ class TestCore(SupersetTestCase):
     def test_slices(self):
         # Testing by hitting the two supported end points for all slices
         self.login(ADMIN_USERNAME)
-        Slc = Slice
+        Slc = Slice  # noqa: N806
         urls = []
         for slc in db.session.query(Slc).all():
             urls += [
@@ -275,7 +276,7 @@ class TestCore(SupersetTestCase):
             assert resp.status_code == 200

     @pytest.mark.usefixtures("load_energy_table_with_slice")
-    def test_slices_V2(self):
+    def test_slices_V2(self):  # noqa: N802
         # Add explore-v2-beta role to admin user
         # Test all slice urls as user with explore-v2-beta role
         security_manager.add_role("explore-v2-beta")
@@ -286,11 +287,11 @@ class TestCore(SupersetTestCase):
             " user",
             "explore_beta@airbnb.com",
             security_manager.find_role("explore-v2-beta"),
-            password="general",
+            password="general",  # noqa: S106
         )
-        self.login(username="explore_beta", password="general")
+        self.login(username="explore_beta", password="general")  # noqa: S106

-        Slc = Slice
+        Slc = Slice  # noqa: N806
         urls = []
         for slc in db.session.query(Slc).all():
             urls += [(slc.slice_name, "slice_url", slc.slice_url)]
@@ -320,7 +321,7 @@ class TestCore(SupersetTestCase):
         models.custom_password_store = custom_password_store
         conn = sqla.engine.url.make_url(database.sqlalchemy_uri_decrypted)
         if conn_pre.password:
-            assert conn.password == "password_store_test"
+            assert conn.password == "password_store_test"  # noqa: S105
             assert conn.password != conn_pre.password
         # Disable for password store for later tests
         models.custom_password_store = None
@@ -415,7 +416,7 @@ class TestCore(SupersetTestCase):
         assert 404 == resp.status_code

         value = json.dumps({"data": "this is a test"})
-        resp = self.client.post("/kv/store/", data=dict(data=value))
+        resp = self.client.post("/kv/store/", data=dict(data=value))  # noqa: C408
         assert resp.status_code == 404

     @with_feature_flags(KV_STORE=True)
@@ -426,7 +427,7 @@ class TestCore(SupersetTestCase):
         assert 404 == resp.status_code

         value = json.dumps({"data": "this is a test"})
-        resp = self.client.post("/kv/store/", data=dict(data=value))
+        resp = self.client.post("/kv/store/", data=dict(data=value))  # noqa: C408
         assert resp.status_code == 200
         kv = db.session.query(models.KeyValue).first()
         kv_value = kv.value
@@ -507,7 +508,7 @@ class TestCore(SupersetTestCase):
         )
         assert (
             json_str
-            == '[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]'
+            == '[{"data": "2017-11-18T21:53:00.219225+01:00"}, {"data": "2017-11-18T22:06:30+01:00"}]'  # noqa: E501
         )

     def test_mssql_engine_spec_pymssql(self):
@@ -588,7 +589,7 @@ class TestCore(SupersetTestCase):
             "viz_type": "dist_bar",
             "url_params": {},
             "granularity_sqla": "ds",
-            "time_range": 'DATEADD(DATETIME("2021-01-22T00:00:00"), -100, year) : 2021-01-22T00:00:00',
+            "time_range": 'DATEADD(DATETIME("2021-01-22T00:00:00"), -100, year) : 2021-01-22T00:00:00',  # noqa: E501
             "metrics": [
                 {
                     "expressionType": "SIMPLE",
@@ -661,7 +662,7 @@ class TestCore(SupersetTestCase):
             GROUP BY name
             ORDER BY count_name DESC
             LIMIT 10;
-            """,
+            """,  # noqa: E501
             client_id="client_id_1",
             username="admin",
         )
@@ -749,7 +750,7 @@ class TestCore(SupersetTestCase):
     @mock.patch("superset.viz.BaseViz.force_cached", new_callable=mock.PropertyMock)
     def test_explore_json_data(self, mock_force_cached, mock_cache):
         tbl_id = self.table_ids.get("birth_names")
-        form_data = dict(
+        form_data = dict(  # noqa: C418
             {
                 "form_data": {
                     "datasource": f"{tbl_id}__table",
@@ -787,7 +788,7 @@ class TestCore(SupersetTestCase):
     )
     def test_explore_json_data_no_login(self, mock_cache):
         tbl_id = self.table_ids.get("birth_names")
-        form_data = dict(
+        form_data = dict(  # noqa: C418
             {
                 "form_data": {
                     "datasource": f"{tbl_id}__table",
diff --git a/tests/integration_tests/css_templates/api_tests.py b/tests/integration_tests/css_templates/api_tests.py
index 27754afd3..0e8054b07 100644
--- a/tests/integration_tests/css_templates/api_tests.py
+++ b/tests/integration_tests/css_templates/api_tests.py
@@ -50,7 +50,7 @@ class TestCssTemplateApi(SupersetTestCase):
         db.session.commit()
         return css_template

-    @pytest.fixture()
+    @pytest.fixture
     def create_css_templates(self):
         with self.create_app().app_context():
             css_templates = []
diff --git a/tests/integration_tests/dashboard_tests.py b/tests/integration_tests/dashboard_tests.py
index 021fa0e1b..78218744b 100644
--- a/tests/integration_tests/dashboard_tests.py
+++ b/tests/integration_tests/dashboard_tests.py
@@ -64,8 +64,8 @@ class TestDashboard(SupersetTestCase):

         self.grant_public_access_to_table(table)

-        pytest.hidden_dash_slug = f"hidden_dash_{random()}"
-        pytest.published_dash_slug = f"published_dash_{random()}"
+        pytest.hidden_dash_slug = f"hidden_dash_{random()}"  # noqa: S311
+        pytest.published_dash_slug = f"published_dash_{random()}"  # noqa: S311

         # Create a published and hidden dashboard and add them to the database
         published_dash = Dashboard()
@@ -199,8 +199,8 @@ class TestDashboard(SupersetTestCase):

     def test_users_can_view_own_dashboard(self):
         user = security_manager.find_user("gamma")
-        my_dash_slug = f"my_dash_{random()}"
-        not_my_dash_slug = f"not_my_dash_{random()}"
+        my_dash_slug = f"my_dash_{random()}"  # noqa: S311
+        not_my_dash_slug = f"not_my_dash_{random()}"  # noqa: S311

         # Create one dashboard I own and another that I don't
         dash = Dashboard()
@@ -229,7 +229,7 @@ class TestDashboard(SupersetTestCase):

     def test_user_can_not_view_unpublished_dash(self):
         admin_user = security_manager.find_user("admin")
-        slug = f"admin_owned_unpublished_dash_{random()}"
+        slug = f"admin_owned_unpublished_dash_{random()}"  # noqa: S311

         # Create a dashboard owned by admin and unpublished
         dash = Dashboard()
diff --git a/tests/integration_tests/dashboards/api_tests.py b/tests/integration_tests/dashboards/api_tests.py
index d7f147edb..be40b7dfb 100644
--- a/tests/integration_tests/dashboards/api_tests.py
+++ b/tests/integration_tests/dashboards/api_tests.py
@@ -81,7 +81,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         "slug": "slug1_changed",
         "position_json": '{"b": "B"}',
         "css": "css_changed",
-        "json_metadata": '{"refresh_frequency": 30, "timed_refresh_immune_slices": [], "expanded_slices": {}, "color_scheme": "", "label_colors": {}, "shared_label_colors": [], "map_label_colors": {}, "color_scheme_domain": [], "cross_filters_enabled": false}',
+        "json_metadata": '{"refresh_frequency": 30, "timed_refresh_immune_slices": [], "expanded_slices": {}, "color_scheme": "", "label_colors": {}, "shared_label_colors": [], "map_label_colors": {}, "color_scheme_domain": [], "cross_filters_enabled": false}',  # noqa: E501
         "published": False,
     }

@@ -94,7 +94,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         "reordered": [],
     }

-    @pytest.fixture()
+    @pytest.fixture
     def create_dashboards(self):
         with self.create_app().app_context():
             dashboards = []
@@ -136,7 +136,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
                 db.session.delete(fav_dashboard)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_created_by_gamma_dashboards(self):
         with self.create_app().app_context():
             dashboards = []
@@ -157,7 +157,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
                 db.session.delete(dashboard)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_dashboard_with_report(self):
         with self.create_app().app_context():
             admin = self.get_user("admin")
@@ -218,7 +218,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         - ``fourth_dashboard`` is not associated with any tag

        Relies on the ``create_custom_tags`` fixture for the tag creation.
-        """
+        """  # noqa: E501
        with self.create_app().app_context():
            admin_user = self.get_user(ADMIN_USERNAME)

@@ -832,9 +832,9 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         assert data_by_id["count"] == data_by_name["count"], len(
             expected_dashboards
         )
-        assert set(chart["id"] for chart in data_by_id["result"]) == set(
+        assert set(chart["id"] for chart in data_by_id["result"]) == set(  # noqa: C401
             chart["id"] for chart in data_by_name["result"]
-        ), set(chart.id for chart in expected_dashboards)
+        ), set(chart.id for chart in expected_dashboards)  # noqa: C401

     @pytest.mark.usefixtures("create_dashboards")
     def test_get_current_user_favorite_status(self):
@@ -1273,7 +1273,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         """
         admin_id = self.get_user("admin").id
         dashboard_count = 4
-        dashboard_ids = list()
+        dashboard_ids = list()  # noqa: C408
         for dashboard_name_index in range(dashboard_count):
             dashboard_ids.append(
                 self.insert_dashboard(
@@ -1300,7 +1300,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         """
         user = self.get_user("admin")
         dashboard_count = 4
-        dashboard_ids = list()
+        dashboard_ids = list()  # noqa: C408
         for dashboard_name_index in range(dashboard_count):
             dashboard_ids.append(
                 self.insert_dashboard(
@@ -1434,7 +1434,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         """
         gamma_id = self.get_user("gamma").id
         dashboard_count = 4
-        dashboard_ids = list()
+        dashboard_ids = list()  # noqa: C408
         for dashboard_name_index in range(dashboard_count):
             dashboard_ids.append(
                 self.insert_dashboard(
@@ -1474,7 +1474,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         dashboard = self.insert_dashboard(
             "title", "slug1", [user_alpha1.id], slices=[existing_slice], published=True
         )
-        self.login(username="alpha2", password="password")
+        self.login(username="alpha2", password="password")  # noqa: S106
         uri = f"api/v1/dashboard/{dashboard.id}"
         rv = self.client.delete(uri)
         assert rv.status_code == 403
@@ -1499,7 +1499,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         )

         dashboard_count = 4
-        dashboards = list()
+        dashboards = list()  # noqa: C408
         for dashboard_name_index in range(dashboard_count):
             dashboards.append(
                 self.insert_dashboard(
@@ -1519,7 +1519,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
             published=True,
         )

-        self.login(username="alpha2", password="password")
+        self.login(username="alpha2", password="password")  # noqa: S106

         # verify we can't delete not owned dashboards
         arguments = [dashboard.id for dashboard in dashboards]
@@ -2208,7 +2208,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         dashboard = self.insert_dashboard(
             "title", "slug1", [user_alpha1.id], slices=[existing_slice], published=True
         )
-        self.login(username="alpha2", password="password")
+        self.login(username="alpha2", password="password")  # noqa: S106
         dashboard_data = {"dashboard_title": "title1_changed", "slug": "slug1 changed"}
         uri = f"api/v1/dashboard/{dashboard.id}"
         rv = self.put_assert_metric(uri, dashboard_data, "put")
@@ -2435,27 +2435,30 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         response = json.loads(rv.data.decode("utf-8"))

         assert rv.status_code == 422
-        assert response == {
-            "errors": [
-                {
-                    "message": "Error importing dashboard",
-                    "error_type": "GENERIC_COMMAND_ERROR",
-                    "level": "warning",
-                    "extra": {
-                        "dashboards/imported_dashboard.yaml": "Dashboard already exists and `overwrite=true` was not passed",
-                        "issue_codes": [
-                            {
-                                "code": 1010,
-                                "message": (
-                                    "Issue 1010 - Superset encountered an "
-                                    "error while running a command."
-                                ),
-                            }
-                        ],
-                    },
-                }
-            ]
-        }
+        assert (
+            response
+            == {
+                "errors": [
+                    {
+                        "message": "Error importing dashboard",
+                        "error_type": "GENERIC_COMMAND_ERROR",
+                        "level": "warning",
+                        "extra": {
+                            "dashboards/imported_dashboard.yaml": "Dashboard already exists and `overwrite=true` was not passed",  # noqa: E501
+                            "issue_codes": [
+                                {
+                                    "code": 1010,
+                                    "message": (
+                                        "Issue 1010 - Superset encountered an "
+                                        "error while running a command."
+                                    ),
+                                }
+                            ],
+                        },
+                    }
+                ]
+            }
+        )

         # import with overwrite flag
         buf = self.create_dashboard_import()
@@ -3021,7 +3024,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
         return self.client.post(uri, json=payload)

     def _get_screenshot(self, dashboard_id, cache_key, download_format):
-        uri = f"/api/v1/dashboard/{dashboard_id}/screenshot/{cache_key}/?download_format={download_format}"
+        uri = f"/api/v1/dashboard/{dashboard_id}/screenshot/{cache_key}/?download_format={download_format}"  # noqa: E501
         return self.client.get(uri)

     @pytest.mark.usefixtures("create_dashboard_with_tag")
@@ -3240,7 +3243,7 @@ class TestDashboardApi(ApiOwnersTestCaseMixin, InsertChartMixin, SupersetTestCas
     def test_put_dashboard_colors_no_mark_updated(self):
         """
         Dashboard API: Test updating dashboard colors without marking the dashboard as updated
-        """
+        """  # noqa: E501
         self.login(ADMIN_USERNAME)

         dashboard = Dashboard.get("world_health")
diff --git a/tests/integration_tests/dashboards/commands_tests.py b/tests/integration_tests/dashboards/commands_tests.py
index 8c2fbc05f..4dabe1b01 100644
--- a/tests/integration_tests/dashboards/commands_tests.py
+++ b/tests/integration_tests/dashboards/commands_tests.py
@@ -257,7 +257,7 @@ class TestExportDashboardsCommand(SupersetTestCase):
         expected_paths = {
             "metadata.yaml",
             f"dashboards/COVID_Vaccine_Dashboard_{example_dashboard.id}.yaml",
-            "datasets/examples/covid_vaccines.yaml",  # referenced dataset needs to be exported
+            "datasets/examples/covid_vaccines.yaml",  # referenced dataset needs to be exported  # noqa: E501
             "databases/examples.yaml",
         }
         for chart in example_dashboard.slices:
@@ -296,7 +296,7 @@ class TestExportDashboardsCommand(SupersetTestCase):
         )
         command = ExportDashboardsCommand([example_dashboard.id])
         contents = command.run()
-        with self.assertRaises(DashboardNotFoundError):
+        with self.assertRaises(DashboardNotFoundError):  # noqa: PT027
             next(contents)

     @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@@ -308,7 +308,7 @@ class TestExportDashboardsCommand(SupersetTestCase):
         mock_g2.user = security_manager.find_user("admin")
         command = ExportDashboardsCommand([-1])
         contents = command.run()
-        with self.assertRaises(DashboardNotFoundError):
+        with self.assertRaises(DashboardNotFoundError):  # noqa: PT027
             next(contents)

     @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@@ -766,7 +766,7 @@ class TestCopyDashboardCommand(SupersetTestCase):

     @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
     def test_copy_dashboard_command_no_access(self):
-        """Test that a non-owner user cannot copy a dashboard if DASHBOARD_RBAC is enabled"""
+        """Test that a non-owner user cannot copy a dashboard if DASHBOARD_RBAC is enabled"""  # noqa: E501
         with self.client.application.test_request_context():
             example_dashboard = (
                 db.session.query(Dashboard).filter_by(slug="world_health").one()
@@ -779,7 +779,7 @@ class TestCopyDashboardCommand(SupersetTestCase):
                 return_value=True,
             ):
                 command = CopyDashboardCommand(example_dashboard, copy_data)
-                with self.assertRaises(DashboardForbiddenError):
+                with self.assertRaises(DashboardForbiddenError):  # noqa: PT027
                     command.run()

     @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
@@ -793,7 +793,7 @@ class TestCopyDashboardCommand(SupersetTestCase):

         with override_user(security_manager.find_user("admin")):
             command = CopyDashboardCommand(example_dashboard, invalid_copy_data)
-            with self.assertRaises(DashboardInvalidError):
+            with self.assertRaises(DashboardInvalidError):  # noqa: PT027
                 command.run()

@@ -868,16 +868,16 @@ class TestFavoriteDashboardCommand(SupersetTestCase):
         example_dashboard_id = 1234

         with override_user(security_manager.find_user("admin")):
-            with self.assertRaises(DashboardNotFoundError):
+            with self.assertRaises(DashboardNotFoundError):  # noqa: PT027
                 AddFavoriteDashboardCommand(example_dashboard_id).run()

-            with self.assertRaises(DashboardNotFoundError):
+            with self.assertRaises(DashboardNotFoundError):  # noqa: PT027
                 DelFavoriteDashboardCommand(example_dashboard_id).run()

     @pytest.mark.usefixtures("load_world_bank_dashboard_with_slices")
     @patch("superset.models.dashboard.Dashboard.get")
     def test_fave_unfave_dashboard_command_forbidden(self, mock_get):
-        """Test that faving / unfaving raises an exception for a dashboard the user doesn't own"""
+        """Test that faving / unfaving raises an exception for a dashboard the user doesn't own"""  # noqa: E501
         with self.client.application.test_request_context():
             example_dashboard = (
                 db.session.query(Dashboard).filter_by(slug="world_health").one()
@@ -889,8 +889,8 @@ class TestFavoriteDashboardCommand(SupersetTestCase):
             assert example_dashboard is not None

             with override_user(security_manager.find_user("gamma")):
-                with self.assertRaises(DashboardAccessDeniedError):
+                with self.assertRaises(DashboardAccessDeniedError):  # noqa: PT027
                     AddFavoriteDashboardCommand(example_dashboard.uuid).run()

-                with self.assertRaises(DashboardAccessDeniedError):
+                with self.assertRaises(DashboardAccessDeniedError):  # noqa: PT027
                     DelFavoriteDashboardCommand(example_dashboard.uuid).run()
diff --git a/tests/integration_tests/dashboards/dashboard_test_utils.py b/tests/integration_tests/dashboards/dashboard_test_utils.py
index 39bce02ca..ecaf2cc02 100644
--- a/tests/integration_tests/dashboards/dashboard_test_utils.py
+++ b/tests/integration_tests/dashboards/dashboard_test_utils.py
@@ -98,7 +98,7 @@ def random_slug():


 def get_random_string(length):
     letters = string.ascii_lowercase
-    result_str = "".join(random.choice(letters) for i in range(length))
+    result_str = "".join(random.choice(letters) for i in range(length))  # noqa: S311
     print("Random string of length", length, "is:", result_str)
     return result_str
diff --git a/tests/integration_tests/dashboards/security/base_case.py b/tests/integration_tests/dashboards/security/base_case.py
index b52260508..13523a777 100644
--- a/tests/integration_tests/dashboards/security/base_case.py
+++ b/tests/integration_tests/dashboards/security/base_case.py
@@ -43,7 +43,7 @@ class BaseTestDashboardSecurity(DashboardTestCase):
         self.assert200(response)
         response_data = response.json
         assert response_data["count"] == expected_counts
-        response_dashboards_url = set(
+        response_dashboards_url = set(  # noqa: C417
             map(lambda dash: dash["url"], response_data["result"])
         )
         expected_dashboards = expected_dashboards or []
diff --git a/tests/integration_tests/dashboards/security/security_dataset_tests.py b/tests/integration_tests/dashboards/security/security_dataset_tests.py
index cf2275680..064139ccb 100644
--- a/tests/integration_tests/dashboards/security/security_dataset_tests.py
+++ b/tests/integration_tests/dashboards/security/security_dataset_tests.py
@@ -85,7 +85,7 @@ class TestDashboardDatasetSecurity(DashboardTestCase):
         }

         # assert
-        for dashboard_url, get_dashboard_response in responses_by_url.items():
+        for dashboard_url, get_dashboard_response in responses_by_url.items():  # noqa: B007
             self.assert200(get_dashboard_response)

     def test_get_dashboards__users_are_dashboards_owners(self):
diff --git a/tests/integration_tests/dashboards/security/security_rbac_tests.py b/tests/integration_tests/dashboards/security/security_rbac_tests.py
index 4ecc2e3e3..475485404 100644
--- a/tests/integration_tests/dashboards/security/security_rbac_tests.py
+++ b/tests/integration_tests/dashboards/security/security_rbac_tests.py
@@ -224,7 +224,7 @@ class TestDashboardRoleBasedSecurity(BaseTestDashboardSecurity):
         assert response.status_code == 302

     @pytest.mark.usefixtures("public_role_like_gamma")
-    def test_get_dashboard_view__public_user_with_dashboard_permission_can_not_access_draft(
+    def test_get_dashboard_view__public_user_with_dashboard_permission_can_not_access_draft(  # noqa: E501
         self,
     ):
         # arrange
diff --git a/tests/integration_tests/databases/api_tests.py b/tests/integration_tests/databases/api_tests.py
index e4fbbc6e4..d7071d0c7 100644
--- a/tests/integration_tests/databases/api_tests.py
+++ b/tests/integration_tests/databases/api_tests.py
@@ -118,7 +118,7 @@ class TestDatabaseApi(SupersetTestCase):
         db.session.commit()
         return database

-    @pytest.fixture()
+    @pytest.fixture
     def create_database_with_report(self):
         with self.create_app().app_context():
             example_db = get_example_database()
@@ -142,7 +142,7 @@ class TestDatabaseApi(SupersetTestCase):
             db.session.delete(database)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_database_with_dataset(self):
         with self.create_app().app_context():
             example_db = get_example_database()
@@ -332,7 +332,7 @@ class TestDatabaseApi(SupersetTestCase):
             .filter(SSHTunnel.database_id == response.get("id"))
             .one()
         )
-        assert response.get("result")["ssh_tunnel"]["password"] == "XXXXXXXXXX"
+        assert response.get("result")["ssh_tunnel"]["password"] == "XXXXXXXXXX"  # noqa: S105
         assert model_ssh_tunnel.database_id == response.get("id")
         # Cleanup
         model = db.session.query(Database).get(response.get("id"))
@@ -669,7 +669,7 @@ class TestDatabaseApi(SupersetTestCase):
             .one()
         )
         assert model_ssh_tunnel.database_id == response_update.get("id")
-        assert response_update.get("result")["ssh_tunnel"]["password"] == "XXXXXXXXXX"
+        assert response_update.get("result")["ssh_tunnel"]["password"] == "XXXXXXXXXX"  # noqa: S105
         assert model_ssh_tunnel.username == "Test"
         assert model_ssh_tunnel.server_address == "123.132.123.1"
         assert model_ssh_tunnel.server_port == 8080
@@ -1132,7 +1132,7 @@ class TestDatabaseApi(SupersetTestCase):
         example_db = get_example_database()
         if example_db.backend in ("sqlite", "hive", "presto"):
             return
-        example_db.password = "wrong_password"
+        example_db.password = "wrong_password"  # noqa: S105
         database_data = {
             "database_name": "test-create-database-wrong-password",
             "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
@@ -1160,7 +1160,7 @@ class TestDatabaseApi(SupersetTestCase):
                         {
                             "code": 1015,
                             "message": (
-                                "Issue 1015 - Issue 1015 - Either the database is spelled incorrectly or does not exist."
+                                "Issue 1015 - Issue 1015 - Either the database is spelled incorrectly or does not exist."  # noqa: E501
                             ),
                         },
                     ],
@@ -1177,7 +1177,7 @@ class TestDatabaseApi(SupersetTestCase):
                         {
                             "code": 1013,
                             "message": (
-                                "Issue 1013 - The password provided when connecting to a database is not valid."
+                                "Issue 1013 - The password provided when connecting to a database is not valid."  # noqa: E501
                             ),
                         }
                     ],
@@ -1225,7 +1225,7 @@ class TestDatabaseApi(SupersetTestCase):
         test_database = self.insert_database(
             "test-database1", example_db.sqlalchemy_uri_decrypted
         )
-        example_db.password = "wrong_password"
+        example_db.password = "wrong_password"  # noqa: S105
         database_data = {
             "sqlalchemy_uri": example_db.sqlalchemy_uri_decrypted,
         }
@@ -1771,7 +1771,7 @@ class TestDatabaseApi(SupersetTestCase):
         db.session.delete(database)
         db.session.commit()

-    def mock_csv_function(d, user):
+    def mock_csv_function(d, user):  # noqa: N805
         return d.get_all_schema_names()

     @mock.patch(
@@ -1818,7 +1818,7 @@ class TestDatabaseApi(SupersetTestCase):
         db.session.delete(database)
         db.session.commit()

-    def mock_empty_csv_function(d, user):
+    def mock_empty_csv_function(d, user):  # noqa: N805
         return []

     @mock.patch(
@@ -1979,7 +1979,7 @@ class TestDatabaseApi(SupersetTestCase):
         schema_name = self.default_schema_backend_map[database.backend]

         rv = self.client.get(
-            f"api/v1/database/{database.id}/tables/?q={prison.dumps({'schema_name': schema_name})}"
+            f"api/v1/database/{database.id}/tables/?q={prison.dumps({'schema_name': schema_name})}"  # noqa: E501
         )

         assert rv.status_code == 200
@@ -2001,7 +2001,7 @@ class TestDatabaseApi(SupersetTestCase):
         """
         self.login(GAMMA_USERNAME)
         example_db = get_example_database()
-        uri = f"api/v1/database/{example_db.id}/tables/?q={prison.dumps({'schema_name': 'non_existent'})}"
+        uri = f"api/v1/database/{example_db.id}/tables/?q={prison.dumps({'schema_name': 'non_existent'})}"  # noqa: E501
         rv = self.client.get(uri)
         assert rv.status_code == 404
         logger_mock.warning.assert_called_once_with(
@@ -2033,7 +2033,7 @@ class TestDatabaseApi(SupersetTestCase):
         mock_can_access_database.side_effect = Exception("Test Error")

         rv = self.client.get(
-            f"api/v1/database/{database.id}/tables/?q={prison.dumps({'schema_name': 'main'})}"
+            f"api/v1/database/{database.id}/tables/?q={prison.dumps({'schema_name': 'main'})}"  # noqa: E501
         )
         assert rv.status_code == 422
         logger_mock.warning.assert_called_once_with("Test Error", exc_info=True)
@@ -2105,7 +2105,7 @@ class TestDatabaseApi(SupersetTestCase):
                     "issue_codes": [
                         {
                             "code": 1010,
-                            "message": "Issue 1010 - Superset encountered an error while running a command.",
+                            "message": "Issue 1010 - Superset encountered an error while running a command.",  # noqa: E501
                         }
                     ]
                 },
@@ -2134,7 +2134,7 @@ class TestDatabaseApi(SupersetTestCase):
                     "issue_codes": [
                         {
                             "code": 1010,
-                            "message": "Issue 1010 - Superset encountered an error while running a command.",
+                            "message": "Issue 1010 - Superset encountered an error while running a command.",  # noqa: E501
                         }
                     ]
                 },
@@ -2163,7 +2163,7 @@ class TestDatabaseApi(SupersetTestCase):
         expected_response = {
             "message": {
                 "sqlalchemy_uri": [
-                    "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."
+                    "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."  # noqa: E501
                 ]
             }
         }
@@ -2183,7 +2183,7 @@ class TestDatabaseApi(SupersetTestCase):
         """
         Database API: Test test connection failed due to invalid hostname
         """
-        msg = 'psql: error: could not translate host name "localhost_" to address: nodename nor servname provided, or not known'
+        msg = 'psql: error: could not translate host name "localhost_" to address: nodename nor servname provided, or not known'  # noqa: E501
         mock_build_db.return_value.set_sqlalchemy_uri.side_effect = DBAPIError(
             msg, None, None
         )
@@ -2361,27 +2361,30 @@ class TestDatabaseApi(SupersetTestCase):
         response = json.loads(rv.data.decode("utf-8"))

         assert rv.status_code == 422
-        assert response == {
-            "errors": [
-                {
-                    "message": "Error importing database",
-                    "error_type": "GENERIC_COMMAND_ERROR",
-                    "level": "warning",
-                    "extra": {
-                        "databases/imported_database.yaml": "Database already exists and `overwrite=true` was not passed",
-                        "issue_codes": [
-                            {
-                                "code": 1010,
-                                "message": (
-                                    "Issue 1010 - Superset encountered an "
-                                    "error while running a command."
-                                ),
-                            }
-                        ],
-                    },
-                }
-            ]
-        }
+        assert (
+            response
+            == {
+                "errors": [
+                    {
+                        "message": "Error importing database",
+                        "error_type": "GENERIC_COMMAND_ERROR",
+                        "level": "warning",
+                        "extra": {
+                            "databases/imported_database.yaml": "Database already exists and `overwrite=true` was not passed",  # noqa: E501
+                            "issue_codes": [
+                                {
+                                    "code": 1010,
+                                    "message": (
+                                        "Issue 1010 - Superset encountered an "
+                                        "error while running a command."
+                                    ),
+                                }
+                            ],
+                        },
+                    }
+                ]
+            }
+        )

         # import with overwrite flag
         buf = self.create_database_import()
@@ -2555,7 +2558,7 @@ class TestDatabaseApi(SupersetTestCase):
             database.sqlalchemy_uri
             == "vertica+vertica_python://hackathon:XXXXXXXXXX@host:5433/dbname?ssl=1"
         )
-        assert database.password == "SECRET"
+        assert database.password == "SECRET"  # noqa: S105

         db.session.delete(database)
         db.session.commit()
@@ -2668,7 +2671,7 @@ class TestDatabaseApi(SupersetTestCase):
             .filter(SSHTunnel.database_id == database.id)
             .one()
         )
-        assert model_ssh_tunnel.password == "TEST"
+        assert model_ssh_tunnel.password == "TEST"  # noqa: S105

         db.session.delete(database)
         db.session.commit()
@@ -2709,32 +2712,35 @@ class TestDatabaseApi(SupersetTestCase):
         response = json.loads(rv.data.decode("utf-8"))

         assert rv.status_code == 422
-        assert response == {
-            "errors": [
-                {
-                    "message": "Error importing database",
-                    "error_type": "GENERIC_COMMAND_ERROR",
-                    "level": "warning",
-                    "extra": {
-                        "databases/imported_database.yaml": {
-                            "_schema": [
-                                "Must provide a private key for the ssh tunnel",
-                                "Must provide a private key password for the ssh tunnel",
-                            ]
+        assert (
+            response
+            == {
+                "errors": [
+                    {
+                        "message": "Error importing database",
+                        "error_type": "GENERIC_COMMAND_ERROR",
+                        "level": "warning",
+                        "extra": {
+                            "databases/imported_database.yaml": {
+                                "_schema": [
+                                    "Must provide a private key for the ssh tunnel",
+                                    "Must provide a private key password for the ssh tunnel",  # noqa: E501
+                                ]
+                            },
+                            "issue_codes": [
+                                {
+                                    "code": 1010,
+                                    "message": (
+                                        "Issue 1010 - Superset encountered an "
+                                        "error while running a command."
+                                    ),
+                                }
+                            ],
+                        },
-                        },
-                        "issue_codes": [
-                            {
-                                "code": 1010,
-                                "message": (
-                                    "Issue 1010 - Superset encountered an "
-                                    "error while running a command."
-                                ),
-                            }
-                        ],
-                    },
-                }
-            ]
-        }
+                    }
+                ]
+            }
+        )

     @mock.patch("superset.databases.schemas.is_feature_enabled")
     @mock.patch("superset.commands.database.importers.v1.utils.add_permissions")
@@ -2787,7 +2793,7 @@ class TestDatabaseApi(SupersetTestCase):
             .one()
         )
         assert model_ssh_tunnel.private_key == "TestPrivateKey"
-        assert model_ssh_tunnel.private_key_password == "TEST"
+        assert model_ssh_tunnel.private_key_password == "TEST"  # noqa: S105

         db.session.delete(database)
         db.session.commit()
@@ -3001,32 +3007,35 @@ class TestDatabaseApi(SupersetTestCase):
         response = json.loads(rv.data.decode("utf-8"))

         assert rv.status_code == 422
-        assert response == {
-            "errors": [
-                {
-                    "message": "Error importing database",
-                    "error_type": "GENERIC_COMMAND_ERROR",
-                    "level": "warning",
-                    "extra": {
-                        "databases/imported_database.yaml": {
-                            "_schema": [
-                                "Must provide a private key for the ssh tunnel",
-                                "Must provide a private key password for the ssh tunnel",
-                            ]
+        assert (
+            response
+            == {
+                "errors": [
+                    {
+                        "message": "Error importing database",
+                        "error_type": "GENERIC_COMMAND_ERROR",
+                        "level": "warning",
+                        "extra": {
+                            "databases/imported_database.yaml": {
+                                "_schema": [
+                                    "Must provide a private key for the ssh tunnel",
+                                    "Must provide a private key password for the ssh tunnel",  # noqa: E501
+                                ]
+                            },
+                            "issue_codes": [
+                                {
+                                    "code": 1010,
+                                    "message": (
+                                        "Issue 1010 - Superset encountered an "
+                                        "error while running a command."
+                                    ),
+                                }
+                            ],
+                        },
-                        },
-                        "issue_codes": [
-                            {
-                                "code": 1010,
-                                "message": (
-                                    "Issue 1010 - Superset encountered an "
-                                    "error while running a command."
-                                ),
-                            }
-                        ],
-                    },
-                }
-            ]
-        }
+                    }
+                ]
+            }
+        )

     @mock.patch(
         "superset.db_engine_specs.base.BaseEngineSpec.get_function_names",
@@ -3210,7 +3219,7 @@ class TestDatabaseApi(SupersetTestCase):
                         "type": "string",
                     },
                     "encryption": {
-                        "description": "Use an encrypted connection to the database",
+                        "description": "Use an encrypted connection to the database",  # noqa: E501
                         "type": "boolean",
                    },
                    "host": {
@@ -3234,7 +3243,7 @@ class TestDatabaseApi(SupersetTestCase):
                        "type": "object",
                    },
                    "ssh": {
-                        "description": "Use an ssh tunnel connection to the database",
+                        "description": "Use an ssh tunnel connection to the database",  # noqa: E501
                        "type": "boolean",
                    },
                    "username": {
@@ -3294,7 +3303,7 @@ class TestDatabaseApi(SupersetTestCase):
                        "type": "string",
                    },
                    "encryption": {
-                        "description": "Use an encrypted connection to the database",
+                        "description": "Use an encrypted connection to the database",  # noqa: E501
                        "type": "boolean",
                    },
                    "host": {
@@ -3318,7 +3327,7 @@ class TestDatabaseApi(SupersetTestCase):
                        "type": "object",
                    },
                    "ssh": {
-                        "description": "Use an ssh tunnel connection to the database",
+                        "description": "Use an ssh tunnel connection to the database",  # noqa: E501
                        "type": "boolean",
                    },
                    "username": {
@@ -3378,7 +3387,7 @@ class TestDatabaseApi(SupersetTestCase):
                        "type": "string",
                    },
                    "encryption": {
-                        "description": "Use an encrypted connection to the database",
+                        "description": "Use an encrypted connection to the database",  # noqa: E501
                        "type": "boolean",
                    },
                    "host": {
@@ -3402,7 +3411,7 @@ class TestDatabaseApi(SupersetTestCase):
                        "type": "object",
                    },
                    "ssh": {
-                        "description": "Use an ssh tunnel connection to the database",
+                        "description": "Use an ssh tunnel connection to the database",  # noqa: E501
                        "type": "boolean",
                    },
                    "username": {
@@ -3507,7 +3516,7 @@ class TestDatabaseApi(SupersetTestCase):
                    "issue_codes": [
                        {
                            "code": 1019,
-                            "message": "Issue 1019 - The submitted payload has the incorrect format.",
+                            "message": "Issue 1019 - The submitted payload has the incorrect format.",  # noqa: E501
                        }
                    ]
                },
@@ -3606,7 +3615,10 @@ class TestDatabaseApi(SupersetTestCase):
     @mock.patch("superset.db_engine_specs.base.is_port_open")
     @mock.patch("superset.databases.api.ValidateDatabaseParametersCommand")
     def test_validate_parameters_valid_payload(
-        self, ValidateDatabaseParametersCommand, is_port_open, is_hostname_valid
+        self,
+        ValidateDatabaseParametersCommand,  # noqa: N803
+        is_port_open,
+        is_hostname_valid,  # noqa: N803
     ):
         is_hostname_valid.return_value = True
         is_port_open.return_value = True
@@ -3786,13 +3798,13 @@ class TestDatabaseApi(SupersetTestCase):
                    "issue_codes": [
                        {
                            "code": 1018,
-                            "message": "Issue 1018 - One or more parameters needed to configure a database are missing.",
+                            "message": "Issue 1018 - One or more parameters needed to configure a database are missing.",  # noqa: E501
                        }
                    ],
                },
            },
            {
-                "message": "The port must be an integer between 0 and 65535 (inclusive).",
+                "message": "The port must be an integer between 0 and 65535 (inclusive).",  # noqa: E501
                "error_type": "CONNECTION_INVALID_PORT_ERROR",
                "level": "error",
                "extra": {
diff --git a/tests/integration_tests/databases/commands/upload_test.py b/tests/integration_tests/databases/commands/upload_test.py
index 3e0eb55b7..295c6b21b 100644
--- a/tests/integration_tests/databases/commands/upload_test.py
+++ b/tests/integration_tests/databases/commands/upload_test.py
@@ -83,12 +83,12 @@ def get_upload_db():
     return db.session.query(Database).filter_by(database_name=CSV_UPLOAD_DATABASE).one()


-@pytest.fixture()
+@pytest.fixture
 def setup_csv_upload_with_context(app_context: AppContext):
     yield from _setup_csv_upload()


-@pytest.fixture()
+@pytest.fixture
 def setup_csv_upload_with_context_schema(app_context: AppContext):
     yield from _setup_csv_upload(["public"])

@@ -107,7 +107,7 @@ def test_csv_upload_with_nulls():
         CSVReader({"null_values": ["N/A", "None"]}),
     ).run()
     with upload_database.get_sqla_engine() as engine:
-        data = engine.execute(f"SELECT * from {CSV_UPLOAD_TABLE}").fetchall()
+        data = engine.execute(f"SELECT * from {CSV_UPLOAD_TABLE}").fetchall()  # noqa: S608
         assert data == [
             ("name1", None, "city1", "1-1-1980"),
             ("name2", 29, None, "1-1-1981"),
@@ -151,15 +151,16 @@ def test_csv_upload_with_index():
         CSVReader({"dataframe_index": True, "index_label": "id"}),
     ).run()
     with upload_database.get_sqla_engine() as engine:
-        data = engine.execute(f"SELECT * from {CSV_UPLOAD_TABLE}").fetchall()
+        data = engine.execute(f"SELECT * from {CSV_UPLOAD_TABLE}").fetchall()  # noqa: S608
         assert data == [
             (0, "name1", 30, "city1", "1-1-1980"),
             (1, "name2", 29, "city2", "1-1-1981"),
             (2, "name3", 28, "city3", "1-1-1982"),
         ]
         # assert column names
-        assert [
-            col for col in engine.execute(f"SELECT * from {CSV_UPLOAD_TABLE}").keys()
+        assert [  # noqa: C416
+            col
+            for col in engine.execute(f"SELECT * from {CSV_UPLOAD_TABLE}").keys()  # noqa: S608
         ] == [
             "id",
             "Name",
diff --git a/tests/integration_tests/databases/commands_tests.py b/tests/integration_tests/databases/commands_tests.py
index 3bd0cfce2..6e3d64366 100644
--- a/tests/integration_tests/databases/commands_tests.py
+++ b/tests/integration_tests/databases/commands_tests.py
@@ -338,7 +338,7 @@ class TestExportDatabasesCommand(SupersetTestCase):
         example_db = get_example_database()
         command = ExportDatabasesCommand([example_db.id])
         contents = command.run()
-        with self.assertRaises(DatabaseNotFoundError):
+        with self.assertRaises(DatabaseNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.security.manager.g")
@@ -347,7 +347,7 @@ class TestExportDatabasesCommand(SupersetTestCase):
         mock_g.user = security_manager.find_user("admin")
         command = ExportDatabasesCommand([-1])
         contents = command.run()
-        with self.assertRaises(DatabaseNotFoundError):
+        with self.assertRaises(DatabaseNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.security.manager.g")
@@ -680,7 +680,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
         mock_add_permissions,
         mock_schema_is_feature_enabled,
     ):
-        """Test that database imports with masked ssh_tunnel private_key and private_key_password are rejected"""
+        """Test that database imports with masked ssh_tunnel private_key and private_key_password are rejected"""  # noqa: E501
         mock_schema_is_feature_enabled.return_value = True
         masked_database_config = database_with_ssh_tunnel_config_private_key.copy()
         contents = {
@@ -713,7 +713,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
         mock_g.user = security_manager.find_user("admin")
         mock_schema_is_feature_enabled.return_value = True
         masked_database_config = database_with_ssh_tunnel_config_password.copy()
-        masked_database_config["ssh_tunnel"]["password"] = "TEST"
+        masked_database_config["ssh_tunnel"]["password"] = "TEST"  # noqa: S105
         contents = {
             "metadata.yaml": yaml.safe_dump(database_metadata_config),
             "databases/imported_database.yaml": yaml.safe_dump(masked_database_config),
@@ -740,7 +740,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
             .filter(SSHTunnel.database_id == database.id)
             .one()
         )
-        assert model_ssh_tunnel.password == "TEST"
+        assert model_ssh_tunnel.password == "TEST"  # noqa: S105

         db.session.delete(database)
         db.session.commit()
@@ -754,13 +754,13 @@ class TestImportDatabasesCommand(SupersetTestCase):
         mock_g,
         mock_schema_is_feature_enabled,
     ):
-        """Test that a database with ssh_tunnel private_key and private_key_password can be imported"""
+        """Test that a database with ssh_tunnel private_key and private_key_password can be imported"""  # noqa: E501
         mock_g.user = security_manager.find_user("admin")
         mock_schema_is_feature_enabled.return_value = True
         masked_database_config = database_with_ssh_tunnel_config_private_key.copy()

         masked_database_config["ssh_tunnel"]["private_key"] = "TestPrivateKey"
-        masked_database_config["ssh_tunnel"]["private_key_password"] = "TEST"
+        masked_database_config["ssh_tunnel"]["private_key_password"] = "TEST"  # noqa: S105
         contents = {
             "metadata.yaml": yaml.safe_dump(database_metadata_config),
             "databases/imported_database.yaml": yaml.safe_dump(masked_database_config),
@@ -788,7 +788,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
             .one()
         )
         assert model_ssh_tunnel.private_key == "TestPrivateKey"
-        assert model_ssh_tunnel.private_key_password == "TEST"
+        assert model_ssh_tunnel.private_key_password == "TEST"  # noqa: S105

         db.session.delete(database)
         db.session.commit()
@@ -819,7 +819,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
         mock_add_permissions,
         mock_schema_is_feature_enabled,
     ):
-        """Test that databases with ssh_tunnels that have multiple credentials are rejected"""
+        """Test that databases with ssh_tunnels that have multiple credentials are rejected"""  # noqa: E501
         mock_schema_is_feature_enabled.return_value = True
         masked_database_config = database_with_ssh_tunnel_config_mix_credentials.copy()
         contents = {
@@ -840,7 +840,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
         mock_add_permissions,
         mock_schema_is_feature_enabled,
     ):
-        """Test that databases with ssh_tunnels that have multiple credentials are rejected"""
+        """Test that databases with ssh_tunnels that have multiple credentials are rejected"""  # noqa: E501
         mock_schema_is_feature_enabled.return_value = True
         masked_database_config = (
             database_with_ssh_tunnel_config_private_pass_only.copy()
@@ -878,7 +878,7 @@ class TestImportDatabasesCommand(SupersetTestCase):
             "metadata.yaml": yaml.safe_dump(database_metadata_config),
         }
         command = ImportDatabasesCommand(contents)
-        with pytest.raises(Exception) as excinfo:
+        with pytest.raises(Exception) as excinfo:  # noqa: PT011
             command.run()
         assert str(excinfo.value) == "Import database failed for an unknown reason"

@@ -902,7 +902,7 @@ class TestTestConnectionDatabaseCommand(SupersetTestCase):
         json_payload = {"sqlalchemy_uri": db_uri}
         command_without_db_name = TestConnectionDatabaseCommand(json_payload)

-        with pytest.raises(DatabaseTestConnectionUnexpectedError) as excinfo:
+        with pytest.raises(DatabaseTestConnectionUnexpectedError) as excinfo:  # noqa: PT012
             command_without_db_name.run()
             assert str(excinfo.value) == (
                 "Unexpected error occurred, please check your logs for details"
@@ -971,7 +971,7 @@ class TestTestConnectionDatabaseCommand(SupersetTestCase):
         json_payload = {"sqlalchemy_uri": db_uri}
         command_without_db_name = TestConnectionDatabaseCommand(json_payload)

-        with pytest.raises(DatabaseSecurityUnsafeError) as excinfo:
+        with pytest.raises(DatabaseSecurityUnsafeError) as excinfo:  # noqa: PT012
             command_without_db_name.run()
             assert str(excinfo.value) == ("Stopped an unsafe database connection")

@@ -993,7 +993,7 @@ class TestTestConnectionDatabaseCommand(SupersetTestCase):
         json_payload = {"sqlalchemy_uri": db_uri}
         command_without_db_name = TestConnectionDatabaseCommand(json_payload)

-        with pytest.raises(SupersetErrorsException) as excinfo:
+        with pytest.raises(SupersetErrorsException) as excinfo:  # noqa: PT012
             command_without_db_name.run()
             assert str(excinfo.value) == (
                 "Connection failed, please check your connection settings"
@@ -1005,7 +1005,7 @@ class TestTestConnectionDatabaseCommand(SupersetTestCase):
 @patch("superset.db_engine_specs.base.is_hostname_valid")
 @patch("superset.db_engine_specs.base.is_port_open")
 @patch("superset.commands.database.validate.DatabaseDAO")
-def test_validate(DatabaseDAO, is_port_open, is_hostname_valid, app_context):
+def test_validate(DatabaseDAO, is_port_open, is_hostname_valid, app_context):  # noqa: N803
     """
     Test parameter validation.
     """
@@ -1060,7 +1060,7 @@ def test_validate_partial(is_port_open, is_hostname_valid, app_context):
                 "issue_codes": [
                     {
                         "code": 1018,
-                        "message": "Issue 1018 - One or more parameters needed to configure a database are missing.",
+                        "message": "Issue 1018 - One or more parameters needed to configure a database are missing.",  # noqa: E501
                     }
                 ],
             },
@@ -1099,7 +1099,7 @@ def test_validate_partial_invalid_hostname(is_hostname_valid, app_context):
                 "issue_codes": [
                     {
                         "code": 1018,
-                        "message": "Issue 1018 - One or more parameters needed to configure a database are missing.",
+                        "message": "Issue 1018 - One or more parameters needed to configure a database are missing.",  # noqa: E501
                     }
                 ],
             },
@@ -1113,7 +1113,7 @@ def test_validate_partial_invalid_hostname(is_hostname_valid, app_context):
                 "issue_codes": [
                     {
                         "code": 1007,
-                        "message": "Issue 1007 - The hostname provided can't be resolved.",
+                        "message": "Issue 1007 - The hostname provided can't be resolved.",  # noqa: E501
                     }
                 ],
             },
@@ -1127,7 +1127,7 @@ class TestTablesDatabaseCommand(SupersetTestCase):
         mock_find_by_id.return_value = None
         command = TablesDatabaseCommand(1, None, "test", False)

-        with pytest.raises(DatabaseNotFoundError) as excinfo:
+        with pytest.raises(DatabaseNotFoundError) as excinfo:  # noqa: PT012
             command.run()
             assert str(excinfo.value) == ("Database not found.")

@@ -1146,7 +1146,7 @@ class TestTablesDatabaseCommand(SupersetTestCase):
         mock_g.user = security_manager.find_user("admin")

         command = TablesDatabaseCommand(database.id, None, "main", False)
-        with pytest.raises(SupersetException) as excinfo:
+        with pytest.raises(SupersetException) as excinfo:  # noqa: PT012
             command.run()
             assert str(excinfo.value) == "Test Error"

@@ -1162,7 +1162,7 @@ class TestTablesDatabaseCommand(SupersetTestCase):
         mock_g.user = security_manager.find_user("admin")

         command = TablesDatabaseCommand(database.id, None, "main", False)
-        with pytest.raises(DatabaseTablesUnexpectedError) as excinfo:
+        with pytest.raises(DatabaseTablesUnexpectedError) as excinfo:  # noqa: PT012
             command.run()
             assert (
                 str(excinfo.value)
diff --git a/tests/integration_tests/datasets/api_tests.py b/tests/integration_tests/datasets/api_tests.py
index b04d4cec7..6c6410fee 100644
--- a/tests/integration_tests/datasets/api_tests.py
+++ b/tests/integration_tests/datasets/api_tests.py
@@ -79,7 +79,7 @@ class TestDatasetApi(SupersetTestCase):
         sql: Optional[str] = None,
         schema: Optional[str] = None,
     ) -> SqlaTable:
-        obj_owners = list()
+        obj_owners = list()  # noqa: C408
         for owner in owners:
             user = db.session.query(security_manager.user_model).get(owner)
             obj_owners.append(user)
@@ -115,7 +115,7 @@ class TestDatasetApi(SupersetTestCase):
             .all()
         )

-    @pytest.fixture()
+    @pytest.fixture
     def create_virtual_datasets(self):
         with self.create_app().app_context():
             datasets = []
@@ -137,7 +137,7 @@ class TestDatasetApi(SupersetTestCase):
                 db.session.delete(dataset)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_datasets(self):
         with self.create_app().app_context():
             datasets = []
@@ -254,7 +254,7 @@ class TestDatasetApi(SupersetTestCase):
             "sql",
             "table_name",
         ]
-        assert sorted(list(response["result"][0].keys())) == expected_columns
+        assert sorted(list(response["result"][0].keys())) == expected_columns  # noqa: C414

     def test_get_dataset_list_gamma(self):
         """
@@ -519,7 +519,7 @@ class TestDatasetApi(SupersetTestCase):

         self.login(ADMIN_USERNAME)

-        uri = f"api/v1/dataset/{dataset.id}?q=(columns:!(id,sql))&include_rendered_sql=true"
+        uri = f"api/v1/dataset/{dataset.id}?q=(columns:!(id,sql))&include_rendered_sql=true"  # noqa: E501
         rv = self.get_assert_metric(uri, "get")
         assert rv.status_code == 400
         response = json.loads(rv.data.decode("utf-8"))
@@ -2204,11 +2204,11 @@ class TestDatasetApi(SupersetTestCase):
                     "error_type": "GENERIC_COMMAND_ERROR",
                     "level": "warning",
                     "extra": {
-                        "datasets/imported_dataset.yaml": "Dataset already exists and `overwrite=true` was not passed",
+                        "datasets/imported_dataset.yaml": "Dataset already exists and `overwrite=true` was not passed",  # noqa: E501
                         "issue_codes": [
                             {
                                 "code": 1010,
-                                "message": "Issue 1010 - Superset encountered an error while running a command.",
+                                "message": "Issue 1010 - Superset encountered an error while running a command.",  # noqa: E501
                             }
                         ],
                     },
@@ -2327,7 +2327,7 @@ class TestDatasetApi(SupersetTestCase):
                     "issue_codes": [
                         {
                             "code": 1010,
-                            "message": "Issue 1010 - Superset encountered an error while running a command.",
+                            "message": "Issue 1010 - Superset encountered an error while running a command.",  # noqa: E501
                         }
                     ]
                 },
diff --git a/tests/integration_tests/datasets/commands_tests.py b/tests/integration_tests/datasets/commands_tests.py
index f85951c45..2c0a9ce61 100644
--- a/tests/integration_tests/datasets/commands_tests.py
+++ b/tests/integration_tests/datasets/commands_tests.py
@@ -192,7 +192,7 @@ class TestExportDatasetsCommand(SupersetTestCase):
         example_dataset = example_db.tables[0]
         command = ExportDatasetsCommand([example_dataset.id])
         contents = command.run()
-        with self.assertRaises(DatasetNotFoundError):
+        with self.assertRaises(DatasetNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.security.manager.g")
@@ -201,7 +201,7 @@ class TestExportDatasetsCommand(SupersetTestCase):
         mock_g.user = security_manager.find_user("admin")
         command = ExportDatasetsCommand([-1])
         contents = command.run()
-        with self.assertRaises(DatasetNotFoundError):
+        with self.assertRaises(DatasetNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.security.manager.g")
@@ -282,7 +282,7 @@ class TestImportDatasetsCommand(SupersetTestCase):
         )
         assert (
             dataset.params
-            == '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'
+            == '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'  # noqa: E501
         )
         assert len(dataset.metrics) == 2
         assert dataset.main_dttm_col == "ds"
@@ -322,7 +322,7 @@ class TestImportDatasetsCommand(SupersetTestCase):
         )
         assert (
             dataset.params
-            == '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'
+            == '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}'  # noqa: E501
         )
         assert len(dataset.metrics) == 2
         assert dataset.main_dttm_col == "ds"
@@ -373,7 +373,7 @@ class TestImportDatasetsCommand(SupersetTestCase):
         assert dataset.fetch_values_predicate is None
         assert (
             dataset.extra
-            == '{"certification": {"certified_by": "Data Platform Team", "details": "This table is the source of truth."}, "warning_markdown": "This is a warning."}'
+            == '{"certification": {"certified_by": "Data Platform Team", "details": "This table is the source of truth."}, "warning_markdown": "This is a warning."}'  # noqa: E501
         )

         # user should be included as one of the owners
@@ -554,7 +554,7 @@ class TestCreateDatasetCommand(SupersetTestCase):
     @patch("superset.commands.utils.g")
     def test_database_not_found(self, mock_g):
         mock_g.user = security_manager.find_user("admin")
-        with self.assertRaises(DatasetInvalidError):
+        with self.assertRaises(DatasetInvalidError):  # noqa: PT027
             CreateDatasetCommand({"table_name": "table", "database": 9999}).run()

     @patch("superset.commands.utils.g")
@@ -562,7 +562,7 @@ class TestCreateDatasetCommand(SupersetTestCase):
     def test_get_table_from_database_error(self, get_table_mock, mock_g):
         get_table_mock.side_effect = SQLAlchemyError
         mock_g.user = security_manager.find_user("admin")
-        with self.assertRaises(DatasetInvalidError):
+        with self.assertRaises(DatasetInvalidError):  # noqa: PT027
             CreateDatasetCommand(
                 {"table_name": "table", "database": get_example_database().id}
             ).run()
@@ -598,7 +598,7 @@ class TestCreateDatasetCommand(SupersetTestCase):
     def test_create_dataset_command_not_allowed(self):
         examples_db = get_example_database()
         with override_user(security_manager.find_user("gamma")):
-            with self.assertRaises(DatasetInvalidError):
+            with self.assertRaises(DatasetInvalidError):  # noqa: PT027
                 _ = CreateDatasetCommand(
                     {
                         "sql": "select * from ab_user",
@@ -610,7 +610,7 @@ class TestDatasetWarmUpCacheCommand(SupersetTestCase):
     def test_warm_up_cache_command_table_not_found(self):
-        with self.assertRaises(WarmUpCacheTableNotFoundError):
+        with self.assertRaises(WarmUpCacheTableNotFoundError):  # noqa: PT027
             DatasetWarmUpCacheCommand("not", "here", None, None).run()

     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
diff --git a/tests/integration_tests/datasource_tests.py b/tests/integration_tests/datasource_tests.py
index ab13fc4da..c3533d131 100644
--- a/tests/integration_tests/datasource_tests.py
+++ b/tests/integration_tests/datasource_tests.py
@@ -58,10 +58,10 @@ def create_test_table_context(database: Database):

     with database.get_sqla_engine() as engine:
         engine.execute(
-            f"CREATE TABLE IF NOT EXISTS {full_table_name} AS SELECT 1 as first, 2 as second"
+            f"CREATE TABLE IF NOT EXISTS {full_table_name} AS SELECT 1 as first, 2 as second"  # noqa: E501
         )
-        engine.execute(f"INSERT INTO {full_table_name} (first, second) VALUES (1, 2)")
-        engine.execute(f"INSERT INTO {full_table_name} (first, second) VALUES (3, 4)")
+        engine.execute(f"INSERT INTO {full_table_name} (first, second) VALUES (1, 2)")  # noqa: S608
+        engine.execute(f"INSERT INTO {full_table_name} (first, second) VALUES (3, 4)")  # noqa: S608

     yield db.session

@@ -151,11 +151,11 @@ class TestDatasource(SupersetTestCase):
         with create_and_cleanup_table(table):
             table.always_filter_main_dttm = False
             result = str(table.get_sqla_query(**query_obj).sqla_query.whereclause)
-            assert "default_dttm" not in result and "additional_dttm" in result
+            assert "default_dttm" not in result and "additional_dttm" in result  # noqa: PT018

             table.always_filter_main_dttm = True
             result = str(table.get_sqla_query(**query_obj).sqla_query.whereclause)
-            assert "default_dttm" in result and "additional_dttm" in result
+            assert "default_dttm" in result and "additional_dttm" in result  # noqa: PT018

     def test_external_metadata_for_virtual_table(self):
         self.login(ADMIN_USERNAME)
@@ -372,7 +372,7 @@ class TestDatasource(SupersetTestCase):
         datasource_post = get_datasource_post()
         datasource_post["id"] = tbl_id
         datasource_post["owners"] = [1]
-        data = dict(data=json.dumps(datasource_post))
+        data = dict(data=json.dumps(datasource_post))  # noqa: C408
         resp = self.get_json_resp("/datasource/save/", data)
         for k in datasource_post:
             if k == "columns":
@@ -394,12 +394,12 @@ class TestDatasource(SupersetTestCase):
         datasource_post["id"] = tbl_id
         datasource_post["owners"] = [1]
         datasource_post["default_endpoint"] = "http://localhost/superset/1"
-        data = dict(data=json.dumps(datasource_post))
+        data = dict(data=json.dumps(datasource_post))  # noqa: C408
         resp = self.client.post("/datasource/save/", data=data)
         assert resp.status_code == 200

     def save_datasource_from_dict(self, datasource_post):
-        data = dict(data=json.dumps(datasource_post))
+        data = dict(data=json.dumps(datasource_post))  # noqa: C408
         resp = self.get_json_resp("/datasource/save/", data)
         return resp

@@ -451,7 +451,7 @@ class TestDatasource(SupersetTestCase):
                 },
             ]
         )
-        data = dict(data=json.dumps(datasource_post))
+        data = dict(data=json.dumps(datasource_post))  # noqa: C408
         resp = self.get_json_resp("/datasource/save/", data, raise_on_error=False)
         assert "Duplicate column name(s): " in resp["error"]

@@ -463,7 +463,7 @@ class TestDatasource(SupersetTestCase):
         datasource_post = get_datasource_post()
         datasource_post["id"] = tbl.id
         datasource_post["owners"] = [admin_user.id]
-        data = dict(data=json.dumps(datasource_post))
+        data = dict(data=json.dumps(datasource_post))  # noqa: C408
         self.get_json_resp("/datasource/save/", data)
         url = f"/datasource/get/{tbl.type}/{tbl.id}/"
         resp = self.get_json_resp(url)
@@ -537,7 +537,7 @@ def test_get_samples(test_client, login_as_admin, virtual_dataset):
     assert rv.json["result"]["is_cached"]

     # 2. should read through cache data
-    uri2 = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&force=true"
+    uri2 = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&force=true"  # noqa: E501
     # feeds data
     test_client.post(uri2, json={})
     # force query
@@ -556,7 +556,7 @@ def test_get_samples(test_client, login_as_admin, virtual_dataset):
     assert "data" in rv2.json["result"]

     sql = (
-        f"select * from ({virtual_dataset.sql}) as tbl "
+        f"select * from ({virtual_dataset.sql}) as tbl "  # noqa: S608
         f'limit {app.config["SAMPLES_ROW_LIMIT"]}'
     )
     eager_samples = virtual_dataset.database.get_df(sql)
@@ -722,12 +722,12 @@ def test_get_samples_pagination(test_client, login_as_admin, virtual_dataset):
     # 2. incorrect per_page
     per_pages = (app.config["SAMPLES_ROW_LIMIT"] + 1, 0, "xx")
     for per_page in per_pages:
-        uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page={per_page}"
+        uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page={per_page}"  # noqa: E501
         rv = test_client.post(uri, json={})
         assert rv.status_code == 400

     # 3. incorrect page or datasource_type
-    uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&page=xx"
+    uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&page=xx"  # noqa: E501
     rv = test_client.post(uri, json={})
     assert rv.status_code == 400

@@ -736,14 +736,14 @@ def test_get_samples_pagination(test_client, login_as_admin, virtual_dataset):
     rv = test_client.post(uri, json={})
     assert rv.status_code == 400

     # 4.
turning pages - uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page=2&page=1" + uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page=2&page=1" # noqa: E501 rv = test_client.post(uri, json={}) assert rv.json["result"]["page"] == 1 assert rv.json["result"]["per_page"] == 2 assert rv.json["result"]["total_count"] == 10 assert [row["col1"] for row in rv.json["result"]["data"]] == [0, 1] - uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page=2&page=2" + uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page=2&page=2" # noqa: E501 rv = test_client.post(uri, json={}) assert rv.json["result"]["page"] == 2 assert rv.json["result"]["per_page"] == 2 @@ -751,7 +751,7 @@ def test_get_samples_pagination(test_client, login_as_admin, virtual_dataset): assert [row["col1"] for row in rv.json["result"]["data"]] == [2, 3] # 5. Exceeding the maximum pages - uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page=2&page=6" + uri = f"/datasource/samples?datasource_id={virtual_dataset.id}&datasource_type=table&per_page=2&page=6" # noqa: E501 rv = test_client.post(uri, json={}) assert rv.json["result"]["page"] == 6 assert rv.json["result"]["per_page"] == 2 diff --git a/tests/integration_tests/db_engine_specs/base_engine_spec_tests.py b/tests/integration_tests/db_engine_specs/base_engine_spec_tests.py index 916de39cd..d4d886b67 100644 --- a/tests/integration_tests/db_engine_specs/base_engine_spec_tests.py +++ b/tests/integration_tests/db_engine_specs/base_engine_spec_tests.py @@ -305,7 +305,7 @@ class TestDbEngineSpecs(TestDbEngineSpec): } sql = table.get_query_str(query_obj) assert ( - "ORDER BY \n case\n when gender='boy' then 'male'\n else 'female'\n end\n ASC" + "ORDER BY \n case\n when gender='boy' then 'male'\n else 'female'\n end\n ASC" # noqa: E501 in sql ) diff --git a/tests/integration_tests/db_engine_specs/bigquery_tests.py b/tests/integration_tests/db_engine_specs/bigquery_tests.py index fa10bd2ce..45edb5a0d 100644 --- a/tests/integration_tests/db_engine_specs/bigquery_tests.py +++ b/tests/integration_tests/db_engine_specs/bigquery_tests.py @@ -248,11 +248,11 @@ class TestBigQueryDbEngineSpec(TestDbEngineSpec): ) def test_extract_errors(self): - msg = "403 POST https://bigquery.googleapis.com/bigquery/v2/projects/test-keel-310804/jobs?prettyPrint=false: Access Denied: Project profound-keel-310804: User does not have bigquery.jobs.create permission in project profound-keel-310804" + msg = "403 POST https://bigquery.googleapis.com/bigquery/v2/projects/test-keel-310804/jobs?prettyPrint=false: Access Denied: Project profound-keel-310804: User does not have bigquery.jobs.create permission in project profound-keel-310804" # noqa: E501 result = BigQueryEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='Unable to connect. Verify that the following roles are set on the service account: "BigQuery Data Viewer", "BigQuery Metadata Viewer", "BigQuery Job User" and the following permissions are set "bigquery.readsessions.create", "bigquery.readsessions.getData"', + message='Unable to connect. 
Verify that the following roles are set on the service account: "BigQuery Data Viewer", "BigQuery Metadata Viewer", "BigQuery Job User" and the following permissions are set "bigquery.readsessions.create", "bigquery.readsessions.getData"', # noqa: E501 error_type=SupersetErrorType.CONNECTION_DATABASE_PERMISSIONS_ERROR, level=ErrorLevel.ERROR, extra={ @@ -267,11 +267,11 @@ class TestBigQueryDbEngineSpec(TestDbEngineSpec): ) ] - msg = "bigquery error: 404 Not found: Dataset fakeDataset:bogusSchema was not found in location" + msg = "bigquery error: 404 Not found: Dataset fakeDataset:bogusSchema was not found in location" # noqa: E501 result = BigQueryEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='The schema "bogusSchema" does not exist. A valid schema must be used to run this query.', + message='The schema "bogusSchema" does not exist. A valid schema must be used to run this query.', # noqa: E501 error_type=SupersetErrorType.SCHEMA_DOES_NOT_EXIST_ERROR, level=ErrorLevel.ERROR, extra={ @@ -279,22 +279,22 @@ class TestBigQueryDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1004, - "message": "Issue 1004 - The column was deleted or renamed in the database.", + "message": "Issue 1004 - The column was deleted or renamed in the database.", # noqa: E501 }, ], }, ) ] - msg = 'Table name "badtable" missing dataset while no default dataset is set in the request' + msg = 'Table name "badtable" missing dataset while no default dataset is set in the request' # noqa: E501 result = BigQueryEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='The table "badtable" does not exist. A valid table must be used to run this query.', + message='The table "badtable" does not exist. A valid table must be used to run this query.', # noqa: E501 error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR, level=ErrorLevel.ERROR, extra={ @@ -302,11 +302,11 @@ class TestBigQueryDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1005, - "message": "Issue 1005 - The table was deleted or renamed in the database.", + "message": "Issue 1005 - The table was deleted or renamed in the database.", # noqa: E501 }, ], }, @@ -325,11 +325,11 @@ class TestBigQueryDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1004, - "message": "Issue 1004 - The column was deleted or renamed in the database.", + "message": "Issue 1004 - The column was deleted or renamed in the database.", # noqa: E501 }, ], }, @@ -340,7 +340,7 @@ class TestBigQueryDbEngineSpec(TestDbEngineSpec): result = BigQueryEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='Please check your query for syntax errors at or near "from_". 
Then, try running your query again.', + message='Please check your query for syntax errors at or near "from_". Then, try running your query again.', # noqa: E501 error_type=SupersetErrorType.SYNTAX_ERROR, level=ErrorLevel.ERROR, extra={ diff --git a/tests/integration_tests/db_engine_specs/gsheets_tests.py b/tests/integration_tests/db_engine_specs/gsheets_tests.py index d66c54e2f..212af15c3 100644 --- a/tests/integration_tests/db_engine_specs/gsheets_tests.py +++ b/tests/integration_tests/db_engine_specs/gsheets_tests.py @@ -28,7 +28,7 @@ class TestGsheetsDbEngineSpec(TestDbEngineSpec): result = GSheetsEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='Please check your query for syntax errors near "from_". Then, try running your query again.', + message='Please check your query for syntax errors near "from_". Then, try running your query again.', # noqa: E501 error_type=SupersetErrorType.SYNTAX_ERROR, level=ErrorLevel.ERROR, extra={ diff --git a/tests/integration_tests/db_engine_specs/hive_tests.py b/tests/integration_tests/db_engine_specs/hive_tests.py index 734ebca1c..c6bbfb683 100644 --- a/tests/integration_tests/db_engine_specs/hive_tests.py +++ b/tests/integration_tests/db_engine_specs/hive_tests.py @@ -33,7 +33,7 @@ def test_0_progress(): log = """ 17/02/07 18:26:27 INFO log.PerfLogger: 17/02/07 18:26:27 INFO log.PerfLogger: - """.split("\n") + """.split("\n") # noqa: E501 assert HiveEngineSpec.progress(log) == 0 @@ -57,7 +57,7 @@ def test_job_1_launched_stage_1(): 17/02/07 19:15:55 INFO ql.Driver: Total jobs = 2 17/02/07 19:15:55 INFO ql.Driver: Launching Job 1 out of 2 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 0%, reduce = 0% - """.split("\n") + """.split("\n") # noqa: E501 assert HiveEngineSpec.progress(log) == 0 @@ -67,7 +67,7 @@ def test_job_1_launched_stage_1_map_40_progress(): # pylint: disable=invalid-na 17/02/07 19:15:55 INFO ql.Driver: Launching Job 1 out of 2 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 0%, reduce = 0% 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 40%, reduce = 0% - """.split("\n") + """.split("\n") # noqa: E501 assert HiveEngineSpec.progress(log) == 10 @@ -78,7 +78,7 @@ def test_job_1_launched_stage_1_map_80_reduce_40_progress(): # pylint: disable= 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 0%, reduce = 0% 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 40%, reduce = 0% 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 80%, reduce = 40% - """.split("\n") + """.split("\n") # noqa: E501 assert HiveEngineSpec.progress(log) == 30 @@ -91,7 +91,7 @@ def test_job_1_launched_stage_2_stages_progress(): # pylint: disable=invalid-na 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 80%, reduce = 40% 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-2 map = 0%, reduce = 0% 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 100%, reduce = 0% - """.split("\n") + """.split("\n") # noqa: E501 assert HiveEngineSpec.progress(log) == 12 @@ -103,7 +103,7 @@ def test_job_2_launched_stage_2_stages_progress(): # pylint: disable=invalid-na 17/02/07 19:15:55 INFO ql.Driver: Launching Job 2 out of 2 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 0%, reduce = 0% 17/02/07 19:16:09 INFO exec.Task: 2017-02-07 19:16:09,173 Stage-1 map = 40%, reduce = 0% - """.split("\n") + """.split("\n") # noqa: E501 assert 
HiveEngineSpec.progress(log) == 60 diff --git a/tests/integration_tests/db_engine_specs/mysql_tests.py b/tests/integration_tests/db_engine_specs/mysql_tests.py index e935b99e0..23af61f17 100644 --- a/tests/integration_tests/db_engine_specs/mysql_tests.py +++ b/tests/integration_tests/db_engine_specs/mysql_tests.py @@ -170,18 +170,18 @@ class TestMySQLEngineSpecsDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1015, - "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.", + "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.", # noqa: E501 } ], }, ) ] - msg = "check the manual that corresponds to your MySQL server version for the right syntax to use near 'from_" + msg = "check the manual that corresponds to your MySQL server version for the right syntax to use near 'from_" # noqa: E501 result = MySQLEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='Please check your query for syntax errors near "from_". Then, try running your query again.', + message='Please check your query for syntax errors near "from_". Then, try running your query again.', # noqa: E501 error_type=SupersetErrorType.SYNTAX_ERROR, level=ErrorLevel.ERROR, extra={ diff --git a/tests/integration_tests/db_engine_specs/pinot_tests.py b/tests/integration_tests/db_engine_specs/pinot_tests.py index 40793494e..66d4865fb 100755 --- a/tests/integration_tests/db_engine_specs/pinot_tests.py +++ b/tests/integration_tests/db_engine_specs/pinot_tests.py @@ -81,10 +81,10 @@ class TestPinotDbEngineSpec(TestDbEngineSpec): assert result == expected def test_invalid_get_time_expression_arguments(self): - with self.assertRaises(NotImplementedError): + with self.assertRaises(NotImplementedError): # noqa: PT027 PinotEngineSpec.get_timestamp_expr(column("tstamp"), None, "P0.25Y") - with self.assertRaises(NotImplementedError): + with self.assertRaises(NotImplementedError): # noqa: PT027 PinotEngineSpec.get_timestamp_expr( column("tstamp"), "epoch_s", "invalid_grain" ) diff --git a/tests/integration_tests/db_engine_specs/postgres_tests.py b/tests/integration_tests/db_engine_specs/postgres_tests.py index a5ef1cdec..e45e0189f 100644 --- a/tests/integration_tests/db_engine_specs/postgres_tests.py +++ b/tests/integration_tests/db_engine_specs/postgres_tests.py @@ -176,7 +176,7 @@ class TestPostgresDbEngineSpec(TestDbEngineSpec): """ ) sql = "DROP TABLE birth_names" - with self.assertRaises(errors.SyntaxError): + with self.assertRaises(errors.SyntaxError): # noqa: PT027 PostgresEngineSpec.estimate_statement_cost(database, sql, cursor) def test_query_cost_formatter_example_costs(self): @@ -396,11 +396,11 @@ psql: error: could not connect to server: Operation timed out "issue_codes": [ { "code": 1014, - "message": "Issue 1014 - Either the username or the password is wrong.", + "message": "Issue 1014 - Either the username or the password is wrong.", # noqa: E501 }, { "code": 1015, - "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.", + "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.", # noqa: E501 }, ], }, @@ -411,7 +411,7 @@ psql: error: could not connect to server: Operation timed out result = PostgresEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='Please check your query for syntax errors at or near "from_". Then, try running your query again.', + message='Please check your query for syntax errors at or near "from_". 
Then, try running your query again.', # noqa: E501 error_type=SupersetErrorType.SYNTAX_ERROR, level=ErrorLevel.ERROR, extra={ diff --git a/tests/integration_tests/db_engine_specs/presto_tests.py b/tests/integration_tests/db_engine_specs/presto_tests.py index 9d83bb5bb..6ee639552 100644 --- a/tests/integration_tests/db_engine_specs/presto_tests.py +++ b/tests/integration_tests/db_engine_specs/presto_tests.py @@ -922,7 +922,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): mock_database = mock.MagicMock() mock_cursor = mock.MagicMock() mock_cursor.execute.side_effect = Exception() - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017, PT027 PrestoEngineSpec.estimate_statement_cost( mock_database, "DROP TABLE brth_names", mock_cursor ) @@ -946,7 +946,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): database.get_raw_connection().__enter__().cursor().execute = mock_execute schema = "schema" table = "table" - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017, PT027 PrestoEngineSpec.get_create_view(database, schema=schema, table=table) def test_get_create_view_database_error(self): @@ -997,7 +997,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1002, - "message": "Issue 1002 - The database returned an unexpected error.", + "message": "Issue 1002 - The database returned an unexpected error.", # noqa: E501 } ], }, @@ -1016,11 +1016,11 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1004, - "message": "Issue 1004 - The column was deleted or renamed in the database.", + "message": "Issue 1004 - The column was deleted or renamed in the database.", # noqa: E501 }, ], }, @@ -1031,7 +1031,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): result = PrestoEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message="The table \"'tpch.tiny.region2'\" does not exist. A valid table must be used to run this query.", + message="The table \"'tpch.tiny.region2'\" does not exist. A valid table must be used to run this query.", # noqa: E501 error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR, level=ErrorLevel.ERROR, extra={ @@ -1039,11 +1039,11 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1005, - "message": "Issue 1005 - The table was deleted or renamed in the database.", + "message": "Issue 1005 - The table was deleted or renamed in the database.", # noqa: E501 }, ], }, @@ -1054,7 +1054,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): result = PrestoEngineSpec.extract_errors(Exception(msg)) assert result == [ SupersetError( - message='The schema "tin" does not exist. A valid schema must be used to run this query.', + message='The schema "tin" does not exist. 
A valid schema must be used to run this query.', # noqa: E501 error_type=SupersetErrorType.SCHEMA_DOES_NOT_EXIST_ERROR, level=ErrorLevel.ERROR, extra={ @@ -1062,11 +1062,11 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1016, - "message": "Issue 1005 - The schema was deleted or renamed in the database.", + "message": "Issue 1005 - The schema was deleted or renamed in the database.", # noqa: E501 }, ], }, @@ -1085,14 +1085,14 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1014, - "message": "Issue 1014 - Either the username or the password is wrong.", + "message": "Issue 1014 - Either the username or the password is wrong.", # noqa: E501 } ], }, ) ] - msg = "Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known" + msg = "Failed to establish a new connection: [Errno 8] nodename nor servname provided, or not known" # noqa: E501 result = PrestoEngineSpec.extract_errors( Exception(msg), {"hostname": "badhost"} ) @@ -1106,7 +1106,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1007, - "message": "Issue 1007 - The hostname provided can't be resolved.", + "message": "Issue 1007 - The hostname provided can't be resolved.", # noqa: E501 } ], }, @@ -1119,7 +1119,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): ) assert result == [ SupersetError( - message='The host "badhost" might be down, and can\'t be reached on port 12345.', + message='The host "badhost" might be down, and can\'t be reached on port 12345.', # noqa: E501 error_type=SupersetErrorType.CONNECTION_HOST_DOWN_ERROR, level=ErrorLevel.ERROR, extra={ @@ -1127,7 +1127,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1009, - "message": "Issue 1009 - The host might be down, and can't be reached on the provided port.", + "message": "Issue 1009 - The host might be down, and can't be reached on the provided port.", # noqa: E501 } ], }, @@ -1164,7 +1164,7 @@ class TestPrestoDbEngineSpec(TestDbEngineSpec): "issue_codes": [ { "code": 1015, - "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.", + "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.", # noqa: E501 } ], }, diff --git a/tests/integration_tests/dict_import_export_tests.py b/tests/integration_tests/dict_import_export_tests.py index 116487882..bff144630 100644 --- a/tests/integration_tests/dict_import_export_tests.py +++ b/tests/integration_tests/dict_import_export_tests.py @@ -58,7 +58,13 @@ class TestDictImportExport(SupersetTestCase): cls.delete_imports() def create_table( - self, name, schema=None, id=0, cols_names=[], cols_uuids=None, metric_names=[] + self, + name, + schema=None, + id=0, + cols_names=[], # noqa: B006 + cols_uuids=None, + metric_names=[], # noqa: B006 ): database_name = "main" name = f"{NAME_PREFIX}{name}" @@ -250,7 +256,7 @@ class TestDictImportExport(SupersetTestCase): back_references=False, include_defaults=False, ) - self.get_resp("/login/", data=dict(username="admin", password="general")) + self.get_resp("/login/", data=dict(username="admin", password="general")) # noqa: S106, C408 resp = self.get_resp( "/databaseview/action_post", {"action": "yaml_export", "rowid": 1} ) 
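[Editor's note: the block below is illustrative commentary, not part of the patch.] Many hunks above silence PT027 rather than migrating unittest's `assertRaises` to the `pytest.raises` form the rule asks for. A minimal, self-contained sketch of that migration; the exception class and helper are hypothetical stand-ins, not Superset code:

    import pytest

    class DatasetNotFoundError(Exception):
        """Hypothetical stand-in for Superset's DatasetNotFoundError."""

    def run_export():
        # Stand-in for consuming a command's generator, e.g. next(contents).
        raise DatasetNotFoundError("Dataset not found.")

    def test_missing_dataset_raises():
        # unittest style, kept in the patch behind `# noqa: PT027`:
        #     with self.assertRaises(DatasetNotFoundError):
        #         run_export()
        # pytest-native style that PT027 prefers:
        with pytest.raises(DatasetNotFoundError):
            run_export()

Keeping the `noqa` markers lets the new rule set land without touching hundreds of call sites at once.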
diff --git a/tests/integration_tests/email_tests.py b/tests/integration_tests/email_tests.py
index d7afe3551..b5815cc31 100644
--- a/tests/integration_tests/email_tests.py
+++ b/tests/integration_tests/email_tests.py
@@ -141,7 +141,11 @@ class TestEmailSmtp(SupersetTestCase):
     def test_send_smtp_inline_images(self, mock_send_mime):
         image = read_fixture("sample.png")
         utils.send_email_smtp(
-            "to", "subject", "content", app.config, images=dict(blah=image)
+            "to",
+            "subject",
+            "content",
+            app.config,
+            images=dict(blah=image),  # noqa: C408
         )
         assert mock_send_mime.called
         call_args = mock_send_mime.call_args[0]
diff --git a/tests/integration_tests/event_logger_tests.py b/tests/integration_tests/event_logger_tests.py
index 4de125888..4a9ca8dde 100644
--- a/tests/integration_tests/event_logger_tests.py
+++ b/tests/integration_tests/event_logger_tests.py
@@ -56,7 +56,7 @@ class TestEventLogger(unittest.TestCase):
     def test_raises_typeerror_if_not_abc(self):
         # test that assignment of non AbstractEventLogger derived type raises
         # TypeError
-        with self.assertRaises(TypeError):
+        with self.assertRaises(TypeError):  # noqa: PT027
             get_event_logger_from_cfg_value(logging.getLogger())

     @patch.object(DBEventLogger, "log")
diff --git a/tests/integration_tests/explore/form_data/commands_tests.py b/tests/integration_tests/explore/form_data/commands_tests.py
index 979d0f2cf..f5dce1fb8 100644
--- a/tests/integration_tests/explore/form_data/commands_tests.py
+++ b/tests/integration_tests/explore/form_data/commands_tests.py
@@ -36,7 +36,7 @@ from tests.integration_tests.base_tests import SupersetTestCase


 class TestCreateFormDataCommand(SupersetTestCase):
-    @pytest.fixture()
+    @pytest.fixture
     def create_dataset(self):
         with self.create_app().app_context():
             dataset = SqlaTable(
@@ -54,7 +54,7 @@ class TestCreateFormDataCommand(SupersetTestCase):
             db.session.delete(dataset)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_slice(self):
         with self.create_app().app_context():
             dataset = (
@@ -78,7 +78,7 @@ class TestCreateFormDataCommand(SupersetTestCase):
             db.session.delete(slice)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_query(self):
         with self.create_app().app_context():
             query = Query(
diff --git a/tests/integration_tests/explore/permalink/commands_tests.py b/tests/integration_tests/explore/permalink/commands_tests.py
index c17d8bafd..2924415a0 100644
--- a/tests/integration_tests/explore/permalink/commands_tests.py
+++ b/tests/integration_tests/explore/permalink/commands_tests.py
@@ -31,7 +31,7 @@ from tests.integration_tests.base_tests import SupersetTestCase


 class TestCreatePermalinkDataCommand(SupersetTestCase):
-    @pytest.fixture()
+    @pytest.fixture
     def create_dataset(self):
         with self.create_app().app_context():
             dataset = SqlaTable(
@@ -49,7 +49,7 @@ class TestCreatePermalinkDataCommand(SupersetTestCase):
             db.session.delete(dataset)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_slice(self):
         with self.create_app().app_context():
             dataset = (
@@ -73,7 +73,7 @@ class TestCreatePermalinkDataCommand(SupersetTestCase):
             db.session.delete(slice)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_query(self):
         with self.create_app().app_context():
             query = Query(
diff --git a/tests/integration_tests/fixtures/birth_names_dashboard.py b/tests/integration_tests/fixtures/birth_names_dashboard.py
index 513a9f84a..084cbcb2a 100644
--- a/tests/integration_tests/fixtures/birth_names_dashboard.py
+++ b/tests/integration_tests/fixtures/birth_names_dashboard.py
@@ -43,7 +43,7 @@ def load_birth_names_data(
     data_loader.remove_table(birth_names_table.table_name)


-@pytest.fixture()
+@pytest.fixture
 def load_birth_names_dashboard_with_slices(load_birth_names_data):
     with app.app_context():
         dash_id_to_delete, slices_ids_to_delete = _create_dashboards()
diff --git a/tests/integration_tests/fixtures/dashboard_with_tabs.py b/tests/integration_tests/fixtures/dashboard_with_tabs.py
index 44f10e1cc..9d86555fe 100644
--- a/tests/integration_tests/fixtures/dashboard_with_tabs.py
+++ b/tests/integration_tests/fixtures/dashboard_with_tabs.py
@@ -478,7 +478,7 @@ def load_mutltiple_tabs_dashboard():
                 "children": [],
                 "id": "MARKDOWN-BUmyHM2s0x",
                 "meta": {
-                    "code": "# Aspiring Developers\n\nThe mission of FreeCodeCamp is to \"help people learn to code for free\". With this in mind, it's no surprise that ~83% of this survey's respondents fall into the **Aspiring Developer** category.\n\nIn this tab, we use visualization to explore:\n\n- Interest in relocating for work\n- Preferences around work location & style\n- Distribution of expected income\n- Distribution of highest degree held\n- Heatmap of highest degree held vs employment style preference",
+                    "code": "# Aspiring Developers\n\nThe mission of FreeCodeCamp is to \"help people learn to code for free\". With this in mind, it's no surprise that ~83% of this survey's respondents fall into the **Aspiring Developer** category.\n\nIn this tab, we use visualization to explore:\n\n- Interest in relocating for work\n- Preferences around work location & style\n- Distribution of expected income\n- Distribution of highest degree held\n- Heatmap of highest degree held vs employment style preference",  # noqa: E501
                     "height": 50,
                     "width": 4,
                 },
@@ -495,7 +495,7 @@ def load_mutltiple_tabs_dashboard():
                 "children": [],
                 "id": "MARKDOWN-NQmSPDOtpl",
                 "meta": {
-                    "code": "# Current Developers\n\nWhile majority of the students on FCC are Aspiring developers, there's a nontrivial minority that's there to continue leveling up their skills (17% of the survey respondents).\n\nBased on how respondents self-identified in the start of the survey, they were asked different questions. In this tab, we use visualizations to explore:\n\n- The buckets of commute team these developers encounter\n- The proportion of developers whose current job is their first developer job\n- Distribution of last year's income\n- The geographic distribution of these developers\n- The overlap between commute time and if their current job is their first developer job\n- Potential link between highest degree earned and last year's income",
+                    "code": "# Current Developers\n\nWhile majority of the students on FCC are Aspiring developers, there's a nontrivial minority that's there to continue leveling up their skills (17% of the survey respondents).\n\nBased on how respondents self-identified in the start of the survey, they were asked different questions. In this tab, we use visualizations to explore:\n\n- The buckets of commute team these developers encounter\n- The proportion of developers whose current job is their first developer job\n- Distribution of last year's income\n- The geographic distribution of these developers\n- The overlap between commute time and if their current job is their first developer job\n- Potential link between highest degree earned and last year's income",  # noqa: E501
                     "height": 56,
                     "width": 4,
                 },
@@ -512,7 +512,7 @@ def load_mutltiple_tabs_dashboard():
                 "children": [],
                 "id": "MARKDOWN-__u6CsUyfh",
                 "meta": {
-                    "code": "## FreeCodeCamp New Coder Survey 2018\n\nEvery year, FCC surveys its user base (mostly budding software developers) to learn more about their interests, backgrounds, goals, job status, and socioeconomic features. This dashboard visualizes survey data from the 2018 survey.\n\n- [Survey link](https://freecodecamp.typeform.com/to/S3UeD9)\n- [Dataset](https://github.com/freeCodeCamp/2018-new-coder-survey)\n- [FCC Blog Post](https://www.freecodecamp.org/news/we-asked-20-000-people-who-they-are-and-how-theyre-learning-to-code-fff5d668969/)",
+                    "code": "## FreeCodeCamp New Coder Survey 2018\n\nEvery year, FCC surveys its user base (mostly budding software developers) to learn more about their interests, backgrounds, goals, job status, and socioeconomic features. This dashboard visualizes survey data from the 2018 survey.\n\n- [Survey link](https://freecodecamp.typeform.com/to/S3UeD9)\n- [Dataset](https://github.com/freeCodeCamp/2018-new-coder-survey)\n- [FCC Blog Post](https://www.freecodecamp.org/news/we-asked-20-000-people-who-they-are-and-how-theyre-learning-to-code-fff5d668969/)",  # noqa: E501
                     "height": 30,
                     "width": 6,
                 },
@@ -529,7 +529,7 @@ def load_mutltiple_tabs_dashboard():
                 "children": [],
                 "id": "MARKDOWN-zc2mWxZeox",
                 "meta": {
-                    "code": "# Demographics\n\nFreeCodeCamp is a completely-online community of people learning to code and consists of aspiring & current developers from all over the world. That doesn't necessarily mean that access to these types of opportunities are evenly distributed. \n\nThe following charts can begin to help us understand:\n\n- the original citizenship of the survey respondents\n- minority representation among both aspiring and current developers\n- their age distribution\n- household languages",
+                    "code": "# Demographics\n\nFreeCodeCamp is a completely-online community of people learning to code and consists of aspiring & current developers from all over the world. That doesn't necessarily mean that access to these types of opportunities are evenly distributed. \n\nThe following charts can begin to help us understand:\n\n- the original citizenship of the survey respondents\n- minority representation among both aspiring and current developers\n- their age distribution\n- household languages",  # noqa: E501
                     "height": 52,
                     "width": 3,
                 },
@@ -648,4 +648,4 @@ def load_mutltiple_tabs_dashboard():
     dash = create_dashboard(
         "multi_tabs_test", "multiple tabs Test", json.dumps(position_json), None
     )
-    yield dash
+    return dash
diff --git a/tests/integration_tests/fixtures/datasource.py b/tests/integration_tests/fixtures/datasource.py
index 974f2c408..99223b9c1 100644
--- a/tests/integration_tests/fixtures/datasource.py
+++ b/tests/integration_tests/fixtures/datasource.py
@@ -171,8 +171,7 @@ def get_datasource_post() -> dict[str, Any]:
     }


-@pytest.fixture()
-@pytest.mark.usefixtures("app_conntext")
+@pytest.fixture
 def load_dataset_with_columns() -> Generator[SqlaTable, None, None]:
     engine = create_engine(app.config["SQLALCHEMY_DATABASE_URI"], echo=True)
     meta = MetaData()
diff --git a/tests/integration_tests/fixtures/energy_dashboard.py b/tests/integration_tests/fixtures/energy_dashboard.py
index 5d938e054..2407ec01e 100644
--- a/tests/integration_tests/fixtures/energy_dashboard.py
+++ b/tests/integration_tests/fixtures/energy_dashboard.py
@@ -56,7 +56,7 @@ def load_energy_table_data():
     engine.execute("DROP TABLE IF EXISTS energy_usage")


-@pytest.fixture()
+@pytest.fixture
 def load_energy_table_with_slice(load_energy_table_data):
     with app.app_context():
         slices = _create_energy_table()
@@ -135,7 +135,7 @@ def _get_energy_data():
             {
                 "source": f"energy_source{i}",
                 "target": f"energy_target{i}",
-                "value": random.uniform(0.1, 11.0),
+                "value": random.uniform(0.1, 11.0),  # noqa: S311
             }
         )
     return data
@@ -186,6 +186,6 @@ def _get_energy_slices():
             "xscale_interval": "1",
             "yscale_interval": "1",
         },
-        "query_context": '{"datasource":{"id":12,"type":"table"},"force":false,"queries":[{"time_range":" : ","filters":[],"extras":{"time_grain_sqla":null,"having":"","where":""},"applied_time_extras":{},"columns":[],"metrics":[],"annotation_layers":[],"row_limit":5000,"timeseries_limit":0,"order_desc":true,"url_params":{},"custom_params":{},"custom_form_data":{}}],"result_format":"json","result_type":"full"}',
+        "query_context": '{"datasource":{"id":12,"type":"table"},"force":false,"queries":[{"time_range":" : ","filters":[],"extras":{"time_grain_sqla":null,"having":"","where":""},"applied_time_extras":{},"columns":[],"metrics":[],"annotation_layers":[],"row_limit":5000,"timeseries_limit":0,"order_desc":true,"url_params":{},"custom_params":{},"custom_form_data":{}}],"result_format":"json","result_type":"full"}',  # noqa: E501
     },
 ]
diff --git a/tests/integration_tests/fixtures/importexport.py b/tests/integration_tests/fixtures/importexport.py
index 5a778ed07..427d0d24a 100644
--- a/tests/integration_tests/fixtures/importexport.py
+++ b/tests/integration_tests/fixtures/importexport.py
@@ -44,7 +44,7 @@ dataset_ui_export: list[dict[str, Any]] = [
             },
             {"expression": "SUM(num)", "metric_name": "sum__num"},
         ],
-        "params": '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}',
+        "params": '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}',  # noqa: E501
         "table_name": "birth_names_2",
     }
 ]
@@ -67,8 +67,8 @@ dashboard_export: dict[str, Any] = {
     "css": "",
     "dashboard_title": "Births 2",
     "description": None,
-    "json_metadata": '{"timed_refresh_immune_slices": [], "expanded_slices": {}, "refresh_frequency": 0, "default_filters": "{}", "color_scheme": null, "remote_id": 1}',
-    "position_json": '{"CHART--jvaBFZx78":{"children":[],"id":"CHART--jvaBFZx78","meta":{"chartId":83,"height":50,"sliceName":"Number of California Births","uuid":"c77bb4b3-09f4-4d9a-a9e2-66a627c64343","width":4},"parents":["ROOT_ID","GRID_ID","ROW-se_5H8KNiO"],"type":"CHART"},"DASHBOARD_VERSION_KEY":"v2","GRID_ID":{"children":["ROW-se_5H8KNiO"],"id":"GRID_ID","parents":["ROOT_ID"],"type":"GRID"},"HEADER_ID":{"id":"HEADER_ID","meta":{"text":"Births"},"type":"HEADER"},"ROOT_ID":{"children":["GRID_ID"],"id":"ROOT_ID","type":"ROOT"},"ROW-se_5H8KNiO":{"children":["CHART--jvaBFZx78"],"id":"ROW-se_5H8KNiO","meta":{"background":"BACKGROUND_TRANSPARENT"},"parents":["ROOT_ID","GRID_ID"],"type":"ROW"}}',
+    "json_metadata": '{"timed_refresh_immune_slices": [], "expanded_slices": {}, "refresh_frequency": 0, "default_filters": "{}", "color_scheme": null, "remote_id": 1}',  # noqa: E501
+    "position_json": '{"CHART--jvaBFZx78":{"children":[],"id":"CHART--jvaBFZx78","meta":{"chartId":83,"height":50,"sliceName":"Number of California Births","uuid":"c77bb4b3-09f4-4d9a-a9e2-66a627c64343","width":4},"parents":["ROOT_ID","GRID_ID","ROW-se_5H8KNiO"],"type":"CHART"},"DASHBOARD_VERSION_KEY":"v2","GRID_ID":{"children":["ROW-se_5H8KNiO"],"id":"GRID_ID","parents":["ROOT_ID"],"type":"GRID"},"HEADER_ID":{"id":"HEADER_ID","meta":{"text":"Births"},"type":"HEADER"},"ROOT_ID":{"children":["GRID_ID"],"id":"ROOT_ID","type":"ROOT"},"ROW-se_5H8KNiO":{"children":["CHART--jvaBFZx78"],"id":"ROW-se_5H8KNiO","meta":{"background":"BACKGROUND_TRANSPARENT"},"parents":["ROOT_ID","GRID_ID"],"type":"ROW"}}',  # noqa: E501
     "slices": [
         {
             "__Slice__": {
@@ -76,7 +76,7 @@ dashboard_export: dict[str, Any] = {
                 "datasource_name": "birth_names_2",
                 "datasource_type": "table",
                 "id": 83,
-                "params": '{"adhoc_filters": [], "datasource": "3__table", "granularity_sqla": "ds", "header_font_size": 0.4, "metric": {"aggregate": "SUM", "column": {"column_name": "num_california", "expression": "CASE WHEN state = \'CA\' THEN num ELSE 0 END"}, "expressionType": "SIMPLE", "label": "SUM(num_california)"}, "slice_id": 83, "subheader_font_size": 0.15, "time_range": "100 years ago : now", "url_params": {}, "viz_type": "big_number_total", "y_axis_format": "SMART_NUMBER", "remote_id": 83, "datasource_name": "birth_names_2", "schema": null, "database_name": "examples"}',
+                "params": '{"adhoc_filters": [], "datasource": "3__table", "granularity_sqla": "ds", "header_font_size": 0.4, "metric": {"aggregate": "SUM", "column": {"column_name": "num_california", "expression": "CASE WHEN state = \'CA\' THEN num ELSE 0 END"}, "expressionType": "SIMPLE", "label": "SUM(num_california)"}, "slice_id": 83, "subheader_font_size": 0.15, "time_range": "100 years ago : now", "url_params": {}, "viz_type": "big_number_total", "y_axis_format": "SMART_NUMBER", "remote_id": 83, "datasource_name": "birth_names_2", "schema": null, "database_name": "examples"}',  # noqa: E501
                 "slice_name": "Number of California Births",
                 "viz_type": "big_number_total",
             }
@@ -308,7 +308,7 @@ dashboard_export: dict[str, Any] = {
         },
     ],
     "offset": 0,
-    "params": '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}',
+    "params": '{"remote_id": 3, "database_name": "examples", "import_time": 1604342885}',  # noqa: E501
     "schema": None,
     "sql": None,
     "table_name": "birth_names_2",
@@ -516,7 +516,7 @@ dataset_config: dict[str, Any] = {
     "always_filter_main_dttm": False,
     "filter_select_enabled": True,
     "fetch_values_predicate": None,
-    "extra": '{ "certification": { "certified_by": "Data Platform Team", "details": "This table is the source of truth." }, "warning_markdown": "This is a warning." }',
+    "extra": '{ "certification": { "certified_by": "Data Platform Team", "details": "This table is the source of truth." }, "warning_markdown": "This is a warning." }',  # noqa: E501
     "metrics": [
         {
             "metric_name": "count",
@@ -555,7 +555,7 @@ chart_config: dict[str, Any] = {
         "color_picker": {"a": 1, "b": 135, "g": 122, "r": 0},
         "datasource": "12__table",
         "js_columns": ["color"],
-        "js_data_mutator": r"data => data.map(d => ({\n ...d,\n color: colors.hexToRGB(d.extraProps.color)\n}));",
+        "js_data_mutator": r"data => data.map(d => ({\n ...d,\n color: colors.hexToRGB(d.extraProps.color)\n}));",  # noqa: E501
         "js_onclick_href": "",
         "js_tooltip": "",
         "line_column": "path_json",
@@ -585,7 +585,7 @@ chart_config: dict[str, Any] = {
         },
         "viz_type": "deck_path",
     },
-    "query_context": '{"datasource":{"id":12,"type":"table"},"force":false,"queries":[{"time_range":" : ","filters":[],"extras":{"time_grain_sqla":null,"having":"","where":""},"applied_time_extras":{},"columns":[],"metrics":[],"annotation_layers":[],"row_limit":5000,"timeseries_limit":0,"order_desc":true,"url_params":{},"custom_params":{},"custom_form_data":{}}],"result_format":"json","result_type":"full"}',
+    "query_context": '{"datasource":{"id":12,"type":"table"},"force":false,"queries":[{"time_range":" : ","filters":[],"extras":{"time_grain_sqla":null,"having":"","where":""},"applied_time_extras":{},"columns":[],"metrics":[],"annotation_layers":[],"row_limit":5000,"timeseries_limit":0,"order_desc":true,"url_params":{},"custom_params":{},"custom_form_data":{}}],"result_format":"json","result_type":"full"}',  # noqa: E501
     "cache_timeout": None,
     "uuid": "0c23747a-6528-4629-97bf-e4b78d3b9df1",
     "version": "1.0.0",
@@ -685,7 +685,7 @@ saved_queries_config = {
         "schema": "public",
         "label": "Test Saved Query",
         "description": None,
-        "sql": "-- Note: Unless you save your query, these tabs will NOT persist if you clear\nyour cookies or change browsers.\n\n\nSELECT * from birth_names",
+        "sql": "-- Note: Unless you save your query, these tabs will NOT persist if you clear\nyour cookies or change browsers.\n\n\nSELECT * from birth_names",  # noqa: E501
         "uuid": "05b679b5-8eaf-452c-b874-a7a774cfa4e9",
         "version": "1.0.0",
         "database_uuid": "b8a1ccd3-779d-4ab7-8ad8-9ab119d7fe89",
diff --git a/tests/integration_tests/fixtures/public_role.py b/tests/integration_tests/fixtures/public_role.py
index eeb4c798d..c0b80847c 100644
--- a/tests/integration_tests/fixtures/public_role.py
+++ b/tests/integration_tests/fixtures/public_role.py
@@ -21,7 +21,7 @@ from superset.extensions import db, security_manager
 from tests.integration_tests.test_app import app


-@pytest.fixture()
+@pytest.fixture
 def public_role_like_gamma(app_context: AppContext):
     app.config["PUBLIC_ROLE_LIKE"] = "Gamma"
     security_manager.sync_role_definitions()
@@ -32,7 +32,7 @@ def public_role_like_gamma(app_context: AppContext):
     db.session.commit()


-@pytest.fixture()
+@pytest.fixture
 def public_role_like_test_role(app_context: AppContext):
     app.config["PUBLIC_ROLE_LIKE"] = "TestRole"
     security_manager.sync_role_definitions()
diff --git a/tests/integration_tests/fixtures/tags.py b/tests/integration_tests/fixtures/tags.py
index 90449957f..8fb427015 100644
--- a/tests/integration_tests/fixtures/tags.py
+++ b/tests/integration_tests/fixtures/tags.py
@@ -24,7 +24,6 @@ from tests.integration_tests.test_app import app


 @pytest.fixture
-@pytest.mark.usefixtures("app_context")
 def with_tagging_system_feature():
     is_enabled = app.config["DEFAULT_FEATURE_FLAGS"]["TAGGING_SYSTEM"]
     if not is_enabled:
@@ -50,7 +49,7 @@ def create_custom_tags():

     yield tags

-    for tags in tags:
+    for tags in tags:  # noqa: B020
         db.session.delete(tags)
     db.session.commit()
diff --git a/tests/integration_tests/fixtures/unicode_dashboard.py b/tests/integration_tests/fixtures/unicode_dashboard.py
index 970845783..2f18c402b 100644
--- a/tests/integration_tests/fixtures/unicode_dashboard.py
+++ b/tests/integration_tests/fixtures/unicode_dashboard.py
@@ -55,7 +55,7 @@ def load_unicode_data():
     engine.execute("DROP TABLE IF EXISTS unicode_test")


-@pytest.fixture()
+@pytest.fixture
 def load_unicode_dashboard_with_slice(load_unicode_data):
     slice_name = "Unicode Cloud"
     with app.app_context():
@@ -64,7 +64,7 @@ def load_unicode_dashboard_with_slice(load_unicode_data):
         _cleanup(dash, slice_name)


-@pytest.fixture()
+@pytest.fixture
 def load_unicode_dashboard_with_position(load_unicode_data):
     slice_name = "Unicode Cloud"
     position = "{}"
diff --git a/tests/integration_tests/fixtures/users.py b/tests/integration_tests/fixtures/users.py
index dd3730df9..1a2073bf9 100644
--- a/tests/integration_tests/fixtures/users.py
+++ b/tests/integration_tests/fixtures/users.py
@@ -22,7 +22,7 @@ from superset import db, security_manager
 from tests.integration_tests.constants import GAMMA_SQLLAB_NO_DATA_USERNAME


-@pytest.fixture()
+@pytest.fixture
 def create_gamma_sqllab_no_data(app_context: AppContext):
     gamma_role = db.session.query(Role).filter(Role.name == "Gamma").one_or_none()
     sqllab_role = db.session.query(Role).filter(Role.name == "sql_lab").one_or_none()
@@ -33,7 +33,7 @@ def create_gamma_sqllab_no_data(app_context: AppContext):
         "gamma_sqllab_no_data",
         "gamma_sqllab_no_data@apache.org",
         [gamma_role, sqllab_role],
-        password="general",
+        password="general",  # noqa: S106
     )

     yield
diff --git a/tests/integration_tests/fixtures/world_bank_dashboard.py b/tests/integration_tests/fixtures/world_bank_dashboard.py
index 0d49b6e61..983c0fba2 100644
--- a/tests/integration_tests/fixtures/world_bank_dashboard.py
+++ b/tests/integration_tests/fixtures/world_bank_dashboard.py
@@ -70,7 +70,7 @@ def load_world_bank_data():
     engine.execute("DROP TABLE IF EXISTS wb_health_population")


-@pytest.fixture()
+@pytest.fixture
 def load_world_bank_dashboard_with_slices(load_world_bank_data):
     with app.app_context():
         dash_id_to_delete, slices_ids_to_delete = create_dashboard_for_loaded_data()
@@ -177,19 +177,19 @@ def _get_world_bank_data() -> list[dict[Any, Any]]:
         data.append(
             {
                 "country_name": "".join(
-                    choice(string.ascii_uppercase + string.ascii_lowercase + " ")
-                    for _ in range(randint(3, 10))
+                    choice(string.ascii_uppercase + string.ascii_lowercase + " ")  # noqa: S311
+                    for _ in range(randint(3, 10))  # noqa: S311
                 ),
                 "country_code": "".join(
-                    choice(string.ascii_uppercase + string.ascii_lowercase)
+                    choice(string.ascii_uppercase + string.ascii_lowercase)  # noqa: S311
                     for _ in range(3)
                 ),
                 "region": "".join(
-                    choice(string.ascii_uppercase + string.ascii_lowercase)
-                    for _ in range(randint(3, 10))
+                    choice(string.ascii_uppercase + string.ascii_lowercase)  # noqa: S311
+                    for _ in range(randint(3, 10))  # noqa: S311
                 ),
                 "year": "-".join(
-                    [str(randint(1900, 2020)), str(randint(1, 12)), str(randint(1, 28))]
+                    [str(randint(1900, 2020)), str(randint(1, 12)), str(randint(1, 28))]  # noqa: S311
                 ),
                 "NY_GNP_PCAP_CD": get_random_float_or_none(0, 100, 0.3),
                 "SE_ADT_1524_LT_FM_ZS": get_random_float_or_none(0, 100, 0.3),
@@ -522,7 +522,7 @@ def _get_world_bank_data() -> list[dict[Any, Any]]:


 def get_random_float_or_none(min_value, max_value, none_probability):
-    if random() < none_probability:
+    if random() < none_probability:  # noqa: S311
         return None
     else:
-        return uniform(min_value, max_value)
+        return uniform(min_value, max_value)  # noqa: S311
diff --git a/tests/integration_tests/import_export_tests.py b/tests/integration_tests/import_export_tests.py
index 702acf4b0..d4acc0103 100644
--- a/tests/integration_tests/import_export_tests.py
+++ b/tests/integration_tests/import_export_tests.py
@@ -109,7 +109,7 @@ class TestImportExport(SupersetTestCase):
             id=id,
         )

-    def create_dashboard(self, title, id=0, slcs=[]):
+    def create_dashboard(self, title, id=0, slcs=[]):  # noqa: B006
         json_metadata = {"remote_id": id}
         return Dashboard(
             id=id,
@@ -121,7 +121,7 @@ class TestImportExport(SupersetTestCase):
             published=False,
         )

-    def create_table(self, name, schema=None, id=0, cols_names=[], metric_names=[]):
+    def create_table(self, name, schema=None, id=0, cols_names=[], metric_names=[]):  # noqa: B006
         params = {"remote_id": id, "database_name": "examples"}
         table = SqlaTable(
             id=id,
diff --git a/tests/integration_tests/insert_chart_mixin.py b/tests/integration_tests/insert_chart_mixin.py
index 722e387a5..49fc2e476 100644
--- a/tests/integration_tests/insert_chart_mixin.py
+++ b/tests/integration_tests/insert_chart_mixin.py
@@ -40,7 +40,7 @@ class InsertChartMixin:
         certified_by: Optional[str] = None,
         certification_details: Optional[str] = None,
     ) -> Slice:
-        obj_owners = list()
+        obj_owners = list()  # noqa: C408
         for owner in owners:
             user = db.session.query(security_manager.user_model).get(owner)
             obj_owners.append(user)
diff --git a/tests/integration_tests/migrations/fb13d49b72f9_better_filters__tests.py b/tests/integration_tests/migrations/fb13d49b72f9_better_filters__tests.py
index 28cfd5a7b..b9ccd7c2e 100644
--- a/tests/integration_tests/migrations/fb13d49b72f9_better_filters__tests.py
+++ b/tests/integration_tests/migrations/fb13d49b72f9_better_filters__tests.py
@@ -29,7 +29,7 @@ def test_upgrade_slice():
     slc = Slice(
         slice_name="FOO",
         viz_type="filter_box",
-        params=json.dumps(dict(metric="foo", groupby=["bar"])),
+        params=json.dumps(dict(metric="foo", groupby=["bar"])),  # noqa: C408
     )
     upgrade_slice(slc)
     params = json.loads(slc.params)
diff --git a/tests/integration_tests/model_tests.py b/tests/integration_tests/model_tests.py
index d956a1016..f3d73ae55 100644
--- a/tests/integration_tests/model_tests.py
+++ b/tests/integration_tests/model_tests.py
@@ -322,12 +322,12 @@ class TestDatabaseModel(SupersetTestCase):
         # TODO(bkyryliuk): unify sql generation
         if db.backend == "presto":
             assert (
-                'SELECT\n "source" AS "source",\n "target" AS "target",\n "value" AS "value"\nFROM "energy_usage"\nLIMIT 100'
+                'SELECT\n "source" AS "source",\n "target" AS "target",\n "value" AS "value"\nFROM "energy_usage"\nLIMIT 100'  # noqa: E501
                 in sql
             )
         elif db.backend == "hive":
             assert (
-                "SELECT\n `source`,\n `target`,\n `value`\nFROM `energy_usage`\nLIMIT 100"
+                "SELECT\n `source`,\n `target`,\n `value`\nFROM `energy_usage`\nLIMIT 100"  # noqa: E501
                 in sql
             )
         else:
@@ -385,7 +385,7 @@ class TestDatabaseModel(SupersetTestCase):
             return_value={Exception: SupersetException}
         )
         mocked_create_engine.side_effect = Exception()
-        with self.assertRaises(SupersetException):
+        with self.assertRaises(SupersetException):  # noqa: PT027
             model._get_sqla_engine()
@@ -448,10 +448,10 @@ class TestSqlaTableModel(SupersetTestCase):
         old_inner_join = spec.allows_joins
         spec.allows_joins = inner_join
         arbitrary_gby = "state || gender || '_test'"
-        arbitrary_metric = dict(
+        arbitrary_metric = dict(  # noqa: C408
             label="arbitrary", expressionType="SQL", sqlExpression="SUM(num_boys)"
         )
-        query_obj = dict(
+        query_obj = dict(  # noqa: C408
             groupby=[arbitrary_gby, "name"],
             metrics=[arbitrary_metric],
             filter=[],
@@ -460,7 +460,7 @@ class TestSqlaTableModel(SupersetTestCase):
             granularity="ds",
             from_dttm=None,
             to_dttm=None,
-            extras=dict(time_grain_sqla="P1Y"),
+            extras=dict(time_grain_sqla="P1Y"),  # noqa: C408
             series_limit=15 if inner_join and is_timeseries else None,
         )
         qr = tbl.query(query_obj)
@@ -502,7 +502,7 @@ class TestSqlaTableModel(SupersetTestCase):
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_sql_mutator(self):
         tbl = self.get_table(name="birth_names")
-        query_obj = dict(
+        query_obj = dict(  # noqa: C408
             groupby=[],
             metrics=None,
             filter=[],
@@ -528,7 +528,7 @@ class TestSqlaTableModel(SupersetTestCase):
     @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices")
     def test_sql_mutator_different_params(self):
         tbl = self.get_table(name="birth_names")
-        query_obj = dict(
+        query_obj = dict(  # noqa: C408
             groupby=[],
             metrics=None,
             filter=[],
@@ -555,7 +555,7 @@ class TestSqlaTableModel(SupersetTestCase):
     def test_query_with_non_existent_metrics(self):
         tbl = self.get_table(name="birth_names")

-        query_obj = dict(
+        query_obj = dict(  # noqa: C408
             groupby=[],
             metrics=["invalid"],
             filter=[],
@@ -567,14 +567,14 @@ class TestSqlaTableModel(SupersetTestCase):
             extras={},
         )

-        with self.assertRaises(Exception) as context:
+        with self.assertRaises(Exception) as context:  # noqa: PT027
             tbl.get_query_str(query_obj)
         assert "Metric 'invalid' does not exist", context.exception

     def test_query_label_without_group_by(self):
         tbl = self.get_table(name="birth_names")

-        query_obj = dict(
+        query_obj = dict(  # noqa: C408
             groupby=[],
             columns=[
                 "gender",
diff --git a/tests/integration_tests/queries/api_tests.py b/tests/integration_tests/queries/api_tests.py
index 92a1f47fc..c2a4715a4 100644
--- a/tests/integration_tests/queries/api_tests.py
+++ b/tests/integration_tests/queries/api_tests.py
@@ -76,7 +76,7 @@ class TestQueryApi(SupersetTestCase):
         db.session.commit()
         return query

-    @pytest.fixture()
+    @pytest.fixture
     def create_queries(self):
         with self.create_app().app_context():
             queries = []
@@ -90,7 +90,7 @@ class TestQueryApi(SupersetTestCase):
                     example_database_id,
                     admin_id,
                     self.get_random_string(),
-                    sql=f"SELECT col1, col2 from table{cx}",
+                    sql=f"SELECT col1, col2 from table{cx}",  # noqa: S608
                     rows=cx,
                     status=QueryStatus.SUCCESS
                     if (cx % 2) == 0
@@ -102,7 +102,7 @@ class TestQueryApi(SupersetTestCase):
                     main_database_id,
                     alpha_id,
                     self.get_random_string(),
-                    sql=f"SELECT col1, col2 from table{QUERIES_FIXTURE_COUNT}",
+                    sql=f"SELECT col1, col2 from table{QUERIES_FIXTURE_COUNT}",  # noqa: S608
                     rows=QUERIES_FIXTURE_COUNT,
                     status=QueryStatus.SUCCESS,
                 )
@@ -118,7 +118,7 @@ class TestQueryApi(SupersetTestCase):
     @staticmethod
     def get_random_string(length: int = 10):
         letters = string.ascii_letters
-        return "".join(random.choice(letters) for i in range(length))
+        return "".join(random.choice(letters) for i in range(length))  # noqa: S311

     def test_get_query(self):
         """
@@ -219,7 +219,7 @@ class TestQueryApi(SupersetTestCase):
         )

         # Gamma1 user, only sees their own queries
-        self.login(username="gamma_1", password="password")
+        self.login(username="gamma_1", password="password")  # noqa: S106
         uri = f"api/v1/query/{query_gamma2.id}"
         rv = self.client.get(uri)
         assert rv.status_code == 404
@@ -229,7 +229,7 @@ class TestQueryApi(SupersetTestCase):

         # Gamma2 user, only sees their own queries
         self.logout()
-        self.login(username="gamma_2", password="password")
+        self.login(username="gamma_2", password="password")  # noqa: S106
         uri = f"api/v1/query/{query_gamma1.id}"
         rv = self.client.get(uri)
         assert rv.status_code == 404
@@ -266,7 +266,7 @@ class TestQueryApi(SupersetTestCase):
         data = json.loads(rv.data.decode("utf-8"))
         assert data["count"] == QUERIES_FIXTURE_COUNT
         # check expected columns
-        assert sorted(list(data["result"][0].keys())) == [
+        assert sorted(list(data["result"][0].keys())) == [  # noqa: C414
             "changed_on",
             "database",
             "end_time",
@@ -283,7 +283,7 @@ class TestQueryApi(SupersetTestCase):
             "tracking_url",
             "user",
         ]
-        assert sorted(list(data["result"][0]["user"].keys())) == [
+        assert sorted(list(data["result"][0]["user"].keys())) == [  # noqa: C414
             "first_name",
             "id",
             "last_name",
@@ -431,7 +431,7 @@ class TestQueryApi(SupersetTestCase):
         self.login(ADMIN_USERNAME)

         timestamp = datetime.timestamp(now - timedelta(days=2)) * 1000
-        uri = f"api/v1/query/updated_since?q={prison.dumps({'last_updated_ms': timestamp})}"
+        uri = f"api/v1/query/updated_since?q={prison.dumps({'last_updated_ms': timestamp})}"  # noqa: E501
         rv = self.client.get(uri)
         assert rv.status_code == 200
diff --git a/tests/integration_tests/queries/saved_queries/api_tests.py b/tests/integration_tests/queries/saved_queries/api_tests.py
index aa2c93110..03ad660e4 100644
--- a/tests/integration_tests/queries/saved_queries/api_tests.py
+++ b/tests/integration_tests/queries/saved_queries/api_tests.py
@@ -92,7 +92,7 @@ class TestSavedQueryApi(SupersetTestCase):
             description="cool description",
         )

-    @pytest.fixture()
+    @pytest.fixture
     def create_saved_queries(self):
         with self.create_app().app_context():
             saved_queries = []
@@ -480,9 +480,9 @@ class TestSavedQueryApi(SupersetTestCase):
         assert data_by_id["count"] == data_by_name["count"], len(
             expected_saved_queries
         )
-        assert set(query["id"] for query in data_by_id["result"]) == set(
+        assert set(query["id"] for query in data_by_id["result"]) == set(  # noqa: C401
             query["id"] for query in data_by_name["result"]
-        ), set(query.id for query in expected_saved_queries)
+        ), set(query.id for query in expected_saved_queries)  # noqa: C401

     @pytest.mark.usefixtures("create_saved_queries")
     def test_get_saved_query_favorite_filter(self):
diff --git a/tests/integration_tests/queries/saved_queries/commands_tests.py b/tests/integration_tests/queries/saved_queries/commands_tests.py
index 4ce816622..b36cb0cdf 100644
--- a/tests/integration_tests/queries/saved_queries/commands_tests.py
+++ b/tests/integration_tests/queries/saved_queries/commands_tests.py
@@ -47,7 +47,7 @@ class TestExportSavedQueriesCommand(SupersetTestCase):
             sql="SELECT 42",
             label="The answer",
             schema="schema1",
-            description="Answer to the Ultimate Question of Life, the Universe, and Everything",
+            description="Answer to the Ultimate Question of Life, the Universe, and Everything",  # noqa: E501
         )
         db.session.add(self.example_query)
         db.session.commit()
@@ -78,7 +78,7 @@ class TestExportSavedQueriesCommand(SupersetTestCase):
             "catalog": None,
             "schema": "schema1",
             "label": "The answer",
-            "description": "Answer to the Ultimate Question of Life, the Universe, and Everything",
+            "description": "Answer to the Ultimate Question of Life, the Universe, and Everything",  # noqa: E501
             "sql": "SELECT 42",
             "uuid": str(self.example_query.uuid),
             "version": "1.0.0",
@@ -110,7 +110,7 @@ class TestExportSavedQueriesCommand(SupersetTestCase):
         command = ExportSavedQueriesCommand([self.example_query.id])
         contents = command.run()

-        with self.assertRaises(SavedQueryNotFoundError):
+        with self.assertRaises(SavedQueryNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.queries.saved_queries.filters.g")
@@ -120,7 +120,7 @@ class TestExportSavedQueriesCommand(SupersetTestCase):
         command = ExportSavedQueriesCommand([-1])
         contents = command.run()

-        with self.assertRaises(SavedQueryNotFoundError):
+        with self.assertRaises(SavedQueryNotFoundError):  # noqa: PT027
             next(contents)

     @patch("superset.queries.saved_queries.filters.g")
diff --git a/tests/integration_tests/query_context_tests.py b/tests/integration_tests/query_context_tests.py
index d77523c71..c744bb75a 100644
--- a/tests/integration_tests/query_context_tests.py
+++ b/tests/integration_tests/query_context_tests.py
@@ -208,7 +208,7 @@ class TestQueryContext(SupersetTestCase):
         payload = get_query_context("birth_names", add_postprocessing_operations=True)
         del payload["queries"][0]["granularity"]

-        # construct baseline query_cache_key from query_context with post processing operation
+        # construct baseline query_cache_key from query_context with post processing operation  # noqa: E501
         query_context: QueryContext = ChartDataQueryContextSchema().load(payload)
         query_object: QueryObject = query_context.queries[0]
         cache_key_original = query_context.query_cache_key(query_object)
@@ -222,7 +222,7 @@ class TestQueryContext(SupersetTestCase):
     def test_query_cache_key_changes_when_post_processing_is_updated(self):
         payload = get_query_context("birth_names", add_postprocessing_operations=True)

-        # construct baseline query_cache_key from query_context with post processing operation
+        # construct baseline query_cache_key from query_context with post processing operation  # noqa: E501
         query_context = ChartDataQueryContextSchema().load(payload)
         query_object = query_context.queries[0]
         cache_key_original = query_context.query_cache_key(query_object)
diff --git a/tests/integration_tests/reports/api_tests.py b/tests/integration_tests/reports/api_tests.py
index 55b333b81..7737e346e 100644
--- a/tests/integration_tests/reports/api_tests.py
+++ b/tests/integration_tests/reports/api_tests.py
@@ -60,7 +60,7 @@ REPORTS_GAMMA_USER = "reports_gamma"


 class TestReportSchedulesApi(SupersetTestCase):
-    @pytest.fixture()
+    @pytest.fixture
     def gamma_user_with_alerts_role(self):
         with self.create_app().app_context():
             user = self.create_user(
@@ -91,7 +91,7 @@ class TestReportSchedulesApi(SupersetTestCase):
             db.session.delete(user)
             db.session.commit()

-    @pytest.fixture()
+    @pytest.fixture
     def create_working_admin_report_schedule(self):
         with self.create_app().app_context():
             admin_user = self.get_user("admin")
@@ -115,8 +115,7 @@ class TestReportSchedulesApi(SupersetTestCase):
             db.session.delete(report_schedule)
             db.session.commit()

-    @pytest.mark.usefixtures("gamma_user_with_alerts_role")
-    @pytest.fixture()
+    @pytest.fixture
     def create_working_gamma_report_schedule(self, gamma_user_with_alerts_role):
         with self.create_app().app_context():
             chart = db.session.query(Slice).first()
@@ -139,8 +138,7 @@ class TestReportSchedulesApi(SupersetTestCase):
             db.session.delete(report_schedule)
             db.session.commit()

-    @pytest.mark.usefixtures("gamma_user_with_alerts_role")
-    @pytest.fixture()
+    @pytest.fixture
     def create_working_shared_report_schedule(self, gamma_user_with_alerts_role):
         with self.create_app().app_context():
             admin_user = self.get_user("admin")
@@ -165,7 +163,7 @@ class
TestReportSchedulesApi(SupersetTestCase): db.session.delete(report_schedule) db.session.commit() - @pytest.fixture() + @pytest.fixture def create_report_schedules(self): with self.create_app().app_context(): report_schedules = [] @@ -196,7 +194,7 @@ class TestReportSchedulesApi(SupersetTestCase): type=ReportScheduleType.ALERT, name=f"name{cx}", crontab=f"*/{cx} * * * *", - sql=f"SELECT value from table{cx}", + sql=f"SELECT value from table{cx}", # noqa: S608 description=f"Some description {cx}", chart=chart, database=example_db, @@ -213,7 +211,7 @@ class TestReportSchedulesApi(SupersetTestCase): db.session.delete(report_schedule) db.session.commit() - @pytest.fixture() + @pytest.fixture def create_alpha_users(self): with self.create_app().app_context(): users = [ @@ -380,16 +378,16 @@ class TestReportSchedulesApi(SupersetTestCase): assert rv.status_code == 200 data = json.loads(rv.data.decode("utf-8")) assert data["count"] == REPORTS_COUNT - data_keys = sorted(list(data["result"][0].keys())) + data_keys = sorted(list(data["result"][0].keys())) # noqa: C414 assert expected_fields == data_keys # Assert nested fields expected_owners_fields = ["first_name", "id", "last_name"] - data_keys = sorted(list(data["result"][0]["owners"][0].keys())) + data_keys = sorted(list(data["result"][0]["owners"][0].keys())) # noqa: C414 assert expected_owners_fields == data_keys expected_recipients_fields = ["id", "type"] - data_keys = sorted(list(data["result"][1]["recipients"][0].keys())) + data_keys = sorted(list(data["result"][1]["recipients"][0].keys())) # noqa: C414 assert expected_recipients_fields == data_keys @parameterized.expand( @@ -993,7 +991,7 @@ class TestReportSchedulesApi(SupersetTestCase): data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 201 - # this second time it should receive an error because the chart has an attached report + # this second time it should receive an error because the chart has an attached report # noqa: E501 # with the same creation method from the same user. report_schedule_data = { "type": ReportScheduleType.REPORT, @@ -1018,7 +1016,7 @@ class TestReportSchedulesApi(SupersetTestCase): "issue_codes": [ { "code": 1010, - "message": "Issue 1010 - Superset encountered an error while running a command.", + "message": "Issue 1010 - Superset encountered an error while running a command.", # noqa: E501 } ] }, @@ -1051,7 +1049,7 @@ class TestReportSchedulesApi(SupersetTestCase): data = json.loads(rv.data.decode("utf-8")) assert rv.status_code == 201 - # this second time it should receive an error because the dashboard has an attached report + # this second time it should receive an error because the dashboard has an attached report # noqa: E501 # with the same creation method from the same user. 
report_schedule_data = { "type": ReportScheduleType.REPORT, @@ -1076,7 +1074,7 @@ class TestReportSchedulesApi(SupersetTestCase): "issue_codes": [ { "code": 1010, - "message": "Issue 1010 - Superset encountered an error while running a command.", + "message": "Issue 1010 - Superset encountered an error while running a command.", # noqa: E501 } ] }, @@ -1184,7 +1182,7 @@ class TestReportSchedulesApi(SupersetTestCase): assert data == {"message": {"dashboard": "Dashboard does not exist"}} # @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices") - # TODO (AAfghahi): I am going to enable this when the report schedule feature is fully finished + # TODO (AAfghahi): I am going to enable this when the report schedule feature is fully finished # noqa: E501 # def test_create_report_schedule_no_creation_method(self): # """ # ReportSchedule Api: Test create report schedule @@ -1355,7 +1353,7 @@ class TestReportSchedulesApi(SupersetTestCase): "message": { "crontab": ( "Alert schedule frequency exceeding limit. " - "Please configure a schedule with a minimum interval of 6 minutes per execution." + "Please configure a schedule with a minimum interval of 6 minutes per execution." # noqa: E501 ) } } @@ -1368,7 +1366,7 @@ class TestReportSchedulesApi(SupersetTestCase): "message": { "crontab": ( "Report schedule frequency exceeding limit. " - "Please configure a schedule with a minimum interval of 8 minutes per execution." + "Please configure a schedule with a minimum interval of 8 minutes per execution." # noqa: E501 ) } } @@ -1456,7 +1454,7 @@ class TestReportSchedulesApi(SupersetTestCase): "message": { "crontab": ( "Alert schedule frequency exceeding limit. " - "Please configure a schedule with a minimum interval of 6 minutes per execution." + "Please configure a schedule with a minimum interval of 6 minutes per execution." # noqa: E501 ) } } @@ -1472,7 +1470,7 @@ class TestReportSchedulesApi(SupersetTestCase): "message": { "crontab": ( "Report schedule frequency exceeding limit. " - "Please configure a schedule with a minimum interval of 4 minutes per execution." + "Please configure a schedule with a minimum interval of 4 minutes per execution." 
# noqa: E501 ) } } @@ -1670,7 +1668,7 @@ class TestReportSchedulesApi(SupersetTestCase): .one_or_none() ) - self.login(username="alpha2", password="password") + self.login(username="alpha2", password="password") # noqa: S106 report_schedule_data = { "active": False, } @@ -1819,7 +1817,7 @@ class TestReportSchedulesApi(SupersetTestCase): .one_or_none() ) - self.login(username="alpha2", password="password") + self.login(username="alpha2", password="password") # noqa: S106 uri = f"api/v1/report/{report_schedule.id}" rv = self.delete_assert_metric(uri, "delete") assert rv.status_code == 403 @@ -1876,7 +1874,7 @@ class TestReportSchedulesApi(SupersetTestCase): ) report_schedules_ids = [report_schedule.id] - self.login(username="alpha2", password="password") + self.login(username="alpha2", password="password") # noqa: S106 uri = f"api/v1/report/?q={prison.dumps(report_schedules_ids)}" rv = self.delete_assert_metric(uri, "bulk_delete") assert rv.status_code == 403 diff --git a/tests/integration_tests/reports/commands_tests.py b/tests/integration_tests/reports/commands_tests.py index d52301963..a3f105a41 100644 --- a/tests/integration_tests/reports/commands_tests.py +++ b/tests/integration_tests/reports/commands_tests.py @@ -183,7 +183,7 @@ def create_test_table_context(database: Database): engine.execute("DROP TABLE test_table") -@pytest.fixture() +@pytest.fixture def create_report_email_chart(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -194,7 +194,7 @@ def create_report_email_chart(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_chart_with_cc_and_bcc(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -208,7 +208,7 @@ def create_report_email_chart_with_cc_and_bcc(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_chart_alpha_owner(get_user): owners = [get_user("alpha")] chart = db.session.query(Slice).first() @@ -220,7 +220,7 @@ def create_report_email_chart_alpha_owner(get_user): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_chart_force_screenshot(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -231,7 +231,7 @@ def create_report_email_chart_force_screenshot(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_chart_with_csv(): chart = db.session.query(Slice).first() chart.query_context = '{"mock": "query_context"}' @@ -244,7 +244,7 @@ def create_report_email_chart_with_csv(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_chart_with_text(): chart = db.session.query(Slice).first() chart.query_context = '{"mock": "query_context"}' @@ -257,7 +257,7 @@ def create_report_email_chart_with_text(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_chart_with_csv_no_query_context(): chart = db.session.query(Slice).first() chart.query_context = None @@ -271,7 +271,7 @@ def create_report_email_chart_with_csv_no_query_context(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_email_dashboard(): dashboard = db.session.query(Dashboard).first() report_schedule = create_report_notification( @@ -282,7 +282,7 @@ def create_report_email_dashboard(): cleanup_report_schedule(report_schedule) -@pytest.fixture() 
+@pytest.fixture def create_report_email_dashboard_force_screenshot(): dashboard = db.session.query(Dashboard).first() report_schedule = create_report_notification( @@ -293,7 +293,7 @@ def create_report_email_dashboard_force_screenshot(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_slack_chart(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -304,7 +304,7 @@ def create_report_slack_chart(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_slack_chartv2(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -315,7 +315,7 @@ def create_report_slack_chartv2(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_slack_chart_with_csv(): chart = db.session.query(Slice).first() chart.query_context = '{"mock": "query_context"}' @@ -329,7 +329,7 @@ def create_report_slack_chart_with_csv(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_slack_chart_with_text(): chart = db.session.query(Slice).first() chart.query_context = '{"mock": "query_context"}' @@ -343,7 +343,7 @@ def create_report_slack_chart_with_text(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_report_slack_chart_working(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -372,7 +372,7 @@ def create_report_slack_chart_working(): cleanup_report_schedule(report_schedule) -@pytest.fixture() +@pytest.fixture def create_alert_slack_chart_success(): chart = db.session.query(Slice).first() report_schedule = create_report_notification( @@ -666,7 +666,7 @@ def test_email_chart_report_schedule_with_cc_bcc( ): """ ExecuteReport Command: Test chart email report schedule with screenshot and email cc, bcc options - """ + """ # noqa: E501 # setup screenshot mock screenshot_mock.return_value = SCREENSHOT_FILE @@ -1185,8 +1185,8 @@ def test_email_dashboard_report_schedule( "superset.extensions.feature_flag_manager._feature_flags", ALERT_REPORT_TABS=True ) def test_email_dashboard_report_schedule_with_tab_anchor( - _email_mock, - _screenshot_mock, + _email_mock, # noqa: PT019 + _screenshot_mock, # noqa: PT019 ): """ ExecuteReport Command: Test dashboard email report schedule with tab metadata @@ -1232,8 +1232,8 @@ def test_email_dashboard_report_schedule_with_tab_anchor( "superset.extensions.feature_flag_manager._feature_flags", ALERT_REPORT_TABS=False ) def test_email_dashboard_report_schedule_disabled_tabs( - _email_mock, - _screenshot_mock, + _email_mock, # noqa: PT019 + _screenshot_mock, # noqa: PT019 ): """ ExecuteReport Command: Test dashboard email report schedule with tab metadata @@ -1432,7 +1432,7 @@ def test_slack_chart_report_schedule_with_errors( SlackApiError(message="foo", response="bar"), ] - for idx, er in enumerate(slack_errors): + for idx, er in enumerate(slack_errors): # noqa: B007 web_client_mock.side_effect = [SlackApiError(None, None), er] with pytest.raises(ReportScheduleClientErrorsException): @@ -1565,7 +1565,7 @@ def test_slack_chart_report_schedule_with_text( ] ) assert ( - f"" + f"" # noqa: E501 in slack_client_mock_class.return_value.chat_postMessage.call_args[1][ "text" ] @@ -1762,7 +1762,7 @@ def test_email_dashboard_report_fails_uncaught_exception( email_mock.side_effect = Exception("Uncaught exception") app.config["EMAIL_REPORTS_CTA"] = "Call to action" - with 
pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017, PT011 AsyncExecuteReportScheduleCommand( TEST_ID, create_report_email_dashboard.id, datetime.utcnow() ).run() @@ -1870,7 +1870,7 @@ def test_slack_token_callable_chart_report( TEST_ID, create_report_slack_chart.id, datetime.utcnow() ).run() app.config["SLACK_API_TOKEN"].assert_called() - assert slack_client_mock_class.called_with(token="cool_code", proxy="") + assert slack_client_mock_class.called_with(token="cool_code", proxy="") # noqa: S106 assert_log(ReportState.SUCCESS) @@ -2281,7 +2281,7 @@ def test__send_with_client_errors(notification_mock, logger_mock): assert excinfo.errisinstance(SupersetException) logger_mock.warning.assert_called_with( - "SupersetError(message='', error_type=, level=, extra=None)" + "SupersetError(message='', error_type=, level=, extra=None)" # noqa: E501 ) @@ -2303,10 +2303,10 @@ def test__send_with_multiple_errors(notification_mock, logger_mock): logger_mock.warning.assert_has_calls( [ call( - "SupersetError(message='', error_type=, level=, extra=None)" + "SupersetError(message='', error_type=, level=, extra=None)" # noqa: E501 ), call( - "SupersetError(message='', error_type=, level=, extra=None)" + "SupersetError(message='', error_type=, level=, extra=None)" # noqa: E501 ), ] ) @@ -2324,5 +2324,5 @@ def test__send_with_server_errors(notification_mock, logger_mock): assert excinfo.errisinstance(SupersetException) # it logs the error logger_mock.warning.assert_called_with( - "SupersetError(message='', error_type=, level=, extra=None)" + "SupersetError(message='', error_type=, level=, extra=None)" # noqa: E501 ) diff --git a/tests/integration_tests/reports/scheduler_tests.py b/tests/integration_tests/reports/scheduler_tests.py index ae25b575a..32d91e23e 100644 --- a/tests/integration_tests/reports/scheduler_tests.py +++ b/tests/integration_tests/reports/scheduler_tests.py @@ -157,7 +157,7 @@ def test_execute_task(update_state_mock, command_mock, init_mock, owners): report_schedule = insert_report_schedule( type=ReportScheduleType.ALERT, - name=f"report-{randint(0,1000)}", + name=f"report-{randint(0,1000)}", # noqa: S311 crontab="0 4 * * *", timezone="America/New_York", owners=owners, @@ -184,7 +184,7 @@ def test_execute_task_with_command_exception( report_schedule = insert_report_schedule( type=ReportScheduleType.ALERT, - name=f"report-{randint(0,1000)}", + name=f"report-{randint(0,1000)}", # noqa: S311 crontab="0 4 * * *", timezone="America/New_York", owners=owners, @@ -195,7 +195,7 @@ def test_execute_task_with_command_exception( execute(report_schedule.id) update_state_mock.assert_called_with(state="FAILURE") logger_mock.exception.assert_called_with( - "A downstream exception occurred while generating a report: None. Unexpected error", + "A downstream exception occurred while generating a report: None. 
Unexpected error", # noqa: E501 exc_info=True, ) diff --git a/tests/integration_tests/reports/utils.py b/tests/integration_tests/reports/utils.py index 45b919c2b..3ab5c46c4 100644 --- a/tests/integration_tests/reports/utils.py +++ b/tests/integration_tests/reports/utils.py @@ -117,8 +117,8 @@ def create_report_notification( extra: Optional[dict[str, Any]] = None, force_screenshot: bool = False, owners: Optional[list[User]] = None, - ccTarget: Optional[str] = None, - bccTarget: Optional[str] = None, + ccTarget: Optional[str] = None, # noqa: N803 + bccTarget: Optional[str] = None, # noqa: N803 ) -> ReportSchedule: if not owners: owners = [ diff --git a/tests/integration_tests/result_set_tests.py b/tests/integration_tests/result_set_tests.py index fcdbd19d5..6f6d701ed 100644 --- a/tests/integration_tests/result_set_tests.py +++ b/tests/integration_tests/result_set_tests.py @@ -269,7 +269,7 @@ class TestSupersetResultSet(SupersetTestCase): df = results.to_pandas_df() assert df_to_records(df) == [ { - "metadata": '["test", [["foo", 123456, [[["test"], 3432546, 7657658766], [["fake"], 656756765, 324324324324]]]], ["test2", 43, 765765765], null, null]' + "metadata": '["test", [["foo", 123456, [[["test"], 3432546, 7657658766], [["fake"], 656756765, 324324324324]]]], ["test2", 43, 765765765], null, null]' # noqa: E501 } ] diff --git a/tests/integration_tests/security/analytics_db_safety_tests.py b/tests/integration_tests/security/analytics_db_safety_tests.py index 7e36268e3..a9303dda6 100644 --- a/tests/integration_tests/security/analytics_db_safety_tests.py +++ b/tests/integration_tests/security/analytics_db_safety_tests.py @@ -30,37 +30,37 @@ from superset.security.analytics_db_safety import check_sqlalchemy_uri ( "sqlite:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "sqlite+pysqlite:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "sqlite+aiosqlite:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "sqlite+pysqlcipher:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "sqlite+:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "sqlite+new+driver:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "sqlite+new+:///home/superset/bad.db", True, - "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", + "SQLiteDialect_pysqlite cannot be used as a data source for security reasons.", # noqa: E501 ), ( "shillelagh:///home/superset/bad.db", @@ -84,7 +84,7 @@ def test_check_sqlalchemy_uri( sqlalchemy_uri: str, error: bool, error_message: Optional[str] ): if error: - with pytest.raises(SupersetSecurityException) as 
excinfo: + with pytest.raises(SupersetSecurityException) as excinfo: # noqa: PT012 check_sqlalchemy_uri(make_url(sqlalchemy_uri)) assert str(excinfo.value) == error_message else: diff --git a/tests/integration_tests/security/guest_token_security_tests.py b/tests/integration_tests/security/guest_token_security_tests.py index 5dcfd1357..36c2c9273 100644 --- a/tests/integration_tests/security/guest_token_security_tests.py +++ b/tests/integration_tests/security/guest_token_security_tests.py @@ -190,7 +190,7 @@ class TestGuestUserDashboardAccess(SupersetTestCase): def test_raise_for_access_dashboard_as_unauthorized_guest(self): g.user = self.unauthorized_guest - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(dashboard=self.dash) def test_raise_for_access_dashboard_as_guest_no_rbac(self): @@ -214,7 +214,7 @@ class TestGuestUserDashboardAccess(SupersetTestCase): db.session.add(dash) db.session.commit() - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(dashboard=dash) db.session.delete(dash) @@ -345,7 +345,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__no_dashboard_in_form_data(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -360,7 +360,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__no_chart_in_form_data(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -375,7 +375,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__chart_not_on_dashboard(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -391,7 +391,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__chart_doesnt_belong_to_datasource(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -407,7 +407,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__native_filter_no_id_in_form_data(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -427,7 +427,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__native_filter_datasource_not_associated(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -452,7 +452,7 @@ class 
TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__embedded_feature_flag_off(self): g.user = self.authorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -468,7 +468,7 @@ class TestGuestUserDatasourceAccess(SupersetTestCase): def test_raise_for_access__unauthorized_guest_user(self): g.user = self.unauthorized_guest for kwarg in ["viz", "query_context"]: - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( diff --git a/tests/integration_tests/security/migrate_roles_tests.py b/tests/integration_tests/security/migrate_roles_tests.py index 326509d08..738bb6b60 100644 --- a/tests/integration_tests/security/migrate_roles_tests.py +++ b/tests/integration_tests/security/migrate_roles_tests.py @@ -40,7 +40,7 @@ logger = logging.getLogger(__name__) def create_old_role(pvm_map: PvmMigrationMapType, external_pvms): with app.app_context(): pvms = [] - for old_pvm, new_pvms in pvm_map.items(): + for old_pvm, new_pvms in pvm_map.items(): # noqa: B007 pvms.append( security_manager.add_permission_view_menu( old_pvm.permission, old_pvm.view diff --git a/tests/integration_tests/security/row_level_security_tests.py b/tests/integration_tests/security/row_level_security_tests.py index 05c353fde..0916124a6 100644 --- a/tests/integration_tests/security/row_level_security_tests.py +++ b/tests/integration_tests/security/row_level_security_tests.py @@ -56,7 +56,7 @@ class TestRowLevelSecurity(SupersetTestCase): """ rls_entry = None - query_obj: dict[str, Any] = dict( + query_obj: dict[str, Any] = dict( # noqa: C408 groupby=[], metrics=None, filter=[], @@ -153,7 +153,7 @@ class TestRowLevelSecurity(SupersetTestCase): db.session.delete(self.get_user("NoRlsRoleUser")) db.session.commit() - @pytest.fixture() + @pytest.fixture def create_dataset(self): with self.create_app().app_context(): dataset = SqlaTable(database_id=1, schema=None, table_name="table1") @@ -268,7 +268,7 @@ class TestRowLevelSecurity(SupersetTestCase): # establish that the filters are grouped together correctly with # ANDs, ORs and parens in the correct place assert ( - "WHERE ((name like 'A%' or name like 'B%') OR (name like 'Q%')) AND (gender = 'boy');" + "WHERE ((name like 'A%' or name like 'B%') OR (name like 'Q%')) AND (gender = 'boy');" # noqa: E501 in sql ) @@ -622,7 +622,7 @@ RLS_GENDER_REGEX = re.compile(r"AND \([\s\n]*gender = 'girl'[\s\n]*\)") EMBEDDED_SUPERSET=True, ) class GuestTokenRowLevelSecurityTests(SupersetTestCase): - query_obj: dict[str, Any] = dict( + query_obj: dict[str, Any] = dict( # noqa: C408 groupby=[], metrics=None, filter=[], diff --git a/tests/integration_tests/security_tests.py b/tests/integration_tests/security_tests.py index 199c1328f..8be13183e 100644 --- a/tests/integration_tests/security_tests.py +++ b/tests/integration_tests/security_tests.py @@ -1191,7 +1191,7 @@ class TestRolePermission(SupersetTestCase): delete_schema_perm("[examples].[1]") def test_schemas_accessible_by_user_datasource_access(self): - # User has schema access to the datasource temp_schema.wb_health_population in examples DB. + # User has schema access to the datasource temp_schema.wb_health_population in examples DB. 
# noqa: E501 database = get_example_database() with self.client.application.test_request_context(): with override_user(security_manager.find_user("gamma")): @@ -1201,7 +1201,7 @@ class TestRolePermission(SupersetTestCase): assert schemas == {"temp_schema"} def test_schemas_accessible_by_user_datasource_and_schema_access(self): - # User has schema access to the datasource temp_schema.wb_health_population in examples DB. + # User has schema access to the datasource temp_schema.wb_health_population in examples DB. # noqa: E501 create_schema_perm("[examples].[2]") with self.client.application.test_request_context(): database = get_example_database() @@ -1286,7 +1286,7 @@ class TestRolePermission(SupersetTestCase): "page": 0, "page_size": -1, } - NEW_FLASK_GET_SQL_DBS_REQUEST = f"/api/v1/database/?q={prison.dumps(arguments)}" + NEW_FLASK_GET_SQL_DBS_REQUEST = f"/api/v1/database/?q={prison.dumps(arguments)}" # noqa: N806 self.login(GAMMA_USERNAME) databases_json = self.client.get(NEW_FLASK_GET_SQL_DBS_REQUEST).json assert databases_json["count"] == 1 @@ -1616,7 +1616,7 @@ class TestSecurityManager(SupersetTestCase): mock_can_access_schema.return_value = False mock_is_owner.return_value = False - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(datasource=datasource) @patch("superset.security.SupersetSecurityManager.is_owner") @@ -1635,12 +1635,12 @@ class TestSecurityManager(SupersetTestCase): mock_can_access.return_value = False mock_is_owner.return_value = False - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(query=query) def test_raise_for_access_sql_fails(self): with override_user(security_manager.find_user("gamma")): - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( database=get_example_database(), schema="bar", @@ -1672,7 +1672,7 @@ class TestSecurityManager(SupersetTestCase): mock_can_access_schema.return_value = False mock_is_owner.return_value = False with override_user(security_manager.find_user("gamma")): - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(query_context=query_context) @patch("superset.security.SupersetSecurityManager.can_access") @@ -1685,7 +1685,7 @@ class TestSecurityManager(SupersetTestCase): mock_can_access.return_value = False - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(database=database, table=table) @patch("superset.security.SupersetSecurityManager.is_owner") @@ -1703,7 +1703,7 @@ class TestSecurityManager(SupersetTestCase): mock_can_access_schema.return_value = False mock_is_owner.return_value = False with override_user(security_manager.find_user("gamma")): - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access(viz=test_viz) @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices") @@ -1748,7 +1748,7 @@ class TestSecurityManager(SupersetTestCase): births.roles = [] # No dashboard roles. 
- with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -1764,7 +1764,7 @@ class TestSecurityManager(SupersetTestCase): births.roles = [self.get_role("Gamma")] # Undefined dashboard. - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -1775,7 +1775,7 @@ class TestSecurityManager(SupersetTestCase): ) # Undefined dashboard chart. - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -1786,7 +1786,7 @@ class TestSecurityManager(SupersetTestCase): ) # Ill-defined dashboard chart. - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -1800,7 +1800,7 @@ class TestSecurityManager(SupersetTestCase): ) # Dashboard chart not associated with said datasource. - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -1827,7 +1827,7 @@ class TestSecurityManager(SupersetTestCase): ) # Ill-defined native filter. - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -1841,7 +1841,7 @@ class TestSecurityManager(SupersetTestCase): ) # Native filter not associated with said datasource. - with self.assertRaises(SupersetSecurityException): + with self.assertRaises(SupersetSecurityException): # noqa: PT027 security_manager.raise_for_access( **{ kwarg: Mock( @@ -2055,7 +2055,7 @@ class TestGuestTokens(SupersetTestCase): guest_user = security_manager.get_guest_user_from_request(fake_request) assert guest_user is None - self.assertRaisesRegex(ValueError, "Guest token does not contain a user claim") + self.assertRaisesRegex(ValueError, "Guest token does not contain a user claim") # noqa: PT027 def test_get_guest_user_no_resource(self): user = {"username": "test_guest"} @@ -2066,7 +2066,7 @@ class TestGuestTokens(SupersetTestCase): fake_request.headers[current_app.config["GUEST_TOKEN_HEADER_NAME"]] = token security_manager.get_guest_user_from_request(fake_request) - self.assertRaisesRegex( + self.assertRaisesRegex( # noqa: PT027 ValueError, "Guest token does not contain a resources claim" ) @@ -2095,7 +2095,7 @@ class TestGuestTokens(SupersetTestCase): guest_user = security_manager.get_guest_user_from_request(fake_request) assert guest_user is None - self.assertRaisesRegex(ValueError, "This is not a guest token.") + self.assertRaisesRegex(ValueError, "This is not a guest token.") # noqa: PT027 def test_get_guest_user_bad_audience(self): now = time.time() @@ -2121,7 +2121,7 @@ class TestGuestTokens(SupersetTestCase): fake_request.headers[current_app.config["GUEST_TOKEN_HEADER_NAME"]] = token guest_user = security_manager.get_guest_user_from_request(fake_request) - self.assertRaisesRegex(jwt.exceptions.InvalidAudienceError, "Invalid audience") + self.assertRaisesRegex(jwt.exceptions.InvalidAudienceError, "Invalid audience") # noqa: PT027 assert guest_user is None @patch("superset.security.SupersetSecurityManager._get_current_epoch_time") diff --git a/tests/integration_tests/sql_lab/api_tests.py 
b/tests/integration_tests/sql_lab/api_tests.py index cf1e190bb..7d0ea7cc2 100644 --- a/tests/integration_tests/sql_lab/api_tests.py +++ b/tests/integration_tests/sql_lab/api_tests.py @@ -197,7 +197,7 @@ class TestSqlLabApi(SupersetTestCase): "Dummy Role", email="unauth_user1@superset.org", # noqa: F541 ) - self.login(username="unauth_user1", password="password") + self.login(username="unauth_user1", password="password") # noqa: S106 rv = self.client.get("/api/v1/sqllab/") assert rv.status_code == 403 @@ -361,7 +361,7 @@ class TestSqlLabApi(SupersetTestCase): sql_lab_mock.return_value = resp dbobj = self.create_fake_db_for_macros() - json_payload = dict(database_id=dbobj.id, sql=sql) + json_payload = dict(database_id=dbobj.id, sql=sql) # noqa: C408 self.get_json_resp( "/api/v1/sqllab/execute/", raise_on_error=False, json_=json_payload ) @@ -424,7 +424,7 @@ class TestSqlLabApi(SupersetTestCase): app.config["RESULTS_BACKEND_USE_MSGPACK"] = use_msgpack - @mock.patch("superset.models.sql_lab.Query.raise_for_access", lambda _: None) + @mock.patch("superset.models.sql_lab.Query.raise_for_access", lambda _: None) # noqa: PT008 @mock.patch("superset.models.core.Database.get_df") def test_export_results(self, get_df_mock: mock.Mock) -> None: self.login(ADMIN_USERNAME) diff --git a/tests/integration_tests/sql_lab/commands_tests.py b/tests/integration_tests/sql_lab/commands_tests.py index d18c7dbad..acd9edbf2 100644 --- a/tests/integration_tests/sql_lab/commands_tests.py +++ b/tests/integration_tests/sql_lab/commands_tests.py @@ -86,10 +86,13 @@ class TestQueryEstimationCommand(SupersetTestCase): assert ( ex_info.value.error.error_type == SupersetErrorType.SQLLAB_TIMEOUT_ERROR ) - assert ex_info.value.error.message == __( - "The query estimation was killed after %(sqllab_timeout)s seconds. It might " - "be too complex, or the database might be under heavy load.", - sqllab_timeout=app.config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"], + assert ( + ex_info.value.error.message + == __( + "The query estimation was killed after %(sqllab_timeout)s seconds. 
It might " # noqa: E501 + "be too complex, or the database might be under heavy load.", + sqllab_timeout=app.config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"], + ) ) def test_run_success(self) -> None: @@ -112,7 +115,7 @@ class TestQueryEstimationCommand(SupersetTestCase): class TestSqlResultExportCommand(SupersetTestCase): - @pytest.fixture() + @pytest.fixture def create_database_and_query(self): with self.create_app().app_context(): database = get_example_database() @@ -246,7 +249,7 @@ class TestSqlResultExportCommand(SupersetTestCase): class TestSqlExecutionResultsCommand(SupersetTestCase): - @pytest.fixture() + @pytest.fixture def create_database_and_query(self): with self.create_app().app_context(): database = get_example_database() @@ -335,7 +338,7 @@ class TestSqlExecutionResultsCommand(SupersetTestCase): "superset.views.utils._deserialize_results_payload", side_effect=SerializationError(), ): - with pytest.raises(SupersetErrorException) as ex_info: + with pytest.raises(SupersetErrorException) as ex_info: # noqa: PT012 command = results.SqlExecutionResultsCommand("test_other", 1000) command.run() assert ( diff --git a/tests/integration_tests/sql_lab/test_execute_sql_statements.py b/tests/integration_tests/sql_lab/test_execute_sql_statements.py index 7a08f35d3..41b7a74ca 100644 --- a/tests/integration_tests/sql_lab/test_execute_sql_statements.py +++ b/tests/integration_tests/sql_lab/test_execute_sql_statements.py @@ -31,7 +31,7 @@ def test_non_async_execute(non_async_example_db: Database, example_query: Query) return_results=True, start_time=now_as_float(), expand_data=True, - log_params=dict(), + log_params=dict(), # noqa: C408 ) assert result assert result["query_id"] == example_query.id diff --git a/tests/integration_tests/sql_validator_tests.py b/tests/integration_tests/sql_validator_tests.py index 901d66781..c573d5805 100644 --- a/tests/integration_tests/sql_validator_tests.py +++ b/tests/integration_tests/sql_validator_tests.py @@ -69,7 +69,7 @@ class TestPrestoValidator(SupersetTestCase): fetch_fn = self.database.db_engine_spec.fetch_data fetch_fn.side_effect = DatabaseError("dummy db error") - with self.assertRaises(PrestoSQLValidationError): + with self.assertRaises(PrestoSQLValidationError): # noqa: PT027 self.validator.validate(sql, None, schema, self.database) @patch("superset.utils.core.g") @@ -81,7 +81,7 @@ class TestPrestoValidator(SupersetTestCase): fetch_fn = self.database.db_engine_spec.fetch_data fetch_fn.side_effect = Exception("a mysterious failure") - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017, PT027 self.validator.validate(sql, None, schema, self.database) @patch("superset.utils.core.g") diff --git a/tests/integration_tests/sqla_models_tests.py b/tests/integration_tests/sqla_models_tests.py index 2f22b92c5..c3f4328bd 100644 --- a/tests/integration_tests/sqla_models_tests.py +++ b/tests/integration_tests/sqla_models_tests.py @@ -131,7 +131,7 @@ class TestDatabaseModel(SupersetTestCase): assert col.is_numeric == (db_col_type == GenericDataType.NUMERIC) assert col.is_string == (db_col_type == GenericDataType.STRING) - for str_type, db_col_type in test_cases.items(): + for str_type, db_col_type in test_cases.items(): # noqa: B007 col = TableColumn(column_name="foo", type=str_type, table=tbl, is_dttm=True) assert col.is_temporal @@ -321,7 +321,7 @@ class TestDatabaseModel(SupersetTestCase): sqla_query = table.get_sqla_query(**query_obj) sql = table.database.compile_sqla_query(sqla_query.sqla_query) if isinstance(filter_.expected, 
list): - assert any([candidate in sql for candidate in filter_.expected]) + assert any([candidate in sql for candidate in filter_.expected]) # noqa: C419 else: assert filter_.expected in sql @@ -524,7 +524,7 @@ class TestDatabaseModel(SupersetTestCase): db.session.commit() -@pytest.fixture() +@pytest.fixture def text_column_table(app_context: AppContext): table = SqlaTable( table_name="text_column_table", @@ -542,7 +542,7 @@ def text_column_table(app_context: AppContext): ) TableColumn(column_name="foo", type="VARCHAR(255)", table=table) SqlMetric(metric_name="count", expression="count(*)", table=table) - yield table + return table def test_values_for_column_on_text_column(text_column_table): @@ -741,7 +741,7 @@ def test_should_generate_closed_and_open_time_filter_range(login_as_admin): UNION SELECT '2023-03-10'::timestamp) AS virtual_table WHERE datetime_col >= TO_TIMESTAMP('2022-01-01 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US') AND datetime_col < TO_TIMESTAMP('2023-01-01 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US') - """ + """ # noqa: E501 assert result_object.df.iloc[0]["count"] == 2 @@ -769,7 +769,7 @@ def test_none_operand_in_filter(login_as_admin, physical_dataset): assert result.df["count"][0] == expected["count"] assert expected["sql_should_contain"] in result.query.upper() - with pytest.raises(QueryObjectValidationError): + with pytest.raises(QueryObjectValidationError): # noqa: PT012 for flt in [ FilterOperator.GREATER_THAN, FilterOperator.LESS_THAN, diff --git a/tests/integration_tests/sqllab_tests.py b/tests/integration_tests/sqllab_tests.py index b09372cb3..65d0c5fd3 100644 --- a/tests/integration_tests/sqllab_tests.py +++ b/tests/integration_tests/sqllab_tests.py @@ -103,11 +103,11 @@ class TestSqlLab(SupersetTestCase): "issue_codes": [ { "code": 1003, - "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", + "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.", # noqa: E501 }, { "code": 1005, - "message": "Issue 1005 - The table was deleted or renamed in the database.", + "message": "Issue 1005 - The table was deleted or renamed in the database.", # noqa: E501 }, ], } @@ -121,7 +121,7 @@ class TestSqlLab(SupersetTestCase): "issue_codes": [ { "code": 1002, - "message": "Issue 1002 - The database returned an unexpected error.", + "message": "Issue 1002 - The database returned an unexpected error.", # noqa: E501 } ], "engine_name": engine_name, @@ -132,27 +132,30 @@ class TestSqlLab(SupersetTestCase): self.login(ADMIN_USERNAME) data = self.run_sql("DELETE FROM birth_names", "1") - assert data == { - "errors": [ - { - "message": ( - "This database does not allow for DDL/DML, and the query " - "could not be parsed to confirm it is a read-only query. Please " - "contact your administrator for more assistance." - ), - "error_type": SupersetErrorType.DML_NOT_ALLOWED_ERROR, - "level": ErrorLevel.ERROR, - "extra": { - "issue_codes": [ - { - "code": 1022, - "message": "Issue 1022 - Database does not allow data manipulation.", - } - ] - }, - } - ] - } + assert ( + data + == { + "errors": [ + { + "message": ( + "This database does not allow for DDL/DML, and the query " + "could not be parsed to confirm it is a read-only query. Please " # noqa: E501 + "contact your administrator for more assistance." 
+ ), + "error_type": SupersetErrorType.DML_NOT_ALLOWED_ERROR, + "level": ErrorLevel.ERROR, + "extra": { + "issue_codes": [ + { + "code": 1022, + "message": "Issue 1022 - Database does not allow data manipulation.", # noqa: E501 + } + ] + }, + } + ] + } + ) @parameterized.expand([CtasMethod.TABLE, CtasMethod.VIEW]) @pytest.mark.usefixtures("load_birth_names_dashboard_with_slices") @@ -185,10 +188,10 @@ class TestSqlLab(SupersetTestCase): examples_db = get_example_database() with examples_db.get_sqla_engine() as engine: data = engine.execute( - f"SELECT * FROM admin_database.{tmp_table_name}" + f"SELECT * FROM admin_database.{tmp_table_name}" # noqa: S608 ).fetchall() names_count = engine.execute( - f"SELECT COUNT(*) FROM birth_names" # noqa: F541 + f"SELECT COUNT(*) FROM birth_names" # noqa: F541, S608 ).first() assert names_count[0] == len( data @@ -275,16 +278,18 @@ class TestSqlLab(SupersetTestCase): with examples_db.get_sqla_engine() as engine: engine.execute( - f"CREATE TABLE IF NOT EXISTS {CTAS_SCHEMA_NAME}.test_table AS SELECT 1 as c1, 2 as c2" + f"CREATE TABLE IF NOT EXISTS {CTAS_SCHEMA_NAME}.test_table AS SELECT 1 as c1, 2 as c2" # noqa: E501 ) data = self.run_sql( - f"SELECT * FROM {CTAS_SCHEMA_NAME}.test_table", "3", username="SchemaUser" + f"SELECT * FROM {CTAS_SCHEMA_NAME}.test_table", # noqa: S608 + "3", + username="SchemaUser", # noqa: S608 ) assert 1 == len(data["data"]) data = self.run_sql( - f"SELECT * FROM {CTAS_SCHEMA_NAME}.test_table", + f"SELECT * FROM {CTAS_SCHEMA_NAME}.test_table", # noqa: S608 "4", username="SchemaUser", schema=CTAS_SCHEMA_NAME, @@ -350,7 +355,7 @@ class TestSqlLab(SupersetTestCase): assert len(data["data"]) == test_limit data = self.run_sql( - f"SELECT * FROM birth_names LIMIT {test_limit}", + f"SELECT * FROM birth_names LIMIT {test_limit}", # noqa: S608 client_id="sql_limit_3", query_limit=test_limit + 1, ) @@ -358,7 +363,7 @@ class TestSqlLab(SupersetTestCase): assert data["query"]["limitingFactor"] == LimitingFactor.QUERY data = self.run_sql( - f"SELECT * FROM birth_names LIMIT {test_limit + 1}", + f"SELECT * FROM birth_names LIMIT {test_limit + 1}", # noqa: S608 client_id="sql_limit_4", query_limit=test_limit, ) @@ -366,7 +371,7 @@ class TestSqlLab(SupersetTestCase): assert data["query"]["limitingFactor"] == LimitingFactor.DROPDOWN data = self.run_sql( - f"SELECT * FROM birth_names LIMIT {test_limit}", + f"SELECT * FROM birth_names LIMIT {test_limit}", # noqa: S608 client_id="sql_limit_5", query_limit=test_limit, ) @@ -544,7 +549,7 @@ class TestSqlLab(SupersetTestCase): assert data["status"] == "success" data = self.run_sql( - "SELECT * FROM birth_names WHERE state = '{{ state }}' -- blabblah {{ extra1 }}\nLIMIT 10", + "SELECT * FROM birth_names WHERE state = '{{ state }}' -- blabblah {{ extra1 }}\nLIMIT 10", # noqa: E501 "3", template_params=json.dumps({"state": "CA", "extra1": "comment"}), ) @@ -560,7 +565,7 @@ class TestSqlLab(SupersetTestCase): "issue_codes": [ { "code": 1006, - "message": "Issue 1006 - One or more parameters specified in the query are missing.", + "message": "Issue 1006 - One or more parameters specified in the query are missing.", # noqa: E501 } ], "template_parameters": {"state": "CA"}, @@ -779,14 +784,14 @@ class TestSqlLab(SupersetTestCase): log_params=None, ) assert excinfo.value.error == SupersetError( - message="CTAS (create table as select) can only be run with a query where the last statement is a SELECT. Please make sure your query has a SELECT as its last statement. 
Then, try running your query again.", + message="CTAS (create table as select) can only be run with a query where the last statement is a SELECT. Please make sure your query has a SELECT as its last statement. Then, try running your query again.", # noqa: E501 error_type=SupersetErrorType.INVALID_CTAS_QUERY_ERROR, level=ErrorLevel.ERROR, extra={ "issue_codes": [ { "code": 1023, - "message": "Issue 1023 - The CTAS (create table as select) doesn't have a SELECT statement at the end. Please make sure your query has a SELECT as its last statement. Then, try running your query again.", + "message": "Issue 1023 - The CTAS (create table as select) doesn't have a SELECT statement at the end. Please make sure your query has a SELECT as its last statement. Then, try running your query again.", # noqa: E501 } ] }, @@ -812,18 +817,18 @@ class TestSqlLab(SupersetTestCase): log_params=None, ) assert excinfo.value.error == SupersetError( - message="CVAS (create view as select) can only be run with a query with a single SELECT statement. Please make sure your query has only a SELECT statement. Then, try running your query again.", + message="CVAS (create view as select) can only be run with a query with a single SELECT statement. Please make sure your query has only a SELECT statement. Then, try running your query again.", # noqa: E501 error_type=SupersetErrorType.INVALID_CVAS_QUERY_ERROR, level=ErrorLevel.ERROR, extra={ "issue_codes": [ { "code": 1024, - "message": "Issue 1024 - CVAS (create view as select) query has more than one statement.", + "message": "Issue 1024 - CVAS (create view as select) query has more than one statement.", # noqa: E501 }, { "code": 1025, - "message": "Issue 1025 - CVAS (create view as select) query is not a SELECT statement.", + "message": "Issue 1025 - CVAS (create view as select) query is not a SELECT statement.", # noqa: E501 }, ] }, @@ -843,30 +848,33 @@ class TestSqlLab(SupersetTestCase): handle_cursor.side_effect = SoftTimeLimitExceeded() data = self.run_sql("SELECT * FROM birth_names LIMIT 1", "1") - assert data == { - "errors": [ - { - "message": ( - "The query was killed after 21600 seconds. It might be too complex, " - "or the database might be under heavy load." - ), - "error_type": SupersetErrorType.SQLLAB_TIMEOUT_ERROR, - "level": ErrorLevel.ERROR, - "extra": { - "issue_codes": [ - { - "code": 1026, - "message": "Issue 1026 - Query is too complex and takes too long to run.", - }, - { - "code": 1027, - "message": "Issue 1027 - The database is currently running too many queries.", - }, - ] - }, - } - ] - } + assert ( + data + == { + "errors": [ + { + "message": ( + "The query was killed after 21600 seconds. It might be too complex, " # noqa: E501 + "or the database might be under heavy load." 
+ ), + "error_type": SupersetErrorType.SQLLAB_TIMEOUT_ERROR, + "level": ErrorLevel.ERROR, + "extra": { + "issue_codes": [ + { + "code": 1026, + "message": "Issue 1026 - Query is too complex and takes too long to run.", # noqa: E501 + }, + { + "code": 1027, + "message": "Issue 1027 - The database is currently running too many queries.", # noqa: E501 + }, + ] + }, + } + ] + } + ) def test_apply_limit_if_exists_when_incremented_limit_is_none(self): sql = """ diff --git a/tests/integration_tests/stats_logger_tests.py b/tests/integration_tests/stats_logger_tests.py index 284cf415f..47709a126 100644 --- a/tests/integration_tests/stats_logger_tests.py +++ b/tests/integration_tests/stats_logger_tests.py @@ -43,7 +43,7 @@ class TestStatsdStatsLogger(TestCase): self.verify_client_calls(stats_logger, client) def test_init_with_params(self): - with patch("superset.stats_logger.StatsClient") as MockStatsdClient: + with patch("superset.stats_logger.StatsClient") as MockStatsdClient: # noqa: N806 mock_client = MockStatsdClient.return_value stats_logger = StatsdStatsLogger() diff --git a/tests/integration_tests/superset_test_config.py b/tests/integration_tests/superset_test_config.py index e1b3ce016..4358ba875 100644 --- a/tests/integration_tests/superset_test_config.py +++ b/tests/integration_tests/superset_test_config.py @@ -34,14 +34,14 @@ logging.getLogger("flask_appbuilder.api").setLevel(logging.WARNING) logging.getLogger("flask_appbuilder.security.sqla.manager").setLevel(logging.WARNING) logging.getLogger("sqlalchemy.engine.Engine").setLevel(logging.WARNING) -SECRET_KEY = "dummy_secret_key_for_test_to_silence_warnings" +SECRET_KEY = "dummy_secret_key_for_test_to_silence_warnings" # noqa: S105 AUTH_USER_REGISTRATION_ROLE = "alpha" SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join( # noqa: F405 DATA_DIR, "unittests.integration_tests.db", # noqa: F405 ) SILENCE_FAB = False -# Allowing SQLALCHEMY_DATABASE_URI and SQLALCHEMY_EXAMPLES_URI to be defined as an env vars for +# Allowing SQLALCHEMY_DATABASE_URI and SQLALCHEMY_EXAMPLES_URI to be defined as an env vars for # noqa: E501 # continuous integration if "SUPERSET__SQLALCHEMY_DATABASE_URI" in os.environ: # noqa: F405 SQLALCHEMY_DATABASE_URI = os.environ["SUPERSET__SQLALCHEMY_DATABASE_URI"] # noqa: F405 @@ -81,7 +81,7 @@ FEATURE_FLAGS = { WEBDRIVER_BASEURL = "http://0.0.0.0:8081/" -def GET_FEATURE_FLAGS_FUNC(ff): +def GET_FEATURE_FLAGS_FUNC(ff): # noqa: N802 ff_copy = copy(ff) ff_copy["super"] = "set" return ff_copy @@ -128,7 +128,7 @@ EXPLORE_FORM_DATA_CACHE_CONFIG = { "CACHE_DEFAULT_TIMEOUT": int(timedelta(minutes=10).total_seconds()), } -GLOBAL_ASYNC_QUERIES_JWT_SECRET = "test-secret-change-me-test-secret-change-me" +GLOBAL_ASYNC_QUERIES_JWT_SECRET = "test-secret-change-me-test-secret-change-me" # noqa: S105 ALERT_REPORTS_WORKING_TIME_OUT_KILL = True diff --git a/tests/integration_tests/superset_test_config_thumbnails.py b/tests/integration_tests/superset_test_config_thumbnails.py index a8e78d187..010e58771 100644 --- a/tests/integration_tests/superset_test_config_thumbnails.py +++ b/tests/integration_tests/superset_test_config_thumbnails.py @@ -22,7 +22,7 @@ from sqlalchemy.engine import make_url from superset.config import * # noqa: F403 from superset.config import DATA_DIR -SECRET_KEY = "dummy_secret_key_for_test_to_silence_warnings" +SECRET_KEY = "dummy_secret_key_for_test_to_silence_warnings" # noqa: S105 AUTH_USER_REGISTRATION_ROLE = "alpha" SQLALCHEMY_DATABASE_URI = "sqlite:///" + os.path.join( # noqa: F405 DATA_DIR, @@ -45,7 +45,7 @@ 
SQL_SELECT_AS_CTA = True SQL_MAX_ROW = 666 -def GET_FEATURE_FLAGS_FUNC(ff): +def GET_FEATURE_FLAGS_FUNC(ff): # noqa: N802 ff_copy = copy(ff) ff_copy["super"] = "set" return ff_copy diff --git a/tests/integration_tests/superset_test_custom_template_processors.py b/tests/integration_tests/superset_test_custom_template_processors.py index 79ece8a11..660e693e3 100644 --- a/tests/integration_tests/superset_test_custom_template_processors.py +++ b/tests/integration_tests/superset_test_custom_template_processors.py @@ -23,7 +23,7 @@ from typing import Any, Dict, SupportsInt # noqa: F401 from superset.jinja_context import PrestoTemplateProcessor -def DATE( +def DATE( # noqa: N802 ts: datetime, day_offset: SupportsInt = 0, hour_offset: SupportsInt = 0 ) -> str: """Current day as a string""" diff --git a/tests/integration_tests/tagging_tests.py b/tests/integration_tests/tagging_tests.py index fe42dd4a5..d3f8aff79 100644 --- a/tests/integration_tests/tagging_tests.py +++ b/tests/integration_tests/tagging_tests.py @@ -216,7 +216,7 @@ class TestTagging(SupersetTestCase): db.session.add(test_saved_query) db.session.commit() - # Test to make sure that a favorited object tag was added to the tagged_object table + # Test to make sure that a favorited object tag was added to the tagged_object table # noqa: E501 tags = self.query_tagged_object_table() assert 1 == len(tags) assert "ObjectType.chart" == str(tags[0].object_type) @@ -292,5 +292,5 @@ class TestTagging(SupersetTestCase): db.session.delete(test_favorited_object) db.session.commit() - # Test to make sure all the tags are deleted when the associated objects are deleted + # Test to make sure all the tags are deleted when the associated objects are deleted # noqa: E501 assert [] == self.query_tagged_object_table() diff --git a/tests/integration_tests/tags/api_tests.py b/tests/integration_tests/tags/api_tests.py index 3de8b67fb..c9a718f60 100644 --- a/tests/integration_tests/tags/api_tests.py +++ b/tests/integration_tests/tags/api_tests.py @@ -100,7 +100,7 @@ class TestTagApi(SupersetTestCase): db.session.commit() return tagged_object - @pytest.fixture() + @pytest.fixture def create_tags(self): with self.create_app().app_context(): # clear tags table diff --git a/tests/integration_tests/tags/dao_tests.py b/tests/integration_tests/tags/dao_tests.py index 8a6ba6e5f..16e09f414 100644 --- a/tests/integration_tests/tags/dao_tests.py +++ b/tests/integration_tests/tags/dao_tests.py @@ -66,7 +66,7 @@ class TestTagsDAO(SupersetTestCase): db.session.commit() return tagged_object - @pytest.fixture() + @pytest.fixture def create_tags(self): with self.create_app().app_context(): # clear tags table @@ -85,7 +85,7 @@ class TestTagsDAO(SupersetTestCase): ) yield tags - @pytest.fixture() + @pytest.fixture def create_tagged_objects(self): with self.create_app().app_context(): # clear tags table diff --git a/tests/integration_tests/tasks/async_queries_tests.py b/tests/integration_tests/tasks/async_queries_tests.py index 01b759c35..4462fa537 100644 --- a/tests/integration_tests/tasks/async_queries_tests.py +++ b/tests/integration_tests/tasks/async_queries_tests.py @@ -151,7 +151,7 @@ class TestAsyncQueries(SupersetTestCase): } errors = ["A timeout occurred while loading chart data"] - with pytest.raises(SoftTimeLimitExceeded): + with pytest.raises(SoftTimeLimitExceeded): # noqa: PT012 with mock.patch( "superset.tasks.async_queries.set_form_data" ) as set_form_data: @@ -270,7 +270,7 @@ class TestAsyncQueries(SupersetTestCase): } errors = ["A timeout occurred while loading 
explore JSON data"] - with pytest.raises(SoftTimeLimitExceeded): + with pytest.raises(SoftTimeLimitExceeded): # noqa: PT012 with mock.patch( "superset.tasks.async_queries.set_form_data" ) as set_form_data: diff --git a/tests/integration_tests/test_app.py b/tests/integration_tests/test_app.py index e88d0e5cb..6ec30c015 100644 --- a/tests/integration_tests/test_app.py +++ b/tests/integration_tests/test_app.py @@ -32,11 +32,13 @@ app = create_app(superset_config_module=superset_config_module) def login( - client: "FlaskClient[Any]", username: str = "admin", password: str = "general" + client: "FlaskClient[Any]", + username: str = "admin", + password: str = "general", # noqa: S107 ): resp = client.post( "/login/", - data=dict(username=username, password=password), + data=dict(username=username, password=password), # noqa: C408 ).get_data(as_text=True) assert "User confirmation needed" not in resp return resp diff --git a/tests/integration_tests/users/api_tests.py b/tests/integration_tests/users/api_tests.py index 416e2be57..e16a957d1 100644 --- a/tests/integration_tests/users/api_tests.py +++ b/tests/integration_tests/users/api_tests.py @@ -25,7 +25,7 @@ from tests.integration_tests.base_tests import SupersetTestCase from tests.integration_tests.conftest import with_config, with_feature_flags from tests.integration_tests.constants import ADMIN_USERNAME -meUri = "/api/v1/me/" +meUri = "/api/v1/me/" # noqa: N816 AVATAR_URL = "/internal/avatar.png" diff --git a/tests/integration_tests/utils/core_tests.py b/tests/integration_tests/utils/core_tests.py index 29b94d6d3..f04f32dbd 100644 --- a/tests/integration_tests/utils/core_tests.py +++ b/tests/integration_tests/utils/core_tests.py @@ -80,5 +80,5 @@ def test_form_data_to_adhoc_generates_deterministic_values(): def test_form_data_to_adhoc_incorrect_clause_type(): form_data = {"where": "1 = 1", "having": "count(*) > 1"} - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 form_data_to_adhoc(form_data, "foobar") diff --git a/tests/integration_tests/utils/encrypt_tests.py b/tests/integration_tests/utils/encrypt_tests.py index dccfc54a7..4f48f6691 100644 --- a/tests/integration_tests/utils/encrypt_tests.py +++ b/tests/integration_tests/utils/encrypt_tests.py @@ -64,7 +64,7 @@ class EncryptedFieldTest(SupersetTestCase): field = encrypted_field_factory.create(String(1024)) assert isinstance(field, StringEncryptedType) assert not isinstance(field, EncryptedType) - assert getattr(field, "__created_by_enc_field_adapter__") + assert field.__created_by_enc_field_adapter__ assert self.app.config["SECRET_KEY"] == field.key def test_ensure_encrypted_field_factory_is_used(self): diff --git a/tests/integration_tests/utils/hashing_tests.py b/tests/integration_tests/utils/hashing_tests.py index 719d03ff2..cfdbfbc5f 100644 --- a/tests/integration_tests/utils/hashing_tests.py +++ b/tests/integration_tests/utils/hashing_tests.py @@ -66,7 +66,7 @@ def test_custom_default_md5_sha(): "datetime": datetime.datetime.now(), } - serialized_obj = '{"company": "Gobias Industries", "datetime": "", "product": "Coffee"}' + serialized_obj = '{"company": "Gobias Industries", "datetime": "", "product": "Coffee"}' # noqa: E501 assert md5_sha_from_str(serialized_obj) == md5_sha_from_dict( obj, default=custom_datetime_serializer diff --git a/tests/integration_tests/utils_tests.py b/tests/integration_tests/utils_tests.py index 202a5b84d..70bebde9e 100644 --- a/tests/integration_tests/utils_tests.py +++ b/tests/integration_tests/utils_tests.py @@ -91,7 +91,7 @@ 
class TestUtils(SupersetTestCase): assert json.json_int_dttm_ser(dttm + timedelta(milliseconds=1)) == (ts + 1) assert json.json_int_dttm_ser(np.int64(1)) == 1 - with self.assertRaises(TypeError): + with self.assertRaises(TypeError): # noqa: PT027 json.json_int_dttm_ser(np.datetime64()) def test_json_iso_dttm_ser(self): @@ -108,7 +108,7 @@ class TestUtils(SupersetTestCase): == "Unserializable []" ) - with self.assertRaises(TypeError): + with self.assertRaises(TypeError): # noqa: PT027 json.json_iso_dttm_ser(np.datetime64()) def test_base_json_conv(self): @@ -578,7 +578,7 @@ class TestUtils(SupersetTestCase): valid = '{"a": 5, "b": [1, 5, ["g", "h"]]}' assert json.validate_json(valid) is None invalid = '{"a": 5, "b": [1, 5, ["g", "h]]}' - with self.assertRaises(json.JSONDecodeError): + with self.assertRaises(json.JSONDecodeError): # noqa: PT027 json.validate_json(invalid) def test_convert_legacy_filters_into_adhoc_where(self): @@ -720,7 +720,7 @@ class TestUtils(SupersetTestCase): db.session.commit() def test_get_or_create_db_invalid_uri(self): - with self.assertRaises(DatabaseInvalidError): + with self.assertRaises(DatabaseInvalidError): # noqa: PT027 get_or_create_db("test_db", "yoursql:superset.db/()") def test_get_or_create_db_existing_invalid_uri(self): @@ -913,7 +913,7 @@ class TestUtils(SupersetTestCase): valid = '{"a": 5, "b": [1, 5, ["g", "h"]]}' assert schema.validate_json(valid) is None invalid = '{"a": 5, "b": [1, 5, ["g", "h]]}' - self.assertRaises(marshmallow.ValidationError, schema.validate_json, invalid) + self.assertRaises(marshmallow.ValidationError, schema.validate_json, invalid) # noqa: PT027 def test_schema_one_of_case_insensitive(self): validator = schema.OneOfCaseInsensitive(choices=[1, 2, 3, "FoO", "BAR", "baz"]) @@ -923,8 +923,8 @@ class TestUtils(SupersetTestCase): assert "FOO" == validator("FOO") assert "bar" == validator("bar") assert "BaZ" == validator("BaZ") - self.assertRaises(marshmallow.ValidationError, validator, "qwerty") - self.assertRaises(marshmallow.ValidationError, validator, 4) + self.assertRaises(marshmallow.ValidationError, validator, "qwerty") # noqa: PT027 + self.assertRaises(marshmallow.ValidationError, validator, 4) # noqa: PT027 def test_cast_to_num(self) -> None: assert cast_to_num("5") == 5 @@ -985,7 +985,7 @@ class TestUtils(SupersetTestCase): df = df.copy() normalize_dttm_col( df, - tuple( + tuple( # noqa: C409 [ DateColumn.get_legacy_time_column( timestamp_format=timestamp_format, diff --git a/tests/integration_tests/viz_tests.py b/tests/integration_tests/viz_tests.py index 8403708a5..5777cbd0f 100644 --- a/tests/integration_tests/viz_tests.py +++ b/tests/integration_tests/viz_tests.py @@ -41,7 +41,7 @@ class TestBaseViz(SupersetTestCase): def test_constructor_exception_no_datasource(self): form_data = {} datasource = None - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017, PT027 viz.BaseViz(datasource, form_data) def test_process_metrics(self): @@ -362,7 +362,7 @@ class TestPairedTTest(SupersetTestCase): raw["metric2"] = [10, 20, 30, 40, 50, 60, 70, 80, 90] raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) - pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data) + pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data) # noqa: N806 data = pairedTTestViz.get_data(df) # Check method correctly transforms data expected = { @@ -457,7 +457,7 @@ class TestPairedTTest(SupersetTestCase): raw[None] = [10, 20, 30] df = pd.DataFrame(raw) - 
pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data) + pairedTTestViz = viz.viz_types["paired_ttest"](datasource, form_data) # noqa: N806 data = pairedTTestViz.get_data(df) # Check method correctly transforms data expected = { @@ -475,7 +475,7 @@ class TestPairedTTest(SupersetTestCase): assert data == expected form_data = {"groupby": [], "metrics": [None]} - with self.assertRaises(ValueError): + with self.assertRaises(ValueError): # noqa: PT027 viz.viz_types["paired_ttest"](datasource, form_data) @@ -655,7 +655,7 @@ class TestPartitionViz(SupersetTestCase): raw["metric3"] = [100, 200, 300, 400, 500, 600, 700, 800, 900] df = pd.DataFrame(raw) test_viz = viz.PartitionViz(Mock(), {}) - with self.assertRaises(ValueError): + with self.assertRaises(ValueError): # noqa: PT027 test_viz.get_data(df) test_viz.levels_for = Mock(return_value=1) test_viz.nest_values = Mock(return_value=1) @@ -776,11 +776,11 @@ class TestTimeSeriesTableViz(SupersetTestCase): form_data = {"groupby": ["a"]} super_query_obj.return_value = {} test_viz = viz.TimeTableViz(datasource, form_data) - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017, PT027 test_viz.query_obj() form_data["metrics"] = ["x", "y"] test_viz = viz.TimeTableViz(datasource, form_data) - with self.assertRaises(Exception): + with self.assertRaises(Exception): # noqa: B017, PT027 test_viz.query_obj() def test_query_obj_order_by(self): @@ -835,7 +835,7 @@ class TestBaseDeckGLViz(SupersetTestCase): datasource = self.get_datasource_mock() test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data) - with self.assertRaises(NotImplementedError) as context: + with self.assertRaises(NotImplementedError) as context: # noqa: PT027 test_viz_deckgl.get_properties(mock_d) assert "" in str(context.exception) @@ -847,7 +847,7 @@ class TestBaseDeckGLViz(SupersetTestCase): mock_gb = [] test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data) - with self.assertRaises(ValueError) as context: + with self.assertRaises(ValueError) as context: # noqa: PT027 test_viz_deckgl.process_spatial_query_obj(mock_key, mock_gb) assert "Bad spatial key" in str(context.exception) @@ -900,10 +900,10 @@ class TestBaseDeckGLViz(SupersetTestCase): datasource = self.get_datasource_mock() test_viz_deckgl = viz.BaseDeckGLViz(datasource, form_data) - with self.assertRaises(SpatialException): + with self.assertRaises(SpatialException): # noqa: PT027 test_viz_deckgl.parse_coordinates("NULL") - with self.assertRaises(SpatialException): + with self.assertRaises(SpatialException): # noqa: PT027 test_viz_deckgl.parse_coordinates("fldkjsalkj,fdlaskjfjadlksj") def test_filter_nulls(self): diff --git a/tests/unit_tests/advanced_data_type/types_tests.py b/tests/unit_tests/advanced_data_type/types_tests.py index 37c3ba533..0dcbc3543 100644 --- a/tests/unit_tests/advanced_data_type/types_tests.py +++ b/tests/unit_tests/advanced_data_type/types_tests.py @@ -53,7 +53,7 @@ def test_ip_func_valid_ip(): def test_cidr_func_invalid_ip(): - """Test to see if the cidr_func behaves as expected when an invalid IP is passed in""" + """Test to see if the cidr_func behaves as expected when an invalid IP is passed in""" # noqa: E501 cidr_request: AdvancedDataTypeRequest = { "advanced_data_type": "cidr", "values": ["abc"], @@ -100,7 +100,7 @@ def test_cidr_func_empty_ip(): def test_port_translation_func_valid_port_number(): """Test to see if the port_translation_func behaves as expected when a valid port number - is passed in""" + is passed in""" # noqa: E501 port_request: 
AdvancedDataTypeRequest = { "advanced_data_type": "port", "values": ["80"], @@ -124,7 +124,7 @@ def test_port_translation_func_valid_port_number(): def test_port_translation_func_valid_port_name(): """Test to see if the port_translation_func behaves as expected when a valid port name - is passed in""" + is passed in""" # noqa: E501 port_request: AdvancedDataTypeRequest = { "advanced_data_type": "port", "values": ["https"], @@ -148,7 +148,7 @@ def test_port_translation_func_valid_port_name(): def test_port_translation_func_invalid_port_name(): """Test to see if the port_translation_func behaves as expected when an invalid port name - is passed in""" + is passed in""" # noqa: E501 port_request: AdvancedDataTypeRequest = { "advanced_data_type": "port", "values": ["abc"], @@ -235,7 +235,7 @@ def test_cidr_translate_filter_func_equals(): def test_cidr_translate_filter_func_not_equals(): """Test to see if the cidr_translate_filter_func behaves as expected when the NOT_EQUALS - operator is used""" + operator is used""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.NOT_EQUALS @@ -284,7 +284,7 @@ def test_cidr_translate_filter_func_greater_than(): def test_cidr_translate_filter_func_less_than(): """Test to see if the cidr_translate_filter_func behaves as expected when the LESS_THAN - operator is used""" + operator is used""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.LESS_THAN @@ -318,7 +318,7 @@ def test_cidr_translate_filter_func_less_than_or_equals(): def test_cidr_translate_filter_func_in_single(): """Test to see if the cidr_translate_filter_func behaves as expected when the IN operator - is used with a single IP""" + is used with a single IP""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.IN @@ -335,7 +335,7 @@ def test_cidr_translate_filter_func_in_single(): def test_cidr_translate_filter_func_in_double(): """Test to see if the cidr_translate_filter_func behaves as expected when the IN operator - is used with two IP's""" + is used with two IP's""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.IN @@ -407,7 +407,7 @@ def test_port_translate_filter_func_equals(): def test_port_translate_filter_func_not_equals(): """Test to see if the port_translate_filter_func behaves as expected when the NOT_EQUALS - operator is used""" + operator is used""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.NOT_EQUALS @@ -475,7 +475,7 @@ def test_port_translate_filter_func_less_than_or_equals(): def test_port_translate_filter_func_less_than(): """Test to see if the port_translate_filter_func behaves as expected when the LESS_THAN - operator is used""" + operator is used""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.LESS_THAN @@ -492,7 +492,7 @@ def test_port_translate_filter_func_less_than(): def test_port_translate_filter_func_in_single(): """Test to see if the port_translate_filter_func behaves as expected when the IN operator - is used with a single port""" + is used with a single port""" # noqa: E501 input_column = Column("user_ip", Integer) input_operation = FilterOperator.IN @@ -509,7 +509,7 @@ def test_port_translate_filter_func_in_single(): def test_port_translate_filter_func_in_double(): """Test to see if the port_translate_filter_func behaves as expected when the IN operator - is used with two ports""" + is used with two ports""" # noqa: E501 
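# A "# noqa" pragma exempts only the physical line it ends on, which is why
# each over-long docstring above needs its own "# noqa: E501" on the closing
# line. Re-flowing the docstring text is the pragma-free alternative -- an
# illustrative rewrap, not taken from the patch:
#
#     def test_port_translate_filter_func_in_double():
#         """Test that port_translate_filter_func behaves as expected
#         when the IN operator is used with two ports."""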
input_column = Column("user_ip", Integer) input_operation = FilterOperator.IN diff --git a/tests/unit_tests/async_events/async_query_manager_tests.py b/tests/unit_tests/async_events/async_query_manager_tests.py index 2ccae644a..005058b00 100644 --- a/tests/unit_tests/async_events/async_query_manager_tests.py +++ b/tests/unit_tests/async_events/async_query_manager_tests.py @@ -20,7 +20,7 @@ from unittest.mock import ANY, Mock import redis from flask import g from jwt import encode -from pytest import fixture, mark, raises +from pytest import fixture, mark, raises # noqa: PT013 from superset import security_manager from superset.async_events.async_query_manager import ( @@ -32,8 +32,8 @@ from superset.async_events.cache_backend import ( RedisSentinelCacheBackend, ) -JWT_TOKEN_SECRET = "some_secret" -JWT_TOKEN_COOKIE_NAME = "superset_async_jwt" +JWT_TOKEN_SECRET = "some_secret" # noqa: S105 +JWT_TOKEN_COOKIE_NAME = "superset_async_jwt" # noqa: S105 @fixture diff --git a/tests/unit_tests/charts/commands/importers/v1/import_test.py b/tests/unit_tests/charts/commands/importers/v1/import_test.py index 8284c8565..c31aeb257 100644 --- a/tests/unit_tests/charts/commands/importers/v1/import_test.py +++ b/tests/unit_tests/charts/commands/importers/v1/import_test.py @@ -68,7 +68,7 @@ def session_with_schema(session: Session) -> Generator[Session, None, None]: engine = session.get_bind() SqlaTable.metadata.create_all(engine) # pylint: disable=no-member - yield session + return session def test_import_chart(mocker: MockerFixture, session_with_schema: Session) -> None: @@ -158,7 +158,7 @@ def test_filter_chart_annotations(session: Session) -> None: annotation_layers = params["annotation_layers"] assert len(annotation_layers) == 1 - assert all([al["annotationType"] == "FORMULA" for al in annotation_layers]) + assert all([al["annotationType"] == "FORMULA" for al in annotation_layers]) # noqa: C419 def test_import_existing_chart_without_permission( @@ -186,7 +186,7 @@ def test_import_existing_chart_without_permission( import_chart(chart_config, overwrite=True) assert ( str(excinfo.value) - == "A chart already exists and user doesn't have permissions to overwrite it" + == "A chart already exists and user doesn't have permissions to overwrite it" # noqa: E501 ) # Assert that the can write to chart was checked diff --git a/tests/unit_tests/charts/test_post_processing.py b/tests/unit_tests/charts/test_post_processing.py index 181b9f063..f3eaf6fa7 100644 --- a/tests/unit_tests/charts/test_post_processing.py +++ b/tests/unit_tests/charts/test_post_processing.py @@ -300,7 +300,7 @@ def test_pivot_df_single_row_two_metrics(): | | ('SUM(num)', 'boy') | ('SUM(num)', 'girl') | ('MAX(num)', 'boy') | ('MAX(num)', 'girl') | |:-----------------|----------------------:|-----------------------:|----------------------:|-----------------------:| | ('{_("Total")} (Sum)',) | 47123 | 118065 | 1280 | 2588 | - """.strip() + """.strip() # noqa: E501 ) # combine_metrics does nothing in this case @@ -469,7 +469,7 @@ def test_pivot_df_single_row_null_values(): | | ('SUM(num)', 'boy') | ('SUM(num)', 'girl') | ('MAX(num)', 'boy') | ('MAX(num)', 'girl') | |:-----------------|----------------------:|-----------------------:|----------------------:|-----------------------:| | ('Total (Sum)',) | nan | 118065 | nan | 2588 | - """.strip() + """.strip() # noqa: E501 ) # combine_metrics does nothing in this case @@ -639,7 +639,7 @@ def test_pivot_df_single_row_null_mix_values_strings(): 
|:-----------------|:----------------------|-----------------------:|----------------------:|-----------------------:| | ('Total (Sum)',) | NULL | 118065 | nan | 2588 | - """.strip() + """.strip() # noqa: E501 ) # combine_metrics does nothing in this case @@ -776,7 +776,7 @@ def test_pivot_df_single_row_null_mix_values_numbers(): == """ | | ('SUM(num)', 'boy') | ('SUM(num)', 'girl') | ('MAX(num)', 'boy') | ('MAX(num)', 'girl') | |:-----------------|----------------------:|-----------------------:|----------------------:|-----------------------:| -| ('Total (Sum)',) | 21 | 118065 | nan | 2588 | """.strip() +| ('Total (Sum)',) | 21 | 118065 | nan | 2588 | """.strip() # noqa: E501 ) # combine_metrics does nothing in this case @@ -998,7 +998,7 @@ def test_pivot_df_complex(): | ('girl', 'Cindy') | 14149 | 1218 | 842 | 217 | | ('girl', 'Dawn') | 11403 | 5089 | 1157 | 461 | | ('girl', 'Sophia') | 18859 | 7181 | 2588 | 1187 | - """.strip() + """.strip() # noqa: E501 ) # transpose_pivot @@ -1021,7 +1021,7 @@ def test_pivot_df_complex(): |:--------|--------------------------------:|------------------------------:|------------------------------:|--------------------------------:|-------------------------------:|---------------------------------:|--------------------------------:|------------------------------:|------------------------------:|--------------------------------:|-------------------------------:|---------------------------------:| | ('CA',) | 31290 | 3765 | 45426 | 14149 | 11403 | 18859 | 1280 | 598 | 2227 | 842 | 1157 | 2588 | | ('FL',) | 9395 | 2673 | 14740 | 1218 | 5089 | 7181 | 389 | 247 | 854 | 217 | 461 | 1187 | - """.strip() + """.strip() # noqa: E501 ) # combine_metrics @@ -1048,7 +1048,7 @@ def test_pivot_df_complex(): | ('girl', 'Cindy') | 14149 | 842 | 1218 | 217 | | ('girl', 'Dawn') | 11403 | 1157 | 5089 | 461 | | ('girl', 'Sophia') | 18859 | 2588 | 7181 | 1187 | - """.strip() + """.strip() # noqa: E501 ) # show totals @@ -1078,7 +1078,7 @@ def test_pivot_df_complex(): | ('girl', 'Sophia') | 18859 | 7181 | 26040 | 2588 | 1187 | 3775 | 29815 | | ('girl', 'Subtotal') | 89837 | 28228 | 118065 | 6814 | 2719 | 9533 | 127598 | | ('Total (Sum)', '') | 124892 | 40296 | 165188 | 8692 | 3355 | 12047 | 177235 | - """.strip() + """.strip() # noqa: E501 ) # apply_metrics_on_rows @@ -1172,7 +1172,7 @@ def test_pivot_df_complex(): | ('FL', 'MAX(num)') | 389 | 247 | 636 | 854 | 217 | 461 | 1187 | 2719 | 3355 | | ('FL', 'Subtotal') | 9784 | 2920 | 12704 | 15594 | 1435 | 5550 | 8368 | 30947 | 43651 | | ('Total (Sum)', '') | 42354 | 7283 | 49637 | 63247 | 16426 | 18110 | 29815 | 127598 | 177235 | - """.strip() + """.strip() # noqa: E501 ) # fraction @@ -1202,7 +1202,7 @@ def test_pivot_df_complex(): | ('girl', 'Sophia') | 0.151002 | 0.178206 | 0.297745 | 0.3538 | | ('girl', 'Subtotal') | 0.719317 | 0.700516 | 0.783939 | 0.810432 | | ('Total (Sum as Fraction of Columns)', '') | 1 | 1 | 1 | 1 | - """.strip() + """.strip() # noqa: E501 ) @@ -1290,7 +1290,7 @@ def test_pivot_df_multi_column(): |:-----------------|----------------------:|-----------------------:|----------------------:|-----------------------:| | ('CA',) | 35055 | 89837 | 1878 | 6814 | | ('Total (Sum)',) | 12068 | 28228 | 636 | 2719 | - """.strip() + """.strip() # noqa: E501 ) # transpose_pivot @@ -1338,7 +1338,7 @@ def test_pivot_df_multi_column(): |:-----------------|----------------------:|----------------------:|-----------------------:|-----------------------:| | ('CA',) | 35055 | 1878 | 89837 | 6814 | | ('Total (Sum)',) | 12068 
| 636 | 28228 | 2719 | - """.strip() + """.strip() # noqa: E501 ) # show totals @@ -1362,7 +1362,7 @@ def test_pivot_df_multi_column(): | ('CA',) | 35055 | 89837 | 124892 | 1878 | 6814 | 8692 | 133584 | | ('Total (Sum)',) | 12068 | 28228 | 40296 | 636 | 2719 | 3355 | 43651 | - """.strip() + """.strip() # noqa: E501 ) # apply_metrics_on_rows @@ -1385,7 +1385,7 @@ def test_pivot_df_multi_column(): |:--------------|----------------:|-----------------:|----------------:|-----------------:| | ('SUM(num)',) | 35055 | 89837 | 12068 | 28228 | | ('MAX(num)',) | 1878 | 6814 | 636 | 2719 | - """.strip() + """.strip() # noqa: E501 ) # apply_metrics_on_rows with combine_metrics @@ -1408,7 +1408,7 @@ def test_pivot_df_multi_column(): |:--------------|----------------:|-----------------:|----------------:|-----------------:| | ('SUM(num)',) | 35055 | 89837 | 12068 | 28228 | | ('MAX(num)',) | 1878 | 6814 | 636 | 2719 | - """.strip() + """.strip() # noqa: E501 ) # everything @@ -1459,7 +1459,7 @@ def test_pivot_df_multi_column(): |:----------------------------------------|----------------------:|-----------------------:|----------------------:|-----------------------:| | ('CA',) | 0.743904 | 0.760911 | 0.747017 | 0.71478 | | ('Total (Sum as Fraction of Columns)',) | 0.256096 | 0.239089 | 0.252983 | 0.28522 | - """.strip() + """.strip() # noqa: E501 ) @@ -1607,7 +1607,7 @@ def test_pivot_df_complex_null_values(): | | ('SUM(num)', 'boy', 'Edward') | ('SUM(num)', 'boy', 'Tony') | ('SUM(num)', 'girl', 'Amy') | ('SUM(num)', 'girl', 'Cindy') | ('SUM(num)', 'girl', 'Dawn') | ('SUM(num)', 'girl', 'Sophia') | ('MAX(num)', 'boy', 'Edward') | ('MAX(num)', 'boy', 'Tony') | ('MAX(num)', 'girl', 'Amy') | ('MAX(num)', 'girl', 'Cindy') | ('MAX(num)', 'girl', 'Dawn') | ('MAX(num)', 'girl', 'Sophia') | |:-------|--------------------------------:|------------------------------:|------------------------------:|--------------------------------:|-------------------------------:|---------------------------------:|--------------------------------:|------------------------------:|------------------------------:|--------------------------------:|-------------------------------:|---------------------------------:| | (nan,) | 40685 | 6438 | 60166 | 15367 | 16492 | 26040 | 1669 | 845 | 3081 | 1059 | 1618 | 3775 | - """.strip() + """.strip() # noqa: E501 ) # combine_metrics @@ -1664,7 +1664,7 @@ def test_pivot_df_complex_null_values(): | ('girl', 'Sophia') | 26040 | 26040 | 3775 | 3775 | 29815 | | ('girl', 'Subtotal') | 118065 | 118065 | 9533 | 9533 | 127598 | | ('Total (Sum)', '') | 165188 | 165188 | 12047 | 12047 | 177235 | - """.strip() + """.strip() # noqa: E501 ) # apply_metrics_on_rows @@ -1755,7 +1755,7 @@ def test_pivot_df_complex_null_values(): | (nan, 'MAX(num)') | 1669 | 845 | 2514 | 3081 | 1059 | 1618 | 3775 | 9533 | 12047 | | (nan, 'Subtotal') | 42354 | 7283 | 49637 | 63247 | 16426 | 18110 | 29815 | 127598 | 177235 | | ('Total (Sum)', '') | 42354 | 7283 | 49637 | 63247 | 16426 | 18110 | 29815 | 127598 | 177235 | - """.strip() + """.strip() # noqa: E501 ) # fraction @@ -1785,7 +1785,7 @@ def test_pivot_df_complex_null_values(): | ('girl', 'Sophia') | 0.157639 | 0.313356 | | ('girl', 'Subtotal') | 0.714731 | 0.791317 | | ('Total (Sum as Fraction of Columns)', '') | 1 | 1 | - """.strip() + """.strip() # noqa: E501 ) @@ -1857,7 +1857,7 @@ def test_apply_post_process_without_result_format(): result = {"queries": [{"result_format": "foo"}]} form_data = {"viz_type": "pivot_table_v2"} - with pytest.raises(Exception) as ex: + with 
pytest.raises(Exception) as ex: # noqa: PT011 apply_post_process(result, form_data) assert ex.match("Result format foo not supported") is True # noqa: E712 diff --git a/tests/unit_tests/commands/databases/columnar_reader_test.py b/tests/unit_tests/commands/databases/columnar_reader_test.py index 7d512178d..1cbaa86b8 100644 --- a/tests/unit_tests/commands/databases/columnar_reader_test.py +++ b/tests/unit_tests/commands/databases/columnar_reader_test.py @@ -153,7 +153,7 @@ def test_excel_reader_wrong_columns_to_read(): "__filename: string" ) != ( - "Parsing error: Usecols do not match columns, columns expected but not found: " + "Parsing error: Usecols do not match columns, columns expected but not found: " # noqa: E501 "['xpto'] (sheet: 0)" ) ) diff --git a/tests/unit_tests/commands/databases/create_test.py b/tests/unit_tests/commands/databases/create_test.py index 61591274d..5e4d23ab6 100644 --- a/tests/unit_tests/commands/databases/create_test.py +++ b/tests/unit_tests/commands/databases/create_test.py @@ -25,7 +25,7 @@ from superset.exceptions import OAuth2RedirectError from superset.extensions import security_manager -@pytest.fixture() +@pytest.fixture def database_with_catalog(mocker: MockerFixture) -> MagicMock: """ Mock a database with catalogs and schemas. @@ -42,13 +42,13 @@ def database_with_catalog(mocker: MockerFixture) -> MagicMock: {"schema3", "schema4"}, ] - DatabaseDAO = mocker.patch("superset.commands.database.create.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.create.DatabaseDAO") # noqa: N806 DatabaseDAO.create.return_value = database return database -@pytest.fixture() +@pytest.fixture def database_without_catalog(mocker: MockerFixture) -> MagicMock: """ Mock a database without catalogs. @@ -61,7 +61,7 @@ def database_without_catalog(mocker: MockerFixture) -> MagicMock: database.db_engine_spec.supports_catalog = False database.get_all_schema_names.return_value = ["schema1", "schema2"] - DatabaseDAO = mocker.patch("superset.commands.database.create.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.create.DatabaseDAO") # noqa: N806 DatabaseDAO.create.return_value = database return database @@ -134,7 +134,7 @@ def test_create_with_oauth2( """ Test that the database can be created even if OAuth2 is needed to connect. """ - TestConnectionDatabaseCommand = mocker.patch( + TestConnectionDatabaseCommand = mocker.patch( # noqa: N806 "superset.commands.database.create.TestConnectionDatabaseCommand" ) TestConnectionDatabaseCommand().run.side_effect = OAuth2RedirectError( diff --git a/tests/unit_tests/commands/databases/excel_reader_test.py b/tests/unit_tests/commands/databases/excel_reader_test.py index b2bd24703..5f4ebca2c 100644 --- a/tests/unit_tests/commands/databases/excel_reader_test.py +++ b/tests/unit_tests/commands/databases/excel_reader_test.py @@ -237,7 +237,7 @@ def test_excel_reader_invalid_file(): with pytest.raises(DatabaseUploadFailed) as ex: excel_reader.file_to_dataframe(FileStorage(io.BytesIO(b"c1"))) assert str(ex.value) == ( - "Parsing error: Excel file format cannot be determined, you must specify an engine manually." + "Parsing error: Excel file format cannot be determined, you must specify an engine manually." 
# noqa: E501 ) diff --git a/tests/unit_tests/commands/databases/tables_test.py b/tests/unit_tests/commands/databases/tables_test.py index d9a8583f9..db446b46b 100644 --- a/tests/unit_tests/commands/databases/tables_test.py +++ b/tests/unit_tests/commands/databases/tables_test.py @@ -25,7 +25,7 @@ from superset.extensions import security_manager from superset.utils.core import DatasourceName -@pytest.fixture() +@pytest.fixture def database_with_catalog(mocker: MockerFixture) -> MagicMock: """ Mock a database with catalogs and schemas. @@ -42,13 +42,13 @@ def database_with_catalog(mocker: MockerFixture) -> MagicMock: DatasourceName("view1", "schema1", "catalog1"), ] - DatabaseDAO = mocker.patch("superset.commands.database.tables.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.tables.DatabaseDAO") # noqa: N806 DatabaseDAO.find_by_id.return_value = database return database -@pytest.fixture() +@pytest.fixture def database_without_catalog(mocker: MockerFixture) -> MagicMock: """ Mock a database without catalogs but with schemas. @@ -65,7 +65,7 @@ def database_without_catalog(mocker: MockerFixture) -> MagicMock: DatasourceName("view1", "schema1"), ] - DatabaseDAO = mocker.patch("superset.commands.database.tables.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.tables.DatabaseDAO") # noqa: N806 DatabaseDAO.find_by_id.return_value = database return database diff --git a/tests/unit_tests/commands/databases/test_connection_test.py b/tests/unit_tests/commands/databases/test_connection_test.py index eab2b4667..59ca89df2 100644 --- a/tests/unit_tests/commands/databases/test_connection_test.py +++ b/tests/unit_tests/commands/databases/test_connection_test.py @@ -37,7 +37,7 @@ def test_command(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.return_value = True - DatabaseDAO = mocker.patch("superset.commands.database.test_connection.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.test_connection.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { @@ -71,7 +71,7 @@ def test_command_with_oauth2(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.side_effect = Exception("OAuth2 needed") - DatabaseDAO = mocker.patch("superset.commands.database.test_connection.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.test_connection.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { diff --git a/tests/unit_tests/commands/databases/update_test.py b/tests/unit_tests/commands/databases/update_test.py index dfec42180..7ca3d70dc 100644 --- a/tests/unit_tests/commands/databases/update_test.py +++ b/tests/unit_tests/commands/databases/update_test.py @@ -35,7 +35,7 @@ oauth2_client_info = { } -@pytest.fixture() +@pytest.fixture def database_with_catalog(mocker: MockerFixture) -> MagicMock: """ Mock a database with catalogs and schemas. @@ -54,7 +54,7 @@ def database_with_catalog(mocker: MockerFixture) -> MagicMock: return database -@pytest.fixture() +@pytest.fixture def database_without_catalog(mocker: MockerFixture) -> MagicMock: """ Mock a database without catalogs. @@ -68,7 +68,7 @@ def database_without_catalog(mocker: MockerFixture) -> MagicMock: return database -@pytest.fixture() +@pytest.fixture def database_needs_oauth2(mocker: MockerFixture) -> MagicMock: """ Mock a database without catalogs that needs OAuth2. 
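# Two rules dominate this file: the PT fixture-style rule drives the
# "@pytest.fixture()" -> "@pytest.fixture" rewrites above (recent Ruff
# defaults prefer the bare decorator for argument-free fixtures), and N806
# flags the class-like local bindings below, which keep their spelling under
# a pragma. A lowercase alias would satisfy N806 without one -- sketch only,
# `dao_cls` is an invented name:
#
#     dao_cls = mocker.patch("superset.commands.database.update.DatabaseDAO")
#     dao_cls.find_by_id.return_value = database_with_catalog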
@@ -106,7 +106,7 @@ def test_update_with_catalog( When update is called, only `catalog2.schema3` has permissions associated with it, so `catalog1.*` and `catalog2.schema4` are added. """ - DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") # noqa: N806 DatabaseDAO.find_by_id.return_value = database_with_catalog DatabaseDAO.update.return_value = database_with_catalog @@ -156,8 +156,8 @@ def test_update_without_catalog( When update is called, only `schema2` has permissions associated with it, so `schema1` is added. - """ - DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") + """ # noqa: E501 + DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") # noqa: N806 DatabaseDAO.find_by_id.return_value = database_without_catalog DatabaseDAO.update.return_value = database_without_catalog @@ -202,7 +202,7 @@ def test_rename_with_catalog( so `catalog1.*` and `catalog2.schema4` are added. Additionally, the database has been renamed from `my_db` to `my_other_db`. """ - DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") # noqa: N806 original_database = mocker.MagicMock() original_database.database_name = "my_db" DatabaseDAO.find_by_id.return_value = original_database @@ -212,7 +212,7 @@ def test_rename_with_catalog( dataset = mocker.MagicMock() chart = mocker.MagicMock() DatabaseDAO.get_datasets.return_value = [dataset] - DatasetDAO = mocker.patch("superset.commands.database.update.DatasetDAO") + DatasetDAO = mocker.patch("superset.commands.database.update.DatasetDAO") # noqa: N806 DatasetDAO.get_related_objects.return_value = {"charts": [chart]} find_permission_view_menu = mocker.patch.object( @@ -273,8 +273,8 @@ def test_rename_without_catalog( When update is called, only `schema2` has permissions associated with it, so `schema1` is added. Additionally, the database has been renamed from `my_db` to `my_other_db`. - """ - DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") + """ # noqa: E501 + DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") # noqa: N806 original_database = mocker.MagicMock() original_database.database_name = "my_db" DatabaseDAO.find_by_id.return_value = original_database @@ -315,7 +315,7 @@ def test_update_with_oauth2( """ Test that the database can be updated even if OAuth2 is needed to connect. """ - DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") # noqa: N806 DatabaseDAO.find_by_id.return_value = database_needs_oauth2 DatabaseDAO.update.return_value = database_needs_oauth2 @@ -349,7 +349,7 @@ def test_update_with_oauth2_changed( """ Test that the database can be updated even if OAuth2 is needed to connect. 
""" - DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.update.DatabaseDAO") # noqa: N806 DatabaseDAO.find_by_id.return_value = database_needs_oauth2 DatabaseDAO.update.return_value = database_needs_oauth2 diff --git a/tests/unit_tests/commands/databases/validate_test.py b/tests/unit_tests/commands/databases/validate_test.py index fde462536..c5992356c 100644 --- a/tests/unit_tests/commands/databases/validate_test.py +++ b/tests/unit_tests/commands/databases/validate_test.py @@ -40,7 +40,7 @@ def test_command(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.return_value = True - DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { @@ -65,7 +65,7 @@ def test_command_invalid(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.return_value = True - DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { @@ -109,7 +109,7 @@ def test_command_no_ping(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.return_value = False - DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { @@ -128,7 +128,7 @@ def test_command_no_ping(mocker: MockerFixture) -> None: "issue_codes": [ { "code": 1002, - "message": "Issue 1002 - The database returned an unexpected error.", + "message": "Issue 1002 - The database returned an unexpected error.", # noqa: E501 } ] }, @@ -150,7 +150,7 @@ def test_command_with_oauth2(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.side_effect = Exception("OAuth2 needed") - DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { @@ -185,7 +185,7 @@ def test_command_with_oauth2_not_configured(mocker: MockerFixture) -> None: with database.get_sqla_engine() as engine: engine.dialect.do_ping.side_effect = Exception("OAuth2 needed") - DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") + DatabaseDAO = mocker.patch("superset.commands.database.validate.DatabaseDAO") # noqa: N806 DatabaseDAO.build_db_for_connection_test.return_value = database properties = { diff --git a/tests/unit_tests/commands/export_test.py b/tests/unit_tests/commands/export_test.py index 4ce354573..2145aca17 100644 --- a/tests/unit_tests/commands/export_test.py +++ b/tests/unit_tests/commands/export_test.py @@ -26,53 +26,53 @@ def test_export_assets_command(mocker: MockerFixture) -> None: """ from superset.commands.export.assets import ExportAssetsCommand - ExportDatabasesCommand = mocker.patch( + ExportDatabasesCommand = mocker.patch( # noqa: N806 "superset.commands.export.assets.ExportDatabasesCommand" ) ExportDatabasesCommand.return_value.run.return_value = 
[ ( "metadata.yaml", - lambda: "version: 1.0.0\ntype: Database\ntimestamp: '2022-01-01T00:00:00+00:00'\n", + lambda: "version: 1.0.0\ntype: Database\ntimestamp: '2022-01-01T00:00:00+00:00'\n", # noqa: E501 ), ("databases/example.yaml", lambda: ""), ] - ExportDatasetsCommand = mocker.patch( + ExportDatasetsCommand = mocker.patch( # noqa: N806 "superset.commands.export.assets.ExportDatasetsCommand" ) ExportDatasetsCommand.return_value.run.return_value = [ ( "metadata.yaml", - lambda: "version: 1.0.0\ntype: Dataset\ntimestamp: '2022-01-01T00:00:00+00:00'\n", + lambda: "version: 1.0.0\ntype: Dataset\ntimestamp: '2022-01-01T00:00:00+00:00'\n", # noqa: E501 ), ("datasets/example/dataset.yaml", lambda: ""), ] - ExportChartsCommand = mocker.patch( + ExportChartsCommand = mocker.patch( # noqa: N806 "superset.commands.export.assets.ExportChartsCommand" ) ExportChartsCommand.return_value.run.return_value = [ ( "metadata.yaml", - lambda: "version: 1.0.0\ntype: Slice\ntimestamp: '2022-01-01T00:00:00+00:00'\n", + lambda: "version: 1.0.0\ntype: Slice\ntimestamp: '2022-01-01T00:00:00+00:00'\n", # noqa: E501 ), ("charts/pie.yaml", lambda: ""), ] - ExportDashboardsCommand = mocker.patch( + ExportDashboardsCommand = mocker.patch( # noqa: N806 "superset.commands.export.assets.ExportDashboardsCommand" ) ExportDashboardsCommand.return_value.run.return_value = [ ( "metadata.yaml", - lambda: "version: 1.0.0\ntype: Dashboard\ntimestamp: '2022-01-01T00:00:00+00:00'\n", + lambda: "version: 1.0.0\ntype: Dashboard\ntimestamp: '2022-01-01T00:00:00+00:00'\n", # noqa: E501 ), ("dashboards/sales.yaml", lambda: ""), ] - ExportSavedQueriesCommand = mocker.patch( + ExportSavedQueriesCommand = mocker.patch( # noqa: N806 "superset.commands.export.assets.ExportSavedQueriesCommand" ) ExportSavedQueriesCommand.return_value.run.return_value = [ ( "metadata.yaml", - lambda: "version: 1.0.0\ntype: SavedQuery\ntimestamp: '2022-01-01T00:00:00+00:00'\n", + lambda: "version: 1.0.0\ntype: SavedQuery\ntimestamp: '2022-01-01T00:00:00+00:00'\n", # noqa: E501 ), ("queries/example/metric.yaml", lambda: ""), ] diff --git a/tests/unit_tests/common/test_query_object_factory.py b/tests/unit_tests/common/test_query_object_factory.py index a67f2887d..4d54f77de 100644 --- a/tests/unit_tests/common/test_query_object_factory.py +++ b/tests/unit_tests/common/test_query_object_factory.py @@ -17,7 +17,7 @@ from typing import Any, Optional from unittest.mock import Mock -from pytest import fixture +from pytest import fixture # noqa: PT013 from superset.common.query_object_factory import QueryObjectFactory from tests.common.query_context_generator import QueryContextGenerator diff --git a/tests/unit_tests/common/test_time_shifts.py b/tests/unit_tests/common/test_time_shifts.py index 3f25236a7..7ac91c680 100644 --- a/tests/unit_tests/common/test_time_shifts.py +++ b/tests/unit_tests/common/test_time_shifts.py @@ -16,7 +16,7 @@ # under the License. 
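# PT013 ("found incorrect import of pytest") is suppressed on either side of
# this hunk: the rule wants the module imported whole and its names accessed
# as attributes. The compliant spelling, for comparison (illustrative only):
#
#     import pytest
#
#     @pytest.fixture
#     def result_format() -> str:
#         return "csv"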
from pandas import DataFrame, Series, Timestamp from pandas.testing import assert_frame_equal -from pytest import fixture, mark +from pytest import fixture, mark # noqa: PT013 from superset.common.chart_data import ChartDataResultFormat, ChartDataResultType from superset.common.query_context import QueryContext diff --git a/tests/unit_tests/conftest.py b/tests/unit_tests/conftest.py index e82c4cf53..a33b0b7b2 100644 --- a/tests/unit_tests/conftest.py +++ b/tests/unit_tests/conftest.py @@ -20,7 +20,7 @@ import importlib import os import unittest.mock from collections.abc import Iterator -from typing import Any, Callable +from typing import Any, Callable, Union from unittest.mock import patch import pytest @@ -46,7 +46,7 @@ def get_session(mocker: MockerFixture) -> Callable[[], Session]: engine = create_engine("sqlite://") def get_session(): - Session_ = sessionmaker(bind=engine) # pylint: disable=invalid-name + Session_ = sessionmaker(bind=engine) # pylint: disable=invalid-name # noqa: N806 in_memory_session = Session_() # flask calls db.session.remove() @@ -70,7 +70,7 @@ def get_session(mocker: MockerFixture) -> Callable[[], Session]: @pytest.fixture def session(get_session) -> Iterator[Session]: - yield get_session() + return get_session() @pytest.fixture(scope="module") @@ -122,7 +122,7 @@ def app(request: SubRequest) -> Iterator[SupersetApp]: importlib.reload(superset.views.base) - yield app + return app @pytest.fixture @@ -141,7 +141,7 @@ def app_context(app: SupersetApp) -> Iterator[None]: @pytest.fixture -def full_api_access(mocker: MockerFixture) -> Iterator[None]: +def full_api_access(mocker: MockerFixture) -> Union[Iterator[None], None]: """ Allow full access to the API. @@ -156,7 +156,7 @@ def full_api_access(mocker: MockerFixture) -> Iterator[None]: mocker.patch.object(security_manager, "has_access", return_value=True) mocker.patch.object(security_manager, "can_access_all_databases", return_value=True) - yield + return None @pytest.fixture @@ -174,7 +174,7 @@ def dummy_query_object(request, app_context): else: result_type = result_type_marker.args[0] - yield QueryObjectFactory( + return QueryObjectFactory( app_configurations={ "ROW_LIMIT": 100, }, diff --git a/tests/unit_tests/connectors/sqla/models_test.py b/tests/unit_tests/connectors/sqla/models_test.py index 013d03e7e..5820e5fa3 100644 --- a/tests/unit_tests/connectors/sqla/models_test.py +++ b/tests/unit_tests/connectors/sqla/models_test.py @@ -187,10 +187,13 @@ def test_query_datasources_by_permissions_with_catalog_schema( ["[my_db].[db1].[schema1]", "[my_other_db].[schema]"], # type: ignore ) clause = db.session.query().filter_by().filter.mock_calls[0].args[0] - assert str(clause.compile(engine, compile_kwargs={"literal_binds": True})) == ( - "tables.perm IN ('[my_db].[table1](id:1)') OR " - "tables.schema_perm IN ('[my_db].[db1].[schema1]', '[my_other_db].[schema]') OR " - "tables.catalog_perm IN ('[my_db].[db1]')" + assert ( + str(clause.compile(engine, compile_kwargs={"literal_binds": True})) + == ( + "tables.perm IN ('[my_db].[table1](id:1)') OR " + "tables.schema_perm IN ('[my_db].[db1].[schema1]', '[my_other_db].[schema]') OR " # noqa: E501 + "tables.catalog_perm IN ('[my_db].[db1]')" + ) ) diff --git a/tests/unit_tests/connectors/sqla/utils_test.py b/tests/unit_tests/connectors/sqla/utils_test.py index 0da3ab7e9..ef17bd9b2 100644 --- a/tests/unit_tests/connectors/sqla/utils_test.py +++ b/tests/unit_tests/connectors/sqla/utils_test.py @@ -33,7 +33,7 @@ def test_returns_column_descriptions(mocker: MockerFixture) -> 
None: result_set = mocker.MagicMock() db_engine_spec = mocker.MagicMock() - CURSOR_DESCR = ( + CURSOR_DESCR = ( # noqa: N806 ("foo", "string"), ("bar", "string"), ("baz", "string"), @@ -42,7 +42,7 @@ def test_returns_column_descriptions(mocker: MockerFixture) -> None: ) cursor.description = CURSOR_DESCR - database.get_raw_connection.return_value.__enter__.return_value.cursor.return_value = cursor + database.get_raw_connection.return_value.__enter__.return_value.cursor.return_value = cursor # noqa: E501 database.db_engine_spec = db_engine_spec database.apply_limit_to_sql.return_value = "SELECT * FROM table LIMIT 1" database.mutate_sql_based_on_config.return_value = "SELECT * FROM table LIMIT 1" diff --git a/tests/unit_tests/core_tests.py b/tests/unit_tests/core_tests.py index 1473f1838..acd0501d8 100644 --- a/tests/unit_tests/core_tests.py +++ b/tests/unit_tests/core_tests.py @@ -89,31 +89,31 @@ def test_get_metric_name_invalid_metric(): metric = deepcopy(SIMPLE_SUM_ADHOC_METRIC) del metric["label"] del metric["column"] - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name(metric) metric = deepcopy(SIMPLE_SUM_ADHOC_METRIC) del metric["label"] metric["expressionType"] = "FOO" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name(metric) metric = deepcopy(SQL_ADHOC_METRIC) del metric["label"] metric["expressionType"] = "FOO" - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name(metric) metric = deepcopy(SQL_ADHOC_METRIC) del metric["expressionType"] - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name(metric) - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name(None) - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name(0) - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_metric_name({}) @@ -160,7 +160,7 @@ def test_get_column_name_invalid_metric(): column = deepcopy(SQL_ADHOC_COLUMN) del column["label"] del column["sqlExpression"] - with pytest.raises(ValueError): + with pytest.raises(ValueError): # noqa: PT011 get_column_name(column) diff --git a/tests/unit_tests/dao/dataset_test.py b/tests/unit_tests/dao/dataset_test.py index 2b0b5c3d5..ff6d93a86 100644 --- a/tests/unit_tests/dao/dataset_test.py +++ b/tests/unit_tests/dao/dataset_test.py @@ -27,7 +27,7 @@ def test_validate_update_uniqueness(session: Session) -> None: In particular, allow datasets with the same name in the same database as long as they are in different schemas - """ + """ # noqa: E501 from superset import db from superset.connectors.sqla.models import SqlaTable from superset.models.core import Database diff --git a/tests/unit_tests/dao/key_value_test.py b/tests/unit_tests/dao/key_value_test.py index abe05f877..41b8c3e59 100644 --- a/tests/unit_tests/dao/key_value_test.py +++ b/tests/unit_tests/dao/key_value_test.py @@ -19,7 +19,7 @@ from __future__ import annotations import pickle from datetime import datetime, timedelta -from typing import Generator, TYPE_CHECKING +from typing import TYPE_CHECKING from uuid import UUID import pytest @@ -54,7 +54,7 @@ NEW_VALUE = {"foo": "baz"} @pytest.fixture -def key_value_entry() -> Generator[KeyValueEntry, None, None]: +def key_value_entry() -> KeyValueEntry: from superset.key_value.models import KeyValueEntry entry = KeyValueEntry( @@ -65,7 +65,7 @@ def key_value_entry() -> 
Generator[KeyValueEntry, None, None]: ) db.session.add(entry) db.session.flush() - yield entry + return entry def test_create_id_entry( @@ -143,7 +143,7 @@ def test_create_pickle_entry( found_entry = ( db.session.query(KeyValueEntry).filter_by(id=created_entry.id).one() ) - assert isinstance(pickle.loads(found_entry.value), type(PICKLE_VALUE)) + assert isinstance(pickle.loads(found_entry.value), type(PICKLE_VALUE)) # noqa: S301 assert found_entry.created_by_fk == admin_user.id diff --git a/tests/unit_tests/dao/tag_test.py b/tests/unit_tests/dao/tag_test.py index 7662393d4..c1ca0ee51 100644 --- a/tests/unit_tests/dao/tag_test.py +++ b/tests/unit_tests/dao/tag_test.py @@ -22,7 +22,7 @@ def test_user_favorite_tag(mocker): from superset.daos.tag import TagDAO # Mock the behavior of TagDAO and g - mock_TagDAO = mocker.patch( + mock_TagDAO = mocker.patch( # noqa: N806 "superset.daos.tag.TagDAO" ) # Replace with the actual path to TagDAO mock_TagDAO.find_by_id.return_value = mocker.MagicMock(users_favorited=[]) @@ -44,7 +44,7 @@ def test_remove_user_favorite_tag(mocker): from superset.daos.tag import TagDAO # Mock the behavior of TagDAO and g - mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") + mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") # noqa: N806 mock_tag = mocker.MagicMock(users_favorited=[]) mock_TagDAO.find_by_id.return_value = mock_tag @@ -71,7 +71,7 @@ def test_remove_user_favorite_tag_no_user(mocker): # Mock the behavior of TagDAO and g mocker.patch("superset.daos.tag.db.session") # noqa: F841 - mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") + mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") # noqa: N806 mock_tag = mocker.MagicMock(users_favorited=[]) mock_TagDAO.find_by_id.return_value = mock_tag @@ -88,7 +88,7 @@ def test_remove_user_favorite_tag_exc_raise(mocker): # Mock the behavior of TagDAO and g mock_session = mocker.patch("superset.daos.tag.db.session") - mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") + mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") # noqa: N806 mock_tag = mocker.MagicMock(users_favorited=[]) mock_TagDAO.find_by_id.return_value = mock_tag @@ -98,7 +98,7 @@ def test_remove_user_favorite_tag_exc_raise(mocker): # Test that exception is raised when commit fails mock_session.commit.side_effect = Exception("DB Error") - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017, PT011 TagDAO.remove_user_favorite_tag(1) @@ -108,7 +108,7 @@ def test_user_favorite_tag_no_user(mocker): # Mock the behavior of TagDAO and g mocker.patch("superset.daos.tag.db.session") # noqa: F841 - mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") + mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") # noqa: N806 mock_tag = mocker.MagicMock(users_favorited=[]) mock_TagDAO.find_by_id.return_value = mock_tag @@ -125,7 +125,7 @@ def test_user_favorite_tag_exc_raise(mocker): # Mock the behavior of TagDAO and g mock_session = mocker.patch("superset.daos.tag.db.session") - mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") + mock_TagDAO = mocker.patch("superset.daos.tag.TagDAO") # noqa: N806 mock_tag = mocker.MagicMock(users_favorited=[]) mock_TagDAO.find_by_id.return_value = mock_tag @@ -135,7 +135,7 @@ def test_user_favorite_tag_exc_raise(mocker): # Test that exception is raised when commit fails mock_session.commit.side_effect = Exception("DB Error") - with pytest.raises(Exception): + with pytest.raises(Exception): # noqa: B017, PT011 TagDAO.remove_user_favorite_tag(1) diff --git 
diff --git a/tests/unit_tests/dashboards/commands/importers/v1/import_test.py b/tests/unit_tests/dashboards/commands/importers/v1/import_test.py
index c311f1b39..09b3eed42 100644
--- a/tests/unit_tests/dashboards/commands/importers/v1/import_test.py
+++ b/tests/unit_tests/dashboards/commands/importers/v1/import_test.py
@@ -115,7 +115,7 @@ def test_import_dashboard_without_permission(
         import_dashboard(dashboard_config)
     assert (
         str(excinfo.value)
-        == "Dashboard doesn't exist and user doesn't have permission to create dashboards"
+        == "Dashboard doesn't exist and user doesn't have permission to create dashboards"  # noqa: E501
     )

     # Assert that the can write to dashboard was checked
@@ -147,7 +147,7 @@ def test_import_existing_dashboard_without_permission(
         import_dashboard(dashboard_config, overwrite=True)
     assert (
         str(excinfo.value)
-        == "A dashboard already exists and user doesn't have permissions to overwrite it"
+        == "A dashboard already exists and user doesn't have permissions to overwrite it"  # noqa: E501
     )

     # Assert that the can write to dashboard was checked
@@ -161,7 +161,7 @@ def test_import_existing_dashboard_with_permission(
 ) -> None:
     """
     Test importing a dashboard that exists when a user has access permission to that dashboard.
-    """
+    """  # noqa: E501
     mock_can_access = mocker.patch.object(
         security_manager, "can_access", return_value=True
     )
diff --git a/tests/unit_tests/databases/api_test.py b/tests/unit_tests/databases/api_test.py
index 4c28a3c29..e24580cb8 100644
--- a/tests/unit_tests/databases/api_test.py
+++ b/tests/unit_tests/databases/api_test.py
@@ -148,7 +148,7 @@ def test_password_mask(
             "project_id": "black-sanctum-314419",
             "private_key_id": "259b0d419a8f840056158763ff54d8b08f7b8173",
             "private_key": "SECRET",
-            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",
+            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",  # noqa: E501
             "client_id": "114567578578109757129",
             "auth_uri": "https://accounts.google.com/o/oauth2/auth",
             "token_uri": "https://oauth2.googleapis.com/token",
@@ -204,7 +204,7 @@ def test_database_connection(
             "project_id": "black-sanctum-314419",
             "private_key_id": "259b0d419a8f840056158763ff54d8b08f7b8173",
             "private_key": "SECRET",
-            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",
+            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",  # noqa: E501
             "client_id": "114567578578109757129",
             "auth_uri": "https://accounts.google.com/o/oauth2/auth",
             "token_uri": "https://oauth2.googleapis.com/token",
@@ -242,7 +242,7 @@ def test_database_connection(
             "supports_oauth2": True,
         },
         "expose_in_sqllab": True,
-        "extra": '{\n "metadata_params": {},\n "engine_params": {},\n "metadata_cache_timeout": {},\n "schemas_allowed_for_file_upload": []\n}\n',
+        "extra": '{\n "metadata_params": {},\n "engine_params": {},\n "metadata_cache_timeout": {},\n "schemas_allowed_for_file_upload": []\n}\n',  # noqa: E501
         "force_ctas_schema": None,
         "id": 1,
         "impersonate_user": False,
@@ -254,7 +254,7 @@ def test_database_connection(
             "project_id": "black-sanctum-314419",
             "private_key_id": "259b0d419a8f840056158763ff54d8b08f7b8173",
             "private_key": "XXXXXXXXXX",
-            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",
+            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",  # noqa: E501
             "client_id": "114567578578109757129",
             "auth_uri": "https://accounts.google.com/o/oauth2/auth",
             "token_uri": "https://oauth2.googleapis.com/token",
@@ -267,7 +267,7 @@ def test_database_connection(
         "service_account_info": {
             "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
             "auth_uri": "https://accounts.google.com/o/oauth2/auth",
-            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",
+            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",  # noqa: E501
             "client_id": "114567578578109757129",
             "client_x509_cert_url": "https://www.googleapis.com/robot/v1/metadata/x509/google-spreadsheets-demo-servi%40black-sanctum-314419.iam.gserviceaccount.com",
             "private_key": "XXXXXXXXXX",
@@ -373,7 +373,7 @@ def test_update_with_password_mask(
     database = db.session.query(Database).one()
     assert (
         database.encrypted_extra
-        == '{"service_account_info": {"project_id": "yellow-unicorn-314419", "private_key": "SECRET"}}'
+        == '{"service_account_info": {"project_id": "yellow-unicorn-314419", "private_key": "SECRET"}}'  # noqa: E501
     )

@@ -401,7 +401,7 @@ def test_non_zip_import(client: Any, full_api_access: None) -> None:
                 "issue_codes": [
                     {
                         "code": 1010,
-                        "message": "Issue 1010 - Superset encountered an error while running a command.",
+                        "message": "Issue 1010 - Superset encountered an error while running a command.",  # noqa: E501
                     }
                 ]
             },
@@ -442,7 +442,7 @@ def test_delete_ssh_tunnel(
             "project_id": "black-sanctum-314419",
             "private_key_id": "259b0d419a8f840056158763ff54d8b08f7b8173",
             "private_key": "SECRET",
-            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",
+            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",  # noqa: E501
             "client_id": "SSH_TUNNEL_CREDENTIALS_CLIENT",
             "auth_uri": "https://accounts.google.com/o/oauth2/auth",
             "token_uri": "https://oauth2.googleapis.com/token",
@@ -520,7 +520,7 @@ def test_delete_ssh_tunnel_not_found(
             "project_id": "black-sanctum-314419",
             "private_key_id": "259b0d419a8f840056158763ff54d8b08f7b8173",
             "private_key": "SECRET",
-            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",
+            "client_email": "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com",  # noqa: E501
             "client_id": "SSH_TUNNEL_CREDENTIALS_CLIENT",
             "auth_uri": "https://accounts.google.com/o/oauth2/auth",
             "token_uri": "https://oauth2.googleapis.com/token",
@@ -723,9 +723,9 @@ def test_oauth2_happy_path(
     token = db.session.query(DatabaseUserOAuth2Tokens).one()
     assert token.user_id == 1
     assert token.database_id == 1
-    assert token.access_token == "YYY"
+    assert token.access_token == "YYY"  # noqa: S105
     assert token.access_token_expiration == datetime(2024, 1, 1, 1, 0)
-    assert token.refresh_token == "ZZZ"
+    assert token.refresh_token == "ZZZ"  # noqa: S105


 def test_oauth2_multiple_tokens(
@@ -804,8 +804,8 @@ def test_oauth2_multiple_tokens(
     tokens = db.session.query(DatabaseUserOAuth2Tokens).all()
     assert len(tokens) == 1
     token = tokens[0]
-    assert token.access_token == "YYY2"
-    assert token.refresh_token == "ZZZ2"
+    assert token.access_token == "YYY2"  # noqa: S105
+    assert token.refresh_token == "ZZZ2"  # noqa: S105


 def test_oauth2_error(
@@ -1811,7 +1811,7 @@ def test_table_metadata_no_table(
                 "issue_codes": [
                     {
                         "code": 1020,
-                        "message": "Issue 1020 - The submitted payload has the incorrect schema.",
+                        "message": "Issue 1020 - The submitted payload has the incorrect schema.",  # noqa: E501
                     }
                 ],
             },
@@ -1862,7 +1862,7 @@ def test_table_metadata_invalid_database(
                 "issue_codes": [
                     {
                         "code": 1011,
-                        "message": "Issue 1011 - Superset encountered an unexpected error.",
+                        "message": "Issue 1011 - Superset encountered an unexpected error.",  # noqa: E501
                     },
                     {
                         "code": 1036,
@@ -1975,7 +1975,7 @@ def test_table_extra_metadata_no_table(
                 "issue_codes": [
                     {
                         "code": 1020,
-                        "message": "Issue 1020 - The submitted payload has the incorrect schema.",
+                        "message": "Issue 1020 - The submitted payload has the incorrect schema.",  # noqa: E501
                     }
                 ],
             },
@@ -2026,7 +2026,7 @@ def test_table_extra_metadata_invalid_database(
                 "issue_codes": [
                     {
                         "code": 1011,
-                        "message": "Issue 1011 - Superset encountered an unexpected error.",
+                        "message": "Issue 1011 - Superset encountered an unexpected error.",  # noqa: E501
                     },
                     {
                         "code": 1036,
@@ -2084,7 +2084,7 @@ def test_catalogs(
     """
     database = mocker.MagicMock()
     database.get_all_catalog_names.return_value = {"db1", "db2"}
-    DatabaseDAO = mocker.patch("superset.databases.api.DatabaseDAO")
+    DatabaseDAO = mocker.patch("superset.databases.api.DatabaseDAO")  # noqa: N806
     DatabaseDAO.find_by_id.return_value = database

     security_manager = mocker.patch(
@@ -2128,7 +2128,7 @@ def test_catalogs_with_oauth2(
         "tab_id",
         "redirect_uri",
     )
-    DatabaseDAO = mocker.patch("superset.databases.api.DatabaseDAO")
+    DatabaseDAO = mocker.patch("superset.databases.api.DatabaseDAO")  # noqa: N806
     DatabaseDAO.find_by_id.return_value = database

     security_manager = mocker.patch(
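A quick sketch (illustrative only; the class is a stand-in) of the N806 pattern suppressed throughout these tests: N806 flags any non-lowercase variable bound inside a function, but a patched *class* is conventionally kept in CamelCase, so the per-line noqa is the lighter fix.

class DatabaseDAO:
    """Stand-in for the real DAO class."""


def test_alias_keeps_class_casing() -> None:
    # Binding a class to a CamelCase local trips N806 even though the
    # casing is intentional -- the same situation as mocker.patch(...).
    DAO = DatabaseDAO  # noqa: N806 -- deliberately class-cased alias
    dao = DatabaseDAO  # the lowercase spelling the rule would prefer
    assert DAO is dao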
diff --git a/tests/unit_tests/databases/commands/importers/v1/import_test.py b/tests/unit_tests/databases/commands/importers/v1/import_test.py
index 06be5bc16..b255dae28 100644
--- a/tests/unit_tests/databases/commands/importers/v1/import_test.py
+++ b/tests/unit_tests/databases/commands/importers/v1/import_test.py
@@ -112,7 +112,7 @@ def test_import_database_sqlite_invalid(
         _ = import_database(config)
     assert (
         str(excinfo.value)
-        == "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."
+        == "SQLiteDialect_pysqlite cannot be used as a data source for security reasons."  # noqa: E501
     )
     # restore app config
     app.config["PREVENT_UNSAFE_DB_CONNECTIONS"] = True
diff --git a/tests/unit_tests/databases/filters_test.py b/tests/unit_tests/databases/filters_test.py
index a1e51fce2..9e1b1c0ae 100644
--- a/tests/unit_tests/databases/filters_test.py
+++ b/tests/unit_tests/databases/filters_test.py
@@ -65,7 +65,7 @@ def test_database_filter_full_db_access(mocker: MockerFixture) -> None:
     mocker.patch.object(security_manager, "can_access_all_databases", return_value=True)

     engine = create_engine("sqlite://")
-    Session = sessionmaker(bind=engine)
+    Session = sessionmaker(bind=engine)  # noqa: N806
     session = Session()

     query = session.query(Database)
@@ -108,7 +108,7 @@ def test_database_filter(mocker: MockerFixture) -> None:
     )

     engine = create_engine("sqlite://")
-    Session = sessionmaker(bind=engine)
+    Session = sessionmaker(bind=engine)  # noqa: N806
     session = Session()

     query = session.query(Database)
@@ -124,5 +124,5 @@ def test_database_filter(mocker: MockerFixture) -> None:
         str(compiled_query)
         == f"""SELECT dbs.uuid, dbs.created_on, dbs.changed_on, dbs.id, dbs.verbose_name, dbs.database_name, dbs.sqlalchemy_uri, dbs.password, dbs.cache_timeout, dbs.select_as_create_table_as, dbs.expose_in_sqllab, dbs.configuration_method, dbs.allow_run_async, dbs.allow_file_upload, dbs.allow_ctas, dbs.allow_cvas, dbs.allow_dml, dbs.force_ctas_schema, dbs.extra, dbs.encrypted_extra, dbs.impersonate_user, dbs.server_cert, dbs.is_managed_externally, dbs.external_url, dbs.created_by_fk, dbs.changed_by_fk{space}
 FROM dbs{space}
-WHERE '[' || dbs.database_name || '].(id:' || CAST(dbs.id AS VARCHAR) || ')' IN ('[my_db].(id:42)', '[my_other_db].(id:43)') OR dbs.database_name IN ('my_db', 'my_other_db', 'third_db')"""
+WHERE '[' || dbs.database_name || '].(id:' || CAST(dbs.id AS VARCHAR) || ')' IN ('[my_db].(id:42)', '[my_other_db].(id:43)') OR dbs.database_name IN ('my_db', 'my_other_db', 'third_db')"""  # noqa: S608, E501
     )
diff --git a/tests/unit_tests/databases/schema_tests.py b/tests/unit_tests/databases/schema_tests.py
index 864f9d3f0..a61e63000 100644
--- a/tests/unit_tests/databases/schema_tests.py
+++ b/tests/unit_tests/databases/schema_tests.py
@@ -103,14 +103,17 @@ def test_database_parameters_schema_mixin_no_engine(
     try:
         dummy_schema.load(payload)
     except ValidationError as err:
-        assert err.messages == {
-            "_schema": [
-                (
-                    "An engine must be specified when passing individual parameters to "
-                    "a database."
-                ),
-            ]
-        }
+        assert (  # noqa: PT017
+            err.messages
+            == {  # noqa: PT017
+                "_schema": [
+                    (
+                        "An engine must be specified when passing individual parameters to "  # noqa: E501
+                        "a database."
+                    ),
+                ]
+            }
+        )


 def test_database_parameters_schema_mixin_invalid_engine(
@@ -133,7 +136,7 @@ def test_database_parameters_schema_mixin_invalid_engine(
     try:
         dummy_schema.load(payload)
     except ValidationError as err:
-        assert err.messages == {
+        assert err.messages == {  # noqa: PT017
             "_schema": ['Engine "dummy_engine" is not a valid engine.']
         }

@@ -158,14 +161,17 @@ def test_database_parameters_schema_no_mixin(
     try:
         dummy_schema.load(payload)
     except ValidationError as err:
-        assert err.messages == {
-            "_schema": [
-                (
-                    'Engine spec "InvalidEngine" does not support '
-                    "being configured via individual parameters."
-                )
-            ]
-        }
+        assert (  # noqa: PT017
+            err.messages
+            == {  # noqa: PT017
+                "_schema": [
+                    (
+                        'Engine spec "InvalidEngine" does not support '
+                        "being configured via individual parameters."
+                    )
+                ]
+            }
+        )


 def test_database_parameters_schema_mixin_invalid_type(
@@ -188,7 +194,7 @@ def test_database_parameters_schema_mixin_invalid_type(
     try:
         dummy_schema.load(payload)
     except ValidationError as err:
-        assert err.messages == {"port": ["Not a valid integer."]}
+        assert err.messages == {"port": ["Not a valid integer."]}  # noqa: PT017


 def test_rename_encrypted_extra() -> None:
diff --git a/tests/unit_tests/dataframe_test.py b/tests/unit_tests/dataframe_test.py
index f0d9bc31b..0443bc146 100644
--- a/tests/unit_tests/dataframe_test.py
+++ b/tests/unit_tests/dataframe_test.py
@@ -42,7 +42,7 @@ def test_df_to_records() -> None:
     ]


-def test_df_to_records_NaT_type() -> None:
+def test_df_to_records_NaT_type() -> None:  # noqa: N802
     from superset.db_engine_specs import BaseEngineSpec
     from superset.result_set import SupersetResultSet

diff --git a/tests/unit_tests/datasets/dao/dao_tests.py b/tests/unit_tests/datasets/dao/dao_tests.py
index a4632fad3..28aee0ea4 100644
--- a/tests/unit_tests/datasets/dao/dao_tests.py
+++ b/tests/unit_tests/datasets/dao/dao_tests.py
@@ -81,8 +81,8 @@ def test_datasource_find_by_ids_skip_base_filter(session_with_data: Session) ->
     )

     assert result
-    assert [1] == list(map(lambda x: x.id, result))
-    assert ["my_sqla_table"] == list(map(lambda x: x.table_name, result))
+    assert [1] == list(map(lambda x: x.id, result))  # noqa: C417
+    assert ["my_sqla_table"] == list(map(lambda x: x.table_name, result))  # noqa: C417
     assert isinstance(result[0], SqlaTable)

diff --git a/tests/unit_tests/datasource/dao_tests.py b/tests/unit_tests/datasource/dao_tests.py
index af456a118..bf486010a 100644
--- a/tests/unit_tests/datasource/dao_tests.py
+++ b/tests/unit_tests/datasource/dao_tests.py
@@ -67,7 +67,7 @@ def session_with_data(session: Session) -> Iterator[Session]:
     session.add(database)
     session.add(sqla_table)
     session.flush()
-    yield session
+    return session


 def test_get_datasource_sqlatable(session_with_data: Session) -> None:
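The schema_tests hunks above all suppress PT017, which fires when a test asserts on an exception inside an `except` block. A minimal sketch (illustrative only; `parse_port` is hypothetical) of the rule and the `pytest.raises` form it recommends:

import pytest


def parse_port(value: str) -> int:
    if not value.isdigit():
        raise ValueError("Not a valid integer.")
    return int(value)


def test_parse_port_try_except() -> None:
    # The suppressed pattern: if parse_port() never raises, this test
    # silently passes without checking anything.
    try:
        parse_port("abc")
    except ValueError as err:
        assert str(err) == "Not a valid integer."  # noqa: PT017


def test_parse_port_raises() -> None:
    # The preferred pattern: fails loudly when nothing is raised.
    with pytest.raises(ValueError, match="Not a valid integer") as exc_info:
        parse_port("abc")
    assert "integer" in str(exc_info.value)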
diff --git a/tests/unit_tests/db_engine_specs/test_athena.py b/tests/unit_tests/db_engine_specs/test_athena.py
index f954b3d04..8337bfd7a 100644
--- a/tests/unit_tests/db_engine_specs/test_athena.py
+++ b/tests/unit_tests/db_engine_specs/test_athena.py
@@ -43,7 +43,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.athena import AthenaEngineSpec as spec
+    from superset.db_engine_specs.athena import AthenaEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -59,7 +59,7 @@ def test_extract_errors() -> None:
     result = AthenaEngineSpec.extract_errors(Exception(msg))
     assert result == [
         SupersetError(
-            message='Please check your query for syntax errors at or near "from_". Then, try running your query again.',
+            message='Please check your query for syntax errors at or near "from_". Then, try running your query again.',  # noqa: E501
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={
diff --git a/tests/unit_tests/db_engine_specs/test_base.py b/tests/unit_tests/db_engine_specs/test_base.py
index d8e632ce0..2644cd6e6 100644
--- a/tests/unit_tests/db_engine_specs/test_base.py
+++ b/tests/unit_tests/db_engine_specs/test_base.py
@@ -90,7 +90,7 @@ def test_validate_db_uri(mocker: MockerFixture) -> None:
     from superset.db_engine_specs.base import BaseEngineSpec

-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError):  # noqa: PT011
         BaseEngineSpec.validate_database_uri(URL.create("sqlite"))

@@ -164,7 +164,9 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.databricks import DatabricksNativeEngineSpec as spec
+    from superset.db_engine_specs.databricks import (
+        DatabricksNativeEngineSpec as spec,  # noqa: N813
+    )

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_bigquery.py b/tests/unit_tests/db_engine_specs/test_bigquery.py
index 9e3d98ff8..458a6a039 100644
--- a/tests/unit_tests/db_engine_specs/test_bigquery.py
+++ b/tests/unit_tests/db_engine_specs/test_bigquery.py
@@ -344,11 +344,11 @@ def test_parse_error_message() -> None:
     (job ID: ddf30b05-44e8-4fbf-aa29-40bfccaed886)
      -----Query Job SQL Follows-----
     | . | . | . |\n   1:select * from case_detail_all_suites\n   2:LIMIT 1001\n    | . | . | . |
-    """
+    """  # noqa: E501
     from superset.db_engine_specs.bigquery import BigQueryEngineSpec

-    message = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).\n\n(job ID: ddf30b05-44e8-4fbf-aa29-40bfccaed886)\n\n -----Query Job SQL Follows----- \n\n    | . | . | . |\n   1:select * from case_detail_all_suites\n   2:LIMIT 1001\n    | . | . | . |'
-    expected_result = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).'
+    message = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).\n\n(job ID: ddf30b05-44e8-4fbf-aa29-40bfccaed886)\n\n -----Query Job SQL Follows----- \n\n    | . | . | . |\n   1:select * from case_detail_all_suites\n   2:LIMIT 1001\n    | . | . | . |'  # noqa: E501
+    expected_result = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).'  # noqa: E501
     assert (
         str(BigQueryEngineSpec.parse_error_exception(Exception(message)))
         == expected_result
@@ -362,12 +362,12 @@ def test_parse_error_raises_exception() -> None:
     Example errors:
     400 Syntax error: Expected "(" or keyword UNNEST but got "@" at [4:80]
     bigquery error: 400 Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).
-    """
+    """  # noqa: E501
     from superset.db_engine_specs.bigquery import BigQueryEngineSpec

-    message = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).'
+    message = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).'  # noqa: E501
     message_2 = "6"
-    expected_result = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).'
+    expected_result = 'bigquery error: 400 Syntax error: Table "case_detail_all_suites" must be qualified with a dataset (e.g. dataset.table).'  # noqa: E501
     assert (
         str(BigQueryEngineSpec.parse_error_exception(Exception(message)))
         == expected_result
@@ -393,7 +393,9 @@ def test_convert_dttm(
     """
     DB Eng Specs (bigquery): Test conversion to date time
     """
-    from superset.db_engine_specs.bigquery import BigQueryEngineSpec as spec
+    from superset.db_engine_specs.bigquery import (
+        BigQueryEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_clickhouse.py b/tests/unit_tests/db_engine_specs/test_clickhouse.py
index 369dd934c..c4277ce4e 100644
--- a/tests/unit_tests/db_engine_specs/test_clickhouse.py
+++ b/tests/unit_tests/db_engine_specs/test_clickhouse.py
@@ -54,7 +54,9 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.clickhouse import ClickHouseEngineSpec as spec
+    from superset.db_engine_specs.clickhouse import (
+        ClickHouseEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -86,7 +88,9 @@ def test_connect_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.clickhouse import ClickHouseEngineSpec as spec
+    from superset.db_engine_specs.clickhouse import (
+        ClickHouseEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -200,7 +204,9 @@ def test_connect_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.clickhouse import ClickHouseConnectEngineSpec as spec
+    from superset.db_engine_specs.clickhouse import (
+        ClickHouseConnectEngineSpec as spec,  # noqa: N813
+    )

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

@@ -213,7 +219,9 @@ def test_connect_get_column_spec(
     ],
 )
 def test_connect_make_label_compatible(column_name: str, expected_result: str) -> None:
-    from superset.db_engine_specs.clickhouse import ClickHouseConnectEngineSpec as spec
+    from superset.db_engine_specs.clickhouse import (
+        ClickHouseConnectEngineSpec as spec,  # noqa: N813
+    )

     label = spec.make_label_compatible(column_name)
     assert label == expected_result
diff --git a/tests/unit_tests/db_engine_specs/test_couchbase.py b/tests/unit_tests/db_engine_specs/test_couchbase.py
index 99e509db3..dcc3c386d 100644
--- a/tests/unit_tests/db_engine_specs/test_couchbase.py
+++ b/tests/unit_tests/db_engine_specs/test_couchbase.py
@@ -62,7 +62,9 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.couchbase import CouchbaseEngineSpec as spec
+    from superset.db_engine_specs.couchbase import (
+        CouchbaseEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -88,6 +90,8 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.couchbase import CouchbaseEngineSpec as spec
+    from superset.db_engine_specs.couchbase import (
+        CouchbaseEngineSpec as spec,  # noqa: N813
+    )

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)
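The convert_dttm tests alias every engine spec class as `spec`, which trips N813 (CamelCase imported under a lowercase name). A minimal stdlib sketch (illustrative only) of the rule:

from datetime import datetime as dt  # lowercase-to-lowercase alias: no warning
from decimal import Decimal as dec  # noqa: N813 -- CamelCase bound to lowercase

assert dec("1.5") + dec("0.5") == dec("2")
assert dt(2019, 1, 2).year == 2019

Renaming the alias across dozens of parametrized tests would be a much larger diff, hence the per-import suppressions here.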
diff --git a/tests/unit_tests/db_engine_specs/test_crate.py b/tests/unit_tests/db_engine_specs/test_crate.py
index 2df50c6dd..ba36dda1b 100644
--- a/tests/unit_tests/db_engine_specs/test_crate.py
+++ b/tests/unit_tests/db_engine_specs/test_crate.py
@@ -68,6 +68,6 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.crate import CrateEngineSpec as spec
+    from superset.db_engine_specs.crate import CrateEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_crdb.py b/tests/unit_tests/db_engine_specs/test_crdb.py
index 507088b83..1ee661300 100644
--- a/tests/unit_tests/db_engine_specs/test_crdb.py
+++ b/tests/unit_tests/db_engine_specs/test_crdb.py
@@ -37,6 +37,8 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.cockroachdb import CockroachDbEngineSpec as spec
+    from superset.db_engine_specs.cockroachdb import (
+        CockroachDbEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_databend.py b/tests/unit_tests/db_engine_specs/test_databend.py
index 8252f96e2..b1c25bbbe 100644
--- a/tests/unit_tests/db_engine_specs/test_databend.py
+++ b/tests/unit_tests/db_engine_specs/test_databend.py
@@ -53,7 +53,9 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.databend import DatabendEngineSpec as spec
+    from superset.db_engine_specs.databend import (
+        DatabendEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -115,7 +117,9 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.databend import DatabendConnectEngineSpec as spec
+    from superset.db_engine_specs.databend import (
+        DatabendConnectEngineSpec as spec,  # noqa: N813
+    )

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

@@ -128,7 +132,9 @@ def test_get_column_spec(
     ],
 )
 def test_make_label_compatible(column_name: str, expected_result: str) -> None:
-    from superset.db_engine_specs.databend import DatabendConnectEngineSpec as spec
+    from superset.db_engine_specs.databend import (
+        DatabendConnectEngineSpec as spec,  # noqa: N813
+    )

     label = spec.make_label_compatible(column_name)
     assert label == expected_result
diff --git a/tests/unit_tests/db_engine_specs/test_databricks.py b/tests/unit_tests/db_engine_specs/test_databricks.py
index 0c9e3843d..95403780e 100644
--- a/tests/unit_tests/db_engine_specs/test_databricks.py
+++ b/tests/unit_tests/db_engine_specs/test_databricks.py
@@ -191,7 +191,7 @@ def test_extract_errors() -> None:
                 "issue_codes": [
                     {
                         "code": 1002,
-                        "message": "Issue 1002 - The database returned an unexpected error.",
+                        "message": "Issue 1002 - The database returned an unexpected error.",  # noqa: E501
                     }
                 ],
             },
@@ -218,7 +218,7 @@ def test_extract_errors_with_context() -> None:
                 "issue_codes": [
                     {
                         "code": 1002,
-                        "message": "Issue 1002 - The database returned an unexpected error.",
+                        "message": "Issue 1002 - The database returned an unexpected error.",  # noqa: E501
                     }
                 ],
             },
@@ -242,7 +242,9 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.databricks import DatabricksNativeEngineSpec as spec
+    from superset.db_engine_specs.databricks import (
+        DatabricksNativeEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_denodo.py b/tests/unit_tests/db_engine_specs/test_denodo.py
index 31e9c0dea..9da7874ef 100644
--- a/tests/unit_tests/db_engine_specs/test_denodo.py
+++ b/tests/unit_tests/db_engine_specs/test_denodo.py
@@ -22,7 +22,7 @@ import pytest
 from sqlalchemy import column, types
 from sqlalchemy.engine.url import make_url

-from superset.db_engine_specs.denodo import DenodoEngineSpec as spec
+from superset.db_engine_specs.denodo import DenodoEngineSpec as spec  # noqa: N813
 from superset.utils.core import GenericDataType
 from tests.unit_tests.db_engine_specs.utils import (
     assert_column_spec,
diff --git a/tests/unit_tests/db_engine_specs/test_doris.py b/tests/unit_tests/db_engine_specs/test_doris.py
index d7444f8d2..ced1a6862 100644
--- a/tests/unit_tests/db_engine_specs/test_doris.py
+++ b/tests/unit_tests/db_engine_specs/test_doris.py
@@ -74,7 +74,7 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.doris import DorisEngineSpec as spec
+    from superset.db_engine_specs.doris import DorisEngineSpec as spec  # noqa: N813

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_dremio.py b/tests/unit_tests/db_engine_specs/test_dremio.py
index 487f5a9b7..88eae7647 100644
--- a/tests/unit_tests/db_engine_specs/test_dremio.py
+++ b/tests/unit_tests/db_engine_specs/test_dremio.py
@@ -40,7 +40,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.dremio import DremioEngineSpec as spec
+    from superset.db_engine_specs.dremio import DremioEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_drill.py b/tests/unit_tests/db_engine_specs/test_drill.py
index 7ff4aa757..eb3414ea5 100644
--- a/tests/unit_tests/db_engine_specs/test_drill.py
+++ b/tests/unit_tests/db_engine_specs/test_drill.py
@@ -106,7 +106,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.drill import DrillEngineSpec as spec
+    from superset.db_engine_specs.drill import DrillEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_druid.py b/tests/unit_tests/db_engine_specs/test_druid.py
index 178c8d0e9..4d6f84e1e 100644
--- a/tests/unit_tests/db_engine_specs/test_druid.py
+++ b/tests/unit_tests/db_engine_specs/test_druid.py
@@ -39,7 +39,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.druid import DruidEngineSpec as spec
+    from superset.db_engine_specs.druid import DruidEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -51,11 +51,11 @@ def test_convert_dttm(
         ("PT5M", "TIME_FLOOR(CAST({col} AS TIMESTAMP), 'PT5M')"),
         (
             "P1W/1970-01-03T00:00:00Z",
-            "TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST(col AS TIMESTAMP), 'P1D', 1), 'P1W'), 'P1D', 5)",
+            "TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST(col AS TIMESTAMP), 'P1D', 1), 'P1W'), 'P1D', 5)",  # noqa: E501
         ),
         (
             "1969-12-28T00:00:00Z/P1W",
-            "TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST(col AS TIMESTAMP), 'P1D', 1), 'P1W'), 'P1D', -1)",
+            "TIME_SHIFT(TIME_FLOOR(TIME_SHIFT(CAST(col AS TIMESTAMP), 'P1D', 1), 'P1W'), 'P1D', -1)",  # noqa: E501
         ),
     ],
 )
diff --git a/tests/unit_tests/db_engine_specs/test_duckdb.py b/tests/unit_tests/db_engine_specs/test_duckdb.py
index 7fc3416c8..9bbd38e5d 100644
--- a/tests/unit_tests/db_engine_specs/test_duckdb.py
+++ b/tests/unit_tests/db_engine_specs/test_duckdb.py
@@ -40,7 +40,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.duckdb import DuckDBEngineSpec as spec
+    from superset.db_engine_specs.duckdb import DuckDBEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -100,16 +100,16 @@ def test_md_build_sqlalchemy_uri() -> None:

     # No access token provided, throw ValueError
     parameters = DuckDBParametersType(database="my_db")
-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError):  # noqa: PT011
         MotherDuckEngineSpec.build_sqlalchemy_uri(parameters)

     # No database provided, default to "md:"
-    parameters = DuckDBParametersType(access_token="token")
+    parameters = DuckDBParametersType(access_token="token")  # noqa: S106
     uri = MotherDuckEngineSpec.build_sqlalchemy_uri(parameters)
     assert "duckdb:///md:?motherduck_token=token"

     # Database and access_token provided
-    parameters = DuckDBParametersType(database="my_db", access_token="token")
+    parameters = DuckDBParametersType(database="my_db", access_token="token")  # noqa: S106
     uri = MotherDuckEngineSpec.build_sqlalchemy_uri(parameters)
     assert "duckdb:///md:my_db?motherduck_token=token" == uri

@@ -126,4 +126,4 @@ def test_get_parameters_from_uri() -> None:
     parameters = DuckDBEngineSpec.get_parameters_from_uri(uri)

     assert parameters["database"] == "md:my_db"
-    assert parameters["access_token"] == "token"
+    assert parameters["access_token"] == "token"  # noqa: S105
diff --git a/tests/unit_tests/db_engine_specs/test_dynamodb.py b/tests/unit_tests/db_engine_specs/test_dynamodb.py
index df439a38e..71b98fd4c 100644
--- a/tests/unit_tests/db_engine_specs/test_dynamodb.py
+++ b/tests/unit_tests/db_engine_specs/test_dynamodb.py
@@ -37,6 +37,8 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.dynamodb import DynamoDBEngineSpec as spec
+    from superset.db_engine_specs.dynamodb import (
+        DynamoDBEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_elasticsearch.py b/tests/unit_tests/db_engine_specs/test_elasticsearch.py
index 36e072708..2c003aba5 100644
--- a/tests/unit_tests/db_engine_specs/test_elasticsearch.py
+++ b/tests/unit_tests/db_engine_specs/test_elasticsearch.py
@@ -53,7 +53,9 @@ def test_elasticsearch_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.elasticsearch import ElasticSearchEngineSpec as spec
+    from superset.db_engine_specs.elasticsearch import (
+        ElasticSearchEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm, db_extra)

@@ -70,7 +72,9 @@ def test_opendistro_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.elasticsearch import OpenDistroEngineSpec as spec
+    from superset.db_engine_specs.elasticsearch import (
+        OpenDistroEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
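The MotherDuck hunks above suppress S105/S106, the "hardcoded secret" checks, because the token is a dummy fixture value. A minimal sketch (illustrative only; `connect` is hypothetical) of when each code fires:

def connect(database: str, access_token: str) -> str:
    # Builds a MotherDuck-style URI from its parts.
    return f"duckdb:///md:{database}?motherduck_token={access_token}"


def test_connect_builds_uri() -> None:
    # S106: a string literal passed to a secret-looking keyword argument.
    uri = connect("my_db", access_token="token")  # noqa: S106 -- dummy secret
    assert uri == "duckdb:///md:my_db?motherduck_token=token"


def test_round_trip_token() -> None:
    # S105: a string literal assigned to a secret-looking name.
    access_token = "token"  # noqa: S105 -- dummy secret for the test
    assert connect("db", access_token).endswith(access_token)

In tests the values are throwaway, so suppression is the right call; in production code the fix would be to read the secret from configuration instead.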
diff --git a/tests/unit_tests/db_engine_specs/test_firebird.py b/tests/unit_tests/db_engine_specs/test_firebird.py
index 7e5fc187a..752c26f45 100644
--- a/tests/unit_tests/db_engine_specs/test_firebird.py
+++ b/tests/unit_tests/db_engine_specs/test_firebird.py
@@ -99,6 +99,8 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.firebird import FirebirdEngineSpec as spec
+    from superset.db_engine_specs.firebird import (
+        FirebirdEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_firebolt.py b/tests/unit_tests/db_engine_specs/test_firebolt.py
index b3fcdb582..5f417d06c 100644
--- a/tests/unit_tests/db_engine_specs/test_firebolt.py
+++ b/tests/unit_tests/db_engine_specs/test_firebolt.py
@@ -45,7 +45,9 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.firebolt import FireboltEngineSpec as spec
+    from superset.db_engine_specs.firebolt import (
+        FireboltEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_gsheets.py b/tests/unit_tests/db_engine_specs/test_gsheets.py
index 4e17054db..5405f7420 100644
--- a/tests/unit_tests/db_engine_specs/test_gsheets.py
+++ b/tests/unit_tests/db_engine_specs/test_gsheets.py
@@ -120,58 +120,61 @@ def test_validate_parameters_catalog(
     }
     errors = GSheetsEngineSpec.validate_parameters(properties)  # ignore: type
-    assert errors == [
-        SupersetError(
-            message=(
-                "The URL could not be identified. Please check for typos "
-                "and make sure that ‘Type of Google Sheets allowed’ "
-                "selection matches the input."
-            ),
-            error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
-            level=ErrorLevel.WARNING,
-            extra={
-                "catalog": {
-                    "idx": 0,
-                    "url": True,
+    assert (
+        errors
+        == [
+            SupersetError(
+                message=(
+                    "The URL could not be identified. Please check for typos "
+                    "and make sure that ‘Type of Google Sheets allowed’ "
+                    "selection matches the input."
+                ),
+                error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
+                level=ErrorLevel.WARNING,
+                extra={
+                    "catalog": {
+                        "idx": 0,
+                        "url": True,
+                    },
+                    "issue_codes": [
+                        {
+                            "code": 1003,
+                            "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",  # noqa: E501
+                        },
+                        {
+                            "code": 1005,
+                            "message": "Issue 1005 - The table was deleted or renamed in the database.",  # noqa: E501
+                        },
+                    ],
                 },
-                "issue_codes": [
-                    {
-                        "code": 1003,
-                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
-                    },
-                    {
-                        "code": 1005,
-                        "message": "Issue 1005 - The table was deleted or renamed in the database.",
-                    },
-                ],
-            },
-        ),
-        SupersetError(
-            message=(
-                "The URL could not be identified. Please check for typos "
-                "and make sure that ‘Type of Google Sheets allowed’ "
-                "selection matches the input."
             ),
-            error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
-            level=ErrorLevel.WARNING,
-            extra={
-                "catalog": {
-                    "idx": 2,
-                    "url": True,
+            SupersetError(
+                message=(
+                    "The URL could not be identified. Please check for typos "
+                    "and make sure that ‘Type of Google Sheets allowed’ "
+                    "selection matches the input."
+                ),
+                error_type=SupersetErrorType.TABLE_DOES_NOT_EXIST_ERROR,
+                level=ErrorLevel.WARNING,
+                extra={
+                    "catalog": {
+                        "idx": 2,
+                        "url": True,
+                    },
+                    "issue_codes": [
+                        {
+                            "code": 1003,
+                            "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",  # noqa: E501
+                        },
+                        {
+                            "code": 1005,
+                            "message": "Issue 1005 - The table was deleted or renamed in the database.",  # noqa: E501
+                        },
+                    ],
                 },
-                "issue_codes": [
-                    {
-                        "code": 1003,
-                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
-                    },
-                    {
-                        "code": 1005,
-                        "message": "Issue 1005 - The table was deleted or renamed in the database.",
-                    },
-                ],
-            },
-        ),
-    ]
+            ),
+        ]
+    )

     create_engine.assert_called_with(
         "gsheets://",
@@ -229,11 +232,11 @@ def test_validate_parameters_catalog_and_credentials(
                 "issue_codes": [
                     {
                         "code": 1003,
-                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
+                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",  # noqa: E501
                     },
                     {
                         "code": 1005,
-                        "message": "Issue 1005 - The table was deleted or renamed in the database.",
+                        "message": "Issue 1005 - The table was deleted or renamed in the database.",  # noqa: E501
                     },
                 ],
             },
@@ -502,7 +505,7 @@ def test_get_url_for_impersonation_access_token() -> None:
             url=make_url("gsheets://"),
             impersonate_user=True,
             username=None,
-            access_token="access-token",
+            access_token="access-token",  # noqa: S106
         )
         == make_url("gsheets://?access_token=access-token")
     )
diff --git a/tests/unit_tests/db_engine_specs/test_hana.py b/tests/unit_tests/db_engine_specs/test_hana.py
index 996c5f6e0..ade424062 100644
--- a/tests/unit_tests/db_engine_specs/test_hana.py
+++ b/tests/unit_tests/db_engine_specs/test_hana.py
@@ -30,7 +30,7 @@ from tests.unit_tests.fixtures.common import dttm  # noqa: F401
         ("Date", "TO_DATE('2019-01-02', 'YYYY-MM-DD')"),
         (
             "TimeStamp",
-            "TO_TIMESTAMP('2019-01-02T03:04:05.678900', 'YYYY-MM-DD\"T\"HH24:MI:SS.ff6')",
+            "TO_TIMESTAMP('2019-01-02T03:04:05.678900', 'YYYY-MM-DD\"T\"HH24:MI:SS.ff6')",  # noqa: E501
         ),
         ("UnknownType", None),
     ],
@@ -40,6 +40,6 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.hana import HanaEngineSpec as spec
+    from superset.db_engine_specs.hana import HanaEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_hive.py b/tests/unit_tests/db_engine_specs/test_hive.py
index ce2b5a8e0..e534259d2 100644
--- a/tests/unit_tests/db_engine_specs/test_hive.py
+++ b/tests/unit_tests/db_engine_specs/test_hive.py
@@ -42,7 +42,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.hive import HiveEngineSpec as spec
+    from superset.db_engine_specs.hive import HiveEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_impala.py b/tests/unit_tests/db_engine_specs/test_impala.py
index 543db2436..e85a2ce78 100644
--- a/tests/unit_tests/db_engine_specs/test_impala.py
+++ b/tests/unit_tests/db_engine_specs/test_impala.py
@@ -21,7 +21,7 @@ from unittest.mock import Mock, patch

 import pytest

-from superset.db_engine_specs.impala import ImpalaEngineSpec as spec
+from superset.db_engine_specs.impala import ImpalaEngineSpec as spec  # noqa: N813
 from superset.models.core import Database
 from superset.models.sql_lab import Query
 from tests.unit_tests.db_engine_specs.utils import assert_convert_dttm
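Most suppressions in this section are E501 on strings that cannot be wrapped without changing test data. A small sketch (illustrative only; the email literal is taken from the hunks above) of how a trailing noqa works, including the comma-separated form used in several hunks:

SERVICE_EMAIL = "google-spreadsheets-demo-servi@black-sanctum-314419.iam.gserviceaccount.com"  # noqa: E501

# Several codes can be silenced on one line, as in "# noqa: S608, E501".
query = "SELECT * FROM logs WHERE email = '%s'" % SERVICE_EMAIL  # noqa: S608

The comment suppresses only the named rules on that one line, so the rest of the file still gets full linting.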
diff --git a/tests/unit_tests/db_engine_specs/test_kusto.py b/tests/unit_tests/db_engine_specs/test_kusto.py
index 68330ed2e..a0593f7e3 100644
--- a/tests/unit_tests/db_engine_specs/test_kusto.py
+++ b/tests/unit_tests/db_engine_specs/test_kusto.py
@@ -126,7 +126,7 @@ def test_kql_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.kusto import KustoKqlEngineSpec as spec
+    from superset.db_engine_specs.kusto import KustoKqlEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -146,6 +146,6 @@ def test_sql_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.kusto import KustoSqlEngineSpec as spec
+    from superset.db_engine_specs.kusto import KustoSqlEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_kylin.py b/tests/unit_tests/db_engine_specs/test_kylin.py
index 4e29c4b4b..3392be77d 100644
--- a/tests/unit_tests/db_engine_specs/test_kylin.py
+++ b/tests/unit_tests/db_engine_specs/test_kylin.py
@@ -37,6 +37,6 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.kylin import KylinEngineSpec as spec
+    from superset.db_engine_specs.kylin import KylinEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_mssql.py b/tests/unit_tests/db_engine_specs/test_mssql.py
index 0a3760a47..affd758f0 100644
--- a/tests/unit_tests/db_engine_specs/test_mssql.py
+++ b/tests/unit_tests/db_engine_specs/test_mssql.py
@@ -57,7 +57,7 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.mssql import MssqlEngineSpec as spec
+    from superset.db_engine_specs.mssql import MssqlEngineSpec as spec  # noqa: N813

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

@@ -128,7 +128,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.mssql import MssqlEngineSpec as spec
+    from superset.db_engine_specs.mssql import MssqlEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -307,7 +307,7 @@ Unable to connect: Adaptive Server is unavailable or does not exist (localhost_)
                 "issue_codes": [
                     {
                         "code": 1007,
-                        "message": "Issue 1007 - The hostname provided can't be resolved.",
+                        "message": "Issue 1007 - The hostname provided can't be resolved.",  # noqa: E501
                     }
                 ],
             },
@@ -367,7 +367,7 @@ Net-Lib error during Operation timed out (60)
                 "issue_codes": [
                     {
                         "code": 1009,
-                        "message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
+                        "message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",  # noqa: E501
                     }
                 ],
             },
@@ -400,7 +400,7 @@ Net-Lib error during Operation timed out (60)
                 "issue_codes": [
                     {
                         "code": 1009,
-                        "message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",
+                        "message": "Issue 1009 - The host might be down, and can't be reached on the provided port.",  # noqa: E501
                     }
                 ],
             },
@@ -420,28 +420,31 @@ Adaptive Server connection failed (mssqldb.cxiotftzsypc.us-west-2.rds.amazonaws.
     result = MssqlEngineSpec.extract_errors(
         Exception(msg), context={"username": "testuser", "database": "testdb"}
     )
-    assert result == [
-        SupersetError(
-            message='Either the username "testuser", password, or database name "testdb" is incorrect.',
-            error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR,
-            level=ErrorLevel.ERROR,
-            extra={
-                "engine_name": "Microsoft SQL Server",
-                "issue_codes": [
-                    {
-                        "code": 1014,
-                        "message": "Issue 1014 - Either the username or "
-                        "the password is wrong.",
-                    },
-                    {
-                        "code": 1015,
-                        "message": "Issue 1015 - Either the database is "
-                        "spelled incorrectly or does not exist.",
-                    },
-                ],
-            },
-        )
-    ]
+    assert (
+        result
+        == [
+            SupersetError(
+                message='Either the username "testuser", password, or database name "testdb" is incorrect.',  # noqa: E501
+                error_type=SupersetErrorType.CONNECTION_ACCESS_DENIED_ERROR,
+                level=ErrorLevel.ERROR,
+                extra={
+                    "engine_name": "Microsoft SQL Server",
+                    "issue_codes": [
+                        {
+                            "code": 1014,
+                            "message": "Issue 1014 - Either the username or "
+                            "the password is wrong.",
+                        },
+                        {
+                            "code": 1015,
+                            "message": "Issue 1015 - Either the database is "
+                            "spelled incorrectly or does not exist.",
+                        },
+                    ],
+                },
+            )
+        ]
+    )


 @pytest.mark.parametrize(
@@ -453,6 +456,6 @@ Adaptive Server connection failed (mssqldb.cxiotftzsypc.us-west-2.rds.amazonaws.
     ],
 )
 def test_denormalize_name(name: str, expected_result: str):
-    from superset.db_engine_specs.mssql import MssqlEngineSpec as spec
+    from superset.db_engine_specs.mssql import MssqlEngineSpec as spec  # noqa: N813

     assert spec.denormalize_name(mssql.dialect(), name) == expected_result
diff --git a/tests/unit_tests/db_engine_specs/test_mysql.py b/tests/unit_tests/db_engine_specs/test_mysql.py
index 0e48450dc..649af5c7c 100644
--- a/tests/unit_tests/db_engine_specs/test_mysql.py
+++ b/tests/unit_tests/db_engine_specs/test_mysql.py
@@ -77,7 +77,7 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.mysql import MySQLEngineSpec as spec
+    from superset.db_engine_specs.mysql import MySQLEngineSpec as spec  # noqa: N813

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

@@ -98,7 +98,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.mysql import MySQLEngineSpec as spec
+    from superset.db_engine_specs.mysql import MySQLEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -119,7 +119,7 @@ def test_validate_database_uri(sqlalchemy_uri: str, error: bool) -> None:
     url = make_url(sqlalchemy_uri)

     if error:
-        with pytest.raises(ValueError):
+        with pytest.raises(ValueError):  # noqa: PT011
             MySQLEngineSpec.validate_database_uri(url)
         return
     MySQLEngineSpec.validate_database_uri(url)
@@ -255,7 +255,7 @@ def test_column_type_mutator(
     description: list[Any],
     expected_result: list[tuple[Any, ...]],
 ):
-    from superset.db_engine_specs.mysql import MySQLEngineSpec as spec
+    from superset.db_engine_specs.mysql import MySQLEngineSpec as spec  # noqa: N813

     mock_cursor = Mock()
     mock_cursor.fetchall.return_value = data
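PT011 keeps appearing on `pytest.raises(ValueError)` because ValueError is easy to raise by accident; the un-suppressed alternative pins the message with `match=`. A minimal sketch (illustrative only; `reject_memory_db` is hypothetical):

import pytest
from sqlalchemy.engine.url import make_url


def reject_memory_db(uri: str) -> None:
    if make_url(uri).database == ":memory:":
        raise ValueError("In-memory SQLite is not allowed.")


def test_reject_memory_db() -> None:
    # match= makes the test fail if a *different* ValueError escapes.
    with pytest.raises(ValueError, match="not allowed"):
        reject_memory_db("sqlite:///:memory:")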
diff --git a/tests/unit_tests/db_engine_specs/test_oceanbase.py b/tests/unit_tests/db_engine_specs/test_oceanbase.py
index 87fddb63e..a9288f631 100644
--- a/tests/unit_tests/db_engine_specs/test_oceanbase.py
+++ b/tests/unit_tests/db_engine_specs/test_oceanbase.py
@@ -54,6 +54,8 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.oceanbase import OceanBaseEngineSpec as spec
+    from superset.db_engine_specs.oceanbase import (
+        OceanBaseEngineSpec as spec,  # noqa: N813
+    )

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_ocient.py b/tests/unit_tests/db_engine_specs/test_ocient.py
index a58f31d24..445a118b7 100644
--- a/tests/unit_tests/db_engine_specs/test_ocient.py
+++ b/tests/unit_tests/db_engine_specs/test_ocient.py
@@ -45,7 +45,7 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1012,
-                        "message": "Issue 1012 - The username provided when connecting to a database is not valid.",
+                        "message": "Issue 1012 - The username provided when connecting to a database is not valid.",  # noqa: E501
                     }
                 ],
             },
@@ -54,7 +54,7 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
     (
         "The userid/password combination was not valid (Incorrect password for user)",
         SupersetError(
-            message="The user/password combination is not valid (Incorrect password for user).",
+            message="The user/password combination is not valid (Incorrect password for user).",  # noqa: E501
             error_type=SupersetErrorType.CONNECTION_INVALID_PASSWORD_ERROR,
             level=ErrorLevel.ERROR,
             extra={
@@ -62,7 +62,7 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1013,
-                        "message": "Issue 1013 - The password provided when connecting to a database is not valid.",
+                        "message": "Issue 1013 - The password provided when connecting to a database is not valid.",  # noqa: E501
                     }
                 ],
             },
@@ -79,7 +79,7 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1015,
-                        "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.",
+                        "message": "Issue 1015 - Either the database is spelled incorrectly or does not exist.",  # noqa: E501
                     }
                 ],
             },
@@ -96,7 +96,7 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1007,
-                        "message": "Issue 1007 - The hostname provided can't be resolved.",
+                        "message": "Issue 1007 - The hostname provided can't be resolved.",  # noqa: E501
                     }
                 ],
             },
@@ -120,7 +120,7 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
         ),
     ),
     (
-        "An invalid connection string attribute was specified (failed to decrypt cipher text)",
+        "An invalid connection string attribute was specified (failed to decrypt cipher text)",  # noqa: E501
         SupersetError(
             message="Invalid Connection String: Expecting String of the form 'ocient://user:pass@host:port/database'.",
             error_type=SupersetErrorType.GENERIC_DB_ENGINE_ERROR,
@@ -130,16 +130,16 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1002,
-                        "message": "Issue 1002 - The database returned an unexpected error.",
+                        "message": "Issue 1002 - The database returned an unexpected error.",  # noqa: E501
                     }
                 ],
             },
         ),
     ),
     (
-        "There is a syntax error in your statement (extraneous input 'foo bar baz' expecting {, 'trace', 'using'})",
+        "There is a syntax error in your statement (extraneous input 'foo bar baz' expecting {, 'trace', 'using'})",  # noqa: E501
        SupersetError(
-            message="Syntax Error: extraneous input \"foo bar baz\" expecting \"{, 'trace', 'using'}",
+            message="Syntax Error: extraneous input \"foo bar baz\" expecting \"{, 'trace', 'using'}",  # noqa: E501
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={
@@ -154,9 +154,9 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
         ),
     ),
     (
-        "There is a syntax error in your statement (mismatched input 'to' expecting {, 'trace', 'using'})",
+        "There is a syntax error in your statement (mismatched input 'to' expecting {, 'trace', 'using'})",  # noqa: E501
         SupersetError(
-            message="Syntax Error: mismatched input \"to\" expecting \"{, 'trace', 'using'}",
+            message="Syntax Error: mismatched input \"to\" expecting \"{, 'trace', 'using'}",  # noqa: E501
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={
@@ -181,11 +181,11 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1003,
-                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
+                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",  # noqa: E501
                     },
                     {
                         "code": 1005,
-                        "message": "Issue 1005 - The table was deleted or renamed in the database.",
+                        "message": "Issue 1005 - The table was deleted or renamed in the database.",  # noqa: E501
                     },
                 ],
             },
@@ -202,11 +202,11 @@ MARSHALED_OCIENT_ERRORS: list[tuple[str, SupersetError]] = [
                 "issue_codes": [
                     {
                         "code": 1003,
-                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",
+                        "message": "Issue 1003 - There is a syntax error in the SQL query. Perhaps there was a misspelling or a typo.",  # noqa: E501
                     },
                     {
                         "code": 1004,
-                        "message": "Issue 1004 - The column was deleted or renamed in the database.",
+                        "message": "Issue 1004 - The column was deleted or renamed in the database.",  # noqa: E501
                     },
                 ],
             },
diff --git a/tests/unit_tests/db_engine_specs/test_oracle.py b/tests/unit_tests/db_engine_specs/test_oracle.py
index 745a8c44a..376b29bbf 100644
--- a/tests/unit_tests/db_engine_specs/test_oracle.py
+++ b/tests/unit_tests/db_engine_specs/test_oracle.py
@@ -100,7 +100,7 @@ def test_fetch_data() -> None:
         ("DateTime", """TO_DATE('2019-01-02T03:04:05', 'YYYY-MM-DD"T"HH24:MI:SS')"""),
         (
             "TimeStamp",
-            """TO_TIMESTAMP('2019-01-02T03:04:05.678900', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""",
+            """TO_TIMESTAMP('2019-01-02T03:04:05.678900', 'YYYY-MM-DD"T"HH24:MI:SS.ff6')""",  # noqa: E501
         ),
         ("Other", None),
     ],
@@ -110,7 +110,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.oracle import OracleEngineSpec as spec
+    from superset.db_engine_specs.oracle import OracleEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -124,6 +124,6 @@ def test_convert_dttm(
     ],
 )
 def test_denormalize_name(name: str, expected_result: str):
-    from superset.db_engine_specs.oracle import OracleEngineSpec as spec
+    from superset.db_engine_specs.oracle import OracleEngineSpec as spec  # noqa: N813

     assert spec.denormalize_name(oracle.dialect(), name) == expected_result
diff --git a/tests/unit_tests/db_engine_specs/test_pinot.py b/tests/unit_tests/db_engine_specs/test_pinot.py
index 72c826781..9d83fc2b1 100644
--- a/tests/unit_tests/db_engine_specs/test_pinot.py
+++ b/tests/unit_tests/db_engine_specs/test_pinot.py
@@ -26,7 +26,7 @@ from sqlalchemy import column
         ("PT1S", "CAST(DATE_TRUNC('second', CAST(col AS TIMESTAMP)) AS TIMESTAMP)"),
         (
             "PT5M",
-            "CAST(ROUND(DATE_TRUNC('minute', CAST(col AS TIMESTAMP)), 300000) AS TIMESTAMP)",
+            "CAST(ROUND(DATE_TRUNC('minute', CAST(col AS TIMESTAMP)), 300000) AS TIMESTAMP)",  # noqa: E501
         ),
         ("P1W", "CAST(DATE_TRUNC('week', CAST(col AS TIMESTAMP)) AS TIMESTAMP)"),
         ("P1M", "CAST(DATE_TRUNC('month', CAST(col AS TIMESTAMP)) AS TIMESTAMP)"),
@@ -38,7 +38,7 @@ def test_timegrain_expressions(time_grain: str, expected_result: str) -> None:
     """
     DB Eng Specs (pinot): Test time grain expressions
     """
-    from superset.db_engine_specs.pinot import PinotEngineSpec as spec
+    from superset.db_engine_specs.pinot import PinotEngineSpec as spec  # noqa: N813

     actual = str(
         spec.get_timestamp_expr(col=column("col"), pdf=None, time_grain=time_grain)
@@ -47,7 +47,7 @@ def test_timegrain_expressions(time_grain: str, expected_result: str) -> None:


 def test_extras_without_ssl() -> None:
-    from superset.db_engine_specs.pinot import PinotEngineSpec as spec
+    from superset.db_engine_specs.pinot import PinotEngineSpec as spec  # noqa: N813
     from tests.integration_tests.fixtures.database import default_db_extra

     database = mock.Mock()
diff --git a/tests/unit_tests/db_engine_specs/test_postgres.py b/tests/unit_tests/db_engine_specs/test_postgres.py
index 08e2034ad..b93d70f5f 100644
--- a/tests/unit_tests/db_engine_specs/test_postgres.py
+++ b/tests/unit_tests/db_engine_specs/test_postgres.py
@@ -24,7 +24,7 @@ from sqlalchemy import column, types
 from sqlalchemy.dialects.postgresql import DOUBLE_PRECISION, ENUM, JSON
 from sqlalchemy.engine.url import make_url

-from superset.db_engine_specs.postgres import PostgresEngineSpec as spec
+from superset.db_engine_specs.postgres import PostgresEngineSpec as spec  # noqa: N813
 from superset.exceptions import SupersetSecurityException
 from superset.utils.core import GenericDataType
 from tests.unit_tests.db_engine_specs.utils import (
@@ -121,7 +121,7 @@ def test_get_schema_from_engine_params() -> None:
         == "secret"
     )

-    with pytest.raises(Exception) as excinfo:
+    with pytest.raises(Exception) as excinfo:  # noqa: PT011
         spec.get_schema_from_engine_params(
             make_url("postgresql://user:password@host/db1"),
             {"options": "-csearch_path=secret,public"},
@@ -204,28 +204,28 @@ def test_get_default_catalog() -> None:
         ("PT1S", "DATE_TRUNC('second', col)"),
         (
             "PT5S",
-            "DATE_TRUNC('minute', col) + INTERVAL '5 seconds' * FLOOR(EXTRACT(SECOND FROM col) / 5)",
+            "DATE_TRUNC('minute', col) + INTERVAL '5 seconds' * FLOOR(EXTRACT(SECOND FROM col) / 5)",  # noqa: E501
         ),
         (
             "PT30S",
-            "DATE_TRUNC('minute', col) + INTERVAL '30 seconds' * FLOOR(EXTRACT(SECOND FROM col) / 30)",
+            "DATE_TRUNC('minute', col) + INTERVAL '30 seconds' * FLOOR(EXTRACT(SECOND FROM col) / 30)",  # noqa: E501
         ),
         ("PT1M", "DATE_TRUNC('minute', col)"),
         (
             "PT5M",
-            "DATE_TRUNC('hour', col) + INTERVAL '5 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 5)",
+            "DATE_TRUNC('hour', col) + INTERVAL '5 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 5)",  # noqa: E501
         ),
         (
             "PT10M",
-            "DATE_TRUNC('hour', col) + INTERVAL '10 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 10)",
+            "DATE_TRUNC('hour', col) + INTERVAL '10 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 10)",  # noqa: E501
         ),
         (
             "PT15M",
-            "DATE_TRUNC('hour', col) + INTERVAL '15 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 15)",
+            "DATE_TRUNC('hour', col) + INTERVAL '15 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 15)",  # noqa: E501
         ),
         (
             "PT30M",
-            "DATE_TRUNC('hour', col) + INTERVAL '30 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 30)",
+            "DATE_TRUNC('hour', col) + INTERVAL '30 minutes' * FLOOR(EXTRACT(MINUTE FROM col) / 30)",  # noqa: E501
         ),
         ("PT1H", "DATE_TRUNC('hour', col)"),
         ("P1D", "DATE_TRUNC('day', col)"),
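S608 (suspected SQL injection via string construction) is suppressed in several of the engine-spec tests because the interpolated values are trusted literals. A self-contained sketch (illustrative only) of the rule and the parameterized form it pushes toward:

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE t (expression TEXT)")

expression = "datetime('2019-01-02')"
# S608 fires on any interpolated SQL, even when the value is a trusted
# literal as in these tests -- hence the per-line suppressions.
sql = f"SELECT {expression} FROM t"  # noqa: S608

# For user-supplied *values*, placeholders are the real fix; identifiers
# and expressions, as above, cannot be parameterized and need the noqa.
conn.execute("INSERT INTO t (expression) VALUES (?)", (expression,))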
diff --git a/tests/unit_tests/db_engine_specs/test_presto.py b/tests/unit_tests/db_engine_specs/test_presto.py
index fee07cae1..4555c1c79 100644
--- a/tests/unit_tests/db_engine_specs/test_presto.py
+++ b/tests/unit_tests/db_engine_specs/test_presto.py
@@ -59,7 +59,7 @@ def test_convert_dttm(
     dttm: datetime,
     expected_result: Optional[str],
 ) -> None:
-    from superset.db_engine_specs.presto import PrestoEngineSpec as spec
+    from superset.db_engine_specs.presto import PrestoEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -83,7 +83,7 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.presto import PrestoEngineSpec as spec
+    from superset.db_engine_specs.presto import PrestoEngineSpec as spec  # noqa: N813

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

@@ -150,7 +150,7 @@ def test_where_latest_partition(
                 compile_kwargs={"literal_binds": True},
             )
         )
-        == f"""SELECT * FROM table \nWHERE "partition_key" = {expected_value}"""
+        == f"""SELECT * FROM table \nWHERE "partition_key" = {expected_value}"""  # noqa: S608
     )

@@ -248,33 +248,33 @@ def test_get_default_catalog() -> None:
         ("PT1S", "date_trunc('second', CAST(col AS TIMESTAMP))"),
         (
             "PT5S",
-            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 5)",
+            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 5)",  # noqa: E501
         ),
         (
             "PT30S",
-            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 30)",
+            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 30)",  # noqa: E501
         ),
         ("PT1M", "date_trunc('minute', CAST(col AS TIMESTAMP))"),
         (
             "PT5M",
-            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 5)",
+            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 5)",  # noqa: E501
         ),
         (
             "PT10M",
-            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 10)",
+            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 10)",  # noqa: E501
         ),
         (
             "PT15M",
-            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 15)",
+            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 15)",  # noqa: E501
         ),
         (
             "PT0.5H",
-            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 30)",
+            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 30)",  # noqa: E501
         ),
         ("PT1H", "date_trunc('hour', CAST(col AS TIMESTAMP))"),
         (
             "PT6H",
-            "date_trunc('hour', CAST(col AS TIMESTAMP)) - interval '1' hour * (hour(CAST(col AS TIMESTAMP)) % 6)",
+            "date_trunc('hour', CAST(col AS TIMESTAMP)) - interval '1' hour * (hour(CAST(col AS TIMESTAMP)) % 6)",  # noqa: E501
         ),
         ("P1D", "date_trunc('day', CAST(col AS TIMESTAMP))"),
         ("P1W", "date_trunc('week', CAST(col AS TIMESTAMP))"),
@@ -283,12 +283,12 @@ def test_get_default_catalog() -> None:
         ("P1Y", "date_trunc('year', CAST(col AS TIMESTAMP))"),
         (
             "1969-12-28T00:00:00Z/P1W",
-            "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) - interval '1' day",
+            "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) - interval '1' day",  # noqa: E501
         ),
         ("1969-12-29T00:00:00Z/P1W", "date_trunc('week', CAST(col AS TIMESTAMP))"),
         (
             "P1W/1970-01-03T00:00:00Z",
-            "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) + interval '5' day",
+            "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) + interval '5' day",  # noqa: E501
         ),
         (
             "P1W/1970-01-04T00:00:00Z",
@@ -297,7 +297,7 @@ def test_get_default_catalog() -> None:
     ],
 )
 def test_timegrain_expressions(time_grain: str, expected_result: str) -> None:
-    from superset.db_engine_specs.presto import PrestoEngineSpec as spec
+    from superset.db_engine_specs.presto import PrestoEngineSpec as spec  # noqa: N813

     actual = str(
         spec.get_timestamp_expr(col=column("col"), pdf=None, time_grain=time_grain)
diff --git a/tests/unit_tests/db_engine_specs/test_redshift.py b/tests/unit_tests/db_engine_specs/test_redshift.py
index 9ce54384c..770228090 100644
--- a/tests/unit_tests/db_engine_specs/test_redshift.py
+++ b/tests/unit_tests/db_engine_specs/test_redshift.py
@@ -44,6 +44,8 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.redshift import RedshiftEngineSpec as spec
+    from superset.db_engine_specs.redshift import (
+        RedshiftEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_rockset.py b/tests/unit_tests/db_engine_specs/test_rockset.py
index 9ac1f5375..2d5f23c18 100644
--- a/tests/unit_tests/db_engine_specs/test_rockset.py
+++ b/tests/unit_tests/db_engine_specs/test_rockset.py
@@ -38,6 +38,6 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.rockset import RocksetEngineSpec as spec
+    from superset.db_engine_specs.rockset import RocksetEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)
diff --git a/tests/unit_tests/db_engine_specs/test_snowflake.py b/tests/unit_tests/db_engine_specs/test_snowflake.py
index 73b93b27e..68c611c5c 100644
--- a/tests/unit_tests/db_engine_specs/test_snowflake.py
+++ b/tests/unit_tests/db_engine_specs/test_snowflake.py
@@ -56,7 +56,9 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.snowflake import SnowflakeEngineSpec as spec
+    from superset.db_engine_specs.snowflake import (
+        SnowflakeEngineSpec as spec,  # noqa: N813
+    )

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -89,7 +91,7 @@ def test_extract_errors() -> None:
                 "issue_codes": [
                     {
                         "code": 1029,
-                        "message": "Issue 1029 - The object does not exist in the given database.",
+                        "message": "Issue 1029 - The object does not exist in the given database.",  # noqa: E501
                     }
                 ],
             },
@@ -100,7 +102,7 @@ def test_extract_errors() -> None:
     result = SnowflakeEngineSpec.extract_errors(Exception(msg))
     assert result == [
         SupersetError(
-            message='Please check your query for syntax errors at or near "limited". Then, try running your query again.',
+            message='Please check your query for syntax errors at or near "limited". Then, try running your query again.',  # noqa: E501
             error_type=SupersetErrorType.SYNTAX_ERROR,
             level=ErrorLevel.ERROR,
             extra={
diff --git a/tests/unit_tests/db_engine_specs/test_sqlite.py b/tests/unit_tests/db_engine_specs/test_sqlite.py
index 5b95ba789..485ec293a 100644
--- a/tests/unit_tests/db_engine_specs/test_sqlite.py
+++ b/tests/unit_tests/db_engine_specs/test_sqlite.py
@@ -40,7 +40,7 @@ def test_convert_dttm(
     expected_result: Optional[str],
     dttm: datetime,  # noqa: F811
 ) -> None:
-    from superset.db_engine_specs.sqlite import SqliteEngineSpec as spec
+    from superset.db_engine_specs.sqlite import SqliteEngineSpec as spec  # noqa: N813

     assert_convert_dttm(spec, target_type, expected_result, dttm)

@@ -126,6 +126,6 @@ def test_time_grain_expressions(dttm: str, grain: str, expected: str) -> None:
     # pylint: disable=protected-access
     expression = SqliteEngineSpec._time_grain_expressions[grain].format(col="dttm")
-    sql = f"SELECT {expression} FROM t"
+    sql = f"SELECT {expression} FROM t"  # noqa: S608
     result = connection.execute(sql).scalar()
     assert result == expected
diff --git a/tests/unit_tests/db_engine_specs/test_starrocks.py b/tests/unit_tests/db_engine_specs/test_starrocks.py
index c167755a1..45a68fd62 100644
--- a/tests/unit_tests/db_engine_specs/test_starrocks.py
+++ b/tests/unit_tests/db_engine_specs/test_starrocks.py
@@ -66,7 +66,9 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.starrocks import StarRocksEngineSpec as spec
+    from superset.db_engine_specs.starrocks import (
+        StarRocksEngineSpec as spec,  # noqa: N813
+    )

     assert_column_spec(spec, native_type, sqla_type, attrs, generic_type, is_dttm)

diff --git a/tests/unit_tests/db_engine_specs/test_trino.py b/tests/unit_tests/db_engine_specs/test_trino.py
index a2c401b87..cbf3bdd5d 100644
--- a/tests/unit_tests/db_engine_specs/test_trino.py
+++ b/tests/unit_tests/db_engine_specs/test_trino.py
@@ -240,7 +240,7 @@ def test_auth_custom_auth_denied() -> None:
     superset.config.ALLOWED_EXTRA_AUTHENTICATIONS = {}

-    with pytest.raises(ValueError) as excinfo:
+    with pytest.raises(ValueError) as excinfo:  # noqa: PT011
         TrinoEngineSpec.update_params_from_encrypted_extra(database, {})

     assert str(excinfo.value) == (
@@ -291,7 +291,7 @@ def test_get_column_spec(
     generic_type: GenericDataType,
     is_dttm: bool,
 ) -> None:
-    from superset.db_engine_specs.trino import TrinoEngineSpec as spec
+    from superset.db_engine_specs.trino import TrinoEngineSpec as spec  # noqa: N813

     assert_column_spec(
         spec,
@@ -791,7 +791,7 @@ def test_where_latest_partition(
                 compile_kwargs={"literal_binds": True},
             )
         )
-        == f"""SELECT * FROM table \nWHERE partition_key = {expected_value}"""
+        == f"""SELECT * FROM table \nWHERE partition_key = {expected_value}"""  # noqa: S608
     )

@@ -855,33 +855,33 @@ def test_get_oauth2_token(
         ("PT1S", "date_trunc('second', CAST(col AS TIMESTAMP))"),
         (
             "PT5S",
-            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 5)",
+            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 5)",  # noqa: E501
         ),
         (
             "PT30S",
-            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 30)",
+            "date_trunc('second', CAST(col AS TIMESTAMP)) - interval '1' second * (second(CAST(col AS TIMESTAMP)) % 30)",  # noqa: E501
         ),
         ("PT1M", "date_trunc('minute', CAST(col AS TIMESTAMP))"),
         (
             "PT5M",
-            "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute *
(minute(CAST(col AS TIMESTAMP)) % 5)", + "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 5)", # noqa: E501 ), ( "PT10M", - "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 10)", + "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 10)", # noqa: E501 ), ( "PT15M", - "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 15)", + "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 15)", # noqa: E501 ), ( "PT0.5H", - "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 30)", + "date_trunc('minute', CAST(col AS TIMESTAMP)) - interval '1' minute * (minute(CAST(col AS TIMESTAMP)) % 30)", # noqa: E501 ), ("PT1H", "date_trunc('hour', CAST(col AS TIMESTAMP))"), ( "PT6H", - "date_trunc('hour', CAST(col AS TIMESTAMP)) - interval '1' hour * (hour(CAST(col AS TIMESTAMP)) % 6)", + "date_trunc('hour', CAST(col AS TIMESTAMP)) - interval '1' hour * (hour(CAST(col AS TIMESTAMP)) % 6)", # noqa: E501 ), ("P1D", "date_trunc('day', CAST(col AS TIMESTAMP))"), ("P1W", "date_trunc('week', CAST(col AS TIMESTAMP))"), @@ -890,12 +890,12 @@ def test_get_oauth2_token( ("P1Y", "date_trunc('year', CAST(col AS TIMESTAMP))"), ( "1969-12-28T00:00:00Z/P1W", - "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) - interval '1' day", + "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) - interval '1' day", # noqa: E501 ), ("1969-12-29T00:00:00Z/P1W", "date_trunc('week', CAST(col AS TIMESTAMP))"), ( "P1W/1970-01-03T00:00:00Z", - "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) + interval '5' day", + "date_trunc('week', CAST(col AS TIMESTAMP) + interval '1' day) + interval '5' day", # noqa: E501 ), ( "P1W/1970-01-04T00:00:00Z", @@ -904,7 +904,7 @@ def test_get_oauth2_token( ], ) def test_timegrain_expressions(time_grain: str, expected_result: str) -> None: - from superset.db_engine_specs.trino import TrinoEngineSpec as spec + from superset.db_engine_specs.trino import TrinoEngineSpec as spec # noqa: N813 actual = str( spec.get_timestamp_expr(col=column("col"), pdf=None, time_grain=time_grain) diff --git a/tests/unit_tests/db_engine_specs/test_ydb.py b/tests/unit_tests/db_engine_specs/test_ydb.py index c4e158629..63538270b 100644 --- a/tests/unit_tests/db_engine_specs/test_ydb.py +++ b/tests/unit_tests/db_engine_specs/test_ydb.py @@ -50,7 +50,7 @@ def test_convert_dttm( expected_result: Optional[str], dttm: datetime, # noqa: F811 ) -> None: - from superset.db_engine_specs.ydb import YDBEngineSpec as spec + from superset.db_engine_specs.ydb import YDBEngineSpec as spec # noqa: N813 assert_convert_dttm(spec, target_type, expected_result, dttm) diff --git a/tests/unit_tests/distributed_lock/distributed_lock_tests.py b/tests/unit_tests/distributed_lock/distributed_lock_tests.py index 6fe363f09..398fb8683 100644 --- a/tests/unit_tests/distributed_lock/distributed_lock_tests.py +++ b/tests/unit_tests/distributed_lock/distributed_lock_tests.py @@ -52,7 +52,7 @@ def _get_other_session() -> Session: from superset import db bind = db.session.get_bind() - SessionMaker = sessionmaker(bind=bind) + SessionMaker = sessionmaker(bind=bind) # noqa: N806 return SessionMaker() diff --git a/tests/unit_tests/explore/api_test.py b/tests/unit_tests/explore/api_test.py index f8f6c54bd..ff070d99a 100644 --- 
a/tests/unit_tests/explore/api_test.py +++ b/tests/unit_tests/explore/api_test.py @@ -24,5 +24,5 @@ def test_explore_datasource_not_found(client: Any, full_api_access: None) -> Non response = client.get( "/api/v1/explore/?datasource_id=50000&datasource_type=table", ) - response.json["result"]["dataset"]["name"] == "[Missing Dataset]" + response.json["result"]["dataset"]["name"] == "[Missing Dataset]" # noqa: B015 assert response.status_code == 200 diff --git a/tests/unit_tests/explore/utils_test.py b/tests/unit_tests/explore/utils_test.py index 813ef05b3..924493aef 100644 --- a/tests/unit_tests/explore/utils_test.py +++ b/tests/unit_tests/explore/utils_test.py @@ -15,7 +15,7 @@ # specific language governing permissions and limitations # under the License. from flask_appbuilder.security.sqla.models import User -from pytest import raises +from pytest import raises # noqa: PT013 from pytest_mock import MockerFixture from superset.commands.chart.exceptions import ( @@ -63,7 +63,7 @@ def test_unsaved_chart_no_dataset_id() -> None: def test_unsaved_chart_unknown_dataset_id(mocker: MockerFixture) -> None: from superset.explore.utils import check_access as check_chart_access - with raises(DatasetNotFoundError): + with raises(DatasetNotFoundError): # noqa: PT012 mocker.patch(dataset_find_by_id, return_value=None) with override_user(User()): @@ -77,7 +77,7 @@ def test_unsaved_chart_unknown_dataset_id(mocker: MockerFixture) -> None: def test_unsaved_chart_unknown_query_id(mocker: MockerFixture) -> None: from superset.explore.utils import check_access as check_chart_access - with raises(QueryNotFoundValidationError): + with raises(QueryNotFoundValidationError): # noqa: PT012 mocker.patch(query_find_by_id, return_value=None) with override_user(User()): @@ -92,7 +92,7 @@ def test_unsaved_chart_unauthorized_dataset(mocker: MockerFixture) -> None: from superset.connectors.sqla.models import SqlaTable from superset.explore.utils import check_access as check_chart_access - with raises(DatasetAccessDeniedError): + with raises(DatasetAccessDeniedError): # noqa: PT012 mocker.patch(dataset_find_by_id, return_value=SqlaTable()) mocker.patch(can_access_datasource, return_value=False) @@ -123,7 +123,7 @@ def test_saved_chart_unknown_chart_id(mocker: MockerFixture) -> None: from superset.connectors.sqla.models import SqlaTable from superset.explore.utils import check_access as check_chart_access - with raises(ChartNotFoundError): + with raises(ChartNotFoundError): # noqa: PT012 mocker.patch(dataset_find_by_id, return_value=SqlaTable()) mocker.patch(can_access_datasource, return_value=True) mocker.patch(chart_find_by_id, return_value=None) @@ -140,7 +140,7 @@ def test_saved_chart_unauthorized_dataset(mocker: MockerFixture) -> None: from superset.connectors.sqla.models import SqlaTable from superset.explore.utils import check_access as check_chart_access - with raises(DatasetAccessDeniedError): + with raises(DatasetAccessDeniedError): # noqa: PT012 mocker.patch(dataset_find_by_id, return_value=SqlaTable()) mocker.patch(can_access_datasource, return_value=False) @@ -214,7 +214,7 @@ def test_saved_chart_no_access(mocker: MockerFixture) -> None: from superset.explore.utils import check_access as check_chart_access from superset.models.slice import Slice - with raises(ChartAccessDeniedError): + with raises(ChartAccessDeniedError): # noqa: PT012 mocker.patch(dataset_find_by_id, return_value=SqlaTable()) mocker.patch(can_access_datasource, return_value=True) mocker.patch(is_admin, return_value=False) diff --git 
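A note on the PT012 suppressions above: the rule flags `pytest.raises` blocks that contain more than the single statement expected to raise, since an earlier line raising the same exception would let the test pass vacuously. A minimal sketch of the shape the rule prefers — `find_dataset` and `DatasetNotFoundError` here are hypothetical stand-ins, not Superset's actual helpers:

    import pytest


    class DatasetNotFoundError(Exception):
        """Hypothetical stand-in for the real exception class."""


    def find_dataset(dataset_id: int) -> None:
        # Always raises, standing in for a failing lookup.
        raise DatasetNotFoundError(dataset_id)


    def test_find_dataset_missing() -> None:
        missing_id = 42  # arrange outside the raises block
        with pytest.raises(DatasetNotFoundError):
            find_dataset(missing_id)  # exactly one statement inside

The tests above keep `mocker.patch` calls inside the block, so this change suppresses the rule rather than restructuring them.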
diff --git a/tests/unit_tests/fixtures/bash_mock.py b/tests/unit_tests/fixtures/bash_mock.py
index a05006300..f2512d127 100644
--- a/tests/unit_tests/fixtures/bash_mock.py
+++ b/tests/unit_tests/fixtures/bash_mock.py
@@ -22,7 +22,7 @@ class BashMock:
     @staticmethod
     def tag_latest_release(tag):
         bash_command = f"./scripts/tag_latest_release.sh {tag} --dry-run"
-        result = subprocess.run(
+        result = subprocess.run(  # noqa: S602
             bash_command,
             shell=True,
             capture_output=True,
diff --git a/tests/unit_tests/fixtures/common.py b/tests/unit_tests/fixtures/common.py
index 4ee1d9d0e..f96802f95 100644
--- a/tests/unit_tests/fixtures/common.py
+++ b/tests/unit_tests/fixtures/common.py
@@ -79,7 +79,7 @@ def create_columnar_file(


 @pytest.fixture
-def admin_user() -> Generator[User, None, None]:
+def admin_user() -> User:
     role = db.session.query(Role).filter_by(name="Admin").one()
     user = User(
         first_name="Alice",
@@ -90,7 +90,7 @@ def admin_user() -> Generator[User, None, None]:
     )
     db.session.add(user)
     db.session.flush()
-    yield user
+    return user


 @pytest.fixture
diff --git a/tests/unit_tests/fixtures/dataframes.py b/tests/unit_tests/fixtures/dataframes.py
index e1499792c..7e0993839 100644
--- a/tests/unit_tests/fixtures/dataframes.py
+++ b/tests/unit_tests/fixtures/dataframes.py
@@ -119,8 +119,8 @@ categories_df = DataFrame(
         "category": [f"cat{i%3}" for i in range(0, 101)],
         "dept": [f"dept{i%5}" for i in range(0, 101)],
         "name": [f"person{i}" for i in range(0, 101)],
-        "asc_idx": [i for i in range(0, 101)],
-        "desc_idx": [i for i in range(100, -1, -1)],
+        "asc_idx": [i for i in range(0, 101)],  # noqa: C416
+        "desc_idx": [i for i in range(100, -1, -1)],  # noqa: C416
         "idx_nulls": [i if i % 5 == 0 else None for i in range(0, 101)],
     }
 )
diff --git a/tests/unit_tests/importexport/api_test.py b/tests/unit_tests/importexport/api_test.py
index ffdc49ae2..0c125c2cc 100644
--- a/tests/unit_tests/importexport/api_test.py
+++ b/tests/unit_tests/importexport/api_test.py
@@ -48,12 +48,12 @@ def test_export_assets(
     mocked_export_result = [
         (
             "metadata.yaml",
-            lambda: "version: 1.0.0\ntype: assets\ntimestamp: '2022-01-01T00:00:00+00:00'\n",
+            lambda: "version: 1.0.0\ntype: assets\ntimestamp: '2022-01-01T00:00:00+00:00'\n",  # noqa: E501
         ),
         ("databases/example.yaml", lambda: ""),
     ]

-    ExportAssetsCommand = mocker.patch("superset.importexport.api.ExportAssetsCommand")
+    ExportAssetsCommand = mocker.patch("superset.importexport.api.ExportAssetsCommand")  # noqa: N806
     ExportAssetsCommand().run.return_value = mocked_export_result[:]

     response = client.get("/api/v1/assets/export/")
@@ -83,7 +83,7 @@ def test_import_assets(
         "databases/example.yaml": "",
     }

-    ImportAssetsCommand = mocker.patch("superset.importexport.api.ImportAssetsCommand")
+    ImportAssetsCommand = mocker.patch("superset.importexport.api.ImportAssetsCommand")  # noqa: N806
     root = Path("assets_export")
     buf = BytesIO()
diff --git a/tests/unit_tests/jinja_context_test.py b/tests/unit_tests/jinja_context_test.py
index 391ead3f4..c17c066b9 100644
--- a/tests/unit_tests/jinja_context_test.py
+++ b/tests/unit_tests/jinja_context_test.py
@@ -463,7 +463,7 @@ def test_dataset_macro(mocker: MockerFixture) -> None:
         schema_perm=None,
         extra=json.dumps({"warning_markdown": "*WARNING*"}),
     )
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = dataset
     mocker.patch(
         "superset.connectors.sqla.models.security_manager.get_guest_rls_filters",
@@ -477,7 +477,7 @@ def test_dataset_macro(mocker: MockerFixture) -> None:
         == f"""(
 SELECT ds AS ds, num_boys AS num_boys, revenue AS revenue, expenses AS expenses, revenue-expenses AS profit{space}
 FROM my_schema.old_dataset
-) AS dataset_1"""
+) AS dataset_1"""  # noqa: S608, E501
     )

     assert (
@@ -485,7 +485,7 @@ FROM my_schema.old_dataset
         == f"""(
 SELECT ds AS ds, num_boys AS num_boys, revenue AS revenue, expenses AS expenses, revenue-expenses AS profit, COUNT(*) AS cnt{space}
 FROM my_schema.old_dataset GROUP BY ds, num_boys, revenue, expenses, revenue-expenses
-) AS dataset_1"""
+) AS dataset_1"""  # noqa: S608, E501
     )

     assert (
@@ -493,7 +493,7 @@ FROM my_schema.old_dataset GROUP BY ds, num_boys, revenue, expenses, revenue-exp
         == f"""(
 SELECT ds AS ds, COUNT(*) AS cnt{space}
 FROM my_schema.old_dataset GROUP BY ds
-) AS dataset_1"""
+) AS dataset_1"""  # noqa: S608
     )

     DatasetDAO.find_by_id.return_value = None
@@ -513,7 +513,7 @@ def test_dataset_macro_mutator_with_comments(mocker: MockerFixture) -> None:
         """
         return f"-- begin\n{sql}\n-- end"

-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id().get_query_str_extended().sql = mutator("SELECT 1")
     assert (
         dataset_macro(1)
@@ -530,7 +530,7 @@ def test_metric_macro_with_dataset_id(mocker: MockerFixture) -> None:
     Test the ``metric_macro`` when passing a dataset ID.
     """
     mock_get_form_data = mocker.patch("superset.views.utils.get_form_data")
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = SqlaTable(
         table_name="test_dataset",
         metrics=[
@@ -549,7 +549,7 @@ def test_metric_macro_with_dataset_id_invalid_key(mocker: MockerFixture) -> None
     Test the ``metric_macro`` when passing a dataset ID and an invalid key.
     """
     mock_get_form_data = mocker.patch("superset.views.utils.get_form_data")
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = SqlaTable(
         table_name="test_dataset",
         metrics=[
@@ -570,7 +570,7 @@ def test_metric_macro_invalid_dataset_id(mocker: MockerFixture) -> None:
     Test the ``metric_macro`` when specifying a dataset that doesn't exist.
     """
     mock_get_form_data = mocker.patch("superset.views.utils.get_form_data")
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = None
     with pytest.raises(DatasetNotFoundError) as excinfo:
         metric_macro("macro_key", 100)
@@ -583,14 +583,14 @@ def test_metric_macro_no_dataset_id_no_context(mocker: MockerFixture) -> None:
     Test the ``metric_macro`` when not specifying a dataset ID and it's not
     available in the context.
     """
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     mock_g = mocker.patch("superset.jinja_context.g")
     mock_g.form_data = {}
     with app.test_request_context():
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )
         DatasetDAO.find_by_id.assert_not_called()
@@ -602,7 +602,7 @@ def test_metric_macro_no_dataset_id_with_context_missing_info(
     Test the ``metric_macro`` when not specifying a dataset ID and request
     has context but no dataset/chart ID.
     """
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     mock_g = mocker.patch("superset.jinja_context.g")
     mock_g.form_data = {"queries": []}
     with app.test_request_context(
@@ -625,7 +625,7 @@ def test_metric_macro_no_dataset_id_with_context_missing_info(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )
         DatasetDAO.find_by_id.assert_not_called()
@@ -637,7 +637,7 @@ def test_metric_macro_no_dataset_id_with_context_datasource_id(
     Test the ``metric_macro`` when not specifying a dataset ID and it's
     available in the context (url_params.datasource_id).
     """
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = SqlaTable(
         table_name="test_dataset",
         metrics=[
@@ -711,7 +711,7 @@ def test_metric_macro_no_dataset_id_with_context_datasource_id_none(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )

     # Getting data from g's form_data
@@ -728,7 +728,7 @@ def test_metric_macro_no_dataset_id_with_context_datasource_id_none(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )
@@ -739,11 +739,11 @@ def test_metric_macro_no_dataset_id_with_context_chart_id(
     Test the ``metric_macro`` when not specifying a dataset ID and context
     includes an existing chart ID (url_params.slice_id).
     """
-    ChartDAO = mocker.patch("superset.daos.chart.ChartDAO")
+    ChartDAO = mocker.patch("superset.daos.chart.ChartDAO")  # noqa: N806
     ChartDAO.find_by_id.return_value = Slice(
         datasource_id=1,
     )
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = SqlaTable(
         table_name="test_dataset",
         metrics=[
@@ -818,7 +818,7 @@ def test_metric_macro_no_dataset_id_with_context_slice_id_none(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
        )

     # Getting data from g's form_data
@@ -835,7 +835,7 @@ def test_metric_macro_no_dataset_id_with_context_slice_id_none(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )
@@ -846,7 +846,7 @@ def test_metric_macro_no_dataset_id_with_context_deleted_chart(
     Test the ``metric_macro`` when not specifying a dataset ID and context
     includes a deleted chart ID.
     """
-    ChartDAO = mocker.patch("superset.daos.chart.ChartDAO")
+    ChartDAO = mocker.patch("superset.daos.chart.ChartDAO")  # noqa: N806
     ChartDAO.find_by_id.return_value = None
     mock_g = mocker.patch("superset.jinja_context.g")
     mock_g.form_data = {}
@@ -870,7 +870,7 @@ def test_metric_macro_no_dataset_id_with_context_deleted_chart(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )

     # Getting data from g's form_data
@@ -887,7 +887,7 @@ def test_metric_macro_no_dataset_id_with_context_deleted_chart(
         with pytest.raises(SupersetTemplateException) as excinfo:
             metric_macro("macro_key")
         assert str(excinfo.value) == (
-            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."
+            "Please specify the Dataset ID for the ``macro_key`` metric in the Jinja macro."  # noqa: E501
         )
@@ -898,7 +898,7 @@ def test_metric_macro_no_dataset_id_available_in_request_form_data(
     Test the ``metric_macro`` when not specifying a dataset ID and context
     includes an existing dataset ID (datasource.id).
     """
-    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")
+    DatasetDAO = mocker.patch("superset.daos.dataset.DatasetDAO")  # noqa: N806
     DatasetDAO.find_by_id.return_value = SqlaTable(
         table_name="test_dataset",
         metrics=[
@@ -953,14 +953,14 @@ def test_metric_macro_no_dataset_id_available_in_request_form_data(
         [],
     ),
     (
-        "Missing time range and filter with default value will return a result with the defaults",
+        "Missing time range and filter with default value will return a result with the defaults",  # noqa: E501
         [],
         {"default": "Last week", "target_type": "TIMESTAMP"},
         "postgresql://mydb",
         [{}],
         TimeFilter(
-            from_expr="TO_TIMESTAMP('2024-08-27 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",
-            to_expr="TO_TIMESTAMP('2024-09-03 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",
+            from_expr="TO_TIMESTAMP('2024-08-27 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",  # noqa: E501
+            to_expr="TO_TIMESTAMP('2024-09-03 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",  # noqa: E501
             time_range="Last week",
         ),
         [],
@@ -973,8 +973,8 @@ def test_metric_macro_no_dataset_id_available_in_request_form_data(
         "postgresql://mydb",
         [{"time_range": "Last week"}],
         TimeFilter(
-            from_expr="TO_TIMESTAMP('2024-08-27 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",
-            to_expr="TO_TIMESTAMP('2024-09-03 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",
+            from_expr="TO_TIMESTAMP('2024-08-27 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",  # noqa: E501
+            to_expr="TO_TIMESTAMP('2024-09-03 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",  # noqa: E501
             time_range="Last week",
         ),
         [],
@@ -997,8 +997,8 @@ def test_metric_macro_no_dataset_id_available_in_request_form_data(
             }
         ],
         TimeFilter(
-            from_expr="TO_TIMESTAMP('2024-08-27 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",
-            to_expr="TO_TIMESTAMP('2024-09-03 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",
+            from_expr="TO_TIMESTAMP('2024-08-27 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",  # noqa: E501
+            to_expr="TO_TIMESTAMP('2024-09-03 00:00:00.000000', 'YYYY-MM-DD HH24:MI:SS.US')",  # noqa: E501
             time_range="Last week",
         ),
         [],
diff --git a/tests/unit_tests/models/core_test.py b/tests/unit_tests/models/core_test.py
index 8b29116f5..5bc3c86af 100644
--- a/tests/unit_tests/models/core_test.py
+++ b/tests/unit_tests/models/core_test.py
@@ -634,16 +634,16 @@ def test_purge_oauth2_tokens(session: Session) -> None:
         DatabaseUserOAuth2Tokens(
             user_id=user.id,
             database_id=database1.id,
-            access_token="my_access_token",
+            access_token="my_access_token",  # noqa: S106
             access_token_expiration=datetime(2023, 1, 1),
-            refresh_token="my_refresh_token",
+            refresh_token="my_refresh_token",  # noqa: S106
         ),
         DatabaseUserOAuth2Tokens(
             user_id=user.id,
             database_id=database2.id,
-            access_token="my_other_access_token",
+            access_token="my_other_access_token",  # noqa: S106
             access_token_expiration=datetime(2024, 1, 1),
-            refresh_token="my_other_refresh_token",
+            refresh_token="my_other_refresh_token",  # noqa: S106
         ),
     ]
     session.add_all(tokens)
@@ -658,9 +658,9 @@ def test_purge_oauth2_tokens(session: Session) -> None:
     )
     assert token.user_id == user.id
     assert token.database_id == database1.id
-    assert token.access_token == "my_access_token"
+    assert token.access_token == "my_access_token"  # noqa: S105
     assert token.access_token_expiration == datetime(2023, 1, 1)
-    assert token.refresh_token == "my_refresh_token"
+    assert token.refresh_token == "my_refresh_token"  # noqa: S105

     database1.purge_oauth2_tokens()
diff --git a/tests/unit_tests/models/helpers_test.py b/tests/unit_tests/models/helpers_test.py
index c87b21792..cc106dbe6 100644
--- a/tests/unit_tests/models/helpers_test.py
+++ b/tests/unit_tests/models/helpers_test.py
@@ -33,7 +33,7 @@ if TYPE_CHECKING:
     from superset.models.core import Database


-@pytest.fixture()
+@pytest.fixture
 def database(mocker: MockerFixture, session: Session) -> Database:
     from superset.connectors.sqla.models import SqlaTable
     from superset.models.core import Database
diff --git a/tests/unit_tests/pandas_postprocessing/test_boxplot.py b/tests/unit_tests/pandas_postprocessing/test_boxplot.py
index 27dff0ade..619c87706 100644
--- a/tests/unit_tests/pandas_postprocessing/test_boxplot.py
+++ b/tests/unit_tests/pandas_postprocessing/test_boxplot.py
@@ -29,7 +29,7 @@ def test_boxplot_tukey():
         whisker_type=PostProcessingBoxplotWhiskerType.TUKEY,
         metrics=["cars"],
     )
-    columns = {column for column in df.columns}
+    columns = {column for column in df.columns}  # noqa: C416
     assert columns == {
         "cars__mean",
         "cars__median",
@@ -51,7 +51,7 @@ def test_boxplot_min_max():
         whisker_type=PostProcessingBoxplotWhiskerType.MINMAX,
         metrics=["cars"],
     )
-    columns = {column for column in df.columns}
+    columns = {column for column in df.columns}  # noqa: C416
     assert columns == {
         "cars__mean",
         "cars__median",
@@ -74,7 +74,7 @@ def test_boxplot_percentile():
         metrics=["cars"],
         percentiles=[1, 99],
     )
-    columns = {column for column in df.columns}
+    columns = {column for column in df.columns}  # noqa: C416
     assert columns == {
         "cars__mean",
         "cars__median",
@@ -136,7 +136,7 @@ def test_boxplot_type_coercion():
         metrics=["cars"],
     )

-    columns = {column for column in df.columns}
+    columns = {column for column in df.columns}  # noqa: C416
     assert columns == {
         "cars__mean",
         "cars__median",
diff --git a/tests/unit_tests/pandas_postprocessing/test_compare.py b/tests/unit_tests/pandas_postprocessing/test_compare.py
index a26aa11d2..a1b15b415 100644
--- a/tests/unit_tests/pandas_postprocessing/test_compare.py
+++ b/tests/unit_tests/pandas_postprocessing/test_compare.py
@@ -19,7 +19,7 @@ import sys

 import pandas as pd

-from superset.constants import PandasPostprocessingCompare as PPC
+from superset.constants import PandasPostprocessingCompare as PPC  # noqa: N817
 from superset.utils import pandas_postprocessing as pp
 from superset.utils.pandas_postprocessing.utils import FLAT_COLUMN_SEPARATOR
 from tests.unit_tests.fixtures.dataframes import multiple_metrics_df, timeseries_df2
@@ -166,7 +166,7 @@ def test_compare_multi_index_column():
 0 2021-01-01 0 0 0 0
 1 2021-01-02 0 0 0 0
 2 2021-01-03 0 0 0 0
-    """
+    """  # noqa: E501
     assert flat_df.equals(
         pd.DataFrame(
             data={
@@ -230,7 +230,7 @@ def test_compare_multi_index_column_non_lex_sorted():
 0 2021-01-01 0 0 0 0
 1 2021-01-02 0 0 0 0
 2 2021-01-03 0 0 0 0
-    """
+    """  # noqa: E501
     assert flat_df.equals(
         pd.DataFrame(
             data={
@@ -282,7 +282,7 @@ def test_compare_after_pivot():
 dttm difference__count_metric__sum_metric, UK difference__count_metric__sum_metric, US
 0 2019-01-01 -4 -4
 1 2019-01-02 -4 -4
-    """
+    """  # noqa: E501
     assert flat_df.equals(
         pd.DataFrame(
             data={
diff --git a/tests/unit_tests/pandas_postprocessing/test_histogram.py b/tests/unit_tests/pandas_postprocessing/test_histogram.py
index 73370c8e6..ee0261a9f 100644
--- a/tests/unit_tests/pandas_postprocessing/test_histogram.py
+++ b/tests/unit_tests/pandas_postprocessing/test_histogram.py
@@ -119,7 +119,7 @@ def test_histogram_with_non_numeric_column():
     try:
         histogram(data, "group", None, bins)
     except ValueError as e:
-        assert str(e) == "Column 'group' contains non-numeric values"
+        assert str(e) == "Column 'group' contains non-numeric values"  # noqa: PT017


 def test_histogram_with_some_non_numeric_values():
@@ -133,4 +133,4 @@ def test_histogram_with_some_non_numeric_values():
     try:
         histogram(data_with_non_numeric, "a", ["group"], bins)
     except ValueError as e:
-        assert str(e) == "Column 'group' contains non-numeric values"
+        assert str(e) == "Column 'group' contains non-numeric values"  # noqa: PT017
diff --git a/tests/unit_tests/pandas_postprocessing/test_prophet.py b/tests/unit_tests/pandas_postprocessing/test_prophet.py
index 4d9acdb06..c21dbb0d8 100644
--- a/tests/unit_tests/pandas_postprocessing/test_prophet.py
+++ b/tests/unit_tests/pandas_postprocessing/test_prophet.py
@@ -28,7 +28,7 @@ from tests.unit_tests.fixtures.dataframes import prophet_df

 def test_prophet_valid():
     df = prophet(df=prophet_df, time_grain="P1M", periods=3, confidence_interval=0.9)
-    columns = {column for column in df.columns}
+    columns = {column for column in df.columns}  # noqa: C416
     assert columns == {
         DTTM_ALIAS,
         "a__yhat",
@@ -112,7 +112,7 @@ def test_prophet_valid():

 def test_prophet_valid_zero_periods():
     df = prophet(df=prophet_df, time_grain="P1M", periods=0, confidence_interval=0.9)
-    columns = {column for column in df.columns}
+    columns = {column for column in df.columns}  # noqa: C416
     assert columns == {
         DTTM_ALIAS,
         "a__yhat",
diff --git a/tests/unit_tests/pandas_postprocessing/test_rename.py b/tests/unit_tests/pandas_postprocessing/test_rename.py
index ee4e795e8..23f62067d 100644
--- a/tests/unit_tests/pandas_postprocessing/test_rename.py
+++ b/tests/unit_tests/pandas_postprocessing/test_rename.py
@@ -133,7 +133,7 @@ def test_should_raise_exception_duplication_on_multiindex():
 2 1 1 1 1 1 1 1 1
     """

-    with pytest.raises(InvalidPostProcessingError):
+    with pytest.raises(InvalidPostProcessingError):  # noqa: PT012
         pp.rename(
             df=df,
             columns={
@@ -151,7 +151,7 @@ def test_should_raise_exception_duplication_on_multiindex():


 def test_should_raise_exception_invalid_level():
-    with pytest.raises(InvalidPostProcessingError):
+    with pytest.raises(InvalidPostProcessingError):  # noqa: PT012
         pp.rename(
             df=categories_df,
             columns={
diff --git a/tests/unit_tests/pandas_postprocessing/test_sort.py b/tests/unit_tests/pandas_postprocessing/test_sort.py
index e19da38ef..ea1e39186 100644
--- a/tests/unit_tests/pandas_postprocessing/test_sort.py
+++ b/tests/unit_tests/pandas_postprocessing/test_sort.py
@@ -48,6 +48,6 @@ def test_sort():
     df = sort(df=timeseries_df)
     assert df.equals(timeseries_df)

-    with pytest.raises(InvalidPostProcessingError):
+    with pytest.raises(InvalidPostProcessingError):  # noqa: PT012
         sort(df=df, by="abc", ascending=False)
         sort(df=df, by=["abc", "def"])
diff --git a/tests/unit_tests/reports/notifications/slack_tests.py b/tests/unit_tests/reports/notifications/slack_tests.py
index b7f996631..3c7b12308 100644
--- a/tests/unit_tests/reports/notifications/slack_tests.py
+++ b/tests/unit_tests/reports/notifications/slack_tests.py
@@ -63,7 +63,7 @@ def test_get_channel_with_multi_recipients(mock_header_data) -> None:
     slack_notification = SlackNotification(
         recipient=ReportRecipients(
             type=ReportRecipientType.SLACK,
-            recipient_config_json='{"target": "some_channel; second_channel, third_channel"}',
+            recipient_config_json='{"target": "some_channel; second_channel, third_channel"}',  # noqa: E501
         ),
         content=content,
     )
@@ -72,13 +72,13 @@ def test_get_channel_with_multi_recipients(mock_header_data) -> None:

     assert result == "some_channel,second_channel,third_channel"


-# Test if the recipient configuration JSON is valid when using a SlackV2 recipient type
+# Test if the recipient configuration JSON is valid when using a SlackV2 recipient type  # noqa: E501
 def test_valid_recipient_config_json_slackv2(mock_header_data) -> None:
     """
     Test if the recipient configuration JSON is valid when using a SlackV2 recipient type
-    """
+    """  # noqa: E501
     from superset.reports.models import ReportRecipients, ReportRecipientType
     from superset.reports.notifications.base import NotificationContent
     from superset.reports.notifications.slack import SlackNotification
@@ -107,7 +107,7 @@ def test_valid_recipient_config_json_slackv2(mock_header_data) -> None:

     assert result == '{"target": "some_channel"}'


-# Ensure _get_inline_files function returns the correct tuple when content has screenshots
+# Ensure _get_inline_files function returns the correct tuple when content has screenshots  # noqa: E501
 def test_get_inline_files_with_screenshots(mock_header_data) -> None:
@@ -144,7 +144,7 @@ def test_get_inline_files_with_screenshots(mock_header_data) -> None:

     assert result == ("png", [b"screenshot1", b"screenshot2"])


-# Ensure _get_inline_files function returns None when content has no screenshots or csv
+# Ensure _get_inline_files function returns None when content has no screenshots or csv  # noqa: E501
 def test_get_inline_files_with_no_screenshots_or_csv(mock_header_data) -> None:
diff --git a/tests/unit_tests/scripts/tag_latest_release_test.py b/tests/unit_tests/scripts/tag_latest_release_test.py
index 0b7e33e21..15e33978a 100644
--- a/tests/unit_tests/scripts/tag_latest_release_test.py
+++ b/tests/unit_tests/scripts/tag_latest_release_test.py
@@ -38,7 +38,7 @@ def wrapped(*args, **kwargs):
     ("2.1.0rc1", "This tag 2.1.0rc1 is not a valid release version. Not tagging."),
     (
         "",
-        "Missing tag parameter, usage: ./scripts/tag_latest_release.sh ",
+        "Missing tag parameter, usage: ./scripts/tag_latest_release.sh ",  # noqa: E501
     ),
     ("2.1", "This tag 2.1 is not a valid release version. Not tagging."),
     (
@@ -53,7 +53,7 @@ def test_tag_latest_release(tag, expected_output):
     ) as subprocess_mock:
         result = BashMock.tag_latest_release(tag)

-        subprocess_mock.assert_called_once_with(
+        subprocess_mock.assert_called_once_with(  # noqa: S604
             f"./scripts/tag_latest_release.sh {tag} --dry-run",
             shell=True,
             capture_output=True,
diff --git a/tests/unit_tests/security/manager_test.py b/tests/unit_tests/security/manager_test.py
index 40d89ba35..d1b2aa264 100644
--- a/tests/unit_tests/security/manager_test.py
+++ b/tests/unit_tests/security/manager_test.py
@@ -361,7 +361,7 @@ def test_raise_for_access_query_default_schema(
     mocker.patch.object(sm, "can_access_database", return_value=False)
     mocker.patch.object(sm, "get_schema_perm", return_value="[PostgreSQL].[public]")
     mocker.patch.object(sm, "is_guest_user", return_value=False)
-    SqlaTable = mocker.patch("superset.connectors.sqla.models.SqlaTable")
+    SqlaTable = mocker.patch("superset.connectors.sqla.models.SqlaTable")  # noqa: N806
     SqlaTable.query_datasources_by_name.return_value = []

     database = mocker.MagicMock()
@@ -417,7 +417,7 @@ def test_raise_for_access_jinja_sql(mocker: MockerFixture, app_context: None) ->
     get_table_access_error_object = mocker.patch.object(
         sm, "get_table_access_error_object"
     )
-    SqlaTable = mocker.patch("superset.connectors.sqla.models.SqlaTable")
+    SqlaTable = mocker.patch("superset.connectors.sqla.models.SqlaTable")  # noqa: N806
     SqlaTable.query_datasources_by_name.return_value = []

     database = mocker.MagicMock()
@@ -1067,7 +1067,7 @@ def test_raise_for_access_catalog(
         return_value="[PostgreSQL].[db1]",
     )
     mocker.patch.object(sm, "is_guest_user", return_value=False)
-    SqlaTable = mocker.patch("superset.connectors.sqla.models.SqlaTable")
+    SqlaTable = mocker.patch("superset.connectors.sqla.models.SqlaTable")  # noqa: N806
     SqlaTable.query_datasources_by_name.return_value = []

     database = mocker.MagicMock()
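On the recurring N806 suppressions here and elsewhere in the patch: binding a mocked class to a PascalCase local keeps the test reading like the production code it patches, at the cost of a "non-lowercase variable in function" finding. A minimal sketch using only the standard library — `SqlaTable` is just an illustrative name, not the real Superset model:

    from unittest import mock


    def test_query_datasources_by_name() -> None:
        # PascalCase mirrors the class being replaced, hence the inline noqa.
        SqlaTable = mock.MagicMock()  # noqa: N806
        SqlaTable.query_datasources_by_name.return_value = []

        assert SqlaTable.query_datasources_by_name("some_table") == []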
- """ + """ # noqa: E501 assert str(Table("tbname")) == "tbname" assert str(Table("tbname", "schemaname")) == "schemaname.tbname" assert ( diff --git a/tests/unit_tests/sql_lab_execution_context.py b/tests/unit_tests/sql_lab_execution_context.py index ea39e2bf3..1591967de 100644 --- a/tests/unit_tests/sql_lab_execution_context.py +++ b/tests/unit_tests/sql_lab_execution_context.py @@ -63,7 +63,10 @@ def test_sql_json_execution_context_init(query_params): @with_feature_flags(SQLLAB_FORCE_RUN_ASYNC=True) @pytest.mark.parametrize("runAsync, expected_async_flag", [(True, True), (False, True)]) def test_sql_json_execution_context_feature_flag_false( - mocker, query_params, runAsync, expected_async_flag + mocker, + query_params, + runAsync, # noqa: N803 + expected_async_flag, # noqa: N803 ): query_params["runAsync"] = runAsync context = SqlJsonExecutionContext(query_params) @@ -76,7 +79,10 @@ def test_sql_json_execution_context_feature_flag_false( "runAsync, expected_async_flag", [(True, True), (False, False)] ) def test_sql_json_execution_context_feature_flag_true( - mocker, query_params, runAsync, expected_async_flag + mocker, + query_params, + runAsync, # noqa: N803 + expected_async_flag, # noqa: N803 ): query_params["runAsync"] = runAsync context = SqlJsonExecutionContext(query_params) diff --git a/tests/unit_tests/sql_lab_test.py b/tests/unit_tests/sql_lab_test.py index cc9b146f3..21c9a9524 100644 --- a/tests/unit_tests/sql_lab_test.py +++ b/tests/unit_tests/sql_lab_test.py @@ -56,7 +56,7 @@ def test_execute_sql_statement(mocker: MockerFixture, app: None) -> None: db_engine_spec.fetch_data.return_value = [(42,)] cursor = mocker.MagicMock() - SupersetResultSet = mocker.patch("superset.sql_lab.SupersetResultSet") + SupersetResultSet = mocker.patch("superset.sql_lab.SupersetResultSet") # noqa: N806 execute_sql_statement( sql_statement, @@ -99,7 +99,7 @@ def test_execute_sql_statement_with_rls( db_engine_spec.fetch_data.return_value = [(42,)] cursor = mocker.MagicMock() - SupersetResultSet = mocker.patch("superset.sql_lab.SupersetResultSet") + SupersetResultSet = mocker.patch("superset.sql_lab.SupersetResultSet") # noqa: N806 mocker.patch( "superset.sql_lab.insert_rls_as_subquery", return_value=sqlparse.parse("SELECT * FROM sales WHERE organization_id=42")[0], @@ -232,7 +232,7 @@ def test_execute_sql_statement_within_payload_limit(mocker: MockerFixture) -> No ) except SupersetErrorException: pytest.fail( - "SupersetErrorException should not have been raised for payload within the limit" + "SupersetErrorException should not have been raised for payload within the limit" # noqa: E501 ) diff --git a/tests/unit_tests/sql_parse_tests.py b/tests/unit_tests/sql_parse_tests.py index d7d9941b6..3b44c1c2c 100644 --- a/tests/unit_tests/sql_parse_tests.py +++ b/tests/unit_tests/sql_parse_tests.py @@ -58,7 +58,7 @@ def test_table() -> None: Test the ``Table`` class and its string conversion. Special characters in the table, schema, or catalog name should be escaped correctly. - """ + """ # noqa: E501 assert str(Table("tbname")) == "tbname" assert str(Table("tbname", "schemaname")) == "schemaname.tbname" assert ( @@ -260,21 +260,21 @@ def test_extract_tables_illdefined() -> None: extract_tables("SELECT * FROM schemaname.") assert ( str(excinfo.value) - == "You may have an error in your SQL statement. Error parsing near '.' at line 1:25" + == "You may have an error in your SQL statement. Error parsing near '.' 
at line 1:25" # noqa: E501 ) with pytest.raises(SupersetSecurityException) as excinfo: extract_tables("SELECT * FROM catalogname.schemaname.") assert ( str(excinfo.value) - == "You may have an error in your SQL statement. Error parsing near '.' at line 1:37" + == "You may have an error in your SQL statement. Error parsing near '.' at line 1:37" # noqa: E501 ) with pytest.raises(SupersetSecurityException) as excinfo: extract_tables("SELECT * FROM catalogname..") assert ( str(excinfo.value) - == "You may have an error in your SQL statement. Error parsing near '.' at line 1:27" + == "You may have an error in your SQL statement. Error parsing near '.' at line 1:27" # noqa: E501 ) with pytest.raises(SupersetSecurityException) as excinfo: @@ -1313,12 +1313,12 @@ def test_sqlparse_issue_652(): ("postgresql", "(SELECT COUNT(DISTINCT name) from birth_names)", True), ( "postgresql", - "(SELECT table_name FROM information_schema.tables WHERE table_name LIKE '%user%' LIMIT 1)", + "(SELECT table_name FROM information_schema.tables WHERE table_name LIKE '%user%' LIMIT 1)", # noqa: E501 True, ), ( "postgresql", - "(SELECT table_name FROM /**/ information_schema.tables WHERE table_name LIKE '%user%' LIMIT 1)", + "(SELECT table_name FROM /**/ information_schema.tables WHERE table_name LIKE '%user%' LIMIT 1)", # noqa: E501 True, ), ( @@ -1333,7 +1333,7 @@ def test_sqlparse_issue_652(): ), ( "postgresql", - "((select users.id from (select 'majorie' as a) b, users where b.a = users.name and users.name in ('majorie') limit 1) like 'U%')", + "((select users.id from (select 'majorie' as a) b, users where b.a = users.name and users.name in ('majorie') limit 1) like 'U%')", # noqa: E501 True, ), ], @@ -1536,7 +1536,7 @@ def test_insert_rls_as_subquery( "id=42", "SELECT * FROM some_table WHERE ( 1=1) AND some_table.id=42", ), - # Any existing predicates MUST to be wrapped in parenthesis because AND has higher + # Any existing predicates MUST to be wrapped in parenthesis because AND has higher # noqa: E501 # precedence than OR. If the RLS it `1=0` and we didn't add parenthesis a user # could bypass it by crafting a query with `WHERE TRUE OR FALSE`, since # `WHERE TRUE OR FALSE AND 1=0` evaluates to `WHERE TRUE OR (FALSE AND 1=0)`. @@ -1604,7 +1604,7 @@ def test_insert_rls_as_subquery( "id=42", "SELECT * FROM some_table WHERE some_table.id=42", ), - # We add the RLS even if it's already present, to be conservative. It should have + # We add the RLS even if it's already present, to be conservative. It should have # noqa: E501 # no impact on the query, and it's easier than testing if the RLS is already # present (it could be present in an OR clause, eg). 
( @@ -1666,7 +1666,7 @@ def test_insert_rls_as_subquery( "SELECT * FROM table UNION ALL SELECT * FROM other_table", "table", "id=42", - "SELECT * FROM table WHERE table.id=42 UNION ALL SELECT * FROM other_table", + "SELECT * FROM table WHERE table.id=42 UNION ALL SELECT * FROM other_table", # noqa: E501 ), ( "SELECT * FROM table UNION ALL SELECT * FROM other_table", @@ -1836,7 +1836,7 @@ def test_is_select() -> None: """ assert not ParsedQuery("SELECT 1; DROP DATABASE superset").is_select() assert ParsedQuery( - "with base as(select id from table1 union all select id from table2) select * from base" + "with base as(select id from table1 union all select id from table2) select * from base" # noqa: E501 ).is_select() assert ParsedQuery( """ diff --git a/tests/unit_tests/tags/commands/create_test.py b/tests/unit_tests/tags/commands/create_test.py index 9843a2ec5..58d3325b8 100644 --- a/tests/unit_tests/tags/commands/create_test.py +++ b/tests/unit_tests/tags/commands/create_test.py @@ -64,7 +64,7 @@ def session_with_data(session: Session): session.add(saved_query) session.add(dashboard_obj) session.commit() - yield session + return session def test_create_command_success(session_with_data: Session, mocker: MockerFixture): diff --git a/tests/unit_tests/tags/commands/update_test.py b/tests/unit_tests/tags/commands/update_test.py index 16bf9180f..e22fcc2be 100644 --- a/tests/unit_tests/tags/commands/update_test.py +++ b/tests/unit_tests/tags/commands/update_test.py @@ -73,7 +73,7 @@ def session_with_data(session: Session): session.add(dashboard_obj) session.add(tag) session.commit() - yield session + return session def test_update_command_success(session_with_data: Session, mocker: MockerFixture): diff --git a/tests/unit_tests/tasks/test_cron_util.py b/tests/unit_tests/tasks/test_cron_util.py index 56f1258e3..128e4141c 100644 --- a/tests/unit_tests/tasks/test_cron_util.py +++ b/tests/unit_tests/tasks/test_cron_util.py @@ -60,7 +60,7 @@ def test_cron_schedule_window_los_angeles( datetime.fromisoformat(current_dttm), cron, "America/Los_Angeles" ) assert ( - list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected + list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected # noqa: C400 ) @@ -104,7 +104,7 @@ def test_cron_schedule_window_invalid_timezone( ) # it should default to UTC assert ( - list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected + list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected # noqa: C400 ) @@ -147,7 +147,7 @@ def test_cron_schedule_window_new_york( datetime.fromisoformat(current_dttm), cron, "America/New_York" ) assert ( - list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected + list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected # noqa: C400 ) @@ -190,7 +190,7 @@ def test_cron_schedule_window_chicago( datetime.fromisoformat(current_dttm), cron, "America/Chicago" ) assert ( - list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected + list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected # noqa: C400 ) @@ -233,5 +233,5 @@ def test_cron_schedule_window_chicago_daylight( datetime.fromisoformat(current_dttm), cron, "America/Chicago" ) assert ( - list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected + list(cron.strftime("%A, %d %B %Y, %H:%M:%S") for cron in datetimes) == expected # noqa: C400 ) diff --git a/tests/unit_tests/thumbnails/test_digest.py 
diff --git a/tests/unit_tests/thumbnails/test_digest.py b/tests/unit_tests/thumbnails/test_digest.py
index 2ce26c55c..b08b89691 100644
--- a/tests/unit_tests/thumbnails/test_digest.py
+++ b/tests/unit_tests/thumbnails/test_digest.py
@@ -47,7 +47,7 @@ _DEFAULT_CHART_KWARGS = {
 }


-def CUSTOM_DASHBOARD_FUNC(
+def CUSTOM_DASHBOARD_FUNC(  # noqa: N802
     dashboard: Dashboard,
     executor_type: ExecutorType,
     executor: str,
@@ -55,7 +55,7 @@ def CUSTOM_DASHBOARD_FUNC(
     return f"{dashboard.id}.{executor_type.value}.{executor}"


-def CUSTOM_CHART_FUNC(
+def CUSTOM_CHART_FUNC(  # noqa: N802
     chart: Slice,
     executor_type: ExecutorType,
     executor: str,
diff --git a/tests/unit_tests/utils/date_parser_tests.py b/tests/unit_tests/utils/date_parser_tests.py
index a5a3f8b0a..3837cf8ac 100644
--- a/tests/unit_tests/utils/date_parser_tests.py
+++ b/tests/unit_tests/utils/date_parser_tests.py
@@ -38,7 +38,7 @@ from superset.utils.date_parser import (
 from tests.unit_tests.conftest import with_feature_flags


-def mock_parse_human_datetime(s: str) -> Optional[datetime]:
+def mock_parse_human_datetime(s: str) -> Optional[datetime]:  # noqa: C901
     if s == "now":
         return datetime(2016, 11, 7, 9, 30, 10)
     elif s == "2018":
@@ -230,7 +230,7 @@ def test_get_since_until() -> None:
     expected = datetime(1999, 12, 25), datetime(2017, 12, 25)
     assert result == expected

-    with pytest.raises(ValueError):
+    with pytest.raises(ValueError):  # noqa: PT011
         get_since_until(time_range="tomorrow : yesterday")
@@ -420,12 +420,12 @@ def test_datetime_eval() -> None:
     assert result == -9

     result = datetime_eval(
-        "datediff(datetime('2018-01-01T00:00:00'), datetime('2018-01-10T00:00:00'), day)"  # pylint: disable=line-too-long,useless-suppression
+        "datediff(datetime('2018-01-01T00:00:00'), datetime('2018-01-10T00:00:00'), day)"  # pylint: disable=line-too-long,useless-suppression # noqa: E501
     )
     assert result == 9

     result = datetime_eval(
-        "datediff(datetime('2018-01-01T00:00:00'), datetime('2018-01-10T00:00:00'), year)"  # pylint: disable=line-too-long,useless-suppression
+        "datediff(datetime('2018-01-01T00:00:00'), datetime('2018-01-10T00:00:00'), year)"  # pylint: disable=line-too-long,useless-suppression # noqa: E501
     )
     assert result == 0
diff --git a/tests/unit_tests/utils/json_tests.py b/tests/unit_tests/utils/json_tests.py
index 0a302dfb4..39643cbd5 100644
--- a/tests/unit_tests/utils/json_tests.py
+++ b/tests/unit_tests/utils/json_tests.py
@@ -183,7 +183,7 @@ def test_sensitive_fields() -> None:
     }

     new_payload = copy.deepcopy(redacted_payload)
-    new_payload["credentials"]["user_token"] = "NEW_TOKEN"
+    new_payload["credentials"]["user_token"] = "NEW_TOKEN"  # noqa: S105

     assert json.reveal_sensitive(payload, new_payload, sensitive_fields) == {
         "password": "SECRET",
diff --git a/tests/unit_tests/utils/oauth2_tests.py b/tests/unit_tests/utils/oauth2_tests.py
index 19e8ad5aa..e9aa283b1 100644
--- a/tests/unit_tests/utils/oauth2_tests.py
+++ b/tests/unit_tests/utils/oauth2_tests.py
@@ -43,7 +43,7 @@ def test_get_oauth2_access_token_base_token_valid(mocker: MockerFixture) -> None
     db = mocker.patch("superset.utils.oauth2.db")
     db_engine_spec = mocker.MagicMock()
     token = mocker.MagicMock()
-    token.access_token = "access-token"
+    token.access_token = "access-token"  # noqa: S105
     token.access_token_expiration = datetime(2024, 1, 2)
     db.session.query().filter_by().one_or_none.return_value = token
@@ -62,16 +62,16 @@ def test_get_oauth2_access_token_base_refresh(mocker: MockerFixture) -> None:
         "expires_in": 3600,
     }
     token = mocker.MagicMock()
-    token.access_token = "access-token"
+    token.access_token = "access-token"  # noqa: S105
     token.access_token_expiration = datetime(2024, 1, 1)
-    token.refresh_token = "refresh-token"
+    token.refresh_token = "refresh-token"  # noqa: S105
     db.session.query().filter_by().one_or_none.return_value = token

     with freeze_time("2024-01-02"):
         assert get_oauth2_access_token({}, 1, 1, db_engine_spec) == "new-token"

     # check that token was updated
-    assert token.access_token == "new-token"
+    assert token.access_token == "new-token"  # noqa: S105
     assert token.access_token_expiration == datetime(2024, 1, 2, 1)
     db.session.add.assert_called_with(token)
@@ -83,7 +83,7 @@ def test_get_oauth2_access_token_base_no_refresh(mocker: MockerFixture) -> None:
     db = mocker.patch("superset.utils.oauth2.db")
     db_engine_spec = mocker.MagicMock()
     token = mocker.MagicMock()
-    token.access_token = "access-token"
+    token.access_token = "access-token"  # noqa: S105
     token.access_token_expiration = datetime(2024, 1, 1)
     token.refresh_token = None
     db.session.query().filter_by().one_or_none.return_value = token
diff --git a/tests/unit_tests/utils/test_core.py b/tests/unit_tests/utils/test_core.py
index 2ebec87c2..cdeb58a50 100644
--- a/tests/unit_tests/utils/test_core.py
+++ b/tests/unit_tests/utils/test_core.py
@@ -220,7 +220,7 @@ def test_check_if_safe_zip_success(app_context: None) -> None:
     """
     Test if ZIP files are safe
     """
-    ZipFile = MagicMock()
+    ZipFile = MagicMock()  # noqa: N806
     ZipFile.infolist.return_value = [
         MockZipInfo(file_size=1000, compress_size=10),
         MockZipInfo(file_size=1000, compress_size=10),
@@ -235,7 +235,7 @@ def test_check_if_safe_zip_high_rate(app_context: None) -> None:
     """
     Test if ZIP files is not highly compressed
     """
-    ZipFile = MagicMock()
+    ZipFile = MagicMock()  # noqa: N806
    ZipFile.infolist.return_value = [
         MockZipInfo(file_size=1000, compress_size=1),
         MockZipInfo(file_size=1000, compress_size=1),
@@ -251,7 +251,7 @@ def test_check_if_safe_zip_hidden_bomb(app_context: None) -> None:
     """
     Test if ZIP file does not contain a big file highly compressed
     """
-    ZipFile = MagicMock()
+    ZipFile = MagicMock()  # noqa: N806
     ZipFile.infolist.return_value = [
         MockZipInfo(file_size=1000, compress_size=100),
         MockZipInfo(file_size=1000, compress_size=100),