Skip to content

Expect column values to not be null or empty string

Expectation to check if column value is not null or empty string.

ColumnValuesNotNullOrEpmtyString

Bases: ColumnMapMetricProvider

Asserts that column values are not null or empty string.

Source code in mkdocs/lakehouse_engine/packages/dq_processors/custom_expectations/expect_column_values_to_not_be_null_or_empty_string.py
class ColumnValuesNotNullOrEpmtyString(ColumnMapMetricProvider):
    """Metric provider asserting column values are neither null nor "".

    NOTE(review): the class name carries a historical typo ("Epmty");
    it is kept unchanged for backward compatibility with existing
    references. The metric name below is spelled correctly.
    """

    # Metric identifier used by the matching expectation's `map_metric`.
    condition_metric_name = "column_values.not_null_or_empty_string"
    # Null rows must reach the condition so they can be flagged as failures.
    filter_column_isnull = False
    condition_domain_keys = (
        "batch_id",
        "table",
        "column",
        "ignore_row_if",
    )  # type: ignore
    condition_value_keys = ()

    @column_condition_partial(engine=SparkDFExecutionEngine)
    def _spark(
        self: ColumnMapMetricProvider,
        column: Any,
        **kwargs: dict,
    ) -> Any:
        """Evaluate the metric condition on a Spark column.

        Args:
            column: Name of column to validate.
            kwargs: dict with additional parameters.

        Returns:
            If the condition is met.
        """
        has_value = column.isNotNull()
        is_non_empty = column != ""
        return has_value & is_non_empty

ExpectColumnValuesToNotBeNullOrEmptyString

Bases: ColumnMapExpectation

Expect values in the column to be neither null nor an empty string.

Parameters:

Name Type Description Default
column

Name of column to validate.

required
kwargs

dict with additional parameters.

required

Other Parameters:

Name Type Description
allow_cross_type_comparisons

If True, allow comparisons between types (e.g. integer and string). Otherwise, attempting such comparisons will raise an exception.

ignore_row_if

"both_values_are_missing", "either_value_is_missing", "neither" (default).

result_format

Which output mode to use: BOOLEAN_ONLY, BASIC (default), COMPLETE, or SUMMARY.

include_config

If True (default), then include the expectation config as part of the result object.

catch_exceptions

If True, then catch exceptions and include them as part of the result object. Default: False.

meta

A JSON-serializable dictionary (nesting allowed) that will be included in the output without modification.

Returns:

Type Description

An ExpectationSuiteValidationResult.

Source code in mkdocs/lakehouse_engine/packages/dq_processors/custom_expectations/expect_column_values_to_not_be_null_or_empty_string.py
class ExpectColumnValuesToNotBeNullOrEmptyString(ColumnMapExpectation):
    """Expect value in column to be not null or empty string.

    Args:
        column: Name of column to validate.
        kwargs: dict with additional parameters.

    Keyword Args:
        allow_cross_type_comparisons: If True, allow
            comparisons between types (e.g. integer and string).
            Otherwise, attempting such comparisons will raise an exception.
        ignore_row_if: "both_values_are_missing",
            "either_value_is_missing", "neither" (default).
        result_format: Which output mode to use:
            `BOOLEAN_ONLY`, `BASIC` (default), `COMPLETE`, or `SUMMARY`.
        include_config: If True (default), then include the expectation config
            as part of the result object.
        catch_exceptions: If True, then catch exceptions and
            include them as part of the result object. Default: False.
        meta: A JSON-serializable dictionary (nesting allowed)
            that will be included in the output without modification.

    Returns:
        An ExpectationSuiteValidationResult.
    """

    # Default configuration values for the expectation.
    mostly: float = 1.0
    ignore_row_if: str = "neither"
    result_format: dict = {"result_format": "BASIC"}
    include_config: bool = True
    catch_exceptions: bool = False
    column: Any = None

    # Gallery/test examples: column "a" has no empty values (positive case),
    # column "b" contains one empty string (negative case).
    examples = [
        {
            "dataset_name": "Test Dataset",
            "data": [
                {
                    "data": {
                        "a": ["4061622965678", "4061622965679", "4061622965680"],
                        "b": ["4061622965678", "", "4061622965680"],
                    }
                }
            ],
            "schemas": {"spark": {"a": "StringType", "b": "StringType"}},
            "tests": [
                {
                    "title": "positive_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {
                        "column": "a",
                        "result_format": {
                            "result_format": "BASIC",
                            "unexpected_index_column_names": ["b"],
                        },
                    },
                    "out": {"success": True, "unexpected_index_list": []},
                },
                {
                    "title": "negative_test",
                    "exact_match_out": False,
                    "include_in_gallery": True,
                    "in": {
                        "column": "b",
                        "result_format": {
                            "result_format": "COMPLETE",
                            "unexpected_index_column_names": ["a"],
                        },
                    },
                    "out": {
                        "success": False,
                        "unexpected_index_list": [
                            {"a": "4061622965679", "b": ""}
                        ],
                    },
                },
            ],
        },
    ]

    # Binds this expectation to the metric provider defined above.
    map_metric = "column_values.not_null_or_empty_string"
    success_keys = ("column", "ignore_row_if", "mostly")

    def _validate(
        self,
        metrics: Dict,
        runtime_configuration: Optional[dict] = None,
        execution_engine: Optional[ExecutionEngine] = None,
    ) -> Any:
        """Custom implementation of the GE _validate method.

        This method is used on the tests to validate both the result
        of the tests themselves and if the unexpected index list
        is correctly generated.
        The GE test logic does not do this validation, and thus
        we need to make it manually.

        Args:
            metrics: Test result metrics.
            runtime_configuration: Configuration used when running the expectation.
            execution_engine: Execution Engine where the expectation was run.

        Returns:
            Dictionary with the result of the validation.
        """
        # Manually verify the unexpected index list before delegating to GE.
        validate_result(self, metrics)

        result = super()._validate(
            metrics, runtime_configuration, execution_engine
        )
        return result