Version: 0.16.10

SparkAzureBlobStorageDatasource

class great_expectations.datasource.fluent.SparkAzureBlobStorageDatasource(*, type: Literal['spark_abs'] = 'spark_abs', name: str, id: Optional[uuid.UUID] = None, assets: List[Union[great_expectations.datasource.fluent.spark_file_path_datasource.CSVAsset, great_expectations.datasource.fluent.spark_file_path_datasource.DirectoryCSVAsset, great_expectations.datasource.fluent.spark_file_path_datasource.ParquetAsset]] = [], spark_config: Optional[Dict[pydantic.types.StrictStr, Union[pydantic.types.StrictStr, pydantic.types.StrictInt, pydantic.types.StrictFloat, pydantic.types.StrictBool]]] = None, force_reuse_spark_context: bool = True, azure_options: Dict[str, Union[great_expectations.datasource.fluent.config_str.ConfigStr, Any]] = {})
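A minimal usage sketch, assuming an existing Data Context and the fluent `context.sources.add_spark_abs` factory for this datasource type; the datasource name and the values inside `azure_options` are placeholders:

```python
import great_expectations as gx

context = gx.get_context()

# Register a Spark-backed Azure Blob Storage datasource. The account URL and
# credential below are placeholders; substitute your own storage account
# details (or a config-provider reference).
datasource = context.sources.add_spark_abs(
    name="my_spark_abs_datasource",
    azure_options={
        "account_url": "<YOUR_STORAGE_ACCOUNT>.blob.core.windows.net",
        "credential": "<YOUR_CREDENTIAL>",
    },
)
```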
add_csv_asset(name: str, *, id: Optional[uuid.UUID] = None, order_by: List[great_expectations.datasource.fluent.interfaces.Sorter] = None, batch_metadata: Dict[str, Any] = None, batching_regex: Pattern = re.compile('.*'), connect_options: Mapping = None, header: bool = False, InferSchema: bool = False) → pydantic.BaseModel
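A sketch of adding a CSV asset to the datasource above; the asset name, container, prefix, and batching regex are illustrative, and the `connect_options` keys shown (`abs_container`, `abs_name_starts_with`) are assumed Azure Blob Storage connect options for this release:

```python
# Group CSV files in the container into batches by year/month captured
# from the file name.
csv_asset = datasource.add_csv_asset(
    name="taxi_csv_asset",
    batching_regex=r"yellow_tripdata_(?P<year>\d{4})-(?P<month>\d{2})\.csv",
    connect_options={
        "abs_container": "my-container",
        "abs_name_starts_with": "data/taxi/",
    },
    header=True,
    InferSchema=True,
)

# Request a single batch by filling in the regex group values.
batch_request = csv_asset.build_batch_request({"year": "2021", "month": "01"})
```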
add_directory_csv_asset(name: str, *, id: Optional[uuid.UUID] = None, order_by: List[great_expectations.datasource.fluent.interfaces.Sorter] = None, batch_metadata: Dict[str, Any] = None, batching_regex: Pattern = re.compile('.*'), connect_options: Mapping = None, data_directory: pathlib.Path, header: bool = False, InferSchema: bool = False) → pydantic.BaseModel
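A sketch of adding a directory CSV asset, which reads every CSV file under `data_directory` as a single asset rather than one batch per file; the names and paths are placeholders:

```python
import pathlib

directory_csv_asset = datasource.add_directory_csv_asset(
    name="taxi_directory_asset",
    data_directory=pathlib.Path("data/taxi/"),
    connect_options={"abs_container": "my-container"},
    header=True,
)
```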
add_parquet_asset(name: str, *, id: Optional[uuid.UUID] = None, order_by: List[great_expectations.datasource.fluent.interfaces.Sorter] = None, batch_metadata: Dict[str, Any] = None, batching_regex: Pattern = re.compile('.*'), connect_options: Mapping = None, datetimeRebaseMode: Literal['EXCEPTION', 'CORRECTED', 'LEGACY'], int96RebaseMode: Literal['EXCEPTION', 'CORRECTED', 'LEGACY'], mergeSchema: bool = False) → pydantic.BaseModel
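A sketch of adding a Parquet asset; per the signature above, `datetimeRebaseMode` and `int96RebaseMode` have no defaults and must be supplied. The asset name, container, and regex are placeholders:

```python
parquet_asset = datasource.add_parquet_asset(
    name="events_parquet_asset",
    batching_regex=r"events_(?P<date>\d{4}-\d{2}-\d{2})\.parquet",
    connect_options={"abs_container": "my-container"},
    datetimeRebaseMode="CORRECTED",  # how Spark rebases dates/timestamps from older writers
    int96RebaseMode="CORRECTED",     # how Spark rebases INT96 timestamps
    mergeSchema=False,
)
```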