diff --git a/Cargo.toml b/Cargo.toml index 98b2768459e..2b1e36cb2d8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -322,7 +322,6 @@ derive_partial_eq_without_eq = "deny" equatable_if_let = "deny" exit = "deny" expect_fun_call = "deny" -expect_used = "deny" fallible_impl_from = "deny" get_unwrap = "deny" host_endian_bytes = "deny" @@ -342,7 +341,6 @@ same_name_method = "deny" tests_outside_test_module = "deny" # todo = "deny" # unimplemented = "deny" -unwrap_in_result = "deny" unwrap_used = "deny" use_debug = "deny" diff --git a/STYLE.md b/STYLE.md index 67f331220e7..fbb332f0e93 100644 --- a/STYLE.md +++ b/STYLE.md @@ -54,7 +54,6 @@ - `vortex_panic!` for handling invariant violations - Add context to errors using `.with_context()` - Include backtraces for better debugging -- Use `VortexExpect` trait when unwrapping is appropriate with proper error context. ## Code Structure diff --git a/benchmarks/duckdb-bench/src/lib.rs b/benchmarks/duckdb-bench/src/lib.rs index f6da732aef4..c4be48b903a 100644 --- a/benchmarks/duckdb-bench/src/lib.rs +++ b/benchmarks/duckdb-bench/src/lib.rs @@ -9,7 +9,6 @@ use std::time::Instant; use anyhow::Result; use tracing::trace; -use vortex::error::VortexExpect; use vortex_bench::Benchmark; use vortex_bench::Format; use vortex_bench::IdempotentPath; @@ -66,7 +65,7 @@ impl DuckClient { path: Option, threads: Option, ) -> Result<(Database, Connection)> { - let mut config = Config::new().vortex_expect("failed to create duckdb config"); + let mut config = Config::new().expect("failed to create duckdb config"); // Set DuckDB thread count if specified if let Some(thread_count) = threads { @@ -140,7 +139,7 @@ impl DuckClient { let result = self.connection.query(query)?; let query_time = time_instant.elapsed(); - let row_count = usize::try_from(result.row_count()).vortex_expect("row count overflow"); + let row_count = usize::try_from(result.row_count()).expect("row count overflow"); // TODO: Extract DuckDB's internal timing from profiling info if 
available Ok((row_count, Some(query_time))) diff --git a/encodings/alp/src/alp/array.rs b/encodings/alp/src/alp/array.rs index 2e235926de1..0fc61732896 100644 --- a/encodings/alp/src/alp/array.rs +++ b/encodings/alp/src/alp/array.rs @@ -31,7 +31,6 @@ use vortex_array::vtable::ValidityVTableFromChild; use vortex_array::vtable::VisitorVTable; use vortex_dtype::DType; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -109,7 +108,14 @@ impl VTable for ALPVTable { let values = children.get(2, dtype, p.len()?)?; let chunk_offsets = p .chunk_offsets_dtype()? - .map(|dtype| children.get(3, &dtype, usize::try_from(p.chunk_offsets_len())?)) + .map(|dtype| { + children.get( + 3, + &dtype, + usize::try_from(p.chunk_offsets_len()) + .expect("offsets length must fit in usize"), + ) + }) .transpose()?; Patches::new(len, p.offset()?, indices, values, chunk_offsets) @@ -119,8 +125,8 @@ impl VTable for ALPVTable { ALPArray::try_new( encoded, Exponents { - e: u8::try_from(metadata.exp_e)?, - f: u8::try_from(metadata.exp_f)?, + e: u8::try_from(metadata.exp_e).expect("e exponent must fit in u8"), + f: u8::try_from(metadata.exp_f).expect("f exponent must fit in u8"), }, patches, ) @@ -298,7 +304,7 @@ impl ALPArray { /// See [`ALPArray::try_new`] for reference on preconditions that must pass before /// calling this method. 
pub fn new(encoded: ArrayRef, exponents: Exponents, patches: Option) -> Self { - Self::try_new(encoded, exponents, patches).vortex_expect("ALPArray new") + Self::try_new(encoded, exponents, patches).expect("ALPArray new") } /// Build a new `ALPArray` from components: diff --git a/encodings/alp/src/alp/compute/cast.rs b/encodings/alp/src/alp/compute/cast.rs index 00f3387f0a7..f50360e79ad 100644 --- a/encodings/alp/src/alp/compute/cast.rs +++ b/encodings/alp/src/alp/compute/cast.rs @@ -73,7 +73,6 @@ mod tests { use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::alp_encode; @@ -146,7 +145,7 @@ mod tests { #[case(buffer![42.42f64].into_array())] #[case(buffer![0.0f32, -1.5, 2.5, -3.5, 4.5].into_array())] fn test_cast_alp_conformance(#[case] array: vortex_array::ArrayRef) -> VortexResult<()> { - let alp = alp_encode(&array.to_primitive(), None).vortex_expect("cannot fail"); + let alp = alp_encode(&array.to_primitive(), None).expect("cannot fail"); test_cast_conformance(alp.as_ref()); Ok(()) diff --git a/encodings/alp/src/alp/ops.rs b/encodings/alp/src/alp/ops.rs index a0e6388a4dc..5fcedaeb2a5 100644 --- a/encodings/alp/src/alp/ops.rs +++ b/encodings/alp/src/alp/ops.rs @@ -3,7 +3,6 @@ use vortex_array::scalar::Scalar; use vortex_array::vtable::OperationsVTable; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::ALPArray; @@ -23,7 +22,7 @@ impl OperationsVTable for ALPVTable { Ok(match_each_alp_float_ptype!(array.ptype(), |T| { let encoded_val: ::ALPInt = - (&encoded_val).try_into().vortex_expect("invalid ALPInt"); + (&encoded_val).try_into().expect("invalid ALPInt"); Scalar::primitive( ::decode_single(encoded_val, array.exponents()), array.dtype().nullability(), diff --git a/encodings/alp/src/alp_rd/array.rs b/encodings/alp/src/alp_rd/array.rs index eeab80a1639..ec032db9d2d 100644 --- a/encodings/alp/src/alp_rd/array.rs +++ 
b/encodings/alp/src/alp_rd/array.rs @@ -36,7 +36,6 @@ use vortex_buffer::Buffer; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -90,7 +89,7 @@ impl VTable for ALPRDVTable { dict_len: array.left_parts_dictionary().len() as u32, dict, left_parts_ptype: PType::try_from(array.left_parts().dtype()) - .vortex_expect("Must be a valid PType") as i32, + .expect("Must be a valid PType") as i32, patches: array .left_parts_patches() .map(|p| p.to_metadata(array.len(), array.left_parts().dtype())) diff --git a/encodings/alp/src/alp_rd/mod.rs b/encodings/alp/src/alp_rd/mod.rs index 77ccfe4872a..9d58b5b2911 100644 --- a/encodings/alp/src/alp_rd/mod.rs +++ b/encodings/alp/src/alp_rd/mod.rs @@ -33,7 +33,6 @@ use vortex_buffer::BufferMut; use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; use vortex_utils::aliases::hash_map::HashMap; @@ -233,7 +232,7 @@ impl RDEncoder { // SAFETY: by construction, all values in left_parts can be packed to left_bit_width. let packed_left = unsafe { bitpack_encode_unchecked(primitive_left, left_bit_width as _) - .vortex_expect("bitpack_encode_unchecked should succeed for left parts") + .expect("bitpack_encode_unchecked should succeed for left parts") .into_array() }; @@ -241,7 +240,7 @@ impl RDEncoder { // SAFETY: by construction, all values in right_parts are right_bit_width + leading zeros. 
let packed_right = unsafe { bitpack_encode_unchecked(primitive_right, self.right_bit_width as _) - .vortex_expect("bitpack_encode_unchecked should succeed for right parts") + .expect("bitpack_encode_unchecked should succeed for right parts") .into_array() }; @@ -256,9 +255,7 @@ impl RDEncoder { // SAFETY: We calculate bw such that it is wide enough to hold the largest position index. let packed_pos = unsafe { bitpack_encode_unchecked(exc_pos_array, bw) - .vortex_expect( - "bitpack_encode_unchecked should succeed for exception positions", - ) + .expect("bitpack_encode_unchecked should succeed for exception positions") .into_array() }; @@ -270,7 +267,7 @@ impl RDEncoder { // TODO(0ax1): handle chunk offsets None, ) - .vortex_expect("Patches construction in encode") + .expect("Patches construction in encode") }); ALPRDArray::try_new( @@ -281,7 +278,7 @@ impl RDEncoder { self.right_bit_width, exceptions, ) - .vortex_expect("ALPRDArray construction in encode") + .expect("ALPRDArray construction in encode") } } diff --git a/encodings/alp/src/alp_rd/ops.rs b/encodings/alp/src/alp_rd/ops.rs index 5bba5567344..27892d653ee 100644 --- a/encodings/alp/src/alp_rd/ops.rs +++ b/encodings/alp/src/alp_rd/ops.rs @@ -4,7 +4,6 @@ use vortex_array::Array; use vortex_array::scalar::Scalar; use vortex_array::vtable::OperationsVTable; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::ALPRDArray; @@ -22,14 +21,14 @@ impl OperationsVTable for ALPRDVTable { Some(patched_value) => patched_value .as_primitive() .as_::() - .vortex_expect("patched values must be non-null"), + .expect("patched values must be non-null"), _ => { let left_code: u16 = array .left_parts() .scalar_at(index)? .as_primitive() .as_::() - .vortex_expect("left_code must be non-null"); + .expect("left_code must be non-null"); array.left_parts_dictionary()[left_code as usize] } }; @@ -41,7 +40,7 @@ impl OperationsVTable for ALPRDVTable { .scalar_at(index)? 
.as_primitive() .as_::() - .vortex_expect("non-null"); + .expect("non-null"); let packed = f32::from_bits((left as u32) << array.right_bit_width() | right); Scalar::primitive(packed, array.dtype().nullability()) } else { @@ -50,7 +49,7 @@ impl OperationsVTable for ALPRDVTable { .scalar_at(index)? .as_primitive() .as_::() - .vortex_expect("non-null"); + .expect("non-null"); let packed = f64::from_bits(((left as u64) << array.right_bit_width()) | right); Scalar::primitive(packed, array.dtype().nullability()) }) diff --git a/encodings/bytebool/src/array.rs b/encodings/bytebool/src/array.rs index 67918e87336..ef79bf053f7 100644 --- a/encodings/bytebool/src/array.rs +++ b/encodings/bytebool/src/array.rs @@ -32,7 +32,6 @@ use vortex_array::vtable::validity_nchildren; use vortex_buffer::BitBuffer; use vortex_buffer::ByteBuffer; use vortex_dtype::DType; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -109,7 +108,7 @@ impl VTable for ByteBoolVTable { array.validity = if children.is_empty() { Validity::from(array.dtype.nullability()) } else { - Validity::Array(children.into_iter().next().vortex_expect("checked")) + Validity::Array(children.into_iter().next().expect("checked")) }; Ok(()) diff --git a/encodings/datetime-parts/src/array.rs b/encodings/datetime-parts/src/array.rs index 68f1c625732..2bc73ff3ac5 100644 --- a/encodings/datetime-parts/src/array.rs +++ b/encodings/datetime-parts/src/array.rs @@ -30,7 +30,6 @@ use vortex_array::vtable::VisitorVTable; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -152,9 +151,9 @@ impl VTable for DateTimePartsVTable { ); let mut children_iter = children.into_iter(); - array.days = children_iter.next().vortex_expect("checked"); - array.seconds = 
children_iter.next().vortex_expect("checked"); - array.subseconds = children_iter.next().vortex_expect("checked"); + array.days = children_iter.next().expect("checked"); + array.seconds = children_iter.next().expect("checked"); + array.subseconds = children_iter.next().expect("checked"); Ok(()) } diff --git a/encodings/datetime-parts/src/canonical.rs b/encodings/datetime-parts/src/canonical.rs index 066f2994a80..38e4167d0b5 100644 --- a/encodings/datetime-parts/src/canonical.rs +++ b/encodings/datetime-parts/src/canonical.rs @@ -14,7 +14,6 @@ use vortex_dtype::PType; use vortex_dtype::datetime::TimeUnit; use vortex_dtype::datetime::Timestamp; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -46,7 +45,7 @@ pub fn decode_to_temporal( let days_buf = array .days() .cast(DType::Primitive(PType::I64, array.dtype().nullability())) - .vortex_expect("must be able to cast days to i64") + .expect("must be able to cast days to i64") .execute::(ctx)?; // We start with the days component, which is always present. 
@@ -58,10 +57,7 @@ pub fn decode_to_temporal( .map_each_in_place(|d| d * 86_400 * divisor); if let Some(seconds) = array.seconds().as_constant() { - let seconds = seconds - .as_primitive() - .as_::() - .vortex_expect("non-nullable"); + let seconds = seconds.as_primitive().as_::().expect("non-nullable"); let seconds = seconds * divisor; for v in values.iter_mut() { *v += seconds; @@ -80,7 +76,7 @@ pub fn decode_to_temporal( let subseconds = subseconds .as_primitive() .as_::() - .vortex_expect("non-nullable"); + .expect("non-nullable"); for v in values.iter_mut() { *v += subseconds; } diff --git a/encodings/datetime-parts/src/compute/rules.rs b/encodings/datetime-parts/src/compute/rules.rs index 768ac4a9218..af5ac385509 100644 --- a/encodings/datetime-parts/src/compute/rules.rs +++ b/encodings/datetime-parts/src/compute/rules.rs @@ -22,7 +22,6 @@ use vortex_array::optimizer::rules::ArrayParentReduceRule; use vortex_array::optimizer::rules::ParentRuleSet; use vortex_dtype::DType; use vortex_dtype::datetime::Timestamp; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::DateTimePartsArray; @@ -62,12 +61,12 @@ impl ArrayParentReduceRule for DTPFilterPushDownRule { child.dtype().clone(), child.days().clone().filter(parent.filter_mask().clone())?, ConstantArray::new( - child.seconds().as_constant().vortex_expect("constant"), + child.seconds().as_constant().expect("constant"), parent.filter_mask().true_count(), ) .into_array(), ConstantArray::new( - child.subseconds().as_constant().vortex_expect("constant"), + child.subseconds().as_constant().expect("constant"), parent.filter_mask().true_count(), ) .into_array(), diff --git a/encodings/datetime-parts/src/ops.rs b/encodings/datetime-parts/src/ops.rs index be159ef670f..74f3f427bdb 100644 --- a/encodings/datetime-parts/src/ops.rs +++ b/encodings/datetime-parts/src/ops.rs @@ -6,7 +6,6 @@ use vortex_array::scalar::Scalar; use vortex_array::vtable::OperationsVTable; use vortex_dtype::DType; use 
vortex_dtype::datetime::Timestamp; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -37,19 +36,19 @@ impl OperationsVTable for DateTimePartsVTable { .scalar_at(index)? .as_primitive() .as_::() - .vortex_expect("days fits in i64"); + .expect("days fits in i64"); let seconds: i64 = array .seconds() .scalar_at(index)? .as_primitive() .as_::() - .vortex_expect("seconds fits in i64"); + .expect("seconds fits in i64"); let subseconds: i64 = array .subseconds() .scalar_at(index)? .as_primitive() .as_::() - .vortex_expect("subseconds fits in i64"); + .expect("subseconds fits in i64"); let ts = timestamp::combine( TimestampParts { diff --git a/encodings/decimal-byte-parts/src/decimal_byte_parts/compute/compare.rs b/encodings/decimal-byte-parts/src/decimal_byte_parts/compute/compare.rs index b4313ee16f6..cdd95b85be1 100644 --- a/encodings/decimal-byte-parts/src/decimal_byte_parts/compute/compare.rs +++ b/encodings/decimal-byte-parts/src/decimal_byte_parts/compute/compare.rs @@ -19,7 +19,6 @@ use vortex_dtype::PType; use vortex_dtype::ToI256; use vortex_dtype::match_each_decimal_value; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::DecimalBytePartsVTable; @@ -42,7 +41,7 @@ impl CompareKernel for DecimalBytePartsVTable { let rhs_decimal = rhs_const .as_decimal() .decimal_value() - .vortex_expect("checked for null in entry func"); + .expect("checked for null in entry func"); match decimal_value_wrapper_to_primitive(rhs_decimal, lhs.msp.as_primitive_typed().ptype()) { @@ -110,11 +109,11 @@ where let Some(encoded) =

::from(decimal_v) else { let decimal_i256 = decimal_v .to_i256() - .vortex_expect("i256 is big enough for any DecimalValue"); + .expect("i256 is big enough for any DecimalValue"); return if decimal_i256 > P::max_value() .to_i256() - .vortex_expect("i256 is big enough for any PType") + .expect("i256 is big enough for any PType") { Err(Positive) } else { @@ -122,7 +121,7 @@ where decimal_i256 < P::min_value() .to_i256() - .vortex_expect("i256 is big enough for any PType") + .expect("i256 is big enough for any PType") ); Err(Negative) }; diff --git a/encodings/decimal-byte-parts/src/decimal_byte_parts/mod.rs b/encodings/decimal-byte-parts/src/decimal_byte_parts/mod.rs index 97644129c1f..afa64a30027 100644 --- a/encodings/decimal-byte-parts/src/decimal_byte_parts/mod.rs +++ b/encodings/decimal-byte-parts/src/decimal_byte_parts/mod.rs @@ -41,7 +41,6 @@ use vortex_dtype::DType; use vortex_dtype::DecimalDType; use vortex_dtype::PType; use vortex_dtype::match_each_signed_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -124,7 +123,7 @@ impl VTable for DecimalBytePartsVTable { "DecimalBytePartsArray expects exactly 1 child (msp), got {}", children.len() ); - array.msp = children.into_iter().next().vortex_expect("checked"); + array.msp = children.into_iter().next().expect("checked"); Ok(()) } @@ -208,7 +207,7 @@ impl DecimalBytePartsArray { pub fn decimal_dtype(&self) -> &DecimalDType { self.dtype .as_decimal_opt() - .vortex_expect("must be a decimal dtype") + .expect("must be a decimal dtype") } pub(crate) fn msp(&self) -> &ArrayRef { @@ -286,7 +285,7 @@ impl OperationsVTable for DecimalBytePartsVTable { // Note. values in msp, can only be signed integers upto size i64. let primitive_scalar = scalar.as_primitive(); // TODO(joe): extend this to support multiple parts. 
- let value = primitive_scalar.as_::().vortex_expect("non-null"); + let value = primitive_scalar.as_::().expect("non-null"); Scalar::try_new( array.dtype.clone(), Some(ScalarValue::Decimal(DecimalValue::I64(value))), diff --git a/encodings/fastlanes/benches/canonicalize_bench.rs b/encodings/fastlanes/benches/canonicalize_bench.rs index e18c47c778f..8dddcbbca5a 100644 --- a/encodings/fastlanes/benches/canonicalize_bench.rs +++ b/encodings/fastlanes/benches/canonicalize_bench.rs @@ -15,7 +15,6 @@ use vortex_array::builders::ArrayBuilder; use vortex_array::builders::PrimitiveBuilder; use vortex_array::compute::warm_up_vtables; use vortex_array::session::ArraySession; -use vortex_error::VortexExpect; use vortex_fastlanes::bitpack_compress::test_harness::make_array; use vortex_session::VortexSession; @@ -50,9 +49,7 @@ fn into_canonical_non_nullable( let mut rng = StdRng::seed_from_u64(0); let chunks = (0..chunk_count) - .map(|_| { - make_array(&mut rng, chunk_len, fraction_patched, 0.0).vortex_expect("make_array works") - }) + .map(|_| make_array(&mut rng, chunk_len, fraction_patched, 0.0).expect("make_array works")) .collect::>(); bencher @@ -69,9 +66,7 @@ fn canonical_into_non_nullable( let mut rng = StdRng::seed_from_u64(0); let chunks = (0..chunk_count) - .map(|_| { - make_array(&mut rng, chunk_len, fraction_patched, 0.0).vortex_expect("make_array works") - }) + .map(|_| make_array(&mut rng, chunk_len, fraction_patched, 0.0).expect("make_array works")) .collect::>(); bencher @@ -86,7 +81,7 @@ fn canonical_into_non_nullable( .bench_refs(|(chunked, primitive_builder)| { chunked .append_to_builder(primitive_builder, &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); primitive_builder.finish() }); } @@ -110,10 +105,7 @@ fn into_canonical_nullable( let mut rng = StdRng::seed_from_u64(0); let chunks = (0..chunk_count) - .map(|_| { - make_array(&mut rng, chunk_len, fraction_patched, 0.05) - .vortex_expect("make_array works") 
- }) + .map(|_| make_array(&mut rng, chunk_len, fraction_patched, 0.05).expect("make_array works")) .collect::>(); bencher @@ -130,10 +122,7 @@ fn canonical_into_nullable( let mut rng = StdRng::seed_from_u64(0); let chunks = (0..chunk_count) - .map(|_| { - make_array(&mut rng, chunk_len, fraction_patched, 0.05) - .vortex_expect("make_array works") - }) + .map(|_| make_array(&mut rng, chunk_len, fraction_patched, 0.05).expect("make_array works")) .collect::>(); bencher @@ -148,7 +137,7 @@ fn canonical_into_nullable( .bench_refs(|(chunked, primitive_builder)| { chunked .append_to_builder(primitive_builder, &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); primitive_builder.finish() }); } diff --git a/encodings/fastlanes/benches/compute_between.rs b/encodings/fastlanes/benches/compute_between.rs index 27938cffd70..01f6a831f32 100644 --- a/encodings/fastlanes/benches/compute_between.rs +++ b/encodings/fastlanes/benches/compute_between.rs @@ -13,7 +13,6 @@ use vortex_array::ToCanonical; use vortex_array::arrays::PrimitiveArray; use vortex_array::compute::warm_up_vtables; use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_fastlanes::bitpack_compress::bitpack_to_best_bit_width; fn main() { @@ -26,7 +25,7 @@ fn generate_primitive_array( len: usize, ) -> PrimitiveArray { (0..len) - .map(|_| T::from_usize(rng.random_range(0..10_000)).vortex_expect("")) + .map(|_| T::from_usize(rng.random_range(0..10_000)).expect("")) .collect::() } @@ -35,10 +34,10 @@ fn generate_bit_pack_primitive_array( len: usize, ) -> ArrayRef { let a = (0..len) - .map(|_| T::from_usize(rng.random_range(0..10_000)).vortex_expect("")) + .map(|_| T::from_usize(rng.random_range(0..10_000)).expect("")) .collect::(); - bitpack_to_best_bit_width(&a).vortex_expect("").into_array() + bitpack_to_best_bit_width(&a).expect("").into_array() } fn generate_alp_bit_pack_primitive_array( @@ -46,17 +45,15 @@ fn 
generate_alp_bit_pack_primitive_array( len: usize, ) -> ArrayRef { let a = (0..len) - .map(|_| T::from_usize(rng.random_range(0..10_000)).vortex_expect("")) + .map(|_| T::from_usize(rng.random_range(0..10_000)).expect("")) .collect::(); - let alp = alp_encode(&a, None).vortex_expect(""); + let alp = alp_encode(&a, None).expect(""); let encoded = alp.encoded().to_primitive(); - let bp = bitpack_to_best_bit_width(&encoded) - .vortex_expect("") - .into_array(); - ALPArray::new(bp, alp.exponents(), None).into_array() + let bp = bitpack_to_best_bit_width(&encoded).expect("").into_array(); + ALPArray::new(bp, alp.exponents(), alp.patches().cloned()).into_array() } const BENCH_ARGS: &[usize] = &[2 << 10, 2 << 13, 2 << 14]; @@ -79,7 +76,6 @@ mod primitive { use vortex_array::expr::BetweenOptions; use vortex_array::expr::StrictComparison::NonStrict; use vortex_dtype::NativePType; - use vortex_error::VortexExpect; use crate::BENCH_ARGS; use crate::generate_primitive_array; @@ -93,8 +89,8 @@ mod primitive { T: NumCast + NativePType, vortex_array::scalar::Scalar: From, { - let min = T::from_usize(5561).vortex_expect(""); - let max = T::from_usize(6032).vortex_expect(""); + let min = T::from_usize(5561).expect(""); + let max = T::from_usize(6032).expect(""); let mut rng = StdRng::seed_from_u64(0); let arr = generate_primitive_array::(&mut rng, len); @@ -105,16 +101,16 @@ mod primitive { ConstantArray::new(min, arr.len()).as_ref(), Operator::Gte, ) - .vortex_expect(""), + .expect(""), &compare( arr.as_ref(), ConstantArray::new(max, arr.len()).as_ref(), Operator::Lt, ) - .vortex_expect(""), + .expect(""), BooleanOperator::And, ) - .vortex_expect("") + .expect("") }) } @@ -127,8 +123,8 @@ mod primitive { T: NumCast + NativePType, vortex_array::scalar::Scalar: From, { - let min = T::from_usize(5561).vortex_expect(""); - let max = T::from_usize(6032).vortex_expect(""); + let min = T::from_usize(5561).expect(""); + let max = T::from_usize(6032).expect(""); let mut rng = 
StdRng::seed_from_u64(0); let arr = generate_primitive_array::(&mut rng, len); @@ -169,7 +165,6 @@ mod bitpack { use vortex_array::expr::BetweenOptions; use vortex_array::expr::StrictComparison::NonStrict; use vortex_dtype::NativePType; - use vortex_error::VortexExpect; use crate::BENCH_ARGS; use crate::generate_bit_pack_primitive_array; @@ -183,8 +178,8 @@ mod bitpack { T: NumCast + NativePType, vortex_array::scalar::Scalar: From, { - let min = T::from_usize(5561).vortex_expect(""); - let max = T::from_usize(6032).vortex_expect(""); + let min = T::from_usize(5561).expect(""); + let max = T::from_usize(6032).expect(""); let mut rng = StdRng::seed_from_u64(0); let arr = generate_bit_pack_primitive_array::(&mut rng, len); @@ -195,13 +190,13 @@ mod bitpack { ConstantArray::new(min, arr.len()).as_ref(), Operator::Gte, ) - .vortex_expect(""), + .expect(""), &compare( arr.as_ref(), ConstantArray::new(max, arr.len()).as_ref(), Operator::Lt, ) - .vortex_expect(""), + .expect(""), BooleanOperator::And, ) }) @@ -216,8 +211,8 @@ mod bitpack { T: NumCast + NativePType, vortex_array::scalar::Scalar: From, { - let min = T::from_usize(5561).vortex_expect(""); - let max = T::from_usize(6032).vortex_expect(""); + let min = T::from_usize(5561).expect(""); + let max = T::from_usize(6032).expect(""); let mut rng = StdRng::seed_from_u64(0); let arr = generate_bit_pack_primitive_array::(&mut rng, len); @@ -258,7 +253,6 @@ mod alp { use vortex_array::expr::BetweenOptions; use vortex_array::expr::StrictComparison::NonStrict; use vortex_dtype::NativePType; - use vortex_error::VortexExpect; use crate::BENCH_ARGS; use crate::generate_alp_bit_pack_primitive_array; @@ -272,8 +266,8 @@ mod alp { T: NumCast + NativePType, vortex_array::scalar::Scalar: From, { - let min = T::from_usize(5561).vortex_expect(""); - let max = T::from_usize(6032).vortex_expect(""); + let min = T::from_usize(5561).expect(""); + let max = T::from_usize(6032).expect(""); let mut rng = StdRng::seed_from_u64(0); let arr = 
generate_alp_bit_pack_primitive_array::(&mut rng, len); @@ -284,13 +278,13 @@ mod alp { ConstantArray::new(min, arr.len()).as_ref(), Operator::Gte, ) - .vortex_expect(""), + .expect(""), &compare( arr.as_ref(), ConstantArray::new(max, arr.len()).as_ref(), Operator::Lt, ) - .vortex_expect(""), + .expect(""), BooleanOperator::And, ) }) @@ -305,8 +299,8 @@ mod alp { T: NumCast + NativePType, vortex_array::scalar::Scalar: From, { - let min = T::from_usize(5561).vortex_expect(""); - let max = T::from_usize(6032).vortex_expect(""); + let min = T::from_usize(5561).expect(""); + let max = T::from_usize(6032).expect(""); let mut rng = StdRng::seed_from_u64(0); let arr = generate_alp_bit_pack_primitive_array::(&mut rng, len); diff --git a/encodings/fastlanes/src/bitpacking/array/bitpack_compress.rs b/encodings/fastlanes/src/bitpacking/array/bitpack_compress.rs index cdff8025300..eb947be85a9 100644 --- a/encodings/fastlanes/src/bitpacking/array/bitpack_compress.rs +++ b/encodings/fastlanes/src/bitpacking/array/bitpack_compress.rs @@ -18,7 +18,6 @@ use vortex_dtype::NativePType; use vortex_dtype::PType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_mask::AllOr; @@ -284,7 +283,7 @@ where if (value.leading_zeros() as usize) < T::PTYPE.bit_width() - bit_width as usize && validity_mask.value(idx) { - indices.push(P::from(idx).vortex_expect("cast index from usize")); + indices.push(P::from(idx).expect("cast index from usize")); values.push(*value); } } diff --git a/encodings/fastlanes/src/bitpacking/array/bitpack_decompress.rs b/encodings/fastlanes/src/bitpacking/array/bitpack_decompress.rs index e1ce5bbb8a2..d267c1c6384 100644 --- a/encodings/fastlanes/src/bitpacking/array/bitpack_decompress.rs +++ b/encodings/fastlanes/src/bitpacking/array/bitpack_decompress.rs @@ -17,7 +17,6 @@ use vortex_dtype::NativePType; use 
vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; use vortex_mask::Mask; @@ -47,10 +46,7 @@ pub fn unpack_to_primitive_typed(array: &BitPackedArray) -> Primit // SAFETY: `decode_into` initialized exactly `len` elements into the spare (existing) capacity. unsafe { elements.set_len(len) }; - let mut validity = array - .validity_mask() - .vortex_expect("validity_mask") - .into_mut(); + let mut validity = array.validity_mask().expect("validity_mask").into_mut(); debug_assert_eq!(validity.len(), len); // TODO(connor): Implement a fused version of patching instead. @@ -198,7 +194,7 @@ pub fn unpack_single(array: &BitPackedArray, index: usize) -> Scalar { } }); // Cast to fix signedness and nullability - scalar.cast(array.dtype()).vortex_expect("cast failure") + scalar.cast(array.dtype()).expect("cast failure") } /// # Safety diff --git a/encodings/fastlanes/src/bitpacking/compute/take.rs b/encodings/fastlanes/src/bitpacking/compute/take.rs index 2ce7c9583eb..dd1fea07927 100644 --- a/encodings/fastlanes/src/bitpacking/compute/take.rs +++ b/encodings/fastlanes/src/bitpacking/compute/take.rs @@ -21,7 +21,6 @@ use vortex_dtype::NativePType; use vortex_dtype::PType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use super::chunked_indices; @@ -77,10 +76,10 @@ fn take_primitive( let packed = array.packed_slice::(); // Group indices by 1024-element chunk, *without* allocating on the heap - let indices_iter = indices.as_slice::().iter().map(|i| { - i.to_usize() - .vortex_expect("index must be expressible as usize") - }); + let indices_iter = indices + .as_slice::() + .iter() + .map(|i| i.to_usize().expect("index must be expressible as usize")); let mut output = 
BufferMut::::with_capacity(indices.len()); let mut unpacked = [const { MaybeUninit::uninit() }; 1024]; diff --git a/encodings/fastlanes/src/bitpacking/vtable/mod.rs b/encodings/fastlanes/src/bitpacking/vtable/mod.rs index d45fcd2555a..8b45ce15ed5 100644 --- a/encodings/fastlanes/src/bitpacking/vtable/mod.rs +++ b/encodings/fastlanes/src/bitpacking/vtable/mod.rs @@ -20,7 +20,6 @@ use vortex_array::vtable::ValidityVTableFromValidityHelper; use vortex_dtype::DType; use vortex_dtype::PType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -255,7 +254,7 @@ impl VTable for BitPackedVTable { builder .as_any_mut() .downcast_mut() - .vortex_expect("bit packed array must canonicalize into a primitive array"), + .expect("bit packed array must canonicalize into a primitive array"), ctx, ) }) diff --git a/encodings/fastlanes/src/delta/array/mod.rs b/encodings/fastlanes/src/delta/array/mod.rs index 35e0718a5a7..8d43164a4f0 100644 --- a/encodings/fastlanes/src/delta/array/mod.rs +++ b/encodings/fastlanes/src/delta/array/mod.rs @@ -12,7 +12,6 @@ use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::PType; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -163,8 +162,7 @@ impl DeltaArray { #[inline] pub(crate) fn lanes(&self) -> usize { - let ptype = - PType::try_from(self.dtype()).vortex_expect("DeltaArray DType must be primitive"); + let ptype = PType::try_from(self.dtype()).expect("DeltaArray DType must be primitive"); lane_count(ptype) } diff --git a/encodings/fastlanes/src/for/array/for_decompress.rs b/encodings/fastlanes/src/for/array/for_decompress.rs index aa14ea3d565..fd05b00f0ea 100644 --- a/encodings/fastlanes/src/for/array/for_decompress.rs +++ b/encodings/fastlanes/src/for/array/for_decompress.rs @@ -15,7 +15,6 @@ use 
vortex_dtype::PhysicalPType; use vortex_dtype::UnsignedPType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::BitPackedArray; @@ -66,7 +65,7 @@ pub fn decompress(array: &FoRArray, ctx: &mut ExecutionCtx) -> VortexResult() - .vortex_expect("reference must be non-null"); + .expect("reference must be non-null"); if min == 0 { encoded } else { @@ -89,7 +88,7 @@ pub(crate) fn fused_decompress< .reference_scalar() .as_primitive() .as_::() - .vortex_expect("cannot be null"); + .expect("cannot be null"); let strategy = FoRStrategy { reference: ref_ }; diff --git a/encodings/fastlanes/src/for/compute/compare.rs b/encodings/fastlanes/src/for/compute/compare.rs index 901b6473691..0c8c805153b 100644 --- a/encodings/fastlanes/src/for/compute/compare.rs +++ b/encodings/fastlanes/src/for/compute/compare.rs @@ -17,7 +17,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; use vortex_error::VortexError; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use crate::FoRArray; @@ -38,7 +37,7 @@ impl CompareKernel for FoRVTable { lhs, constant .typed_value::() - .vortex_expect("null scalar handled in adaptor"), + .expect("null scalar handled in adaptor"), rhs.dtype().nullability(), operator, ); diff --git a/encodings/fastlanes/src/for/vtable/operations.rs b/encodings/fastlanes/src/for/vtable/operations.rs index cee10d57010..1e2e424bd4b 100644 --- a/encodings/fastlanes/src/for/vtable/operations.rs +++ b/encodings/fastlanes/src/for/vtable/operations.rs @@ -4,7 +4,6 @@ use vortex_array::scalar::Scalar; use vortex_array::vtable::OperationsVTable; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use super::FoRVTable; @@ -24,7 +23,7 @@ impl OperationsVTable for FoRVTable { v.wrapping_add( reference .typed_value::

() - .vortex_expect("FoRArray Reference value cannot be null"), + .expect("FoRArray Reference value cannot be null"), ) }) .map(|v| Scalar::primitive::

(v, array.reference_scalar().dtype().nullability())) diff --git a/encodings/fastlanes/src/rle/vtable/mod.rs b/encodings/fastlanes/src/rle/vtable/mod.rs index fc44e30b7e4..2ca855437a5 100644 --- a/encodings/fastlanes/src/rle/vtable/mod.rs +++ b/encodings/fastlanes/src/rle/vtable/mod.rs @@ -125,13 +125,13 @@ impl VTable for RLEVTable { let values = children.get( 0, &DType::Primitive(dtype.as_ptype(), Nullability::NonNullable), - usize::try_from(metadata.values_len)?, + usize::try_from(metadata.values_len).expect("values len must fit in usize"), )?; let indices = children.get( 1, &DType::Primitive(metadata.indices_ptype(), dtype.nullability()), - usize::try_from(metadata.indices_len)?, + usize::try_from(metadata.indices_len).expect("indices len must fit in usize"), )?; let values_idx_offsets = children.get( @@ -140,7 +140,8 @@ impl VTable for RLEVTable { metadata.values_idx_offsets_ptype(), Nullability::NonNullable, ), - usize::try_from(metadata.values_idx_offsets_len)?, + usize::try_from(metadata.values_idx_offsets_len) + .expect("offsets len must fit in usize"), )?; RLEArray::try_new( diff --git a/encodings/fastlanes/src/rle/vtable/operations.rs b/encodings/fastlanes/src/rle/vtable/operations.rs index 6e45f8f949e..23770548d4c 100644 --- a/encodings/fastlanes/src/rle/vtable/operations.rs +++ b/encodings/fastlanes/src/rle/vtable/operations.rs @@ -3,7 +3,6 @@ use vortex_array::scalar::Scalar; use vortex_array::vtable::OperationsVTable; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use super::RLEVTable; @@ -18,7 +17,7 @@ impl OperationsVTable for RLEVTable { let chunk_relative_idx = chunk_relative_idx .as_primitive() .as_::() - .vortex_expect("Index must not be null"); + .expect("Index must not be null"); let chunk_id = (offset_in_chunk + index) / FL_CHUNK_SIZE; let value_idx_offset = array.values_idx_offset(chunk_id); diff --git a/encodings/fsst/benches/chunked_dict_fsst_builder.rs b/encodings/fsst/benches/chunked_dict_fsst_builder.rs index 
2cd10ed7461..7cd6ef0eba6 100644 --- a/encodings/fsst/benches/chunked_dict_fsst_builder.rs +++ b/encodings/fsst/benches/chunked_dict_fsst_builder.rs @@ -13,7 +13,6 @@ use vortex_array::builders::builder_with_capacity; use vortex_array::compute::warm_up_vtables; use vortex_array::session::ArraySession; use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_fsst::test_utils::gen_dict_fsst_test_data; use vortex_session::VortexSession; @@ -56,7 +55,7 @@ fn chunked_dict_fsst_canonical_into( let mut builder = builder_with_capacity(chunk.dtype(), len * chunk_count); chunk .append_to_builder(builder.as_mut(), &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); builder.finish() }) } diff --git a/encodings/fsst/src/compress.rs b/encodings/fsst/src/compress.rs index 6b958f9fdb3..1aac659c4df 100644 --- a/encodings/fsst/src/compress.rs +++ b/encodings/fsst/src/compress.rs @@ -12,7 +12,6 @@ use vortex_array::arrays::builder::VarBinBuilder; use vortex_buffer::Buffer; use vortex_buffer::BufferMut; use vortex_dtype::DType; -use vortex_error::VortexExpect; use crate::FSSTArray; @@ -73,11 +72,8 @@ where uncompressed_lengths.push(0); } Some(s) => { - uncompressed_lengths.push( - s.len() - .try_into() - .vortex_expect("string length must fit in i32"), - ); + uncompressed_lengths + .push(s.len().try_into().expect("string length must fit in i32")); // SAFETY: buffer is large enough unsafe { compressor.compress_into(s, &mut buffer) }; @@ -94,5 +90,5 @@ where let uncompressed_lengths = uncompressed_lengths.into_array(); FSSTArray::try_new(dtype, symbols, symbol_lengths, codes, uncompressed_lengths) - .vortex_expect("building FSSTArray from parts") + .expect("building FSSTArray from parts") } diff --git a/encodings/fsst/src/compute/compare.rs b/encodings/fsst/src/compute/compare.rs index a1c19ce89ba..3154fedada8 100644 --- a/encodings/fsst/src/compute/compare.rs +++ b/encodings/fsst/src/compute/compare.rs @@ -18,7 +18,6 @@ 
use vortex_buffer::BitBuffer; use vortex_buffer::ByteBuffer; use vortex_dtype::DType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -50,11 +49,8 @@ fn compare_fsst_constant( DType::Binary(_) => right .as_binary() .is_empty() - .vortex_expect("RHS should not be null"), - DType::Utf8(_) => right - .as_utf8() - .is_empty() - .vortex_expect("RHS should not be null"), + .expect("RHS should not be null"), + DType::Utf8(_) => right.as_utf8().is_empty().expect("RHS should not be null"), _ => vortex_bail!("VarBinArray can only have type of Binary or Utf8"), }; if is_rhs_empty { @@ -92,17 +88,11 @@ fn compare_fsst_constant( let compressor = left.compressor(); let encoded_buffer = match left.dtype() { DType::Utf8(_) => { - let value = right - .as_utf8() - .value() - .vortex_expect("Expected non-null scalar"); + let value = right.as_utf8().value().expect("Expected non-null scalar"); ByteBuffer::from(compressor.compress(value.as_bytes())) } DType::Binary(_) => { - let value = right - .as_binary() - .value() - .vortex_expect("Expected non-null scalar"); + let value = right.as_binary().value().expect("Expected non-null scalar"); ByteBuffer::from(compressor.compress(value.as_slice())) } _ => unreachable!("FSSTArray can only have string or binary data type"), diff --git a/encodings/fsst/src/compute/filter.rs b/encodings/fsst/src/compute/filter.rs index e53e5685a1d..30becf12ae4 100644 --- a/encodings/fsst/src/compute/filter.rs +++ b/encodings/fsst/src/compute/filter.rs @@ -6,7 +6,6 @@ use vortex_array::ExecutionCtx; use vortex_array::IntoArray; use vortex_array::arrays::FilterKernel; use vortex_array::arrays::VarBinVTable; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -21,7 +20,7 @@ impl FilterKernel for FSSTVTable { ) -> VortexResult> { // Directly invoke VarBin's FilterKernel to get a concrete VarBinArray back. 
let filtered_codes = ::filter(array.codes(), mask, ctx)? - .vortex_expect("VarBin filter kernel always returns Some") + .expect("VarBin filter kernel always returns Some") .as_::() .clone(); diff --git a/encodings/fsst/src/compute/mod.rs b/encodings/fsst/src/compute/mod.rs index 0d841cb4213..ef072520b0c 100644 --- a/encodings/fsst/src/compute/mod.rs +++ b/encodings/fsst/src/compute/mod.rs @@ -13,7 +13,6 @@ use vortex_array::arrays::TakeExecute; use vortex_array::arrays::VarBinVTable; use vortex_array::builtins::ArrayBuiltins; use vortex_array::scalar::Scalar; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; @@ -35,7 +34,7 @@ impl TakeExecute for FSSTVTable { array.symbols().clone(), array.symbol_lengths().clone(), VarBinVTable::take(array.codes(), indices, _ctx)? - .vortex_expect("cannot fail") + .expect("cannot fail") .try_into::() .map_err(|_| vortex_err!("take for codes must return varbin array"))?, array diff --git a/encodings/fsst/src/ops.rs b/encodings/fsst/src/ops.rs index 368fe5d819b..8930b503b40 100644 --- a/encodings/fsst/src/ops.rs +++ b/encodings/fsst/src/ops.rs @@ -5,7 +5,6 @@ use vortex_array::arrays::varbin_scalar; use vortex_array::scalar::Scalar; use vortex_array::vtable::OperationsVTable; use vortex_buffer::ByteBuffer; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::FSSTArray; @@ -14,7 +13,7 @@ use crate::FSSTVTable; impl OperationsVTable for FSSTVTable { fn scalar_at(array: &FSSTArray, index: usize) -> VortexResult { let compressed = array.codes().scalar_at(index)?; - let binary_datum = compressed.as_binary().value().vortex_expect("non-null"); + let binary_datum = compressed.as_binary().value().expect("non-null"); let decoded_buffer = ByteBuffer::from(array.decompressor().decompress(binary_datum)); Ok(varbin_scalar(decoded_buffer, array.dtype())) diff --git a/encodings/fsst/src/test_utils.rs b/encodings/fsst/src/test_utils.rs index 40cfb0b3133..066c9261180 100644 --- 
a/encodings/fsst/src/test_utils.rs +++ b/encodings/fsst/src/test_utils.rs @@ -14,7 +14,6 @@ use vortex_array::arrays::VarBinArray; use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use crate::fsst_compress; use crate::fsst_train_compressor; @@ -57,5 +56,5 @@ pub fn gen_dict_fsst_test_data( .map(|_| T::from(rng.random_range(0..unique_values)).unwrap()) .collect::(); DictArray::try_new(codes.into_array(), values) - .vortex_expect("DictArray::try_new should succeed for test data") + .expect("DictArray::try_new should succeed for test data") } diff --git a/encodings/pco/src/array.rs b/encodings/pco/src/array.rs index e64661fbc88..838b146ff8f 100644 --- a/encodings/pco/src/array.rs +++ b/encodings/pco/src/array.rs @@ -51,7 +51,6 @@ use vortex_dtype::DType; use vortex_dtype::PType; use vortex_dtype::half; use vortex_error::VortexError; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -171,7 +170,7 @@ impl VTable for PcoVTable { array.unsliced_validity = Validity::from(array.dtype.nullability()); } else { array.unsliced_validity = - Validity::Array(children.into_iter().next().vortex_expect("validity child")); + Validity::Array(children.into_iter().next().expect("validity child")); } Ok(()) @@ -323,7 +322,8 @@ impl PcoArray { .map_err(vortex_err_from_pco)?; page_buffers.push(page.freeze()); page_infos.push(PcoPageInfo { - n_values: u32::try_from(page_n_values)?, + n_values: u32::try_from(page_n_values) + .expect("number of values in page must fit usize"), }); } chunk_infos.push(PcoChunkInfo { pages: page_infos }) @@ -386,7 +386,7 @@ impl PcoArray { // may exceed the bounds of the slice, so we need to slice later. 
let (fd, _) = FileDecompressor::new(self.metadata.header.as_slice()) .map_err(vortex_err_from_pco) - .vortex_expect("FileDecompressor::new should succeed with valid header"); + .expect("FileDecompressor::new should succeed with valid header"); let mut decompressed_values = BufferMut::::with_capacity(slice_n_values); let mut page_idx = 0; let mut page_value_start = 0; @@ -415,9 +415,7 @@ impl PcoArray { let (new_cd, _) = fd .chunk_decompressor(chunk_meta_bytes) .map_err(vortex_err_from_pco) - .vortex_expect( - "chunk_decompressor should succeed with valid chunk metadata", - ); + .expect("chunk_decompressor should succeed with valid chunk metadata"); cd = Some(new_cd); } let mut pd = cd @@ -425,10 +423,10 @@ impl PcoArray { .unwrap() .page_decompressor(page, page_n_values) .map_err(vortex_err_from_pco) - .vortex_expect("page_decompressor should succeed with valid page data"); + .expect("page_decompressor should succeed with valid page data"); pd.read(&mut decompressed_values[old_len..new_len]) .map_err(vortex_err_from_pco) - .vortex_expect("decompress should succeed with valid compressed data"); + .expect("decompress should succeed with valid compressed data"); } else { n_skipped_values += page_n_values; } diff --git a/encodings/runend/src/arbitrary.rs b/encodings/runend/src/arbitrary.rs index d70d4a5d4bc..d8e4aa69449 100644 --- a/encodings/runend/src/arbitrary.rs +++ b/encodings/runend/src/arbitrary.rs @@ -12,7 +12,6 @@ use vortex_buffer::Buffer; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use crate::RunEndArray; @@ -44,7 +43,7 @@ impl ArbitraryRunEndArray { let ends = PrimitiveArray::from_iter(Vec::::new()).into_array(); let values = ArbitraryArray::arbitrary_with(u, Some(0), dtype)?.0; let runend_array = RunEndArray::try_new(ends, values) - .vortex_expect("Empty RunEndArray creation should succeed"); + .expect("Empty RunEndArray creation should succeed"); return 
Ok(ArbitraryRunEndArray(runend_array)); } @@ -56,7 +55,7 @@ impl ArbitraryRunEndArray { let ends = random_strictly_sorted_ends(u, num_runs, len)?; let runend_array = RunEndArray::try_new(ends, values) - .vortex_expect("RunEndArray creation should succeed in arbitrary impl"); + .expect("RunEndArray creation should succeed in arbitrary impl"); Ok(ArbitraryRunEndArray(runend_array)) } @@ -107,21 +106,21 @@ fn random_strictly_sorted_ends( PType::U8 => { let ends_typed: Vec = ends .iter() - .map(|&e| u8::try_from(e).vortex_expect("end value fits in u8")) + .map(|&e| u8::try_from(e).expect("end value fits in u8")) .collect(); PrimitiveArray::new(Buffer::copy_from(ends_typed), Validity::NonNullable).into_array() } PType::U16 => { let ends_typed: Vec = ends .iter() - .map(|&e| u16::try_from(e).vortex_expect("end value fits in u16")) + .map(|&e| u16::try_from(e).expect("end value fits in u16")) .collect(); PrimitiveArray::new(Buffer::copy_from(ends_typed), Validity::NonNullable).into_array() } PType::U32 => { let ends_typed: Vec = ends .iter() - .map(|&e| u32::try_from(e).vortex_expect("end value fits in u32")) + .map(|&e| u32::try_from(e).expect("end value fits in u32")) .collect(); PrimitiveArray::new(Buffer::copy_from(ends_typed), Validity::NonNullable).into_array() } diff --git a/encodings/runend/src/array.rs b/encodings/runend/src/array.rs index 0e28b2e407e..c4cb69a4de6 100644 --- a/encodings/runend/src/array.rs +++ b/encodings/runend/src/array.rs @@ -31,7 +31,6 @@ use vortex_array::vtable::ValidityVTable; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -72,7 +71,7 @@ impl VTable for RunEndVTable { fn metadata(array: &RunEndArray) -> VortexResult { Ok(ProstMetadata(RunEndMetadata { - ends_ptype: PType::try_from(array.ends().dtype()).vortex_expect("Must be a valid PType") + ends_ptype: 
PType::try_from(array.ends().dtype()).expect("Must be a valid PType") as i32, num_runs: array.ends().len() as u64, offset: array.offset() as u64, @@ -102,7 +101,7 @@ impl VTable for RunEndVTable { children: &dyn ArrayChildren, ) -> VortexResult { let ends_dtype = DType::Primitive(metadata.ends_ptype(), Nullability::NonNullable); - let runs = usize::try_from(metadata.num_runs).vortex_expect("Must be a valid usize"); + let runs = usize::try_from(metadata.num_runs).expect("Must be a valid usize"); let ends = children.get(0, &ends_dtype, runs)?; let values = children.get(1, dtype, runs)?; @@ -110,7 +109,7 @@ impl VTable for RunEndVTable { RunEndArray::try_new_offset_length( ends, values, - usize::try_from(metadata.offset).vortex_expect("Offset must be a valid usize"), + usize::try_from(metadata.offset).expect("Offset must be a valid usize"), len, ) } @@ -123,8 +122,8 @@ impl VTable for RunEndVTable { ); let mut children_iter = children.into_iter(); - array.ends = children_iter.next().vortex_expect("ends child"); - array.values = children_iter.next().vortex_expect("values child"); + array.ends = children_iter.next().expect("ends child"); + array.values = children_iter.next().expect("values child"); Ok(()) } @@ -281,7 +280,7 @@ impl RunEndArray { /// # } /// ``` pub fn new(ends: ArrayRef, values: ArrayRef) -> Self { - Self::try_new(ends, values).vortex_expect("RunEndArray new") + Self::try_new(ends, values).expect("RunEndArray new") } /// Build a new `RunEndArray` from components. 
diff --git a/encodings/runend/src/compress.rs b/encodings/runend/src/compress.rs index 14ce69ebaaf..c88578c2183 100644 --- a/encodings/runend/src/compress.rs +++ b/encodings/runend/src/compress.rs @@ -22,7 +22,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::match_each_native_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -70,7 +69,7 @@ pub fn runend_encode(array: &PrimitiveArray) -> (PrimitiveArray, ArrayRef) { let ends = ends .narrow() - .vortex_expect("Ends must succeed downcasting") + .expect("Ends must succeed downcasting") .to_primitive(); ends.statistics() diff --git a/encodings/runend/src/compute/filter.rs b/encodings/runend/src/compute/filter.rs index 269d64b97f4..fc754b3a6a8 100644 --- a/encodings/runend/src/compute/filter.rs +++ b/encodings/runend/src/compute/filter.rs @@ -16,7 +16,6 @@ use vortex_buffer::BitBuffer; use vortex_buffer::buffer_mut; use vortex_dtype::NativePType; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -34,7 +33,7 @@ impl FilterKernel for RunEndVTable { ) -> VortexResult> { let mask_values = mask .values() - .vortex_expect("FilterKernel precondition: mask is Mask::Values"); + .expect("FilterKernel precondition: mask is Mask::Values"); let runs_ratio = mask_values.true_count() as f64 / array.ends().len() as f64; @@ -92,7 +91,7 @@ fn filter_run_end_primitive + AsPrimitiv // Safety: predicate must be the same length as the array the ends have been taken from for pred in (start..end).map(|i| unsafe { - mask.value_unchecked(i.try_into().vortex_expect("index must fit in usize")) + mask.value_unchecked(i.try_into().expect("index must fit in usize")) }) { count += >::from(pred); keep |= pred diff --git a/encodings/sequence/src/array.rs b/encodings/sequence/src/array.rs index e1a2d6d9039..aa9ce123249 100644 
--- a/encodings/sequence/src/array.rs +++ b/encodings/sequence/src/array.rs @@ -40,7 +40,6 @@ use vortex_dtype::Nullability::NonNullable; use vortex_dtype::PType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -133,9 +132,7 @@ impl SequenceArray { // A sequence A[i] = base + i * multiplier is sorted iff multiplier >= 0, // and strictly sorted iff multiplier > 0. - let m_int = multiplier - .cast::() - .vortex_expect("must be able to cast"); + let m_int = multiplier.cast::().expect("must be able to cast"); let is_sorted = m_int >= 0; let is_strict_sorted = m_int > 0; @@ -195,12 +192,9 @@ impl SequenceArray { assert!(idx < self.len, "index_value({idx}): index out of bounds"); match_each_native_ptype!(self.ptype(), |P| { - let base = self.base.cast::

().vortex_expect("must be able to cast"); - let multiplier = self - .multiplier - .cast::

() - .vortex_expect("must be able to cast"); - let value = base + (multiplier *

::from_usize(idx).vortex_expect("must fit")); + let base = self.base.cast::

().expect("must be able to cast"); + let multiplier = self.multiplier.cast::

().expect("must be able to cast"); + let value = base + (multiplier *

::from_usize(idx).expect("must fit")); PValue::from(value) }) @@ -208,8 +202,7 @@ impl SequenceArray { /// Returns the validated final value of a sequence array pub fn last(&self) -> PValue { - Self::try_last(self.base, self.multiplier, self.ptype(), self.len) - .vortex_expect("validated array") + Self::try_last(self.base, self.multiplier, self.ptype(), self.len).expect("validated array") } pub fn into_parts(self) -> SequenceArrayParts { @@ -280,7 +273,7 @@ impl VTable for SequenceVTable { )? .as_primitive() .pvalue() - .vortex_expect("non-nullable primitive"); + .expect("non-nullable primitive"); let multiplier = Scalar::from_proto_value( metadata @@ -292,7 +285,7 @@ impl VTable for SequenceVTable { )? .as_primitive() .pvalue() - .vortex_expect("non-nullable primitive"); + .expect("non-nullable primitive"); Ok(SequenceArray::unchecked_new( base, @@ -317,8 +310,7 @@ impl VTable for SequenceVTable { let base = array.base().cast::

()?; let multiplier = array.multiplier().cast::

()?; let values = BufferMut::from_iter( - (0..array.len()) - .map(|i| base +

::from_usize(i).vortex_expect("must fit") * multiplier), + (0..array.len()).map(|i| base +

::from_usize(i).expect("must fit") * multiplier), ); PrimitiveArray::new(values, array.dtype.nullability().into()) }); diff --git a/encodings/sequence/src/compute/compare.rs b/encodings/sequence/src/compute/compare.rs index c4d352fea90..1e196222e49 100644 --- a/encodings/sequence/src/compute/compare.rs +++ b/encodings/sequence/src/compute/compare.rs @@ -14,7 +14,6 @@ use vortex_buffer::BitBuffer; use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -46,7 +45,7 @@ impl CompareKernel for SequenceVTable { constant .as_primitive() .pvalue() - .vortex_expect("null constant handled in adaptor"), + .expect("null constant handled in adaptor"), ); let nullability = lhs.dtype().nullability() | rhs.dtype().nullability(); @@ -98,7 +97,7 @@ fn find_intersection( vortex_bail!("len == 0") } - let count = P::from_usize(len - 1).vortex_expect("idx must fit into type"); + let count = P::from_usize(len - 1).expect("idx must fit into type"); let end_element = base + (multiplier * count); // Handle ascending vs descending sequences diff --git a/encodings/sequence/src/compute/filter.rs b/encodings/sequence/src/compute/filter.rs index d0228541845..c436dc63a33 100644 --- a/encodings/sequence/src/compute/filter.rs +++ b/encodings/sequence/src/compute/filter.rs @@ -10,7 +10,6 @@ use vortex_array::validity::Validity; use vortex_buffer::BufferMut; use vortex_dtype::NativePType; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -35,10 +34,10 @@ impl FilterKernel for SequenceVTable { fn filter_impl(mul: T, base: T, mask: &Mask, validity: Validity) -> ArrayRef { let mask_values = mask .values() - .vortex_expect("FilterKernel precondition: mask is Mask::Values"); + .expect("FilterKernel precondition: mask is Mask::Values"); let mut 
buffer = BufferMut::::with_capacity(mask_values.true_count()); buffer.extend(mask_values.indices().iter().map(|&idx| { - let i = T::from_usize(idx).vortex_expect("all valid indices fit"); + let i = T::from_usize(idx).expect("all valid indices fit"); base + i * mul })); PrimitiveArray::new(buffer.freeze(), validity).into_array() diff --git a/encodings/sequence/src/compute/list_contains.rs b/encodings/sequence/src/compute/list_contains.rs index 3b4aed2619e..07f002b9673 100644 --- a/encodings/sequence/src/compute/list_contains.rs +++ b/encodings/sequence/src/compute/list_contains.rs @@ -7,7 +7,6 @@ use vortex_array::arrays::BoolArray; use vortex_array::compute::ListContainsKernel; use vortex_array::compute::ListContainsKernelAdapter; use vortex_array::register_kernel; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::array::SequenceVTable; @@ -26,7 +25,7 @@ impl ListContainsKernel for SequenceVTable { let list_elements = list_scalar .as_list() .elements() - .vortex_expect("non-null element (checked in entry)"); + .expect("non-null element (checked in entry)"); let mut set_indices: Vec = Vec::new(); for intercept in list_elements.iter() { diff --git a/encodings/sequence/src/compute/take.rs b/encodings/sequence/src/compute/take.rs index 72328bd1332..d8223274571 100644 --- a/encodings/sequence/src/compute/take.rs +++ b/encodings/sequence/src/compute/take.rs @@ -19,7 +19,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; use vortex_mask::AllOr; @@ -42,7 +41,7 @@ fn take_inner( if i.as_() >= len { vortex_panic!(OutOfBounds: i.as_(), 0, len); } - let i = ::from::(*i).vortex_expect("all indices fit"); + let i = ::from::(*i).expect("all indices fit"); base + i * mul })), Validity::from(result_nullability), @@ -61,8 +60,7 @@ fn take_inner( 
vortex_panic!(OutOfBounds: i.as_(), 0, len); } - let i = - ::from::(*i).vortex_expect("all valid indices fit"); + let i = ::from::(*i).expect("all valid indices fit"); base + i * mul } else { S::zero() diff --git a/encodings/sparse/src/canonical.rs b/encodings/sparse/src/canonical.rs index 849b64751a2..fa93a0154c5 100644 --- a/encodings/sparse/src/canonical.rs +++ b/encodings/sparse/src/canonical.rs @@ -49,7 +49,6 @@ use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_native_ptype; use vortex_dtype::match_smallest_offset_type; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -176,7 +175,7 @@ fn execute_sparse_lists_inner( let position_is_patched = patch_idx < patch_indices.len() && patch_indices[patch_idx] .to_usize() - .vortex_expect("patch index must fit in usize") + .expect("patch index must fit in usize") == position; if position_is_patched { @@ -185,16 +184,16 @@ fn execute_sparse_lists_inner( .append_value( patch_values .scalar_at(patch_idx) - .vortex_expect("scalar_at") + .expect("scalar_at") .as_list(), ) - .vortex_expect("Failed to append sparse value"); + .expect("Failed to append sparse value"); patch_idx += 1; } else { // Set with the fill value. builder .append_value(fill_value.clone()) - .vortex_expect("Failed to append fill value"); + .expect("Failed to append fill value"); } } @@ -247,7 +246,7 @@ fn execute_sparse_fixed_size_list_inner( let mut next_index = 0; let indices = indices .iter() - .map(|x| (*x).to_usize().vortex_expect("index must fit in usize")); + .map(|x| (*x).to_usize().expect("index must fit in usize")); for (patch_idx, sparse_idx) in indices.enumerate() { // Fill gap before this patch with fill values. @@ -259,18 +258,14 @@ fn execute_sparse_fixed_size_list_inner( ); // Append the patch value, handling null patches by appending defaults. 
- if values - .validity() - .is_valid(patch_idx) - .vortex_expect("is_valid") - { + if values.validity().is_valid(patch_idx).expect("is_valid") { let patch_list = values .fixed_size_list_elements_at(patch_idx) - .vortex_expect("fixed_size_list_elements_at"); + .expect("fixed_size_list_elements_at"); for i in 0..list_size as usize { builder - .append_scalar(&patch_list.scalar_at(i).vortex_expect("scalar_at")) - .vortex_expect("element dtype must match"); + .append_scalar(&patch_list.scalar_at(i).expect("scalar_at")) + .expect("element dtype must match"); } } else { builder.append_defaults(list_size as usize); @@ -308,7 +303,7 @@ fn append_n_lists( for elem in fill_elems { builder .append_scalar(elem) - .vortex_expect("element dtype must match"); + .expect("element dtype must match"); } } else { builder.append_defaults(list_size as usize); @@ -323,7 +318,7 @@ fn execute_sparse_bools(patches: &Patches, fill_value: &Scalar) -> VortexResult< ( fill_value .try_into() - .vortex_expect("Fill value must convert to bool"), + .expect("Fill value must convert to bool"), if patches.dtype().nullability() == Nullability::NonNullable { Validity::NonNullable } else { @@ -347,7 +342,7 @@ fn execute_sparse_primitives TryFrom<&'a Scalar, Error ( fill_value .try_into() - .vortex_expect("Fill value must convert to target T"), + .expect("Fill value must convert to target T"), if patches.dtype().nullability() == Nullability::NonNullable { Validity::NonNullable } else { @@ -391,7 +386,7 @@ fn execute_sparse_struct( unresolved_patches .values() .validity_mask() - .vortex_expect("validity_mask"), + .expect("validity_mask"), Nullability::Nullable, ), )? 
@@ -412,7 +407,7 @@ fn execute_sparse_struct( unresolved_patches .clone() .map_values(|_| Ok(patch_values)) - .vortex_expect("Replacing patch values"), + .expect("Replacing patch values"), fill_value, ) }), @@ -432,9 +427,7 @@ fn execute_sparse_decimal( let mut builder = DecimalBuilder::with_capacity::(len, decimal_dtype, nullability); match fill_value.decimal_value() { Some(fill_value) => { - let fill_value = fill_value - .cast::() - .vortex_expect("unexpected value type"); + let fill_value = fill_value.cast::().expect("unexpected value type"); for _ in 0..len { builder.append_value(fill_value) } @@ -482,7 +475,7 @@ fn execute_varbin_inner( buffers.push(BufferHandle::new_host(buffer.clone())); BinaryView::make_view( buffer.as_ref(), - u32::try_from(n_patch_buffers).vortex_expect("too many buffers"), + u32::try_from(n_patch_buffers).expect("too many buffers"), 0, ) } else { @@ -492,8 +485,8 @@ fn execute_varbin_inner( let mut views = buffer_mut![fill; len]; for (patch_index, &patch) in indices.into_iter().zip_eq(values.views().iter()) { - let patch_index_usize = ::from(patch_index) - .vortex_expect("var bin view indices must fit in usize"); + let patch_index_usize = + ::from(patch_index).expect("var bin view indices must fit in usize"); views[patch_index_usize] = patch; } @@ -534,7 +527,6 @@ mod test { use vortex_dtype::Nullability::Nullable; use vortex_dtype::PType; use vortex_dtype::StructFields; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -1030,7 +1022,7 @@ mod test { .unwrap() .into_array(); - let actual = sparse.to_canonical().vortex_expect("no fail").into_array(); + let actual = sparse.to_canonical().expect("no fail").into_array(); let result_listview = actual.to_listview(); // Check the structure diff --git a/encodings/sparse/src/lib.rs b/encodings/sparse/src/lib.rs index 354f3f00190..8ff93a5b297 100644 --- a/encodings/sparse/src/lib.rs +++ b/encodings/sparse/src/lib.rs @@ -42,7 +42,6 @@ use 
vortex_buffer::Buffer; use vortex_buffer::ByteBufferMut; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -148,8 +147,8 @@ impl VTable for SparseVTable { ); let mut children_iter = children.into_iter(); - let patch_indices = children_iter.next().vortex_expect("patch_indices child"); - let patch_values = children_iter.next().vortex_expect("patch_values child"); + let patch_indices = children_iter.next().expect("patch_indices child"); + let patch_values = children_iter.next().expect("patch_values child"); array.patches = Patches::new( array.patches.array_len(), @@ -324,7 +323,7 @@ impl SparseArray { AllOr::Some(values) => { let buffer: Buffer = values .iter() - .map(|&v| v.try_into().vortex_expect("indices must fit in u32")) + .map(|&v| v.try_into().expect("indices must fit in u32")) .collect(); buffer.into_array() @@ -347,7 +346,7 @@ impl SparseArray { let (top_pvalue, _) = array .to_primitive() .top_value()? 
- .vortex_expect("Non empty or all null array"); + .expect("Non empty or all null array"); Scalar::primitive_value(top_pvalue, top_pvalue.ptype(), array.dtype().nullability()) }; @@ -461,7 +460,6 @@ mod test { use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; - use vortex_error::VortexExpect; use super::*; @@ -620,7 +618,7 @@ mod test { ]), ); let sparse = SparseArray::encode(&original.clone().into_array(), None) - .vortex_expect("SparseArray::encode should succeed for test data"); + .expect("SparseArray::encode should succeed for test data"); assert_eq!( sparse.validity_mask().unwrap(), Mask::from_iter(vec![ diff --git a/encodings/zigzag/src/array.rs b/encodings/zigzag/src/array.rs index eca1db02cfa..78ae5da9299 100644 --- a/encodings/zigzag/src/array.rs +++ b/encodings/zigzag/src/array.rs @@ -29,7 +29,6 @@ use vortex_array::vtable::VisitorVTable; use vortex_dtype::DType; use vortex_dtype::PType; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -99,7 +98,7 @@ impl VTable for ZigZagVTable { "ZigZagArray expects exactly 1 child (encoded), got {}", children.len() ); - array.encoded = children.into_iter().next().vortex_expect("checked"); + array.encoded = children.into_iter().next().expect("checked"); Ok(()) } @@ -141,7 +140,7 @@ impl ZigZagVTable { impl ZigZagArray { pub fn new(encoded: ArrayRef) -> Self { - Self::try_new(encoded).vortex_expect("ZigZigArray new") + Self::try_new(encoded).expect("ZigZigArray new") } pub fn try_new(encoded: ArrayRef) -> VortexResult { @@ -203,9 +202,7 @@ impl OperationsVTable for ZigZagVTable { Ok(match_each_unsigned_integer_ptype!(pscalar.ptype(), |P| { Scalar::primitive( <

::Int>::decode( - pscalar - .typed_value::

() - .vortex_expect("zigzag corruption"), + pscalar.typed_value::

().expect("zigzag corruption"), ), array.dtype().nullability(), ) diff --git a/encodings/zstd/src/array.rs b/encodings/zstd/src/array.rs index f9ac6e22223..898d438bae2 100644 --- a/encodings/zstd/src/array.rs +++ b/encodings/zstd/src/array.rs @@ -47,7 +47,6 @@ use vortex_buffer::ByteBuffer; use vortex_buffer::ByteBufferMut; use vortex_dtype::DType; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -171,7 +170,7 @@ impl VTable for ZstdVTable { array.unsliced_validity = if children.is_empty() { Validity::from(array.dtype.nullability()) } else { - Validity::Array(children.into_iter().next().vortex_expect("checked")) + Validity::Array(children.into_iter().next().expect("checked")) }; Ok(()) @@ -256,7 +255,7 @@ fn collect_valid_vbv(vbv: &VarBinViewArray) -> VortexResult<(ByteBuffer, Vec (Buffer::empty(), Vec::new()), _ => { let mut buffer = BufferMut::with_capacity( - usize::try_from(vbv.nbytes()).vortex_expect("must fit into buffer") + usize::try_from(vbv.nbytes()).expect("must fit into buffer") + mask.true_count() * size_of::(), ); let mut value_byte_indices = Vec::new(); @@ -265,8 +264,12 @@ fn collect_valid_vbv(vbv: &VarBinViewArray) -> VortexResult<(ByteBuffer, Vec(()) @@ -287,17 +290,16 @@ pub fn reconstruct_views(buffer: &ByteBuffer) -> Buffer { let str_len = ViewLen::from_le_bytes( buffer .get(offset..offset + size_of::()) - .vortex_expect("corrupted zstd length") + .expect("corrupted zstd length") .try_into() - .ok() - .vortex_expect("must fit ViewLen size"), + .expect("must fit ViewLen size"), ) as usize; offset += size_of::(); let value = &buffer[offset..offset + str_len]; res.push(BinaryView::make_view( value, 0, - u32::try_from(offset).vortex_expect("offset must fit in u32"), + u32::try_from(offset).expect("offset must fit in u32"), )); offset += str_len; } @@ -466,7 +468,8 @@ impl ZstdArray { dictionary_size: dictionary .as_ref() .map_or(0, 
|dict| dict.len()) - .try_into()?, + .try_into() + .expect("Dictionary size must fit in usize"), frames: frame_metas, }; @@ -558,7 +561,8 @@ impl ZstdArray { dictionary_size: dictionary .as_ref() .map_or(0, |dict| dict.len()) - .try_into()?, + .try_into() + .expect("Dictionary size must fit in usize"), frames: frame_metas, }; Ok(ZstdArray::new( @@ -627,12 +631,12 @@ impl ZstdArray { } let frame_uncompressed_size = usize::try_from(frame_meta.uncompressed_size) - .vortex_expect("Uncompressed size must fit in usize"); + .expect("Uncompressed size must fit in usize"); let frame_n_values = if frame_meta.n_values == 0 { // possibly older primitive-only metadata that just didn't store this frame_uncompressed_size / byte_width } else { - usize::try_from(frame_meta.n_values).vortex_expect("frame size must fit usize") + usize::try_from(frame_meta.n_values).expect("frame size must fit usize") }; let value_idx_stop = value_idx_start + frame_n_values; @@ -652,7 +656,7 @@ impl ZstdArray { } else { zstd::bulk::Decompressor::new() } - .vortex_expect("Decompressor encountered io error"); + .expect("Decompressor encountered io error"); let mut decompressed = ByteBufferMut::with_capacity_aligned( uncompressed_size_to_decompress, Alignment::new(byte_width), @@ -666,7 +670,7 @@ impl ZstdArray { for frame in frames_to_decompress { let uncompressed_written = decompressor .decompress_to_buffer(frame.as_slice(), &mut decompressed[uncompressed_start..]) - .vortex_expect("error while decompressing zstd array"); + .expect("error while decompressing zstd array"); uncompressed_start += uncompressed_written; } if uncompressed_start != uncompressed_size_to_decompress { diff --git a/encodings/zstd/src/zstd_buffers.rs b/encodings/zstd/src/zstd_buffers.rs index 4789f50331b..c2c285a0588 100644 --- a/encodings/zstd/src/zstd_buffers.rs +++ b/encodings/zstd/src/zstd_buffers.rs @@ -31,7 +31,6 @@ use vortex_buffer::Alignment; use vortex_buffer::ByteBuffer; use vortex_buffer::ByteBufferMut; use 
vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure_eq; use vortex_error::vortex_err; @@ -199,7 +198,8 @@ impl ZstdBuffersArray { .zip(&self.uncompressed_sizes) .enumerate() { - let size = usize::try_from(uncompressed_size)?; + let size = + usize::try_from(uncompressed_size).expect("uncompressed size must fit in usize"); let alignment = self.buffer_alignments.get(i).copied().unwrap_or(1); let aligned = Alignment::try_from(alignment)?; @@ -268,13 +268,13 @@ impl ZstdBuffersArray { // If invariants are somehow broken, device decompression could have UB, so ensure // they still hold. self.validate() - .vortex_expect("zstd_buffers invariant violated before decode_plan"); + .expect("zstd_buffers invariant violated before decode_plan"); let output_sizes = self .uncompressed_sizes .iter() - .map(|&size| usize::try_from(size)) - .collect::, _>>()?; + .map(|&size| usize::try_from(size).expect("buffer sizes must fit in usize")) + .collect::>(); let output_size_max = output_sizes.iter().copied().max().unwrap_or(0); let output_alignments = self diff --git a/fuzz/fuzz_targets/file_io.rs b/fuzz/fuzz_targets/file_io.rs index 8eb82c56a3e..adc11f1b312 100644 --- a/fuzz/fuzz_targets/file_io.rs +++ b/fuzz/fuzz_targets/file_io.rs @@ -19,7 +19,6 @@ use vortex_array::expr::root; use vortex_buffer::ByteBufferMut; use vortex_dtype::DType; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use vortex_file::OpenOptionsSessionExt; use vortex_file::WriteOptionsSessionExt; @@ -47,14 +46,14 @@ fuzz_target!(|fuzz: FuzzFileAction| -> Corpus { let expected_array = { let bool_mask = array_data .apply(&filter_expr.clone().unwrap_or_else(|| lit(true))) - .vortex_expect("filter expression evaluation should succeed in fuzz test"); + .expect("filter expression evaluation should succeed in fuzz test"); let mask = bool_mask.to_bool().to_mask_fill_null_false(); let filtered = array_data 
.filter(mask) - .vortex_expect("filter operation should succeed in fuzz test"); + .expect("filter operation should succeed in fuzz test"); filtered .apply(&projection_expr.clone().unwrap_or_else(root)) - .vortex_expect("projection expression evaluation should succeed in fuzz test") + .expect("projection expression evaluation should succeed in fuzz test") }; let write_options = match compressor_strategy { @@ -71,24 +70,24 @@ fuzz_target!(|fuzz: FuzzFileAction| -> Corpus { let _footer = write_options .blocking(&*RUNTIME) .write(&mut full_buff, array_data.to_array_iterator()) - .vortex_expect("file write should succeed in fuzz test"); + .expect("file write should succeed in fuzz test"); let mut output = SESSION .open_options() .open_buffer(full_buff) - .vortex_expect("open_buffer should succeed in fuzz test") + .expect("open_buffer should succeed in fuzz test") .scan() - .vortex_expect("scan should succeed in fuzz test") + .expect("scan should succeed in fuzz test") .with_projection(projection_expr.unwrap_or_else(root)) .with_some_filter(filter_expr) .into_array_iter(&*RUNTIME) - .vortex_expect("into_array_iter should succeed in fuzz test") + .expect("into_array_iter should succeed in fuzz test") .try_collect::<_, Vec<_>, _>() - .vortex_expect("collect should succeed in fuzz test"); + .expect("collect should succeed in fuzz test"); let output_array = match output.len() { 0 => Canonical::empty(expected_array.dtype()).into_array(), - 1 => output.pop().vortex_expect("one output"), + 1 => output.pop().expect("one output"), _ => ChunkedArray::from_iter(output).into_array(), }; @@ -108,12 +107,12 @@ fuzz_target!(|fuzz: FuzzFileAction| -> Corpus { ); let bool_result = compare(&expected_array, &output_array, Operator::Eq) - .vortex_expect("compare operation should succeed in fuzz test") + .expect("compare operation should succeed in fuzz test") .to_bool(); let true_count = bool_result.to_bit_buffer().true_count(); if true_count != expected_array.len() - && 
(bool_result.all_valid().vortex_expect("all_valid") - || expected_array.all_valid().vortex_expect("all_valid")) + && (bool_result.all_valid().expect("all_valid") + || expected_array.all_valid().expect("all_valid")) { vortex_panic!( "Failed to match original array {}with{}", diff --git a/fuzz/src/array/compare.rs b/fuzz/src/array/compare.rs index 18a6299aed3..9298469a795 100644 --- a/fuzz/src/array/compare.rs +++ b/fuzz/src/array/compare.rs @@ -17,7 +17,6 @@ use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Operator) -> ArrayRef { @@ -30,10 +29,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper match array.dtype() { DType::Bool(_) => { - let bool = value - .as_bool() - .value() - .vortex_expect("nulls handled before"); + let bool = value.as_bool().value().expect("nulls handled before"); compare_to( array .to_bool() @@ -42,7 +38,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper .zip( array .validity_mask() - .vortex_expect("validity_mask") + .expect("validity_mask") .to_bit_buffer() .iter(), ) @@ -56,9 +52,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper let primitive = value.as_primitive(); let primitive_array = array.to_primitive(); match_each_native_ptype!(p, |P| { - let pval = primitive - .typed_value::

() - .vortex_expect("nulls handled before"); + let pval = primitive.typed_value::

().expect("nulls handled before"); compare_to( primitive_array .as_slice::

() @@ -67,7 +61,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper .zip( array .validity_mask() - .vortex_expect("validity_mask") + .expect("validity_mask") .to_bit_buffer() .iter(), ) @@ -84,7 +78,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper match_each_decimal_value_type!(decimal_array.values_type(), |D| { let dval = decimal .decimal_value() - .vortex_expect("nulls handled before") + .expect("nulls handled before") .cast::() .unwrap_or_else(|| vortex_panic!("todo: handle upcast of decimal array")); let buf = decimal_array.buffer::(); @@ -95,7 +89,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper .zip( array .validity_mask() - .vortex_expect("validity_mask") + .expect("validity_mask") .to_bit_buffer() .iter(), ) @@ -110,7 +104,7 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper let utf8_value = value.as_utf8(); compare_to( iter.map(|v| v.map(|b| unsafe { str::from_utf8_unchecked(b) })), - utf8_value.value().vortex_expect("nulls handled before"), + utf8_value.value().expect("nulls handled before"), operator, result_nullability, ) @@ -121,14 +115,14 @@ pub fn compare_canonical_array(array: &dyn Array, value: &Scalar, operator: Oper // Don't understand the lifetime problem here but identity map makes it go away #[allow(clippy::map_identity)] iter.map(|v| v), - binary_value.value().vortex_expect("nulls handled before"), + binary_value.value().expect("nulls handled before"), operator, result_nullability, ) }), DType::Struct(..) | DType::List(..) | DType::FixedSizeList(..) 
=> { let scalar_vals: Vec = (0..array.len()) - .map(|i| array.scalar_at(i).vortex_expect("scalar_at")) + .map(|i| array.scalar_at(i).expect("scalar_at")) .collect(); BoolArray::from_iter( scalar_vals @@ -159,12 +153,7 @@ fn compare_to( }; if !nullability.is_nullable() { - BoolArray::from_iter( - values - .map(|val| val.vortex_expect("non nullable")) - .map(eval_fn), - ) - .into_array() + BoolArray::from_iter(values.map(|val| val.expect("non nullable")).map(eval_fn)).into_array() } else { BoolArray::from_iter(values.map(|val| val.map(eval_fn))).into_array() } diff --git a/fuzz/src/array/fill_null.rs b/fuzz/src/array/fill_null.rs index 66d216a96c5..ce651a09470 100644 --- a/fuzz/src/array/fill_null.rs +++ b/fuzz/src/array/fill_null.rs @@ -20,7 +20,6 @@ use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; /// Apply fill_null on the canonical form of the array to get a consistent baseline. 
@@ -55,7 +54,7 @@ fn fill_bool_array( let fill_bool = fill_value .as_bool() .value() - .vortex_expect("cannot have null fill value"); + .expect("cannot have null fill value"); match array.validity() { Validity::NonNullable | Validity::AllValid => { @@ -85,8 +84,8 @@ fn fill_primitive_array( result_nullability: Nullability, ) -> ArrayRef { match_each_native_ptype!(array.ptype(), |T| { - let fill_val = T::try_from(fill_value) - .vortex_expect("fill value conversion should succeed in fuzz test"); + let fill_val = + T::try_from(fill_value).expect("fill value conversion should succeed in fuzz test"); match array.validity() { Validity::NonNullable | Validity::AllValid => { @@ -126,7 +125,7 @@ fn fill_decimal_array( match_each_decimal_value_type!(array.values_type(), |D| { let fill_val = D::try_from(decimal_scalar) - .vortex_expect("decimal fill value conversion should succeed in fuzz test"); + .expect("decimal fill value conversion should succeed in fuzz test"); match array.validity() { Validity::NonNullable | Validity::AllValid => DecimalArray::new( @@ -153,7 +152,7 @@ fn fill_decimal_array( } DecimalArray::try_new(new_data.freeze(), decimal_dtype, result_nullability.into()) - .vortex_expect("DecimalArray creation should succeed in fuzz test") + .expect("DecimalArray creation should succeed in fuzz test") .into_array() } } @@ -177,16 +176,16 @@ fn fill_varbinview_array( let fill_str = fill_value .as_utf8() .value() - .vortex_expect("cannot have null fill value"); + .expect("cannot have null fill value"); let strings: Vec = (0..array.len()) .map(|i| { if validity_bits.value(i) { array .scalar_at(i) - .vortex_expect("scalar_at") + .expect("scalar_at") .as_utf8() .value() - .vortex_expect("cannot have null valid value") + .expect("cannot have null valid value") .to_string() } else { fill_str.to_string() @@ -211,16 +210,16 @@ fn fill_varbinview_array( let fill_bytes = fill_value .as_binary() .value() - .vortex_expect("cannot have null fill value"); + .expect("cannot have 
null fill value"); let binaries: Vec> = (0..array.len()) .map(|i| { if validity_bits.value(i) { array .scalar_at(i) - .vortex_expect("scalar_at") + .expect("scalar_at") .as_binary() .value() - .vortex_expect("cannot have null valid value") + .expect("cannot have null valid value") .to_vec() } else { fill_bytes.to_vec() diff --git a/fuzz/src/array/mask.rs b/fuzz/src/array/mask.rs index c1d40beeb01..61171e9c4ad 100644 --- a/fuzz/src/array/mask.rs +++ b/fuzz/src/array/mask.rs @@ -18,7 +18,6 @@ use vortex_array::validity::Validity; use vortex_array::vtable::ValidityHelper; use vortex_dtype::Nullability; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::AllOr; use vortex_mask::Mask; @@ -120,13 +119,13 @@ pub fn mask_canonical_array(canonical: Canonical, mask: &Mask) -> VortexResult { // Recursively mask the storage array let masked_storage = mask_canonical_array(array.storage().to_canonical()?, mask) - .vortex_expect("mask_canonical_array should succeed in fuzz test"); + .expect("mask_canonical_array should succeed in fuzz test"); let ext_dtype = array .ext_dtype() diff --git a/fuzz/src/array/mod.rs b/fuzz/src/array/mod.rs index 809cda9f1b0..31ad6d2cc21 100644 --- a/fuzz/src/array/mod.rs +++ b/fuzz/src/array/mod.rs @@ -51,7 +51,6 @@ use vortex_array::search_sorted::SearchSortedSide; use vortex_btrblocks::BtrBlocksCompressor; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use vortex_mask::Mask; use vortex_utils::aliases::hash_set::HashSet; @@ -186,7 +185,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let start = u.choose_index(current_array.len())?; let stop = u.int_in_range(start..=current_array.len())?; current_array = slice_canonical_array(¤t_array, start, stop) - .vortex_expect("slice_canonical_array should succeed in fuzz test"); + .expect("slice_canonical_array should succeed in fuzz test"); ( 
Action::Slice(start..stop), @@ -202,7 +201,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let nullable = indices.contains(&None); current_array = take_canonical_array(¤t_array, &indices) - .vortex_expect("take_canonical_array should succeed in fuzz test"); + .expect("take_canonical_array should succeed in fuzz test"); let indices_array = if nullable { PrimitiveArray::from_option_iter( indices.iter().map(|i| i.map(|i| i as u64)), @@ -212,7 +211,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { PrimitiveArray::from_iter( indices .iter() - .map(|i| i.vortex_expect("must be present")) + .map(|i| i.expect("must be present")) .map(|i| i as u64), ) .into_array() @@ -220,7 +219,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let compressed = BtrBlocksCompressor::default() .compress(&indices_array) - .vortex_expect("BtrBlocksCompressor compress should succeed in fuzz test"); + .expect("BtrBlocksCompressor compress should succeed in fuzz test"); ( Action::Take(compressed), ExpectedValue::Array(current_array.to_array()), @@ -234,7 +233,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let scalar = if u.arbitrary()? { current_array .scalar_at(u.choose_index(current_array.len())?) - .vortex_expect("scalar_at") + .expect("scalar_at") } else { random_scalar(u, current_array.dtype())? }; @@ -244,7 +243,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { } let sorted = sort_canonical_array(¤t_array) - .vortex_expect("sort_canonical_array should succeed in fuzz test"); + .expect("sort_canonical_array should succeed in fuzz test"); let side = if u.arbitrary()? 
{ SearchSortedSide::Left @@ -254,7 +253,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { ( Action::SearchSorted(scalar.clone(), side), ExpectedValue::Search( - search_sorted_canonical_array(&sorted, &scalar, side).vortex_expect( + search_sorted_canonical_array(&sorted, &scalar, side).expect( "search_sorted_canonical_array should succeed in fuzz test", ), ), @@ -265,7 +264,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { .map(|_| bool::arbitrary(u)) .collect::>>()?; current_array = filter_canonical_array(¤t_array, &mask) - .vortex_expect("filter_canonical_array should succeed in fuzz test"); + .expect("filter_canonical_array should succeed in fuzz test"); ( Action::Filter(Mask::from_iter(mask)), ExpectedValue::Array(current_array.to_array()), @@ -275,7 +274,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let scalar = if u.arbitrary()? { current_array .scalar_at(u.choose_index(current_array.len())?) - .vortex_expect("scalar_at") + .expect("scalar_at") } else { // We can compare arrays with different nullability let null: Nullability = u.arbitrary()?; @@ -296,7 +295,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { return Err(EmptyChoose); } let Some(result) = cast_canonical_array(¤t_array, &to) - .vortex_expect("should fail to create array") + .expect("should fail to create array") else { return Err(EmptyChoose); }; @@ -313,9 +312,9 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let sum_result = sum_canonical_array( current_array .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), ) - .vortex_expect("sum_canonical_array should succeed in fuzz test"); + .expect("sum_canonical_array should succeed in fuzz test"); (Action::Sum, ExpectedValue::Scalar(sum_result)) } ActionType::MinMax => { @@ -323,9 +322,9 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let min_max_result = min_max_canonical_array( current_array .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz 
test"), + .expect("to_canonical should succeed in fuzz test"), ) - .vortex_expect("min_max_canonical_array should succeed in fuzz test"); + .expect("min_max_canonical_array should succeed in fuzz test"); (Action::MinMax, ExpectedValue::MinMax(min_max_result)) } ActionType::FillNull => { @@ -336,7 +335,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let fill_value = if u.arbitrary()? && !current_array.is_empty() { current_array .scalar_at(u.choose_index(current_array.len())?) - .vortex_expect("scalar_at") + .expect("scalar_at") } else { random_scalar( u, @@ -354,10 +353,10 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let expected_result = fill_null_canonical_array( current_array .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), &fill_value, ) - .vortex_expect("fill_null_canonical_array should succeed in fuzz test"); + .expect("fill_null_canonical_array should succeed in fuzz test"); // Update current_array to the result for chaining current_array = expected_result.clone(); ( @@ -375,10 +374,10 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let expected_result = mask_canonical_array( current_array .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), &Mask::from_iter(mask.clone()), ) - .vortex_expect("mask_canonical_array should succeed in fuzz test"); + .expect("mask_canonical_array should succeed in fuzz test"); // Update current_array to the result for chaining current_array = expected_result.clone(); ( @@ -393,11 +392,7 @@ impl<'a> Arbitrary<'a> for FuzzArrayAction { let num_indices = u.int_in_range(1..=5.min(current_array.len()))?; let indices_vec = (0..num_indices) - .map(|_| { - u.choose_index(current_array.len()) - .ok() - .vortex_expect("cannot pick") - }) + .map(|_| u.choose_index(current_array.len()).expect("cannot pick")) .unique() .collect::>(); @@ -408,10 +403,10 @@ impl<'a> Arbitrary<'a> 
for FuzzArrayAction { scalar_at_canonical_array( current_array .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), idx, ) - .vortex_expect("scalar_at_canonical_array should succeed in fuzz test") + .expect("scalar_at_canonical_array should succeed in fuzz test") }) .collect(); @@ -525,14 +520,14 @@ pub fn compress_array(array: &dyn Array, strategy: CompressorStrategy) -> ArrayR match strategy { CompressorStrategy::Default => BtrBlocksCompressor::default() .compress(array) - .vortex_expect("BtrBlocksCompressor compress should succeed in fuzz test"), + .expect("BtrBlocksCompressor compress should succeed in fuzz test"), CompressorStrategy::Compact => BtrBlocksCompressorBuilder::default() .include_string([StringCode::Zstd]) .include_int([IntCode::Pco]) .include_float([FloatCode::Pco]) .build() .compress(array) - .vortex_expect("Compact compress should succeed in fuzz test"), + .expect("Compact compress should succeed in fuzz test"), } } @@ -541,7 +536,7 @@ pub fn compress_array(array: &dyn Array, strategy: CompressorStrategy) -> ArrayR pub fn compress_array(array: &dyn Array, _strategy: CompressorStrategy) -> ArrayRef { BtrBlocksCompressor::default() .compress(array) - .vortex_expect("BtrBlocksCompressor compress should succeed in fuzz test") + .expect("BtrBlocksCompressor compress should succeed in fuzz test") } /// Run a fuzz action and return whether to keep it in the corpus. 
@@ -565,14 +560,14 @@ pub fn run_fuzz_action(fuzz_action: FuzzArrayAction) -> crate::error::VortexFuzz Action::Compress(strategy) => { let canonical = current_array .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"); + .expect("to_canonical should succeed in fuzz test"); current_array = compress_array(canonical.as_ref(), strategy); assert_array_eq(&expected.array(), ¤t_array, i)?; } Action::Slice(range) => { current_array = current_array .slice(range) - .vortex_expect("slice operation should succeed in fuzz test"); + .expect("slice operation should succeed in fuzz test"); assert_array_eq(&expected.array(), ¤t_array, i)?; } Action::Take(indices) => { @@ -581,12 +576,12 @@ pub fn run_fuzz_action(fuzz_action: FuzzArrayAction) -> crate::error::VortexFuzz } current_array = current_array .take(indices) - .vortex_expect("take operation should succeed in fuzz test"); + .expect("take operation should succeed in fuzz test"); assert_array_eq(&expected.array(), ¤t_array, i)?; } Action::SearchSorted(s, side) => { let mut sorted = sort_canonical_array(¤t_array) - .vortex_expect("sort_canonical_array should succeed in fuzz test"); + .expect("sort_canonical_array should succeed in fuzz test"); if !current_array.is_canonical() { sorted = compress_array(&sorted, CompressorStrategy::Default); @@ -596,7 +591,7 @@ pub fn run_fuzz_action(fuzz_action: FuzzArrayAction) -> crate::error::VortexFuzz Action::Filter(mask_val) => { current_array = current_array .filter(mask_val) - .vortex_expect("filter operation should succeed in fuzz test"); + .expect("filter operation should succeed in fuzz test"); assert_array_eq(&expected.array(), ¤t_array, i)?; } Action::Compare(v, op) => { @@ -605,7 +600,7 @@ pub fn run_fuzz_action(fuzz_action: FuzzArrayAction) -> crate::error::VortexFuzz &ConstantArray::new(v.clone(), current_array.len()).into_array(), op, ) - .vortex_expect("compare operation should succeed in fuzz test"); + .expect("compare operation should succeed in fuzz 
test"); if let Err(e) = assert_array_eq(&expected.array(), &compare_result, i) { vortex_panic!( "Failed to compare {}with {op} {v}\nError: {e}", @@ -617,7 +612,7 @@ pub fn run_fuzz_action(fuzz_action: FuzzArrayAction) -> crate::error::VortexFuzz Action::Cast(to) => { let cast_result = current_array .cast(to.clone()) - .vortex_expect("cast operation should succeed in fuzz test"); + .expect("cast operation should succeed in fuzz test"); if let Err(e) = assert_array_eq(&expected.array(), &cast_result, i) { vortex_panic!( "Failed to cast {} to dtype {to}\nError: {e}", @@ -628,30 +623,30 @@ pub fn run_fuzz_action(fuzz_action: FuzzArrayAction) -> crate::error::VortexFuzz } Action::Sum => { let sum_result = - sum(¤t_array).vortex_expect("sum operation should succeed in fuzz test"); + sum(¤t_array).expect("sum operation should succeed in fuzz test"); assert_scalar_eq(&expected.scalar(), &sum_result, i)?; } Action::MinMax => { - let min_max_result = min_max(¤t_array) - .vortex_expect("min_max operation should succeed in fuzz test"); + let min_max_result = + min_max(¤t_array).expect("min_max operation should succeed in fuzz test"); assert_min_max_eq(&expected.min_max(), &min_max_result, i)?; } Action::FillNull(fill_value) => { current_array = current_array .fill_null(fill_value.clone()) - .vortex_expect("fill_null operation should succeed in fuzz test"); + .expect("fill_null operation should succeed in fuzz test"); assert_array_eq(&expected.array(), ¤t_array, i)?; } Action::Mask(mask_val) => { current_array = current_array .mask(mask_val.into_array()) - .vortex_expect("mask operation should succeed in fuzz test"); + .expect("mask operation should succeed in fuzz test"); assert_array_eq(&expected.array(), ¤t_array, i)?; } Action::ScalarAt(indices) => { let expected_scalars = expected.scalar_vec(); for (j, &idx) in indices.iter().enumerate() { - let scalar = current_array.scalar_at(idx).vortex_expect("scalar_at"); + let scalar = 
current_array.scalar_at(idx).expect("scalar_at"); assert_scalar_eq(&expected_scalars[j], &scalar, i)?; } } @@ -721,8 +716,8 @@ pub fn assert_array_eq( )); } for idx in 0..lhs.len() { - let l = lhs.scalar_at(idx).vortex_expect("scalar_at"); - let r = rhs.scalar_at(idx).vortex_expect("scalar_at"); + let l = lhs.scalar_at(idx).expect("scalar_at"); + let r = rhs.scalar_at(idx).expect("scalar_at"); if l != r { return Err(VortexFuzzError::ArrayNotEqual( diff --git a/fuzz/src/array/scalar_at.rs b/fuzz/src/array/scalar_at.rs index 3d47ba65c99..fe1e869290e 100644 --- a/fuzz/src/array/scalar_at.rs +++ b/fuzz/src/array/scalar_at.rs @@ -11,7 +11,6 @@ use vortex_array::scalar::Scalar; use vortex_dtype::DType; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; /// Baseline implementation of scalar_at that works on canonical arrays. @@ -48,10 +47,10 @@ pub fn scalar_at_canonical_array(canonical: Canonical, index: usize) -> VortexRe .map(|i| { scalar_at_canonical_array( list.to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), i, ) - .vortex_expect("scalar_at_canonical_array should succeed in fuzz test") + .expect("scalar_at_canonical_array should succeed in fuzz test") }) .collect(); Scalar::list( @@ -66,10 +65,10 @@ pub fn scalar_at_canonical_array(canonical: Canonical, index: usize) -> VortexRe .map(|i| { scalar_at_canonical_array( list.to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), i, ) - .vortex_expect("scalar_at_canonical_array should succeed in fuzz test") + .expect("scalar_at_canonical_array should succeed in fuzz test") }) .collect(); Scalar::fixed_size_list(list.dtype().clone(), children, array.dtype().nullability()) @@ -82,10 +81,10 @@ pub fn scalar_at_canonical_array(canonical: Canonical, index: usize) 
-> VortexRe scalar_at_canonical_array( field .to_canonical() - .vortex_expect("to_canonical should succeed in fuzz test"), + .expect("to_canonical should succeed in fuzz test"), index, ) - .vortex_expect("scalar_at_canonical_array should succeed in fuzz test") + .expect("scalar_at_canonical_array should succeed in fuzz test") }) .collect(); Scalar::struct_(array.dtype().clone(), field_scalars) diff --git a/fuzz/src/array/sort.rs b/fuzz/src/array/sort.rs index 6c45d436ab3..ed67c6a8263 100644 --- a/fuzz/src/array/sort.rs +++ b/fuzz/src/array/sort.rs @@ -16,7 +16,6 @@ use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::array::take_canonical_array_non_nullable_indices; @@ -75,9 +74,9 @@ pub fn sort_canonical_array(array: &dyn Array) -> VortexResult { sort_indices.sort_by(|a, b| { array .scalar_at(*a) - .vortex_expect("scalar_at") - .partial_cmp(&array.scalar_at(*b).vortex_expect("scalar_at")) - .vortex_expect("must be a valid comparison") + .expect("scalar_at") + .partial_cmp(&array.scalar_at(*b).expect("scalar_at")) + .expect("must be a valid comparison") }); take_canonical_array_non_nullable_indices(array, &sort_indices) } diff --git a/fuzz/src/array/take.rs b/fuzz/src/array/take.rs index c8b169f3747..2b891f4783e 100644 --- a/fuzz/src/array/take.rs +++ b/fuzz/src/array/take.rs @@ -21,7 +21,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; pub fn take_canonical_array_non_nullable_indices( @@ -133,7 +132,7 @@ pub fn take_canonical_array( &array .scalar_at(*idx)? 
.cast(&array.dtype().union_nullability(nullable)) - .vortex_expect("cannot cast scalar nullability"), + .expect("cannot cast scalar nullability"), )?; } else { builder.append_null() diff --git a/fuzz/src/gpu/mod.rs b/fuzz/src/gpu/mod.rs index 099408b6ef3..37622e77a11 100644 --- a/fuzz/src/gpu/mod.rs +++ b/fuzz/src/gpu/mod.rs @@ -98,7 +98,6 @@ pub async fn run_compress_gpu(fuzz: FuzzCompressGpu) -> VortexFuzzResult { use vortex_cuda::CanonicalCudaExt; use vortex_cuda::CudaSession; use vortex_cuda::executor::CudaArrayExt; - use vortex_error::VortexExpect; if !vortex_cuda::cuda_available() { return Err(VortexFuzzError::VortexError( @@ -118,8 +117,7 @@ pub async fn run_compress_gpu(fuzz: FuzzCompressGpu) -> VortexFuzzResult { } }; - let mut cuda_ctx = - CudaSession::create_execution_ctx(&SESSION).vortex_expect("cannot create session"); + let mut cuda_ctx = CudaSession::create_execution_ctx(&SESSION).expect("cannot create session"); let gpu_canonical = match array.clone().execute_cuda(&mut cuda_ctx).await { Ok(c) => c, diff --git a/vortex-array/benches/chunk_array_builder.rs b/vortex-array/benches/chunk_array_builder.rs index 274622a335b..248c534403b 100644 --- a/vortex-array/benches/chunk_array_builder.rs +++ b/vortex-array/benches/chunk_array_builder.rs @@ -18,7 +18,6 @@ use vortex_array::builders::VarBinViewBuilder; use vortex_array::builders::builder_with_capacity; use vortex_array::session::ArraySession; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_session::VortexSession; fn main() { @@ -43,7 +42,7 @@ fn chunked_bool_canonical_into(bencher: Bencher, (len, chunk_count): (usize, usi let mut builder = builder_with_capacity(chunk.dtype(), len * chunk_count); chunk .append_to_builder(builder.as_mut(), &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); builder.finish() }) } @@ -56,7 +55,7 @@ fn chunked_opt_bool_canonical_into(bencher: Bencher, (len, chunk_count): (usize, let mut builder = 
builder_with_capacity(chunk.dtype(), len * chunk_count); chunk .append_to_builder(builder.as_mut(), &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); builder.finish() }) } @@ -81,7 +80,7 @@ fn chunked_varbinview_canonical_into(bencher: Bencher, (len, chunk_count): (usiz ); chunk .append_to_builder(&mut builder, &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); builder.finish() }) } @@ -106,7 +105,7 @@ fn chunked_varbinview_opt_canonical_into(bencher: Bencher, (len, chunk_count): ( ); chunk .append_to_builder(&mut builder, &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); builder.finish() }) } diff --git a/vortex-array/benches/chunked_dict_builder.rs b/vortex-array/benches/chunked_dict_builder.rs index daebfbfb637..51b2f6be858 100644 --- a/vortex-array/benches/chunked_dict_builder.rs +++ b/vortex-array/benches/chunked_dict_builder.rs @@ -14,7 +14,6 @@ use vortex_array::builders::builder_with_capacity; use vortex_array::compute::warm_up_vtables; use vortex_array::session::ArraySession; use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_session::VortexSession; fn main() { @@ -47,7 +46,7 @@ fn chunked_dict_primitive_canonical_into( let mut builder = builder_with_capacity(chunk.dtype(), len * chunk_count); chunk .append_to_builder(builder.as_mut(), &mut SESSION.create_execution_ctx()) - .vortex_expect("append failed"); + .expect("append failed"); builder.finish() }) } diff --git a/vortex-array/benches/varbinview_compact.rs b/vortex-array/benches/varbinview_compact.rs index 3ad3db497da..8703dbc3a0c 100644 --- a/vortex-array/benches/varbinview_compact.rs +++ b/vortex-array/benches/varbinview_compact.rs @@ -13,7 +13,6 @@ use vortex_array::builders::VarBinViewBuilder; use vortex_buffer::Buffer; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; fn main() { 
divan::main(); @@ -43,13 +42,13 @@ fn compact_impl(bencher: Bencher, (output_size, utilization_pct): (usize, usize) let indices = random_indices(output_size, base_size); let taken = base_array .take(indices) - .vortex_expect("operation should succeed in benchmark"); + .expect("operation should succeed in benchmark"); let array = taken.to_varbinview(); bencher.with_inputs(|| &array).bench_refs(|array| { array .compact_buffers() - .vortex_expect("operation should succeed in benchmark") + .expect("operation should succeed in benchmark") }) } @@ -59,13 +58,13 @@ fn compact_sliced_impl(bencher: Bencher, (output_size, utilization_pct): (usize, let sliced = base_array .as_ref() .slice(0..output_size) - .vortex_expect("slice should succeed"); + .expect("slice should succeed"); let array = sliced.to_varbinview(); bencher.with_inputs(|| &array).bench_refs(|array| { array .compact_buffers() - .vortex_expect("operation should succeed in benchmark") + .expect("operation should succeed in benchmark") }) } diff --git a/vortex-array/src/array/mod.rs b/vortex-array/src/array/mod.rs index aa6b8931fdd..c622afa922c 100644 --- a/vortex-array/src/array/mod.rs +++ b/vortex-array/src/array/mod.rs @@ -16,7 +16,6 @@ pub use visitor::*; use vortex_buffer::ByteBuffer; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -300,7 +299,7 @@ impl dyn Array + '_ { /// Returns the array downcast by the given matcher. pub fn as_(&self) -> M::Match<'_> { - self.as_opt::().vortex_expect("Failed to downcast") + self.as_opt::().expect("Failed to downcast") } /// Returns the array downcast by the given matcher. 
@@ -316,7 +315,7 @@ impl dyn Array + '_ { .as_any_arc() .downcast::>() .map_err(|_| vortex_err!("failed to downcast")) - .vortex_expect("Failed to downcast"); + .expect("Failed to downcast"); Ok(match Arc::try_unwrap(arc) { Ok(array) => array.0, Err(arc) => arc.deref().0.clone(), @@ -488,7 +487,7 @@ impl Array for ArrayAdapter { Stat::IsConstant | Stat::IsSorted | Stat::IsStrictSorted ) && value.as_ref().as_exact().is_some_and(|v| { Scalar::try_new(DType::Bool(Nullability::NonNullable), Some(v.clone())) - .vortex_expect("A stat that was expected to be a boolean stat was not") + .expect("A stat that was expected to be a boolean stat was not") .as_bool() .value() .unwrap_or_default() diff --git a/vortex-array/src/arrays/arbitrary.rs b/vortex-array/src/arrays/arbitrary.rs index f30f8a56e2d..081cd1c7645 100644 --- a/vortex-array/src/arrays/arbitrary.rs +++ b/vortex-array/src/arrays/arbitrary.rs @@ -15,7 +15,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::PType; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use super::BoolArray; use super::ChunkedArray; @@ -77,7 +76,7 @@ fn random_array(u: &mut Unstructured, dtype: &DType, len: Option) -> Resu } else { let dtype = chunks[0].dtype().clone(); Ok(ChunkedArray::try_new(chunks, dtype) - .vortex_expect("operation should succeed in arbitrary impl") + .expect("operation should succeed in arbitrary impl") .into_array()) } } @@ -120,9 +119,9 @@ fn random_array_chunk( let mut builder = DecimalBuilder::new::(*decimal, *n); for _i in 0..elem_len { let random_decimal = random_scalar(u, d)?; - builder.append_scalar(&random_decimal).vortex_expect( - "was somehow unable to append a decimal to a decimal builder", - ); + builder + .append_scalar(&random_decimal) + .expect("was somehow unable to append a decimal to a decimal builder"); } Ok(builder.finish()) } @@ -157,7 +156,7 @@ fn random_array_chunk( resolved_len, random_validity(u, *n, resolved_len)?, ) - 
.vortex_expect("operation should succeed in arbitrary impl") + .expect("operation should succeed in arbitrary impl") .into_array()) } DType::List(elem_dtype, null) => random_list(u, elem_dtype, *null, chunk_len), @@ -191,7 +190,7 @@ fn random_fixed_size_list( } else { builder .append_value(random_list_scalar(u, elem_dtype, list_size, null)?.as_list()) - .vortex_expect("can append value"); + .expect("can append value"); } } @@ -238,7 +237,7 @@ fn random_list_with_offset_type( let list_size = u.int_in_range(0..=20)?; builder .append_value(random_list_scalar(u, elem_dtype, list_size, null)?.as_list()) - .vortex_expect("can append value"); + .expect("can append value"); } } diff --git a/vortex-array/src/arrays/bool/array.rs b/vortex-array/src/arrays/bool/array.rs index cbc8ddfeecf..cc656112c02 100644 --- a/vortex-array/src/arrays/bool/array.rs +++ b/vortex-array/src/arrays/bool/array.rs @@ -5,7 +5,6 @@ use arrow_array::BooleanArray; use vortex_buffer::BitBuffer; use vortex_buffer::BitBufferMut; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_mask::Mask; @@ -73,7 +72,7 @@ impl BoolArray { /// /// Panics if the validity length is not equal to the bit buffer length. pub fn new(bits: BitBuffer, validity: Validity) -> Self { - Self::try_new(bits, validity).vortex_expect("Failed to create BoolArray") + Self::try_new(bits, validity).expect("Failed to create BoolArray") } /// Constructs a new `BoolArray` from a `BufferHandle`. @@ -83,7 +82,7 @@ impl BoolArray { /// Panics if the validity length is not equal to the bit buffer length. pub fn new_handle(handle: BufferHandle, offset: usize, len: usize, validity: Validity) -> Self { Self::try_new_from_handle(handle, offset, len, validity) - .vortex_expect("Failed to create BoolArray from BufferHandle") + .expect("Failed to create BoolArray from BufferHandle") } /// Constructs a new `BoolArray`. 
@@ -229,8 +228,8 @@ impl BoolArray { pub fn to_mask(&self) -> Mask { self.maybe_to_mask() - .vortex_expect("failed to check validity") - .vortex_expect("cannot convert nullable boolean array to mask") + .expect("failed to check validity") + .expect("cannot convert nullable boolean array to mask") } pub fn maybe_to_mask(&self) -> VortexResult> { diff --git a/vortex-array/src/arrays/bool/compute/filter.rs b/vortex-array/src/arrays/bool/compute/filter.rs index 82887d8799d..9f49f14a453 100644 --- a/vortex-array/src/arrays/bool/compute/filter.rs +++ b/vortex-array/src/arrays/bool/compute/filter.rs @@ -4,7 +4,6 @@ use vortex_buffer::BitBuffer; use vortex_buffer::BitBufferMut; use vortex_buffer::get_bit; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; use vortex_mask::MaskIter; @@ -30,7 +29,7 @@ impl FilterKernel for BoolVTable { let mask_values = mask .values() - .vortex_expect("AllTrue and AllFalse are handled by filter fn"); + .expect("AllTrue and AllFalse are handled by filter fn"); let buffer = match mask_values.threshold_iter(FILTER_SLICES_DENSITY_THRESHOLD) { MaskIter::Indices(indices) => filter_indices( @@ -61,7 +60,7 @@ pub fn filter_indices( BitBuffer::collect_bool(indices_len, |_idx| { let idx = indices .next() - .vortex_expect("iterator is guaranteed to be within the length of the array."); + .expect("iterator is guaranteed to be within the length of the array."); get_bit(buffer, bools.offset() + idx) }) } diff --git a/vortex-array/src/arrays/bool/compute/sum.rs b/vortex-array/src/arrays/bool/compute/sum.rs index 4a9f8a449b7..b46fec3496d 100644 --- a/vortex-array/src/arrays/bool/compute/sum.rs +++ b/vortex-array/src/arrays/bool/compute/sum.rs @@ -4,7 +4,6 @@ use std::ops::BitAnd; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::AllOr; @@ -34,7 +33,7 @@ impl SumKernel for BoolVTable { let acc_value = accumulator .as_primitive() .as_::() - 
.vortex_expect("cannot be null"); + .expect("cannot be null"); let result = true_count.and_then(|tc| acc_value.checked_add(tc)); Ok(match result { Some(v) => Scalar::primitive(v, Nullability::Nullable), diff --git a/vortex-array/src/arrays/bool/test_harness.rs b/vortex-array/src/arrays/bool/test_harness.rs index 286c0f98110..bff1e14cf86 100644 --- a/vortex-array/src/arrays/bool/test_harness.rs +++ b/vortex-array/src/arrays/bool/test_harness.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use crate::arrays::BoolArray; @@ -9,7 +8,7 @@ use crate::arrays::BoolArray; impl BoolArray { pub fn opt_bool_vec(&self) -> Vec> { self.validity_mask() - .vortex_expect("Failed to get validity mask") + .expect("Failed to get validity mask") .to_bit_buffer() .iter() .zip(self.to_bit_buffer().iter()) @@ -19,7 +18,7 @@ impl BoolArray { pub fn bool_vec(&self) -> Vec { self.validity_mask() - .vortex_expect("Failed to get validity mask") + .expect("Failed to get validity mask") .to_bit_buffer() .iter() .zip(self.to_bit_buffer().iter()) diff --git a/vortex-array/src/arrays/bool/vtable/mod.rs b/vortex-array/src/arrays/bool/vtable/mod.rs index 1c60179b328..208731367d1 100644 --- a/vortex-array/src/arrays/bool/vtable/mod.rs +++ b/vortex-array/src/arrays/bool/vtable/mod.rs @@ -3,7 +3,6 @@ use kernel::PARENT_KERNELS; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -58,7 +57,7 @@ impl VTable for BoolVTable { fn metadata(array: &BoolArray) -> VortexResult { assert!(array.offset < 8, "Offset must be <8, got {}", array.offset); Ok(ProstMetadata(BoolMetadata { - offset: u32::try_from(array.offset).vortex_expect("checked"), + offset: u32::try_from(array.offset).expect("checked"), })) } @@ -112,7 +111,7 @@ impl VTable for BoolVTable { array.validity = if 
children.is_empty() { Validity::from(array.dtype().nullability()) } else { - Validity::Array(children.into_iter().next().vortex_expect("checked")) + Validity::Array(children.into_iter().next().expect("checked")) }; Ok(()) diff --git a/vortex-array/src/arrays/chunked/array.rs b/vortex-array/src/arrays/chunked/array.rs index abb4d483a00..01f9c53e8c1 100644 --- a/vortex-array/src/arrays/chunked/array.rs +++ b/vortex-array/src/arrays/chunked/array.rs @@ -11,7 +11,6 @@ use futures::stream; use vortex_buffer::Buffer; use vortex_buffer::BufferMut; use vortex_dtype::DType; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -64,7 +63,7 @@ impl ChunkedArray { pub unsafe fn new_unchecked(chunks: Vec, dtype: DType) -> Self { #[cfg(debug_assertions)] Self::validate(&chunks, &dtype) - .vortex_expect("[Debug Assertion]: Invalid `ChunkedArray` parameters"); + .expect("[Debug Assertion]: Invalid `ChunkedArray` parameters"); let nchunks = chunks.len(); @@ -84,7 +83,7 @@ impl ChunkedArray { dtype, len: curr_offset .try_into() - .vortex_expect("chunk offset must fit in usize"), + .expect("chunk offset must fit in usize"), chunk_offsets, chunks, stats_set: Default::default(), @@ -134,7 +133,7 @@ impl ChunkedArray { let chunk_start = self.chunk_offsets()[index_chunk]; let index_in_chunk = - usize::try_from(index - chunk_start).vortex_expect("Index is too large for usize"); + usize::try_from(index - chunk_start).expect("Index is too large for usize"); Ok((index_chunk, index_in_chunk)) } @@ -216,8 +215,8 @@ impl FromIterator for ChunkedArray { let dtype = chunks .first() .map(|c| c.dtype().clone()) - .vortex_expect("Cannot infer DType from an empty iterator"); - Self::try_new(chunks, dtype).vortex_expect("Failed to create chunked array from iterator") + .expect("Cannot infer DType from an empty iterator"); + Self::try_new(chunks, dtype).expect("Failed to create chunked array from iterator") } } diff --git 
a/vortex-array/src/arrays/chunked/compute/filter.rs b/vortex-array/src/arrays/chunked/compute/filter.rs index 2e31adcc0aa..e53f97fa59f 100644 --- a/vortex-array/src/arrays/chunked/compute/filter.rs +++ b/vortex-array/src/arrays/chunked/compute/filter.rs @@ -2,8 +2,8 @@ // SPDX-FileCopyrightText: Copyright the Vortex contributors use vortex_buffer::BufferMut; -use vortex_error::VortexExpect; use vortex_error::VortexResult; +use vortex_error::vortex_err; use vortex_mask::Mask; use vortex_mask::MaskIter; @@ -30,7 +30,7 @@ impl FilterKernel for ChunkedVTable { ) -> VortexResult> { let mask_values = mask .values() - .vortex_expect("AllTrue and AllFalse are handled by filter fn"); + .expect("AllTrue and AllFalse are handled by filter fn"); // Based on filter selectivity, we take the values between a range of slices, or // we take individual indices. @@ -117,8 +117,10 @@ pub(crate) fn chunk_filters( // start chunk: append a slice from (start_idx, start_chunk_end), i.e. whole chunk. // end chunk: append a slice from (0, end_idx). // chunks between start and end: append ChunkFilter::All. 
- let start_chunk_len: usize = - (chunk_offsets[start_chunk + 1] - chunk_offsets[start_chunk]).try_into()?; + let start_chunk_len: usize = (chunk_offsets[start_chunk + 1] + - chunk_offsets[start_chunk]) + .try_into() + .map_err(|e| vortex_err!("chunk length conversion failed: {e}"))?; let start_slice = (start_idx, start_chunk_len); match &mut chunk_filters[start_chunk] { f @ (ChunkFilter::All | ChunkFilter::None) => { @@ -194,7 +196,7 @@ pub(crate) fn find_chunk_idx(idx: usize, chunk_ends: &[u64]) -> VortexResult<(us .saturating_sub(1); let chunk_begin: usize = chunk_ends[chunk_id] .try_into() - .vortex_expect("chunk end must fit in usize"); + .expect("chunk end must fit in usize"); let chunk_offset = idx - chunk_begin; Ok((chunk_id, chunk_offset)) diff --git a/vortex-array/src/arrays/chunked/compute/is_constant.rs b/vortex-array/src/arrays/chunked/compute/is_constant.rs index b8909fe043b..494dd66f0da 100644 --- a/vortex-array/src/arrays/chunked/compute/is_constant.rs +++ b/vortex-array/src/arrays/chunked/compute/is_constant.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -23,7 +22,7 @@ impl IsConstantKernel for ChunkedVTable { let first_chunk = chunks .next() - .vortex_expect("Must have at least one non-empty chunk"); + .expect("Must have at least one non-empty chunk"); match is_constant_opts(first_chunk, opts)? 
{ // Un-determined diff --git a/vortex-array/src/arrays/chunked/compute/mask.rs b/vortex-array/src/arrays/chunked/compute/mask.rs index 88a8f0898d5..25d6545e6cb 100644 --- a/vortex-array/src/arrays/chunked/compute/mask.rs +++ b/vortex-array/src/arrays/chunked/compute/mask.rs @@ -2,6 +2,7 @@ // SPDX-FileCopyrightText: Copyright the Vortex contributors use vortex_error::VortexResult; +use vortex_error::vortex_err; use crate::ArrayRef; use crate::ExecutionCtx; @@ -25,8 +26,12 @@ impl MaskKernel for ChunkedVTable { .iter() .enumerate() .map(|(i, chunk)| { - let start: usize = chunk_offsets[i].try_into()?; - let end: usize = chunk_offsets[i + 1].try_into()?; + let start: usize = chunk_offsets[i] + .try_into() + .map_err(|e| vortex_err!("offset conversion failed: {e}"))?; + let end: usize = chunk_offsets[i + 1] + .try_into() + .map_err(|e| vortex_err!("offset conversion failed: {e}"))?; let chunk_mask = mask.slice(start..end)?; MaskExpr.try_new_array(chunk.len(), EmptyOptions, [chunk.clone(), chunk_mask]) }) diff --git a/vortex-array/src/arrays/chunked/compute/take.rs b/vortex-array/src/arrays/chunked/compute/take.rs index 14d7f4c639a..e837f1c0b2d 100644 --- a/vortex-array/src/arrays/chunked/compute/take.rs +++ b/vortex-array/src/arrays/chunked/compute/take.rs @@ -5,6 +5,7 @@ use vortex_buffer::BufferMut; use vortex_dtype::DType; use vortex_dtype::PType; use vortex_error::VortexResult; +use vortex_error::vortex_err; use crate::Array; use crate::ArrayRef; @@ -34,9 +35,15 @@ fn take_chunked(array: &ChunkedArray, indices: &dyn Array) -> VortexResult(ctx)?; let sizes_arr = chunk_array .sizes() .to_array() .cast(DType::Primitive(PType::U64, Nullability::NonNullable)) - .vortex_expect("Must be able to fit array offsets in u64") + .expect("Must be able to fit array offsets in u64") .execute::(ctx)?; let offsets_slice = offsets_arr.as_slice::(); diff --git a/vortex-array/src/arrays/constant/compute/sum.rs b/vortex-array/src/arrays/constant/compute/sum.rs index 
8745fa07c64..892f6fc542a 100644 --- a/vortex-array/src/arrays/constant/compute/sum.rs +++ b/vortex-array/src/arrays/constant/compute/sum.rs @@ -10,7 +10,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::i256; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -54,7 +53,7 @@ fn sum_scalar( let accumulator = accumulator .as_primitive() .as_::() - .vortex_expect("cannot be null"); + .expect("cannot be null"); Ok(accumulator .checked_add(count) .map(|v| ScalarValue::Primitive(v.into()))) @@ -86,10 +85,8 @@ fn sum_decimal( ) -> VortexResult> { let result_dtype = Stat::Sum .dtype(&DType::Decimal(decimal_dtype, Nullability::Nullable)) - .vortex_expect("decimal supports sum"); - let result_decimal_type = result_dtype - .as_decimal_opt() - .vortex_expect("must be decimal"); + .expect("decimal supports sum"); + let result_decimal_type = result_dtype.as_decimal_opt().expect("must be decimal"); let Some(value) = decimal_scalar.decimal_value() else { // Null value: return null @@ -143,7 +140,7 @@ where let initial = accumulator .as_primitive() .as_::() - .vortex_expect("cannot be null"); + .expect("cannot be null"); Ok(initial.checked_add(&array_sum)) } @@ -155,10 +152,8 @@ fn sum_float( let initial = accumulator .as_primitive() .as_::() - .vortex_expect("cannot be null"); - let v = primitive_scalar - .as_::() - .vortex_expect("cannot be null"); + .expect("cannot be null"); + let v = primitive_scalar.as_::().expect("cannot be null"); let len_f64: f64 = array_len.as_(); Ok(Some(initial + v * len_f64)) @@ -174,7 +169,6 @@ mod tests { use vortex_dtype::Nullability::Nullable; use vortex_dtype::PType; use vortex_dtype::i256; - use vortex_error::VortexExpect; use crate::Array; use crate::IntoArray; @@ -292,9 +286,9 @@ mod tests { let acc = -2048669276050936500000000000f64; let array = ConstantArray::new(6.1811675e16f64, 25); let 
sum = sum_with_accumulator(array.as_ref(), &Scalar::primitive(acc, Nullable)) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); assert_eq!( - f64::try_from(&sum).vortex_expect("operation should succeed in test"), + f64::try_from(&sum).expect("operation should succeed in test"), -2048669274505644600000000000f64 ); } diff --git a/vortex-array/src/arrays/constant/vtable/canonical.rs b/vortex-array/src/arrays/constant/vtable/canonical.rs index e19eea9781e..d4c01d41f55 100644 --- a/vortex-array/src/arrays/constant/vtable/canonical.rs +++ b/vortex-array/src/arrays/constant/vtable/canonical.rs @@ -12,7 +12,6 @@ use vortex_dtype::Nullability; use vortex_dtype::match_each_decimal_value; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Canonical; @@ -60,8 +59,7 @@ pub(crate) fn constant_canonicalize(array: &ConstantArray) -> VortexResult ListViewArray { let mut builder = builder_with_capacity( list.dtype() .as_list_element_opt() - .vortex_expect("list scalar somehow did not have a list DType"), + .expect("list scalar somehow did not have a list DType"), list.len(), ); for scalar in &elements { builder .append_scalar(scalar) - .vortex_expect("list element scalar was invalid"); + .expect("list element scalar was invalid"); } builder.finish() } else { @@ -292,7 +290,7 @@ fn constant_canonical_fixed_size_list_array( for v in &values { elements_builder .append_scalar(v) - .vortex_expect("must be a same dtype"); + .expect("must be a same dtype"); } } diff --git a/vortex-array/src/arrays/decimal/array.rs b/vortex-array/src/arrays/decimal/array.rs index 7c154f0a1e1..d4f664de5e9 100644 --- a/vortex-array/src/arrays/decimal/array.rs +++ b/vortex-array/src/arrays/decimal/array.rs @@ -14,7 +14,6 @@ use vortex_dtype::IntegerPType; use vortex_dtype::NativeDecimalType; use vortex_dtype::match_each_decimal_value_type; use 
vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_panic; @@ -111,8 +110,7 @@ impl DecimalArray { decimal_dtype: DecimalDType, validity: Validity, ) -> Self { - Self::try_new(buffer, decimal_dtype, validity) - .vortex_expect("DecimalArray construction failed") + Self::try_new(buffer, decimal_dtype, validity).expect("DecimalArray construction failed") } /// Creates a new [`DecimalArray`] from a [`BufferHandle`] of values that may live in @@ -129,7 +127,7 @@ impl DecimalArray { validity: Validity, ) -> Self { Self::try_new_handle(values, values_type, decimal_dtype, validity) - .vortex_expect("DecimalArray construction failed") + .expect("DecimalArray construction failed") } /// Constructs a new `DecimalArray`. @@ -217,7 +215,7 @@ impl DecimalArray { #[cfg(debug_assertions)] { Self::validate(&values, values_type, &validity) - .vortex_expect("[Debug Assertion]: Invalid `DecimalArray` parameters"); + .expect("[Debug Assertion]: Invalid `DecimalArray` parameters"); } Self { @@ -277,7 +275,7 @@ impl DecimalArray { } pub fn into_parts(self) -> DecimalArrayParts { - let decimal_dtype = self.dtype.into_decimal_opt().vortex_expect("cannot fail"); + let decimal_dtype = self.dtype.into_decimal_opt().expect("cannot fail"); DecimalArrayParts { decimal_dtype, @@ -424,7 +422,7 @@ where } for (idx, value) in patch_indices.iter().zip_eq(patch_values.into_iter()) { - buffer[idx.as_() - patch_indices_offset] = ::from(value).vortex_expect( + buffer[idx.as_() - patch_indices_offset] = ::from(value).expect( "values of a given DecimalDType are representable in all compatible NativeDecimalType", ); } diff --git a/vortex-array/src/arrays/decimal/compute/cast.rs b/vortex-array/src/arrays/decimal/compute/cast.rs index c1256c0707b..4dbf6ee7704 100644 --- a/vortex-array/src/arrays/decimal/compute/cast.rs +++ b/vortex-array/src/arrays/decimal/compute/cast.rs @@ -6,7 +6,6 @@ use 
vortex_dtype::DType; use vortex_dtype::DecimalType; use vortex_dtype::NativeDecimalType; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_panic; @@ -135,7 +134,7 @@ pub fn upcast_decimal_values( /// Since T is wider than F, this conversion never fails. fn upcast_decimal_buffer(from: Buffer) -> Buffer { from.iter() - .map(|&v| T::from(v).vortex_expect("upcast should never fail")) + .map(|&v| T::from(v).expect("upcast should never fail")) .collect() } diff --git a/vortex-array/src/arrays/decimal/compute/fill_null.rs b/vortex-array/src/arrays/decimal/compute/fill_null.rs index 504dbd2638c..ca88e30ec2f 100644 --- a/vortex-array/src/arrays/decimal/compute/fill_null.rs +++ b/vortex-array/src/arrays/decimal/compute/fill_null.rs @@ -7,7 +7,6 @@ use std::ops::Not; use vortex_buffer::BitBuffer; use vortex_dtype::NativeDecimalType; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use super::cast::upcast_decimal_values; @@ -37,7 +36,7 @@ impl FillNullKernel for DecimalVTable { let decimal_scalar = fill_value.as_decimal(); let decimal_value = decimal_scalar .decimal_value() - .vortex_expect("fill_null requires a non-null fill value"); + .expect("fill_null requires a non-null fill value"); match_each_decimal_value_type!(array.values_type(), |T| { fill_invalid_positions::( array, diff --git a/vortex-array/src/arrays/decimal/compute/sum.rs b/vortex-array/src/arrays/decimal/compute/sum.rs index a777b89ce8f..a7bf0eb4415 100644 --- a/vortex-array/src/arrays/decimal/compute/sum.rs +++ b/vortex-array/src/arrays/decimal/compute/sum.rs @@ -11,7 +11,6 @@ use vortex_dtype::DecimalDType; use vortex_dtype::DecimalType; use vortex_dtype::Nullability::Nullable; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use 
vortex_mask::Mask; @@ -29,16 +28,14 @@ impl SumKernel for DecimalVTable { fn sum(&self, array: &DecimalArray, accumulator: &Scalar) -> VortexResult { let return_dtype = Stat::Sum .dtype(array.dtype()) - .vortex_expect("sum for decimals exists"); - let return_decimal_dtype = *return_dtype - .as_decimal_opt() - .vortex_expect("must be decimal"); + .expect("sum for decimals exists"); + let return_decimal_dtype = *return_dtype.as_decimal_opt().expect("must be decimal"); // Extract the initial value as a `DecimalValue`. let initial_decimal = accumulator .as_decimal() .decimal_value() - .vortex_expect("cannot be null"); + .expect("cannot be null"); let mask = array.validity_mask()?; let validity = match &mask { @@ -54,7 +51,7 @@ impl SumKernel for DecimalVTable { match_each_decimal_value_type!(values_type, |O| { let initial_val: O = initial_decimal .cast() - .vortex_expect("cannot fail to cast initial value"); + .expect("cannot fail to cast initial value"); Ok(sum_to_scalar( array.buffer::(), @@ -133,7 +130,6 @@ mod tests { use vortex_dtype::DecimalDType; use vortex_dtype::Nullability; use vortex_dtype::i256; - use vortex_error::VortexExpect; use crate::arrays::DecimalArray; use crate::compute::sum; @@ -406,7 +402,7 @@ mod tests { ); assert_eq!( - sum(decimal.as_ref()).vortex_expect("operation should succeed in test"), + sum(decimal.as_ref()).expect("operation should succeed in test"), Scalar::null(DType::Decimal(decimal_dtype, Nullability::Nullable)) ); } diff --git a/vortex-array/src/arrays/decimal/utils.rs b/vortex-array/src/arrays/decimal/utils.rs index 1b148b130a9..6af7b3beaad 100644 --- a/vortex-array/src/arrays/decimal/utils.rs +++ b/vortex-array/src/arrays/decimal/utils.rs @@ -5,7 +5,6 @@ use itertools::Itertools; use itertools::MinMaxResult; use vortex_dtype::DecimalType; use vortex_dtype::i256; -use vortex_error::VortexExpect; use crate::arrays::DecimalArray; use crate::vtable::ValidityHelper; @@ -26,7 +25,7 @@ macro_rules! 
try_downcast { $array .buffer::<$src>() .into_iter() - .map(|v| <$dst as BigCast>::from(v).vortex_expect("decimal conversion failure")) + .map(|v| <$dst as BigCast>::from(v).expect("decimal conversion failure")) .collect(), $array.decimal_dtype(), $array.validity().clone(), diff --git a/vortex-array/src/arrays/decimal/vtable/mod.rs b/vortex-array/src/arrays/decimal/vtable/mod.rs index d681555a6f7..4421a70d36e 100644 --- a/vortex-array/src/arrays/decimal/vtable/mod.rs +++ b/vortex-array/src/arrays/decimal/vtable/mod.rs @@ -7,7 +7,6 @@ use vortex_dtype::DType; use vortex_dtype::DecimalType; use vortex_dtype::NativeDecimalType; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -129,7 +128,7 @@ impl VTable for DecimalVTable { children .into_iter() .next() - .vortex_expect("children length already validated"), + .expect("children length already validated"), ); } Ok(()) diff --git a/vortex-array/src/arrays/dict/arbitrary.rs b/vortex-array/src/arrays/dict/arbitrary.rs index 83cafc023f1..2cb35e2e467 100644 --- a/vortex-array/src/arrays/dict/arbitrary.rs +++ b/vortex-array/src/arrays/dict/arbitrary.rs @@ -10,7 +10,6 @@ use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use super::DictArray; use crate::ArrayRef; @@ -86,7 +85,7 @@ impl ArbitraryDictArray { Ok(ArbitraryDictArray( DictArray::try_new(codes, values) - .vortex_expect("DictArray creation should succeed in arbitrary impl"), + .expect("DictArray creation should succeed in arbitrary impl"), )) } } @@ -105,7 +104,7 @@ where .map(|_| { let idx = u.int_in_range(0..=max_value - 1)?; // max_value is bounded by T::MAX in the caller, so conversion always succeeds - Ok(T::from(idx).vortex_expect("value within type bounds")) + Ok(T::from(idx).expect("value within type bounds")) }) .collect::>>()?; let 
validity = random_validity(u, nullability, len)?; diff --git a/vortex-array/src/arrays/dict/array.rs b/vortex-array/src/arrays/dict/array.rs index de616417ce1..867647b39c4 100644 --- a/vortex-array/src/arrays/dict/array.rs +++ b/vortex-array/src/arrays/dict/array.rs @@ -5,7 +5,6 @@ use vortex_buffer::BitBuffer; use vortex_dtype::DType; use vortex_dtype::PType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -85,9 +84,8 @@ impl DictArray { #[cfg(debug_assertions)] { - use vortex_error::VortexExpect; self.validate_all_values_referenced() - .vortex_expect("validation should succeed when all values are referenced") + .expect("validation should succeed when all values are referenced") } self @@ -98,7 +96,7 @@ impl DictArray { /// This constructor will panic if `codes` or `values` do not pass validation for building /// a new `DictArray`. See [`DictArray::try_new`] for a description of the error conditions. pub fn new(codes: ArrayRef, values: ArrayRef) -> Self { - Self::try_new(codes, values).vortex_expect("DictArray new") + Self::try_new(codes, values).expect("DictArray new") } /// Build a new `DictArray` from its components, `codes` and `values`. 
@@ -241,7 +239,6 @@ mod test { use vortex_dtype::Nullability::NonNullable; use vortex_dtype::PType; use vortex_dtype::UnsignedPType; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; use vortex_mask::AllOr; @@ -351,13 +348,11 @@ mod test { .map(|_| rng.random::()) .collect::(); let codes = (0..len) - .map(|_| { - Code::from(rng.random_range(0..unique_values)).vortex_expect("valid value") - }) + .map(|_| Code::from(rng.random_range(0..unique_values)).expect("valid value")) .collect::(); DictArray::try_new(codes.into_array(), values.into_array()) - .vortex_expect("DictArray creation should succeed in arbitrary impl") + .expect("DictArray creation should succeed in arbitrary impl") .into_array() }) .collect::() diff --git a/vortex-array/src/arrays/dict/compute/fill_null.rs b/vortex-array/src/arrays/dict/compute/fill_null.rs index ca900abe856..34bce3c9568 100644 --- a/vortex-array/src/arrays/dict/compute/fill_null.rs +++ b/vortex-array/src/arrays/dict/compute/fill_null.rs @@ -85,7 +85,6 @@ mod tests { use vortex_buffer::BitBuffer; use vortex_buffer::buffer; use vortex_dtype::Nullability; - use vortex_error::VortexExpect; use crate::IntoArray; use crate::ToCanonical; @@ -106,12 +105,12 @@ mod tests { .into_array(), PrimitiveArray::new(buffer![10, 20, 20], Validity::AllValid).into_array(), ) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let filled = dict .to_array() .fill_null(Scalar::primitive(20, Nullability::NonNullable)) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let filled_primitive = filled.to_primitive(); assert_arrays_eq!(filled_primitive, PrimitiveArray::from_iter([10, 20, 20])); assert!(filled_primitive.all_valid().unwrap()); diff --git a/vortex-array/src/arrays/dict/execute.rs b/vortex-array/src/arrays/dict/execute.rs index 0e1dfd506bb..a9adf79ea2a 100644 --- a/vortex-array/src/arrays/dict/execute.rs 
+++ b/vortex-array/src/arrays/dict/execute.rs @@ -3,7 +3,6 @@ //! Execution logic for DictArray - takes from values using codes (indices). -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Canonical; @@ -64,7 +63,7 @@ fn take_bool( ) -> VortexResult { Ok( ::take(array, codes.as_ref(), ctx)? - .vortex_expect("take bool should not return None") + .expect("take bool should not return None") .as_::() .clone(), ) @@ -76,8 +75,8 @@ fn take_primitive( ctx: &mut ExecutionCtx, ) -> PrimitiveArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take primitive array") - .vortex_expect("take primitive should not return None") + .expect("take primitive array") + .expect("take primitive should not return None") .as_::() .clone() } @@ -88,8 +87,8 @@ fn take_decimal( ctx: &mut ExecutionCtx, ) -> DecimalArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take decimal array") - .vortex_expect("take decimal should not return None") + .expect("take decimal array") + .expect("take decimal should not return None") .as_::() .clone() } @@ -100,8 +99,8 @@ fn take_varbinview( ctx: &mut ExecutionCtx, ) -> VarBinViewArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take varbinview array") - .vortex_expect("take varbinview should not return None") + .expect("take varbinview array") + .expect("take varbinview should not return None") .as_::() .clone() } @@ -112,8 +111,8 @@ fn take_listview( ctx: &mut ExecutionCtx, ) -> ListViewArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take listview array") - .vortex_expect("take listview should not return None") + .expect("take listview array") + .expect("take listview should not return None") .as_::() .clone() } @@ -124,16 +123,16 @@ fn take_fixed_size_list( ctx: &mut ExecutionCtx, ) -> FixedSizeListArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take fixed size list array") - .vortex_expect("take fixed size list should not return None") + .expect("take fixed size list array") 
+ .expect("take fixed size list should not return None") .as_::() .clone() } fn take_struct(array: &StructArray, codes: &PrimitiveArray, ctx: &mut ExecutionCtx) -> StructArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take struct array") - .vortex_expect("take struct should not return None") + .expect("take struct array") + .expect("take struct should not return None") .as_::() .clone() } @@ -144,8 +143,8 @@ fn take_extension( ctx: &mut ExecutionCtx, ) -> ExtensionArray { ::take(array, codes.as_ref(), ctx) - .vortex_expect("take extension storage") - .vortex_expect("take extension should not return None") + .expect("take extension storage") + .expect("take extension should not return None") .as_::() .clone() } diff --git a/vortex-array/src/arrays/dict/vtable/operations.rs b/vortex-array/src/arrays/dict/vtable/operations.rs index edfbd408165..4fc55bf06c7 100644 --- a/vortex-array/src/arrays/dict/vtable/operations.rs +++ b/vortex-array/src/arrays/dict/vtable/operations.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use super::DictVTable; @@ -25,6 +24,6 @@ impl OperationsVTable for DictVTable { .values() .scalar_at(dict_index)? 
.cast(array.dtype()) - .vortex_expect("Array dtype will only differ by nullability")) + .expect("Array dtype will only differ by nullability")) } } diff --git a/vortex-array/src/arrays/dict_test.rs b/vortex-array/src/arrays/dict_test.rs index 0eee899f357..157f2f4644d 100644 --- a/vortex-array/src/arrays/dict_test.rs +++ b/vortex-array/src/arrays/dict_test.rs @@ -12,7 +12,6 @@ use rand::prelude::IndexedRandom; use rand::prelude::StdRng; use vortex_buffer::Buffer; use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use super::ChunkedArray; @@ -82,7 +81,7 @@ where (0..chunk_count) .map(|_| { gen_primitive_dict::(len, unique_values) - .vortex_expect("operation should succeed in test") + .expect("operation should succeed in test") .into_array() }) .collect::() diff --git a/vortex-array/src/arrays/extension/vtable/mod.rs b/vortex-array/src/arrays/extension/vtable/mod.rs index 2f86f9053a7..97567e8aeef 100644 --- a/vortex-array/src/arrays/extension/vtable/mod.rs +++ b/vortex-array/src/arrays/extension/vtable/mod.rs @@ -10,7 +10,6 @@ mod visitor; use kernel::PARENT_KERNELS; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -88,7 +87,7 @@ impl VTable for ExtensionVTable { array.storage = children .into_iter() .next() - .vortex_expect("children length already validated"); + .expect("children length already validated"); Ok(()) } diff --git a/vortex-array/src/arrays/filter/array.rs b/vortex-array/src/arrays/filter/array.rs index 40857487a65..2699cf9e4c3 100644 --- a/vortex-array/src/arrays/filter/array.rs +++ b/vortex-array/src/arrays/filter/array.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure_eq; use vortex_mask::Mask; @@ -36,7 +35,7 @@ pub struct FilterArray { 
impl FilterArray { pub fn new(array: ArrayRef, mask: Mask) -> Self { - Self::try_new(array, mask).vortex_expect("FilterArray construction failed") + Self::try_new(array, mask).expect("FilterArray construction failed") } pub fn try_new(array: ArrayRef, mask: Mask) -> VortexResult { diff --git a/vortex-array/src/arrays/filter/execute/bool.rs b/vortex-array/src/arrays/filter/execute/bool.rs index 40bc70bfdc5..ed03a3ceb59 100644 --- a/vortex-array/src/arrays/filter/execute/bool.rs +++ b/vortex-array/src/arrays/filter/execute/bool.rs @@ -4,14 +4,13 @@ use std::sync::Arc; use vortex_compute::filter::Filter; -use vortex_error::VortexExpect; use vortex_mask::MaskValues; use crate::arrays::BoolArray; use crate::arrays::filter::execute::filter_validity; pub fn filter_bool(array: &BoolArray, mask: &Arc) -> BoolArray { - let validity = array.validity().vortex_expect("missing BoolArray validity"); + let validity = array.validity().expect("missing BoolArray validity"); let filtered_validity = filter_validity(validity, mask); let bit_buffer = array.to_bit_buffer(); diff --git a/vortex-array/src/arrays/filter/execute/fixed_size_list.rs b/vortex-array/src/arrays/filter/execute/fixed_size_list.rs index f892f90921f..301b8bc19e3 100644 --- a/vortex-array/src/arrays/filter/execute/fixed_size_list.rs +++ b/vortex-array/src/arrays/filter/execute/fixed_size_list.rs @@ -3,7 +3,6 @@ use std::sync::Arc; -use vortex_error::VortexExpect; use vortex_mask::Mask; use vortex_mask::MaskIter; use vortex_mask::MaskValues; @@ -41,7 +40,7 @@ pub fn filter_fixed_size_list( // Allow the child array to filter itself. 
let new_elements = elements .filter(elements_mask) - .vortex_expect("FixedSizeListArray elements are guaranteed to support filter"); + .expect("FixedSizeListArray elements are guaranteed to support filter"); debug_assert_eq!(new_elements.len(), new_len * list_size as usize); new_elements diff --git a/vortex-array/src/arrays/filter/execute/listview.rs b/vortex-array/src/arrays/filter/execute/listview.rs index c9e9d369cff..3241080de53 100644 --- a/vortex-array/src/arrays/filter/execute/listview.rs +++ b/vortex-array/src/arrays/filter/execute/listview.rs @@ -3,7 +3,6 @@ use std::sync::Arc; -use vortex_error::VortexExpect; use vortex_mask::MaskValues; use crate::arrays::ListViewArray; @@ -52,10 +51,10 @@ pub fn filter_listview(array: &ListViewArray, selection_mask: &Arc) let mask_for_filter = values_to_mask(selection_mask); let new_offsets = offsets .filter(mask_for_filter.clone()) - .vortex_expect("ListViewArray offsets are guaranteed to support filter"); + .expect("ListViewArray offsets are guaranteed to support filter"); let new_sizes = sizes .filter(mask_for_filter) - .vortex_expect("ListViewArray sizes are guaranteed to support filter"); + .expect("ListViewArray sizes are guaranteed to support filter"); // SAFETY: Filter operation maintains all `ListViewArray` invariants: // - Offsets and sizes are derived from existing valid child arrays. 
@@ -71,7 +70,7 @@ pub fn filter_listview(array: &ListViewArray, selection_mask: &Arc) new_array .rebuild(ListViewRebuildMode::MakeZeroCopyToList) - .vortex_expect("ListViewArray rebuild to zero-copy List should always succeed") + .expect("ListViewArray rebuild to zero-copy List should always succeed") } #[cfg(test)] diff --git a/vortex-array/src/arrays/filter/execute/mod.rs b/vortex-array/src/arrays/filter/execute/mod.rs index 70cd460ef9d..48b3a908163 100644 --- a/vortex-array/src/arrays/filter/execute/mod.rs +++ b/vortex-array/src/arrays/filter/execute/mod.rs @@ -7,7 +7,6 @@ use std::sync::Arc; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; use vortex_mask::MaskValues; @@ -40,7 +39,7 @@ fn values_to_mask(values: &Arc) -> Mask { fn filter_validity(validity: Validity, mask: &Arc) -> Validity { validity .filter(&values_to_mask(mask)) - .vortex_expect("Somehow unable to wrap filter around a validity array") + .expect("Somehow unable to wrap filter around a validity array") } /// Check for some fast-path execution conditions before calling [`execute_filter`]. 
@@ -88,7 +87,7 @@ pub(super) fn execute_filter(canonical: Canonical, mask: &Arc) -> Ca let filtered_storage = a .storage() .filter(values_to_mask(mask)) - .vortex_expect("ExtensionArray storage type somehow could not be filtered"); + .expect("ExtensionArray storage type somehow could not be filtered"); Canonical::Extension(ExtensionArray::new(a.ext_dtype().clone(), filtered_storage)) } } diff --git a/vortex-array/src/arrays/filter/execute/primitive.rs b/vortex-array/src/arrays/filter/execute/primitive.rs index 7000a67503b..f3560728e90 100644 --- a/vortex-array/src/arrays/filter/execute/primitive.rs +++ b/vortex-array/src/arrays/filter/execute/primitive.rs @@ -5,16 +5,13 @@ use std::sync::Arc; use vortex_compute::filter::Filter; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_mask::MaskValues; use crate::arrays::PrimitiveArray; use crate::arrays::filter::execute::filter_validity; pub fn filter_primitive(array: &PrimitiveArray, mask: &Arc) -> PrimitiveArray { - let validity = array - .validity() - .vortex_expect("missing PrimitiveArray validity"); + let validity = array.validity().expect("missing PrimitiveArray validity"); let filtered_validity = filter_validity(validity, mask); match_each_native_ptype!(array.ptype(), |T| { diff --git a/vortex-array/src/arrays/filter/execute/struct_.rs b/vortex-array/src/arrays/filter/execute/struct_.rs index dff28d27430..db564f347e3 100644 --- a/vortex-array/src/arrays/filter/execute/struct_.rs +++ b/vortex-array/src/arrays/filter/execute/struct_.rs @@ -3,7 +3,6 @@ use std::sync::Arc; -use vortex_error::VortexExpect; use vortex_mask::MaskValues; use crate::ArrayRef; @@ -22,7 +21,7 @@ pub fn filter_struct(array: &StructArray, mask: &Arc) -> StructArray .map(|field| { field .filter(mask_for_filter.clone()) - .vortex_expect("StructArray fields are guaranteed to support filter") + .expect("StructArray fields are guaranteed to support filter") }) .collect(); @@ -37,7 +36,7 @@ pub fn 
filter_struct(array: &StructArray, mask: &Arc) -> StructArray length, filtered_validity, ) - .vortex_expect("filtered StructArray fields have consistent lengths") + .expect("filtered StructArray fields have consistent lengths") } #[cfg(test)] diff --git a/vortex-array/src/arrays/filter/execute/varbinview.rs b/vortex-array/src/arrays/filter/execute/varbinview.rs index 60286586ba3..55bcdb51031 100644 --- a/vortex-array/src/arrays/filter/execute/varbinview.rs +++ b/vortex-array/src/arrays/filter/execute/varbinview.rs @@ -3,7 +3,6 @@ use std::sync::Arc; -use vortex_error::VortexExpect; use vortex_mask::MaskValues; use crate::arrays::VarBinViewArray; @@ -14,7 +13,7 @@ use crate::compute::arrow_filter_fn; pub fn filter_varbinview(array: &VarBinViewArray, mask: &Arc) -> VarBinViewArray { // Delegate to the Arrow implementation of filter over `VarBinView`. arrow_filter_fn(array.as_ref(), &values_to_mask(mask)) - .vortex_expect("VarBinViewArray is Arrow-compatible and supports arrow_filter_fn") + .expect("VarBinViewArray is Arrow-compatible and supports arrow_filter_fn") .as_::() .clone() } diff --git a/vortex-array/src/arrays/filter/vtable.rs b/vortex-array/src/arrays/filter/vtable.rs index 6a9f74b0960..0cb82c42e7e 100644 --- a/vortex-array/src/arrays/filter/vtable.rs +++ b/vortex-array/src/arrays/filter/vtable.rs @@ -6,7 +6,6 @@ use std::fmt::Formatter; use std::hash::Hasher; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -105,7 +104,7 @@ impl VTable for FilterVTable { array.child = children .into_iter() .next() - .vortex_expect("children length already validated"); + .expect("children length already validated"); Ok(()) } diff --git a/vortex-array/src/arrays/fixed_size_list/array.rs b/vortex-array/src/arrays/fixed_size_list/array.rs index d62b3d569bb..1b69c6177e4 100644 --- a/vortex-array/src/arrays/fixed_size_list/array.rs +++ 
b/vortex-array/src/arrays/fixed_size_list/array.rs @@ -4,7 +4,6 @@ use std::sync::Arc; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; @@ -108,7 +107,7 @@ impl FixedSizeListArray { /// in [`FixedSizeListArray::new_unchecked`]. pub fn new(elements: ArrayRef, list_size: u32, validity: Validity, len: usize) -> Self { Self::try_new(elements, list_size, validity, len) - .vortex_expect("FixedSizeListArray construction failed") + .expect("FixedSizeListArray construction failed") } /// Constructs a new `FixedSizeListArray`. @@ -155,7 +154,7 @@ impl FixedSizeListArray { ) -> Self { #[cfg(debug_assertions)] Self::validate(&elements, len, list_size, &validity) - .vortex_expect("[Debug Assertion]: Invalid `FixedSizeListArray` parameters"); + .expect("[Debug Assertion]: Invalid `FixedSizeListArray` parameters"); let nullability = validity.nullability(); diff --git a/vortex-array/src/arrays/fixed_size_list/compute/take.rs b/vortex-array/src/arrays/fixed_size_list/compute/take.rs index 3d507b19490..23afb328271 100644 --- a/vortex-array/src/arrays/fixed_size_list/compute/take.rs +++ b/vortex-array/src/arrays/fixed_size_list/compute/take.rs @@ -5,7 +5,6 @@ use vortex_buffer::BitBufferMut; use vortex_buffer::BufferMut; use vortex_dtype::IntegerPType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -105,9 +104,7 @@ fn take_non_nullable_fsl( // Expand the list into individual element indices. for i in list_start..list_end { // SAFETY: We've allocated enough space for enough indices for all `new_len` lists (that each consist of `list_size = list_end - list_start` elements), so we know we have enough capacity. 
- unsafe { - elements_indices.push_unchecked(I::from_usize(i).vortex_expect("i < list_end")) - }; + unsafe { elements_indices.push_unchecked(I::from_usize(i).expect("i < list_end")) }; } } @@ -169,9 +166,7 @@ fn take_nullable_fsl( // Expand the list into individual element indices. for i in list_start..list_end { // SAFETY: We've allocated enough space for enough indices for all `new_len` lists (that each consist of `list_size = list_end - list_start` elements), so we know we have enough capacity. - unsafe { - elements_indices.push_unchecked(I::from_usize(i).vortex_expect("i < list_end")) - }; + unsafe { elements_indices.push_unchecked(I::from_usize(i).expect("i < list_end")) }; } new_validity_builder.append(true); diff --git a/vortex-array/src/arrays/fixed_size_list/vtable/mod.rs b/vortex-array/src/arrays/fixed_size_list/vtable/mod.rs index 75d97d6bfb5..d54bf0a6525 100644 --- a/vortex-array/src/arrays/fixed_size_list/vtable/mod.rs +++ b/vortex-array/src/arrays/fixed_size_list/vtable/mod.rs @@ -2,7 +2,6 @@ // SPDX-FileCopyrightText: Copyright the Vortex contributors use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -132,9 +131,7 @@ impl VTable for FixedSizeListVTable { ); let mut iter = children.into_iter(); - let elements = iter - .next() - .vortex_expect("children length already validated"); + let elements = iter.next().expect("children length already validated"); let validity = if let Some(validity_array) = iter.next() { Validity::Array(validity_array) } else { diff --git a/vortex-array/src/arrays/list/array.rs b/vortex-array/src/arrays/list/array.rs index 5239d7f1e38..e2fdb5e1aeb 100644 --- a/vortex-array/src/arrays/list/array.rs +++ b/vortex-array/src/arrays/list/array.rs @@ -8,7 +8,6 @@ use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_native_ptype; -use 
vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -100,7 +99,7 @@ impl ListArray { /// Panics if the provided components do not satisfy the invariants documented /// in [`ListArray::new_unchecked`]. pub fn new(elements: ArrayRef, offsets: ArrayRef, validity: Validity) -> Self { - Self::try_new(elements, offsets, validity).vortex_expect("ListArray new") + Self::try_new(elements, offsets, validity).expect("ListArray new") } /// Constructs a new `ListArray`. @@ -141,7 +140,7 @@ impl ListArray { pub unsafe fn new_unchecked(elements: ArrayRef, offsets: ArrayRef, validity: Validity) -> Self { #[cfg(debug_assertions)] Self::validate(&elements, &offsets, &validity) - .vortex_expect("[Debug Assertion]: Invalid `ListViewArray` parameters"); + .expect("[Debug Assertion]: Invalid `ListViewArray` parameters"); Self { dtype: DType::List(Arc::new(elements.dtype().clone()), validity.nullability()), @@ -193,12 +192,12 @@ impl ListArray { .max .as_primitive() .as_::

() - .vortex_expect("offsets type must fit offsets values"); + .expect("offsets type must fit offsets values"); let min = min_max .min .as_primitive() .as_::

() - .vortex_expect("offsets type must fit offsets values"); + .expect("offsets type must fit offsets values"); vortex_ensure!( min >= 0, diff --git a/vortex-array/src/arrays/list/compute/take.rs b/vortex-array/src/arrays/list/compute/take.rs index fcfd7b977ab..8da62ec2469 100644 --- a/vortex-array/src/arrays/list/compute/take.rs +++ b/vortex-array/src/arrays/list/compute/take.rs @@ -5,7 +5,6 @@ use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_smallest_offset_type; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -90,10 +89,10 @@ fn _take( // TODO(0ax1): optimize this elements_to_take.reserve_exact(additional); for i in 0..additional { - elements_to_take.append_value(start + O::from_usize(i).vortex_expect("i < additional")); + elements_to_take.append_value(start + O::from_usize(i).expect("i < additional")); } current_offset += - OutputOffsetType::from_usize((stop - start).as_()).vortex_expect("offset conversion"); + OutputOffsetType::from_usize((stop - start).as_()).expect("offset conversion"); new_offsets.append_value(current_offset); } @@ -159,10 +158,10 @@ fn _take_nullable Self { Self::try_new(elements, offsets, sizes, validity) - .vortex_expect("`ListViewArray` construction failed") + .expect("`ListViewArray` construction failed") } /// Constructs a new `ListViewArray`. 
@@ -207,7 +206,7 @@ impl ListViewArray { ) -> Self { if cfg!(debug_assertions) { Self::validate(&elements, &offsets, &sizes, &validity) - .vortex_expect("Failed to crate `ListViewArray`"); + .expect("Failed to crate `ListViewArray`"); } Self { @@ -309,7 +308,7 @@ impl ListViewArray { self.offsets.to_primitive(), self.sizes.to_primitive(), ) - .vortex_expect("Failed to validate zero-copy to list flag"); + .expect("Failed to validate zero-copy to list flag"); } self.is_zero_copy_to_list = is_zctl; self @@ -342,7 +341,7 @@ impl ListViewArray { } pub fn into_parts(self) -> ListViewArrayParts { - let dtype = self.dtype.into_list_element_opt().vortex_expect("is list"); + let dtype = self.dtype.into_list_element_opt().expect("is list"); ListViewArrayParts { elements_dtype: dtype, elements: self.elements, @@ -372,10 +371,10 @@ impl ListViewArray { // Slow path: use `scalar_at` if we can't downcast directly to `PrimitiveArray`. self.offsets .scalar_at(index) - .vortex_expect("offsets must support scalar_at") + .expect("offsets must support scalar_at") .as_primitive() .as_::() - .vortex_expect("offset must fit in usize") + .expect("offset must fit in usize") }) } @@ -400,10 +399,10 @@ impl ListViewArray { // Slow path: use `scalar_at` if we can't downcast directly to `PrimitiveArray`. 
self.sizes .scalar_at(index) - .vortex_expect("sizes must support scalar_at") + .expect("sizes must support scalar_at") .as_primitive() .as_::() - .vortex_expect("size must fit in usize") + .expect("size must fit in usize") }) } diff --git a/vortex-array/src/arrays/listview/conversion.rs b/vortex-array/src/arrays/listview/conversion.rs index 7751c3e03dd..3a164fb0121 100644 --- a/vortex-array/src/arrays/listview/conversion.rs +++ b/vortex-array/src/arrays/listview/conversion.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -39,7 +38,7 @@ pub fn list_view_from_list(list: ListArray, ctx: &mut ExecutionCtx) -> VortexRes // We reset the offsets here because mostly for convenience, and also because callers of this // function might not expect the output `ListViewArray` to have a bunch of leading and trailing // garbage data when they turn it back into a `ListArray`. 
- let list = list.reset_offsets(false).vortex_expect("This can't fail"); + let list = list.reset_offsets(false).expect("This can't fail"); let list_offsets = list.offsets().clone(); @@ -160,8 +159,7 @@ unsafe fn build_list_offsets_from_list_view( let last_offset = offsets_slice[len - 1]; let last_size = list_view.size_at(len - 1); - let last_size = - O::from_usize(last_size).vortex_expect("size somehow did not fit into offsets"); + let last_size = O::from_usize(last_size).expect("size somehow did not fit into offsets"); last_offset + last_size } else { @@ -227,9 +225,7 @@ pub fn recursive_list_from_list_view(array: ArrayRef) -> VortexResult fixed_size_list.validity().clone(), fixed_size_list.len(), ) - .vortex_expect( - "FixedSizeListArray reconstruction should not fail with valid components", - ) + .expect("FixedSizeListArray reconstruction should not fail with valid components") .into_array() } else { fixed_size_list.into_array() @@ -254,7 +250,7 @@ pub fn recursive_list_from_list_view(array: ArrayRef) -> VortexResult struct_array.len(), struct_array.validity().clone(), ) - .vortex_expect("StructArray reconstruction should not fail with valid components") + .expect("StructArray reconstruction should not fail with valid components") .into_array() } else { struct_array.into_array() diff --git a/vortex-array/src/arrays/listview/rebuild.rs b/vortex-array/src/arrays/listview/rebuild.rs index 68c1098f2c9..e890b96a3a9 100644 --- a/vortex-array/src/arrays/listview/rebuild.rs +++ b/vortex-array/src/arrays/listview/rebuild.rs @@ -6,7 +6,6 @@ use vortex_buffer::BufferMut; use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -113,7 +112,7 @@ impl ListViewArray { let element_dtype = self .dtype() .as_list_element_opt() - .vortex_expect("somehow had a canonical list that was not a list"); + .expect("somehow had a canonical list that was 
not a list"); let offsets_canonical = self.offsets().to_primitive(); let offsets_slice = offsets_canonical.as_slice::(); @@ -133,7 +132,7 @@ impl ListViewArray { let elements_canonical = self .elements() .to_canonical() - .vortex_expect("canonicalize elements for rebuild") + .expect("canonicalize elements for rebuild") .into_array(); // Note that we do not know what the exact capacity should be of the new elements since @@ -161,7 +160,7 @@ impl ListViewArray { new_sizes.push(size); new_elements_builder.extend_from_array(&elements_canonical.slice(start..stop)?); - n_elements += num_traits::cast(size).vortex_expect("Cast failed"); + n_elements += num_traits::cast(size).expect("Cast failed"); } let offsets = new_offsets.into_array(); @@ -198,7 +197,7 @@ impl ListViewArray { // completely fine for us to use this as a lower-bounded start of the `elements`. self.offset_at(0) } else { - self.offsets().statistics().compute_min().vortex_expect( + self.offsets().statistics().compute_min().expect( "[ListViewArray::rebuild]: `offsets` must report min statistic that is a `usize`", ) }; @@ -212,25 +211,25 @@ impl ListViewArray { } else { let min_max = compute::min_max( &compute::add(self.offsets(), self.sizes()) - .vortex_expect("`offsets + sizes` somehow overflowed"), + .expect("`offsets + sizes` somehow overflowed"), ) - .vortex_expect("Something went wrong while computing min and max") - .vortex_expect("We checked that the array was not empty in the top-level `rebuild`"); + .expect("Something went wrong while computing min and max") + .expect("We checked that the array was not empty in the top-level `rebuild`"); min_max .max .as_primitive() .as_::() - .vortex_expect("unable to interpret the max `offset + size` as a `usize`") + .expect("unable to interpret the max `offset + size` as a `usize`") }; let adjusted_offsets = match_each_integer_ptype!(self.offsets().dtype().as_ptype(), |O| { let offset = ::from_usize(start) - .vortex_expect("unable to convert the min offset `start` 
into a `usize`"); + .expect("unable to convert the min offset `start` into a `usize`"); let scalar = Scalar::primitive(offset, Nullability::NonNullable); compute::sub_scalar(self.offsets(), scalar) - .vortex_expect("was somehow unable to adjust offsets down by their minimum") + .expect("was somehow unable to adjust offsets down by their minimum") }); let sliced_elements = self.elements().slice(start..end)?; diff --git a/vortex-array/src/arrays/listview/vtable/mod.rs b/vortex-array/src/arrays/listview/vtable/mod.rs index 63a0d032466..9743de809bb 100644 --- a/vortex-array/src/arrays/listview/vtable/mod.rs +++ b/vortex-array/src/arrays/listview/vtable/mod.rs @@ -5,7 +5,6 @@ use kernel::PARENT_KERNELS; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -120,7 +119,7 @@ impl VTable for ListViewVTable { let elements = children.get( 0, element_dtype.as_ref(), - usize::try_from(metadata.0.elements_len)?, + usize::try_from(metadata.0.elements_len).expect("Elements length must fit in usize"), )?; // Get offsets with proper type from metadata. 
@@ -148,15 +147,9 @@ impl VTable for ListViewVTable { ); let mut iter = children.into_iter(); - let elements = iter - .next() - .vortex_expect("children length already validated"); - let offsets = iter - .next() - .vortex_expect("children length already validated"); - let sizes = iter - .next() - .vortex_expect("children length already validated"); + let elements = iter.next().expect("children length already validated"); + let offsets = iter.next().expect("children length already validated"); + let sizes = iter.next().expect("children length already validated"); let validity = if let Some(validity_array) = iter.next() { Validity::Array(validity_array) } else { diff --git a/vortex-array/src/arrays/masked/vtable/mod.rs b/vortex-array/src/arrays/masked/vtable/mod.rs index 67bf8a82e25..2e598cb7300 100644 --- a/vortex-array/src/arrays/masked/vtable/mod.rs +++ b/vortex-array/src/arrays/masked/vtable/mod.rs @@ -8,7 +8,6 @@ mod validity; use kernel::PARENT_KERNELS; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -175,9 +174,7 @@ impl VTable for MaskedVTable { ); let mut iter = children.into_iter(); - let child = iter - .next() - .vortex_expect("children length already validated"); + let child = iter.next().expect("children length already validated"); let validity = if let Some(validity_array) = iter.next() { Validity::Array(validity_array) } else { diff --git a/vortex-array/src/arrays/primitive/array/cast.rs b/vortex-array/src/arrays/primitive/array/cast.rs index a01f4bc6311..152c0262b9d 100644 --- a/vortex-array/src/arrays/primitive/array/cast.rs +++ b/vortex-array/src/arrays/primitive/array/cast.rs @@ -5,7 +5,6 @@ use vortex_buffer::Buffer; use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -35,7 +34,7 @@ impl PrimitiveArray { let 
byte_buffer = self .buffer .as_host_opt() - .vortex_expect("as_slice must be called on host buffer"); + .expect("as_slice must be called on host buffer"); let raw_slice = byte_buffer.as_ptr(); // SAFETY: alignment of Buffer is checked on construction diff --git a/vortex-array/src/arrays/primitive/array/mod.rs b/vortex-array/src/arrays/primitive/array/mod.rs index 79e4ad698a3..0f4d5e2fd70 100644 --- a/vortex-array/src/arrays/primitive/array/mod.rs +++ b/vortex-array/src/arrays/primitive/array/mod.rs @@ -13,7 +13,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::PType; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; @@ -110,7 +109,7 @@ impl PrimitiveArray { /// in [`PrimitiveArray::new_unchecked`]. pub fn new(buffer: impl Into>, validity: Validity) -> Self { let buffer = buffer.into(); - Self::try_new(buffer, validity).vortex_expect("PrimitiveArray construction failed") + Self::try_new(buffer, validity).expect("PrimitiveArray construction failed") } /// Constructs a new `PrimitiveArray`. 
@@ -145,7 +144,7 @@ impl PrimitiveArray { pub unsafe fn new_unchecked(buffer: Buffer, validity: Validity) -> Self { #[cfg(debug_assertions)] Self::validate(&buffer, &validity) - .vortex_expect("[Debug Assertion]: Invalid `PrimitiveArray` parameters"); + .expect("[Debug Assertion]: Invalid `PrimitiveArray` parameters"); Self { dtype: DType::Primitive(T::PTYPE, validity.nullability()), diff --git a/vortex-array/src/arrays/primitive/array/top_value.rs b/vortex-array/src/arrays/primitive/array/top_value.rs index c454ffd30c5..069897c3b60 100644 --- a/vortex-array/src/arrays/primitive/array/top_value.rs +++ b/vortex-array/src/arrays/primitive/array/top_value.rs @@ -6,7 +6,6 @@ use std::hash::Hash; use rustc_hash::FxBuildHasher; use vortex_dtype::NativePType; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::AllOr; use vortex_mask::Mask; @@ -60,6 +59,6 @@ where let (&top_value, &top_count) = distinct_values .iter() .max_by_key(|&(_, &count)| count) - .vortex_expect("non-empty"); + .expect("non-empty"); (top_value.0, top_count) } diff --git a/vortex-array/src/arrays/primitive/compute/fill_null.rs b/vortex-array/src/arrays/primitive/compute/fill_null.rs index d881d9e7c27..670b58c0d85 100644 --- a/vortex-array/src/arrays/primitive/compute/fill_null.rs +++ b/vortex-array/src/arrays/primitive/compute/fill_null.rs @@ -4,7 +4,6 @@ use std::ops::Not; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::ArrayRef; @@ -34,7 +33,7 @@ impl FillNullKernel for PrimitiveVTable { let fill_value = fill_value .as_primitive() .typed_value::() - .vortex_expect("top-level fill_null ensure non-null fill value"); + .expect("top-level fill_null ensure non-null fill value"); for invalid_index in is_invalid.set_indices() { buffer[invalid_index] = fill_value; } diff --git a/vortex-array/src/arrays/primitive/compute/is_sorted.rs 
b/vortex-array/src/arrays/primitive/compute/is_sorted.rs index e00e68d704d..c2d623f5488 100644 --- a/vortex-array/src/arrays/primitive/compute/is_sorted.rs +++ b/vortex-array/src/arrays/primitive/compute/is_sorted.rs @@ -92,7 +92,6 @@ fn compute_is_sorted(array: &PrimitiveArray, strict: bool) -> Vo #[cfg(test)] mod tests { use rstest::rstest; - use vortex_error::VortexExpect; use super::*; use crate::compute::is_sorted; @@ -106,7 +105,7 @@ mod tests { #[case(PrimitiveArray::from_option_iter([None, Some(5_u8), None]), false)] fn test_primitive_is_sorted(#[case] array: PrimitiveArray, #[case] expected: bool) { assert_eq!( - is_sorted(array.as_ref()).vortex_expect("operation should succeed in test"), + is_sorted(array.as_ref()).expect("operation should succeed in test"), Some(expected) ); } @@ -119,7 +118,7 @@ mod tests { #[case(PrimitiveArray::from_option_iter([None, Some(5_u8), None]), false)] fn test_primitive_is_strict_sorted(#[case] array: PrimitiveArray, #[case] expected: bool) { assert_eq!( - is_strict_sorted(array.as_ref()).vortex_expect("operation should succeed in test"), + is_strict_sorted(array.as_ref()).expect("operation should succeed in test"), Some(expected) ); } diff --git a/vortex-array/src/arrays/primitive/compute/sum.rs b/vortex-array/src/arrays/primitive/compute/sum.rs index e59bd131511..bcb1fea3c35 100644 --- a/vortex-array/src/arrays/primitive/compute/sum.rs +++ b/vortex-array/src/arrays/primitive/compute/sum.rs @@ -9,7 +9,6 @@ use vortex_buffer::BitBuffer; use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::AllOr; @@ -30,20 +29,20 @@ impl SumKernel for PrimitiveVTable { unsigned: |T| { Scalar::from(sum_integer::<_, u64>( array.as_slice::(), - accumulator.as_primitive().as_::().vortex_expect("cannot be null"), + accumulator.as_primitive().as_::().expect("cannot be null"), )) }, signed: |T| { 
Scalar::from(sum_integer::<_, i64>( array.as_slice::(), - accumulator.as_primitive().as_::().vortex_expect("cannot be null"), + accumulator.as_primitive().as_::().expect("cannot be null"), )) }, floating: |T| { Scalar::primitive( sum_float( array.as_slice::(), - accumulator.as_primitive().as_::().vortex_expect("cannot be null"), + accumulator.as_primitive().as_::().expect("cannot be null"), ), Nullability::Nullable, ) @@ -62,14 +61,14 @@ impl SumKernel for PrimitiveVTable { Scalar::from(sum_integer_with_validity::<_, u64>( array.as_slice::(), validity_mask, - accumulator.as_primitive().as_::().vortex_expect("cannot be null"), + accumulator.as_primitive().as_::().expect("cannot be null"), )) }, signed: |T| { Scalar::from(sum_integer_with_validity::<_, i64>( array.as_slice::(), validity_mask, - accumulator.as_primitive().as_::().vortex_expect("cannot be null"), + accumulator.as_primitive().as_::().expect("cannot be null"), )) }, floating: |T| { @@ -77,7 +76,7 @@ impl SumKernel for PrimitiveVTable { sum_float_with_validity( array.as_slice::(), validity_mask, - accumulator.as_primitive().as_::().vortex_expect("cannot be null"), + accumulator.as_primitive().as_::().expect("cannot be null"), ), Nullability::Nullable, ) @@ -120,7 +119,7 @@ fn sum_integer_with_validity(values: &[T], accumulator: f64) -> f64 { let mut sum = accumulator; for &x in values { - sum += x.to_f64().vortex_expect("Failed to cast value to f64"); + sum += x.to_f64().expect("Failed to cast value to f64"); } sum } @@ -133,7 +132,7 @@ fn sum_float_with_validity( let mut sum = accumulator; for (&x, valid) in array.iter().zip_eq(validity.iter()) { if valid { - sum += x.to_f64().vortex_expect("Failed to cast value to f64"); + sum += x.to_f64().expect("Failed to cast value to f64"); } } sum diff --git a/vortex-array/src/arrays/primitive/compute/take/mod.rs b/vortex-array/src/arrays/primitive/compute/take/mod.rs index 5bea3539626..8df73809301 100644 --- a/vortex-array/src/arrays/primitive/compute/take/mod.rs 
+++ b/vortex-array/src/arrays/primitive/compute/take/mod.rs @@ -120,7 +120,6 @@ fn take_primitive_scalar(array: &[T], indices: mod test { use rstest::rstest; use vortex_buffer::buffer; - use vortex_error::VortexExpect; use crate::Array; use crate::IntoArray; @@ -149,18 +148,15 @@ mod test { Validity::Array(BoolArray::from_iter([true, true, false]).into_array()), ); let actual = values.take(indices.to_array()).unwrap(); - assert_eq!( - actual.scalar_at(0).vortex_expect("no fail"), - Scalar::from(Some(1)) - ); + assert_eq!(actual.scalar_at(0).expect("no fail"), Scalar::from(Some(1))); // position 3 is null assert_eq!( - actual.scalar_at(1).vortex_expect("no fail"), + actual.scalar_at(1).expect("no fail"), Scalar::null_native::() ); // the third index is null assert_eq!( - actual.scalar_at(2).vortex_expect("no fail"), + actual.scalar_at(2).expect("no fail"), Scalar::null_native::() ); } diff --git a/vortex-array/src/arrays/primitive/vtable/mod.rs b/vortex-array/src/arrays/primitive/vtable/mod.rs index 450f9faf99e..13ab12e0ea0 100644 --- a/vortex-array/src/arrays/primitive/vtable/mod.rs +++ b/vortex-array/src/arrays/primitive/vtable/mod.rs @@ -4,7 +4,6 @@ use kernel::PARENT_KERNELS; use vortex_dtype::DType; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -129,7 +128,7 @@ impl VTable for PrimitiveVTable { array.validity = if children.is_empty() { Validity::from(array.dtype().nullability()) } else { - Validity::Array(children.into_iter().next().vortex_expect("checked")) + Validity::Array(children.into_iter().next().expect("checked")) }; Ok(()) diff --git a/vortex-array/src/arrays/scalar_fn/rules.rs b/vortex-array/src/arrays/scalar_fn/rules.rs index 218495e39e1..8922b356dc1 100644 --- a/vortex-array/src/arrays/scalar_fn/rules.rs +++ b/vortex-array/src/arrays/scalar_fn/rules.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use itertools::Itertools; use vortex_dtype::DType; 
-use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -97,7 +96,7 @@ impl ArrayReduceRule for ScalarFnAbstractReduceRule { reduced .as_any() .downcast_ref::() - .vortex_expect("ReduceNode is not an ArrayRef") + .expect("ReduceNode is not an ArrayRef") .clone(), )); } @@ -168,7 +167,7 @@ impl ReduceCtx for ArrayReduceCtx { .map(|c| { c.as_any() .downcast_ref::() - .vortex_expect("ReduceNode is not an ArrayRef") + .expect("ReduceNode is not an ArrayRef") .clone() }) .collect(), @@ -227,7 +226,6 @@ mod tests { use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; - use vortex_error::VortexExpect; use crate::array::IntoArray; use crate::arrays::ChunkedArray; @@ -248,14 +246,14 @@ mod tests { root(), DType::Primitive(PType::U64, Nullability::Nullable), )) - .vortex_expect("casted"), + .expect("casted"), ], DType::Primitive(PType::U64, Nullability::Nullable), ) - .vortex_expect("construction") + .expect("construction") .to_array(); let expr = is_null(root()); - array.apply(&expr).vortex_expect("expr evaluation"); + array.apply(&expr).expect("expr evaluation"); } } diff --git a/vortex-array/src/arrays/scalar_fn/vtable/mod.rs b/vortex-array/src/arrays/scalar_fn/vtable/mod.rs index 774543c9c7d..4cdbfd60710 100644 --- a/vortex-array/src/arrays/scalar_fn/vtable/mod.rs +++ b/vortex-array/src/arrays/scalar_fn/vtable/mod.rs @@ -15,7 +15,6 @@ use std::ops::Deref; use itertools::Itertools; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -231,13 +230,13 @@ impl Matcher for ExactScalarFn { .vtable() .as_any() .downcast_ref::() - .vortex_expect("ScalarFn VTable type mismatch in ExactScalarFn matcher"); + .expect("ScalarFn VTable type mismatch in ExactScalarFn matcher"); let scalar_fn_options = scalar_fn_array .scalar_fn .options() .as_any() .downcast_ref::() - .vortex_expect("ScalarFn options type mismatch in 
ExactScalarFn matcher"); + .expect("ScalarFn options type mismatch in ExactScalarFn matcher"); Some(ScalarFnArrayView { array, vtable: scalar_fn_vtable, diff --git a/vortex-array/src/arrays/shared/vtable.rs b/vortex-array/src/arrays/shared/vtable.rs index 2834dbdf282..4309cf05881 100644 --- a/vortex-array/src/arrays/shared/vtable.rs +++ b/vortex-array/src/arrays/shared/vtable.rs @@ -4,7 +4,6 @@ use std::hash::Hash; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; @@ -93,7 +92,7 @@ impl VTable for SharedVTable { let child = children .into_iter() .next() - .vortex_expect("children length already validated"); + .expect("children length already validated"); array.set_source(child); Ok(()) } diff --git a/vortex-array/src/arrays/slice/array.rs b/vortex-array/src/arrays/slice/array.rs index 8a0dbbb642c..49ea444615d 100644 --- a/vortex-array/src/arrays/slice/array.rs +++ b/vortex-array/src/arrays/slice/array.rs @@ -3,7 +3,6 @@ use std::ops::Range; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -39,7 +38,7 @@ impl SliceArray { } pub fn new(child: ArrayRef, range: Range) -> Self { - Self::try_new(child, range).vortex_expect("failed") + Self::try_new(child, range).expect("failed") } /// The range used to slice the child array. 
diff --git a/vortex-array/src/arrays/slice/vtable.rs b/vortex-array/src/arrays/slice/vtable.rs index a24d1e96ae1..2123d0211cd 100644 --- a/vortex-array/src/arrays/slice/vtable.rs +++ b/vortex-array/src/arrays/slice/vtable.rs @@ -8,7 +8,6 @@ use std::hash::Hasher; use std::ops::Range; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -104,7 +103,7 @@ impl VTable for SliceVTable { array.child = children .into_iter() .next() - .vortex_expect("children length already validated"); + .expect("children length already validated"); Ok(()) } diff --git a/vortex-array/src/arrays/struct_/array.rs b/vortex-array/src/arrays/struct_/array.rs index ff418c14444..8443a80b68b 100644 --- a/vortex-array/src/arrays/struct_/array.rs +++ b/vortex-array/src/arrays/struct_/array.rs @@ -9,7 +9,6 @@ use vortex_dtype::DType; use vortex_dtype::FieldName; use vortex_dtype::FieldNames; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -198,7 +197,7 @@ impl StructArray { len, Validity::NonNullable, ) - .vortex_expect("StructArray::new_with_len should not fail") + .expect("StructArray::new_with_len should not fail") } /// Creates a new [`StructArray`]. @@ -213,8 +212,7 @@ impl StructArray { length: usize, validity: Validity, ) -> Self { - Self::try_new(names, fields, length, validity) - .vortex_expect("StructArray construction failed") + Self::try_new(names, fields, length, validity).expect("StructArray construction failed") } /// Constructs a new `StructArray`. 
@@ -276,7 +274,7 @@ impl StructArray { #[cfg(debug_assertions)] Self::validate(&fields, &dtype, length, &validity) - .vortex_expect("[Debug Assertion]: Invalid `StructArray` parameters"); + .expect("[Debug Assertion]: Invalid `StructArray` parameters"); Self { len: length, diff --git a/vortex-array/src/arrays/struct_/compute/mod.rs b/vortex-array/src/arrays/struct_/compute/mod.rs index 092eaa689f1..2ae50f5bf05 100644 --- a/vortex-array/src/arrays/struct_/compute/mod.rs +++ b/vortex-array/src/arrays/struct_/compute/mod.rs @@ -22,7 +22,6 @@ mod tests { use vortex_dtype::Nullability; use vortex_dtype::PType; use vortex_dtype::StructFields; - use vortex_error::VortexExpect; use crate::Array; use crate::Canonical; @@ -252,8 +251,7 @@ mod tests { #[test] fn test_empty_struct_is_constant() { let array = StructArray::new_fieldless_with_len(2); - let is_constant = - is_constant(array.as_ref()).vortex_expect("operation should succeed in test"); + let is_constant = is_constant(array.as_ref()).expect("operation should succeed in test"); assert_eq!(is_constant, Some(true)); } diff --git a/vortex-array/src/arrays/struct_/vtable/mod.rs b/vortex-array/src/arrays/struct_/vtable/mod.rs index 38f42e5f639..8bb6651b2fd 100644 --- a/vortex-array/src/arrays/struct_/vtable/mod.rs +++ b/vortex-array/src/arrays/struct_/vtable/mod.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use itertools::Itertools; use kernel::PARENT_KERNELS; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -94,9 +93,7 @@ impl VTable for StructVTable { let children: Vec<_> = (0..struct_dtype.nfields()) .map(|i| { - let child_dtype = struct_dtype - .field_by_index(i) - .vortex_expect("no out of bounds"); + let child_dtype = struct_dtype.field_by_index(i).expect("no out of bounds"); children.get(non_data_children + i, &child_dtype, len) }) .try_collect()?; diff --git a/vortex-array/src/arrays/varbin/array.rs 
b/vortex-array/src/arrays/varbin/array.rs index 61e81da7caf..4be6cc47699 100644 --- a/vortex-array/src/arrays/varbin/array.rs +++ b/vortex-array/src/arrays/varbin/array.rs @@ -8,7 +8,6 @@ use vortex_dtype::DType; use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -39,7 +38,7 @@ impl VarBinArray { /// Panics if the provided components do not satisfy the invariants documented /// in [`VarBinArray::new_unchecked`]. pub fn new(offsets: ArrayRef, bytes: ByteBuffer, dtype: DType, validity: Validity) -> Self { - Self::try_new(offsets, bytes, dtype, validity).vortex_expect("VarBinArray new") + Self::try_new(offsets, bytes, dtype, validity).expect("VarBinArray new") } /// Creates a new [`VarBinArray`]. @@ -54,7 +53,7 @@ impl VarBinArray { dtype: DType, validity: Validity, ) -> Self { - Self::try_new_from_handle(offset, bytes, dtype, validity).vortex_expect("VarBinArray new") + Self::try_new_from_handle(offset, bytes, dtype, validity).expect("VarBinArray new") } /// Constructs a new `VarBinArray`. 
@@ -154,7 +153,7 @@ impl VarBinArray { ) -> Self { #[cfg(debug_assertions)] Self::validate(&offsets, &bytes, &dtype, &validity) - .vortex_expect("[Debug Assertion]: Invalid `VarBinArray` parameters"); + .expect("[Debug Assertion]: Invalid `VarBinArray` parameters"); Self { dtype, @@ -355,9 +354,9 @@ impl VarBinArray { (&self .offsets() .scalar_at(index) - .vortex_expect("offsets must support scalar_at")) + .expect("offsets must support scalar_at")) .try_into() - .vortex_expect("Failed to convert offset to usize") + .expect("Failed to convert offset to usize") } /// Access value bytes at a given index diff --git a/vortex-array/src/arrays/varbin/compute/compare.rs b/vortex-array/src/arrays/varbin/compute/compare.rs index c5e38b64ff4..3ca4e5ac58d 100644 --- a/vortex-array/src/arrays/varbin/compute/compare.rs +++ b/vortex-array/src/arrays/varbin/compute/compare.rs @@ -9,7 +9,6 @@ use vortex_buffer::BitBuffer; use vortex_dtype::DType; use vortex_dtype::IntegerPType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -47,11 +46,11 @@ impl CompareKernel for VarBinVTable { DType::Binary(_) => rhs_const .as_binary() .is_empty() - .vortex_expect("RHS should not be null"), + .expect("RHS should not be null"), DType::Utf8(_) => rhs_const .as_utf8() .is_empty() - .vortex_expect("RHS should not be null"), + .expect("RHS should not be null"), _ => vortex_bail!("VarBinArray can only have type of Binary or Utf8"), }; diff --git a/vortex-array/src/arrays/varbin/compute/filter.rs b/vortex-array/src/arrays/varbin/compute/filter.rs index 24d961e03c1..a64590ae0aa 100644 --- a/vortex-array/src/arrays/varbin/compute/filter.rs +++ b/vortex-array/src/arrays/varbin/compute/filter.rs @@ -6,7 +6,6 @@ use num_traits::AsPrimitive; use vortex_dtype::DType; use vortex_dtype::IntegerPType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use 
vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_error::vortex_panic; @@ -38,7 +37,7 @@ impl FilterKernel for VarBinVTable { fn filter_select_var_bin(arr: &VarBinArray, mask: &Mask) -> VortexResult { match mask .values() - .vortex_expect("AllTrue and AllFalse are handled by filter fn") + .expect("AllTrue and AllFalse are handled by filter fn") .threshold_iter(0.5) { MaskIter::Indices(indices) => { diff --git a/vortex-array/src/arrays/varbin/compute/take.rs b/vortex-array/src/arrays/varbin/compute/take.rs index 143b1065450..1ebd01957b6 100644 --- a/vortex-array/src/arrays/varbin/compute/take.rs +++ b/vortex-array/src/arrays/varbin/compute/take.rs @@ -7,7 +7,6 @@ use vortex_buffer::ByteBufferMut; use vortex_dtype::DType; use vortex_dtype::IntegerPType; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; use vortex_mask::Mask; @@ -146,7 +145,7 @@ fn take( let start = offsets[idx]; let stop = offsets[idx + 1]; - current_offset += NewOffset::from(stop - start).vortex_expect("offset type overflow"); + current_offset += NewOffset::from(stop - start).expect("offset type overflow"); new_offsets.push(current_offset); } @@ -158,10 +157,10 @@ fn take( .unwrap_or_else(|| vortex_panic!("Failed to convert index to usize: {}", idx)); let start = offsets[idx] .to_usize() - .vortex_expect("Failed to cast max offset to usize"); + .expect("Failed to cast max offset to usize"); let stop = offsets[idx + 1] .to_usize() - .vortex_expect("Failed to cast max offset to usize"); + .expect("Failed to cast max offset to usize"); new_data.extend_from_slice(&data[start..stop]); } @@ -210,7 +209,7 @@ fn take_nullable Scalar { if matches!(dtype, DType::Utf8(_)) { Scalar::try_utf8(value, dtype.nullability()) .map_err(|err| vortex_err!("Failed to create scalar from utf8 buffer: {}", err)) - .vortex_expect("UTF-8 scalar creation should succeed") + .expect("UTF-8 scalar creation should succeed") 
} else { Scalar::binary(value, dtype.nullability()) } diff --git a/vortex-array/src/arrays/varbin/vtable/mod.rs b/vortex-array/src/arrays/varbin/vtable/mod.rs index e5f7a2c8372..a2a091ed9d7 100644 --- a/vortex-array/src/arrays/varbin/vtable/mod.rs +++ b/vortex-array/src/arrays/varbin/vtable/mod.rs @@ -4,7 +4,6 @@ use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -61,8 +60,8 @@ impl VTable for VarBinVTable { fn metadata(array: &VarBinArray) -> VortexResult { Ok(ProstMetadata(VarBinMetadata { - offsets_ptype: PType::try_from(array.offsets().dtype()) - .vortex_expect("Must be a valid PType") as i32, + offsets_ptype: PType::try_from(array.offsets().dtype()).expect("Must be a valid PType") + as i32, })) } diff --git a/vortex-array/src/arrays/varbinview/array.rs b/vortex-array/src/arrays/varbinview/array.rs index 75acbe4875e..e39d471d727 100644 --- a/vortex-array/src/arrays/varbinview/array.rs +++ b/vortex-array/src/arrays/varbinview/array.rs @@ -8,7 +8,6 @@ use vortex_buffer::Buffer; use vortex_buffer::ByteBuffer; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -110,8 +109,7 @@ impl VarBinViewArray { dtype: DType, validity: Validity, ) -> Self { - Self::try_new(views, buffers, dtype, validity) - .vortex_expect("VarBinViewArray construction failed") + Self::try_new(views, buffers, dtype, validity).expect("VarBinViewArray construction failed") } /// Creates a new [`VarBinViewArray`] with device or host memory. 
@@ -127,7 +125,7 @@ impl VarBinViewArray { validity: Validity, ) -> Self { Self::try_new_handle(views, buffers, dtype, validity) - .vortex_expect("VarbinViewArray construction failed") + .expect("VarbinViewArray construction failed") } /// Constructs a new `VarBinViewArray`. @@ -220,7 +218,7 @@ impl VarBinViewArray { ) -> Self { #[cfg(debug_assertions)] Self::validate(&views, &buffers, &dtype, &validity) - .vortex_expect("[Debug Assertion]: Invalid `VarBinViewArray` parameters"); + .expect("[Debug Assertion]: Invalid `VarBinViewArray` parameters"); let handles: Vec = buffers .iter() diff --git a/vortex-array/src/arrays/varbinview/compact.rs b/vortex-array/src/arrays/varbinview/compact.rs index 8a303f3e83d..44603128a14 100644 --- a/vortex-array/src/arrays/varbinview/compact.rs +++ b/vortex-array/src/arrays/varbinview/compact.rs @@ -6,7 +6,6 @@ use std::ops::Range; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -95,7 +94,7 @@ impl VarBinViewArray { .buffers() .iter() .map(|buf| { - let len = u32::try_from(buf.len()).vortex_expect("buffer sizes must fit in u32"); + let len = u32::try_from(buf.len()).expect("buffer sizes must fit in u32"); BufferUtilization::zero(len) }) .collect(); diff --git a/vortex-array/src/arrays/varbinview/compute/is_constant.rs b/vortex-array/src/arrays/varbinview/compute/is_constant.rs index 406cf57ed65..12d2dceb1bd 100644 --- a/vortex-array/src/arrays/varbinview/compute/is_constant.rs +++ b/vortex-array/src/arrays/varbinview/compute/is_constant.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::arrays::Ref; @@ -19,9 +18,7 @@ impl IsConstantKernel for VarBinViewVTable { _opts: &IsConstantOpts, ) -> VortexResult> { let mut views_iter = array.views().iter(); - let first_value = views_iter - .next() - .vortex_expect("Must have at least one value"); + let 
first_value = views_iter.next().expect("Must have at least one value"); // For the array to be constant, all views must be of the same type if first_value.is_inlined() { diff --git a/vortex-array/src/arrays/varbinview/view.rs b/vortex-array/src/arrays/varbinview/view.rs index 068fbde6d4e..842b45e3ce3 100644 --- a/vortex-array/src/arrays/varbinview/view.rs +++ b/vortex-array/src/arrays/varbinview/view.rs @@ -10,7 +10,6 @@ use std::ops::Range; use static_assertions::assert_eq_align; use static_assertions::assert_eq_size; -use vortex_error::VortexExpect; /// A view over a variable-length binary value. /// @@ -51,7 +50,7 @@ impl Inlined { fn new(value: &[u8]) -> Self { debug_assert_eq!(value.len(), N); let mut inlined = Self { - size: N.try_into().vortex_expect("inlined size must fit in u32"), + size: N.try_into().expect("inlined size must fit in u32"), data: [0u8; BinaryView::MAX_INLINED_SIZE], }; inlined.data[..N].copy_from_slice(&value[..N]); @@ -153,11 +152,10 @@ impl BinaryView { }, _ => Self { _ref: Ref { - size: u32::try_from(value.len()).vortex_expect("value length must fit in u32"), + size: u32::try_from(value.len()).expect("value length must fit in u32"), prefix: value[0..4] .try_into() - .ok() - .vortex_expect("prefix must be exactly 4 bytes"), + .expect("prefix must be exactly 4 bytes"), buffer_index: block, offset, }, diff --git a/vortex-array/src/arrow/convert.rs b/vortex-array/src/arrow/convert.rs index 12849414ddc..0f88227d6f6 100644 --- a/vortex-array/src/arrow/convert.rs +++ b/vortex-array/src/arrow/convert.rs @@ -69,7 +69,6 @@ use vortex_dtype::NativePType; use vortex_dtype::PType; use vortex_dtype::datetime::TimeUnit; use vortex_dtype::i256; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_panic; @@ -370,9 +369,7 @@ fn remove_nulls(data: arrow_data::ArrayData) -> arrow_data::ArrayData { if let Some(children) = children { builder = builder.child_data(children); } - builder - 
.build() - .vortex_expect("reconstructing array without nulls") + builder.build().expect("reconstructing array without nulls") } impl FromArrowArray<&ArrowStructArray> for ArrayRef { diff --git a/vortex-array/src/arrow/datum.rs b/vortex-array/src/arrow/datum.rs index 5297f3c9790..1b63129baea 100644 --- a/vortex-array/src/arrow/datum.rs +++ b/vortex-array/src/arrow/datum.rs @@ -5,7 +5,6 @@ use arrow_array::Array as ArrowArray; use arrow_array::ArrayRef as ArrowArrayRef; use arrow_array::Datum as ArrowDatum; use arrow_schema::DataType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -105,7 +104,7 @@ where Ok(ConstantArray::new( array .scalar_at(0) - .vortex_expect("array of length 1 must support scalar_at(0)"), + .expect("array of length 1 must support scalar_at(0)"), len, ) .into_array()) diff --git a/vortex-array/src/arrow/executor/list.rs b/vortex-array/src/arrow/executor/list.rs index a372ef6f924..3a6154d8272 100644 --- a/vortex-array/src/arrow/executor/list.rs +++ b/vortex-array/src/arrow/executor/list.rs @@ -13,7 +13,6 @@ use vortex_buffer::BufferMut; use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -150,7 +149,7 @@ fn list_view_zctl( let final_size = final_size .as_primitive() .typed_value::() - .vortex_expect("non null"); + .expect("non null"); let offsets = offsets .cast(DType::Primitive(O::PTYPE, Nullability::NonNullable))? 
diff --git a/vortex-array/src/buffer.rs b/vortex-array/src/buffer.rs index 07baf843ead..abc2d3ce1d5 100644 --- a/vortex-array/src/buffer.rs +++ b/vortex-array/src/buffer.rs @@ -12,7 +12,6 @@ use futures::future::BoxFuture; use vortex_buffer::ALIGNMENT_TO_HOST_COPY; use vortex_buffer::Alignment; use vortex_buffer::ByteBuffer; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_utils::dyn_traits::DynEq; use vortex_utils::dyn_traits::DynHash; @@ -267,13 +266,13 @@ impl BufferHandle { /// A version of [`as_host_opt`][Self::as_host_opt] that panics if the allocation is /// not a host allocation. pub fn as_host(&self) -> &ByteBuffer { - self.as_host_opt().vortex_expect("expected host buffer") + self.as_host_opt().expect("expected host buffer") } /// A version of [`as_device_opt`][Self::as_device_opt] that panics if the allocation is /// not a device allocation. pub fn as_device(&self) -> &Arc { - self.as_device_opt().vortex_expect("expected device buffer") + self.as_device_opt().expect("expected device buffer") } /// Returns a host-resident copy of the data in the buffer. @@ -293,7 +292,7 @@ impl BufferHandle { /// See also: [`try_to_host`][Self::try_to_host]. pub fn to_host_sync(&self) -> ByteBuffer { self.try_to_host_sync() - .vortex_expect("to_host: copy from device to host failed") + .expect("to_host: copy from device to host failed") } /// Returns a host-resident copy of the data behind the handle, consuming the handle. @@ -307,7 +306,7 @@ impl BufferHandle { /// See the panic documentation on [`to_host`][Self::to_host]. pub fn into_host_sync(self) -> ByteBuffer { self.try_into_host_sync() - .vortex_expect("into_host: copy from device to host failed") + .expect("into_host: copy from device to host failed") } /// Attempts to load this buffer into a host-resident allocation. 
@@ -380,11 +379,11 @@ impl BufferHandle { pub fn to_host(&self) -> BoxFuture<'static, ByteBuffer> { let future = self .try_to_host() - .vortex_expect("to_host: failed to initiate copy from device to host"); + .expect("to_host: failed to initiate copy from device to host"); Box::pin(async move { future .await - .vortex_expect("to_host: copy from device to host failed") + .expect("to_host: copy from device to host failed") }) } @@ -396,11 +395,11 @@ impl BufferHandle { pub fn into_host(self) -> BoxFuture<'static, ByteBuffer> { let future = self .try_into_host() - .vortex_expect("into_host: failed to initiate copy from device to host"); + .expect("into_host: failed to initiate copy from device to host"); Box::pin(async move { future .await - .vortex_expect("into_host: copy from device to host failed") + .expect("into_host: copy from device to host failed") }) } } diff --git a/vortex-array/src/builders/bool.rs b/vortex-array/src/builders/bool.rs index 6e5bcb5a14f..2b52da8a535 100644 --- a/vortex-array/src/builders/bool.rs +++ b/vortex-array/src/builders/bool.rs @@ -7,7 +7,6 @@ use std::mem; use vortex_buffer::BitBufferMut; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_mask::Mask; @@ -119,7 +118,7 @@ impl ArrayBuilder for BoolBuilder { self.nulls.append_validity_mask( bool_array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); } diff --git a/vortex-array/src/builders/decimal.rs b/vortex-array/src/builders/decimal.rs index bc363567f90..cde6435817d 100644 --- a/vortex-array/src/builders/decimal.rs +++ b/vortex-array/src/builders/decimal.rs @@ -12,7 +12,6 @@ use vortex_dtype::Nullability; use vortex_dtype::i256; use vortex_dtype::match_each_decimal_value; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use 
vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -200,7 +199,7 @@ impl ArrayBuilder for DecimalBuilder { self.nulls.append_validity_mask( decimal_array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); } @@ -236,7 +235,7 @@ impl DecimalBuffer { T::DECIMAL_TYPE, ) }) - .vortex_expect("operation should succeed in builder"), + .expect("operation should succeed in builder"), ) }); } @@ -244,7 +243,7 @@ impl DecimalBuffer { fn push_n(&mut self, value: V, n: usize) { delegate_fn!(self, |T, buffer| { buffer.push_n( - ::from(value).vortex_expect("decimal conversion failure"), + ::from(value).expect("decimal conversion failure"), n, ) }); @@ -263,9 +262,8 @@ impl DecimalBuffer { I: Iterator, { delegate_fn!(self, |T, buffer| { - buffer.extend( - iter.map(|x| ::from(x).vortex_expect("decimal conversion failure")), - ) + buffer + .extend(iter.map(|x| ::from(x).expect("decimal conversion failure"))) }) } } diff --git a/vortex-array/src/builders/dict/bytes.rs b/vortex-array/src/builders/dict/bytes.rs index 430790a31ef..466c4d9ee99 100644 --- a/vortex-array/src/builders/dict/bytes.rs +++ b/vortex-array/src/builders/dict/bytes.rs @@ -11,7 +11,6 @@ use vortex_buffer::ByteBufferMut; use vortex_dtype::DType; use vortex_dtype::PType; use vortex_dtype::UnsignedPType; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use vortex_utils::aliases::hash_map::DefaultHashBuilder; use vortex_utils::aliases::hash_map::HashTable; @@ -108,7 +107,7 @@ impl BytesDictBuilder { val, 0, u32::try_from(self.values.len()) - .vortex_expect("values length must fit in u32"), + .expect("values length must fit in u32"), ); let additional_bytes = if view.is_inlined() { size_of::() @@ -137,7 +136,7 @@ impl BytesDictBuilder { } fn encode_bytes>(&mut self, accessor: &A, len: usize) -> ArrayRef { - let mut local_lookup = self.lookup.take().vortex_expect("Must 
have a lookup dict"); + let mut local_lookup = self.lookup.take().expect("Must have a lookup dict"); let mut codes: BufferMut = BufferMut::with_capacity(len); accessor.with_iterator(|it| { diff --git a/vortex-array/src/builders/fixed_size_list.rs b/vortex-array/src/builders/fixed_size_list.rs index caed797fdbf..6bcad336bb7 100644 --- a/vortex-array/src/builders/fixed_size_list.rs +++ b/vortex-array/src/builders/fixed_size_list.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -149,7 +148,7 @@ impl FixedSizeListBuilder { self.nulls.finish_with_nullability(self.dtype.nullability()), final_len, ) - .vortex_expect("tried to create an invalid `FixedSizeListArray` from a builder") + .expect("tried to create an invalid `FixedSizeListArray` from a builder") } /// The [`DType`] of the inner elements. Note that this is **not** the same as the [`DType`] of @@ -244,7 +243,7 @@ impl ArrayBuilder for FixedSizeListBuilder { self.nulls.append_validity_mask( array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); } diff --git a/vortex-array/src/builders/lazy_null_builder.rs b/vortex-array/src/builders/lazy_null_builder.rs index 4c7c203122a..c6105a1a81e 100644 --- a/vortex-array/src/builders/lazy_null_builder.rs +++ b/vortex-array/src/builders/lazy_null_builder.rs @@ -6,7 +6,6 @@ use vortex_buffer::BitBufferMut; use vortex_dtype::Nullability; use vortex_dtype::Nullability::NonNullable; use vortex_dtype::Nullability::Nullable; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use vortex_mask::Mask; @@ -57,7 +56,7 @@ impl LazyBitBufferBuilder { self.materialize_if_needed(); self.inner .as_mut() - .vortex_expect("cannot append null to non-nullable builder") + .expect("cannot append null to non-nullable 
builder") .append_n(false, n); } @@ -67,7 +66,7 @@ impl LazyBitBufferBuilder { self.materialize_if_needed(); self.inner .as_mut() - .vortex_expect("cannot append null to non-nullable builder") + .expect("cannot append null to non-nullable builder") .append(false); } @@ -77,7 +76,7 @@ impl LazyBitBufferBuilder { self.materialize_if_needed(); self.inner .as_mut() - .vortex_expect("buffer just materialized") + .expect("buffer just materialized") .append_buffer(bool_buffer); } @@ -95,7 +94,7 @@ impl LazyBitBufferBuilder { self.materialize_if_needed(); self.inner .as_mut() - .vortex_expect("buffer just materialized") + .expect("buffer just materialized") .set_to(index, v); } @@ -129,7 +128,7 @@ impl LazyBitBufferBuilder { } else { self.inner .as_mut() - .vortex_expect("buffer just materialized") + .expect("buffer just materialized") .reserve(additional); } } diff --git a/vortex-array/src/builders/list.rs b/vortex-array/src/builders/list.rs index 15fd03c4fa1..c1d167ce505 100644 --- a/vortex-array/src/builders/list.rs +++ b/vortex-array/src/builders/list.rs @@ -9,7 +9,6 @@ use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::Nullability::NonNullable; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -102,8 +101,7 @@ impl ListBuilder { self.elements_builder.extend_from_array(array); self.nulls.append_non_null(); self.offsets_builder.append_value( - O::from_usize(self.elements_builder.len()) - .vortex_expect("Failed to convert from usize to O"), + O::from_usize(self.elements_builder.len()).expect("Failed to convert from usize to O"), ); Ok(()) @@ -128,7 +126,7 @@ impl ListBuilder { self.nulls.append_non_null(); self.offsets_builder.append_value( O::from_usize(self.elements_builder.len()) - .vortex_expect("Failed to convert from usize to O"), + .expect("Failed to convert from usize to O"), ); } } @@ -149,7 +147,7 @@ impl 
ListBuilder { self.offsets_builder.finish(), self.nulls.finish_with_nullability(self.dtype.nullability()), ) - .vortex_expect("Buffer, offsets, and validity must have same length.") + .expect("Buffer, offsets, and validity must have same length.") } /// The [`DType`] of the inner elements. Note that this is **not** the same as the [`DType`] of @@ -183,9 +181,8 @@ impl ArrayBuilder for ListBuilder { fn append_zeros(&mut self, n: usize) { let curr_len = self.elements_builder.len(); for _ in 0..n { - self.offsets_builder.append_value( - O::from_usize(curr_len).vortex_expect("Failed to convert from usize to "), - ) + self.offsets_builder + .append_value(O::from_usize(curr_len).expect("Failed to convert from usize to ")) } self.nulls.append_n_non_nulls(n); } @@ -195,9 +192,8 @@ impl ArrayBuilder for ListBuilder { for _ in 0..n { // A list with a null element is can be a list with a zero-span offset and a validity // bit set - self.offsets_builder.append_value( - O::from_usize(curr_len).vortex_expect("Failed to convert from usize to "), - ) + self.offsets_builder + .append_value(O::from_usize(curr_len).expect("Failed to convert from usize to ")) } self.nulls.append_n_nulls(n); } @@ -223,7 +219,7 @@ impl ArrayBuilder for ListBuilder { self.nulls.append_validity_mask( array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); // Note that `ListViewArray` has `n` offsets and sizes, not `n+1` offsets like `ListArray`. 
@@ -256,13 +252,12 @@ impl ArrayBuilder for ListBuilder { if size > 0 { let list_elements = new_elements .slice(offset..offset + size) - .vortex_expect("list builder slice"); + .expect("list builder slice"); builder.elements_builder.extend_from_array(&list_elements); curr_offset += size; } - let new_offset = - O::from_usize(curr_offset).vortex_expect("Failed to convert offset"); + let new_offset = O::from_usize(curr_offset).expect("Failed to convert offset"); offsets_range.set_value(i, new_offset); } diff --git a/vortex-array/src/builders/listview.rs b/vortex-array/src/builders/listview.rs index 165a81e527a..9e7e27fb965 100644 --- a/vortex-array/src/builders/listview.rs +++ b/vortex-array/src/builders/listview.rs @@ -16,7 +16,6 @@ use vortex_dtype::DType; use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_panic; @@ -137,11 +136,10 @@ impl ListViewBuilder { self.elements_builder.extend_from_array(array); self.nulls.append_non_null(); - self.offsets_builder.append_value( - O::from_usize(curr_offset).vortex_expect("Failed to convert from usize to `O`"), - ); + self.offsets_builder + .append_value(O::from_usize(curr_offset).expect("Failed to convert from usize to `O`")); self.sizes_builder.append_value( - S::from_usize(num_elements).vortex_expect("Failed to convert from usize to `S`"), + S::from_usize(num_elements).expect("Failed to convert from usize to `S`"), ); Ok(()) @@ -177,11 +175,10 @@ impl ListViewBuilder { } self.nulls.append_non_null(); - self.offsets_builder.append_value( - O::from_usize(curr_offset).vortex_expect("Failed to convert from usize to `O`"), - ); + self.offsets_builder + .append_value(O::from_usize(curr_offset).expect("Failed to convert from usize to `O`")); self.sizes_builder.append_value( - S::from_usize(num_elements).vortex_expect("Failed to convert from usize to 
`S`"), + S::from_usize(num_elements).expect("Failed to convert from usize to `S`"), ); Ok(()) @@ -251,7 +248,7 @@ impl ArrayBuilder for ListViewBuilder { // `offsets` and `sizes` metadata to add an empty list. for _ in 0..n { self.offsets_builder.append_value( - O::from_usize(curr_offset).vortex_expect("Failed to convert from usize to `O`"), + O::from_usize(curr_offset).expect("Failed to convert from usize to `O`"), ); self.sizes_builder.append_value(S::zero()); } @@ -269,7 +266,7 @@ impl ArrayBuilder for ListViewBuilder { // A null list can have any representation, but we choose to use the zero representation. for _ in 0..n { self.offsets_builder.append_value( - O::from_usize(curr_offset).vortex_expect("Failed to convert from usize to `O`"), + O::from_usize(curr_offset).expect("Failed to convert from usize to `O`"), ); self.sizes_builder.append_value(S::zero()); } @@ -302,10 +299,10 @@ impl ArrayBuilder for ListViewBuilder { for i in 0..listview.len() { let list = listview .scalar_at(i) - .vortex_expect("scalar_at failed in extend_from_array_unchecked"); + .expect("scalar_at failed in extend_from_array_unchecked"); self.append_scalar(&list) - .vortex_expect("was unable to extend the `ListViewBuilder`") + .expect("was unable to extend the `ListViewBuilder`") } return; @@ -315,13 +312,13 @@ impl ArrayBuilder for ListViewBuilder { // the entire array. let listview = listview .rebuild(ListViewRebuildMode::MakeExact) - .vortex_expect("ListViewArray::rebuild(MakeExact) failed in extend_from_array"); + .expect("ListViewArray::rebuild(MakeExact) failed in extend_from_array"); debug_assert!(listview.is_zero_copy_to_list()); self.nulls.append_validity_mask( array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); // Bulk append the new elements (which should have no gaps or overlaps). 
@@ -341,9 +338,7 @@ impl ArrayBuilder for ListViewBuilder { .sizes() .to_array() .cast(self.sizes_builder.dtype().clone()) - .vortex_expect( - "was somehow unable to cast the new sizes to the type of the builder sizes", - ); + .expect("was somehow unable to cast the new sizes to the type of the builder sizes"); self.sizes_builder.extend_from_array(cast_sizes.as_ref()); // Now we need to adjust all of the offsets by adding the current number of elements in the @@ -395,17 +390,17 @@ fn adjust_and_extend_offsets<'a, O: IntegerPType, A: IntegerPType>( ) { let new_offsets_slice = new_offsets.as_slice::(); let old_elements_len = O::from_usize(old_elements_len) - .vortex_expect("the old elements length did not fit into the offset type (impossible)"); + .expect("the old elements length did not fit into the offset type (impossible)"); let new_elements_len = O::from_usize(new_elements_len) - .vortex_expect("the current elements length did not fit into the offset type (impossible)"); + .expect("the current elements length did not fit into the offset type (impossible)"); for i in 0..uninit_range.len() { let new_offset = O::from_usize( new_offsets_slice[i] .to_usize() - .vortex_expect("Offsets must always fit in usize"), + .expect("Offsets must always fit in usize"), ) - .vortex_expect("New offset somehow did not fit into the builder's offset type"); + .expect("New offset somehow did not fit into the builder's offset type"); // We have to check this even in release mode to ensure the final `new_unchecked` // construction in `finish_into_listview` is valid. 
diff --git a/vortex-array/src/builders/mod.rs b/vortex-array/src/builders/mod.rs index 738410ce44d..4c0ba4666ff 100644 --- a/vortex-array/src/builders/mod.rs +++ b/vortex-array/src/builders/mod.rs @@ -33,7 +33,6 @@ use std::any::Any; use vortex_dtype::DType; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_native_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; use vortex_mask::Mask; @@ -215,7 +214,7 @@ pub trait ArrayBuilder: Send { fn finish_into_canonical(&mut self) -> Canonical { self.finish() .to_canonical() - .vortex_expect("finish_into_canonical failed") + .expect("finish_into_canonical failed") } } diff --git a/vortex-array/src/builders/primitive.rs b/vortex-array/src/builders/primitive.rs index 42b29ee891c..ad1c5dc4c99 100644 --- a/vortex-array/src/builders/primitive.rs +++ b/vortex-array/src/builders/primitive.rs @@ -8,7 +8,6 @@ use vortex_buffer::BufferMut; use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_mask::Mask; @@ -178,7 +177,7 @@ impl ArrayBuilder for PrimitiveBuilder { self.nulls.append_validity_mask( array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); } diff --git a/vortex-array/src/builders/struct_.rs b/vortex-array/src/builders/struct_.rs index a6d92b351aa..d94a9bb6ab8 100644 --- a/vortex-array/src/builders/struct_.rs +++ b/vortex-array/src/builders/struct_.rs @@ -7,7 +7,6 @@ use itertools::Itertools; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -108,7 +107,7 @@ impl StructBuilder { let validity = 
self.nulls.finish_with_nullability(self.dtype.nullability()); StructArray::try_new_with_dtype(fields, self.struct_fields().clone(), len, validity) - .vortex_expect("Fields must all have same length.") + .expect("Fields must all have same length.") } /// The [`StructFields`] of this struct builder. @@ -179,7 +178,7 @@ impl ArrayBuilder for StructBuilder { self.nulls.append_validity_mask( array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); } diff --git a/vortex-array/src/builders/tests.rs b/vortex-array/src/builders/tests.rs index 56f3b14ed84..01ec4689e82 100644 --- a/vortex-array/src/builders/tests.rs +++ b/vortex-array/src/builders/tests.rs @@ -12,7 +12,6 @@ use vortex_dtype::StructFields; use vortex_dtype::datetime::TimeUnit; use vortex_dtype::datetime::Timestamp; use vortex_dtype::half::f16; -use vortex_error::VortexExpect; use crate::builders::ArrayBuilder; use crate::builders::builder_with_capacity; @@ -232,7 +231,7 @@ where let canonical_indirect = builder2 .finish() .to_canonical() - .vortex_expect("to_canonical failed"); + .expect("to_canonical failed"); // Convert both to arrays for comparison. 
let array_direct = canonical_direct.into_array(); diff --git a/vortex-array/src/builders/varbinview.rs b/vortex-array/src/builders/varbinview.rs index e393871bca1..c24159e815d 100644 --- a/vortex-array/src/builders/varbinview.rs +++ b/vortex-array/src/builders/varbinview.rs @@ -11,7 +11,6 @@ use vortex_buffer::BufferMut; use vortex_buffer::ByteBuffer; use vortex_buffer::ByteBufferMut; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -91,7 +90,7 @@ impl VarBinViewBuilder { fn append_value_view(&mut self, value: &[u8]) { let length = - u32::try_from(value.len()).vortex_expect("cannot have a single string >2^32 in length"); + u32::try_from(value.len()).expect("cannot have a single string >2^32 in length"); if length <= 12 { self.views_builder.push(BinaryView::make_view(value, 0, 0)); return; @@ -137,7 +136,7 @@ impl VarBinViewBuilder { } let buffer_idx = self.completed.len(); - let offset = u32::try_from(self.in_progress.len()).vortex_expect("too many buffers"); + let offset = u32::try_from(self.in_progress.len()).expect("too many buffers"); self.in_progress.extend_from_slice(value); (buffer_idx, offset) @@ -274,7 +273,7 @@ impl ArrayBuilder for VarBinViewBuilder { self.push_only_validity_mask( array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked"), + .expect("validity_mask in extend_from_array_unchecked"), ); let view_adjustment = @@ -293,7 +292,7 @@ impl ArrayBuilder for VarBinViewBuilder { ), ViewAdjustment::Rewriting(adjustment) => match array .validity_mask() - .vortex_expect("validity_mask in extend_from_array_unchecked") + .expect("validity_mask in extend_from_array_unchecked") { Mask::AllTrue(_) => { for (idx, &view) in array.views().iter().enumerate() { @@ -596,7 +595,7 @@ impl BuffersWithOffsets { let buffer_utilizations = array .buffer_utilizations() - .vortex_expect("buffer_utilizations in 
BuffersWithOffsets::from_array"); + .expect("buffer_utilizations in BuffersWithOffsets::from_array"); let mut has_rewrite = false; let mut has_nonzero_offset = false; for utilization in buffer_utilizations.iter() { @@ -626,7 +625,7 @@ impl BuffersWithOffsets { // keep all buffers (false, false) => { let buffers: Vec<_> = buffers_with_offsets_iter - .map(|(b, _)| b.vortex_expect("already checked for rewrite")) + .map(|(b, _)| b.expect("already checked for rewrite")) .collect(); Self::AllKept { buffers: Arc::from(buffers), @@ -644,9 +643,7 @@ impl BuffersWithOffsets { // keep all buffers, but some have offsets (false, true) => { let (buffers, offsets): (Vec<_>, _) = buffers_with_offsets_iter - .map(|(buffer, offset)| { - (buffer.vortex_expect("already checked for rewrite"), offset) - }) + .map(|(buffer, offset)| (buffer.expect("already checked for rewrite"), offset)) .collect(); Self::AllKept { buffers: Arc::from(buffers), diff --git a/vortex-array/src/canonical.rs b/vortex-array/src/canonical.rs index 11be1a7119d..958b2850e78 100644 --- a/vortex-array/src/canonical.rs +++ b/vortex-array/src/canonical.rs @@ -8,7 +8,6 @@ use std::sync::Arc; use vortex_buffer::Buffer; use vortex_dtype::DType; use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_panic; @@ -412,55 +411,55 @@ pub trait ToCanonical { impl ToCanonical for A { fn to_null(&self) -> NullArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_null() } fn to_bool(&self) -> BoolArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_bool() } fn to_primitive(&self) -> PrimitiveArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_primitive() } fn to_decimal(&self) -> DecimalArray { self.to_canonical() - .vortex_expect("to_canonical failed") + 
.expect("to_canonical failed") .into_decimal() } fn to_struct(&self) -> StructArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_struct() } fn to_listview(&self) -> ListViewArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_listview() } fn to_fixed_size_list(&self) -> FixedSizeListArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_fixed_size_list() } fn to_varbinview(&self) -> VarBinViewArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_varbinview() } fn to_extension(&self) -> ExtensionArray { self.to_canonical() - .vortex_expect("to_canonical failed") + .expect("to_canonical failed") .into_extension() } } diff --git a/vortex-array/src/compute/conformance/binary_numeric.rs b/vortex-array/src/compute/conformance/binary_numeric.rs index c2cb63e1d4d..d83533c3b91 100644 --- a/vortex-array/src/compute/conformance/binary_numeric.rs +++ b/vortex-array/src/compute/conformance/binary_numeric.rs @@ -28,7 +28,6 @@ use num_traits::Num; use vortex_dtype::DType; use vortex_dtype::NativePType; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::vortex_err; use vortex_error::vortex_panic; @@ -48,7 +47,7 @@ fn to_vec_of_scalar(array: &dyn Array) -> Vec { .map(|index| { array .scalar_at(index) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") }) .collect_vec() } @@ -97,10 +96,10 @@ where let one = T::from(1) .ok_or_else(|| vortex_err!("could not convert 1 into array native type")) - .vortex_expect("operation should succeed in conformance test"); + .expect("operation should succeed in conformance test"); let scalar_one = Scalar::from(one) .cast(array.dtype()) - .vortex_expect("operation should succeed in conformance test"); + .expect("operation should succeed in 
conformance test"); let operators: [NumericOperator; 6] = [ NumericOperator::Add, @@ -333,7 +332,7 @@ where let scalar = Scalar::from(scalar_value) .cast(array.dtype()) - .vortex_expect("operation should succeed in conformance test"); + .expect("operation should succeed in conformance test"); // Only test operators that make sense for the given scalar let operators = if scalar_value == T::zero() { @@ -368,7 +367,7 @@ where continue; } - let result = result.vortex_expect("operation should succeed in conformance test"); + let result = result.expect("operation should succeed in conformance test"); let actual_values = to_vec_of_scalar(&result); // Check each element for overflow/underflow diff --git a/vortex-array/src/compute/conformance/cast.rs b/vortex-array/src/compute/conformance/cast.rs index 1fd9c263a34..c1786eae032 100644 --- a/vortex-array/src/compute/conformance/cast.rs +++ b/vortex-array/src/compute/conformance/cast.rs @@ -4,7 +4,6 @@ use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -60,7 +59,7 @@ pub fn test_cast_conformance(array: &dyn Array) { fn test_cast_identity(array: &dyn Array) { // Casting to the same type should be a no-op let result = cast_and_execute(&array.to_array(), array.dtype().clone()) - .vortex_expect("cast should succeed in conformance test"); + .expect("cast should succeed in conformance test"); assert_eq!(result.len(), array.len()); assert_eq!(result.dtype(), array.dtype()); @@ -69,10 +68,10 @@ fn test_cast_identity(array: &dyn Array) { assert_eq!( array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -80,7 +79,7 @@ fn test_cast_identity(array: &dyn Array) { fn 
test_cast_from_null(array: &dyn Array) { // Null can be cast to itself let result = cast_and_execute(&array.to_array(), DType::Null) - .vortex_expect("cast should succeed in conformance test"); + .expect("cast should succeed in conformance test"); assert_eq!(result.len(), array.len()); assert_eq!(result.dtype(), &DType::Null); @@ -95,7 +94,7 @@ fn test_cast_from_null(array: &dyn Array) { for dtype in nullable_types { let result = cast_and_execute(&array.to_array(), dtype.clone()) - .vortex_expect("cast should succeed in conformance test"); + .expect("cast should succeed in conformance test"); assert_eq!(result.len(), array.len()); assert_eq!(result.dtype(), &dtype); @@ -104,7 +103,7 @@ fn test_cast_from_null(array: &dyn Array) { assert!( result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .is_null() ); } @@ -124,11 +123,11 @@ fn test_cast_from_null(array: &dyn Array) { fn test_cast_to_non_nullable(array: &dyn Array) { if array .invalid_count() - .vortex_expect("invalid_count should succeed in conformance test") + .expect("invalid_count should succeed in conformance test") == 0 { let non_nullable = cast_and_execute(&array.to_array(), array.dtype().as_nonnullable()) - .vortex_expect("arrays without nulls can cast to non-nullable"); + .expect("arrays without nulls can cast to non-nullable"); assert_eq!(non_nullable.dtype(), &array.dtype().as_nonnullable()); assert_eq!(non_nullable.len(), array.len()); @@ -136,15 +135,15 @@ fn test_cast_to_non_nullable(array: &dyn Array) { assert_eq!( array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), non_nullable .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } let back_to_nullable = cast_and_execute(&non_nullable, array.dtype().clone()) - 
.vortex_expect("non-nullable arrays can cast to nullable"); + .expect("non-nullable arrays can cast to nullable"); assert_eq!(back_to_nullable.dtype(), array.dtype()); assert_eq!(back_to_nullable.len(), array.len()); @@ -152,10 +151,10 @@ fn test_cast_to_non_nullable(array: &dyn Array) { assert_eq!( array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), back_to_nullable .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } else { @@ -177,7 +176,7 @@ fn test_cast_to_non_nullable(array: &dyn Array) { fn test_cast_to_nullable(array: &dyn Array) { let nullable = cast_and_execute(&array.to_array(), array.dtype().as_nullable()) - .vortex_expect("arrays without nulls can cast to nullable"); + .expect("arrays without nulls can cast to nullable"); assert_eq!(nullable.dtype(), &array.dtype().as_nullable()); assert_eq!(nullable.len(), array.len()); @@ -185,15 +184,15 @@ fn test_cast_to_nullable(array: &dyn Array) { assert_eq!( array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), nullable .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } let back = cast_and_execute(&nullable, array.dtype().clone()) - .vortex_expect("casting to nullable and back should be a no-op"); + .expect("casting to nullable and back should be a no-op"); assert_eq!(back.dtype(), array.dtype()); assert_eq!(back.len(), array.len()); @@ -201,9 +200,9 @@ fn test_cast_to_nullable(array: &dyn Array) { assert_eq!( array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), back.scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + 
.expect("scalar_at should succeed in conformance test") ); } } @@ -241,7 +240,7 @@ fn fits(value: &Scalar, ptype: PType) -> bool { } fn test_cast_to_primitive(array: &dyn Array, target_ptype: PType, test_round_trip: bool) { - let maybe_min_max = min_max(array).vortex_expect("cast should succeed in conformance test"); + let maybe_min_max = min_max(array).expect("cast should succeed in conformance test"); if let Some(MinMaxResult { min, max }) = maybe_min_max && (!fits(&min, target_ptype) || !fits(&max, target_ptype)) @@ -279,22 +278,22 @@ fn test_cast_to_primitive(array: &dyn Array, target_ptype: PType, test_round_tri assert_eq!( array .validity_mask() - .vortex_expect("validity_mask should succeed in conformance test"), + .expect("validity_mask should succeed in conformance test"), casted .validity_mask() - .vortex_expect("validity_mask should succeed in conformance test") + .expect("validity_mask should succeed in conformance test") ); for i in 0..array.len().min(10) { let original = array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let casted = casted .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( original .cast(casted.dtype()) - .vortex_expect("cast should succeed in conformance test"), + .expect("cast should succeed in conformance test"), casted, "{i} {original} {casted}" ); @@ -303,7 +302,7 @@ fn test_cast_to_primitive(array: &dyn Array, target_ptype: PType, test_round_tri original, casted .cast(original.dtype()) - .vortex_expect("cast should succeed in conformance test"), + .expect("cast should succeed in conformance test"), "{i} {original} {casted}" ); } diff --git a/vortex-array/src/compute/conformance/consistency.rs b/vortex-array/src/compute/conformance/consistency.rs index 79478dbfeb1..f0cdb69d9e7 100644 --- 
a/vortex-array/src/compute/conformance/consistency.rs +++ b/vortex-array/src/compute/conformance/consistency.rs @@ -23,7 +23,6 @@ use vortex_buffer::BitBuffer; use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use vortex_mask::Mask; @@ -64,7 +63,7 @@ fn test_filter_take_consistency(array: &dyn Array) { // Filter the array let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); // Create indices where mask is true let indices: Vec = mask_pattern @@ -77,7 +76,7 @@ fn test_filter_take_consistency(array: &dyn Array) { // Take using those indices let taken = array .take(indices_array.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); // Results should be identical assert_eq!( @@ -92,10 +91,10 @@ fn test_filter_take_consistency(array: &dyn Array) { for i in 0..filtered.len() { let filtered_val = filtered .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let taken_val = taken .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( filtered_val, taken_val, "Filter and take produced different values at index {i}. 
\ @@ -130,9 +129,9 @@ fn test_double_mask_consistency(array: &dyn Array) { let mask2: Mask = (0..len).map(|i| i % 2 == 0).collect(); // Apply masks sequentially - let first_masked = mask(array, &mask1).vortex_expect("mask should succeed in conformance test"); + let first_masked = mask(array, &mask1).expect("mask should succeed in conformance test"); let double_masked = - mask(&first_masked, &mask2).vortex_expect("mask should succeed in conformance test"); + mask(&first_masked, &mask2).expect("mask should succeed in conformance test"); // Create combined mask (OR operation - element is masked if EITHER mask is true) let combined_pattern: BitBuffer = mask1 @@ -145,7 +144,7 @@ fn test_double_mask_consistency(array: &dyn Array) { // Apply combined mask directly let directly_masked = - mask(array, &combined_mask).vortex_expect("mask should succeed in conformance test"); + mask(array, &combined_mask).expect("mask should succeed in conformance test"); // Results should be identical assert_eq!( @@ -160,10 +159,10 @@ fn test_double_mask_consistency(array: &dyn Array) { for i in 0..double_masked.len() { let double_val = double_masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let direct_val = directly_masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( double_val, direct_val, "Sequential masking and combined masking produced different values at index {i}. 
\ @@ -195,7 +194,7 @@ fn test_filter_identity(array: &dyn Array) { let all_true_mask = Mask::new_true(len); let filtered = array .filter(all_true_mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); // Filtered array should be identical to original assert_eq!( @@ -210,10 +209,10 @@ fn test_filter_identity(array: &dyn Array) { for i in 0..len { let original_val = array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let filtered_val = filtered .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( filtered_val, original_val, "Filtering with all-true mask should preserve all values. \ @@ -242,8 +241,7 @@ fn test_mask_identity(array: &dyn Array) { } let all_false_mask = Mask::new_false(len); - let masked = - mask(array, &all_false_mask).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &all_false_mask).expect("mask should succeed in conformance test"); // Masked array should have same values (just nullable) assert_eq!( @@ -264,10 +262,10 @@ fn test_mask_identity(array: &dyn Array) { for i in 0..len { let original_val = array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let masked_val = masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let expected_val = original_val.clone().into_nullable(); assert_eq!( masked_val, expected_val, @@ -304,12 +302,12 @@ fn test_slice_filter_consistency(array: &dyn Array) { let mask = Mask::from_iter(mask_pattern); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance 
test"); // Slice should produce the same result let sliced = array .slice(1..4.min(len)) - .vortex_expect("slice should succeed in conformance test"); + .expect("slice should succeed in conformance test"); assert_eq!( filtered.len(), @@ -323,10 +321,10 @@ fn test_slice_filter_consistency(array: &dyn Array) { for i in 0..filtered.len() { let filtered_val = filtered .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let sliced_val = sliced .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( filtered_val, sliced_val, "Filter with contiguous mask and slice produced different values at index {i}. \ @@ -359,12 +357,12 @@ fn test_take_slice_consistency(array: &dyn Array) { let indices = PrimitiveArray::from_iter((1..end).map(|i| i as u64)).into_array(); let taken = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); // Slice from 1 to end let sliced = array .slice(1..end) - .vortex_expect("slice should succeed in conformance test"); + .expect("slice should succeed in conformance test"); assert_eq!( taken.len(), @@ -378,10 +376,10 @@ fn test_take_slice_consistency(array: &dyn Array) { for i in 0..taken.len() { let taken_val = taken .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let sliced_val = sliced .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( taken_val, sliced_val, "Take with sequential indices and slice produced different values at index {i}. 
\ @@ -403,7 +401,7 @@ fn test_filter_preserves_order(array: &dyn Array) { let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); // Verify the filtered array contains the right elements in order assert_eq!(filtered.len(), 3.min(len)); @@ -411,26 +409,26 @@ fn test_filter_preserves_order(array: &dyn Array) { assert_eq!( filtered .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); assert_eq!( filtered .scalar_at(1) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(2) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); assert_eq!( filtered .scalar_at(2) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(3) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -446,17 +444,17 @@ fn test_take_repeated_indices(array: &dyn Array) { let indices = PrimitiveArray::from_iter([0u64, 0, 0]).into_array(); let taken = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(taken.len(), 3); for i in 0..3 { assert_eq!( taken .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -471,29 +469,29 
@@ fn test_mask_filter_null_consistency(array: &dyn Array) { // First mask some elements let mask_pattern: Vec = (0..len).map(|i| i % 2 == 0).collect(); let mask_array = Mask::from_iter(mask_pattern); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); // Then filter to remove the nulls let filter_pattern: Vec = (0..len).map(|i| i % 2 != 0).collect(); let filter_mask = Mask::from_iter(filter_pattern); let filtered = masked .filter(filter_mask.clone()) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); // This should be equivalent to directly filtering the original array let direct_filtered = array .filter(filter_mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), direct_filtered.len()); for i in 0..filtered.len() { assert_eq!( filtered .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), direct_filtered .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -505,7 +503,7 @@ fn test_empty_operations_consistency(array: &dyn Array) { // Empty filter let empty_filter = array .filter(Mask::new_false(len)) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(empty_filter.len(), 0); assert_eq!(empty_filter.dtype(), array.dtype()); @@ -513,7 +511,7 @@ fn test_empty_operations_consistency(array: &dyn Array) { let empty_indices = PrimitiveArray::empty::(Nullability::NonNullable).into_array(); let empty_take = array .take(empty_indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take 
should succeed in conformance test"); assert_eq!(empty_take.len(), 0); assert_eq!(empty_take.dtype(), array.dtype()); @@ -521,7 +519,7 @@ fn test_empty_operations_consistency(array: &dyn Array) { if len > 0 { let empty_slice = array .slice(0..0) - .vortex_expect("slice should succeed in conformance test"); + .expect("slice should succeed in conformance test"); assert_eq!(empty_slice.len(), 0); assert_eq!(empty_slice.dtype(), array.dtype()); } @@ -538,7 +536,7 @@ fn test_take_preserves_properties(array: &dyn Array) { let indices = PrimitiveArray::from_iter((0..len).map(|i| i as u64)).into_array(); let taken = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); // Should be identical to original assert_eq!(taken.len(), array.len()); @@ -547,10 +545,10 @@ fn test_take_preserves_properties(array: &dyn Array) { assert_eq!( taken .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -583,7 +581,7 @@ fn test_nullable_indices_consistency(array: &dyn Array) { let taken = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); // Result should have nulls where indices were null assert_eq!( @@ -602,11 +600,11 @@ fn test_nullable_indices_consistency(array: &dyn Array) { // Check first element (from index 0) let expected_0 = array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable(); let actual_0 = taken .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( actual_0, 
expected_0, "Take with nullable indices: element at position 0 should be from array index 0. \ @@ -616,7 +614,7 @@ fn test_nullable_indices_consistency(array: &dyn Array) { // Check second element (should be null) let actual_1 = taken .scalar_at(1) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert!( actual_1.is_null(), "Take with nullable indices: element at position 1 should be null, but got {actual_1:?}" @@ -625,11 +623,11 @@ fn test_nullable_indices_consistency(array: &dyn Array) { // Check third element (from index 2) let expected_2 = array .scalar_at(2) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable(); let actual_2 = taken .scalar_at(2) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( actual_2, expected_2, "Take with nullable indices: element at position 2 should be from array index 2. 
\ @@ -649,14 +647,14 @@ fn test_large_array_consistency(array: &dyn Array) { let indices_array = PrimitiveArray::from_iter(indices).into_array(); let taken = array .take(indices_array.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); // Create equivalent filter mask let mask_pattern: Vec = (0..len).map(|i| i % 10 == 0).collect(); let mask = Mask::from_iter(mask_pattern); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); // Results should match assert_eq!(taken.len(), filtered.len()); @@ -664,10 +662,10 @@ fn test_large_array_consistency(array: &dyn Array) { assert_eq!( taken .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), filtered .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -707,7 +705,7 @@ fn test_comparison_inverse_consistency(array: &dyn Array) { } else { array .scalar_at(len / 2) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") }; // Test Eq vs NotEq @@ -716,8 +714,7 @@ fn test_comparison_inverse_consistency(array: &dyn Array) { compare(array, const_array.as_ref(), Operator::Eq), compare(array, const_array.as_ref(), Operator::NotEq), ) { - let inverted_eq = - invert(&eq_result).vortex_expect("invert should succeed in conformance test"); + let inverted_eq = invert(&eq_result).expect("invert should succeed in conformance test"); assert_eq!( inverted_eq.len(), @@ -728,10 +725,10 @@ fn test_comparison_inverse_consistency(array: &dyn Array) { for i in 0..inverted_eq.len() { let inv_val = inverted_eq .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in 
conformance test"); let neq_val = neq_result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( inv_val, neq_val, "At index {i}: NOT(Eq) should equal NotEq. \ @@ -745,16 +742,15 @@ fn test_comparison_inverse_consistency(array: &dyn Array) { compare(array, const_array.as_ref(), Operator::Gt), compare(array, const_array.as_ref(), Operator::Lte), ) { - let inverted_gt = - invert(>_result).vortex_expect("invert should succeed in conformance test"); + let inverted_gt = invert(>_result).expect("invert should succeed in conformance test"); for i in 0..inverted_gt.len() { let inv_val = inverted_gt .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let lte_val = lte_result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( inv_val, lte_val, "At index {i}: NOT(Gt) should equal Lte. \ @@ -768,16 +764,15 @@ fn test_comparison_inverse_consistency(array: &dyn Array) { compare(array, const_array.as_ref(), Operator::Lt), compare(array, const_array.as_ref(), Operator::Gte), ) { - let inverted_lt = - invert(<_result).vortex_expect("invert should succeed in conformance test"); + let inverted_lt = invert(<_result).expect("invert should succeed in conformance test"); for i in 0..inverted_lt.len() { let inv_val = inverted_lt .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let gte_val = gte_result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( inv_val, gte_val, "At index {i}: NOT(Lt) should equal Gte. 
\ @@ -820,7 +815,7 @@ fn test_comparison_symmetry_consistency(array: &dyn Array) { } else { array .scalar_at(len / 2) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") }; // Create a constant array with the test scalar for reverse comparison @@ -840,10 +835,10 @@ fn test_comparison_symmetry_consistency(array: &dyn Array) { for i in 0..arr_gt_scalar.len() { let arr_gt = arr_gt_scalar .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let scalar_lt = scalar_lt_arr .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( arr_gt, scalar_lt, "At index {i}: (array > scalar) should equal (scalar < array). \ @@ -860,10 +855,10 @@ fn test_comparison_symmetry_consistency(array: &dyn Array) { for i in 0..arr_eq_scalar.len() { let arr_eq = arr_eq_scalar .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let scalar_eq = scalar_eq_arr .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( arr_eq, scalar_eq, "At index {i}: (array == scalar) should equal (scalar == array). 
\ @@ -905,10 +900,9 @@ fn test_boolean_demorgan_consistency(array: &dyn Array) { if let (Ok(a_and_b), Ok(not_a), Ok(not_b)) = (and_kleene(array, mask), invert(array), invert(mask)) { - let not_a_and_b = - invert(&a_and_b).vortex_expect("invert should succeed in conformance test"); + let not_a_and_b = invert(&a_and_b).expect("invert should succeed in conformance test"); let not_a_or_not_b = - or_kleene(¬_a, ¬_b).vortex_expect("or should succeed in conformance test"); + or_kleene(¬_a, ¬_b).expect("or should succeed in conformance test"); assert_eq!( not_a_and_b.len(), @@ -919,10 +913,10 @@ fn test_boolean_demorgan_consistency(array: &dyn Array) { for i in 0..not_a_and_b.len() { let left = not_a_and_b .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let right = not_a_or_not_b .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( left, right, "De Morgan's first law failed at index {i}: \ @@ -935,17 +929,17 @@ fn test_boolean_demorgan_consistency(array: &dyn Array) { if let (Ok(a_or_b), Ok(not_a), Ok(not_b)) = (or_kleene(array, mask), invert(array), invert(mask)) { - let not_a_or_b = invert(&a_or_b).vortex_expect("invert should succeed in conformance test"); + let not_a_or_b = invert(&a_or_b).expect("invert should succeed in conformance test"); let not_a_and_not_b = - and_kleene(¬_a, ¬_b).vortex_expect("and should succeed in conformance test"); + and_kleene(¬_a, ¬_b).expect("and should succeed in conformance test"); for i in 0..not_a_or_b.len() { let left = not_a_or_b .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let right = not_a_and_not_b .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( left, 
right, "De Morgan's second law failed at index {i}: \ @@ -989,20 +983,20 @@ fn test_slice_aggregate_consistency(array: &dyn Array) { // Get sliced array and canonical slice let sliced = array .slice(start..end) - .vortex_expect("slice should succeed in conformance test"); - let canonical = array.to_canonical().vortex_expect("to_canonical failed"); + .expect("slice should succeed in conformance test"); + let canonical = array.to_canonical().expect("to_canonical failed"); let canonical_sliced = canonical .as_ref() .slice(start..end) - .vortex_expect("slice should succeed in conformance test"); + .expect("slice should succeed in conformance test"); // Test null count through invalid_count let sliced_invalid_count = sliced .invalid_count() - .vortex_expect("invalid_count should succeed in conformance test"); + .expect("invalid_count should succeed in conformance test"); let canonical_invalid_count = canonical_sliced .invalid_count() - .vortex_expect("invalid_count should succeed in conformance test"); + .expect("invalid_count should succeed in conformance test"); assert_eq!( sliced_invalid_count, canonical_invalid_count, "null_count on sliced array should match canonical. 
\ @@ -1085,7 +1079,7 @@ fn test_cast_slice_consistency(array: &dyn Array) { let end = 7.min(len - 2).max(start + 1); // Ensure we have at least 1 element // Get canonical form of the original array - let canonical = array.to_canonical().vortex_expect("to_canonical failed"); + let canonical = array.to_canonical().expect("to_canonical failed"); // Choose appropriate target dtype based on the array's type let target_dtypes = match array.dtype() { @@ -1205,7 +1199,7 @@ fn test_cast_slice_consistency(array: &dyn Array) { // Slice the array let sliced = array .slice(start..end) - .vortex_expect("slice should succeed in conformance test"); + .expect("slice should succeed in conformance test"); // Try to cast the sliced array (force execution via to_canonical) let slice_then_cast = match sliced @@ -1229,13 +1223,13 @@ fn test_cast_slice_consistency(array: &dyn Array) { for i in 0..slice_then_cast.len() { let slice_cast_val = slice_then_cast .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); // Get the corresponding value from the canonical array (adjusted for slice offset) let canonical_val = canonical .as_ref() .scalar_at(start + i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); // Cast the canonical scalar to the target dtype let expected_val = match canonical_val.cast(&target_dtype) { @@ -1269,7 +1263,7 @@ fn test_cast_slice_consistency(array: &dyn Array) { }; let cast_then_slice = casted .slice(start..end) - .vortex_expect("slice should succeed in conformance test"); + .expect("slice should succeed in conformance test"); // Verify the two approaches produce identical results assert_eq!( @@ -1281,10 +1275,10 @@ fn test_cast_slice_consistency(array: &dyn Array) { for i in 0..slice_then_cast.len() { let slice_cast_val = slice_then_cast .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance 
test"); + .expect("scalar_at should succeed in conformance test"); let cast_slice_val = cast_then_slice .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!( slice_cast_val, cast_slice_val, "Slice-then-cast and cast-then-slice produced different values at index {i}. \ diff --git a/vortex-array/src/compute/conformance/filter.rs b/vortex-array/src/compute/conformance/filter.rs index 727098c7485..8b210385b2b 100644 --- a/vortex-array/src/compute/conformance/filter.rs +++ b/vortex-array/src/compute/conformance/filter.rs @@ -2,7 +2,6 @@ // SPDX-FileCopyrightText: Copyright the Vortex contributors use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_mask::Mask; use crate::Array; @@ -64,7 +63,7 @@ fn test_all_filter(array: &dyn Array) { let mask = Mask::new_true(len); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_arrays_eq!(filtered, array); } @@ -74,7 +73,7 @@ fn test_none_filter(array: &dyn Array) { let mask = Mask::new_false(len); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), 0); assert_eq!(filtered.dtype(), array.dtype()); } @@ -91,7 +90,7 @@ fn test_selective_filter(array: &dyn Array) { let mask = Mask::from_iter(mask_values); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), expected_count); // Verify correct elements are kept @@ -99,10 +98,10 @@ fn test_selective_filter(array: &dyn Array) { assert_eq!( filtered .scalar_at(filtered_idx) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - 
.vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } @@ -114,23 +113,23 @@ fn test_selective_filter(array: &dyn Array) { let mask = Mask::from_iter(mask_values); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), 2); assert_eq!( filtered .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); assert_eq!( filtered .scalar_at(1) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(len - 1) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -147,15 +146,15 @@ fn test_single_element_filter(array: &dyn Array) { let mask = Mask::from_iter(mask_values); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), 1); assert_eq!( filtered .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); // Test selecting only the last element @@ -165,15 +164,15 @@ fn test_single_element_filter(array: &dyn Array) { let mask = Mask::from_iter(mask_values); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), 1); assert_eq!( 
filtered .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(len - 1) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -185,13 +184,13 @@ fn test_empty_array_filter(dtype: &DType) { let empty_mask = Mask::new_false(0); let filtered = empty_array .filter(empty_mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), 0); let empty_mask = Mask::new_true(0); let filtered = empty_array .filter(empty_mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), 0); } @@ -226,7 +225,7 @@ fn test_alternating_pattern_filter(array: &dyn Array) { let mask = Mask::from_iter(pattern.clone()); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), expected_count); // Verify correct elements are kept @@ -236,10 +235,10 @@ fn test_alternating_pattern_filter(array: &dyn Array) { assert_eq!( filtered .scalar_at(filtered_idx) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); filtered_idx += 1; } @@ -260,7 +259,7 @@ fn test_runs_pattern_filter(array: &dyn Array) { let mask = Mask::from_iter(pattern); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), expected_count); } @@ -278,7 +277,7 @@ fn test_sparse_true_filter(array: &dyn Array) { let mask = 
Mask::from_iter(pattern); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), expected_count); } @@ -296,7 +295,7 @@ fn test_sparse_false_filter(array: &dyn Array) { let mask = Mask::from_iter(pattern); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), expected_count); } @@ -313,6 +312,6 @@ fn test_random_pattern_filter(array: &dyn Array) { let mask = Mask::from_iter(pattern); let filtered = array .filter(mask) - .vortex_expect("filter should succeed in conformance test"); + .expect("filter should succeed in conformance test"); assert_eq!(filtered.len(), expected_count); } diff --git a/vortex-array/src/compute/conformance/mask.rs b/vortex-array/src/compute/conformance/mask.rs index 517707eacec..877307671d3 100644 --- a/vortex-array/src/compute/conformance/mask.rs +++ b/vortex-array/src/compute/conformance/mask.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_mask::Mask; use crate::Array; @@ -39,7 +38,7 @@ fn test_heterogenous_mask(array: &dyn Array) { let mask_pattern: Vec = (0..len).map(|i| i % 3 != 1).collect(); let mask_array = Mask::from_iter(mask_pattern.clone()); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); assert_eq!(masked.len(), array.len()); // Verify masked elements are null and unmasked elements are preserved @@ -48,16 +47,16 @@ fn test_heterogenous_mask(array: &dyn Array) { assert!( !masked .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") ); } else { 
assert_eq!( masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable() ); } @@ -70,7 +69,7 @@ fn test_empty_mask(array: &dyn Array) { let all_unmasked = vec![false; len]; let mask_array = Mask::from_iter(all_unmasked); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); assert_eq!(masked.len(), array.len()); // All elements should be preserved @@ -78,10 +77,10 @@ fn test_empty_mask(array: &dyn Array) { assert_eq!( masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable() ); } @@ -93,7 +92,7 @@ fn test_full_mask(array: &dyn Array) { let all_masked = vec![true; len]; let mask_array = Mask::from_iter(all_masked); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); assert_eq!(masked.len(), array.len()); // All elements should be null @@ -101,7 +100,7 @@ fn test_full_mask(array: &dyn Array) { assert!( !masked .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") ); } } @@ -112,7 +111,7 @@ fn test_alternating_mask(array: &dyn Array) { let pattern: Vec = (0..len).map(|i| i % 2 == 0).collect(); let mask_array = Mask::from_iter(pattern); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let 
masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); assert_eq!(masked.len(), array.len()); for i in 0..len { @@ -120,16 +119,16 @@ fn test_alternating_mask(array: &dyn Array) { assert!( !masked .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") ); } else { assert_eq!( masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable() ); } @@ -147,7 +146,7 @@ fn test_sparse_mask(array: &dyn Array) { let pattern: Vec = (0..len).map(|i| i % 10 == 0).collect(); let mask_array = Mask::from_iter(pattern.clone()); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); assert_eq!(masked.len(), array.len()); // Count how many elements are valid after masking @@ -155,7 +154,7 @@ fn test_sparse_mask(array: &dyn Array) { .filter(|&i| { masked .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") }) .count(); @@ -167,7 +166,7 @@ fn test_sparse_mask(array: &dyn Array) { pattern[i] || !array .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") }) .count(); @@ -183,21 +182,21 @@ fn test_single_element_mask(array: &dyn Array) { pattern[0] = true; let mask_array = Mask::from_iter(pattern); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); assert!( !masked .is_valid(0) - .vortex_expect("is_valid should succeed 
in conformance test") + .expect("is_valid should succeed in conformance test") ); for i in 1..len { assert_eq!( masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable() ); } @@ -214,9 +213,9 @@ fn test_double_mask(array: &dyn Array) { let mask1 = Mask::from_iter(mask1_pattern.clone()); let mask2 = Mask::from_iter(mask2_pattern.clone()); - let first_masked = mask(array, &mask1).vortex_expect("mask should succeed in conformance test"); + let first_masked = mask(array, &mask1).expect("mask should succeed in conformance test"); let double_masked = - mask(&first_masked, &mask2).vortex_expect("mask should succeed in conformance test"); + mask(&first_masked, &mask2).expect("mask should succeed in conformance test"); // Elements should be null if either mask is true for i in 0..len { @@ -224,16 +223,16 @@ fn test_double_mask(array: &dyn Array) { assert!( !double_masked .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") ); } else { assert_eq!( double_masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable() ); } @@ -256,7 +255,7 @@ fn test_nullable_mask_input(array: &dyn Array) { let nullable_mask = BoolArray::new(bool_array.to_bit_buffer(), validity); let mask_array = nullable_mask.to_mask_fill_null_false(); - let masked = mask(array, &mask_array).vortex_expect("mask should succeed in conformance test"); + let masked = mask(array, &mask_array).expect("mask should succeed in conformance test"); // 
Elements are masked only if the mask is true AND valid for i in 0..len { @@ -264,16 +263,16 @@ fn test_nullable_mask_input(array: &dyn Array) { assert!( !masked .is_valid(i) - .vortex_expect("is_valid should succeed in conformance test") + .expect("is_valid should succeed in conformance test") ); } else { assert_eq!( masked .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .into_nullable() ); } diff --git a/vortex-array/src/compute/conformance/take.rs b/vortex-array/src/compute/conformance/take.rs index 32a1251bbb2..d7dd9f3a434 100644 --- a/vortex-array/src/compute/conformance/take.rs +++ b/vortex-array/src/compute/conformance/take.rs @@ -3,7 +3,6 @@ use vortex_buffer::buffer; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use crate::Array; use crate::Canonical; @@ -56,19 +55,17 @@ fn test_take_all(array: &dyn Array) { let indices = PrimitiveArray::from_iter(0..len as u64); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), len); assert_eq!(result.dtype(), array.dtype()); // Verify elements match match ( - array - .to_canonical() - .vortex_expect("to_canonical failed on array"), + array.to_canonical().expect("to_canonical failed on array"), result .to_canonical() - .vortex_expect("to_canonical failed on result"), + .expect("to_canonical failed on result"), ) { (Canonical::Primitive(orig_prim), Canonical::Primitive(result_prim)) => { assert_eq!( @@ -82,10 +79,10 @@ fn test_take_all(array: &dyn Array) { assert_eq!( array .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - 
.vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -96,7 +93,7 @@ fn test_take_none(array: &dyn Array) { let indices: PrimitiveArray = PrimitiveArray::from_iter::<[u64; 0]>([]); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), 0); assert_eq!(result.dtype(), array.dtype()); @@ -113,7 +110,7 @@ fn test_take_selective(array: &dyn Array) { let result = array .take(indices_array.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), expected_len); // Verify the taken elements @@ -121,10 +118,10 @@ fn test_take_selective(array: &dyn Array) { assert_eq!( array .scalar_at(original_idx as usize) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(result_idx) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -134,24 +131,24 @@ fn test_take_first_and_last(array: &dyn Array) { let indices = PrimitiveArray::from_iter([0u64, (len - 1) as u64]); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), 2); assert_eq!( array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); assert_eq!( array .scalar_at(len - 1) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result 
.scalar_at(1) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } @@ -171,7 +168,7 @@ fn test_take_with_nullable_indices(array: &dyn Array) { let indices = PrimitiveArray::from_option_iter(indices_vec.clone()); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), indices_vec.len()); assert_eq!( @@ -185,17 +182,17 @@ fn test_take_with_nullable_indices(array: &dyn Array) { Some(idx) => { let expected = array .scalar_at(*idx as usize) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); let actual = result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); assert_eq!(expected, actual); } None => { assert!( result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") .is_null() ); } @@ -212,17 +209,17 @@ fn test_take_repeated_indices(array: &dyn Array) { let indices = buffer![0u64, 0, 0].into_array(); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), 3); let first_elem = array .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test"); + .expect("scalar_at should succeed in conformance test"); for i in 0..3 { assert_eq!( result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), first_elem ); } @@ -232,7 +229,7 @@ fn test_empty_indices(array: &dyn Array) { let indices = PrimitiveArray::empty::(Nullability::NonNullable); let result = array .take(indices.to_array()) - .vortex_expect("take 
should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), 0); assert_eq!(result.dtype(), array.dtype()); @@ -244,7 +241,7 @@ fn test_take_reverse(array: &dyn Array) { let indices = PrimitiveArray::from_iter((0..len as u64).rev()); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), len); @@ -253,10 +250,10 @@ fn test_take_reverse(array: &dyn Array) { assert_eq!( array .scalar_at(len - 1 - i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -268,16 +265,16 @@ fn test_take_single_middle(array: &dyn Array) { let indices = PrimitiveArray::from_iter([middle_idx as u64]); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), 1); assert_eq!( array .scalar_at(middle_idx) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(0) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } @@ -296,7 +293,7 @@ fn test_take_random_unsorted(array: &dyn Array) { let indices_array = PrimitiveArray::from_iter(indices.clone()); let result = array .take(indices_array.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), indices.len()); @@ -305,10 +302,10 @@ fn test_take_random_unsorted(array: &dyn Array) { assert_eq!( array .scalar_at(idx as usize) - .vortex_expect("scalar_at should succeed 
in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -322,7 +319,7 @@ fn test_take_contiguous_range(array: &dyn Array) { let indices = PrimitiveArray::from_iter(start as u64..end as u64); let result = array .take(indices.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), end - start); @@ -331,10 +328,10 @@ fn test_take_contiguous_range(array: &dyn Array) { assert_eq!( array .scalar_at(start + i) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -358,7 +355,7 @@ fn test_take_mixed_repeated(array: &dyn Array) { let indices_array = PrimitiveArray::from_iter(indices.clone()); let result = array .take(indices_array.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); assert_eq!(result.len(), indices.len()); @@ -367,10 +364,10 @@ fn test_take_mixed_repeated(array: &dyn Array) { assert_eq!( array .scalar_at(idx as usize) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } @@ -389,7 +386,7 @@ fn test_take_large_indices(array: &dyn Array) { let indices_array = PrimitiveArray::from_iter(indices.clone()); let result = array .take(indices_array.to_array()) - .vortex_expect("take should succeed in conformance test"); + .expect("take should succeed in conformance test"); 
assert_eq!(result.len(), num_indices); @@ -399,10 +396,10 @@ fn test_take_large_indices(array: &dyn Array) { assert_eq!( array .scalar_at(expected_idx) - .vortex_expect("scalar_at should succeed in conformance test"), + .expect("scalar_at should succeed in conformance test"), result .scalar_at(i) - .vortex_expect("scalar_at should succeed in conformance test") + .expect("scalar_at should succeed in conformance test") ); } } diff --git a/vortex-array/src/compute/list_contains.rs b/vortex-array/src/compute/list_contains.rs index cabeb4a2477..ee6333d4f58 100644 --- a/vortex-array/src/compute/list_contains.rs +++ b/vortex-array/src/compute/list_contains.rs @@ -16,7 +16,6 @@ use vortex_dtype::DType; use vortex_dtype::IntegerPType; use vortex_dtype::Nullability; use vortex_dtype::match_each_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -226,7 +225,7 @@ fn constant_list_scalar_contains( values: &dyn Array, nullability: Nullability, ) -> VortexResult { - let elements = list_scalar.elements().vortex_expect("non null"); + let elements = list_scalar.elements().expect("non null"); let len = values.len(); let mut result: Option = None; diff --git a/vortex-array/src/compute/min_max.rs b/vortex-array/src/compute/min_max.rs index f514c5887d4..31bd906703a 100644 --- a/vortex-array/src/compute/min_max.rs +++ b/vortex-array/src/compute/min_max.rs @@ -8,7 +8,6 @@ use vortex_dtype::DType; use vortex_dtype::FieldNames; use vortex_dtype::Nullability; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -69,11 +68,11 @@ impl MinMaxResult { let min = scalar .as_struct() .field_by_idx(0) - .vortex_expect("missing min field"); + .expect("missing min field"); let max = scalar .as_struct() .field_by_idx(1) - .vortex_expect("missing max field"); + .expect("missing max field"); Ok(Some(MinMaxResult { min, max })) } } diff --git 
a/vortex-array/src/compute/nan_count.rs b/vortex-array/src/compute/nan_count.rs index e457eb23113..ab105fe475b 100644 --- a/vortex-array/src/compute/nan_count.rs +++ b/vortex-array/src/compute/nan_count.rs @@ -5,7 +5,6 @@ use std::sync::LazyLock; use arcref::ArcRef; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -46,7 +45,7 @@ pub fn nan_count(array: &dyn Array) -> VortexResult { .unwrap_scalar()? .as_primitive() .as_::() - .vortex_expect("NaN count should not return null")) + .expect("NaN count should not return null")) } struct NaNCount; diff --git a/vortex-array/src/compute/sum.rs b/vortex-array/src/compute/sum.rs index a917dde53e3..1e5dd8eb73c 100644 --- a/vortex-array/src/compute/sum.rs +++ b/vortex-array/src/compute/sum.rs @@ -270,7 +270,6 @@ mod test { use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; - use vortex_error::VortexExpect; use crate::IntoArray as _; use crate::arrays::BoolArray; @@ -324,7 +323,7 @@ mod test { ], DType::Primitive(PType::I32, Nullability::NonNullable), ) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); // compute sum with accumulator to populate stats sum_with_accumulator( array.as_ref(), diff --git a/vortex-array/src/display/tree.rs b/vortex-array/src/display/tree.rs index 3f9426a9fea..fe1fe4ad139 100644 --- a/vortex-array/src/display/tree.rs +++ b/vortex-array/src/display/tree.rs @@ -6,7 +6,6 @@ use std::fmt::{self}; use humansize::DECIMAL; use humansize::format_size; -use vortex_error::VortexExpect as _; use crate::Array; use crate::ArrayRef; @@ -262,7 +261,7 @@ impl<'a, 'b: 'a> TreeFormatter<'a, 'b> { let _ = self .ancestor_sizes .pop() - .vortex_expect("pushes and pops are matched"); + .expect("pushes and pops are matched"); } Ok(()) diff --git a/vortex-array/src/executor.rs b/vortex-array/src/executor.rs index 6a43f9ef169..15674d00d7f 
100644 --- a/vortex-array/src/executor.rs +++ b/vortex-array/src/executor.rs @@ -7,7 +7,6 @@ use std::fmt::Display; use std::sync::Arc; use std::sync::atomic::AtomicUsize; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; @@ -168,7 +167,7 @@ impl Executable for ArrayRef { // 2. reduce_parent (child-driven metadata-only rewrites) for child_idx in 0..array.nchildren() { - let child = array.nth_child(child_idx).vortex_expect("checked length"); + let child = array.nth_child(child_idx).expect("checked length"); if let Some(reduced_parent) = child.vtable().reduce_parent(&child, &array, child_idx)? { ctx.log(format_args!( "reduce_parent: child[{}]({}) rewrote {} -> {}", @@ -184,7 +183,7 @@ impl Executable for ArrayRef { // 3. execute_parent (child-driven optimized execution) for child_idx in 0..array.nchildren() { - let child = array.nth_child(child_idx).vortex_expect("checked length"); + let child = array.nth_child(child_idx).expect("checked length"); if let Some(executed_parent) = child .vtable() .execute_parent(&child, &array, child_idx, ctx)? 
diff --git a/vortex-array/src/expr/analysis/annotation.rs b/vortex-array/src/expr/analysis/annotation.rs index be842742a5a..6ad073cece6 100644 --- a/vortex-array/src/expr/analysis/annotation.rs +++ b/vortex-array/src/expr/analysis/annotation.rs @@ -3,7 +3,6 @@ use std::hash::Hash; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_utils::aliases::hash_map::HashMap; use vortex_utils::aliases::hash_set::HashSet; @@ -43,7 +42,7 @@ pub fn descendent_annotations( annotations: Default::default(), annotate, }; - expr.accept(&mut visitor).vortex_expect("Infallible"); + expr.accept(&mut visitor).expect("Infallible"); visitor.annotations } diff --git a/vortex-array/src/expr/analysis/immediate_access.rs b/vortex-array/src/expr/analysis/immediate_access.rs index f68cd3567dc..b0b452e15e7 100644 --- a/vortex-array/src/expr/analysis/immediate_access.rs +++ b/vortex-array/src/expr/analysis/immediate_access.rs @@ -3,7 +3,6 @@ use vortex_dtype::FieldName; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_utils::aliases::hash_set::HashSet; use crate::expr::Expression; @@ -49,7 +48,7 @@ pub fn make_free_field_annotator( if expr.child(0).is::() { return selection .normalize_to_included_fields(scope.names()) - .vortex_expect("Select fields must be valid for scope") + .expect("Select fields must be valid for scope") .into_iter() .collect(); } @@ -86,6 +85,6 @@ pub fn immediate_scope_access<'a>( ) -> HashSet { immediate_scope_accesses(expr, scope) .get(expr) - .vortex_expect("Expression missing from scope accesses, this is a internal bug") + .expect("Expression missing from scope accesses, this is a internal bug") .clone() } diff --git a/vortex-array/src/expr/analysis/labeling.rs b/vortex-array/src/expr/analysis/labeling.rs index a7ad0cd87f9..b21939374e9 100644 --- a/vortex-array/src/expr/analysis/labeling.rs +++ b/vortex-array/src/expr/analysis/labeling.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: 
Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_utils::aliases::hash_map::HashMap; @@ -41,7 +40,7 @@ pub fn label_tree( merge_child: &mut merge_child, }; expr.accept(&mut visitor) - .vortex_expect("LabelingVisitor is infallible"); + .expect("LabelingVisitor is infallible"); visitor.labels } @@ -70,10 +69,7 @@ where let self_label = (self.self_label)(node); let final_label = node.children().iter().fold(self_label, |acc, child| { - let child_label = self - .labels - .get(child) - .vortex_expect("child must have label"); + let child_label = self.labels.get(child).expect("child must have label"); (self.merge_child)(acc, child_label) }); diff --git a/vortex-array/src/expr/exprs/between/kernel.rs b/vortex-array/src/expr/exprs/between/kernel.rs index 55689bfb622..8856b21120a 100644 --- a/vortex-array/src/expr/exprs/between/kernel.rs +++ b/vortex-array/src/expr/exprs/between/kernel.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use super::Between; @@ -64,7 +63,7 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let children = scalar_fn_array.children(); let lower = children[1].as_ref(); let upper = children[2].as_ref(); @@ -98,7 +97,7 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let children = scalar_fn_array.children(); let lower = children[1].as_ref(); let upper = children[2].as_ref(); diff --git a/vortex-array/src/expr/exprs/between/mod.rs b/vortex-array/src/expr/exprs/between/mod.rs index 5f870e641f1..07491b76321 100644 --- a/vortex-array/src/expr/exprs/between/mod.rs +++ b/vortex-array/src/expr/exprs/between/mod.rs 
@@ -11,7 +11,6 @@ pub use kernel::*; use prost::Message; use vortex_dtype::DType; use vortex_dtype::DType::Bool; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -361,7 +360,7 @@ pub fn between( ) -> Expression { Between .try_new_expr(options, [arr, lower, upper]) - .vortex_expect("Failed to create Between expression") + .expect("Failed to create Between expression") } #[cfg(test)] diff --git a/vortex-array/src/expr/exprs/binary/mod.rs b/vortex-array/src/expr/exprs/binary/mod.rs index cfaf19ec651..61d3f79b359 100644 --- a/vortex-array/src/expr/exprs/binary/mod.rs +++ b/vortex-array/src/expr/exprs/binary/mod.rs @@ -5,7 +5,6 @@ use std::fmt::Formatter; use prost::Message; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_proto::expr as pb; @@ -300,7 +299,7 @@ impl VTable for Binary { pub fn eq(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Eq, [lhs, rhs]) - .vortex_expect("Failed to create Eq binary expression") + .expect("Failed to create Eq binary expression") } /// Create a new [`Binary`] using the [`NotEq`](crate::expr::exprs::operators::Operator::NotEq) operator. @@ -324,7 +323,7 @@ pub fn eq(lhs: Expression, rhs: Expression) -> Expression { pub fn not_eq(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::NotEq, [lhs, rhs]) - .vortex_expect("Failed to create NotEq binary expression") + .expect("Failed to create NotEq binary expression") } /// Create a new [`Binary`] using the [`Gte`](crate::expr::exprs::operators::Operator::Gte) operator. 
@@ -348,7 +347,7 @@ pub fn not_eq(lhs: Expression, rhs: Expression) -> Expression { pub fn gt_eq(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Gte, [lhs, rhs]) - .vortex_expect("Failed to create Gte binary expression") + .expect("Failed to create Gte binary expression") } /// Create a new [`Binary`] using the [`Gt`](crate::expr::exprs::operators::Operator::Gt) operator. @@ -372,7 +371,7 @@ pub fn gt_eq(lhs: Expression, rhs: Expression) -> Expression { pub fn gt(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Gt, [lhs, rhs]) - .vortex_expect("Failed to create Gt binary expression") + .expect("Failed to create Gt binary expression") } /// Create a new [`Binary`] using the [`Lte`](crate::expr::exprs::operators::Operator::Lte) operator. @@ -396,7 +395,7 @@ pub fn gt(lhs: Expression, rhs: Expression) -> Expression { pub fn lt_eq(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Lte, [lhs, rhs]) - .vortex_expect("Failed to create Lte binary expression") + .expect("Failed to create Lte binary expression") } /// Create a new [`Binary`] using the [`Lt`](crate::expr::exprs::operators::Operator::Lt) operator. @@ -420,7 +419,7 @@ pub fn lt_eq(lhs: Expression, rhs: Expression) -> Expression { pub fn lt(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Lt, [lhs, rhs]) - .vortex_expect("Failed to create Lt binary expression") + .expect("Failed to create Lt binary expression") } /// Create a new [`Binary`] using the [`Or`](crate::expr::exprs::operators::Operator::Or) operator. @@ -442,7 +441,7 @@ pub fn lt(lhs: Expression, rhs: Expression) -> Expression { pub fn or(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Or, [lhs, rhs]) - .vortex_expect("Failed to create Or binary expression") + .expect("Failed to create Or binary expression") } /// Collects a list of `or`ed values into a single expression using a balanced tree. 
@@ -478,7 +477,7 @@ where pub fn and(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::And, [lhs, rhs]) - .vortex_expect("Failed to create And binary expression") + .expect("Failed to create And binary expression") } /// Collects a list of `and`ed values into a single expression using a balanced tree. @@ -552,7 +551,7 @@ where pub fn checked_add(lhs: Expression, rhs: Expression) -> Expression { Binary .try_new_expr(Operator::Add, [lhs, rhs]) - .vortex_expect("Failed to create Add binary expression") + .expect("Failed to create Add binary expression") } #[cfg(test)] @@ -735,7 +734,7 @@ mod tests { // Test using compare compute function directly let result_equal = compare(&lhs_struct, &rhs_struct_equal, compute::Operator::Eq).unwrap(); assert_eq!( - result_equal.scalar_at(0).vortex_expect("value"), + result_equal.scalar_at(0).expect("value"), Scalar::bool(true, Nullability::NonNullable), "Equal structs should be equal" ); @@ -743,7 +742,7 @@ mod tests { let result_different = compare(&lhs_struct, &rhs_struct_different, compute::Operator::Eq).unwrap(); assert_eq!( - result_different.scalar_at(0).vortex_expect("value"), + result_different.scalar_at(0).expect("value"), Scalar::bool(false, Nullability::NonNullable), "Different structs should not be equal" ); diff --git a/vortex-array/src/expr/exprs/cast/mod.rs b/vortex-array/src/expr/exprs/cast/mod.rs index fa8b6c5b7f2..d95257b962e 100644 --- a/vortex-array/src/expr/exprs/cast/mod.rs +++ b/vortex-array/src/expr/exprs/cast/mod.rs @@ -8,7 +8,6 @@ use std::fmt::Formatter; pub use kernel::*; use prost::Message; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -105,7 +104,7 @@ impl VTable for Cast { let input = args .inputs .pop() - .vortex_expect("missing input for Cast expression"); + .expect("missing input for Cast expression"); let Some(columnar) = input.as_opt::() else { return input @@ 
-233,7 +232,7 @@ fn cast_constant(array: &ConstantArray, dtype: &DType) -> VortexResult Expression { Cast.try_new_expr(target, [child]) - .vortex_expect("Failed to create Cast expression") + .expect("Failed to create Cast expression") } #[cfg(test)] @@ -242,7 +241,6 @@ mod tests { use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; - use vortex_error::VortexExpect as _; use super::cast; use crate::IntoArray; @@ -267,7 +265,7 @@ mod tests { fn replace_children() { let expr = cast(root(), DType::Bool(Nullability::Nullable)); expr.with_children(vec![root()]) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); } #[test] diff --git a/vortex-array/src/expr/exprs/dynamic.rs b/vortex-array/src/expr/exprs/dynamic.rs index aaf8cb1c5ea..e4c3eb93e72 100644 --- a/vortex-array/src/expr/exprs/dynamic.rs +++ b/vortex-array/src/expr/exprs/dynamic.rs @@ -10,7 +10,6 @@ use std::sync::Arc; use parking_lot::Mutex; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -196,7 +195,7 @@ impl DynamicComparisonExpr { pub fn scalar(&self) -> Option { (self.rhs.value)().map(|v| { Scalar::try_new(self.rhs.dtype.clone(), Some(v)) - .vortex_expect("`DynamicComparisonExpr` was invalid") + .expect("`DynamicComparisonExpr` was invalid") }) } } @@ -241,9 +240,8 @@ struct Rhs { impl Rhs { pub fn scalar(&self) -> Option { - (self.value)().map(|v| { - Scalar::try_new(self.dtype.clone(), Some(v)).vortex_expect("`Rhs` was invalid") - }) + (self.value)() + .map(|v| Scalar::try_new(self.dtype.clone(), Some(v)).expect("`Rhs` was invalid")) } } @@ -280,7 +278,7 @@ impl DynamicExprUpdates { } let mut visitor = Visitor::default(); - expr.accept(&mut visitor).vortex_expect("Infallible"); + expr.accept(&mut visitor).expect("Infallible"); if visitor.0.is_empty() { return None; @@ -292,7 +290,7 @@ impl DynamicExprUpdates { .map(|expr| { (expr.rhs.value)().map(|v| { 
Scalar::try_new(expr.rhs.dtype.clone(), Some(v)) - .vortex_expect("`DynamicExprUpdates` was invalid") + .expect("`DynamicExprUpdates` was invalid") }) }) .collect(); diff --git a/vortex-array/src/expr/exprs/fill_null/kernel.rs b/vortex-array/src/expr/exprs/fill_null/kernel.rs index bb6b5b7b62f..a160fdab015 100644 --- a/vortex-array/src/expr/exprs/fill_null/kernel.rs +++ b/vortex-array/src/expr/exprs/fill_null/kernel.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -109,10 +108,10 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let fill_value = scalar_fn_array.children()[1] .as_constant() - .vortex_expect("fill_null fill_value must be constant"); + .expect("fill_null fill_value must be constant"); if let Some(result) = precondition(&**array, &fill_value)? { return Ok(Some(result)); } @@ -143,10 +142,10 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let fill_value = scalar_fn_array.children()[1] .as_constant() - .vortex_expect("fill_null fill_value must be constant"); + .expect("fill_null fill_value must be constant"); if let Some(result) = precondition(&**array, &fill_value)? 
{ return Ok(Some(result)); } diff --git a/vortex-array/src/expr/exprs/fill_null/mod.rs b/vortex-array/src/expr/exprs/fill_null/mod.rs index 24ea7483da1..b2e0b28af26 100644 --- a/vortex-array/src/expr/exprs/fill_null/mod.rs +++ b/vortex-array/src/expr/exprs/fill_null/mod.rs @@ -197,7 +197,6 @@ mod tests { use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::PType; - use vortex_error::VortexExpect; use super::fill_null; use crate::IntoArray; @@ -221,7 +220,7 @@ mod tests { fn replace_children() { let expr = fill_null(root(), lit(0i32)); expr.with_children(vec![root(), lit(0i32)]) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); } #[test] diff --git a/vortex-array/src/expr/exprs/get_item.rs b/vortex-array/src/expr/exprs/get_item.rs index 76d88a5b23e..b20da59a77e 100644 --- a/vortex-array/src/expr/exprs/get_item.rs +++ b/vortex-array/src/expr/exprs/get_item.rs @@ -8,7 +8,6 @@ use vortex_dtype::DType; use vortex_dtype::FieldName; use vortex_dtype::FieldPath; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_proto::expr as pb; @@ -109,7 +108,7 @@ impl VTable for GetItem { let input = args .inputs .pop() - .vortex_expect("missing input for GetItem expression") + .expect("missing input for GetItem expression") .execute::(args.ctx)?; let field = input.unmasked_field_by_name(field_name).cloned()?; diff --git a/vortex-array/src/expr/exprs/is_null.rs b/vortex-array/src/expr/exprs/is_null.rs index eaec3d0f73a..82b4c2dd35a 100644 --- a/vortex-array/src/expr/exprs/is_null.rs +++ b/vortex-array/src/expr/exprs/is_null.rs @@ -5,7 +5,6 @@ use std::fmt::Formatter; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; @@ -76,7 +75,7 @@ impl VTable for IsNull { } fn execute(&self, _data: &Self::Options, mut args: ExecutionArgs) 
-> VortexResult { - let child = args.inputs.pop().vortex_expect("Missing input child"); + let child = args.inputs.pop().expect("Missing input child"); if let Some(scalar) = child.as_constant() { return Ok(ConstantArray::new(scalar.is_null(), args.row_count).into_array()); } @@ -129,7 +128,6 @@ mod tests { use vortex_dtype::FieldPath; use vortex_dtype::FieldPathSet; use vortex_dtype::Nullability; - use vortex_error::VortexExpect as _; use vortex_utils::aliases::hash_map::HashMap; use vortex_utils::aliases::hash_set::HashSet; @@ -160,7 +158,7 @@ mod tests { fn replace_children() { let expr = is_null(root()); expr.with_children([root()]) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); } #[test] diff --git a/vortex-array/src/expr/exprs/like/kernel.rs b/vortex-array/src/expr/exprs/like/kernel.rs index 117a3dda912..e1b452fbc10 100644 --- a/vortex-array/src/expr/exprs/like/kernel.rs +++ b/vortex-array/src/expr/exprs/like/kernel.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::Array; @@ -69,7 +68,7 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let children = scalar_fn_array.children(); let pattern = &*children[1]; let options = *parent.options; @@ -99,7 +98,7 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let children = scalar_fn_array.children(); let pattern = &*children[1]; let options = *parent.options; diff --git a/vortex-array/src/expr/exprs/mask/mod.rs b/vortex-array/src/expr/exprs/mask/mod.rs index 3c09b36dae9..48196962c67 100644 --- a/vortex-array/src/expr/exprs/mask/mod.rs +++ 
b/vortex-array/src/expr/exprs/mask/mod.rs @@ -7,7 +7,6 @@ use std::fmt::Formatter; pub use kernel::*; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -120,7 +119,7 @@ impl VTable for Mask { let mask_lit = mask_lit .as_bool() .value() - .vortex_expect("Mask must be non-nullable"); + .expect("Mask must be non-nullable"); if mask_lit { // Mask is all true, so the output is just the input. @@ -195,7 +194,6 @@ mod test { use vortex_dtype::DType; use vortex_dtype::Nullability::Nullable; use vortex_dtype::PType; - use vortex_error::VortexExpect; use crate::expr::exprs::literal::lit; use crate::expr::exprs::mask::mask; @@ -210,13 +208,13 @@ mod test { let mask_true_expr = mask(input_expr.clone(), true_mask_expr); let simplified_true = mask_true_expr .optimize(&DType::Null) - .vortex_expect("Simplification"); + .expect("Simplification"); assert_eq!(&simplified_true, &input_expr); let mask_false_expr = mask(input_expr, false_mask_expr); let simplified_false = mask_false_expr .optimize(&DType::Null) - .vortex_expect("Simplification"); + .expect("Simplification"); let expected_null_expr = lit(Scalar::null(DType::Primitive(PType::U32, Nullable))); assert_eq!(&simplified_false, &expected_null_expr); } diff --git a/vortex-array/src/expr/exprs/merge.rs b/vortex-array/src/expr/exprs/merge.rs index 69cef878d84..981e3cc91d2 100644 --- a/vortex-array/src/expr/exprs/merge.rs +++ b/vortex-array/src/expr/exprs/merge.rs @@ -11,7 +11,6 @@ use vortex_dtype::DType; use vortex_dtype::FieldNames; use vortex_dtype::Nullability; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_session::VortexSession; @@ -200,9 +199,7 @@ impl VTable for Merge { ) } - let child_dtype = child_dtype - .as_struct_fields_opt() - .vortex_expect("expected struct"); + let child_dtype = 
child_dtype.as_struct_fields_opt().expect("expected struct"); for name in child_dtype.names().iter() { if let Some(idx) = names.iter().position(|n| n == name) { diff --git a/vortex-array/src/expr/exprs/not/mod.rs b/vortex-array/src/expr/exprs/not/mod.rs index 1bb1bb0a8a6..57fed6860fd 100644 --- a/vortex-array/src/expr/exprs/not/mod.rs +++ b/vortex-array/src/expr/exprs/not/mod.rs @@ -7,7 +7,6 @@ use std::fmt::Formatter; pub use kernel::*; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_session::VortexSession; @@ -85,7 +84,7 @@ impl VTable for Not { } fn execute(&self, _data: &Self::Options, mut args: ExecutionArgs) -> VortexResult { - let child = args.inputs.pop().vortex_expect("Missing input child"); + let child = args.inputs.pop().expect("Missing input child"); // For constant boolean if let Some(scalar) = child.as_constant() { diff --git a/vortex-array/src/expr/exprs/root.rs b/vortex-array/src/expr/exprs/root.rs index d6674367da5..7ea5c841c88 100644 --- a/vortex-array/src/expr/exprs/root.rs +++ b/vortex-array/src/expr/exprs/root.rs @@ -5,7 +5,6 @@ use std::fmt::Formatter; use vortex_dtype::DType; use vortex_dtype::FieldPath; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_session::VortexSession; @@ -98,7 +97,7 @@ impl VTable for Root { /// This is commonly used as the starting point for field access and other operations. pub fn root() -> Expression { Root.try_new_expr(EmptyOptions, vec![]) - .vortex_expect("Failed to create Root expression") + .expect("Failed to create Root expression") } /// Return whether the expression is a root expression. 
diff --git a/vortex-array/src/expr/exprs/select.rs b/vortex-array/src/expr/exprs/select.rs index 08ee809cf23..9816f3cde44 100644 --- a/vortex-array/src/expr/exprs/select.rs +++ b/vortex-array/src/expr/exprs/select.rs @@ -9,7 +9,6 @@ use prost::Message; use vortex_dtype::DType; use vortex_dtype::FieldName; use vortex_dtype::FieldNames; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -145,7 +144,7 @@ impl VTable for Select { let child = args .inputs .pop() - .vortex_expect("Missing input child") + .expect("Missing input child") .execute::(args.ctx)?; let result = match selection { @@ -186,7 +185,7 @@ impl VTable for Select { let all_included_fields_are_nullable = included_fields.iter().all(|name| { struct_fields .field(name) - .vortex_expect( + .expect( "`normalize_to_included_fields` checks that the included fields already exist \ in `struct_fields`", ) @@ -250,7 +249,7 @@ impl VTable for Select { pub fn select(field_names: impl Into, child: Expression) -> Expression { Select .try_new_expr(FieldSelection::Include(field_names.into()), [child]) - .vortex_expect("Failed to create Select expression") + .expect("Failed to create Select expression") } /// Creates an expression that excludes specific fields from an array. 
@@ -264,7 +263,7 @@ pub fn select(field_names: impl Into, child: Expression) -> Expressi pub fn select_exclude(fields: impl Into, child: Expression) -> Expression { Select .try_new_expr(FieldSelection::Exclude(fields.into()), [child]) - .vortex_expect("Failed to create Select expression") + .expect("Failed to create Select expression") } impl FieldSelection { diff --git a/vortex-array/src/expr/exprs/zip/kernel.rs b/vortex-array/src/expr/exprs/zip/kernel.rs index b0617023377..f1f19a4d757 100644 --- a/vortex-array/src/expr/exprs/zip/kernel.rs +++ b/vortex-array/src/expr/exprs/zip/kernel.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; @@ -69,7 +68,7 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let children = scalar_fn_array.children(); let if_false = &*children[1]; let mask_array = &*children[2]; @@ -100,7 +99,7 @@ where } let scalar_fn_array = parent .as_opt::() - .vortex_expect("ExactScalarFn matcher confirmed ScalarFnArray"); + .expect("ExactScalarFn matcher confirmed ScalarFnArray"); let children = scalar_fn_array.children(); let if_false = &*children[1]; let mask_array = &*children[2]; diff --git a/vortex-array/src/expr/mod.rs b/vortex-array/src/expr/mod.rs index 45916e4812c..e12e210bcd8 100644 --- a/vortex-array/src/expr/mod.rs +++ b/vortex-array/src/expr/mod.rs @@ -16,7 +16,6 @@ use std::sync::Arc; use arcref::ArcRef; use vortex_dtype::FieldName; -use vortex_error::VortexExpect; use vortex_utils::aliases::hash_set::HashSet; use crate::expr::traversal::NodeExt; @@ -62,7 +61,7 @@ impl VortexExprExt for Expression { let mut collector = ReferenceCollector::new(); // The collector is infallible, so we can unwrap the result self.accept(&mut collector) - .vortex_expect("reference 
collector should never fail"); + .expect("reference collector should never fail"); collector.into_fields() } } diff --git a/vortex-array/src/expr/optimize.rs b/vortex-array/src/expr/optimize.rs index 0089c510f1e..66883119bd1 100644 --- a/vortex-array/src/expr/optimize.rs +++ b/vortex-array/src/expr/optimize.rs @@ -8,7 +8,6 @@ use std::sync::Arc; use itertools::Itertools; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_utils::aliases::hash_map::HashMap; @@ -82,7 +81,7 @@ impl Expression { let reduced_expr = reduced .as_any() .downcast_ref::() - .vortex_expect("ReduceNode not an ExpressionReduceNode") + .expect("ReduceNode not an ExpressionReduceNode") .expression .clone(); current = reduced_expr; @@ -283,7 +282,7 @@ impl ReduceCtx for ExpressionReduceCtx { .map(|c| { c.as_any() .downcast_ref::() - .vortex_expect("ReduceNode not an ExpressionReduceNode") + .expect("ReduceNode not an ExpressionReduceNode") .expression .clone() }) diff --git a/vortex-array/src/expr/scalar_fn.rs b/vortex-array/src/expr/scalar_fn.rs index 53ae32c3a1b..4bc316f4a46 100644 --- a/vortex-array/src/expr/scalar_fn.rs +++ b/vortex-array/src/expr/scalar_fn.rs @@ -10,7 +10,6 @@ use std::hash::Hasher; use std::ops::Deref; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_utils::debug_with::DebugWith; @@ -92,14 +91,14 @@ impl ScalarFn { self.options() .as_any() .downcast_ref::() - .vortex_expect("Expression options type mismatch") + .expect("Expression options type mismatch") }) } /// Returns the typed options for this `ScalarFn` if it matches the given vtable type. pub fn as_(&self) -> &V::Options { self.as_opt::() - .vortex_expect("Expression options type mismatch") + .expect("Expression options type mismatch") } /// Signature information for this expression. 
pub fn signature(&self) -> ExpressionSignature<'_> { diff --git a/vortex-array/src/expr/stats/precision.rs b/vortex-array/src/expr/stats/precision.rs index e0d1a1c7f92..af6608ba05a 100644 --- a/vortex-array/src/expr/stats/precision.rs +++ b/vortex-array/src/expr/stats/precision.rs @@ -6,7 +6,6 @@ use std::fmt::Display; use std::fmt::Formatter; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::expr::stats::precision::Precision::Exact; @@ -159,9 +158,7 @@ impl Precision { /// Convert this [`Precision`] into a [`Precision`] with the given /// [`DType`]. pub fn into_scalar(self, dtype: DType) -> Precision { - self.map(|v| { - Scalar::try_new(dtype, Some(v)).vortex_expect("`Precision` was invalid") - }) + self.map(|v| Scalar::try_new(dtype, Some(v)).expect("`Precision` was invalid")) } } @@ -170,8 +167,7 @@ impl Precision<&ScalarValue> { /// [`DType`]. pub fn into_scalar(self, dtype: DType) -> Precision { self.map(|v| { - Scalar::try_new(dtype, Some(v.clone())) - .vortex_expect("`Precision` was invalid") + Scalar::try_new(dtype, Some(v.clone())).expect("`Precision` was invalid") }) } } diff --git a/vortex-array/src/expr/transform/partition.rs b/vortex-array/src/expr/transform/partition.rs index 44c40986da9..b5f53aef7d9 100644 --- a/vortex-array/src/expr/transform/partition.rs +++ b/vortex-array/src/expr/transform/partition.rs @@ -10,7 +10,6 @@ use vortex_dtype::FieldName; use vortex_dtype::FieldNames; use vortex_dtype::Nullability; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_utils::aliases::hash_map::HashMap; @@ -173,10 +172,7 @@ where match self.annotations.get(&node) { // If this expression only accesses a single field, then we can skip the children Some(annotations) if annotations.len() == 1 => { - let annotation = annotations - .iter() - .next() - .vortex_expect("expected one field"); + let annotation = annotations.iter().next().expect("expected one 
field"); let sub_exprs = self.sub_expressions.entry(annotation.clone()).or_default(); let idx = sub_exprs.len(); sub_exprs.push(node.clone()); diff --git a/vortex-array/src/expr/transform/replace.rs b/vortex-array/src/expr/transform/replace.rs index 3c2b039fa44..97820b607e1 100644 --- a/vortex-array/src/expr/transform/replace.rs +++ b/vortex-array/src/expr/transform/replace.rs @@ -3,7 +3,6 @@ use vortex_dtype::Nullability; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use crate::expr::Expression; use crate::expr::exprs::get_item::col; @@ -27,7 +26,7 @@ pub fn replace(expr: Expression, needle: &Expression, replacement: Expression) - Ok(Transformed::no(node)) } }) - .vortex_expect("ReplaceVisitor should not fail") + .expect("ReplaceVisitor should not fail") .into_inner() } diff --git a/vortex-array/src/expr/traversal/fold.rs b/vortex-array/src/expr/traversal/fold.rs index 623837fc06d..cd8b553bdd1 100644 --- a/vortex-array/src/expr/traversal/fold.rs +++ b/vortex-array/src/expr/traversal/fold.rs @@ -140,7 +140,7 @@ impl NodeFolderContext for NodeFolderContextWrapper<'_, T> { #[cfg(test)] mod tests { - use vortex_error::VortexExpect; + use vortex_error::vortex_bail; use super::*; @@ -160,10 +160,7 @@ mod tests { fn visit_down(&mut self, node: &'_ Self::NodeTy) -> VortexResult> { if let Some(scalar) = node.as_opt::() { - let v = scalar - .as_primitive() - .typed_value::() - .vortex_expect("i32"); + let v = scalar.as_primitive().typed_value::().expect("i32"); if v == 5 { return Ok(FoldDown::Stop(5)); @@ -185,10 +182,7 @@ mod tests { children: Vec, ) -> VortexResult> { if let Some(scalar) = node.as_opt::() { - let v = scalar - .as_primitive() - .typed_value::() - .vortex_expect("i32"); + let v = scalar.as_primitive().typed_value::().expect("i32"); Ok(FoldUp::Continue(v)) } else if let Some(operator) = node.as_opt::() { if *operator == Operator::Add { diff --git a/vortex-array/src/expr/vtable.rs b/vortex-array/src/expr/vtable.rs index 
3c48a5bb0d4..1530ae74606 100644 --- a/vortex-array/src/expr/vtable.rs +++ b/vortex-array/src/expr/vtable.rs @@ -12,7 +12,6 @@ use std::sync::Arc; use arcref::ArcRef; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_session::VortexSession; @@ -322,7 +321,7 @@ pub trait VTableExt: VTable { options: Self::Options, children: impl IntoIterator, ) -> Expression { - Self::try_new_expr(self, options, children).vortex_expect("Failed to create expression") + Self::try_new_expr(self, options, children).expect("Failed to create expression") } /// Try to create a new expression with this vtable and the given options and children. @@ -435,7 +434,7 @@ impl DynExprVTable for VTableAdapter { fn options_clone(&self, options: &dyn Any) -> Box { let options = options .downcast_ref::() - .vortex_expect("Failed to downcast expression options to expected type"); + .expect("Failed to downcast expression options to expected type"); Box::new(options.clone()) } @@ -585,7 +584,7 @@ impl DynExprVTable for VTableAdapter { fn downcast(options: &dyn Any) -> &V::Options { options .downcast_ref::() - .vortex_expect("Invalid options type for expression") + .expect("Invalid options type for expression") } mod private { diff --git a/vortex-array/src/patches.rs b/vortex-array/src/patches.rs index 3dae3bdabbe..3c23a4920a6 100644 --- a/vortex-array/src/patches.rs +++ b/vortex-array/src/patches.rs @@ -18,7 +18,6 @@ use vortex_dtype::PType; use vortex_dtype::UnsignedPType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexError; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -977,7 +976,7 @@ unsafe fn apply_patches_to_buffer_inner( } } -fn take_map, T: NativePType>( +fn take_map( indices: &[I], take_indices: PrimitiveArray, indices_offset: usize, @@ -987,13 +986,15 @@ fn take_map, T: NativePType>( ) -> 
VortexResult> where usize: TryFrom, - VortexError: From<>::Error>, + I: NativePType + Hash + Eq + TryFrom, + >::Error: Debug, + T: NativePType, { let take_indices_len = take_indices.len(); let take_indices_validity = take_indices.validity(); let take_indices_validity_mask = take_indices_validity.to_mask(take_indices_len); let take_indices = take_indices.as_slice::(); - let offset_i = I::try_from(indices_offset)?; + let offset_i = I::try_from(indices_offset).expect("Indices offset should fit"); let sparse_index_to_value_index: HashMap = indices .iter() diff --git a/vortex-array/src/scalar/arbitrary.rs b/vortex-array/src/scalar/arbitrary.rs index f777c654e64..26182246139 100644 --- a/vortex-array/src/scalar/arbitrary.rs +++ b/vortex-array/src/scalar/arbitrary.rs @@ -18,7 +18,6 @@ use vortex_dtype::NativeDecimalType; use vortex_dtype::PType; use vortex_dtype::half::f16; use vortex_dtype::match_each_decimal_value_type; -use vortex_error::VortexExpect; use crate::scalar::DecimalValue; use crate::scalar::PValue; @@ -40,15 +39,15 @@ pub fn random_scalar(u: &mut Unstructured, dtype: &DType) -> Result { Ok(match dtype { DType::Null => Scalar::null(dtype.clone()), DType::Bool(_) => Scalar::try_new(dtype.clone(), Some(ScalarValue::Bool(u.arbitrary()?))) - .vortex_expect("unable to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::Primitive(p, _) => Scalar::try_new( dtype.clone(), Some(ScalarValue::Primitive(random_pvalue(u, p)?)), ) - .vortex_expect("unable to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::Decimal(decimal_type, _) => { Scalar::try_new(dtype.clone(), Some(random_decimal(u, decimal_type)?)) - .vortex_expect("unable to construct random `Scalar`_") + .expect("unable to construct random `Scalar`_") } DType::Utf8(_) => Scalar::try_new( dtype.clone(), @@ -56,14 +55,14 @@ pub fn random_scalar(u: &mut Unstructured, dtype: &DType) -> Result { u.arbitrary::()?, ))), ) - .vortex_expect("unable 
to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::Binary(_) => Scalar::try_new( dtype.clone(), Some(ScalarValue::Binary(ByteBuffer::from( u.arbitrary::>()?, ))), ) - .vortex_expect("unable to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::Struct(sdt, _) => Scalar::try_new( dtype.clone(), Some(ScalarValue::List( @@ -72,7 +71,7 @@ pub fn random_scalar(u: &mut Unstructured, dtype: &DType) -> Result { .collect::>>()?, )), ) - .vortex_expect("unable to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::List(edt, _) => Scalar::try_new( dtype.clone(), Some(ScalarValue::List( @@ -85,7 +84,7 @@ pub fn random_scalar(u: &mut Unstructured, dtype: &DType) -> Result { .collect::>>()?, )), ) - .vortex_expect("unable to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::FixedSizeList(edt, size, _) => Scalar::try_new( dtype.clone(), Some(ScalarValue::List( @@ -94,7 +93,7 @@ pub fn random_scalar(u: &mut Unstructured, dtype: &DType) -> Result { .collect::>>()?, )), ) - .vortex_expect("unable to construct random `Scalar`_"), + .expect("unable to construct random `Scalar`_"), DType::Extension(..) => { unreachable!("Can't yet generate arbitrary scalars for ext dtype") } diff --git a/vortex-array/src/scalar/cast.rs b/vortex-array/src/scalar/cast.rs index c592044bf02..3c20ec1ef5a 100644 --- a/vortex-array/src/scalar/cast.rs +++ b/vortex-array/src/scalar/cast.rs @@ -4,7 +4,6 @@ //! Scalar casting between [`DType`]s. 
use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; @@ -65,6 +64,6 @@ impl Scalar { pub fn into_nullable(self) -> Scalar { let (dtype, value) = self.into_parts(); Self::try_new(dtype.as_nullable(), value) - .vortex_expect("Casting to nullable should always succeed") + .expect("Casting to nullable should always succeed") } } diff --git a/vortex-array/src/scalar/constructor.rs b/vortex-array/src/scalar/constructor.rs index cd4ca17554b..ea64b797000 100644 --- a/vortex-array/src/scalar/constructor.rs +++ b/vortex-array/src/scalar/constructor.rs @@ -15,7 +15,6 @@ use vortex_dtype::NativePType; use vortex_dtype::Nullability; use vortex_dtype::PType; use vortex_dtype::extension::ExtDTypeVTable; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use crate::scalar::DecimalValue; @@ -28,7 +27,7 @@ impl Scalar { /// Creates a new boolean scalar with the given value and nullability. pub fn bool(value: bool, nullability: Nullability) -> Self { Self::try_new(DType::Bool(nullability), Some(ScalarValue::Bool(value))) - .vortex_expect("unable to construct a boolean `Scalar`") + .expect("unable to construct a boolean `Scalar`") } /// Creates a new primitive scalar from a native value. @@ -45,7 +44,7 @@ impl Scalar { DType::Primitive(ptype, nullability), Some(ScalarValue::Primitive(value)), ) - .vortex_expect("unable to construct a primitive `Scalar`") + .expect("unable to construct a primitive `Scalar`") } /// Creates a new decimal scalar with the given value, precision, scale, and nullability. @@ -58,7 +57,7 @@ impl Scalar { DType::Decimal(decimal_type, nullability), Some(ScalarValue::Decimal(value)), ) - .vortex_expect("unable to construct a decimal `Scalar`") + .expect("unable to construct a decimal `Scalar`") } /// Creates a new UTF-8 scalar from a string-like value. 
@@ -89,7 +88,7 @@ impl Scalar { DType::Utf8(nullability), Some(ScalarValue::Utf8(str.try_into()?)), ) - .vortex_expect("unable to construct a UTF-8 `Scalar`")) + .expect("unable to construct a UTF-8 `Scalar`")) } /// Creates a new binary scalar from a byte buffer. @@ -98,7 +97,7 @@ impl Scalar { DType::Binary(nullability), Some(ScalarValue::Binary(buffer.into())), ) - .vortex_expect("unable to construct a binary `Scalar`") + .expect("unable to construct a binary `Scalar`") } /// Creates a new list scalar with the given element type and children. @@ -159,7 +158,7 @@ impl Scalar { let size: u32 = children .len() .try_into() - .vortex_expect("tried to create a list that was too large"); + .expect("tried to create a list that was too large"); let dtype = match list_kind { ListKind::Variable => DType::List(element_dtype, nullability), @@ -167,15 +166,15 @@ impl Scalar { }; Self::try_new(dtype, Some(ScalarValue::List(children))) - .vortex_expect("unable to construct a list `Scalar`") + .expect("unable to construct a list `Scalar`") } /// Creates a new extension scalar wrapping the given storage value. pub fn extension(options: V::Metadata, value: Scalar) -> Self { let ext_dtype = ExtDType::::try_new(options, value.dtype().clone()) - .vortex_expect("Failed to create extension dtype"); + .expect("Failed to create extension dtype"); Self::try_new(DType::Extension(ext_dtype.erased()), value.into_value()) - .vortex_expect("unable to construct an extension `Scalar`") + .expect("unable to construct an extension `Scalar`") } /// Creates a new extension scalar wrapping the given storage value. 
@@ -186,7 +185,7 @@ impl Scalar { pub fn extension_ref(ext_dtype: ExtDTypeRef, value: Scalar) -> Self { assert_eq!(ext_dtype.storage_dtype(), value.dtype()); Self::try_new(DType::Extension(ext_dtype), value.into_value()) - .vortex_expect("unable to construct an extension `Scalar`") + .expect("unable to construct an extension `Scalar`") } } diff --git a/vortex-array/src/scalar/convert/into_scalar.rs b/vortex-array/src/scalar/convert/into_scalar.rs index c5e906336c0..e05d4813cab 100644 --- a/vortex-array/src/scalar/convert/into_scalar.rs +++ b/vortex-array/src/scalar/convert/into_scalar.rs @@ -11,7 +11,6 @@ use vortex_dtype::DType; use vortex_dtype::DecimalDType; use vortex_dtype::NativeDType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use crate::scalar::DecimalValue; use crate::scalar::Scalar; @@ -39,7 +38,7 @@ macro_rules! impl_into_scalar { DType::$variant(Nullability::NonNullable), Some(ScalarValue::from(value)), ) - .vortex_expect("unable to construct a `Scalar`") + .expect("unable to construct a `Scalar`") } } @@ -49,7 +48,7 @@ macro_rules! 
impl_into_scalar { DType::$variant(Nullability::Nullable), value.map(ScalarValue::from), ) - .vortex_expect("unable to construct a `Scalar`") + .expect("unable to construct a `Scalar`") } } }; @@ -100,7 +99,7 @@ where DType::List(Arc::from(T::dtype()), Nullability::NonNullable), Some(ScalarValue::from(vec)), ) - .vortex_expect("unable to construct a list `Scalar` from `Vec`") + .expect("unable to construct a list `Scalar` from `Vec`") } } @@ -114,7 +113,7 @@ where DType::List(Arc::from(T::dtype()), Nullability::Nullable), vec.map(ScalarValue::from), ) - .vortex_expect("unable to construct a list `Scalar` from `Option>`") + .expect("unable to construct a list `Scalar` from `Option>`") } } @@ -135,7 +134,7 @@ impl From for Scalar { DType::Decimal(dtype, Nullability::NonNullable), Some(ScalarValue::Decimal(value)), ) - .vortex_expect("unable to construct a decimal `Scalar` from `DecimalValue`") + .expect("unable to construct a decimal `Scalar` from `DecimalValue`") } } @@ -156,6 +155,6 @@ impl From> for Scalar { DType::Decimal(dtype, Nullability::Nullable), Some(ScalarValue::Decimal(value)), ) - .vortex_expect("unable to construct a decimal `Scalar` from `Option`") + .expect("unable to construct a decimal `Scalar` from `Option`") } } diff --git a/vortex-array/src/scalar/convert/primitive.rs b/vortex-array/src/scalar/convert/primitive.rs index 57f3316df11..415e1e31ca6 100644 --- a/vortex-array/src/scalar/convert/primitive.rs +++ b/vortex-array/src/scalar/convert/primitive.rs @@ -9,7 +9,6 @@ use vortex_dtype::Nullability; use vortex_dtype::PType; use vortex_dtype::half::f16; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; @@ -102,9 +101,7 @@ macro_rules! 
primitive_scalar { DType::Primitive(<$T>::PTYPE, Nullability::NonNullable), Some(ScalarValue::Primitive(value.into())), ) - .vortex_expect( - "somehow unable to construct a primitive `Scalar` from a native type", - ) + .expect("somehow unable to construct a primitive `Scalar` from a native type") } } @@ -115,9 +112,7 @@ macro_rules! primitive_scalar { DType::Primitive(<$T>::PTYPE, Nullability::Nullable), value.map(|value| ScalarValue::Primitive(value.into())), ) - .vortex_expect( - "somehow unable to construct a primitive `Scalar` from a native type", - ) + .expect("somehow unable to construct a primitive `Scalar` from a native type") } } }; @@ -153,7 +148,7 @@ impl TryFrom<&ScalarValue> for usize { fn try_from(value: &ScalarValue) -> VortexResult { let val = value.as_primitive().cast::()?; - Ok(usize::try_from(val)?) + usize::try_from(val).map_err(|e| vortex_err!("failed to convert u64 to usize: {e}")) } } @@ -169,7 +164,7 @@ impl TryFrom<&Scalar> for usize { .as_::() .ok_or_else(|| vortex_err!("cannot convert Null to usize"))?; - Ok(usize::try_from(prim)?) + usize::try_from(prim).map_err(|e| vortex_err!("failed to convert u64 to usize: {e}")) } } @@ -181,7 +176,11 @@ impl TryFrom<&Scalar> for Option { .as_primitive_opt() .ok_or_else(|| vortex_err!("Expected primitive scalar, found {}", value.dtype()))?; - Ok(prim_scalar.as_::().map(usize::try_from).transpose()?) 
+ prim_scalar + .as_::() + .map(usize::try_from) + .transpose() + .map_err(|e| vortex_err!("failed to convert u64 to usize: {e}")) } } @@ -197,7 +196,7 @@ impl From for Scalar { DType::Primitive(PType::U64, Nullability::NonNullable), Some(ScalarValue::Primitive((value as u64).into())), ) - .vortex_expect("somehow unable to construct a primitive `Scalar` from a native type") + .expect("somehow unable to construct a primitive `Scalar` from a native type") } } @@ -207,6 +206,6 @@ impl From> for Scalar { DType::Primitive(PType::U64, Nullability::Nullable), value.map(|value| ScalarValue::Primitive((value as u64).into())), ) - .vortex_expect("somehow unable to construct a primitive `Scalar` from a native type") + .expect("somehow unable to construct a primitive `Scalar` from a native type") } } diff --git a/vortex-array/src/scalar/downcast.rs b/vortex-array/src/scalar/downcast.rs index a7648533965..241c4041a65 100644 --- a/vortex-array/src/scalar/downcast.rs +++ b/vortex-array/src/scalar/downcast.rs @@ -5,7 +5,6 @@ use vortex_buffer::BufferString; use vortex_buffer::ByteBuffer; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use crate::scalar::BinaryScalar; @@ -29,7 +28,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Bool`](vortex_dtype::DType::Bool) type. pub fn as_bool(&self) -> BoolScalar<'_> { self.as_bool_opt() - .vortex_expect("Failed to convert scalar to bool") + .expect("Failed to convert scalar to bool") } /// Returns a view of the scalar as a boolean scalar if it has a boolean type. @@ -44,7 +43,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Primitive`](vortex_dtype::DType::Primitive) type. pub fn as_primitive(&self) -> PrimitiveScalar<'_> { self.as_primitive_opt() - .vortex_expect("Failed to convert scalar to primitive") + .expect("Failed to convert scalar to primitive") } /// Returns a view of the scalar as a primitive scalar if it has a primitive type. 
@@ -59,7 +58,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Decimal`](vortex_dtype::DType::Decimal) type. pub fn as_decimal(&self) -> DecimalScalar<'_> { self.as_decimal_opt() - .vortex_expect("Failed to convert scalar to decimal") + .expect("Failed to convert scalar to decimal") } /// Returns a view of the scalar as a decimal scalar if it has a decimal type. @@ -74,7 +73,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Utf8`](vortex_dtype::DType::Utf8) type. pub fn as_utf8(&self) -> Utf8Scalar<'_> { self.as_utf8_opt() - .vortex_expect("Failed to convert scalar to utf8") + .expect("Failed to convert scalar to utf8") } /// Returns a view of the scalar as a UTF-8 string scalar if it has a UTF-8 type. @@ -89,7 +88,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Binary`](vortex_dtype::DType::Binary) type. pub fn as_binary(&self) -> BinaryScalar<'_> { self.as_binary_opt() - .vortex_expect("Failed to convert scalar to binary") + .expect("Failed to convert scalar to binary") } /// Returns a view of the scalar as a binary scalar if it has a binary type. @@ -104,7 +103,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Struct`](vortex_dtype::DType::Struct) type. pub fn as_struct(&self) -> StructScalar<'_> { self.as_struct_opt() - .vortex_expect("Failed to convert scalar to struct") + .expect("Failed to convert scalar to struct") } /// Returns a view of the scalar as a struct scalar if it has a struct type. @@ -122,7 +121,7 @@ impl Scalar { /// Panics if the scalar does not have a [`List`](vortex_dtype::DType::List) or [`FixedSizeList`](vortex_dtype::DType::FixedSizeList) type. pub fn as_list(&self) -> ListScalar<'_> { self.as_list_opt() - .vortex_expect("Failed to convert scalar to list") + .expect("Failed to convert scalar to list") } /// Returns a view of the scalar as a list scalar if it has a list type. 
@@ -140,7 +139,7 @@ impl Scalar { /// Panics if the scalar does not have a [`Extension`](vortex_dtype::DType::Extension) type. pub fn as_extension(&self) -> ExtScalar<'_> { self.as_extension_opt() - .vortex_expect("Failed to convert scalar to extension") + .expect("Failed to convert scalar to extension") } /// Returns a view of the scalar as an extension scalar if it has an extension type. diff --git a/vortex-array/src/scalar/mod.rs b/vortex-array/src/scalar/mod.rs index 9ea9d08bed0..3189457c48d 100644 --- a/vortex-array/src/scalar/mod.rs +++ b/vortex-array/src/scalar/mod.rs @@ -9,6 +9,11 @@ //! //! Note that the implementations of `Scalar` are split into several different modules. +#![deny(missing_docs)] +#![warn(clippy::missing_docs_in_private_items)] +#![warn(clippy::missing_errors_doc)] +#![warn(clippy::missing_safety_doc)] + #[cfg(feature = "arbitrary")] pub mod arbitrary; mod arrow; diff --git a/vortex-array/src/scalar/proto.rs b/vortex-array/src/scalar/proto.rs index 92dce155350..5c51a0bc1d9 100644 --- a/vortex-array/src/scalar/proto.rs +++ b/vortex-array/src/scalar/proto.rs @@ -12,7 +12,6 @@ use vortex_dtype::DType; use vortex_dtype::PType; use vortex_dtype::half::f16; use vortex_dtype::i256; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -37,7 +36,7 @@ impl From<&Scalar> for pb::Scalar { dtype: Some( (value.dtype()) .try_into() - .vortex_expect("Failed to convert DType to proto"), + .expect("Failed to convert DType to proto"), ), value: Some(ScalarValue::to_proto(value.value())), } @@ -69,7 +68,7 @@ impl ScalarValue { let mut buf = B::default(); proto .encode(&mut buf) - .vortex_expect("Failed to encode scalar value"); + .expect("Failed to encode scalar value"); buf } } @@ -367,32 +366,27 @@ fn bytes_from_proto(bytes: &[u8], dtype: &DType) -> VortexResult { 2 => DecimalValue::I16(i16::from_le_bytes( bytes .try_into() - .ok() - .vortex_expect("Buffer has invalid number of 
bytes"), + .expect("Buffer has invalid number of bytes"), )), 4 => DecimalValue::I32(i32::from_le_bytes( bytes .try_into() - .ok() - .vortex_expect("Buffer has invalid number of bytes"), + .expect("Buffer has invalid number of bytes"), )), 8 => DecimalValue::I64(i64::from_le_bytes( bytes .try_into() - .ok() - .vortex_expect("Buffer has invalid number of bytes"), + .expect("Buffer has invalid number of bytes"), )), 16 => DecimalValue::I128(i128::from_le_bytes( bytes .try_into() - .ok() - .vortex_expect("Buffer has invalid number of bytes"), + .expect("Buffer has invalid number of bytes"), )), 32 => DecimalValue::I256(i256::from_le_bytes( bytes .try_into() - .ok() - .vortex_expect("Buffer has invalid number of bytes"), + .expect("Buffer has invalid number of bytes"), )), l => vortex_bail!(Serde: "invalid decimal byte length: {l}"), })), diff --git a/vortex-array/src/scalar/scalar_impl.rs b/vortex-array/src/scalar/scalar_impl.rs index 90cc6f410c1..a6e0a18087c 100644 --- a/vortex-array/src/scalar/scalar_impl.rs +++ b/vortex-array/src/scalar/scalar_impl.rs @@ -76,9 +76,7 @@ impl Scalar { /// Panics if the given [`DType`] and [`ScalarValue`] are incompatible. 
#[cfg(test)] pub fn new(dtype: DType, value: Option) -> Self { - use vortex_error::VortexExpect; - - Self::try_new(dtype, value).vortex_expect("Failed to create Scalar") + Self::try_new(dtype, value).expect("Failed to create Scalar") } /// Attempts to create a new [`Scalar`] with the given [`DType`] and potentially null diff --git a/vortex-array/src/scalar/tests/casting.rs b/vortex-array/src/scalar/tests/casting.rs index 6cecd1b8b92..a494c95b4f9 100644 --- a/vortex-array/src/scalar/tests/casting.rs +++ b/vortex-array/src/scalar/tests/casting.rs @@ -16,7 +16,6 @@ mod tests { use vortex_dtype::StructFields; use vortex_dtype::extension::ExtDTypeVTable; use vortex_dtype::half::f16; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::scalar::PValue; @@ -45,7 +44,7 @@ mod tests { impl Apples { fn new() -> ExtDType { ExtDType::try_new(0, DType::Primitive(PType::U16, Nullability::NonNullable)) - .vortex_expect("valid apples dtype") + .expect("valid apples dtype") } } @@ -325,7 +324,7 @@ mod tests { .to_storage_scalar() .as_struct() .fields_iter() - .vortex_expect("non null") + .expect("non null") .collect::>(); assert_eq!( list_elems[0].as_primitive().pvalue().unwrap(), diff --git a/vortex-array/src/scalar/truncation.rs b/vortex-array/src/scalar/truncation.rs index c9d661427ad..2f054b2de6c 100644 --- a/vortex-array/src/scalar/truncation.rs +++ b/vortex-array/src/scalar/truncation.rs @@ -6,7 +6,6 @@ use vortex_buffer::BufferString; use vortex_buffer::ByteBuffer; use vortex_dtype::Nullability; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; @@ -100,7 +99,7 @@ impl ScalarTruncation for BufferString { fn upper_bound(self, max_length: usize) -> Option { let utf8_split_pos = (max_length.saturating_sub(3)..=max_length) .rfind(|p| self.is_char_boundary(*p)) - .vortex_expect("Failed to find utf8 character boundary"); + .expect("Failed to find utf8 character boundary"); // SAFETY: we slice to a char boundary 
so the sliced range contains valid UTF-8. let sliced = @@ -115,7 +114,7 @@ impl ScalarTruncation for BufferString { // valid UTF-8, we must have a valid character boundary. let utf8_split_pos = (max_length.saturating_sub(3)..=max_length) .rfind(|p| self.is_char_boundary(*p)) - .vortex_expect("Failed to find utf8 character boundary"); + .expect("Failed to find utf8 character boundary"); unsafe { BufferString::new_unchecked(self.into_inner().slice(..utf8_split_pos)) } } diff --git a/vortex-array/src/scalar/typed_view/binary.rs b/vortex-array/src/scalar/typed_view/binary.rs index c95be86125a..861e3e8347e 100644 --- a/vortex-array/src/scalar/typed_view/binary.rs +++ b/vortex-array/src/scalar/typed_view/binary.rs @@ -9,7 +9,6 @@ use std::fmt::Formatter; use itertools::Itertools; use vortex_buffer::ByteBuffer; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -102,7 +101,7 @@ impl<'a> BinaryScalar<'a> { Some(ScalarValue::Binary( self.value() .cloned() - .vortex_expect("nullness handled in Scalar::cast"), + .expect("nullness handled in Scalar::cast"), )), ) } diff --git a/vortex-array/src/scalar/typed_view/bool.rs b/vortex-array/src/scalar/typed_view/bool.rs index 19271b4fb15..751947c6f58 100644 --- a/vortex-array/src/scalar/typed_view/bool.rs +++ b/vortex-array/src/scalar/typed_view/bool.rs @@ -8,7 +8,6 @@ use std::fmt::Display; use std::fmt::Formatter; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -89,7 +88,7 @@ impl<'a> BoolScalar<'a> { ) } Ok(Scalar::bool( - self.value.vortex_expect("nullness handled in Scalar::cast"), + self.value.expect("nullness handled in Scalar::cast"), dtype.nullability(), )) } diff --git a/vortex-array/src/scalar/typed_view/decimal/dvalue.rs b/vortex-array/src/scalar/typed_view/decimal/dvalue.rs index 14e60516e5f..29be07902eb 100644 --- a/vortex-array/src/scalar/typed_view/decimal/dvalue.rs +++ 
b/vortex-array/src/scalar/typed_view/decimal/dvalue.rs @@ -17,7 +17,6 @@ use vortex_dtype::NativeDecimalType; use vortex_dtype::ToI256; use vortex_dtype::i256; use vortex_dtype::match_each_decimal_value; -use vortex_error::VortexExpect; /// A decimal value that can be stored in various integer widths. /// @@ -110,8 +109,7 @@ impl DecimalValue { pub fn fits_in_precision(&self, decimal_type: DecimalDType) -> bool { // Convert to i256 for comparison let value_i256 = match_each_decimal_value!(self, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); // Calculate the maximum stored value that can be represented with this precision @@ -120,7 +118,7 @@ impl DecimalValue { let ten = i256::from_i128(10); let max_value = ten .checked_pow(decimal_type.precision() as _) - .vortex_expect("precision must exist in i256"); + .expect("precision must exist in i256"); let min_value = -max_value; value_i256 > min_value && value_i256 < max_value @@ -134,12 +132,10 @@ impl DecimalValue { F: FnOnce(i256, i256) -> Option, { let self_upcast = match_each_decimal_value!(self, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); let other_upcast = match_each_decimal_value!(other, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); op(self_upcast, other_upcast).map(DecimalValue::I256) @@ -175,12 +171,10 @@ impl DecimalValue { impl PartialEq for DecimalValue { fn eq(&self, other: &Self) -> bool { let self_upcast = match_each_decimal_value!(self, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); let other_upcast = match_each_decimal_value!(other, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 
must always succeed") }); self_upcast == other_upcast @@ -192,12 +186,10 @@ impl Eq for DecimalValue {} impl PartialOrd for DecimalValue { fn partial_cmp(&self, other: &Self) -> Option { let self_upcast = match_each_decimal_value!(self, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); let other_upcast = match_each_decimal_value!(other, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); self_upcast.partial_cmp(&other_upcast) @@ -208,8 +200,7 @@ impl PartialOrd for DecimalValue { impl Hash for DecimalValue { fn hash(&self, state: &mut H) { let self_upcast = match_each_decimal_value!(self, |v| { - v.to_i256() - .vortex_expect("upcast to i256 must always succeed") + v.to_i256().expect("upcast to i256 must always succeed") }); self_upcast.hash(state); } diff --git a/vortex-array/src/scalar/typed_view/extension/mod.rs b/vortex-array/src/scalar/typed_view/extension/mod.rs index 91834b45da7..1e399afaf2f 100644 --- a/vortex-array/src/scalar/typed_view/extension/mod.rs +++ b/vortex-array/src/scalar/typed_view/extension/mod.rs @@ -10,7 +10,6 @@ use std::hash::Hash; use vortex_dtype::DType; use vortex_dtype::datetime::AnyTemporal; use vortex_dtype::extension::ExtDTypeRef; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -104,7 +103,7 @@ impl<'a> ExtScalar<'a> { /// Returns the storage scalar of the extension scalar. pub fn to_storage_scalar(&self) -> Scalar { Scalar::try_new(self.ext_dtype.storage_dtype().clone(), self.value.cloned()) - .vortex_expect("ExtScalar is invalid") + .expect("ExtScalar is invalid") } /// Return the [`DType`] of the extension scalar. 
diff --git a/vortex-array/src/scalar/typed_view/list.rs b/vortex-array/src/scalar/typed_view/list.rs index 43a7e3229f8..3c0a7053c61 100644 --- a/vortex-array/src/scalar/typed_view/list.rs +++ b/vortex-array/src/scalar/typed_view/list.rs @@ -10,7 +10,6 @@ use std::sync::Arc; use itertools::Itertools; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -59,7 +58,7 @@ impl Display for ListScalar<'_> { elems .iter() .map(|e| Scalar::try_new(self.element_dtype().clone(), e.clone()) - .vortex_expect("`ListScalar` is already a valid `Scalar`")) + .expect("`ListScalar` is already a valid `Scalar`")) .format(", ") ) } diff --git a/vortex-array/src/scalar/typed_view/primitive/pvalue.rs b/vortex-array/src/scalar/typed_view/primitive/pvalue.rs index 6adfd5e0be7..fc874d33d5c 100644 --- a/vortex-array/src/scalar/typed_view/primitive/pvalue.rs +++ b/vortex-array/src/scalar/typed_view/primitive/pvalue.rs @@ -17,7 +17,6 @@ use vortex_dtype::PType; use vortex_dtype::ToBytes; use vortex_dtype::half::f16; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_ensure; @@ -56,14 +55,14 @@ pub enum PValue { impl PartialEq for PValue { fn eq(&self, other: &Self) -> bool { match (self, other) { - (Self::U8(s), o) => o.as_u64().vortex_expect("upcast") == *s as u64, - (Self::U16(s), o) => o.as_u64().vortex_expect("upcast") == *s as u64, - (Self::U32(s), o) => o.as_u64().vortex_expect("upcast") == *s as u64, - (Self::U64(s), o) => o.as_u64().vortex_expect("upcast") == *s, - (Self::I8(s), o) => o.as_i64().vortex_expect("upcast") == *s as i64, - (Self::I16(s), o) => o.as_i64().vortex_expect("upcast") == *s as i64, - (Self::I32(s), o) => o.as_i64().vortex_expect("upcast") == *s as i64, - (Self::I64(s), o) => o.as_i64().vortex_expect("upcast") == *s, + (Self::U8(s), o) => o.as_u64().expect("upcast") 
== *s as u64, + (Self::U16(s), o) => o.as_u64().expect("upcast") == *s as u64, + (Self::U32(s), o) => o.as_u64().expect("upcast") == *s as u64, + (Self::U64(s), o) => o.as_u64().expect("upcast") == *s, + (Self::I8(s), o) => o.as_i64().expect("upcast") == *s as i64, + (Self::I16(s), o) => o.as_i64().expect("upcast") == *s as i64, + (Self::I32(s), o) => o.as_i64().expect("upcast") == *s as i64, + (Self::I64(s), o) => o.as_i64().expect("upcast") == *s, (Self::F16(s), Self::F16(o)) => s.is_eq(*o), (Self::F32(s), Self::F32(o)) => s.is_eq(*o), (Self::F64(s), Self::F64(o)) => s.is_eq(*o), @@ -77,14 +76,14 @@ impl Eq for PValue {} impl PartialOrd for PValue { fn partial_cmp(&self, other: &Self) -> Option { match (self, other) { - (Self::U8(s), o) => Some((*s as u64).cmp(&o.as_u64().vortex_expect("upcast"))), - (Self::U16(s), o) => Some((*s as u64).cmp(&o.as_u64().vortex_expect("upcast"))), - (Self::U32(s), o) => Some((*s as u64).cmp(&o.as_u64().vortex_expect("upcast"))), - (Self::U64(s), o) => Some((*s).cmp(&o.as_u64().vortex_expect("upcast"))), - (Self::I8(s), o) => Some((*s as i64).cmp(&o.as_i64().vortex_expect("upcast"))), - (Self::I16(s), o) => Some((*s as i64).cmp(&o.as_i64().vortex_expect("upcast"))), - (Self::I32(s), o) => Some((*s as i64).cmp(&o.as_i64().vortex_expect("upcast"))), - (Self::I64(s), o) => Some((*s).cmp(&o.as_i64().vortex_expect("upcast"))), + (Self::U8(s), o) => Some((*s as u64).cmp(&o.as_u64().expect("upcast"))), + (Self::U16(s), o) => Some((*s as u64).cmp(&o.as_u64().expect("upcast"))), + (Self::U32(s), o) => Some((*s as u64).cmp(&o.as_u64().expect("upcast"))), + (Self::U64(s), o) => Some((*s).cmp(&o.as_u64().expect("upcast"))), + (Self::I8(s), o) => Some((*s as i64).cmp(&o.as_i64().expect("upcast"))), + (Self::I16(s), o) => Some((*s as i64).cmp(&o.as_i64().expect("upcast"))), + (Self::I32(s), o) => Some((*s as i64).cmp(&o.as_i64().expect("upcast"))), + (Self::I64(s), o) => Some((*s).cmp(&o.as_i64().expect("upcast"))), (Self::F16(s), Self::F16(o)) 
=> Some(s.total_compare(*o)), (Self::F32(s), Self::F32(o)) => Some(s.total_compare(*o)), (Self::F64(s), Self::F64(o)) => Some(s.total_compare(*o)), @@ -97,10 +96,10 @@ impl Hash for PValue { fn hash(&self, state: &mut H) { match self { PValue::U8(_) | PValue::U16(_) | PValue::U32(_) | PValue::U64(_) => { - self.as_u64().vortex_expect("upcast").hash(state) + self.as_u64().expect("upcast").hash(state) } PValue::I8(_) | PValue::I16(_) | PValue::I32(_) | PValue::I64(_) => { - self.as_i64().vortex_expect("upcast").hash(state) + self.as_i64().expect("upcast").hash(state) } PValue::F16(v) => v.to_le_bytes().hash(state), PValue::F32(v) => v.to_le_bytes().hash(state), diff --git a/vortex-array/src/scalar/typed_view/primitive/scalar.rs b/vortex-array/src/scalar/typed_view/primitive/scalar.rs index 9dc242073bc..ba637160497 100644 --- a/vortex-array/src/scalar/typed_view/primitive/scalar.rs +++ b/vortex-array/src/scalar/typed_view/primitive/scalar.rs @@ -21,7 +21,6 @@ use vortex_dtype::NativePType; use vortex_dtype::PType; use vortex_dtype::match_each_native_ptype; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_panic; @@ -165,9 +164,7 @@ impl<'a> PrimitiveScalar<'a> { /// Casts this scalar to the given `dtype`. 
pub(crate) fn cast(&self, dtype: &DType) -> VortexResult { let ptype = PType::try_from(dtype)?; - let pvalue = self - .pvalue - .vortex_expect("nullness handled in Scalar::cast"); + let pvalue = self.pvalue.expect("nullness handled in Scalar::cast"); Ok(match_each_native_ptype!(ptype, |Q| { Scalar::primitive(pvalue.cast::()?, dtype.nullability()) })) @@ -272,7 +269,7 @@ impl Sub for PrimitiveScalar<'_> { fn sub(self, rhs: Self) -> Self::Output { self.checked_sub(&rhs) - .vortex_expect("PrimitiveScalar subtract: overflow or underflow") + .expect("PrimitiveScalar subtract: overflow or underflow") } } @@ -287,7 +284,7 @@ impl Add for PrimitiveScalar<'_> { fn add(self, rhs: Self) -> Self::Output { self.checked_add(&rhs) - .vortex_expect("PrimitiveScalar add: overflow or underflow") + .expect("PrimitiveScalar add: overflow or underflow") } } diff --git a/vortex-array/src/scalar/typed_view/primitive/tests.rs b/vortex-array/src/scalar/typed_view/primitive/tests.rs index 687160cedf7..cbf3a4b8b71 100644 --- a/vortex-array/src/scalar/typed_view/primitive/tests.rs +++ b/vortex-array/src/scalar/typed_view/primitive/tests.rs @@ -11,7 +11,6 @@ use vortex_dtype::Nullability; use vortex_dtype::PType; use vortex_dtype::ToBytes; use vortex_dtype::half::f16; -use vortex_error::VortexExpect; use vortex_utils::aliases::hash_set::HashSet; use super::pvalue::CoercePValue; @@ -134,9 +133,9 @@ fn test_primitive_cast( #[case] should_succeed: bool, ) { let source_pvalue = match source_type { - PType::I8 => PValue::I8(i8::try_from(source_value).vortex_expect("cannot cast")), - PType::U8 => PValue::U8(u8::try_from(source_value).vortex_expect("cannot cast")), - PType::U16 => PValue::U16(u16::try_from(source_value).vortex_expect("cannot cast")), + PType::I8 => PValue::I8(i8::try_from(source_value).expect("cannot cast")), + PType::U8 => PValue::U8(u8::try_from(source_value).expect("cannot cast")), + PType::U16 => PValue::U16(u16::try_from(source_value).expect("cannot cast")), PType::I32 => 
PValue::I32(source_value), _ => unreachable!("Test case uses unexpected source type"), }; diff --git a/vortex-array/src/scalar/typed_view/struct_.rs b/vortex-array/src/scalar/typed_view/struct_.rs index 1c755525a41..096a4e87624 100644 --- a/vortex-array/src/scalar/typed_view/struct_.rs +++ b/vortex-array/src/scalar/typed_view/struct_.rs @@ -14,7 +14,6 @@ use vortex_dtype::DType; use vortex_dtype::FieldName; use vortex_dtype::FieldNames; use vortex_dtype::StructFields; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -48,7 +47,7 @@ impl Display for StructScalar<'_> { .zip_eq(fields.iter()) .map(|((name, dtype), value)| { let val = Scalar::try_new(dtype, value.clone()) - .vortex_expect("unable to construct a struct `Scalar`"); + .expect("unable to construct a struct `Scalar`"); format!("{name}: {val}") }) .format(", "); @@ -137,7 +136,7 @@ impl<'a> StructScalar<'a> { pub fn struct_fields(&self) -> &StructFields { self.dtype .as_struct_fields_opt() - .vortex_expect("StructScalar always has struct dtype") + .expect("StructScalar always has struct dtype") } /// Returns the field names of the struct. @@ -170,7 +169,7 @@ impl<'a> StructScalar<'a> { pub fn field_by_idx(&self, idx: usize) -> Option { let fields = self .fields - .vortex_expect("Can't take field out of null struct scalar"); + .expect("Can't take field out of null struct scalar"); Some( // SAFETY: We assume that the struct `DType` correctly describes the struct values. unsafe { @@ -227,13 +226,10 @@ impl<'a> StructScalar<'a> { Scalar::try_new( own_st .field_by_index(i) - .vortex_expect("Iterating over scalar fields"), + .expect("Iterating over scalar fields"), f.clone(), )? 
- .cast( - &st.field_by_index(i) - .vortex_expect("Iterating over scalar fields"), - ) + .cast(&st.field_by_index(i).expect("Iterating over scalar fields")) .map(|s| s.into_value()) }) .collect::>>()?; @@ -248,6 +244,7 @@ impl<'a> StructScalar<'a> { /// # Errors /// /// Returns an error if the struct cannot be projected or if a field is not found. + #[allow(clippy::missing_panics_doc)] pub fn project(&self, projection: &[FieldName]) -> VortexResult { let struct_dtype = self .dtype @@ -268,7 +265,7 @@ impl<'a> StructScalar<'a> { .map(|name| { struct_dtype .find(name) - .vortex_expect("DType has been successfully projected already") + .expect("DType has been successfully projected already") }) .map(|i| fs[i].clone()) .collect(), @@ -309,7 +306,7 @@ impl Scalar { value_children.extend(children.into_iter().map(|x| x.into_value())); Self::try_new(dtype, Some(ScalarValue::List(value_children))) - .vortex_expect("unable to construct a struct `Scalar`") + .expect("unable to construct a struct `Scalar`") } } diff --git a/vortex-array/src/scalar/typed_view/utf8.rs b/vortex-array/src/scalar/typed_view/utf8.rs index bfd8019d4bf..8e7b891bcc6 100644 --- a/vortex-array/src/scalar/typed_view/utf8.rs +++ b/vortex-array/src/scalar/typed_view/utf8.rs @@ -10,7 +10,6 @@ use std::fmt::Formatter; use vortex_buffer::BufferString; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_utils::aliases::StringEscape; @@ -98,7 +97,7 @@ impl<'a> Utf8Scalar<'a> { Some(ScalarValue::Utf8( self.value() .cloned() - .vortex_expect("nullness handled in Scalar::cast"), + .expect("nullness handled in Scalar::cast"), )), ) } diff --git a/vortex-array/src/serde.rs b/vortex-array/src/serde.rs index 1ef2675da63..d6296efa043 100644 --- a/vortex-array/src/serde.rs +++ b/vortex-array/src/serde.rs @@ -16,7 +16,6 @@ use vortex_buffer::ByteBuffer; use vortex_dtype::DType; use vortex_dtype::TryFromBytes; use vortex_error::VortexError; 
-use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -104,7 +103,7 @@ impl dyn Array + '_ { }; fb_buffers.push(fba::Buffer::new( - u16::try_from(padding).vortex_expect("padding fits into u16"), + u16::try_from(padding).expect("padding fits into u16"), buffer.alignment().exponent(), Compression::None, u32::try_from(buffer.len()) @@ -410,7 +409,7 @@ impl ArrayParts { let children = self .flatbuffer() .children() - .vortex_expect("Expected array to have children"); + .expect("Expected array to have children"); if idx >= children.len() { vortex_panic!( "Invalid child index {} for array with {} children", @@ -454,7 +453,7 @@ impl ArrayParts { /// without needing to access the actual buffer data. pub fn buffer_lengths(&self) -> Vec { let fb_array = root::(self.flatbuffer.as_ref()) - .vortex_expect("ArrayParts flatbuffer must be a valid Array"); + .expect("ArrayParts flatbuffer must be a valid Array"); fb_array .buffers() .map(|buffers| buffers.iter().map(|b| b.length() as usize).collect()) diff --git a/vortex-array/src/stats/mod.rs b/vortex-array/src/stats/mod.rs index ea8ae6b58a6..a6935462739 100644 --- a/vortex-array/src/stats/mod.rs +++ b/vortex-array/src/stats/mod.rs @@ -14,7 +14,6 @@ pub mod flatbuffers; mod stats_set; pub use array::*; -use vortex_error::VortexExpect; use crate::expr::stats::Stat; @@ -29,7 +28,7 @@ pub const PRUNING_STATS: &[Stat] = &[ ]; pub fn as_stat_bitset_bytes(stats: &[Stat]) -> Vec { - let max_stat = u8::from(last::().vortex_expect("last stat")) as usize + 1; + let max_stat = u8::from(last::().expect("last stat")) as usize + 1; // TODO(ngates): use vortex-buffer::BitBuffer let mut stat_bitset = BooleanBufferBuilder::new_from_buffer( MutableBuffer::from_len_zeroed(max_stat.div_ceil(8)), diff --git a/vortex-array/src/stats/stats_set.rs b/vortex-array/src/stats/stats_set.rs index 082a117b22e..9ea2defa446 100644 --- a/vortex-array/src/stats/stats_set.rs +++ 
b/vortex-array/src/stats/stats_set.rs @@ -8,7 +8,6 @@ use enum_iterator::all; use num_traits::CheckedAdd; use vortex_dtype::DType; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_error::vortex_panic; @@ -134,7 +133,7 @@ impl StatsSet { v.map(|v| { T::try_from( &Scalar::try_new(dtype.clone(), Some(v)) - .vortex_expect("failed to construct a scalar statistic"), + .expect("failed to construct a scalar statistic"), ) .unwrap_or_else(|err| { vortex_panic!( @@ -231,10 +230,10 @@ impl StatsProvider for TypedStatsSetRef<'_, '_> { p.map(|sv| { Scalar::try_new( stat.dtype(self.dtype) - .vortex_expect("Must have valid dtype if value is present"), + .expect("Must have valid dtype if value is present"), Some(sv), ) - .vortex_expect("failed to construct a scalar statistic") + .expect("failed to construct a scalar statistic") }) }) } @@ -267,10 +266,10 @@ impl StatsProvider for MutTypedStatsSetRef<'_, '_> { p.map(|sv| { Scalar::try_new( stat.dtype(self.dtype) - .vortex_expect("Must have valid dtype if value is present"), + .expect("Must have valid dtype if value is present"), Some(sv), ) - .vortex_expect("failed to construct a scalar statistic") + .expect("failed to construct a scalar statistic") }) }) } @@ -357,26 +356,22 @@ impl MutTypedStatsSetRef<'_, '_> { (Some(m1), Some(m2)) => { let meet = m1 .intersection(&m2) - .vortex_expect("can always compare scalar") + .expect("can always compare scalar") .ok_or_else(|| { vortex_err!("{:?} bounds ({m1:?}, {m2:?}) do not overlap", S::STAT) })?; if meet != m1 { self.set( S::STAT, - meet.into_value().map(|s| { - s.into_value() - .vortex_expect("stat scalar value cannot be null") - }), + meet.into_value() + .map(|s| s.into_value().expect("stat scalar value cannot be null")), ); } } (None, Some(m)) => self.set( S::STAT, - m.into_value().map(|s| { - s.into_value() - .vortex_expect("stat scalar value cannot be null") - }), + m.into_value() + .map(|s| 
s.into_value().expect("stat scalar value cannot be null")), ), (Some(_), _) => (), (None, None) => self.clear(S::STAT), @@ -395,7 +390,7 @@ impl MutTypedStatsSetRef<'_, '_> { (Some(m1), Some(m2)) => { let intersection = m1 .intersection(&m2) - .vortex_expect("can always compare boolean") + .expect("can always compare boolean") .ok_or_else(|| { vortex_err!("{:?} bounds ({m1:?}, {m2:?}) do not overlap", S::STAT) })?; @@ -416,14 +411,12 @@ impl MutTypedStatsSetRef<'_, '_> { other.get_scalar_bound::(), ) { (Some(m1), Some(m2)) => { - let meet = m1.union(&m2).vortex_expect("can compare scalar"); + let meet = m1.union(&m2).expect("can compare scalar"); if meet != m1 { self.set( Stat::Min, - meet.into_value().map(|s| { - s.into_value() - .vortex_expect("stat scalar value cannot be null") - }), + meet.into_value() + .map(|s| s.into_value().expect("stat scalar value cannot be null")), ); } } @@ -437,14 +430,12 @@ impl MutTypedStatsSetRef<'_, '_> { other.get_scalar_bound::(), ) { (Some(m1), Some(m2)) => { - let meet = m1.union(&m2).vortex_expect("can compare scalar"); + let meet = m1.union(&m2).expect("can compare scalar"); if meet != m1 { self.set( Stat::Max, - meet.into_value().map(|s| { - s.into_value() - .vortex_expect("stat scalar value cannot be null") - }), + meet.into_value() + .map(|s| s.into_value().expect("stat scalar value cannot be null")), ); } } diff --git a/vortex-array/src/validity.rs b/vortex-array/src/validity.rs index ad639e9686e..1c10c432b16 100644 --- a/vortex-array/src/validity.rs +++ b/vortex-array/src/validity.rs @@ -9,7 +9,6 @@ use std::ops::Range; use vortex_buffer::BitBuffer; use vortex_dtype::DType; use vortex_dtype::Nullability; -use vortex_error::VortexExpect as _; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -118,8 +117,8 @@ impl Validity { Validity::NonNullable | Validity::AllValid => true, Validity::AllInvalid => false, Validity::Array(array) => { - 
usize::try_from(&sum(array).vortex_expect("must have sum for bool array")) - .vortex_expect("sum must be a usize") + usize::try_from(&sum(array).expect("must have sum for bool array")) + .expect("sum must be a usize") == array.len() } }) @@ -132,8 +131,8 @@ impl Validity { Validity::NonNullable | Validity::AllValid => false, Validity::AllInvalid => true, Validity::Array(array) => { - usize::try_from(&sum(array).vortex_expect("must have sum for bool array")) - .vortex_expect("sum must be a usize") + usize::try_from(&sum(array).expect("must have sum for bool array")) + .expect("sum must be a usize") == 0 } }) @@ -147,10 +146,10 @@ impl Validity { Self::AllInvalid => false, Self::Array(a) => a .scalar_at(index) - .vortex_expect("Validity array must support scalar_at") + .expect("Validity array must support scalar_at") .as_bool() .value() - .vortex_expect("Validity must be non-nullable"), + .expect("Validity must be non-nullable"), }) } @@ -352,7 +351,7 @@ impl Validity { is_valid .statistics() .compute_min::() - .vortex_expect("validity array must support min") + .expect("validity array must support min") .then(|| { // min true => all true Self::NonNullable diff --git a/vortex-array/src/variants.rs b/vortex-array/src/variants.rs index 0263d9c4480..33b6a98d9f3 100644 --- a/vortex-array/src/variants.rs +++ b/vortex-array/src/variants.rs @@ -8,7 +8,6 @@ use vortex_dtype::DType; use vortex_dtype::FieldNames; use vortex_dtype::PType; use vortex_dtype::extension::ExtDTypeRef; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_panic; @@ -22,63 +21,63 @@ impl dyn Array + '_ { pub fn as_null_typed(&self) -> NullTyped<'_> { matches!(self.dtype(), DType::Null) .then(|| NullTyped(self)) - .vortex_expect("Array does not have DType::Null") + .expect("Array does not have DType::Null") } /// Downcasts the array for bool-specific behavior. 
pub fn as_bool_typed(&self) -> BoolTyped<'_> { matches!(self.dtype(), DType::Bool(..)) .then(|| BoolTyped(self)) - .vortex_expect("Array does not have DType::Bool") + .expect("Array does not have DType::Bool") } /// Downcasts the array for primitive-specific behavior. pub fn as_primitive_typed(&self) -> PrimitiveTyped<'_> { matches!(self.dtype(), DType::Primitive(..)) .then(|| PrimitiveTyped(self)) - .vortex_expect("Array does not have DType::Primitive") + .expect("Array does not have DType::Primitive") } /// Downcasts the array for decimal-specific behavior. pub fn as_decimal_typed(&self) -> DecimalTyped<'_> { matches!(self.dtype(), DType::Decimal(..)) .then(|| DecimalTyped(self)) - .vortex_expect("Array does not have DType::Decimal") + .expect("Array does not have DType::Decimal") } /// Downcasts the array for utf8-specific behavior. pub fn as_utf8_typed(&self) -> Utf8Typed<'_> { matches!(self.dtype(), DType::Utf8(..)) .then(|| Utf8Typed(self)) - .vortex_expect("Array does not have DType::Utf8") + .expect("Array does not have DType::Utf8") } /// Downcasts the array for binary-specific behavior. pub fn as_binary_typed(&self) -> BinaryTyped<'_> { matches!(self.dtype(), DType::Binary(..)) .then(|| BinaryTyped(self)) - .vortex_expect("Array does not have DType::Binary") + .expect("Array does not have DType::Binary") } /// Downcasts the array for struct-specific behavior. pub fn as_struct_typed(&self) -> StructTyped<'_> { matches!(self.dtype(), DType::Struct(..)) .then(|| StructTyped(self)) - .vortex_expect("Array does not have DType::Struct") + .expect("Array does not have DType::Struct") } /// Downcasts the array for list-specific behavior. pub fn as_list_typed(&self) -> ListTyped<'_> { matches!(self.dtype(), DType::List(..)) .then(|| ListTyped(self)) - .vortex_expect("Array does not have DType::List") + .expect("Array does not have DType::List") } /// Downcasts the array for extension-specific behavior. 
pub fn as_extension_typed(&self) -> ExtensionTyped<'_> { matches!(self.dtype(), DType::Extension(..)) .then(|| ExtensionTyped(self)) - .vortex_expect("Array does not have DType::Extension") + .expect("Array does not have DType::Extension") } } @@ -93,7 +92,7 @@ impl BoolTyped<'_> { Ok(true_count .as_primitive() .as_::() - .vortex_expect("true count should never be null")) + .expect("true count should never be null")) } } diff --git a/vortex-array/src/vtable/dyn_.rs b/vortex-array/src/vtable/dyn_.rs index cd86dd28948..6b1971c7009 100644 --- a/vortex-array/src/vtable/dyn_.rs +++ b/vortex-array/src/vtable/dyn_.rs @@ -9,7 +9,6 @@ use std::marker::PhantomData; use arcref::ArcRef; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_session::VortexSession; @@ -199,7 +198,7 @@ fn downcast(array: &ArrayRef) -> &V::Array { array .as_any() .downcast_ref::>() - .vortex_expect("Failed to downcast array to expected encoding type") + .expect("Failed to downcast array to expected encoding type") .as_inner() } diff --git a/vortex-bench/src/bin/data-gen.rs b/vortex-bench/src/bin/data-gen.rs index d38c977a6c6..254835c935c 100644 --- a/vortex-bench/src/bin/data-gen.rs +++ b/vortex-bench/src/bin/data-gen.rs @@ -12,7 +12,6 @@ use std::process::Command; use clap::Parser; use clap::value_parser; use tracing::info; -use vortex::error::VortexExpect; use vortex_bench::Benchmark; use vortex_bench::BenchmarkArg; use vortex_bench::CompactionStrategy; @@ -107,9 +106,7 @@ fn generate_duckdb(base_path: &Path, benchmark: &dyn Benchmark) -> anyhow::Resul let parquet_dir = base_path.join(Format::Parquet.name()); let sql = generate_duckdb_registration_sql( benchmark, - parquet_dir - .to_str() - .vortex_expect("value must be str displayable"), + parquet_dir.to_str().expect("value must be str displayable"), Format::Parquet, "TABLE", ); diff --git a/vortex-bench/src/clickbench/benchmark.rs 
b/vortex-bench/src/clickbench/benchmark.rs index 5e14cbcf40e..bf0a1738d8c 100644 --- a/vortex-bench/src/clickbench/benchmark.rs +++ b/vortex-bench/src/clickbench/benchmark.rs @@ -8,7 +8,6 @@ use std::path::Path; use anyhow::Result; use reqwest::Client; use url::Url; -use vortex::error::VortexExpect; use crate::Benchmark; use crate::BenchmarkDataset; @@ -43,7 +42,7 @@ impl ClickBenchBenchmark { let basepath = format!("clickbench_{flavor}").to_data_path(); Ok(Url::parse(&format!( "file:{}/", - basepath.to_str().vortex_expect("path should be utf8") + basepath.to_str().expect("path should be utf8") ))?) } Some(remote_data_dir) => { diff --git a/vortex-bench/src/clickbench/data.rs b/vortex-bench/src/clickbench/data.rs index c0ac621a6b4..662bec99e77 100644 --- a/vortex-bench/src/clickbench/data.rs +++ b/vortex-bench/src/clickbench/data.rs @@ -25,7 +25,6 @@ use tokio::io::AsyncWriteExt; use tokio::task::JoinSet; use tracing::info; use tracing::warn; -use vortex::error::VortexExpect; use crate::Format; // Re-export for use by clickbench_benchmark @@ -181,7 +180,7 @@ impl Display for Flavor { f, "{}", self.to_possible_value() - .vortex_expect("Invalid flavour value") + .expect("Invalid flavour value") .get_name() .to_lowercase() ) diff --git a/vortex-bench/src/lib.rs b/vortex-bench/src/lib.rs index 6dad0f0f6a1..c0a9c64bdcb 100644 --- a/vortex-bench/src/lib.rs +++ b/vortex-bench/src/lib.rs @@ -26,7 +26,6 @@ use tpcds::TpcDsBenchmark; use tpch::benchmark::TpcHBenchmark; pub use utils::file::*; pub use utils::logging::*; -use vortex::error::VortexExpect; use vortex::error::vortex_err; use vortex::file::VortexWriteOptions; use vortex::file::WriteStrategyBuilder; @@ -95,7 +94,7 @@ impl FromStr for Target { e ) }) - .vortex_expect("operation should succeed in benchmark"), + .expect("operation should succeed in benchmark"), format: Format::from_str(format_str, true) .map_err(|e| { vortex_err!( @@ -105,7 +104,7 @@ impl FromStr for Target { e ) }) - .vortex_expect("operation should 
succeed in benchmark"), + .expect("operation should succeed in benchmark"), }) } } diff --git a/vortex-bench/src/measurements.rs b/vortex-bench/src/measurements.rs index f49349cd95e..9d94f95c95b 100644 --- a/vortex-bench/src/measurements.rs +++ b/vortex-bench/src/measurements.rs @@ -15,7 +15,6 @@ use serde::Deserialize; use serde::Serialize; use serde::Serializer; use target_lexicon::Triple; -use vortex::error::VortexExpect; use vortex::error::vortex_panic; use crate::BenchmarkDataset; @@ -185,9 +184,9 @@ impl TimingMeasurement { let mean_nanos = total_nanos / len as u128; Duration::new( u64::try_from(mean_nanos / 1_000_000_000) - .vortex_expect("nanosecond conversion must fit in u64/u32"), + .expect("nanosecond conversion must fit in u64/u32"), u32::try_from(mean_nanos % 1_000_000_000) - .vortex_expect("nanosecond conversion must fit in u64/u32"), + .expect("nanosecond conversion must fit in u64/u32"), ) } @@ -266,9 +265,9 @@ impl QueryMeasurement { let avg_nanos = (mid1.as_nanos() + mid2.as_nanos()) / 2; Duration::new( u64::try_from(avg_nanos / 1_000_000_000) - .vortex_expect("nanosecond conversion must fit in u64/u32"), + .expect("nanosecond conversion must fit in u64/u32"), u32::try_from(avg_nanos % 1_000_000_000) - .vortex_expect("nanosecond conversion must fit in u64/u32"), + .expect("nanosecond conversion must fit in u64/u32"), ) } } diff --git a/vortex-bench/src/statpopgen/builder.rs b/vortex-bench/src/statpopgen/builder.rs index 25b6a2735b2..2338eb8499b 100644 --- a/vortex-bench/src/statpopgen/builder.rs +++ b/vortex-bench/src/statpopgen/builder.rs @@ -19,7 +19,6 @@ use noodles_vcf::Header; use noodles_vcf::Record; use noodles_vcf::record::Info; use noodles_vcf::variant::record::info::field::Value; -use vortex::error::VortexExpect as _; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::error::vortex_err; @@ -368,14 +367,14 @@ impl<'a> GnomADBuilder<'a> { all_fields.remove(name); self.info_builder .get_mut(name) - 
.vortex_expect("key must exist") + .expect("key must exist") .push(value)?; } for missing_field in all_fields { self.info_builder .get_mut(missing_field) - .vortex_expect("key must exist") + .expect("key must exist") .push(None)?; } @@ -432,7 +431,7 @@ impl<'a> GnomADBuilder<'a> { .map(|field| { self.info_builder .remove(field.name().as_str()) - .vortex_expect("field must exist") + .expect("field must exist") .finish() }); diff --git a/vortex-bench/src/tpch/mod.rs b/vortex-bench/src/tpch/mod.rs index 7c36ffb6950..369168c8910 100644 --- a/vortex-bench/src/tpch/mod.rs +++ b/vortex-bench/src/tpch/mod.rs @@ -8,8 +8,6 @@ pub mod benchmark; pub mod schema; pub mod tpchgen; -use vortex::error::VortexExpect; - pub const TPC_H_ROW_COUNT_ARRAY_LENGTH: usize = 23; pub const EXPECTED_ROW_COUNTS_SF1: [usize; TPC_H_ROW_COUNT_ARRAY_LENGTH] = [ // The 0th entry is a dummy so that Query 1's row count is at index 1. @@ -32,5 +30,5 @@ fn tpch_query(query_idx: usize) -> String { .join("tpch") .join(format!("q{query_idx}")) .with_extension("sql"); - fs::read_to_string(manifest_dir).vortex_expect("cannot load tpch query from file") + fs::read_to_string(manifest_dir).expect("cannot load tpch query from file") } diff --git a/vortex-bench/src/tpch/tpchgen.rs b/vortex-bench/src/tpch/tpchgen.rs index 75fe4db9e8f..c8d4e3729f9 100644 --- a/vortex-bench/src/tpch/tpchgen.rs +++ b/vortex-bench/src/tpch/tpchgen.rs @@ -36,7 +36,6 @@ use vortex::array::arrow::FromArrowArray; use vortex::array::stream::ArrayStreamAdapter; use vortex::dtype::DType; use vortex::dtype::arrow::FromArrowType; -use vortex::error::VortexExpect; use vortex::file::WriteOptionsSessionExt; use crate::CompactionStrategy; @@ -431,7 +430,7 @@ impl FileWriter for VortexWriter { let array = ArrayRef::from_arrow(batch, false)?; self.sender .as_ref() - .vortex_expect("sender closed early") + .expect("sender closed early") .send(Ok(array)) .await .map_err(|_| anyhow!("Failed to send array to write task")) diff --git 
a/vortex-btrblocks/src/compressor/float/stats.rs b/vortex-btrblocks/src/compressor/float/stats.rs index eb9c337bc6a..c7ed394e994 100644 --- a/vortex-btrblocks/src/compressor/float/stats.rs +++ b/vortex-btrblocks/src/compressor/float/stats.rs @@ -13,7 +13,6 @@ use vortex_array::arrays::PrimitiveVTable; use vortex_dtype::NativePType; use vortex_dtype::PType; use vortex_dtype::half::f16; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_error::vortex_panic; @@ -84,7 +83,7 @@ impl CompressorStats for FloatStats { fn generate_opts(input: &PrimitiveArray, opts: GenerateStatsOptions) -> Self { Self::generate_opts_fallible(input, opts) - .vortex_expect("FloatStats::generate_opts should not fail") + .expect("FloatStats::generate_opts should not fail") } fn source(&self) -> &PrimitiveArray { @@ -136,7 +135,7 @@ where } else if array.all_invalid()? { return Ok(FloatStats { src: array.clone(), - null_count: u32::try_from(array.len())?, + null_count: u32::try_from(array.len()).expect("null count must fit in u32"), value_count: 0, average_run_length: 0, distinct_values_count: 0, @@ -166,7 +165,7 @@ where let mut runs = 1; let head_idx = validity .first() - .vortex_expect("All null masks have been handled before"); + .expect("All null masks have been handled before"); let buff = array.to_buffer::(); let mut prev = buff[head_idx]; @@ -204,10 +203,10 @@ where } } - let null_count = u32::try_from(null_count)?; - let value_count = u32::try_from(value_count)?; + let null_count = u32::try_from(null_count).expect("null count must fit in u32"); + let value_count = u32::try_from(value_count).expect("value count must fit in u32"); let distinct_values_count = if count_distinct_values { - u32::try_from(distinct_values.len())? 
+ u32::try_from(distinct_values.len()).unwrap_or(u32::MAX) } else { u32::MAX }; diff --git a/vortex-btrblocks/src/compressor/integer/mod.rs b/vortex-btrblocks/src/compressor/integer/mod.rs index b3b8156aaa1..f7eec3496e9 100644 --- a/vortex-btrblocks/src/compressor/integer/mod.rs +++ b/vortex-btrblocks/src/compressor/integer/mod.rs @@ -21,7 +21,6 @@ use vortex_array::arrays::PrimitiveVTable; use vortex_array::scalar::Scalar; use vortex_array::vtable::VTable; use vortex_array::vtable::ValidityHelper; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -350,7 +349,7 @@ impl Scheme for FORScheme { .ptype() .bit_width() .try_into() - .vortex_expect("bit width must fit in u32"); + .expect("bit width must fit in u32"); let bw = match stats.typed.max_minus_min().checked_ilog2() { Some(l) => l + 1, // If max-min == 0, it we should use a different compression scheme diff --git a/vortex-btrblocks/src/compressor/integer/stats.rs b/vortex-btrblocks/src/compressor/integer/stats.rs index 125a31e7a2f..9b915be534e 100644 --- a/vortex-btrblocks/src/compressor/integer/stats.rs +++ b/vortex-btrblocks/src/compressor/integer/stats.rs @@ -16,7 +16,6 @@ use vortex_buffer::BitBuffer; use vortex_dtype::IntegerPType; use vortex_dtype::match_each_integer_ptype; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::AllOr; use vortex_utils::aliases::hash_map::HashMap; @@ -89,7 +88,7 @@ impl ErasedStats { ErasedStats::I16(x) => (x.max as i32 - x.min as i32) as u64, ErasedStats::I32(x) => (x.max as i64 - x.min as i64) as u64, ErasedStats::I64(x) => u64::try_from(x.max as i128 - x.min as i128) - .vortex_expect("max minus min result bigger than u64"), + .expect("max minus min result bigger than u64"), } } @@ -156,7 +155,7 @@ impl CompressorStats for IntegerStats { fn generate_opts(input: &PrimitiveArray, opts: GenerateStatsOptions) -> Self { 
Self::generate_opts_fallible(input, opts) - .vortex_expect("IntegerStats::generate_opts should not fail") + .expect("IntegerStats::generate_opts should not fail") } fn source(&self) -> &PrimitiveArray { @@ -213,7 +212,7 @@ where } else if array.all_invalid()? { return Ok(IntegerStats { src: array.clone(), - null_count: u32::try_from(array.len())?, + null_count: u32::try_from(array.len()).expect("null count must fit in u32"), value_count: 0, average_run_length: 0, distinct_values_count: 0, @@ -235,7 +234,7 @@ where // Initialize loop state let head_idx = validity .first() - .vortex_expect("All null masks have been handled before"); + .expect("All null masks have been handled before"); let buffer = array.to_buffer::(); let head = buffer[head_idx]; @@ -255,7 +254,7 @@ where AllOr::All => { for chunk in &mut chunks { inner_loop_nonnull( - chunk.try_into().ok().vortex_expect("chunk size must be 64"), + chunk.try_into().expect("chunk size must be 64"), count_distinct_values, &mut loop_state, ) @@ -281,13 +280,13 @@ where 0 => continue, // Inner loop for when validity check can be elided 64 => inner_loop_nonnull( - chunk.try_into().ok().vortex_expect("chunk size must be 64"), + chunk.try_into().expect("chunk size must be 64"), count_distinct_values, &mut loop_state, ), // Inner loop for when we need to check validity _ => inner_loop_nullable( - chunk.try_into().ok().vortex_expect("chunk size must be 64"), + chunk.try_into().expect("chunk size must be 64"), count_distinct_values, &validity, &mut loop_state, @@ -310,7 +309,7 @@ where .distinct_values .iter() .max_by_key(|&(_, &count)| count) - .vortex_expect("non-empty"); + .expect("non-empty"); (top_value.0, top_count) } else { (T::default(), 0) @@ -318,7 +317,7 @@ where let runs = loop_state.runs; let distinct_values_count = if count_distinct_values { - u32::try_from(loop_state.distinct_values.len())? 
+ u32::try_from(loop_state.distinct_values.len()).unwrap_or(u32::MAX) } else { u32::MAX }; @@ -326,12 +325,12 @@ where let min = array .statistics() .compute_as::(Stat::Min) - .vortex_expect("min should be computed"); + .expect("min should be computed"); let max = array .statistics() .compute_as::(Stat::Max) - .vortex_expect("max should be computed"); + .expect("max should be computed"); let typed = TypedStats { min, @@ -341,8 +340,8 @@ where - let null_count = u32::try_from(null_count)?; - let value_count = u32::try_from(value_count)?; + let null_count = u32::try_from(null_count).expect("null count must fit in u32"); + let value_count = u32::try_from(value_count).expect("value count must fit in u32"); Ok(IntegerStats { src: array.clone(), diff --git a/vortex-btrblocks/src/compressor/string.rs b/vortex-btrblocks/src/compressor/string.rs index 7de5cb5c52b..b0cc5d16580 100644 --- a/vortex-btrblocks/src/compressor/string.rs +++ b/vortex-btrblocks/src/compressor/string.rs @@ -20,7 +20,6 @@ use vortex_array::compute::is_constant; use vortex_array::scalar::Scalar; use vortex_array::vtable::VTable; use vortex_array::vtable::ValidityHelper; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_fsst::FSSTArray; @@ -55,7 +54,7 @@ pub struct StringStats { } /// Estimate the number of distinct strings in the var bin view array. -fn estimate_distinct_count(strings: &VarBinViewArray) -> VortexResult { +fn estimate_distinct_count(strings: &VarBinViewArray) -> u32 { let views = strings.views(); // Iterate the views. Two strings which are equal must have the same first 8-bytes. // NOTE: there are cases where this performs pessimally, e.g. when we have strings that all 
distinct.insert(len_and_prefix); }); - Ok(u32::try_from(distinct.len())?) 
+ u32::try_from(distinct.len()).unwrap_or(u32::MAX) } impl StringStats { @@ -84,15 +83,15 @@ impl StringStats { .ok_or_else(|| vortex_err!("Failed to compute null_count"))?; let value_count = input.len() - null_count; let estimated_distinct = if opts.count_distinct_values { - estimate_distinct_count(input)? + estimate_distinct_count(input) } else { u32::MAX }; Ok(Self { src: input.clone(), - value_count: u32::try_from(value_count)?, - null_count: u32::try_from(null_count)?, + value_count: u32::try_from(value_count).expect("value count must fit in u32"), + null_count: u32::try_from(null_count).expect("null count must fit in u32"), estimated_distinct_count: estimated_distinct, }) } @@ -103,7 +102,7 @@ impl CompressorStats for StringStats { fn generate_opts(input: &VarBinViewArray, opts: GenerateStatsOptions) -> Self { Self::generate_opts_fallible(input, opts) - .vortex_expect("StringStats::generate_opts should not fail") + .expect("StringStats::generate_opts should not fail") } fn source(&self) -> &VarBinViewArray { diff --git a/vortex-btrblocks/src/sample.rs b/vortex-btrblocks/src/sample.rs index b841a5f3fb2..72a5ee0ca85 100644 --- a/vortex-btrblocks/src/sample.rs +++ b/vortex-btrblocks/src/sample.rs @@ -8,7 +8,6 @@ use vortex_array::Array; use vortex_array::ArrayRef; use vortex_array::IntoArray; use vortex_array::arrays::ChunkedArray; -use vortex_error::VortexExpect; use crate::stats::SAMPLE_COUNT; use crate::stats::SAMPLE_SIZE; @@ -28,14 +27,10 @@ pub(crate) fn sample(input: &dyn Array, sample_size: u32, sample_count: u32) -> // For every slice, grab the relevant slice and repack into a new PrimitiveArray. 
let chunks: Vec<_> = slices .into_iter() - .map(|(start, end)| { - input - .slice(start..end) - .vortex_expect("slice should succeed") - }) + .map(|(start, end)| input.slice(start..end).expect("slice should succeed")) .collect(); ChunkedArray::try_new(chunks, input.dtype().clone()) - .vortex_expect("sample slices should form valid chunked array") + .expect("sample slices should form valid chunked array") .into_array() } @@ -43,12 +38,12 @@ pub(crate) fn sample(input: &dyn Array, sample_size: u32, sample_count: u32) -> /// with a minimum of `SAMPLE_SIZE * SAMPLE_COUNT` (1024) values. pub(crate) fn sample_count_approx_one_percent(len: usize) -> u32 { let approximately_one_percent = - (len / 100) / usize::try_from(SAMPLE_SIZE).vortex_expect("SAMPLE_SIZE must fit in usize"); + (len / 100) / usize::try_from(SAMPLE_SIZE).expect("SAMPLE_SIZE must fit in usize"); u32::max( u32::next_multiple_of( approximately_one_percent .try_into() - .vortex_expect("sample count must fit in u32"), + .expect("sample count must fit in u32"), 16, ), SAMPLE_COUNT, diff --git a/vortex-buffer/benches/vortex_buffer.rs b/vortex-buffer/benches/vortex_buffer.rs index 712890a5a4a..298ddc5d8c5 100644 --- a/vortex-buffer/benches/vortex_buffer.rs +++ b/vortex-buffer/benches/vortex_buffer.rs @@ -12,7 +12,6 @@ use divan::Bencher; use num_traits::PrimInt; use vortex_buffer::Buffer; use vortex_buffer::BufferMut; -use vortex_error::VortexExpect; use vortex_error::vortex_err; fn main() { @@ -65,7 +64,7 @@ impl MapEach for Arrow() .map_err(|_| vortex_err!("Failed to convert Arrow buffer into a mut vec")) - .vortex_expect("Failed to convert Arrow buffer into a mut vec") + .expect("Failed to convert Arrow buffer into a mut vec") .into_iter() .map(f) .collect::>(), diff --git a/vortex-buffer/src/alignment.rs b/vortex-buffer/src/alignment.rs index bcd17a64f10..9987acf8b86 100644 --- a/vortex-buffer/src/alignment.rs +++ b/vortex-buffer/src/alignment.rs @@ -5,7 +5,6 @@ use std::fmt::Display; use std::ops::Deref; 
use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::vortex_err; /// Default alignment for device-to-host buffer copies. @@ -76,7 +75,7 @@ impl Alignment { /// Returns the log2 of the alignment. pub fn exponent(&self) -> u8 { u8::try_from(self.0.trailing_zeros()) - .vortex_expect("alignment fits into u16, so exponent fits in u7") + .expect("alignment fits into u16, so exponent fits in u7") } /// Create from the log2 exponent of the alignment. @@ -129,14 +128,14 @@ impl From for usize { impl From for u16 { #[inline] fn from(value: Alignment) -> Self { - u16::try_from(value.0).vortex_expect("Alignment must fit into u16") + u16::try_from(value.0).expect("Alignment must fit into u16") } } impl From for u32 { #[inline] fn from(value: Alignment) -> Self { - u32::try_from(value.0).vortex_expect("Alignment must fit into u32") + u32::try_from(value.0).expect("Alignment must fit into u32") } } diff --git a/vortex-buffer/src/buffer.rs b/vortex-buffer/src/buffer.rs index f941db6dbec..891c6c5de1e 100644 --- a/vortex-buffer/src/buffer.rs +++ b/vortex-buffer/src/buffer.rs @@ -14,7 +14,6 @@ use std::ops::RangeBounds; use bytes::Buf; use bytes::Bytes; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use crate::Alignment; @@ -201,9 +200,8 @@ impl Buffer { /// Should be preferred over `from_iter` when the iterator is known to be `TrustedLen`. 
pub fn from_trusted_len_iter>(iter: I) -> Self { let (_, upper_bound) = iter.size_hint(); - let mut buffer = BufferMut::with_capacity( - upper_bound.vortex_expect("TrustedLen iterator has no upper bound"), - ); + let mut buffer = + BufferMut::with_capacity(upper_bound.expect("TrustedLen iterator has no upper bound")); buffer.extend_trusted(iter); buffer.freeze() } @@ -289,11 +287,11 @@ impl Buffer { let len = self.len(); let begin = match range.start_bound() { Bound::Included(&n) => n, - Bound::Excluded(&n) => n.checked_add(1).vortex_expect("out of range"), + Bound::Excluded(&n) => n.checked_add(1).expect("out of range"), Bound::Unbounded => 0, }; let end = match range.end_bound() { - Bound::Included(&n) => n.checked_add(1).vortex_expect("out of range"), + Bound::Included(&n) => n.checked_add(1).expect("out of range"), Bound::Excluded(&n) => n, Bound::Unbounded => len, }; diff --git a/vortex-buffer/src/buffer_mut.rs b/vortex-buffer/src/buffer_mut.rs index 51e6ca2cbb4..a5bc8cc953f 100644 --- a/vortex-buffer/src/buffer_mut.rs +++ b/vortex-buffer/src/buffer_mut.rs @@ -13,7 +13,6 @@ use bytes::Buf; use bytes::BufMut; use bytes::BytesMut; use bytes::buf::UninitSlice; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use crate::Alignment; @@ -598,8 +597,7 @@ impl BufferMut { // for this operation up front. let (_, upper_bound) = iter.size_hint(); self.reserve( - upper_bound - .vortex_expect("`TrustedLen` iterator somehow didn't have valid upper bound"), + upper_bound.expect("`TrustedLen` iterator somehow didn't have valid upper bound"), ); // We store `begin` in the case that the upper bound hint is incorrect. 
@@ -636,8 +634,7 @@ impl BufferMut { { let (_, upper_bound) = iter.size_hint(); let mut buffer = Self::with_capacity( - upper_bound - .vortex_expect("`TrustedLen` iterator somehow didn't have valid upper bound"), + upper_bound.expect("`TrustedLen` iterator somehow didn't have valid upper bound"), ); buffer.extend_trusted(iter); @@ -762,7 +759,7 @@ impl AlignedBytesMut for BytesMut { let padding = self.as_ptr().align_offset(*alignment); self.capacity() .checked_sub(padding) - .vortex_expect("Not enough capacity to align buffer"); + .expect("Not enough capacity to align buffer"); // SAFETY: We know the buffer is empty, and we know we have enough capacity, so we can // safely set the length to the padding and advance the buffer to the aligned offset. diff --git a/vortex-buffer/src/bytes.rs b/vortex-buffer/src/bytes.rs index 72a61b5794c..4ea63f316dc 100644 --- a/vortex-buffer/src/bytes.rs +++ b/vortex-buffer/src/bytes.rs @@ -2,7 +2,6 @@ // SPDX-FileCopyrightText: Copyright the Vortex contributors use bytes::Buf; -use vortex_error::VortexExpect; use crate::Alignment; use crate::ByteBuffer; @@ -40,7 +39,7 @@ pub trait AlignedBuf: Buf { // The default implementation uses copy_to_bytes, and then returns a ByteBuffer with // alignment of 1. This will be zero-copy if the underlying `copy_to_bytes` is zero-copy. 
ConstBuffer::try_from(self.copy_to_aligned(len, Alignment::new(A))) - .vortex_expect("we just aligned the buffer") + .expect("we just aligned the buffer") } } diff --git a/vortex-compute/src/filter/mask.rs b/vortex-compute/src/filter/mask.rs index 53f68da9c38..317bb0e862a 100644 --- a/vortex-compute/src/filter/mask.rs +++ b/vortex-compute/src/filter/mask.rs @@ -2,7 +2,6 @@ // SPDX-FileCopyrightText: Copyright the Vortex contributors use vortex_buffer::BitView; -use vortex_error::VortexExpect; use vortex_mask::Mask; use vortex_mask::MaskMut; use vortex_mask::MaskValues; @@ -186,7 +185,7 @@ impl Filter> for &mut MaskMut { return; } self.as_bit_buffer_mut() - .vortex_expect("Checked all-true and all-false cases; should have bit buffer") + .expect("Checked all-true and all-false cases; should have bit buffer") .filter(selection); } } diff --git a/vortex-compute/src/lib.rs b/vortex-compute/src/lib.rs index 881414fb0a1..67011df2733 100644 --- a/vortex-compute/src/lib.rs +++ b/vortex-compute/src/lib.rs @@ -5,7 +5,6 @@ #![cfg_attr(vortex_nightly, feature(portable_simd))] #![deny(missing_docs)] -#![deny(clippy::missing_panics_doc)] #![deny(clippy::missing_safety_doc)] pub mod arithmetic; diff --git a/vortex-cuda/benches/bitpacked_cuda.rs b/vortex-cuda/benches/bitpacked_cuda.rs index 880ce673e6d..5cf3d54ed34 100644 --- a/vortex-cuda/benches/bitpacked_cuda.rs +++ b/vortex-cuda/benches/bitpacked_cuda.rs @@ -27,7 +27,6 @@ use vortex_cuda::executor::CudaArrayExt; use vortex_cuda_macros::cuda_available; use vortex_cuda_macros::cuda_not_available; use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_fastlanes::BitPackedArray; use vortex_fastlanes::unpack_iter::BitPacked; use vortex_session::VortexSession; @@ -52,7 +51,7 @@ where let primitive_array = PrimitiveArray::new(Buffer::from(values), NonNullable); BitPackedArray::encode(primitive_array.as_ref(), bit_width) - .vortex_expect("failed to create BitPacked array") + .expect("failed to create BitPacked 
array") } /// Generic benchmark function for a specific type and bit width @@ -78,7 +77,7 @@ where let timer = Arc::clone(&timed.total_time_ns); let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context") + .expect("failed to create execution context") .with_launch_strategy(Arc::new(timed)); for _ in 0..iters { diff --git a/vortex-cuda/benches/date_time_parts_cuda.rs b/vortex-cuda/benches/date_time_parts_cuda.rs index 2f01fd8f3b8..83fce7c0a85 100644 --- a/vortex-cuda/benches/date_time_parts_cuda.rs +++ b/vortex-cuda/benches/date_time_parts_cuda.rs @@ -31,7 +31,6 @@ use vortex_dtype::DType; use vortex_dtype::Nullability; use vortex_dtype::datetime::TimeUnit; use vortex_dtype::datetime::Timestamp; -use vortex_error::VortexExpect; use vortex_session::VortexSession; use crate::common::TimedLaunchStrategy; @@ -45,7 +44,7 @@ fn make_datetimeparts_array(len: usize, time_unit: TimeUnit) -> DateTimePartsArr let dtype = DType::Extension(Timestamp::new(time_unit, Nullability::NonNullable).erased()); DateTimePartsArray::try_new(dtype, days_arr, seconds_arr, subseconds_arr) - .vortex_expect("Failed to create DateTimePartsArray") + .expect("Failed to create DateTimePartsArray") } fn benchmark_datetimeparts(c: &mut Criterion) { @@ -71,7 +70,7 @@ fn benchmark_datetimeparts(c: &mut Criterion) { let timer = Arc::clone(&timed.total_time_ns); let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context") + .expect("failed to create execution context") .with_launch_strategy(Arc::new(timed)); for _ in 0..iters { diff --git a/vortex-cuda/benches/dict_cuda.rs b/vortex-cuda/benches/dict_cuda.rs index f71867dcd5b..94a1da68fca 100644 --- a/vortex-cuda/benches/dict_cuda.rs +++ b/vortex-cuda/benches/dict_cuda.rs @@ -28,7 +28,6 @@ use vortex_cuda::executor::CudaArrayExt; use vortex_cuda_macros::cuda_available; use vortex_cuda_macros::cuda_not_available; 
use vortex_dtype::NativePType; -use vortex_error::VortexExpect; use vortex_session::VortexSession; use crate::common::TimedLaunchStrategy; @@ -62,7 +61,7 @@ where let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); DictArray::try_new(codes_array.into_array(), values_array.into_array()) - .vortex_expect("failed to create Dict array") + .expect("failed to create Dict array") } /// Benchmark Dict decompression for specific value and code types. @@ -96,12 +95,12 @@ where let timer = Arc::clone(&timed.total_time_ns); let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context") + .expect("failed to create execution context") .with_launch_strategy(Arc::new(timed)); for _ in 0..iters { block_on(dict_array.to_array().execute_cuda(&mut cuda_ctx)) - .vortex_expect("execute"); + .expect("execute"); } Duration::from_nanos(timer.load(Ordering::Relaxed)) diff --git a/vortex-cuda/benches/dynamic_dispatch_cuda.rs b/vortex-cuda/benches/dynamic_dispatch_cuda.rs index 8b0d136324a..9419ea00649 100644 --- a/vortex-cuda/benches/dynamic_dispatch_cuda.rs +++ b/vortex-cuda/benches/dynamic_dispatch_cuda.rs @@ -29,7 +29,6 @@ use vortex_cuda::dynamic_dispatch::ScalarOp; use vortex_cuda::dynamic_dispatch::SourceOp; use vortex_cuda_macros::cuda_available; use vortex_cuda_macros::cuda_not_available; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_fastlanes::BitPackedArray; @@ -57,8 +56,7 @@ fn make_bitpacked_array_u32(bit_width: u8, len: usize) -> BitPackedArray { .map(|i| (i as u64 % (max_val + 1)) as u32) .collect(); let primitive = PrimitiveArray::new(Buffer::from(values), NonNullable); - BitPackedArray::encode(primitive.as_ref(), bit_width) - .vortex_expect("failed to create BitPacked array") + BitPackedArray::encode(primitive.as_ref(), bit_width).expect("failed to create BitPacked array") } /// Launch the dynamic_dispatch kernel and return 
GPU-timed duration. @@ -130,19 +128,19 @@ fn run_dynamic_dispatch_bitpacked_timed( let device_input = if packed.is_on_device() { packed } else { - block_on(cuda_ctx.move_to_device(packed)?).vortex_expect("failed to move to device") + block_on(cuda_ctx.move_to_device(packed)?).expect("failed to move to device") }; let input_ptr = device_input .cuda_view::() - .vortex_expect("failed to get input view") + .expect("failed to get input view") .device_ptr(cuda_ctx.stream()) .0; // Allocate output buffer (padded to 1024-element chunks). let output_slice = cuda_ctx .device_alloc::(len.next_multiple_of(1024)) - .vortex_expect("failed to allocate output"); + .expect("failed to allocate output"); let output_buf = CudaDeviceBuffer::new(output_slice); let output_ptr = output_buf.as_view::().device_ptr(cuda_ctx.stream()).0; @@ -174,7 +172,7 @@ fn bench_bitunpack_for_dynamic_dispatch(c: &mut Criterion) { &bitpacked, |b, array| { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let device_plan = Arc::new( cuda_ctx @@ -192,7 +190,7 @@ fn bench_bitunpack_for_dynamic_dispatch(c: &mut Criterion) { array, &device_plan, ) - .vortex_expect("bitunpack+for dynamic_dispatch failed"); + .expect("bitunpack+for dynamic_dispatch failed"); total_time += kernel_time; } @@ -228,7 +226,7 @@ fn bench_bitunpack_for_alp_dynamic_dispatch(c: &mut Criterion) { &bitpacked, |b, array| { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let device_plan = Arc::new( cuda_ctx @@ -246,7 +244,7 @@ fn bench_bitunpack_for_alp_dynamic_dispatch(c: &mut Criterion) { array, &device_plan, ) - .vortex_expect("bitunpack+for+alp dynamic_dispatch failed"); + .expect("bitunpack+for+alp dynamic_dispatch failed"); total_time += kernel_time; } diff --git 
a/vortex-cuda/benches/filter_cuda.rs b/vortex-cuda/benches/filter_cuda.rs index ad7f8eb6ebf..ca6e44c9d41 100644 --- a/vortex-cuda/benches/filter_cuda.rs +++ b/vortex-cuda/benches/filter_cuda.rs @@ -27,7 +27,6 @@ use vortex_cuda::CudaExecutionCtx; use vortex_cuda::CudaSession; use vortex_cuda_macros::cuda_available; use vortex_cuda_macros::cuda_not_available; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_session::VortexSession; @@ -152,7 +151,7 @@ where b.iter_custom(|iters| { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let num_items = input_data.len() as i64; #[expect(clippy::expect_used)] @@ -165,7 +164,7 @@ where // Copy input to device let d_input_handle = block_on(cuda_ctx.copy_to_device(input_data.clone()).unwrap()) - .vortex_expect("failed to copy input to device"); + .expect("failed to copy input to device"); let d_input = d_input_handle .as_device() .as_any() @@ -175,7 +174,7 @@ where // Copy bitmask to device let d_bitmask_handle = block_on(cuda_ctx.copy_to_device(bitmask.clone()).unwrap()) - .vortex_expect("failed to copy bitmask to device"); + .expect("failed to copy bitmask to device"); let d_bitmask = d_bitmask_handle .as_device() .as_any() @@ -185,13 +184,13 @@ where // Allocate output and temp buffers let mut d_output: CudaSlice = cuda_ctx .device_alloc(*true_count) - .vortex_expect("failed to allocate output"); + .expect("failed to allocate output"); let mut d_temp: CudaSlice = cuda_ctx .device_alloc(temp_bytes.max(1)) - .vortex_expect("failed to allocate temp"); + .expect("failed to allocate temp"); let mut d_num_selected: CudaSlice = cuda_ctx .device_alloc(1) - .vortex_expect("failed to allocate num_selected"); + .expect("failed to allocate num_selected"); let kernel_time = block_on(run_filter_timed( d_input.as_view(), @@ -203,7 +202,7 @@ where 
temp_bytes, &mut cuda_ctx, )) - .vortex_expect("kernel execution failed"); + .expect("kernel execution failed"); total_time += kernel_time; } diff --git a/vortex-cuda/benches/for_cuda.rs b/vortex-cuda/benches/for_cuda.rs index 5c9fdf5ec60..d0505f7b3fa 100644 --- a/vortex-cuda/benches/for_cuda.rs +++ b/vortex-cuda/benches/for_cuda.rs @@ -30,7 +30,6 @@ use vortex_cuda_macros::cuda_available; use vortex_cuda_macros::cuda_not_available; use vortex_dtype::NativePType; use vortex_dtype::PType; -use vortex_error::VortexExpect; use vortex_fastlanes::BitPackedArray; use vortex_fastlanes::FoRArray; use vortex_session::VortexSession; @@ -55,13 +54,10 @@ where PrimitiveArray::new(Buffer::from(data), Validity::NonNullable).into_array(); if bp && T::PTYPE != PType::U8 { - let child = - BitPackedArray::encode(primitive_array.as_ref(), 8).vortex_expect("failed to bitpack"); - FoRArray::try_new(child.into_array(), reference.into()) - .vortex_expect("failed to create FoR array") + let child = BitPackedArray::encode(primitive_array.as_ref(), 8).expect("failed to bitpack"); + FoRArray::try_new(child.into_array(), reference.into()).expect("failed to create FoR array") } else { - FoRArray::try_new(primitive_array, reference.into()) - .vortex_expect("failed to create FoR array") + FoRArray::try_new(primitive_array, reference.into()).expect("failed to create FoR array") } } @@ -88,7 +84,7 @@ where let timer = Arc::clone(&timed.total_time_ns); let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context") + .expect("failed to create execution context") .with_launch_strategy(Arc::new(timed)); for _ in 0..iters { @@ -126,7 +122,7 @@ where let timer = Arc::clone(&timed.total_time_ns); let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context") + .expect("failed to create execution context") .with_launch_strategy(Arc::new(timed)); for _ in 0..iters { 
diff --git a/vortex-cuda/benches/zstd_cuda.rs b/vortex-cuda/benches/zstd_cuda.rs index a625378f1d4..408832c228d 100644 --- a/vortex-cuda/benches/zstd_cuda.rs +++ b/vortex-cuda/benches/zstd_cuda.rs @@ -19,7 +19,6 @@ use vortex_cuda::nvcomp::zstd as nvcomp_zstd; use vortex_cuda::zstd_kernel_prepare; use vortex_cuda_macros::cuda_available; use vortex_cuda_macros::cuda_not_available; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_session::VortexSession; @@ -117,7 +116,7 @@ fn benchmark_zstd_cuda_decompress(c: &mut Criterion) { for (num_strings, label) in BENCH_ARGS { let (zstd_array, uncompressed_size) = - make_zstd_array(*num_strings).vortex_expect("failed to create ZSTD array"); + make_zstd_array(*num_strings).expect("failed to create ZSTD array"); group.throughput(Throughput::Bytes(uncompressed_size as u64)); group.bench_with_input( @@ -126,7 +125,7 @@ fn benchmark_zstd_cuda_decompress(c: &mut Criterion) { |b, zstd_array| { b.iter_custom(|iters| { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let mut total_time = Duration::ZERO; @@ -135,9 +134,9 @@ fn benchmark_zstd_cuda_decompress(c: &mut Criterion) { frames, metadata, .. 
} = zstd_array.clone().into_parts(); let exec = block_on(zstd_kernel_prepare(frames, &metadata, &mut cuda_ctx)) - .vortex_expect("kernel setup failed"); + .expect("kernel setup failed"); let kernel_time = block_on(execute_zstd_kernel(exec, &mut cuda_ctx)) - .vortex_expect("kernel execution failed"); + .expect("kernel execution failed"); total_time += kernel_time; } diff --git a/vortex-cuda/src/arrow/canonical.rs b/vortex-cuda/src/arrow/canonical.rs index 3d06d9183ba..71ec04c93c8 100644 --- a/vortex-cuda/src/arrow/canonical.rs +++ b/vortex-cuda/src/arrow/canonical.rs @@ -290,7 +290,6 @@ mod tests { use vortex_dtype::DecimalDType; use vortex_dtype::FieldNames; use vortex_dtype::datetime::TimeUnit; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; @@ -314,7 +313,7 @@ mod tests { #[case] expected_len: i64, ) -> VortexResult<()> { let mut ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let mut device_array = array.export_device_array(&mut ctx).await?; @@ -333,7 +332,7 @@ mod tests { #[tokio::test] async fn test_export_null() -> VortexResult<()> { let mut ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let array = NullArray::new(7).into_array(); let mut device_array = array.export_device_array(&mut ctx).await?; @@ -349,7 +348,7 @@ mod tests { #[tokio::test] async fn test_export_decimal() -> VortexResult<()> { let mut ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let array = DecimalArray::from_iter(0i128..5, DecimalDType::new(38, 2)).into_array(); let mut device_array = array.export_device_array(&mut ctx).await?; @@ -368,7 +367,7 @@ mod tests { #[tokio::test] 
async fn test_export_temporal() -> VortexResult<()> { let mut ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let array = TemporalArray::new_date( PrimitiveArray::from_iter([100i32, 200, 300]).into_array(), @@ -391,7 +390,7 @@ mod tests { #[tokio::test] async fn test_export_varbinview() -> VortexResult<()> { let mut ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let array = VarBinViewArray::from_iter_str([ "hello", @@ -416,7 +415,7 @@ mod tests { #[tokio::test] async fn test_export_struct() -> VortexResult<()> { let mut ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let array = StructArray::new( FieldNames::from_iter(["a", "b"]), diff --git a/vortex-cuda/src/arrow/varbinview.rs b/vortex-cuda/src/arrow/varbinview.rs index 7ea57a26de0..f3ca2ed1abd 100644 --- a/vortex-cuda/src/arrow/varbinview.rs +++ b/vortex-cuda/src/arrow/varbinview.rs @@ -11,7 +11,6 @@ use cudarc::driver::PushKernelArg; use vortex_array::arrays::VarBinViewArray; use vortex_array::arrays::VarBinViewArrayParts; use vortex_array::buffer::BufferHandle; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; @@ -129,7 +128,7 @@ pub(crate) async fn copy_varbinview_to_varbin( let n_blocks = len .div_ceil(threads_per_blocks as usize) .try_into() - .vortex_expect("n_blocks should never overflow u32"); + .expect("n_blocks should never overflow u32"); let fully_parallel_cfg = LaunchConfig { grid_dim: (n_blocks, 1, 1), block_dim: (threads_per_blocks, 1, 1), diff --git a/vortex-cuda/src/device_buffer.rs b/vortex-cuda/src/device_buffer.rs index 02fc5e56aa1..3e40834339a 100644 --- 
a/vortex-cuda/src/device_buffer.rs +++ b/vortex-cuda/src/device_buffer.rs @@ -17,7 +17,6 @@ use vortex_array::buffer::DeviceBuffer; use vortex_buffer::Alignment; use vortex_buffer::ByteBuffer; use vortex_buffer::ByteBufferMut; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_error::vortex_panic; @@ -49,7 +48,6 @@ mod private { use cudarc::driver::CudaView; use cudarc::driver::DeviceRepr; use vortex_buffer::Alignment; - use vortex_error::VortexExpect; pub trait DeviceAllocation: Debug + Send + Sync + 'static { /// Get the minimum alignment of the allocation. @@ -76,7 +74,7 @@ mod private { let bytes_len = self.len() * size_of::(); // SAFETY: all types can be reinterpreted as a byte slice let result = unsafe { self.as_view().transmute::(bytes_len) }; - result.vortex_expect("Downcasting CudaSlice => CudaSlice must succeed") + result.expect("Downcasting CudaSlice => CudaSlice must succeed") } } } @@ -113,7 +111,7 @@ impl CudaDeviceBuffer { .as_bytes_view() .slice(self.offset..self.offset + self.len) .transmute::(new_len) - .vortex_expect("Failed to transmute from CudaView to CudaView") + .expect("Failed to transmute from CudaView to CudaView") } } } @@ -289,8 +287,7 @@ impl DeviceBuffer for CudaDeviceBuffer { let new_len = range.end - range.start; let trailing = (self.device_ptr + new_offset as u64).trailing_zeros(); - let exponent = - u8::try_from(min(15, trailing)).vortex_expect("min(15, x) always fits in u8"); + let exponent = u8::try_from(min(15, trailing)).expect("min(15, x) always fits in u8"); let slice_align = Alignment::from_exponent(exponent); assert!( diff --git a/vortex-cuda/src/dynamic_dispatch.rs b/vortex-cuda/src/dynamic_dispatch.rs index e244184abd1..d851afd180a 100644 --- a/vortex-cuda/src/dynamic_dispatch.rs +++ b/vortex-cuda/src/dynamic_dispatch.rs @@ -128,7 +128,6 @@ mod tests { use vortex_array::buffer::BufferHandle; use vortex_array::validity::Validity::NonNullable; use 
vortex_buffer::Buffer; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_fastlanes::BitPackedArray; use vortex_fastlanes::FoRArray; @@ -150,7 +149,7 @@ mod tests { .collect(); let primitive = PrimitiveArray::new(Buffer::from(values), NonNullable); BitPackedArray::encode(primitive.as_ref(), bit_width) - .vortex_expect("failed to create BitPacked array") + .expect("failed to create BitPacked array") } fn run_dynamic_dispatch_u32( @@ -161,7 +160,7 @@ mod tests { ) -> VortexResult> { let output_slice = cuda_ctx .device_alloc::(output_len) - .vortex_expect("alloc output"); + .expect("alloc output"); let output_buf = CudaDeviceBuffer::new(output_slice); let output_ptr = output_buf.as_view::().device_ptr(cuda_ctx.stream()).0; @@ -178,14 +177,15 @@ mod tests { let cuda_function = cuda_ctx .load_function("dynamic_dispatch", &["u32"]) - .vortex_expect("load kernel"); + .expect("load kernel"); let mut launch_builder = cuda_ctx.launch_builder(&cuda_function); launch_builder.arg(&input_ptr); launch_builder.arg(&output_ptr); launch_builder.arg(&array_len_u64); launch_builder.arg(&plan_ptr); - let num_blocks = u32::try_from(output_len.div_ceil(2048))?; + let num_blocks = + u32::try_from(output_len.div_ceil(2048)).expect("num_blocks must fit in u32"); let config = LaunchConfig { grid_dim: (num_blocks, 1, 1), block_dim: (64, 1, 1), @@ -219,11 +219,11 @@ mod tests { bitpacked: &BitPackedArray, ) -> VortexResult<(u64, BufferHandle)> { let packed = bitpacked.packed().clone(); - let device_input = futures::executor::block_on(cuda_ctx.move_to_device(packed)?) 
- .vortex_expect("move to device"); + let device_input = + futures::executor::block_on(cuda_ctx.move_to_device(packed)?).expect("move to device"); let ptr = device_input .cuda_view::() - .vortex_expect("input view") + .expect("input view") .device_ptr(cuda_ctx.stream()) .0; Ok((ptr, device_input)) diff --git a/vortex-cuda/src/kernel/arrays/constant.rs b/vortex-cuda/src/kernel/arrays/constant.rs index bf8629388f8..08a7d403c17 100644 --- a/vortex-cuda/src/kernel/arrays/constant.rs +++ b/vortex-cuda/src/kernel/arrays/constant.rs @@ -197,7 +197,6 @@ mod tests { use vortex_array::assert_arrays_eq; use vortex_array::scalar::Scalar; use vortex_dtype::NativePType; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; @@ -225,14 +224,14 @@ mod tests { #[case] constant_array: ConstantArray, ) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let cpu_result = constant_array.to_canonical()?; let gpu_result = ConstantNumericExecutor .execute(constant_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU materialization failed") + .expect("GPU materialization failed") .into_host() .await? .into_array(); @@ -245,7 +244,7 @@ mod tests { #[tokio::test] async fn test_cuda_constant_empty_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let constant_array = ConstantArray::new(42i32, 0); let cpu_result = constant_array.to_canonical()?; @@ -253,7 +252,7 @@ mod tests { let gpu_result = ConstantNumericExecutor .execute(constant_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU materialization failed") + .expect("GPU materialization failed") .into_host() .await? 
.into_array(); @@ -266,7 +265,7 @@ mod tests { #[tokio::test] async fn test_cuda_constant_small_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Test with array smaller than one block (< 2048 elements) let constant_array = ConstantArray::new(99i32, 100); @@ -275,7 +274,7 @@ mod tests { let gpu_result = ConstantNumericExecutor .execute(constant_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU materialization failed") + .expect("GPU materialization failed") .into_host() .await? .into_array(); diff --git a/vortex-cuda/src/kernel/arrays/dict.rs b/vortex-cuda/src/kernel/arrays/dict.rs index 26ecb17efcc..7af55f1401f 100644 --- a/vortex-cuda/src/kernel/arrays/dict.rs +++ b/vortex-cuda/src/kernel/arrays/dict.rs @@ -26,7 +26,6 @@ use vortex_dtype::NativePType; use vortex_dtype::match_each_decimal_value_type; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_native_simd_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; @@ -50,8 +49,7 @@ impl CudaExecute for DictExecutor { ) -> VortexResult { let dict_array = array .try_into::() - .ok() - .vortex_expect("Array is not a Dict array"); + .expect("Array is not a Dict array"); let values_dtype = dict_array.values().dtype().clone(); match &values_dtype { @@ -220,7 +218,7 @@ async fn execute_dict_decimal_typed< Ok(Canonical::Decimal(DecimalArray::new_handle( BufferHandle::new_device(Arc::new(output_device)), V::DECIMAL_TYPE, - output_dtype.into_decimal_opt().vortex_expect("is decimal"), + output_dtype.into_decimal_opt().expect("is decimal"), output_validity, ))) } @@ -312,7 +310,6 @@ mod tests { use vortex_buffer::Buffer; use vortex_dtype::DecimalDType; use vortex_dtype::i256; - use vortex_error::VortexExpect; use vortex_session::VortexSession; use super::*; @@ -331,7 +328,7 @@ mod 
tests { #[tokio::test] async fn test_cuda_dict_u32_values_u8_codes() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values: [100, 200, 300, 400] let values = PrimitiveArray::new(Buffer::from(vec![100u32, 200, 300, 400]), NonNullable); @@ -341,7 +338,7 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -350,7 +347,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -363,7 +360,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_u64_values_u16_codes() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values: large u64 values let values = PrimitiveArray::new( @@ -376,7 +373,7 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -385,7 +382,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") 
.into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -398,7 +395,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_i32_values_u32_codes() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values: signed integers including negatives let values = PrimitiveArray::new(Buffer::from(vec![-100i32, 0, 100, 200]), NonNullable); @@ -408,7 +405,7 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -417,7 +414,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -429,7 +426,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_large_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary with 256 values let values: Vec = (0..256).map(|i| i * 1000).collect(); @@ -439,7 +436,7 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values_array.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -448,7 +445,7 @@ mod tests { let cuda_result = DictExecutor 
.execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -461,7 +458,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_values_with_validity() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values with nulls: [100, null, 300, 400] let values = @@ -472,7 +469,7 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -480,7 +477,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.into_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -493,7 +490,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_codes_with_validity() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values: [100, 200, 300, 400] let values = PrimitiveArray::new(Buffer::from(vec![100u32, 200, 300, 400]), NonNullable); @@ -509,7 +506,7 @@ mod tests { ]); let dict_array = DictArray::try_new(codes.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -518,7 +515,7 @@ mod tests { let cuda_result = 
DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -530,7 +527,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_both_with_validity() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values with nulls: [100, null, 300, 400] let values = @@ -553,7 +550,7 @@ mod tests { ]); let dict_array = DictArray::try_new(codes.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -562,7 +559,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -574,7 +571,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_i64_values_with_validity() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Dictionary values with nulls (i64) let values = PrimitiveArray::from_option_iter(vec![ @@ -598,7 +595,7 @@ mod tests { ]); let dict_array = DictArray::try_new(codes.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -607,7 +604,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + 
.expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -619,7 +616,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_all_valid_matches_baseline() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Non-nullable values let values = PrimitiveArray::new(Buffer::from(vec![10u32, 20, 30, 40, 50]), NonNullable); @@ -631,7 +628,7 @@ mod tests { ); let dict_array = DictArray::try_new(codes.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); // Get baseline from CPU canonicalization let baseline = dict_array.to_canonical()?; @@ -640,7 +637,7 @@ mod tests { let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_primitive(); let cuda_result = cuda_primitive_to_host(cuda_result)?; @@ -662,7 +659,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_decimal_i8_values() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Precision 2 uses i8 backing type let decimal_dtype = DecimalDType::new(2, 1); @@ -672,14 +669,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_decimal(); let cuda_result = 
cuda_decimal_to_host(cuda_result)?; @@ -690,7 +687,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_decimal_i16_values() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Precision 4 uses i16 backing type let decimal_dtype = DecimalDType::new(4, 2); @@ -700,14 +697,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_decimal(); let cuda_result = cuda_decimal_to_host(cuda_result)?; @@ -718,7 +715,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_decimal_i32_values() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Precision 9 uses i32 backing type let decimal_dtype = DecimalDType::new(9, 4); @@ -728,14 +725,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_decimal(); let cuda_result = cuda_decimal_to_host(cuda_result)?; @@ -746,7 +743,7 @@ mod tests { #[tokio::test] async fn 
test_cuda_dict_decimal_i64_values() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Precision 18 uses i64 backing type let decimal_dtype = DecimalDType::new(18, 6); @@ -759,14 +756,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_decimal(); let cuda_result = cuda_decimal_to_host(cuda_result)?; @@ -777,7 +774,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_decimal_i128_values() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Precision 38 uses i128 backing type let decimal_dtype = DecimalDType::new(38, 10); @@ -795,14 +792,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_decimal(); let cuda_result = cuda_decimal_to_host(cuda_result)?; @@ -821,21 +818,21 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_values_u8_codes() -> VortexResult<()> { let mut cuda_ctx = 
CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let values = VarBinViewArray::from_iter_str(["cat", "dog", "bird", "fish"]); let codes: Vec = vec![0, 1, 2, 3, 0, 1, 2, 3, 2, 2, 1, 0]; let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -846,21 +843,21 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_values_u16_codes() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let values = VarBinViewArray::from_iter_str(["alpha", "beta", "gamma", "delta", "epsilon"]); let codes: Vec = vec![4, 3, 2, 1, 0, 0, 1, 2, 3, 4]; let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -871,7 +868,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_max_inlined_12_bytes() -> VortexResult<()> { let mut cuda_ctx = 
CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Exactly 12 bytes — the maximum inlined BinaryView size let values = @@ -880,14 +877,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -898,7 +895,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_outlined_views() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // 13+ bytes — outlined BinaryViews that reference data buffers let values = VarBinViewArray::from_iter_str([ @@ -910,14 +907,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -928,21 +925,21 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_empty_strings() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - 
.vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let values = VarBinViewArray::from_iter_str(["", "a", ""]); let codes: Vec = vec![0, 1, 2, 0, 1, 2]; let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -953,7 +950,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_values_with_validity() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let values = VarBinViewArray::from_iter_nullable_str([Some("hello"), None, Some("world")]); @@ -961,14 +958,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -979,7 +976,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_string_outlined_with_validity() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to 
create execution context"); // Mix of inlined, outlined, and null dictionary values let values = VarBinViewArray::from_iter_nullable_str([ @@ -994,14 +991,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_varbinview(); let cuda_result = cuda_varbinview_to_host(cuda_result).await?; @@ -1012,7 +1009,7 @@ mod tests { #[tokio::test] async fn test_cuda_dict_decimal_i256_values() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Precision 76 uses i256 backing type let decimal_dtype = DecimalDType::new(76, 20); @@ -1030,14 +1027,14 @@ mod tests { let codes_array = PrimitiveArray::new(Buffer::from(codes), NonNullable); let dict_array = DictArray::try_new(codes_array.into_array(), values.into_array()) - .vortex_expect("failed to create Dict array"); + .expect("failed to create Dict array"); let baseline = dict_array.to_canonical()?; let cuda_result = DictExecutor .execute(dict_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_decimal(); let cuda_result = cuda_decimal_to_host(cuda_result)?; diff --git a/vortex-cuda/src/kernel/arrays/shared.rs b/vortex-cuda/src/kernel/arrays/shared.rs index 914f5edbd32..1443cab6de4 100644 --- a/vortex-cuda/src/kernel/arrays/shared.rs +++ b/vortex-cuda/src/kernel/arrays/shared.rs @@ -6,7 +6,6 @@ use tracing::instrument; use vortex_array::ArrayRef; use vortex_array::Canonical; use 
vortex_array::arrays::SharedVTable; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use crate::executor::CudaArrayExt; @@ -27,8 +26,7 @@ impl CudaExecute for SharedExecutor { ) -> VortexResult { let shared = array .try_into::() - .ok() - .vortex_expect("Array is not a Shared array"); + .expect("Array is not a Shared array"); shared .get_or_compute_async(|source| source.execute_cuda(ctx)) diff --git a/vortex-cuda/src/kernel/encodings/alp.rs b/vortex-cuda/src/kernel/encodings/alp.rs index 65192f7445d..5cdf2656ba4 100644 --- a/vortex-cuda/src/kernel/encodings/alp.rs +++ b/vortex-cuda/src/kernel/encodings/alp.rs @@ -127,7 +127,6 @@ mod tests { use vortex_array::validity::Validity; use vortex_buffer::Buffer; use vortex_buffer::buffer; - use vortex_error::VortexExpect; use vortex_session::VortexSession; use super::*; @@ -137,7 +136,7 @@ mod tests { #[tokio::test] async fn test_cuda_alp_decompression_f32() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Create encoded values (what ALP would produce) // For f32 with exponents (e=0, f=2): decoded = encoded * F10[2] * IF10[0] @@ -168,7 +167,7 @@ mod tests { let gpu_result = ALPExecutor .execute(alp_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/encodings/bitpacked.rs b/vortex-cuda/src/kernel/encodings/bitpacked.rs index 775e9ea665b..7d63c843ac9 100644 --- a/vortex-cuda/src/kernel/encodings/bitpacked.rs +++ b/vortex-cuda/src/kernel/encodings/bitpacked.rs @@ -19,7 +19,6 @@ use vortex_cuda_macros::cuda_tests; use vortex_dtype::NativePType; use vortex_dtype::match_each_integer_ptype; use vortex_dtype::match_each_unsigned_integer_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -79,7 +78,7 @@ pub fn bitpacked_cuda_kernel( pub fn bitpacked_cuda_launch_config(output_width: usize, len: usize) -> VortexResult { let thread_count = bitpacked_thread_count(output_width); - let num_blocks = u32::try_from(len.div_ceil(1024))?; + let num_blocks = u32::try_from(len.div_ceil(1024)).expect("Number of blocks must fit u32"); Ok(LaunchConfig { grid_dim: (num_blocks, 1, 1), block_dim: (thread_count, 1, 1), @@ -132,7 +131,7 @@ where let buf = output_buf .as_any() .downcast_ref::() - .vortex_expect("we created this as CudaDeviceBuffer") + .expect("we created this as CudaDeviceBuffer") .clone(); let patched_buf = match_each_unsigned_integer_ptype!(p.indices_ptype()?, |I| { @@ -163,7 +162,6 @@ mod tests { use vortex_array::validity::Validity::NonNullable; use vortex_array::vtable::VTable; use vortex_buffer::Buffer; - use vortex_error::VortexExpect; use vortex_session::VortexSession; use super::*; @@ -173,7 +171,7 @@ mod tests { #[test] fn test_patches() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let array = PrimitiveArray::new((0u16..=513).collect::>(), NonNullable); @@ -187,7 +185,7 @@ mod tests { BitPackedExecutor .execute(bp_with_patches.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU 
decompression failed") .into_host() .await .map(|a| a.into_array()) @@ -208,26 +206,26 @@ mod tests { #[case::bw_7(7)] fn test_cuda_bitunpack_u8(#[case] bit_width: u8) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let max_val = (1u8 << bit_width).saturating_sub(1); let primitive_array = PrimitiveArray::new( (0u16..1024) - .map(|i| u8::try_from(i % (max_val as u16 + 1)).vortex_expect("")) + .map(|i| u8::try_from(i % (max_val as u16 + 1)).expect("")) .collect::>(), NonNullable, ); let bitpacked_array = BitPackedArray::encode(primitive_array.as_ref(), bit_width) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let cpu_result = bitpacked_array.to_canonical()?; let gpu_result = block_on(async { BitPackedExecutor .execute(bitpacked_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await .map(|a| a.into_array()) @@ -256,7 +254,7 @@ mod tests { #[case::bw_15(15)] fn test_cuda_bitunpack_u16(#[case] bit_width: u8) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let max_val = (1u16 << bit_width).saturating_sub(1); @@ -268,14 +266,14 @@ mod tests { ); let bitpacked_array = BitPackedArray::encode(primitive_array.as_ref(), bit_width) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let cpu_result = bitpacked_array.to_canonical()?; let gpu_result = block_on(async { BitPackedExecutor .execute(bitpacked_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await .map(|a| a.into_array()) @@ -320,7 +318,7 
@@ mod tests { #[case::bw_31(31)] fn test_cuda_bitunpack_u32(#[case] bit_width: u8) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let max_val = (1u32 << bit_width).saturating_sub(1); @@ -332,14 +330,14 @@ mod tests { ); let bitpacked_array = BitPackedArray::encode(primitive_array.as_ref(), bit_width) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let cpu_result = bitpacked_array.to_canonical()?; let gpu_result = block_on(async { BitPackedExecutor .execute(bitpacked_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await .map(|a| a.into_array()) @@ -416,7 +414,7 @@ mod tests { #[case::bw_63(63)] fn test_cuda_bitunpack_u64(#[case] bit_width: u8) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let max_val = (1u64 << bit_width).saturating_sub(1); @@ -428,13 +426,13 @@ mod tests { ); let bitpacked_array = BitPackedArray::encode(primitive_array.as_ref(), bit_width) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let cpu_result = bitpacked_array.to_canonical()?; let gpu_result = block_on(async { BitPackedExecutor .execute(bitpacked_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await .map(|a| a.into_array()) @@ -449,7 +447,7 @@ mod tests { fn test_cuda_bitunpack_sliced() -> VortexResult<()> { let bit_width = 32; let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution 
context"); let max_val = (1u64 << bit_width).saturating_sub(1); @@ -461,7 +459,7 @@ mod tests { ); let bitpacked_array = BitPackedArray::encode(primitive_array.as_ref(), bit_width) - .vortex_expect("operation should succeed in test"); + .expect("operation should succeed in test"); let slice_ref = bitpacked_array.clone().into_array().slice(67..3969)?; let mut exec_ctx = ExecutionCtx::new(VortexSession::empty().with::()); let sliced_array = ::execute_parent( @@ -476,7 +474,7 @@ mod tests { BitPackedExecutor .execute(sliced_array, &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await .map(|a| a.into_array()) diff --git a/vortex-cuda/src/kernel/encodings/date_time_parts.rs b/vortex-cuda/src/kernel/encodings/date_time_parts.rs index f1c82a72c91..aead2e38c32 100644 --- a/vortex-cuda/src/kernel/encodings/date_time_parts.rs +++ b/vortex-cuda/src/kernel/encodings/date_time_parts.rs @@ -214,7 +214,6 @@ mod tests { use vortex_buffer::buffer; use vortex_datetime_parts::DateTimePartsArray; use vortex_dtype::datetime::TimeUnit; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; @@ -247,7 +246,7 @@ mod tests { seconds_arr, subseconds_arr, ) - .vortex_expect("Failed to create DateTimePartsArray") + .expect("Failed to create DateTimePartsArray") } #[rstest] @@ -283,7 +282,7 @@ mod tests { #[case] time_unit: TimeUnit, ) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let dtp_array = make_datetimeparts_array(days, seconds, subseconds, time_unit); let cpu_result = dtp_array.to_canonical()?; @@ -291,7 +290,7 @@ mod tests { let gpu_result = DateTimePartsExecutor .execute(dtp_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() 
.await? .into_array(); @@ -304,7 +303,7 @@ mod tests { #[tokio::test] async fn test_cuda_datetimeparts_large_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let len = 2050; let days: Vec = (0..len).collect(); @@ -317,7 +316,7 @@ mod tests { let gpu_result = DateTimePartsExecutor .execute(dtp_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? .into_array(); @@ -330,7 +329,7 @@ mod tests { #[tokio::test] async fn test_cuda_datetimeparts_with_nulls() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let days_arr = PrimitiveArray::new( buffer![1i32, 2, 3, 4, 5], @@ -358,14 +357,14 @@ mod tests { seconds_arr, subseconds_arr, ) - .vortex_expect("Failed to create DateTimePartsArray"); + .expect("Failed to create DateTimePartsArray"); let cpu_result = dtp_array.to_canonical()?; let gpu_result = DateTimePartsExecutor .execute(dtp_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/encodings/decimal_byte_parts.rs b/vortex-cuda/src/kernel/encodings/decimal_byte_parts.rs index 9a50d30aec7..543e28b4049 100644 --- a/vortex-cuda/src/kernel/encodings/decimal_byte_parts.rs +++ b/vortex-cuda/src/kernel/encodings/decimal_byte_parts.rs @@ -62,7 +62,6 @@ mod tests { use vortex_buffer::Buffer; use vortex_decimal_byte_parts::DecimalBytePartsArray; use vortex_dtype::DecimalDType; - use vortex_error::VortexExpect; use vortex_session::VortexSession; use super::*; @@ -80,21 +79,21 @@ mod tests { #[case] scale: i8, ) { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("create execution context"); + .expect("create execution context"); let decimal_dtype = DecimalDType::new(precision, scale); let dbp_array = DecimalBytePartsArray::try_new( PrimitiveArray::new(encoded, Validity::NonNullable).into_array(), decimal_dtype, ) - .vortex_expect("create DecimalBytePartsArray"); + .expect("create DecimalBytePartsArray"); - let cpu_result = dbp_array.to_canonical().vortex_expect("CPU canonicalize"); + let cpu_result = dbp_array.to_canonical().expect("CPU canonicalize"); let gpu_result = DecimalBytePartsExecutor .execute(dbp_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decode"); + .expect("GPU decode"); assert_arrays_eq!(cpu_result.into_array(), gpu_result.into_array()); } diff --git a/vortex-cuda/src/kernel/encodings/for_.rs b/vortex-cuda/src/kernel/encodings/for_.rs index 60b4ebd1595..b8961c744a7 100644 --- a/vortex-cuda/src/kernel/encodings/for_.rs +++ b/vortex-cuda/src/kernel/encodings/for_.rs @@ -15,7 +15,6 @@ use vortex_array::arrays::PrimitiveArrayParts; use vortex_cuda_macros::cuda_tests; use vortex_dtype::NativePType; use vortex_dtype::match_each_native_simd_ptype; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -62,7 +61,7 @@ where .reference_scalar() .as_primitive() 
.as_::

() - .vortex_expect("Cannot have a null reference"); + .expect("Cannot have a null reference"); // Execute child and copy to device let canonical = array.encoded().clone().execute_cuda(ctx).await?; @@ -103,7 +102,6 @@ mod tests { use vortex_array::validity::Validity::NonNullable; use vortex_buffer::Buffer; use vortex_dtype::NativePType; - use vortex_error::VortexExpect; use vortex_fastlanes::FoRArray; use vortex_session::VortexSession; @@ -127,14 +125,14 @@ mod tests { #[tokio::test] async fn test_cuda_for_decompression(#[case] for_array: FoRArray) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let cpu_result = for_array.to_canonical()?; let gpu_result = FoRExecutor .execute(for_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? .into_array(); diff --git a/vortex-cuda/src/kernel/encodings/runend.rs b/vortex-cuda/src/kernel/encodings/runend.rs index eb781ab6716..3fad7ccca3f 100644 --- a/vortex-cuda/src/kernel/encodings/runend.rs +++ b/vortex-cuda/src/kernel/encodings/runend.rs @@ -168,7 +168,6 @@ mod tests { use vortex_array::assert_arrays_eq; use vortex_array::validity::Validity; use vortex_buffer::Buffer; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_runend::RunEndArray; use vortex_session::VortexSession; @@ -199,14 +198,14 @@ mod tests { #[tokio::test] async fn test_cuda_runend_types(#[case] runend_array: RunEndArray) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let cpu_result = runend_array.to_canonical()?; let gpu_result = RunEndExecutor .execute(runend_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression 
failed") + .expect("GPU decompression failed") .into_host() .await? .into_array(); @@ -219,7 +218,7 @@ mod tests { #[tokio::test] async fn test_cuda_runend_large_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let num_runs = 41; let run_length = 50; @@ -236,7 +235,7 @@ mod tests { let gpu_result = RunEndExecutor .execute(runend_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? .into_array(); @@ -249,7 +248,7 @@ mod tests { #[tokio::test] async fn test_cuda_runend_single_run() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let runend_array = make_runend_array(vec![100u32], vec![42i32]); @@ -258,7 +257,7 @@ mod tests { let gpu_result = RunEndExecutor .execute(runend_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? .into_array(); @@ -271,7 +270,7 @@ mod tests { #[tokio::test] async fn test_cuda_runend_many_small_runs() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // Create an array where each run has length 1. let num_elements = 2050; @@ -285,7 +284,7 @@ mod tests { let gpu_result = RunEndExecutor .execute(runend_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/encodings/zigzag.rs b/vortex-cuda/src/kernel/encodings/zigzag.rs index 393a87eed43..72e33c34478 100644 --- a/vortex-cuda/src/kernel/encodings/zigzag.rs +++ b/vortex-cuda/src/kernel/encodings/zigzag.rs @@ -103,7 +103,6 @@ mod tests { use vortex_array::assert_arrays_eq; use vortex_array::validity::Validity::NonNullable; use vortex_buffer::Buffer; - use vortex_error::VortexExpect; use vortex_session::VortexSession; use vortex_zigzag::ZigZagArray; @@ -114,7 +113,7 @@ mod tests { #[tokio::test] async fn test_cuda_zigzag_decompression_u32() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); // ZigZag encoding: 0->0, 1->-1, 2->1, 3->-2, 4->2, ... // So encoded [0, 2, 4, 1, 3] should decode to [0, 1, 2, -1, -2] @@ -129,7 +128,7 @@ mod tests { let gpu_result = ZigZagExecutor .execute(zigzag_array.to_array(), &mut cuda_ctx) .await - .vortex_expect("GPU decompression failed") + .expect("GPU decompression failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/encodings/zstd.rs b/vortex-cuda/src/kernel/encodings/zstd.rs index ed11bab81a3..062ff394641 100644 --- a/vortex-cuda/src/kernel/encodings/zstd.rs +++ b/vortex-cuda/src/kernel/encodings/zstd.rs @@ -24,7 +24,6 @@ use vortex_buffer::Buffer; use vortex_buffer::ByteBuffer; use vortex_cuda_macros::cuda_tests; use vortex_dtype::DType; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; use vortex_mask::AllOr; @@ -93,10 +92,7 @@ pub async fn zstd_kernel_prepare( let output_sizes: Vec = metadata .frames .iter() - .map(|m| { - usize::try_from(m.uncompressed_size) - .vortex_expect("uncompressed size must fit in usize") - }) + .map(|m| usize::try_from(m.uncompressed_size).expect("uncompressed size must fit in usize")) .collect(); let output_size_total: usize = output_sizes.iter().sum(); let output_size_max = output_sizes.iter().copied().max().unwrap_or(0); @@ -323,7 +319,7 @@ mod tests { #[tokio::test] async fn test_cuda_zstd_decompression_utf8() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let strings = VarBinViewArray::from_iter_str([ "hello", @@ -348,7 +344,7 @@ mod tests { #[tokio::test] async fn test_cuda_zstd_decompression_multiple_frames() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let strings = VarBinViewArray::from_iter_str([ "the quick brown fox jumps over the lazy dog", @@ -383,7 +379,7 @@ mod tests { #[tokio::test] async fn test_cuda_zstd_decompression_sliced() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution 
context"); let strings = VarBinViewArray::from_iter_str([ "the quick brown fox jumps over the lazy dog", diff --git a/vortex-cuda/src/kernel/encodings/zstd_buffers.rs b/vortex-cuda/src/kernel/encodings/zstd_buffers.rs index 509ed41f121..5e8a8172bee 100644 --- a/vortex-cuda/src/kernel/encodings/zstd_buffers.rs +++ b/vortex-cuda/src/kernel/encodings/zstd_buffers.rs @@ -206,7 +206,6 @@ mod tests { use vortex_array::arrays::PrimitiveArray; use vortex_array::arrays::VarBinViewArray; use vortex_array::assert_arrays_eq; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_session::VortexSession; use vortex_zstd::ZstdBuffersArray; @@ -218,7 +217,7 @@ mod tests { #[tokio::test] async fn test_cuda_zstd_buffers_decompression_primitive() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let input = PrimitiveArray::from_iter(0i64..1024).into_array(); let compressed = ZstdBuffersArray::compress(&input, 3)?; @@ -237,7 +236,7 @@ mod tests { #[tokio::test] async fn test_cuda_zstd_buffers_decompression_varbinview() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create execution context"); + .expect("failed to create execution context"); let input = VarBinViewArray::from_iter_str([ "hello", diff --git a/vortex-cuda/src/kernel/filter/decimal.rs b/vortex-cuda/src/kernel/filter/decimal.rs index 80315a59ba0..4dcc1c50756 100644 --- a/vortex-cuda/src/kernel/filter/decimal.rs +++ b/vortex-cuda/src/kernel/filter/decimal.rs @@ -46,7 +46,6 @@ mod tests { use vortex_array::assert_arrays_eq; use vortex_dtype::DecimalDType; use vortex_dtype::i256; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; use vortex_session::VortexSession; @@ -91,7 +90,7 @@ mod tests { #[case] mask: Mask, ) -> VortexResult<()> { 
let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create CUDA execution context"); + .expect("failed to create CUDA execution context"); let filter_array = FilterArray::try_new(input.clone().into_array(), mask.clone())?; @@ -100,7 +99,7 @@ mod tests { let gpu_result = FilterExecutor .execute(filter_array.into_array(), &mut cuda_ctx) .await - .vortex_expect("GPU filter failed") + .expect("GPU filter failed") .into_host() .await? .into_array(); @@ -113,7 +112,7 @@ mod tests { #[tokio::test] async fn test_gpu_filter_decimal_large_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create CUDA execution context"); + .expect("failed to create CUDA execution context"); // Create a large array to test multi-block execution let data: Vec = (0..2050).collect(); @@ -129,7 +128,7 @@ mod tests { let gpu_result = FilterExecutor .execute(filter_array.into_array(), &mut cuda_ctx) .await - .vortex_expect("GPU filter failed") + .expect("GPU filter failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/filter/primitive.rs b/vortex-cuda/src/kernel/filter/primitive.rs index 80b1628d1de..653dbcaaa09 100644 --- a/vortex-cuda/src/kernel/filter/primitive.rs +++ b/vortex-cuda/src/kernel/filter/primitive.rs @@ -44,7 +44,6 @@ mod tests { use vortex_array::arrays::FilterArray; use vortex_array::arrays::PrimitiveArray; use vortex_array::assert_arrays_eq; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; use vortex_session::VortexSession; @@ -85,7 +84,7 @@ mod tests { #[case] mask: Mask, ) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create CUDA execution context"); + .expect("failed to create CUDA execution context"); let filter_array = FilterArray::try_new(input.clone().into_array(), mask.clone())?; @@ -94,7 +93,7 @@ mod tests { let gpu_result = FilterExecutor .execute(filter_array.into_array(), &mut cuda_ctx) .await - .vortex_expect("GPU filter failed") + .expect("GPU filter failed") .into_host() .await? .into_array(); @@ -107,7 +106,7 @@ mod tests { #[tokio::test] async fn test_gpu_filter_large_array() -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create CUDA execution context"); + .expect("failed to create CUDA execution context"); // Create a large array to test multi-block execution let data: Vec = (0..2050).collect(); @@ -123,7 +122,7 @@ mod tests { let gpu_result = FilterExecutor .execute(filter_array.into_array(), &mut cuda_ctx) .await - .vortex_expect("GPU filter failed") + .expect("GPU filter failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/filter/varbinview.rs b/vortex-cuda/src/kernel/filter/varbinview.rs index 48eb94e20df..58db2d54bb8 100644 --- a/vortex-cuda/src/kernel/filter/varbinview.rs +++ b/vortex-cuda/src/kernel/filter/varbinview.rs @@ -44,7 +44,6 @@ mod tests { use vortex_array::arrays::FilterArray; use vortex_array::arrays::VarBinViewArray; use vortex_array::assert_arrays_eq; - use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_mask::Mask; use vortex_session::VortexSession; @@ -71,7 +70,7 @@ mod tests { #[case] mask: Mask, ) -> VortexResult<()> { let mut cuda_ctx = CudaSession::create_execution_ctx(&VortexSession::empty()) - .vortex_expect("failed to create CUDA execution context"); + .expect("failed to create CUDA execution context"); let filter_array = FilterArray::try_new(input.into_array(), mask.clone())?; @@ -80,7 +79,7 @@ mod tests { let gpu_result = FilterExecutor .execute(filter_array.into_array(), &mut cuda_ctx) .await - .vortex_expect("GPU filter failed") + .expect("GPU filter failed") .into_host() .await? 
.into_array(); diff --git a/vortex-cuda/src/kernel/mod.rs b/vortex-cuda/src/kernel/mod.rs index 02660052eb7..742643fec54 100644 --- a/vortex-cuda/src/kernel/mod.rs +++ b/vortex-cuda/src/kernel/mod.rs @@ -145,7 +145,8 @@ pub(crate) fn launch_cuda_kernel_impl( const ELEMENTS_PER_THREAD: u32 = 32; const ELEMENTS_PER_BLOCK: usize = (THREADS_PER_BLOCK * ELEMENTS_PER_THREAD) as usize; // 2048 - let num_blocks = u32::try_from(array_len.div_ceil(ELEMENTS_PER_BLOCK))?; + let num_blocks = u32::try_from(array_len.div_ceil(ELEMENTS_PER_BLOCK)) + .expect("number of blocks must fit u32"); let config = LaunchConfig { grid_dim: (num_blocks, 1, 1), @@ -287,7 +288,6 @@ mod tests { use cudarc::driver::CudaContext; use cudarc::driver::PushKernelArg; - use vortex_error::VortexExpect; use super::KernelLoader; @@ -318,7 +318,7 @@ mod tests { let kernel_loader = KernelLoader::new(); let function = kernel_loader .load_function("config_check", &[], &ctx) - .vortex_expect("failed to load config_check kernel"); + .expect("failed to load config_check kernel"); let config = cudarc::driver::LaunchConfig { grid_dim: (1, 1, 1), diff --git a/vortex-cuda/src/layout.rs b/vortex-cuda/src/layout.rs index 34f31f393eb..adb7c0e064a 100644 --- a/vortex-cuda/src/layout.rs +++ b/vortex-cuda/src/layout.rs @@ -43,7 +43,6 @@ use vortex_buffer::BufferString; use vortex_buffer::ByteBuffer; use vortex_dtype::DType; use vortex_dtype::FieldMask; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_panic; @@ -239,7 +238,7 @@ pub struct CudaFlatReader { impl CudaFlatReader { fn array_future(&self) -> SharedArrayFuture { let row_count = - usize::try_from(self.layout.row_count).vortex_expect("row count must fit in usize"); + usize::try_from(self.layout.row_count).expect("row count must fit in usize"); let segment_fut = self.segment_source.request(self.layout.segment_id); @@ -306,9 +305,9 @@ impl LayoutReader for CudaFlatReader { mask: MaskFuture, ) -> 
VortexResult { let row_range = usize::try_from(row_range.start) - .vortex_expect("Row range begin must fit within CudaFlatLayout size") + .expect("Row range begin must fit within CudaFlatLayout size") ..usize::try_from(row_range.end) - .vortex_expect("Row range end must fit within CudaFlatLayout size"); + .expect("Row range end must fit within CudaFlatLayout size"); let name = self.name.clone(); let array = self.array_future(); let expr = expr.clone(); @@ -354,9 +353,9 @@ impl LayoutReader for CudaFlatReader { mask: MaskFuture, ) -> VortexResult>> { let row_range = usize::try_from(row_range.start) - .vortex_expect("Row range begin must fit within CudaFlatLayout size") + .expect("Row range begin must fit within CudaFlatLayout size") ..usize::try_from(row_range.end) - .vortex_expect("Row range end must fit within CudaFlatLayout size"); + .expect("Row range end must fit within CudaFlatLayout size"); let name = self.name.clone(); let array = self.array_future(); let expr = expr.clone(); @@ -404,7 +403,7 @@ fn resolve_buffers_with_host_overrides( offset += fb_buf.padding() as usize; let buffer_len = fb_buf.length() as usize; - let idx = u32::try_from(idx).vortex_expect("buffer count must fit in u32"); + let idx = u32::try_from(idx).expect("buffer count must fit in u32"); let alignment = Alignment::from_exponent(fb_buf.alignment_exponent()); let handle = if let Some(host_data) = host_overrides.get(&idx) { // Inlined host buffers lose segment padding alignment after protobuf @@ -498,16 +497,14 @@ impl LayoutStrategy for CudaFlatLayoutStrategy { DType::Utf8(n) => { truncate_scalar_stat(chunk.statistics(), Stat::Min, |v| { lower_bound( - BufferString::from_scalar(v) - .vortex_expect("utf8 scalar must be a BufferString"), + BufferString::from_scalar(v).expect("utf8 scalar must be a BufferString"), self.max_variable_length_statistics_size, *n, ) }); truncate_scalar_stat(chunk.statistics(), Stat::Max, |v| { upper_bound( - BufferString::from_scalar(v) - .vortex_expect("utf8 
scalar must be a BufferString"), + BufferString::from_scalar(v).expect("utf8 scalar must be a BufferString"), self.max_variable_length_statistics_size, *n, ) @@ -516,16 +513,14 @@ impl LayoutStrategy for CudaFlatLayoutStrategy { DType::Binary(n) => { truncate_scalar_stat(chunk.statistics(), Stat::Min, |v| { lower_bound( - ByteBuffer::from_scalar(v) - .vortex_expect("binary scalar must be a ByteBuffer"), + ByteBuffer::from_scalar(v).expect("binary scalar must be a ByteBuffer"), self.max_variable_length_statistics_size, *n, ) }); truncate_scalar_stat(chunk.statistics(), Stat::Max, |v| { upper_bound( - ByteBuffer::from_scalar(v) - .vortex_expect("binary scalar must be a ByteBuffer"), + ByteBuffer::from_scalar(v).expect("binary scalar must be a ByteBuffer"), self.max_variable_length_statistics_size, *n, ) @@ -598,7 +593,7 @@ fn extract_constant_buffers(chunk: &dyn Array) -> Vec { buffer_idx += 1; } } else { - buffer_idx += u32::try_from(n).vortex_expect("buffer count must fit in u32"); + buffer_idx += u32::try_from(n).expect("buffer count must fit in u32"); } } result diff --git a/vortex-datafusion/src/convert/scalars.rs b/vortex-datafusion/src/convert/scalars.rs index b1ff4f1530c..6d380149a15 100644 --- a/vortex-datafusion/src/convert/scalars.rs +++ b/vortex-datafusion/src/convert/scalars.rs @@ -14,7 +14,6 @@ use vortex::dtype::datetime::TemporalMetadata; use vortex::dtype::datetime::TimeUnit; use vortex::dtype::half::f16; use vortex::dtype::i256; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::scalar::DecimalValue; @@ -220,7 +219,7 @@ impl FromDataFusion for Scalar { | ScalarValue::Time32Millisecond(v) => { let dtype = DType::from_arrow((&value.data_type(), Nullability::Nullable)); Scalar::try_new(dtype, v.map(vortex::scalar::ScalarValue::from)) - .vortex_expect("unable to create a time `Scalar`") + .expect("unable to create a time `Scalar`") } ScalarValue::Date64(v) | ScalarValue::Time64Microsecond(v) @@ 
-231,7 +230,7 @@ impl FromDataFusion for Scalar { | ScalarValue::TimestampNanosecond(v, _) => { let dtype = DType::from_arrow((&value.data_type(), Nullability::Nullable)); Scalar::try_new(dtype, v.map(vortex::scalar::ScalarValue::from)) - .vortex_expect("unable to create a time `Scalar`") + .expect("unable to create a time `Scalar`") } ScalarValue::Decimal32(decimal, precision, scale) => { let decimal_dtype = DecimalDType::new(*precision, *scale); diff --git a/vortex-datafusion/src/persistent/format.rs b/vortex-datafusion/src/persistent/format.rs index 7a64d0048d4..3d0514547d0 100644 --- a/vortex-datafusion/src/persistent/format.rs +++ b/vortex-datafusion/src/persistent/format.rs @@ -46,7 +46,6 @@ use vortex::dtype::DType; use vortex::dtype::Nullability; use vortex::dtype::PType; use vortex::dtype::arrow::FromArrowType; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_err; use vortex::expr::stats; @@ -275,7 +274,7 @@ impl FileFormat for VortexFormat { let inferred_schema = vxf.dtype().to_arrow_schema()?; VortexResult::Ok((object.location, inferred_schema)) }) - .map(|f| f.vortex_expect("Failed to spawn infer_schema")) + .map(|f| f.expect("Failed to spawn infer_schema")) }) .buffer_unordered(state.config_options().execution.meta_fetch_concurrency) .try_collect::>() @@ -353,9 +352,7 @@ impl FileFormat for VortexFormat { } }; - let struct_dtype = dtype - .as_struct_fields_opt() - .vortex_expect("dtype is not a struct"); + let struct_dtype = dtype.as_struct_fields_opt().expect("dtype is not a struct"); // Evaluate the statistics for each column that we are able to return to DataFusion. 
let Some(file_stats) = file_stats else { @@ -364,7 +361,7 @@ impl FileFormat for VortexFormat { num_rows: Precision::Exact( usize::try_from(row_count) .map_err(|_| vortex_err!("Row count overflow")) - .vortex_expect("Row count overflow"), + .expect("Row count overflow"), ), total_byte_size: Precision::Absent, column_statistics: vec![ColumnStatistics::default(); struct_dtype.nfields()], @@ -404,12 +401,12 @@ impl FileFormat for VortexFormat { Scalar::try_new( Stat::Min .dtype(stats_dtype) - .vortex_expect("must have a valid dtype"), + .expect("must have a valid dtype"), Some(stat_val), ) - .vortex_expect("`Stat::Min` somehow had an incompatible `DType`") + .expect("`Stat::Min` somehow had an incompatible `DType`") .cast(&DType::from_arrow(field.as_ref())) - .vortex_expect("Unable to cast to target type that DataFusion wants") + .expect("Unable to cast to target type that DataFusion wants") .try_to_df() .ok() }) @@ -423,12 +420,12 @@ impl FileFormat for VortexFormat { Scalar::try_new( Stat::Max .dtype(stats_dtype) - .vortex_expect("must have a valid dtype"), + .expect("must have a valid dtype"), Some(stat_val), ) - .vortex_expect("`Stat::Max` somehow had an incompatible `DType`") + .expect("`Stat::Max` somehow had an incompatible `DType`") .cast(&DType::from_arrow(field.as_ref())) - .vortex_expect("Unable to cast to target type that DataFusion wants") + .expect("Unable to cast to target type that DataFusion wants") .try_to_df() .ok() }) @@ -457,14 +454,14 @@ impl FileFormat for VortexFormat { num_rows: Precision::Exact( usize::try_from(row_count) .map_err(|_| vortex_err!("Row count overflow")) - .vortex_expect("Row count overflow"), + .expect("Row count overflow"), ), total_byte_size, column_statistics, }) }) .await - .vortex_expect("Failed to spawn infer_stats") + .expect("Failed to spawn infer_stats") } async fn create_physical_plan( diff --git a/vortex-datafusion/src/persistent/metrics.rs b/vortex-datafusion/src/persistent/metrics.rs index 7e99430568f..a777c6359ad 
100644 --- a/vortex-datafusion/src/persistent/metrics.rs +++ b/vortex-datafusion/src/persistent/metrics.rs @@ -17,7 +17,6 @@ use datafusion_physical_plan::metrics::Label as DatafusionLabel; use datafusion_physical_plan::metrics::MetricValue as DatafusionMetricValue; use datafusion_physical_plan::metrics::MetricsSet; use datafusion_physical_plan::metrics::Time; -use vortex::error::VortexExpect; use vortex::metrics::Label; use vortex::metrics::Metric; use vortex::metrics::MetricValue; @@ -114,18 +113,18 @@ fn metric_value_to_datafusion(name: &str, metric: &MetricValue) -> Vec Vec DFResult> { let batch_size = self .batch_size - .vortex_expect("batch_size must be supplied to VortexSource"); + .expect("batch_size must be supplied to VortexSource"); let expr_adapter_factory = base_config .expr_adapter_factory diff --git a/vortex-dtype/src/arbitrary/mod.rs b/vortex-dtype/src/arbitrary/mod.rs index 2ea4421098d..fdbcc9f3e67 100644 --- a/vortex-dtype/src/arbitrary/mod.rs +++ b/vortex-dtype/src/arbitrary/mod.rs @@ -55,7 +55,7 @@ fn random_dtype(u: &mut Unstructured<'_>, depth: u8) -> Result { // 8 => DType::FixedSizeList( // Arc::new(random_dtype(u, depth - 1)?), // // We limit the list size to 3 rather (following random struct fields). 
- // u.choose_index(3)?.try_into().vortex_expect("impossible"), + // u.choose_index(3)?.try_into().expect("impossible"), // u.arbitrary()?, // ), // Null, diff --git a/vortex-dtype/src/arrow.rs b/vortex-dtype/src/arrow.rs index 408034680eb..717652052df 100644 --- a/vortex-dtype/src/arrow.rs +++ b/vortex-dtype/src/arrow.rs @@ -24,7 +24,6 @@ use arrow_schema::SchemaBuilder; use arrow_schema::SchemaRef; use arrow_schema::TimeUnit as ArrowTimeUnit; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -154,7 +153,7 @@ impl FromArrowType<(&DataType, Nullability)> for DType { fn from_arrow((data_type, nullability): (&DataType, Nullability)) -> Self { if data_type.is_integer() || data_type.is_floating() { return DType::Primitive( - PType::try_from_arrow(data_type).vortex_expect("arrow float/integer to ptype"), + PType::try_from_arrow(data_type).expect("arrow float/integer to ptype"), nullability, ); } diff --git a/vortex-dtype/src/bigint/mod.rs b/vortex-dtype/src/bigint/mod.rs index 6846d202529..7e065683add 100644 --- a/vortex-dtype/src/bigint/mod.rs +++ b/vortex-dtype/src/bigint/mod.rs @@ -25,7 +25,6 @@ use num_traits::One; use num_traits::WrappingAdd; use num_traits::WrappingSub; use num_traits::Zero; -use vortex_error::VortexExpect; /// Signed 256-bit integer type. 
/// @@ -247,11 +246,8 @@ impl Shr for i256 { use num_traits::ToPrimitive; Self( - self.0.shr( - rhs.0 - .to_u8() - .vortex_expect("Can't shift more than 256 bits"), - ), + self.0 + .shr(rhs.0.to_u8().expect("Can't shift more than 256 bits")), ) } } @@ -263,7 +259,7 @@ impl Shl for i256 { use num_traits::ToPrimitive; Self( self.0 - .shl(rhs.to_u8().vortex_expect("Can't shift more than 256 bits")), + .shl(rhs.to_u8().expect("Can't shift more than 256 bits")), ) } } diff --git a/vortex-dtype/src/datetime/date.rs b/vortex-dtype/src/datetime/date.rs index 96040186f10..24eb02ea465 100644 --- a/vortex-dtype/src/datetime/date.rs +++ b/vortex-dtype/src/datetime/date.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -34,7 +33,7 @@ impl Date { /// /// Panics if the `time_unit` is not supported by date types. pub fn new(time_unit: TimeUnit, nullability: Nullability) -> ExtDType { - Self::try_new(time_unit, nullability).vortex_expect("failed to create date dtype") + Self::try_new(time_unit, nullability).expect("failed to create date dtype") } } diff --git a/vortex-dtype/src/datetime/time.rs b/vortex-dtype/src/datetime/time.rs index b654a30b4fd..b13d7ef344b 100644 --- a/vortex-dtype/src/datetime/time.rs +++ b/vortex-dtype/src/datetime/time.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -30,7 +29,7 @@ impl Time { /// Creates a new Time extension dtype with the given time unit and nullability. 
pub fn new(time_unit: TimeUnit, nullability: Nullability) -> ExtDType { - Self::try_new(time_unit, nullability).vortex_expect("failed to create time dtype") + Self::try_new(time_unit, nullability).expect("failed to create time dtype") } } diff --git a/vortex-dtype/src/datetime/timestamp.rs b/vortex-dtype/src/datetime/timestamp.rs index 004b09c4bc1..cb8b6decd9e 100644 --- a/vortex-dtype/src/datetime/timestamp.rs +++ b/vortex-dtype/src/datetime/timestamp.rs @@ -7,7 +7,6 @@ use std::fmt::Display; use std::fmt::Formatter; use std::sync::Arc; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_ensure; use vortex_error::vortex_err; @@ -44,7 +43,7 @@ impl Timestamp { }, DType::Primitive(PType::I64, nullability), ) - .vortex_expect("failed to create timestamp dtype") + .expect("failed to create timestamp dtype") } } @@ -101,10 +100,7 @@ impl ExtDTypeVTable for Timestamp { let tag = data[0]; let time_unit = TimeUnit::try_from(tag)?; - let tz_len_bytes: [u8; 2] = data[1..3] - .try_into() - .ok() - .vortex_expect("Verified to have two bytes"); + let tz_len_bytes: [u8; 2] = data[1..3].try_into().expect("Verified to have two bytes"); let tz_len = u16::from_le_bytes(tz_len_bytes) as usize; if tz_len == 0 { return Ok(TimestampOptions { diff --git a/vortex-dtype/src/decimal/mod.rs b/vortex-dtype/src/decimal/mod.rs index 6706a0f3019..98a914c372f 100644 --- a/vortex-dtype/src/decimal/mod.rs +++ b/vortex-dtype/src/decimal/mod.rs @@ -14,7 +14,6 @@ use num_traits::ToPrimitive; pub use precision::*; pub use types::*; use vortex_error::VortexError; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -105,7 +104,7 @@ impl DecimalDType { (self.precision.get() as f32 * 10.0f32.log(2.0)) .ceil() .to_usize() - .vortex_expect("too many bits required") + .expect("too many bits required") } } diff --git a/vortex-dtype/src/decimal/precision.rs b/vortex-dtype/src/decimal/precision.rs 
index 05999fa631e..aa7907a6c3f 100644 --- a/vortex-dtype/src/decimal/precision.rs +++ b/vortex-dtype/src/decimal/precision.rs @@ -6,7 +6,6 @@ use std::fmt::Display; use std::marker::PhantomData; use std::num::NonZero; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -42,7 +41,7 @@ impl PrecisionScale { /// /// Panics if the precision/scale are invalid. pub fn new(precision: u8, scale: i8) -> Self { - Self::try_new(precision, scale).vortex_expect("Failed to create `PrecisionScale`") + Self::try_new(precision, scale).expect("Failed to create `PrecisionScale`") } /// Try to create a new [`PrecisionScale`] with the given precision and scale. diff --git a/vortex-dtype/src/dtype.rs b/vortex-dtype/src/dtype.rs index 1970788e622..40e690b335f 100644 --- a/vortex-dtype/src/dtype.rs +++ b/vortex-dtype/src/dtype.rs @@ -9,7 +9,6 @@ use std::sync::Arc; use DType::*; use itertools::Itertools; -use vortex_error::VortexExpect; use vortex_error::vortex_panic; use crate::FieldDType; @@ -356,7 +355,7 @@ impl DType { let element_size = f.element_size()?; sum = sum .checked_add(element_size) - .vortex_expect("sum of field sizes is bigger than usize"); + .expect("sum of field sizes is bigger than usize"); } Some(sum) } diff --git a/vortex-dtype/src/extension/mod.rs b/vortex-dtype/src/extension/mod.rs index d6d955346c5..ce3d850eda6 100644 --- a/vortex-dtype/src/extension/mod.rs +++ b/vortex-dtype/src/extension/mod.rs @@ -17,7 +17,6 @@ use std::sync::Arc; use arcref::ArcRef; pub use matcher::*; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_err; pub use vtable::*; @@ -173,7 +172,7 @@ impl ExtDTypeRef { /// Panics if the match fails. pub fn metadata(&self) -> M::Match<'_> { self.metadata_opt::() - .vortex_expect("Failed to downcast DynExtDType") + .expect("Failed to downcast DynExtDType") } /// Downcast to the concrete [`ExtDType`]. 
@@ -205,7 +204,7 @@ impl ExtDTypeRef { type_name::(), ) }) - .vortex_expect("Failed to downcast DynExtDType") + .expect("Failed to downcast DynExtDType") } } @@ -311,7 +310,7 @@ impl ExtDTypeImpl for ExtDTypeAdapter { fn with_nullability(&self, nullability: Nullability) -> ExtDTypeRef { let storage_dtype = self.storage_dtype.with_nullability(nullability); ExtDType::::try_with_vtable(self.vtable.clone(), self.metadata.clone(), storage_dtype) - .vortex_expect("Extension DType {} incorrect fails validation with the same storage type but different nullability").erased() + .expect("Extension DType {} incorrect fails validation with the same storage type but different nullability").erased() } } diff --git a/vortex-dtype/src/struct_.rs b/vortex-dtype/src/struct_.rs index 51140eb2105..b46ea8aaec1 100644 --- a/vortex-dtype/src/struct_.rs +++ b/vortex-dtype/src/struct_.rs @@ -8,7 +8,6 @@ use std::sync::Arc; use std::sync::OnceLock; use itertools::Itertools; -use vortex_error::VortexExpect; use vortex_error::VortexResult; use vortex_error::vortex_bail; use vortex_error::vortex_err; @@ -65,16 +64,16 @@ impl PartialEq for FieldDTypeInner { match (self, other) { (Self::Owned(lhs), Self::Owned(rhs)) => lhs == rhs, (Self::View(lhs), Self::View(rhs)) => { - let lhs = DType::try_from(lhs.clone()) - .vortex_expect("Failed to parse FieldDType into DType"); - let rhs = DType::try_from(rhs.clone()) - .vortex_expect("Failed to parse FieldDType into DType"); + let lhs = + DType::try_from(lhs.clone()).expect("Failed to parse FieldDType into DType"); + let rhs = + DType::try_from(rhs.clone()).expect("Failed to parse FieldDType into DType"); lhs == rhs } (Self::View(view), Self::Owned(owned)) | (Self::Owned(owned), Self::View(view)) => { - let view = DType::try_from(view.clone()) - .vortex_expect("Failed to parse FieldDType into DType"); + let view = + DType::try_from(view.clone()).expect("Failed to parse FieldDType into DType"); owned == &view } } @@ -89,8 +88,8 @@ impl Hash for 
FieldDTypeInner { owned.hash(state); } FieldDTypeInner::View(view) => { - let owned = DType::try_from(view.clone()) - .vortex_expect("Failed to parse FieldDType into DType"); + let owned = + DType::try_from(view.clone()).expect("Failed to parse FieldDType into DType"); owned.hash(state); } } @@ -334,7 +333,7 @@ impl StructFields { Some( self.0.dtypes[index] .value() - .vortex_expect("field DType must be valid"), + .expect("field DType must be valid"), ) } @@ -345,7 +344,7 @@ impl StructFields { .dtypes .get(index)? .value() - .vortex_expect("field DType must be valid"), + .expect("field DType must be valid"), ) } @@ -354,7 +353,7 @@ impl StructFields { self.0 .dtypes .iter() - .map(|dt| dt.value().vortex_expect("field DType must be valid")) + .map(|dt| dt.value().expect("field DType must be valid")) } /// Project a subset of fields from the struct diff --git a/vortex-duckdb/src/convert/dtype.rs b/vortex-duckdb/src/convert/dtype.rs index d6481cafc70..3b3314630d5 100644 --- a/vortex-duckdb/src/convert/dtype.rs +++ b/vortex-duckdb/src/convert/dtype.rs @@ -92,9 +92,12 @@ impl FromLogicalType for DType { DUCKDB_TYPE::DUCKDB_TYPE_VARCHAR => DType::Utf8(nullability), DUCKDB_TYPE::DUCKDB_TYPE_BLOB => DType::Binary(nullability), DUCKDB_TYPE::DUCKDB_TYPE_DECIMAL => { - let (width, scale) = logical_type.as_decimal(); + let (precision, scale) = logical_type.as_decimal(); DType::Decimal( - DecimalDType::try_new(width, scale.try_into()?)?, + DecimalDType::try_new( + precision, + scale.try_into().expect("scale must fit into i8"), + )?, nullability, ) } @@ -218,7 +221,10 @@ impl TryFrom<&DType> for LogicalType { DType::Decimal(decimal_dtype, _) => { return LogicalType::decimal_type( decimal_dtype.precision(), - decimal_dtype.scale().try_into()?, + decimal_dtype + .scale() + .try_into() + .expect("scale must fit into i8"), ); } DType::List(element_dtype, _) => { diff --git a/vortex-duckdb/src/convert/expr.rs b/vortex-duckdb/src/convert/expr.rs index 5b95b631b72..dd75cec1303 100644 
--- a/vortex-duckdb/src/convert/expr.rs +++ b/vortex-duckdb/src/convert/expr.rs @@ -7,7 +7,6 @@ use itertools::Itertools; use tracing::debug; use vortex::dtype::Nullability; use vortex::error::VortexError; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::error::vortex_err; @@ -36,7 +35,7 @@ use crate::duckdb; const DUCKDB_FUNCTION_NAME_CONTAINS: &str = "contains"; fn like_pattern_str(value: &duckdb::Expression) -> VortexResult> { - match value.as_class().vortex_expect("unknown class") { + match value.as_class().expect("unknown class") { duckdb::ExpressionClass::BoundConstant(constant) => { Ok(Some(format!("%{}%", constant.value.as_string().as_str()))) } @@ -177,10 +176,10 @@ pub fn try_from_bound_expression(value: &duckdb::Expression) -> VortexResult { - and_collect(children).vortex_expect("cannot be empty") + and_collect(children).expect("cannot be empty") } DUCKDB_VX_EXPR_TYPE::DUCKDB_VX_EXPR_TYPE_CONJUNCTION_OR => { - or_collect(children).vortex_expect("cannot be empty") + or_collect(children).expect("cannot be empty") } _ => vortex_bail!("unexpected operator {:?} in bound conjunction", conj.op), } diff --git a/vortex-duckdb/src/convert/table_filter.rs b/vortex-duckdb/src/convert/table_filter.rs index a3a42a4e3b6..a6e639b6598 100644 --- a/vortex-duckdb/src/convert/table_filter.rs +++ b/vortex-duckdb/src/convert/table_filter.rs @@ -7,7 +7,6 @@ use itertools::Itertools; use vortex::compute::Operator; use vortex::dtype::DType; use vortex::dtype::Nullability; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::expr::Binary; @@ -106,7 +105,7 @@ pub fn try_from_table_filter( move || { let value = data.latest()?; let scalar = Scalar::try_from(value.as_ref()) - .vortex_expect("failed to convert dynamic filter value to scalar"); + .expect("failed to convert dynamic filter value to scalar"); scalar.into_value() }, col.return_dtype(scope_dtype)?, diff 
--git a/vortex-duckdb/src/convert/vector.rs b/vortex-duckdb/src/convert/vector.rs index 681337f8974..c601767f59d 100644 --- a/vortex-duckdb/src/convert/vector.rs +++ b/vortex-duckdb/src/convert/vector.rs @@ -26,7 +26,6 @@ use vortex::dtype::FieldNames; use vortex::dtype::NativePType; use vortex::dtype::Nullability; use vortex::dtype::datetime::TimeUnit; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; @@ -120,13 +119,13 @@ fn convert_valid_list_entry( child_min_length: &mut usize, previous_end: &mut i64, ) -> (i64, i64) { - let offset = i64::try_from(entry.offset).vortex_expect("list offset must fit i64"); + let offset = i64::try_from(entry.offset).expect("list offset must fit i64"); assert!(offset >= 0, "list offset must be non-negative"); - let size = i64::try_from(entry.length).vortex_expect("list size must fit i64"); + let size = i64::try_from(entry.length).expect("list size must fit i64"); assert!(size >= 0, "list size must be non-negative"); let end = usize::try_from(offset + size) - .vortex_expect("child vector length did not fit into a 32-bit `usize` type"); + .expect("child vector length did not fit into a 32-bit `usize` type"); *child_min_length = (*child_min_length).max(end); *previous_end = offset + size; @@ -280,7 +279,8 @@ pub fn flat_vector_to_vortex(vector: &mut Vector, len: usize) -> VortexResult { let logical_type = vector.logical_type(); let (precision, scale) = logical_type.as_decimal(); - let decimal_dtype = DecimalDType::try_new(precision, scale.try_into()?)?; + let scale = scale.try_into().expect("decimal scale must fit i8"); + let decimal_dtype = DecimalDType::try_new(precision, scale)?; let validity = vector.validity_ref(len).to_validity(); // https://duckdb.org/docs/stable/sql/data_types/numeric.html#fixed-point-decimals @@ -377,7 +377,6 @@ mod tests { use vortex::array::ToCanonical; use vortex::array::arrays::BoolArray; - use vortex::error::VortexExpect; use vortex::mask::Mask; use 
vortex_array::assert_arrays_eq; @@ -620,7 +619,7 @@ mod tests { let logical_type = LogicalType::list_type(LogicalType::new(DUCKDB_TYPE::DUCKDB_TYPE_INTEGER)) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut vector = Vector::with_capacity(logical_type, len); // Populate with data @@ -653,7 +652,7 @@ mod tests { let logical_type = LogicalType::array_type(LogicalType::new(DUCKDB_TYPE::DUCKDB_TYPE_INTEGER), 4) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut vector = Vector::with_capacity(logical_type, len); // Populate with data @@ -678,7 +677,7 @@ mod tests { fn test_empty_struct() { let len = 4; let logical_type = LogicalType::struct_type([], []) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut vector = Vector::with_capacity(logical_type, len); // Test conversion @@ -702,7 +701,7 @@ mod tests { ], [CString::new("a").unwrap(), CString::new("b").unwrap()], ) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut vector = Vector::with_capacity(logical_type, len); // Populate with data diff --git a/vortex-duckdb/src/copy.rs b/vortex-duckdb/src/copy.rs index 00428a4574c..8f3bcb976ca 100644 --- a/vortex-duckdb/src/copy.rs +++ b/vortex-duckdb/src/copy.rs @@ -15,7 +15,6 @@ use vortex::dtype::DType; use vortex::dtype::Nullability::NonNullable; use vortex::dtype::Nullability::Nullable; use vortex::dtype::StructFields; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_err; use vortex::file::WriteOptionsSessionExt; @@ -92,7 +91,7 @@ impl CopyFunction for VortexCopyFunction { init_global .sink .as_mut() - .vortex_expect("sink closed early") + .expect("sink closed early") 
.send(chunk) .await .map_err(|e| vortex_err!("send error {}", e.to_string())) @@ -113,7 +112,7 @@ impl CopyFunction for VortexCopyFunction { .write_task .lock() .take() - .vortex_expect("no file to close"); + .expect("no file to close"); task.await?; Ok(()) }) diff --git a/vortex-duckdb/src/duckdb/copy_function/callback.rs b/vortex-duckdb/src/duckdb/copy_function/callback.rs index 783bd9f2a2e..1b8558ba824 100644 --- a/vortex-duckdb/src/duckdb/copy_function/callback.rs +++ b/vortex-duckdb/src/duckdb/copy_function/callback.rs @@ -8,7 +8,6 @@ use std::os::raw::c_void; use itertools::Itertools; use num_traits::AsPrimitive; -use vortex::error::VortexExpect; use crate::cpp; use crate::cpp::duckdb_data_chunk; @@ -61,8 +60,8 @@ pub(crate) unsafe extern "C-unwind" fn global_callback( let file_path = unsafe { CStr::from_ptr(file_path) } .to_string_lossy() .into_owned(); - let bind_data = unsafe { bind_data.cast::().as_ref() } - .vortex_expect("global_init_data null pointer"); + let bind_data = + unsafe { bind_data.cast::().as_ref() }.expect("global_init_data null pointer"); try_or_null(error_out, || { let ctx = unsafe { ClientContext::borrow(client_context) }; let bind_data = T::init_global(ctx, bind_data, file_path)?; @@ -75,7 +74,7 @@ pub(crate) unsafe extern "C-unwind" fn local_callback( error_out: *mut duckdb_vx_error, ) -> cpp::duckdb_vx_data { let bind_data = - unsafe { bind_data.cast::().as_ref() }.vortex_expect("bind_data null pointer"); + unsafe { bind_data.cast::().as_ref() }.expect("bind_data null pointer"); try_or_null(error_out, || { let bind_data = T::init_local(bind_data)?; Ok(Data::from(Box::new(bind_data)).as_ptr()) @@ -90,11 +89,11 @@ pub(crate) unsafe extern "C-unwind" fn copy_to_sink_callback( error_out: *mut duckdb_vx_error, ) { let bind_data = - unsafe { bind_data.cast::().as_ref() }.vortex_expect("bind_data null pointer"); - let global_data = unsafe { global_data.cast::().as_mut() } - .vortex_expect("bind_data null pointer"); - let local_data = unsafe 
{ local_data.cast::().as_mut() } - .vortex_expect("bind_data null pointer"); + unsafe { bind_data.cast::().as_ref() }.expect("bind_data null pointer"); + let global_data = + unsafe { global_data.cast::().as_mut() }.expect("bind_data null pointer"); + let local_data = + unsafe { local_data.cast::().as_mut() }.expect("bind_data null pointer"); try_or(error_out, || { T::copy_to_sink(bind_data, global_data, local_data, &mut unsafe { @@ -110,9 +109,9 @@ pub(crate) unsafe extern "C-unwind" fn copy_to_finalize_callback().as_ref() }.vortex_expect("bind_data null pointer"); - let global_data = unsafe { global_data.cast::().as_mut() } - .vortex_expect("bind_data null pointer"); + unsafe { bind_data.cast::().as_ref() }.expect("bind_data null pointer"); + let global_data = + unsafe { global_data.cast::().as_mut() }.expect("bind_data null pointer"); try_or(error_out, || { T::copy_to_finalize(bind_data, global_data)?; diff --git a/vortex-duckdb/src/duckdb/copy_function/mod.rs b/vortex-duckdb/src/duckdb/copy_function/mod.rs index 0c4a6f903b8..3531890d2ce 100644 --- a/vortex-duckdb/src/duckdb/copy_function/mod.rs +++ b/vortex-duckdb/src/duckdb/copy_function/mod.rs @@ -6,7 +6,6 @@ mod callback; use std::ffi::CStr; use std::fmt::Debug; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use crate::Connection; @@ -71,7 +70,7 @@ impl Connection { extension: &CStr, ) -> VortexResult<()> { let vtab: &mut cpp::duckdb_vx_copy_func_vtab_t = - unsafe { cpp::get_vtab_one().as_mut() }.vortex_expect("copy vtab cannot be null"); + unsafe { cpp::get_vtab_one().as_mut() }.expect("copy vtab cannot be null"); vtab.name = name.as_ptr(); vtab.extension = extension.as_ptr(); diff --git a/vortex-duckdb/src/duckdb/data_chunk.rs b/vortex-duckdb/src/duckdb/data_chunk.rs index bda0024eedc..0b531ae9fee 100644 --- a/vortex-duckdb/src/duckdb/data_chunk.rs +++ b/vortex-duckdb/src/duckdb/data_chunk.rs @@ -5,7 +5,6 @@ use std::ffi::CStr; use std::ptr; use vortex::error::VortexError; -use 
vortex::error::VortexExpect; use vortex::error::vortex_bail; use crate::cpp; @@ -37,7 +36,7 @@ impl DataChunk { /// Returns the column count of the data chunk. pub fn column_count(&self) -> usize { usize::try_from(unsafe { cpp::duckdb_data_chunk_get_column_count(self.as_ptr()) }) - .vortex_expect("Column count exceeds usize") + .expect("Column count exceeds usize") } /// Set the length of the data chunk. diff --git a/vortex-duckdb/src/duckdb/ddb_string.rs b/vortex-duckdb/src/duckdb/ddb_string.rs index df5b513b8a6..053ba9e64a0 100644 --- a/vortex-duckdb/src/duckdb/ddb_string.rs +++ b/vortex-duckdb/src/duckdb/ddb_string.rs @@ -6,7 +6,6 @@ use std::fmt::Display; use std::fmt::Formatter; use vortex::dtype::FieldName; -use vortex::error::VortexExpect; use vortex::error::vortex_err; use crate::cpp::duckdb_free; @@ -20,7 +19,7 @@ wrapper!( unsafe { CStr::from_ptr(ptr) } .to_str() .map_err(|e| vortex_err!("Failed to convert C string to str: {e}")) - .vortex_expect("DuckDB string should be valid UTF-8") + .expect("DuckDB string should be valid UTF-8") }, |ptr: &mut *mut std::ffi::c_char| unsafe { duckdb_free((*ptr).cast()) } ); diff --git a/vortex-duckdb/src/duckdb/logical_type.rs b/vortex-duckdb/src/duckdb/logical_type.rs index d21c4126345..2983f634618 100644 --- a/vortex-duckdb/src/duckdb/logical_type.rs +++ b/vortex-duckdb/src/duckdb/logical_type.rs @@ -5,7 +5,6 @@ use std::ffi::CString; use std::fmt::Debug; use std::fmt::Formatter; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; @@ -186,7 +185,7 @@ impl LogicalType { pub fn array_type_array_size(&self) -> u32 { u32::try_from(unsafe { duckdb_array_type_array_size(self.as_ptr()) }) - .vortex_expect("Array size must fit in u32") + .expect("Array size must fit in u32") } pub fn list_child_type(&self) -> Self { @@ -211,7 +210,7 @@ impl LogicalType { pub fn struct_type_child_count(&self) -> usize { usize::try_from(unsafe { duckdb_struct_type_child_count(self.as_ptr()) }) - 
.vortex_expect("Struct type child count must fit in usize") + .expect("Struct type child count must fit in usize") } pub fn union_member_type(&self, idx: usize) -> Self { @@ -224,7 +223,7 @@ impl LogicalType { pub fn union_member_count(&self) -> usize { usize::try_from(unsafe { duckdb_union_type_member_count(self.as_ptr()) }) - .vortex_expect("Union member count must fit in usize") + .expect("Union member count must fit in usize") } } @@ -395,8 +394,7 @@ mod tests { #[test] fn test_clone_decimal_logical_type() { - let decimal_type = - LogicalType::decimal_type(10, 2).vortex_expect("Failed to create decimal type"); + let decimal_type = LogicalType::decimal_type(10, 2).expect("Failed to create decimal type"); #[allow(clippy::redundant_clone)] let cloned = decimal_type.clone(); @@ -415,8 +413,7 @@ mod tests { fn test_clone_list_logical_type() { // Create a list of integers let int_type = LogicalType::new(DUCKDB_TYPE::DUCKDB_TYPE_INTEGER); - let list_type = - LogicalType::list_type(int_type).vortex_expect("Failed to create list type"); + let list_type = LogicalType::list_type(int_type).expect("Failed to create list type"); #[allow(clippy::redundant_clone)] let cloned = list_type.clone(); @@ -440,7 +437,7 @@ mod tests { // Create an array of strings with size 5 let array_type = LogicalType::array_type(LogicalType::new(DUCKDB_TYPE::DUCKDB_TYPE_VARCHAR), 5) - .vortex_expect("Failed to create array type"); + .expect("Failed to create array type"); #[allow(clippy::redundant_clone)] let cloned = array_type.clone(); @@ -508,7 +505,7 @@ mod tests { let member_names = vec![CString::new("name").unwrap(), CString::new("age").unwrap()]; let struct_type = LogicalType::struct_type(member_types, member_names) - .vortex_expect("Failed to create struct type"); + .expect("Failed to create struct type"); #[allow(clippy::redundant_clone)] let cloned = struct_type.clone(); diff --git a/vortex-duckdb/src/duckdb/object_cache.rs b/vortex-duckdb/src/duckdb/object_cache.rs index 
b6e80ceee4c..4557833587b 100644 --- a/vortex-duckdb/src/duckdb/object_cache.rs +++ b/vortex-duckdb/src/duckdb/object_cache.rs @@ -4,7 +4,6 @@ use std::ffi::CString; use std::os::raw::c_void; -use vortex::error::VortexExpect; use vortex::error::vortex_err; use crate::cpp; @@ -31,7 +30,7 @@ impl ObjectCacheRef<'_> { pub fn put(&self, key: &str, entry: T) -> *mut T { let key_cstr = CString::new(key) .map_err(|e| vortex_err!("invalid key: {}", e)) - .vortex_expect("object cache key should be valid C string"); + .expect("object cache key should be valid C string"); let opaque_ptr = Box::into_raw(Box::new(entry)); unsafe { @@ -50,7 +49,7 @@ impl ObjectCacheRef<'_> { pub fn get(&self, key: &str) -> Option<&T> { let key_cstr = CString::new(key) .map_err(|e| vortex_err!("invalid key: {}", e)) - .vortex_expect("object cache key should be valid C string"); + .expect("object cache key should be valid C string"); unsafe { let opaque_ptr = cpp::duckdb_vx_object_cache_get(self.as_ptr(), key_cstr.as_ptr()); diff --git a/vortex-duckdb/src/duckdb/scalar_function.rs b/vortex-duckdb/src/duckdb/scalar_function.rs index c27af3c43b6..97f8cfaa2ad 100644 --- a/vortex-duckdb/src/duckdb/scalar_function.rs +++ b/vortex-duckdb/src/duckdb/scalar_function.rs @@ -1,7 +1,6 @@ // SPDX-License-Identifier: Apache-2.0 // SPDX-FileCopyrightText: Copyright the Vortex contributors -use vortex::error::VortexExpect; use vortex::error::vortex_err; use crate::cpp; @@ -17,7 +16,7 @@ impl ScalarFunction { std::ffi::CStr::from_ptr(name_ptr) .to_str() .map_err(|e| vortex_err!("invalid utf-8: {e}")) - .vortex_expect("scalar function name should be valid UTF-8") + .expect("scalar function name should be valid UTF-8") } } diff --git a/vortex-duckdb/src/duckdb/table_function/cardinality.rs b/vortex-duckdb/src/duckdb/table_function/cardinality.rs index 56693ca2001..9b58872f052 100644 --- a/vortex-duckdb/src/duckdb/table_function/cardinality.rs +++ b/vortex-duckdb/src/duckdb/table_function/cardinality.rs @@ -3,8 +3,6 
@@ use std::ffi::c_void; -use vortex::error::VortexExpect; - use crate::cpp; use crate::duckdb::Cardinality; use crate::duckdb::TableFunction; @@ -15,9 +13,8 @@ pub(crate) unsafe extern "C-unwind" fn cardinality_callback( node_stats_out: *mut cpp::duckdb_vx_node_statistics, ) { let bind_data = - unsafe { bind_data.cast::().as_mut() }.vortex_expect("bind_data null pointer"); - let node_stats = - unsafe { node_stats_out.as_mut() }.vortex_expect("node_stats_out null pointer"); + unsafe { bind_data.cast::().as_mut() }.expect("bind_data null pointer"); + let node_stats = unsafe { node_stats_out.as_mut() }.expect("node_stats_out null pointer"); match T::cardinality(bind_data) { Cardinality::Unknown => {} diff --git a/vortex-duckdb/src/duckdb/table_function/init.rs b/vortex-duckdb/src/duckdb/table_function/init.rs index c3cd5d0d013..13eca8090b7 100644 --- a/vortex-duckdb/src/duckdb/table_function/init.rs +++ b/vortex-duckdb/src/duckdb/table_function/init.rs @@ -6,7 +6,6 @@ use std::fmt::Debug; use std::fmt::Formatter; use std::ptr; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; @@ -21,9 +20,8 @@ pub(crate) unsafe extern "C-unwind" fn init_global_callback( init_input: *const cpp::duckdb_vx_tfunc_init_input, error_out: *mut cpp::duckdb_vx_error, ) -> cpp::duckdb_vx_data { - let init_input = TableInitInput::new( - unsafe { init_input.as_ref() }.vortex_expect("init_input null pointer"), - ); + let init_input = + TableInitInput::new(unsafe { init_input.as_ref() }.expect("init_input null pointer")); match T::init_global(&init_input) { Ok(init_data) => Data::from(Box::new(init_data)).as_ptr(), @@ -43,12 +41,11 @@ pub(crate) unsafe extern "C-unwind" fn init_local_callback( global_init_data: *mut c_void, error_out: *mut cpp::duckdb_vx_error, ) -> cpp::duckdb_vx_data { - let init_input = TableInitInput::new( - unsafe { init_input.as_ref() }.vortex_expect("init_input null pointer"), - ); + let init_input = + 
TableInitInput::new(unsafe { init_input.as_ref() }.expect("init_input null pointer")); let global_init_data = unsafe { global_init_data.cast::().as_mut() } - .vortex_expect("global_init_data null pointer"); + .expect("global_init_data null pointer"); match T::init_local(&init_input, global_init_data) { Ok(init_data) => Data::from(Box::new(init_data)).as_ptr(), diff --git a/vortex-duckdb/src/duckdb/table_function/mod.rs b/vortex-duckdb/src/duckdb/table_function/mod.rs index 95a09cce68b..fcf25d292b5 100644 --- a/vortex-duckdb/src/duckdb/table_function/mod.rs +++ b/vortex-duckdb/src/duckdb/table_function/mod.rs @@ -7,7 +7,6 @@ use std::ffi::c_void; use std::fmt::Debug; use std::ptr; -use vortex::error::VortexExpect; use vortex::error::VortexResult; mod bind; mod cardinality; @@ -244,9 +243,9 @@ unsafe extern "C-unwind" fn function( let client_context = unsafe { ClientContext::borrow(duckdb_client_context) }; let bind_data = unsafe { &*(bind_data as *const T::BindData) }; let global_init_data = unsafe { global_init_data.cast::().as_mut() } - .vortex_expect("global_init_data null pointer"); + .expect("global_init_data null pointer"); let local_init_data = unsafe { local_init_data.cast::().as_mut() } - .vortex_expect("local_init_data null pointer"); + .expect("local_init_data null pointer"); let mut data_chunk = unsafe { DataChunk::borrow(output) }; match T::scan( diff --git a/vortex-duckdb/src/duckdb/table_function/partition.rs b/vortex-duckdb/src/duckdb/table_function/partition.rs index 6313a5f727c..9c5d5fb828d 100644 --- a/vortex-duckdb/src/duckdb/table_function/partition.rs +++ b/vortex-duckdb/src/duckdb/table_function/partition.rs @@ -4,7 +4,6 @@ use std::ffi::c_void; use cpp::duckdb_vx_error; -use vortex::error::VortexExpect; use crate::cpp; use crate::cpp::idx_t; @@ -18,11 +17,11 @@ pub(crate) unsafe extern "C-unwind" fn get_partition_data_callback idx_t { let bind_data = - unsafe { bind_data.cast::().as_ref() }.vortex_expect("bind_data null pointer"); + unsafe { 
bind_data.cast::().as_ref() }.expect("bind_data null pointer"); let global_init_data = unsafe { global_init_data.cast::().as_mut() } - .vortex_expect("global_init_data null pointer"); + .expect("global_init_data null pointer"); let local_init_data = unsafe { local_init_data.cast::().as_mut() } - .vortex_expect("local_init_data null pointer"); + .expect("local_init_data null pointer"); match T::partition_data(bind_data, global_init_data, local_init_data) { Ok(batch_id) => batch_id, diff --git a/vortex-duckdb/src/duckdb/table_function/pushdown_complex_filter.rs b/vortex-duckdb/src/duckdb/table_function/pushdown_complex_filter.rs index 52be2b1f78e..373d00f0e8d 100644 --- a/vortex-duckdb/src/duckdb/table_function/pushdown_complex_filter.rs +++ b/vortex-duckdb/src/duckdb/table_function/pushdown_complex_filter.rs @@ -3,8 +3,6 @@ use std::ffi::c_void; -use vortex::error::VortexExpect; - use crate::cpp; use crate::duckdb::TableFunction; use crate::duckdb::expr::Expression; @@ -17,7 +15,7 @@ pub(crate) unsafe extern "C-unwind" fn pushdown_complex_filter_callback bool { let bind_data = - unsafe { bind_data.cast::().as_mut() }.vortex_expect("bind_data null pointer"); + unsafe { bind_data.cast::().as_mut() }.expect("bind_data null pointer"); let expr = unsafe { Expression::borrow(expr) }; try_or(error_out, || T::pushdown_complex_filter(bind_data, &expr)) } diff --git a/vortex-duckdb/src/duckdb/table_function/virtual_columns.rs b/vortex-duckdb/src/duckdb/table_function/virtual_columns.rs index f6fb2214b80..95099f578da 100644 --- a/vortex-duckdb/src/duckdb/table_function/virtual_columns.rs +++ b/vortex-duckdb/src/duckdb/table_function/virtual_columns.rs @@ -3,8 +3,6 @@ use std::ffi::c_void; -use vortex::error::VortexExpect; - use crate::cpp; use crate::duckdb::LogicalType; use crate::duckdb::TableFunction; @@ -16,7 +14,7 @@ pub(crate) unsafe extern "C-unwind" fn get_virtual_columns_callback().as_ref() }.vortex_expect("bind_data null pointer"); + unsafe { 
bind_data.cast::().as_ref() }.expect("bind_data null pointer"); let mut result = unsafe { VirtualColumnsResult::borrow(result) }; T::virtual_columns(bind_data, &mut result); diff --git a/vortex-duckdb/src/duckdb/value.rs b/vortex-duckdb/src/duckdb/value.rs index 54c684ef97a..777f4209e9c 100644 --- a/vortex-duckdb/src/duckdb/value.rs +++ b/vortex-duckdb/src/duckdb/value.rs @@ -11,7 +11,6 @@ use vortex::buffer::BufferString; use vortex::buffer::ByteBuffer; use vortex::dtype::NativeDType; use vortex::error::VortexError; -use vortex::error::VortexExpect; use vortex::error::vortex_err; use vortex::error::vortex_panic; @@ -83,7 +82,7 @@ impl<'a> ValueRef<'a> { let string = BufferString::from( cstr.to_str() .map_err(|e| vortex_err!("Invalid UTF-8 string from DuckDB: {e}")) - .vortex_expect("Invalid UTF-8 string from DuckDB"), + .expect("Invalid UTF-8 string from DuckDB"), ); unsafe { cpp::duckdb_free(ptr.cast()) }; ExtractedValue::Varchar(string) @@ -128,14 +127,14 @@ impl<'a> ValueRef<'a> { let value = i128_from_parts(decimal.value.upper, decimal.value.lower); ExtractedValue::Decimal( decimal.width, - i8::try_from(decimal.scale).vortex_expect("invalid scale"), + i8::try_from(decimal.scale).expect("invalid scale"), value, ) } DUCKDB_TYPE::DUCKDB_TYPE_LIST => { let elem_count = usize::try_from(unsafe { cpp::duckdb_get_list_size(self.as_ptr()) }) - .vortex_expect("List size must fit usize"); + .expect("List size must fit usize"); ExtractedValue::List( (0..elem_count) .map(|i| unsafe { diff --git a/vortex-duckdb/src/duckdb/vector.rs b/vortex-duckdb/src/duckdb/vector.rs index 6a1b88bb5e8..0bdfdb30170 100644 --- a/vortex-duckdb/src/duckdb/vector.rs +++ b/vortex-duckdb/src/duckdb/vector.rs @@ -12,7 +12,6 @@ use bitvec::view::BitView; use vortex::array::validity::Validity; use vortex::buffer::BitBuffer; use vortex::buffer::Buffer; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::error::vortex_err; @@ -102,7 +101,7 @@ 
impl Vector { pub fn set_dictionary_id(&mut self, dict_id: String) { let dict_id = CString::new(dict_id) .map_err(|e| vortex_err!("cstr creation error {e}")) - .vortex_expect("dictionary ID should be valid C string"); + .expect("dictionary ID should be valid C string"); unsafe { cpp::duckdb_vx_set_dictionary_vector_id( self.ptr, diff --git a/vortex-duckdb/src/exporter/decimal.rs b/vortex-duckdb/src/exporter/decimal.rs index ff4904c0b5e..7961e2e178b 100644 --- a/vortex-duckdb/src/exporter/decimal.rs +++ b/vortex-duckdb/src/exporter/decimal.rs @@ -14,7 +14,6 @@ use vortex::dtype::DecimalDType; use vortex::dtype::DecimalType; use vortex::dtype::NativeDecimalType; use vortex::dtype::match_each_decimal_value_type; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::mask::Mask; @@ -92,7 +91,7 @@ where .iter() .zip(unsafe { vector.as_slice_mut(len) }) { - *dst = ::from(*src).vortex_expect( + *dst = ::from(*src).expect( "We know all decimals with this scale/precision fit into the target bit width", ); } @@ -130,7 +129,6 @@ pub fn precision_to_duckdb_storage_size(decimal_dtype: &DecimalDType) -> VortexR mod tests { use vortex::array::arrays::DecimalArray; use vortex::dtype::DecimalDType; - use vortex::error::VortexExpect; use super::*; use crate::duckdb::DataChunk; @@ -166,7 +164,7 @@ mod tests { // Create a DuckDB integer chunk since decimal will be stored as i32 for this precision let mut chunk = DataChunk::new([LogicalType::decimal_type(10, 2) - .vortex_expect("LogicalType creation should succeed for test data")]); + .expect("LogicalType creation should succeed for test data")]); new_zero_copy_exporter(&arr) .unwrap() @@ -193,7 +191,7 @@ mod tests { ); let mut chunk = DataChunk::new([LogicalType::decimal_type(5, 1) - .vortex_expect("LogicalType creation should succeed for test data")]); + .expect("LogicalType creation should succeed for test data")]); // Export first 3 elements new_zero_copy_exporter(&arr) @@ 
-219,7 +217,7 @@ mod tests { DecimalArray::from_option_iter([Some(123456i32), None, Some(789012i32)], decimal_dtype); let mut chunk = DataChunk::new([LogicalType::decimal_type(8, 3) - .vortex_expect("LogicalType creation should succeed for test data")]); + .expect("LogicalType creation should succeed for test data")]); new_zero_copy_exporter(&arr) .unwrap() diff --git a/vortex-duckdb/src/exporter/fixed_size_list.rs b/vortex-duckdb/src/exporter/fixed_size_list.rs index 1caa2f982d0..fbf23861d52 100644 --- a/vortex-duckdb/src/exporter/fixed_size_list.rs +++ b/vortex-duckdb/src/exporter/fixed_size_list.rs @@ -94,7 +94,6 @@ mod tests { use vortex::array::IntoArray as _; use vortex::array::validity::Validity; use vortex::buffer::buffer; - use vortex::error::VortexExpect; use vortex_array::VortexSessionExecute; use super::*; @@ -297,7 +296,7 @@ mod tests { LogicalType::new_array(cpp::DUCKDB_TYPE::DUCKDB_TYPE_INTEGER, inner_list_size); LogicalType::array_type(inner_array_type, outer_list_size) - .vortex_expect("failed to create nested array type") + .expect("failed to create nested array type") } #[test] diff --git a/vortex-duckdb/src/exporter/list.rs b/vortex-duckdb/src/exporter/list.rs index 237b84813c5..90790093208 100644 --- a/vortex-duckdb/src/exporter/list.rs +++ b/vortex-duckdb/src/exporter/list.rs @@ -159,7 +159,6 @@ mod tests { use vortex::array::validity::Validity; use vortex::buffer::Buffer; use vortex::buffer::buffer; - use vortex::error::VortexExpect; use vortex_array::VortexSessionExecute; use super::*; @@ -181,7 +180,7 @@ mod tests { .into_array(); let list_type = LogicalType::list_type(LogicalType::int32()) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut chunk = DataChunk::new([list_type]); new_array_exporter( @@ -220,7 +219,7 @@ mod tests { .into_array(); let list_type = LogicalType::list_type(LogicalType::varchar()) - .vortex_expect("LogicalType creation should 
succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut chunk = DataChunk::new([list_type]); new_array_exporter( diff --git a/vortex-duckdb/src/exporter/list_view.rs b/vortex-duckdb/src/exporter/list_view.rs index 3a39705475d..b0ffa2a0e5f 100644 --- a/vortex-duckdb/src/exporter/list_view.rs +++ b/vortex-duckdb/src/exporter/list_view.rs @@ -171,7 +171,6 @@ mod tests { use vortex::array::validity::Validity; use vortex::buffer::Buffer; use vortex::buffer::buffer; - use vortex::error::VortexExpect; use vortex_array::VortexSessionExecute; use super::*; @@ -195,7 +194,7 @@ mod tests { .into_array(); let list_type = LogicalType::list_type(LogicalType::varchar()) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut chunk = DataChunk::new([list_type]); new_array_exporter( @@ -230,7 +229,7 @@ mod tests { .into_array(); let list_type = LogicalType::list_type(LogicalType::int32()) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut chunk = DataChunk::new([list_type]); new_array_exporter( @@ -271,7 +270,7 @@ mod tests { .into_array(); let list_type = LogicalType::list_type(LogicalType::varchar()) - .vortex_expect("LogicalType creation should succeed for test data"); + .expect("LogicalType creation should succeed for test data"); let mut chunk = DataChunk::new([list_type]); new_array_exporter( diff --git a/vortex-duckdb/src/exporter/primitive.rs b/vortex-duckdb/src/exporter/primitive.rs index 06bcca2af41..5b352e39600 100644 --- a/vortex-duckdb/src/exporter/primitive.rs +++ b/vortex-duckdb/src/exporter/primitive.rs @@ -69,7 +69,6 @@ impl ColumnExporter for PrimitiveExporter { #[cfg(test)] mod tests { use itertools::Itertools; - use vortex::error::VortexExpect; use vortex_array::VortexSessionExecute; use super::*; @@ -103,7 +102,7 @@ mod tests { fn 
test_long_primitive_exporter() { const ARRAY_COUNT: usize = 2; const LEN: usize = DUCKDB_STANDARD_VECTOR_SIZE * ARRAY_COUNT; - let arr = PrimitiveArray::from_iter(0..i32::try_from(LEN).vortex_expect("")); + let arr = PrimitiveArray::from_iter(0..i32::try_from(LEN).expect("")); { let mut chunk = (0..ARRAY_COUNT) diff --git a/vortex-duckdb/src/exporter/run_end.rs b/vortex-duckdb/src/exporter/run_end.rs index 32b102f949a..07af5a2962f 100644 --- a/vortex-duckdb/src/exporter/run_end.rs +++ b/vortex-duckdb/src/exporter/run_end.rs @@ -12,7 +12,6 @@ use vortex::dtype::IntegerPType; use vortex::dtype::match_each_integer_ptype; use vortex::encodings::runend::RunEndArray; use vortex::encodings::runend::RunEndArrayParts; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use crate::convert::ToDuckDBScalar; @@ -59,9 +58,9 @@ impl ColumnExporter for RunEndExporter { // Adjust offset to account for the run-end offset. let mut offset = E::from_usize(self.run_end_offset + offset) - .vortex_expect("RunEndExporter::export: offset is not a valid value"); + .expect("RunEndExporter::export: offset is not a valid value"); // Compute the final end offset. - let end_offset = offset + E::from_usize(len).vortex_expect("len is not end type"); + let end_offset = offset + E::from_usize(len).expect("len is not end type"); // Find the run that contains the start offset. let start_run_idx = ends_slice @@ -71,7 +70,7 @@ impl ColumnExporter for RunEndExporter { // Find the final run in case we can short-circuit and return a constant vector. let end_run_idx = ends_slice .search_sorted( - &offset.add(E::from_usize(len).vortex_expect("len out of bounds")), + &offset.add(E::from_usize(len).expect("len out of bounds")), SearchSortedSide::Right, )? 
.to_ends_index(ends_slice.len()); @@ -91,12 +90,10 @@ impl ColumnExporter for RunEndExporter { for (run_idx, &next_end) in ends_slice[start_run_idx..=end_run_idx].iter().enumerate() { let next_end = next_end.min(end_offset); - let run_len = (next_end - offset) - .to_usize() - .vortex_expect("run_len is usize"); + let run_len = (next_end - offset).to_usize().expect("run_len is usize"); // Push the runs into the selection vector. - sel_vec_slice[..run_len].fill(u32::try_from(run_idx).vortex_expect("sel_idx is u32")); + sel_vec_slice[..run_len].fill(u32::try_from(run_idx).expect("sel_idx is u32")); sel_vec_slice = &mut sel_vec_slice[run_len..]; offset = next_end; @@ -110,7 +107,7 @@ impl ColumnExporter for RunEndExporter { // values we referenced by looking at the last index of the selection vector. let values_len = *unsafe { sel_vec.as_slice_mut(len) } .last() - .vortex_expect("non-empty") + .expect("non-empty") + 1; // Export the run-end values into the vector, and then turn it into a dictionary vector. 
diff --git a/vortex-duckdb/src/exporter/sequence.rs b/vortex-duckdb/src/exporter/sequence.rs index 93b2c419b3f..58b1087ba20 100644 --- a/vortex-duckdb/src/exporter/sequence.rs +++ b/vortex-duckdb/src/exporter/sequence.rs @@ -3,7 +3,6 @@ use bitvec::macros::internal::funty::Fundamental; use vortex::encodings::sequence::SequenceArray; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use crate::duckdb::Vector; @@ -16,11 +15,11 @@ struct SequenceExporter { pub(crate) fn new_exporter(array: &SequenceArray) -> VortexResult> { Ok(Box::new(SequenceExporter { - start: array.base().as_i64().vortex_expect("cannot have null base"), + start: array.base().as_i64().expect("cannot have null base"), step: array .multiplier() .as_i64() - .vortex_expect("cannot have null multiplier"), + .expect("cannot have null multiplier"), })) } diff --git a/vortex-duckdb/src/exporter/struct_.rs b/vortex-duckdb/src/exporter/struct_.rs index eda7024106b..78b56842125 100644 --- a/vortex-duckdb/src/exporter/struct_.rs +++ b/vortex-duckdb/src/exporter/struct_.rs @@ -80,7 +80,6 @@ mod tests { use vortex::array::validity::Validity; use vortex::buffer::BitBuffer; use vortex::buffer::buffer; - use vortex::error::VortexExpect; use vortex_array::VortexSessionExecute; use super::*; @@ -95,8 +94,7 @@ mod tests { let strings = VarBinViewArray::from_iter_str(vec!["a", "b", "c", "d", "e", "f", "g", "h", "i", "j"]) .into_array(); - let arr = - StructArray::from_fields(&[("a", prim), ("b", strings)]).vortex_expect("struct array"); + let arr = StructArray::from_fields(&[("a", prim), ("b", strings)]).expect("struct array"); let mut chunk = DataChunk::new([LogicalType::struct_type( vec![ LogicalType::new(cpp::duckdb_type::DUCKDB_TYPE_INTEGER), @@ -104,7 +102,7 @@ mod tests { ], vec![CString::new("col1").unwrap(), CString::new("col2").unwrap()], ) - .vortex_expect("LogicalType creation should succeed for test data")]); + .expect("LogicalType creation should succeed for test data")]); new_exporter( 
arr, @@ -160,7 +158,7 @@ mod tests { true, true, true, false, false, false, true, true, true, true, ])), ) - .vortex_expect("StructArray creation should succeed for test data"); + .expect("StructArray creation should succeed for test data"); let mut chunk = DataChunk::new([LogicalType::struct_type( vec![ LogicalType::new(cpp::duckdb_type::DUCKDB_TYPE_INTEGER), @@ -168,7 +166,7 @@ mod tests { ], vec![CString::new("col1").unwrap(), CString::new("col2").unwrap()], ) - .vortex_expect("LogicalType creation should succeed for test data")]); + .expect("LogicalType creation should succeed for test data")]); new_exporter( arr, @@ -195,7 +193,7 @@ mod tests { buffer![0u8, 1, 1, 2, 2, 2, 2, 3, 3, 4].into_array(), VarBinViewArray::from_iter_str(vec!["b", "c", "d", "g", "h"]).into_array(), ) - .vortex_expect("DictArray creation should succeed for test data") + .expect("DictArray creation should succeed for test data") .into_array(); let arr = StructArray::try_new( ["col1", "col2"].into(), @@ -205,7 +203,7 @@ mod tests { true, true, true, false, false, false, true, true, true, true, ])), ) - .vortex_expect("StructArray creation should succeed for test data"); + .expect("StructArray creation should succeed for test data"); let mut chunk = DataChunk::new([LogicalType::struct_type( vec![ LogicalType::new(cpp::duckdb_type::DUCKDB_TYPE_INTEGER), @@ -213,7 +211,7 @@ mod tests { ], vec![CString::new("col1").unwrap(), CString::new("col2").unwrap()], ) - .vortex_expect("LogicalType creation should succeed for test data")]); + .expect("LogicalType creation should succeed for test data")]); new_exporter( arr, diff --git a/vortex-duckdb/src/lib.rs b/vortex-duckdb/src/lib.rs index 701ac40021e..1e8f1179aaa 100644 --- a/vortex-duckdb/src/lib.rs +++ b/vortex-duckdb/src/lib.rs @@ -8,7 +8,6 @@ use std::ffi::c_char; use std::sync::LazyLock; use vortex::VortexSessionDefault; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::io::runtime::BlockingRuntime; use 
vortex::io::runtime::current::CurrentThreadRuntime; @@ -66,13 +65,13 @@ pub unsafe extern "C" fn vortex_init_rust(db: cpp::duckdb_database) { database .register_vortex_scan_replacement() - .vortex_expect("failed to register vortex scan replacement"); + .expect("failed to register vortex scan replacement"); let conn = database .connect() .inspect_err(|e| println!("err {e}")) - .vortex_expect("Failed to connect to DuckDB database"); - register_table_functions(&conn).vortex_expect("Failed to initialize Vortex extension"); + .expect("Failed to connect to DuckDB database"); + register_table_functions(&conn).expect("Failed to initialize Vortex extension"); } /// The DuckDB extension ABI version function. diff --git a/vortex-duckdb/src/scan.rs b/vortex-duckdb/src/scan.rs index 52f8880aceb..0fea9b3d3fc 100644 --- a/vortex-duckdb/src/scan.rs +++ b/vortex-duckdb/src/scan.rs @@ -32,7 +32,6 @@ use vortex::array::arrays::StructArray; use vortex::array::arrays::StructVTable; use vortex::array::optimizer::ArrayOptimizer; use vortex::dtype::FieldNames; -use vortex::error::VortexExpect; use vortex::error::VortexResult; use vortex::error::vortex_bail; use vortex::error::vortex_err; @@ -162,7 +161,7 @@ fn extract_projection_expr(init: &TableInitInput) -> Vortex init.bind_data() .column_names .get(idx) - .vortex_expect("prune idx in column names") + .expect("prune idx in column names") }) .map(|s| Arc::from(s.as_str())) .collect::(), @@ -182,11 +181,7 @@ fn extract_table_filter_expr( .map(|(idx, ex)| { let idx_u: usize = idx.as_(); let col_idx: usize = column_ids[idx_u].as_(); - let name = init - .bind_data() - .column_names - .get(col_idx) - .vortex_expect("exists"); + let name = init.bind_data().column_names.get(col_idx).expect("exists"); try_from_table_filter( &ex, &col(name.as_str()), @@ -349,7 +344,7 @@ impl TableFunction for VortexTableFunction { let exporter = local_state .exporter .as_mut() - .vortex_expect("error: exporter missing"); + .expect("error: exporter missing"); let 
has_more_data = exporter.export(chunk)?; diff --git a/vortex-error/public-api.lock b/vortex-error/public-api.lock index 757f9a986dc..f7985f19010 100644 --- a/vortex-error/public-api.lock +++ b/vortex-error/public-api.lock @@ -50,8 +50,6 @@ pub vortex_error::VortexError::Serde(vortex_error::ErrString, alloc::boxed::Box< pub vortex_error::VortexError::Shared(alloc::sync::Arc) -pub vortex_error::VortexError::TryFromInt(core::num::error::TryFromIntError, alloc::boxed::Box) - impl vortex_error::VortexError pub fn vortex_error::VortexError::with_context>(self, msg: T) -> Self @@ -72,10 +70,6 @@ impl core::convert::From for vortex_error::VortexErro pub fn vortex_error::VortexError::from(_: core::convert::Infallible) -> Self -impl core::convert::From for vortex_error::VortexError - -pub fn vortex_error::VortexError::from(value: core::num::error::TryFromIntError) -> Self - impl core::convert::From for vortex_error::VortexError pub fn vortex_error::VortexError::from(value: flatbuffers::verifier::InvalidFlatbuffer) -> Self @@ -144,24 +138,6 @@ impl core::convert::From for vortex_error::ErrString where T: core::conver pub fn vortex_error::ErrString::from(msg: T) -> Self -pub trait vortex_error::VortexExpect - -pub type vortex_error::VortexExpect::Output - -pub fn vortex_error::VortexExpect::vortex_expect(self, msg: &'static str) -> Self::Output - -impl vortex_error::VortexExpect for core::result::Result where E: core::convert::Into - -pub type core::result::Result::Output = T - -pub fn core::result::Result::vortex_expect(self, msg: &'static str) -> Self::Output - -impl vortex_error::VortexExpect for core::option::Option - -pub type core::option::Option::Output = T - -pub fn core::option::Option::vortex_expect(self, msg: &'static str) -> Self::Output - pub type vortex_error::SharedVortexResult = core::result::Result> pub type vortex_error::VortexResult = core::result::Result diff --git a/vortex-error/src/lib.rs b/vortex-error/src/lib.rs index 156ab574706..f84d61fa3b0 100644 --- 
a/vortex-error/src/lib.rs +++ b/vortex-error/src/lib.rs @@ -17,7 +17,6 @@ use std::fmt::Debug; use std::fmt::Display; use std::fmt::Formatter; use std::io; -use std::num::TryFromIntError; use std::ops::Deref; use std::sync::Arc; @@ -118,8 +117,6 @@ pub enum VortexError { /// A wrapper for Tokio join error. #[cfg(feature = "tokio")] Join(tokio::task::JoinError, Box), - /// Wrap errors for fallible integer casting. - TryFromInt(TryFromIntError, Box), /// Wrap protobuf-related errors Prost(Box, Box), } @@ -155,7 +152,6 @@ impl VortexError { Jiff(..) => "Jiff error: ", #[cfg(feature = "tokio")] Join(..) => "Tokio join error:", - TryFromInt(..) => "Try from int error:", Prost(..) => "Prost error:", } } @@ -183,7 +179,6 @@ impl VortexError { Jiff(.., bt) => Some(bt.as_ref()), #[cfg(feature = "tokio")] Join(.., bt) => Some(bt.as_ref()), - TryFromInt(.., bt) => Some(bt.as_ref()), Prost(.., bt) => Some(bt.as_ref()), Context(_, inner) => inner.backtrace(), Shared(inner) => inner.backtrace(), @@ -236,9 +231,6 @@ impl VortexError { Join(err, _) => { format!("{err}") } - TryFromInt(err, _) => { - format!("{err}") - } Prost(err, _) => { format!("{err}") } @@ -312,50 +304,6 @@ impl From<&Arc> for VortexError { } } -/// A trait for expect-ing a VortexResult or an Option. -pub trait VortexExpect { - /// The type of the value being expected. - type Output; - - /// Returns the value of the result if it is Ok, otherwise panics with the error. - /// Should be called only in contexts where the error condition represents a bug (programmer error). - /// - /// # `&'static` message lifetime - /// - /// The panic string argument should be a string literal, hence the `&'static` lifetime. If - /// you'd like to panic with a dynamic format string, consider using `unwrap_or_else` combined - /// with the `vortex_panic!` macro instead. 
- fn vortex_expect(self, msg: &'static str) -> Self::Output; -} - -impl VortexExpect for Result -where - E: Into, -{ - type Output = T; - - #[inline(always)] - fn vortex_expect(self, msg: &'static str) -> Self::Output { - self.map_err(|err| err.into()) - .unwrap_or_else(|e| vortex_panic!(e.with_context(msg.to_string()))) - } -} - -impl VortexExpect for Option { - type Output = T; - - #[inline(always)] - fn vortex_expect(self, msg: &'static str) -> Self::Output { - self.unwrap_or_else(|| { - let err = VortexError::AssertionFailed( - msg.to_string().into(), - Box::new(Backtrace::capture()), - ); - vortex_panic!(err) - }) - } -} - /// A convenient macro for creating a VortexError. #[macro_export] macro_rules! vortex_err { @@ -544,12 +492,6 @@ impl From for VortexError { } } -impl From for VortexError { - fn from(value: TryFromIntError) -> Self { - VortexError::TryFromInt(value, Box::new(Backtrace::capture())) - } -} - impl From for VortexError { fn from(value: prost::EncodeError) -> Self { Self::Prost(Box::new(value), Box::new(Backtrace::capture())) diff --git a/vortex-ffi/src/array.rs b/vortex-ffi/src/array.rs index 54d794f52a5..c1c745553ec 100644 --- a/vortex-ffi/src/array.rs +++ b/vortex-ffi/src/array.rs @@ -8,7 +8,6 @@ use std::sync::Arc; use vortex::array::Array; use vortex::array::ToCanonical; use vortex::dtype::half::f16; -use vortex::error::VortexExpect; use vortex::error::vortex_err; use crate::arc_dyn_wrapper; @@ -89,9 +88,7 @@ pub unsafe extern "C-unwind" fn vx_array_is_null( ) -> bool { let array = vx_array::as_ref(array); // TODO(joe): propagate this error up instead of expecting - array - .is_invalid(index as usize) - .vortex_expect("is_invalid failed") + array.is_invalid(index as usize).expect("is_invalid failed") } // TODO(robert): Make this return usize and remove error @@ -101,7 +98,13 @@ pub unsafe extern "C-unwind" fn vx_array_null_count( error_out: *mut *mut vx_error, ) -> u32 { let array = vx_array::as_ref(array); - try_or_default(error_out, || 
Ok(array.invalid_count()?.try_into()?)) + + try_or_default(error_out, || { + Ok(array + .invalid_count()? + .try_into() + .expect("null count must fit into u32")) + }) } macro_rules! ffiarray_get_ptype { @@ -111,24 +114,24 @@ macro_rules! ffiarray_get_ptype { pub unsafe extern "C-unwind" fn [](array: *const vx_array, index: u32) -> $ptype { let array = vx_array::as_ref(array); // TODO(joe): propagate this error up instead of expecting - let value = array.scalar_at(index as usize).vortex_expect("scalar_at failed"); + let value = array.scalar_at(index as usize).expect("scalar_at failed"); // TODO(joe): propagate this error up instead of expecting value.as_primitive() .as_::<$ptype>() - .vortex_expect("null value") + .expect("null value") } #[unsafe(no_mangle)] pub unsafe extern "C-unwind" fn [](array: *const vx_array, index: u32) -> $ptype { let array = vx_array::as_ref(array); // TODO(joe): propagate this error up instead of expecting - let value = array.scalar_at(index as usize).vortex_expect("scalar_at failed"); + let value = array.scalar_at(index as usize).expect("scalar_at failed"); // TODO(joe): propagate this error up instead of expecting value.as_extension() .to_storage_scalar() .as_primitive() .as_::<$ptype>() - .vortex_expect("null value") + .expect("null value") } } }; @@ -155,9 +158,7 @@ pub unsafe extern "C-unwind" fn vx_array_get_utf8( ) -> *const vx_string { let array = vx_array::as_ref(array); // TODO(joe): propagate this error up instead of expecting - let value = array - .scalar_at(index as usize) - .vortex_expect("scalar_at failed"); + let value = array.scalar_at(index as usize).expect("scalar_at failed"); let utf8_scalar = value.as_utf8(); if let Some(buffer) = utf8_scalar.value() { vx_string::new(Arc::from(buffer.as_str())) @@ -175,9 +176,7 @@ pub unsafe extern "C-unwind" fn vx_array_get_binary( ) -> *const vx_binary { let array = vx_array::as_ref(array); // TODO(joe): propagate this error up instead of expecting - let value = array - 
.scalar_at(index as usize) - .vortex_expect("scalar_at failed"); + let value = array.scalar_at(index as usize).expect("scalar_at failed"); let binary_scalar = value.as_binary(); if let Some(bytes) = binary_scalar.value() { vx_binary::new(Arc::from(bytes.as_bytes())) diff --git a/vortex-ffi/src/dtype.rs b/vortex-ffi/src/dtype.rs index bb0302cb4eb..bc99755bf77 100644 --- a/vortex-ffi/src/dtype.rs +++ b/vortex-ffi/src/dtype.rs @@ -10,7 +10,6 @@ use vortex::dtype::datetime::AnyTemporal; use vortex::dtype::datetime::Date; use vortex::dtype::datetime::Time; use vortex::dtype::datetime::Timestamp; -use vortex::error::VortexExpect; use vortex::error::vortex_panic; use crate::arc_wrapper; @@ -183,7 +182,7 @@ pub unsafe extern "C-unwind" fn vx_dtype_decimal_precision(dtype: *const vx_dtyp // TODO(joe): propagate this error up instead of expecting vx_dtype::as_ref(dtype) .as_decimal_opt() - .vortex_expect("not a decimal dtype") + .expect("not a decimal dtype") .precision() } @@ -193,7 +192,7 @@ pub unsafe extern "C-unwind" fn vx_dtype_decimal_scale(dtype: *const vx_dtype) - // TODO(joe): propagate this error up instead of expecting vx_dtype::as_ref(dtype) .as_decimal_opt() - .vortex_expect("not a decimal dtype") + .expect("not a decimal dtype") .scale() } @@ -208,7 +207,7 @@ pub unsafe extern "C-unwind" fn vx_dtype_struct_dtype( // TODO(joe): propagate this error up instead of expecting let struct_dtype = vx_dtype::as_ref(dtype) .as_struct_fields_opt() - .vortex_expect("not a struct dtype"); + .expect("not a struct dtype"); vx_struct_fields::new_ref(struct_dtype) } @@ -221,7 +220,7 @@ pub unsafe extern "C-unwind" fn vx_dtype_list_element(dtype: *const vx_dtype) -> // TODO(joe): propagate this error up instead of expecting let element_dtype = vx_dtype::as_ref(dtype) .as_list_element_opt() - .vortex_expect("not a list dtype"); + .expect("not a list dtype"); vx_dtype::new_ref(element_dtype) } @@ -236,7 +235,7 @@ pub unsafe extern "C-unwind" fn vx_dtype_fixed_size_list_element( 
// TODO(joe): propagate this error up instead of expecting let element_dtype = vx_dtype::as_ref(dtype) .as_fixed_size_list_element_opt() - .vortex_expect("not a fixed-size list dtype"); + .expect("not a fixed-size list dtype"); vx_dtype::new_ref(element_dtype) } @@ -254,7 +253,7 @@ pub unsafe extern "C-unwind" fn vx_dtype_fixed_size_list_size(dtype: *const vx_d #[unsafe(no_mangle)] pub unsafe extern "C-unwind" fn vx_dtype_is_time(dtype: *const DType) -> bool { // TODO(joe): propagate this error up instead of expecting - let dtype = unsafe { dtype.as_ref() }.vortex_expect("dtype null"); + let dtype = unsafe { dtype.as_ref() }.expect("dtype null"); match dtype { DType::Extension(ext_dtype) => ext_dtype.is::