Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit. Hold shift + click to select a range
aad102a
move over all code changes supporting high
danieljbruce Feb 5, 2026
d4b2b0b
feat: Add high precision TIMESTAMP values for queries
danieljbruce Feb 5, 2026
c369bdf
Merge branch 'big-query-query-changes' of https://github.com/googleap…
danieljbruce Feb 5, 2026
0c799a2
correct typo in comment
danieljbruce Feb 6, 2026
b033b9a
picoseconds / not nanoseconds
danieljbruce Feb 6, 2026
5871908
rename nanoseconds to picoseconds
danieljbruce Feb 6, 2026
d39d0a9
nanoseconds not microseconds
danieljbruce Feb 6, 2026
9b61a74
Add comment for non-meaningful use case
danieljbruce Feb 6, 2026
4cbde94
feat(bigquery): add unit tests for buildQueryRequest_ format options
google-labs-jules[bot] Feb 9, 2026
00795de
Move the code back into the BigQueryRange static
danieljbruce Feb 9, 2026
f042cc4
Move the “High Precision Query System Tests” block
danieljbruce Feb 9, 2026
8c4c2d5
Revert "Move the “High Precision Query System Tests” block"
danieljbruce Feb 9, 2026
48176dd
Reapply "Move the “High Precision Query System Tests” block"
danieljbruce Feb 9, 2026
756d77b
Move the tests to the end position
danieljbruce Feb 9, 2026
4ad1233
delete unwanted test cases
danieljbruce Feb 9, 2026
e38a4cb
Change the name used in the describe block
danieljbruce Feb 10, 2026
4886b51
Apply suggestion from @alvarowolfx
danieljbruce Feb 10, 2026
583ad83
Linter and clean up last commit
danieljbruce Feb 10, 2026
3775cb4
Fix unit tests in test/bigquery to expect right va
danieljbruce Feb 10, 2026
b508781
JS doc conversions
danieljbruce Feb 10, 2026
2e5188d
remove only
danieljbruce Feb 10, 2026
02a1eab
Remove bail: true
danieljbruce Feb 10, 2026
f0ec49b
removed todo
danieljbruce Feb 10, 2026
d4c401b
remove error in name
danieljbruce Feb 10, 2026
94aa537
remove error from tests
danieljbruce Feb 10, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
79 changes: 70 additions & 9 deletions handwritten/bigquery/src/bigquery.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1099,6 +1099,11 @@ export class BigQuery extends Service {
};
}),
};
} else if ((providedType as string).toUpperCase() === 'TIMESTAMP(12)') {
return {
type: 'TIMESTAMP',
timestampPrecision: '12',
};
}

providedType = (providedType as string).toUpperCase();
Expand Down Expand Up @@ -2249,11 +2254,30 @@ export class BigQuery extends Service {
if (res && res.jobComplete) {
let rows: any = [];
if (res.schema && res.rows) {
rows = BigQuery.mergeSchemaWithRows_(res.schema, res.rows, {
wrapIntegers: options.wrapIntegers || false,
parseJSON: options.parseJSON,
});
delete res.rows;
try {
/*
Without this try/catch block, calls to getRows will hang indefinitely if
a call to mergeSchemaWithRows_ fails because the error never makes it to
the callback. Instead, pass the error to the callback the user provides
so that the user can see the error.
*/
// eslint-disable-next-line @typescript-eslint/no-explicit-any
const listParams = {
'formatOptions.timestampOutputFormat':
queryReq.formatOptions?.timestampOutputFormat,
'formatOptions.useInt64Timestamp':
queryReq.formatOptions?.useInt64Timestamp,
};
rows = BigQuery.mergeSchemaWithRows_(res.schema, res.rows, {
wrapIntegers: options.wrapIntegers || false,
parseJSON: options.parseJSON,
listParams,
});
delete res.rows;
} catch (e) {
(callback as SimpleQueryRowsCallback)(e as Error, null, job);
return;
}
}
this.trace_('[runJobsQuery] job complete');
options._cachedRows = rows;
Expand Down Expand Up @@ -2334,6 +2358,18 @@ export class BigQuery extends Service {
if (options.job) {
return undefined;
}
const hasAnyFormatOpts =
options['formatOptions.timestampOutputFormat'] !== undefined ||
options['formatOptions.useInt64Timestamp'] !== undefined;
const defaultOpts = hasAnyFormatOpts
? {}
: {
timestampOutputFormat: 'ISO8601_STRING',
};
const formatOptions = extend(defaultOpts, {
timestampOutputFormat: options['formatOptions.timestampOutputFormat'],
useInt64Timestamp: options['formatOptions.useInt64Timestamp'],
});
const req: bigquery.IQueryRequest = {
useQueryCache: queryObj.useQueryCache,
labels: queryObj.labels,
Expand All @@ -2342,9 +2378,7 @@ export class BigQuery extends Service {
maximumBytesBilled: queryObj.maximumBytesBilled,
timeoutMs: options.timeoutMs,
location: queryObj.location || options.location,
formatOptions: {
useInt64Timestamp: true,
},
formatOptions,
maxResults: queryObj.maxResults || options.maxResults,
query: queryObj.query,
useLegacySql: false,
Expand Down Expand Up @@ -2588,6 +2622,7 @@ function convertSchemaFieldValue(
value = BigQueryRange.fromSchemaValue_(
value,
schemaField.rangeElementType!.type!,
options.listParams, // Required to convert TIMESTAMP values
);
break;
}
Expand Down Expand Up @@ -2665,6 +2700,14 @@ export class BigQueryRange {
};
}

/**
* This method returns start and end values for RANGE typed values returned from
* the server. It decodes the server RANGE value into start and end values so
* they can be used to construct a BigQueryRange.
* @private
* @param {string} value The range value.
* @returns {string[]} The start and end of the range.
*/
private static fromStringValue_(value: string): [start: string, end: string] {
let cleanedValue = value;
if (cleanedValue.startsWith('[') || cleanedValue.startsWith('(')) {
Expand All @@ -2684,14 +2727,32 @@ export class BigQueryRange {
return [start, end];
}

static fromSchemaValue_(value: string, elementType: string): BigQueryRange {
/**
* This method is only used by convertSchemaFieldValue and only when range
* values are passed into convertSchemaFieldValue. It produces a value that is
* delivered to the user for read calls and it needs to pass along listParams
* to ensure TIMESTAMP types are converted properly.
* @private
* @param {string} value The range value.
* @param {string} elementType The element type.
* @param {bigquery.tabledata.IListParams | bigquery.jobs.IGetQueryResultsParams} [listParams] The list parameters.
* @returns {BigQueryRange}
*/
static fromSchemaValue_(
value: string,
elementType: string,
listParams?:
| bigquery.tabledata.IListParams
| bigquery.jobs.IGetQueryResultsParams,
): BigQueryRange {
const [start, end] = BigQueryRange.fromStringValue_(value);
const convertRangeSchemaValue = (value: string) => {
if (value === 'UNBOUNDED' || value === 'NULL') {
return null;
}
return convertSchemaFieldValue({type: elementType}, value, {
wrapIntegers: false,
listParams,
});
};
return BigQuery.range(
Expand Down
19 changes: 15 additions & 4 deletions handwritten/bigquery/src/job.ts
Original file line number Diff line number Diff line change
Expand Up @@ -595,10 +595,21 @@ class Job extends Operation {
let rows: any = [];

if (resp.schema && resp.rows) {
rows = BigQuery.mergeSchemaWithRows_(resp.schema, resp.rows, {
wrapIntegers,
parseJSON,
});
try {
/*
Without this try/catch block, calls to /query endpoint will hang
indefinitely if a call to mergeSchemaWithRows_ fails because the
error never makes it to the callback. Instead, pass the error to the
callback the user provides so that the user can see the error.
*/
rows = BigQuery.mergeSchemaWithRows_(resp.schema, resp.rows, {
wrapIntegers,
parseJSON,
});
} catch (e) {
callback!(e as Error, null, null, resp);
return;
}
}

let nextQuery: QueryResultsOptions | null = null;
Expand Down
164 changes: 161 additions & 3 deletions handwritten/bigquery/system-test/bigquery.ts
Original file line number Diff line number Diff line change
Expand Up @@ -1472,9 +1472,14 @@ describe('BigQuery', () => {
],
},
(err, rows) => {
assert.ifError(err);
assert.strictEqual(rows!.length, 1);
done();
try {
// Without this try block the test runner silently fails
assert.ifError(err);
assert.strictEqual(rows!.length, 1);
done();
} catch (e) {
done(e);
}
},
);
});
Expand All @@ -1498,6 +1503,159 @@ describe('BigQuery', () => {
},
);
});
describe('High Precision Query System Tests', () => {
  let bigquery: BigQuery;
  const expectedTsValueNanoseconds = '2023-01-01T12:00:00.123456000Z';
  const expectedTsValuePicoseconds = '2023-01-01T12:00:00.123456789123Z';
  const expectedErrorMessage =
    'Cannot specify both timestamp_as_int and timestamp_output_format.';

  before(() => {
    bigquery = new BigQuery();
  });

  // TOF = formatOptions.timestampOutputFormat,
  // UI64 = formatOptions.useInt64Timestamp.
  const testCases = [
    {
      name: 'TOF: FLOAT64, UI64: true (error)',
      timestampOutputFormat: 'FLOAT64',
      useInt64Timestamp: true,
      expectedTsValue: undefined,
      expectedError: expectedErrorMessage,
    },
    {
      name: 'TOF: omitted, UI64: omitted (default INT64)',
      timestampOutputFormat: undefined,
      useInt64Timestamp: undefined,
      expectedTsValue: expectedTsValuePicoseconds,
    },
    {
      name: 'TOF: omitted, UI64: true',
      timestampOutputFormat: undefined,
      useInt64Timestamp: true,
      expectedTsValue: expectedTsValueNanoseconds,
    },
  ];
  type HighPrecisionTestCase = (typeof testCases)[number];

  /**
   * Builds the query options for a test case, setting only the format
   * options the case explicitly provides so that omitted fields exercise
   * the client-side defaults.
   */
  // eslint-disable-next-line @typescript-eslint/no-explicit-any
  function buildOptions(testCase: HighPrecisionTestCase): any {
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    const options: any = {};
    if (testCase.timestampOutputFormat !== undefined) {
      options['formatOptions.timestampOutputFormat'] =
        testCase.timestampOutputFormat;
    }
    if (testCase.useInt64Timestamp !== undefined) {
      options['formatOptions.useInt64Timestamp'] = testCase.useInt64Timestamp;
    }
    return options;
  }

  /**
   * Runs `query` with the options derived from `testCase` and verifies
   * either the timestamp value that `extractTs` pulls out of the first
   * row, or — when the case expects a failure — the error message.
   *
   * Note: when an expected failure unexpectedly succeeds, `assert.fail`
   * throws an AssertionError that is intentionally funneled into the
   * catch block, where its message fails the strictEqual comparison with
   * a contextual diagnostic.
   */
  async function runAndVerify(
    testCase: HighPrecisionTestCase,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    query: any,
    // eslint-disable-next-line @typescript-eslint/no-explicit-any
    extractTs: (row: any) => {value?: string},
  ): Promise<void> {
    try {
      const [rows] = await bigquery.query(query, buildOptions(testCase));
      if (testCase.expectedError) {
        assert.fail(
          `Query should have failed for ${testCase.name}, but succeeded`,
        );
      }
      assert.ok(rows.length > 0);
      const ts = extractTs(rows[0]);
      assert.ok(ts.value !== undefined);
      assert.strictEqual(ts.value, testCase.expectedTsValue);
      // eslint-disable-next-line @typescript-eslint/no-explicit-any
    } catch (err: any) {
      if (!testCase.expectedError) {
        // Genuinely unexpected failure: surface it to the test runner.
        throw err;
      }
      assert.strictEqual(
        err.message,
        testCase.expectedError,
        `Expected ${testCase.expectedError} error for ${testCase.name}, ` +
          `got ${err.message}`,
      );
    }
  }

  testCases.forEach(testCase => {
    it(`should handle ${testCase.name}`, async () => {
      /*
      The users use the new TIMESTAMP(12) type to indicate they want to
      opt in to using timestampPrecision=12. The reason is that some queries
      like `SELECT CAST(? as TIMESTAMP(12))` will fail if we set
      timestampPrecision=12 and we don't want this code change to affect
      existing users. Queries using TIMESTAMP_ADD are another example.
      */
      const query = {
        query: 'SELECT ? as ts',
        params: [bigquery.timestamp('2023-01-01T12:00:00.123456789123Z')],
        types: ['TIMESTAMP(12)'],
      };
      await runAndVerify(testCase, query, row => row.ts);
    });

    it(`should handle nested ${testCase.name}`, async () => {
      /*
      The users use the new TIMESTAMP(12) type to indicate they want to
      opt in to using timestampPrecision=12. The reason is that some queries
      like `SELECT CAST(? as TIMESTAMP(12))` will fail if we set
      timestampPrecision=12 and we don't want this code change to affect
      existing users.
      */
      const query = {
        query: 'SELECT ? obj',
        params: [
          {
            nested: {
              a: bigquery.timestamp('2023-01-01T12:00:00.123456789123Z'),
            },
          },
        ],
        types: [
          {
            nested: {
              a: 'TIMESTAMP(12)',
            },
          },
        ],
      };
      await runAndVerify(testCase, query, row => row.obj.nested.a);
    });
  });
});
});

describe('named', () => {
Expand Down
Loading