import * as os from 'os';
import * as process from 'process';

import { BSON, type Document, Int32 } from '../../bson';
import { MongoInvalidArgumentError } from '../../error';
import type { MongoOptions } from '../../mongo_client';
import { fileIsAccessible } from '../../utils';

// eslint-disable-next-line @typescript-eslint/no-require-imports
const NODE_DRIVER_VERSION = require('../../../package.json').version;

/**
 * @public
 * @see https://github.com/mongodb/specifications/blob/master/source/mongodb-handshake/handshake.md#hello-command
 */
export interface ClientMetadata {
  driver: {
    name: string;
    version: string;
  };
  os: {
    type: string;
    name?: NodeJS.Platform;
    architecture?: string;
    version?: string;
  };
  platform: string;
  application?: {
    name: string;
  };
  /** FaaS environment information */
  env?: {
    name: 'aws.lambda' | 'gcp.func' | 'azure.func' | 'vercel';
    timeout_sec?: Int32;
    memory_mb?: Int32;
    region?: string;
    url?: string;
  };
}

/** @public */
export interface ClientMetadataOptions {
  driverInfo?: {
    name?: string;
    version?: string;
    platform?: string;
  };
  appName?: string;
}

/** @internal */
export class LimitedSizeDocument {
  private document = new Map();
  /** BSON overhead: Int32 + Null byte */
  private documentSize = 5;

  constructor(private maxSize: number) {}

  /** Only adds key/value if the bsonByteLength is less than MAX_SIZE */
  public ifItFitsItSits(key: string, value: Record<string, any> | string): boolean {
    // The BSON byteLength of the new element is the same as serializing it to its own document,
    // subtracting the document size int32 and the null terminator.
    const newElementSize = BSON.serialize(new Map().set(key, value)).byteLength - 5;

    if (newElementSize + this.documentSize > this.maxSize) {
      return false;
    }

    this.documentSize += newElementSize;
    this.document.set(key, value);

    return true;
  }

  toObject(): Document {
    return BSON.deserialize(BSON.serialize(this.document), {
      promoteLongs: false,
      promoteBuffers: false,
      promoteValues: false,
      useBigInt64: false
    });
  }
}
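// Usage sketch (not part of the driver source; values are illustrative): LimitedSizeDocument
// only accumulates a key/value pair while the serialized BSON stays under the byte budget given
// to the constructor. Once an element would push the document over the limit, ifItFitsItSits
// returns false and the value is dropped.
//
//   const doc = new LimitedSizeDocument(64);
//   doc.ifItFitsItSits('driver', { name: 'nodejs', version: '6.0.0' }); // true: fits in 64 bytes
//   doc.ifItFitsItSits('platform', 'x'.repeat(200));                    // false: would exceed 64 bytes
//   doc.toObject();                                                     // { driver: { ... } } only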
type MakeClientMetadataOptions = Pick<MongoOptions, 'appName' | 'driverInfo'>;

/**
 * From the specs:
 * Implementors SHOULD cumulatively update fields in the following order until the document is under the size limit:
 * 1. Omit fields from `env` except `env.name`.
 * 2. Omit fields from `os` except `os.type`.
 * 3. Omit the `env` document entirely.
 * 4. Truncate `platform`. -- special case: we do not truncate this field
 */
export function makeClientMetadata(options: MakeClientMetadataOptions): ClientMetadata {
  const metadataDocument = new LimitedSizeDocument(512);

  const { appName = '' } = options;
  // Add app name first, it must be sent
  if (appName.length > 0) {
    const name =
      Buffer.byteLength(appName, 'utf8') <= 128
        ? options.appName
        : Buffer.from(appName, 'utf8').subarray(0, 128).toString('utf8');
    metadataDocument.ifItFitsItSits('application', { name });
  }

  const { name = '', version = '', platform = '' } = options.driverInfo;

  const driverInfo = {
    name: name.length > 0 ? `nodejs|${name}` : 'nodejs',
    version: version.length > 0 ? `${NODE_DRIVER_VERSION}|${version}` : NODE_DRIVER_VERSION
  };

  if (!metadataDocument.ifItFitsItSits('driver', driverInfo)) {
    throw new MongoInvalidArgumentError(
      'Unable to include driverInfo name and version, metadata cannot exceed 512 bytes'
    );
  }

  let runtimeInfo = getRuntimeInfo();
  if (platform.length > 0) {
    runtimeInfo = `${runtimeInfo}|${platform}`;
  }

  if (!metadataDocument.ifItFitsItSits('platform', runtimeInfo)) {
    throw new MongoInvalidArgumentError(
      'Unable to include driverInfo platform, metadata cannot exceed 512 bytes'
    );
  }

  // Note: order matters, os.type is last so it will be removed last if we're at maxSize
  const osInfo = new Map()
    .set('name', process.platform)
    .set('architecture', process.arch)
    .set('version', os.release())
    .set('type', os.type());

  if (!metadataDocument.ifItFitsItSits('os', osInfo)) {
    for (const key of osInfo.keys()) {
      osInfo.delete(key);
      if (osInfo.size === 0) break;
      if (metadataDocument.ifItFitsItSits('os', osInfo)) break;
    }
  }

  const faasEnv = getFAASEnv();
  if (faasEnv != null) {
    if (!metadataDocument.ifItFitsItSits('env', faasEnv)) {
      for (const key of faasEnv.keys()) {
        faasEnv.delete(key);
        if (faasEnv.size === 0) break;
        if (metadataDocument.ifItFitsItSits('env', faasEnv)) break;
      }
    }
  }

  return metadataDocument.toObject() as ClientMetadata;
}

let dockerPromise: Promise<boolean>;

/** @internal */
async function getContainerMetadata() {
  const containerMetadata: Record<string, any> = {};
  dockerPromise ??= fileIsAccessible('/.dockerenv');
  const isDocker = await dockerPromise;

  const { KUBERNETES_SERVICE_HOST = '' } = process.env;
  const isKubernetes = KUBERNETES_SERVICE_HOST.length > 0 ? true : false;

  if (isDocker) containerMetadata.runtime = 'docker';
  if (isKubernetes) containerMetadata.orchestrator = 'kubernetes';

  return containerMetadata;
}
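// Illustrative only: the rough shape makeClientMetadata returns for a plain Node.js process
// when called with { appName: 'myApp', driverInfo: {} }. The host-dependent values below are
// assumptions, not fixed outputs:
//
//   {
//     application: { name: 'myApp' },
//     driver: { name: 'nodejs', version: NODE_DRIVER_VERSION },
//     platform: 'Node.js v20.11.0, LE',
//     os: { name: 'linux', architecture: 'x64', version: '6.1.0', type: 'Linux' }
//   }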
/**
 * @internal
 * Re-add each metadata value.
 * Attempt to add new env container metadata, but keep old data if it does not fit.
 */
export async function addContainerMetadata(originalMetadata: ClientMetadata) {
  const containerMetadata = await getContainerMetadata();
  if (Object.keys(containerMetadata).length === 0) return originalMetadata;

  const extendedMetadata = new LimitedSizeDocument(512);

  const extendedEnvMetadata = { ...originalMetadata?.env, container: containerMetadata };

  for (const [key, val] of Object.entries(originalMetadata)) {
    if (key !== 'env') {
      extendedMetadata.ifItFitsItSits(key, val);
    } else {
      if (!extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata)) {
        // add in old data if newer / extended metadata does not fit
        extendedMetadata.ifItFitsItSits('env', val);
      }
    }
  }

  if (!('env' in originalMetadata)) {
    extendedMetadata.ifItFitsItSits('env', extendedEnvMetadata);
  }

  return extendedMetadata.toObject();
}

/**
 * Collects FaaS metadata.
 * - `name` MUST be the last key in the Map returned.
 */
export function getFAASEnv(): Map<string, string | Int32> | null {
  const {
    AWS_EXECUTION_ENV = '',
    AWS_LAMBDA_RUNTIME_API = '',
    FUNCTIONS_WORKER_RUNTIME = '',
    K_SERVICE = '',
    FUNCTION_NAME = '',
    VERCEL = '',
    AWS_LAMBDA_FUNCTION_MEMORY_SIZE = '',
    AWS_REGION = '',
    FUNCTION_MEMORY_MB = '',
    FUNCTION_REGION = '',
    FUNCTION_TIMEOUT_SEC = '',
    VERCEL_REGION = ''
  } = process.env;

  const isAWSFaaS =
    AWS_EXECUTION_ENV.startsWith('AWS_Lambda_') || AWS_LAMBDA_RUNTIME_API.length > 0;
  const isAzureFaaS = FUNCTIONS_WORKER_RUNTIME.length > 0;
  const isGCPFaaS = K_SERVICE.length > 0 || FUNCTION_NAME.length > 0;
  const isVercelFaaS = VERCEL.length > 0;

  // Note: order matters, name must always be the last key
  const faasEnv = new Map();

  // When isVercelFaaS is true so is isAWSFaaS; Vercel inherits the AWS env
  if (isVercelFaaS && !(isAzureFaaS || isGCPFaaS)) {
    if (VERCEL_REGION.length > 0) {
      faasEnv.set('region', VERCEL_REGION);
    }

    faasEnv.set('name', 'vercel');
    return faasEnv;
  }

  if (isAWSFaaS && !(isAzureFaaS || isGCPFaaS || isVercelFaaS)) {
    if (AWS_REGION.length > 0) {
      faasEnv.set('region', AWS_REGION);
    }

    if (
      AWS_LAMBDA_FUNCTION_MEMORY_SIZE.length > 0 &&
      Number.isInteger(+AWS_LAMBDA_FUNCTION_MEMORY_SIZE)
    ) {
      faasEnv.set('memory_mb', new Int32(AWS_LAMBDA_FUNCTION_MEMORY_SIZE));
    }

    faasEnv.set('name', 'aws.lambda');
    return faasEnv;
  }

  if (isAzureFaaS && !(isGCPFaaS || isAWSFaaS || isVercelFaaS)) {
    faasEnv.set('name', 'azure.func');
    return faasEnv;
  }

  if (isGCPFaaS && !(isAzureFaaS || isAWSFaaS || isVercelFaaS)) {
    if (FUNCTION_REGION.length > 0) {
      faasEnv.set('region', FUNCTION_REGION);
    }

    if (FUNCTION_MEMORY_MB.length > 0 && Number.isInteger(+FUNCTION_MEMORY_MB)) {
      faasEnv.set('memory_mb', new Int32(FUNCTION_MEMORY_MB));
    }

    if (FUNCTION_TIMEOUT_SEC.length > 0 && Number.isInteger(+FUNCTION_TIMEOUT_SEC)) {
      faasEnv.set('timeout_sec', new Int32(FUNCTION_TIMEOUT_SEC));
    }

    faasEnv.set('name', 'gcp.func');
    return faasEnv;
  }

  return null;
}
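// Illustrative only: with example AWS Lambda variables set (AWS_EXECUTION_ENV=AWS_Lambda_nodejs20.x,
// AWS_REGION=us-east-1, AWS_LAMBDA_FUNCTION_MEMORY_SIZE=1024), getFAASEnv() returns a Map whose
// insertion order keeps 'name' last, as required by the doc comment above:
//
//   Map(3) { 'region' => 'us-east-1', 'memory_mb' => new Int32(1024), 'name' => 'aws.lambda' }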
/**
 * @internal
 * This type represents the global Deno object and the minimal type contract we expect it to satisfy.
 */
declare const Deno: { version?: { deno?: string } } | undefined;

/**
 * @internal
 * This type represents the global Bun object and the minimal type contract we expect it to satisfy.
 */
declare const Bun: { (): void; version?: string } | undefined;

/**
 * @internal
 * Get current JavaScript runtime platform
 *
 * NOTE: The version information fetching is intentionally written defensively
 * to avoid having a released driver version that becomes incompatible
 * with a future change to these global objects.
 */
function getRuntimeInfo(): string {
  if ('Deno' in globalThis) {
    const version = typeof Deno?.version?.deno === 'string' ? Deno?.version?.deno : '0.0.0-unknown';

    return `Deno v${version}, ${os.endianness()}`;
  }

  if ('Bun' in globalThis) {
    const version = typeof Bun?.version === 'string' ? Bun?.version : '0.0.0-unknown';
    return `Bun v${version}, ${os.endianness()}`;
  }

  return `Node.js ${process.version}, ${os.endianness()}`;
}
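// Usage sketch (not part of the driver source): how these helpers compose during connection
// setup. makeClientMetadata builds the size-capped handshake document synchronously, and
// addContainerMetadata later folds in Docker/Kubernetes hints asynchronously, keeping the
// original env data if the extended document would not fit in 512 bytes. The options below
// are assumptions for illustration.
//
//   const metadata = makeClientMetadata({
//     appName: 'inventory-service',
//     driverInfo: { name: 'myWrapper', version: '1.2.3', platform: 'myPlatform' }
//   });
//   const extended = await addContainerMetadata(metadata);
//   // extended.env?.container is only present when /.dockerenv is accessible or
//   // KUBERNETES_SERVICE_HOST is set.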