Casts the input value to an unsigned 16-bit integer.
Usage            | Returns
Number.uint16()  | Number

Argument    | Type   | Details
this: input | Number | The input value.
Examples
Code Editor (JavaScript)
// Cast a number to unsigned 16-bit integer: [0, 65535].
var num = ee.Number(100);
print('Number:', num);
var asUint16 = num.uint16();
print('Number cast to uint16:', asUint16);

/**
 * Casting numbers to uint16 that fall outside its range or precision can
 * change the resulting value; the scenarios below demonstrate the behavior.
 */

// Decimal precision is dropped when a floating point value is cast to uint16.
var floatValue = ee.Number(1.7);
print('Floating point value:', floatValue);
var floatAsUint16 = floatValue.uint16();
print('Floating point value cast to uint16:', floatAsUint16);

// A value above the uint16 range maximum is clamped to the maximum.
var UINT16_MAX = 65535;
var tooHigh = ee.Number(UINT16_MAX + 12345);
print('Greater than uint16 max:', tooHigh);
var tooHighAsUint16 = tooHigh.uint16();
print('Greater than uint16 max cast to uint16 becomes uint16 max:', tooHighAsUint16);

// A value below the uint16 range minimum is clamped to the minimum.
var UINT16_MIN = 0;
var tooLow = ee.Number(UINT16_MIN - 12345);
print('Less than uint16 min:', tooLow);
var tooLowAsUint16 = tooLow.uint16();
print('Less than uint16 min cast to uint16 becomes uint16 min:', tooLowAsUint16);
Python setup
See the
Python Environment page for information on the Python API and using
geemap
for interactive development.
import ee
import geemap.core as geemap
Colab (Python)
# Cast a number to unsigned 16-bit integer: [0, 65535].
num = ee.Number(100)
print('Number:', num.getInfo())
as_uint16 = num.uint16()
print('Number cast to uint16:', as_uint16.getInfo())

# Casting numbers to uint16 that fall outside its range or precision can
# change the resulting value; the scenarios below demonstrate the behavior.

# Decimal precision is dropped when a floating point value is cast to uint16.
float_value = ee.Number(1.7)
print('Floating point value:', float_value.getInfo())
float_as_uint16 = float_value.uint16()
print('Floating point value cast to uint16:', float_as_uint16.getInfo())

# A value above the uint16 range maximum is clamped to the maximum.
UINT16_MAX = 65535
too_high = ee.Number(UINT16_MAX + 12345)
print('Greater than uint16 max:', too_high.getInfo())
too_high_as_uint16 = too_high.uint16()
print('Greater than uint16 max cast to uint16 becomes uint16 max:',
      too_high_as_uint16.getInfo())

# A value below the uint16 range minimum is clamped to the minimum.
UINT16_MIN = 0
too_low = ee.Number(UINT16_MIN - 12345)
print('Less than uint16 min:', too_low.getInfo())
too_low_as_uint16 = too_low.uint16()
print('Less than uint16 min cast to uint16 becomes uint16 min:',
      too_low_as_uint16.getInfo())