|
template<length_t L, qualifier Q> |
GLM_FUNC_DECL vec< L, uint, Q > | glm::uaddCarry (vec< L, uint, Q > const &x, vec< L, uint, Q > const &y, vec< L, uint, Q > &carry) |
| Adds 32-bit unsigned integers x and y, returning the sum modulo pow(2, 32). More...
|
|
template<length_t L, qualifier Q> |
GLM_FUNC_DECL vec< L, uint, Q > | glm::usubBorrow (vec< L, uint, Q > const &x, vec< L, uint, Q > const &y, vec< L, uint, Q > &borrow) |
| Subtracts the 32-bit unsigned integer y from x, returning the difference if non-negative, or pow(2, 32) plus the difference otherwise. More...
|
|
template<length_t L, qualifier Q> |
GLM_FUNC_DECL void | glm::umulExtended (vec< L, uint, Q > const &x, vec< L, uint, Q > const &y, vec< L, uint, Q > &msb, vec< L, uint, Q > &lsb) |
| Multiplies 32-bit unsigned integers x and y, producing a 64-bit result. More...
|
|
template<length_t L, qualifier Q> |
GLM_FUNC_DECL void | glm::imulExtended (vec< L, int, Q > const &x, vec< L, int, Q > const &y, vec< L, int, Q > &msb, vec< L, int, Q > &lsb) |
| Multiplies 32-bit signed integers x and y, producing a 64-bit result. More...
|
|
template<length_t L, typename T , qualifier Q> |
GLM_FUNC_DECL vec< L, T, Q > | glm::bitfieldExtract (vec< L, T, Q > const &Value, int Offset, int Bits) |
| Extracts bits [offset, offset + bits - 1] from value, returning them in the least significant bits of the result. More...
|
|
template<length_t L, typename T , qualifier Q> |
GLM_FUNC_DECL vec< L, T, Q > | glm::bitfieldInsert (vec< L, T, Q > const &Base, vec< L, T, Q > const &Insert, int Offset, int Bits) |
| Returns the result of inserting the bits least-significant bits of insert into base. More...
|
|
template<length_t L, typename T , qualifier Q> |
GLM_FUNC_DECL vec< L, T, Q > | glm::bitfieldReverse (vec< L, T, Q > const &v) |
| Returns the reversal of the bits of value. More...
|
|
template<typename genType > |
GLM_FUNC_DECL int | glm::bitCount (genType v) |
| Returns the number of bits set to 1 in the binary representation of value. More...
|
|
template<length_t L, typename T , qualifier Q> |
GLM_FUNC_DECL vec< L, int, Q > | glm::bitCount (vec< L, T, Q > const &v) |
| Returns the number of bits set to 1 in the binary representation of value. More...
|
|
template<typename genIUType > |
GLM_FUNC_DECL int | glm::findLSB (genIUType x) |
| Returns the bit number of the least significant bit set to 1 in the binary representation of value. More...
|
|
template<length_t L, typename T , qualifier Q> |
GLM_FUNC_DECL vec< L, int, Q > | glm::findLSB (vec< L, T, Q > const &v) |
| Returns the bit number of the least significant bit set to 1 in the binary representation of value. More...
|
|
template<typename genIUType > |
GLM_FUNC_DECL int | glm::findMSB (genIUType x) |
| Returns the bit number of the most significant bit in the binary representation of value. More...
|
|
template<length_t L, typename T , qualifier Q> |
GLM_FUNC_DECL vec< L, int, Q > | glm::findMSB (vec< L, T, Q > const &v) |
| Returns the bit number of the most significant bit in the binary representation of value. More...
|
|