Classes
struct | DateTime_t |
Structure to encapsulate time information broken down into components (year, month, etc.).
Defines
#define | min(a, b) ( ( (a) < (b) ) ? (a) : (b) ) |
#define | max(a, b) ( ( (a) > (b) ) ? (a) : (b) ) |
#define | abs(value) ( ( (value) > 0 ) ? (value) : -(value) ) |
#define | div_round(numer, denom) ( ( (numer) + (denom)/2 ) / (denom) ) |
#define | div_round_up(numer, denom) ( ( (numer) + (denom) - 1 ) / (denom) ) |
#define | four_cc(a, b, c, d) ( (a<<24) | (b<<16) | (c<<8) | (d) ) |
#define | sizeof_array_element(object) (sizeof(object[2]) / 2) |
#define | takes |
#define | gives |
#define | out |
#define | S_IRWUSR (S_IRUSR | S_IWUSR) |
#define | S_IRWGRP (S_IRGRP | S_IWGRP) |
#define | S_IRWOTH (S_IROTH | S_IWOTH) |
#define | postfix int |
Typedefs
typedef unsigned int | uint |
typedef unsigned char | uint8 |
typedef signed char | int8 |
typedef unsigned short int | uint16 |
typedef signed short int | int16 |
typedef unsigned int | uint32 |
typedef signed int | int32 |
typedef unsigned long long | uint64 |
typedef signed long long | int64 |
typedef uint8 | byte |
typedef uint8 | ascii7 |
typedef uint8 | utf8 |
typedef const char | stringliteral |
typedef uint32 | ptr_as_uint_t |
typedef int64 | ustime_t |
typedef int32 | mstime_t |
typedef int32 | roughtime_t |
Functions
void | get_memory_info (uint32 *total_phys_kb, uint32 *total_virt_kb, uint32 *free_phys_kb, uint32 *free_virt_kb) |
void | get_cpu_utilization (int *highest_of_any_one_core, int *aggregate) |
void | get_date_time (DateTime_t *date_time) |
ustime_t | system_time_us () |
mstime_t | system_time_ms () |
roughtime_t | rough_time_ms () |
int32 | atomic_add (volatile int32 *value_pointer, int32 add) |
bool | atomic_add_if_greater (volatile int32 *value_pointer, int32 add_value, int32 if_greater_than) |
bool | atomic_add_if_less (volatile int32 *value_pointer, int32 add_value, int32 if_less_than) |
int | seeded_rand15 () |
int32 | seeded_rand (int bits) |
int32 | seeded_rand_range (int32 from_val, int32 to_val) |
int64 | seeded_rand_range64 (int64 from_val, int64 to_val) |
int | stricmp_utf8 (stringliteral *a, stringliteral *b) |
int | strnicmp_utf8 (stringliteral *a, stringliteral *b, int len) |
stringliteral * | stristr_utf8 (stringliteral *haystack, stringliteral *needle) |
stringliteral * | strnistr_utf8 (stringliteral *haystack, int haystack_length, stringliteral *needle, int needle_length) |
uint8 | mask_ls_bits_on8 (uint on_bit_count) |
uint16 | mask_ls_bits_on16 (uint on_bit_count) |
uint32 | mask_ls_bits_on32 (uint on_bit_count) |
uint64 | mask_ls_bits_on64 (uint on_bit_count) |
uint8 | mask_ms_bits_on8 (uint on_bit_count) |
uint16 | mask_ms_bits_on16 (uint on_bit_count) |
uint32 | mask_ms_bits_on32 (uint on_bit_count) |
uint64 | mask_ms_bits_on64 (uint on_bit_count) |
uint | count_bits_on8 (uint8 value) |
uint | count_bits_on16 (uint16 value) |
uint | count_bits_on32 (uint32 value) |
uint | count_bits_on64 (uint64 value) |
int | ls_on_bit_index8 (uint8 value) |
int | ls_on_bit_index16 (uint16 value) |
int | ls_on_bit_index32 (uint32 value) |
int | ls_on_bit_index64 (uint64 value) |
int | ms_on_bit_index8 (uint8 value) |
int | ms_on_bit_index16 (uint16 value) |
int | ms_on_bit_index32 (uint32 value) |
int | ms_on_bit_index64 (uint64 value) |
uint8 | flip_bit_order8 (uint8 value) |
uint16 | flip_bit_order16 (uint16 value) |
uint32 | flip_bit_order32 (uint32 value) |
uint64 | flip_bit_order64 (uint64 value) |
#define min(a, b)   ( ( (a) < (b) ) ? (a) : (b) )
#define max(a, b)   ( ( (a) > (b) ) ? (a) : (b) )
#define abs(value)   ( ( (value) > 0 ) ? (value) : -(value) )
#define div_round(numer, denom)   ( ( (numer) + (denom)/2 ) / (denom) )
#define div_round_up(numer, denom)   ( ( (numer) + (denom) - 1 ) / (denom) )
#define four_cc(a, b, c, d)   ( (a<<24) | (b<<16) | (c<<8) | (d) )
#define sizeof_array_element(object)   (sizeof(object[2]) / 2)
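For illustration, a minimal sketch of how these expression macros evaluate (the variable x is hypothetical):

int clamped = min( max( x, 0 ), 255 );        // clamps hypothetical x to 0..255
int rounded = div_round( 1050, 100 );         // (1050 + 50) / 100 = 11
int pages   = div_round_up( 4097, 4096 );     // 4097 bytes need 2 pages
uint32 riff = four_cc( 'R', 'I', 'F', 'F' );  // packs "RIFF" as 0x52494646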
#define takes |
#define gives |
#define out |
#define S_IRWUSR (S_IRUSR | S_IWUSR) |
#define S_IRWGRP (S_IRGRP | S_IWGRP) |
#define S_IRWOTH (S_IROTH | S_IWOTH) |
#define postfix int |
When overloading an operator in C++, use postfix to indicate that it is of the i++ rather than the ++i form. For example:
Prefix (++i):   MyIncrementableClass_t operator ++()
Postfix (i++):  MyIncrementableClass_t operator ++(postfix)
Prefix (++i):   incrementable_t operator ++(incrementable_t& increment)
Postfix (i++):  incrementable_t operator ++( incrementable_t& increment, postfix )
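A minimal compilable sketch of the convention (Counter_t is a hypothetical type; postfix expands to the dummy int parameter that distinguishes the postfix overload):

struct Counter_t
{
    int value;
    Counter_t& operator ++()         // prefix (++i): increment, return self
        { ++value; return *this; }
    Counter_t operator ++(postfix)   // postfix (i++): increment, return the old value
        { Counter_t old = *this; ++value; return old; }
};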
typedef unsigned int uint
typedef const char stringliteral
typedef uint32 ptr_as_uint_t
typedef int32 roughtime_t
Type representing the system time in milliseconds, which rolls over every 49.7 days. Because of the rollover, in order to perform a comparison like (t1 < t2), it is necessary to instead evaluate (t1-t2 < 0). The only difference from mstime_t is that roughtime_t can be obtained more efficiently on some platforms but does not have the same value space as mstime_t.
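A minimal sketch of the rollover-safe comparison described above (the two timestamps are hypothetical):

roughtime_t t1 = rough_time_ms();
/* ...work... */
roughtime_t t2 = rough_time_ms();
bool t1_is_earlier = ( t1 - t2 < 0 );   // rollover-safe form of (t1 < t2)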
void get_memory_info ( uint32 *total_phys_kb, uint32 *total_virt_kb, uint32 *free_phys_kb, uint32 *free_virt_kb )
Gets a snapshot of the system's memory state. The virtual memory statistics represent memory space in RAM and swapfile, so a system with 1 GB RAM and an additional 1 GB available in swap would indicate a virtual memory total of 2 GB. If the system has more RAM than the application can address, the virtual memory counts will be limited to what the application actually has available. The total virtual memory can grow or shrink with the swap file, particularly on MacOS. Any of the pointers may be NULL if the value is not needed.
When loading a data set of unbounded size, it is important to call this function periodically in order to determine whether it is safe to load more data into RAM, or unload some data. A good approach is to calculate an initial estimate of how much can be safely loaded, then calculate the memory used by new additions to the data set in memory. After 10% of the estimated safe total has been read, recheck the memory state and recalculate the safe total. It is never wise to go over 90% of the available virtual memory, or the application may run out of address space. A typical algorithm would be:
uint32 total_phys_kb, total_virt_kb, free_phys_kb, free_virt_kb;
get_memory_info( &total_phys_kb, &total_virt_kb, &free_phys_kb, &free_virt_kb );

if(m_safe_max_kb == 0)
    m_safe_max_kb = free_phys_kb - free_phys_kb / 20;         // Initialize
else if(free_virt_kb < total_virt_kb / 10)                    // Virtual memory is low
    m_safe_max_kb -= total_virt_kb / 20;                      // aggressive release
else if(free_phys_kb < total_phys_kb / 100)                   // Physical memory is > 99%
    m_safe_max_kb -= total_phys_kb / 20;                      // aggressive release
else if(free_phys_kb < total_phys_kb / 50)                    // Physical memory is > 98%
    m_safe_max_kb -= total_phys_kb / 50;                      // gently release
else if(free_phys_kb > total_phys_kb / 25)                    // Physical memory is < 95%
    m_safe_max_kb = m_now_in_memory_kb + free_phys_kb / 100;

if(m_safe_max_kb < total_phys_kb / 20)
    m_safe_max_kb = total_phys_kb / 20;                       // Don't allow starvation

if(m_now_in_memory_kb < m_safe_max_kb)
{
    m_recheck_after_alloc = (m_safe_max_kb - m_now_in_memory_kb) / 10;
    m_release = 0;
}
else
{
    m_recheck_after_alloc = 0;
    m_release = (m_now_in_memory_kb - m_safe_max_kb);
}
void get_cpu_utilization ( int *highest_of_any_one_core, int *aggregate )
Returns the percentage of available CPU power being used by the system. To get accurate results, if this function is called at all it should be called fairly often, though not so often as to degrade performance. A long delay between calls results in a utilization figure averaged over the entire previous interval; a delay that is too short simply returns the result from the previous snapshot. Either result pointer can be NULL if the result is not needed. This function has only an indirect relationship with the CPU used by the application itself. If the application used it to "slow down" under high CPU load, it would slow down to an inappropriate degree whenever other processes consume a lot of CPU. On the other hand, if this application runs continuously in one thread while all other threads and processes are idle, the aggregate utilization will be about 50% on a dual-core system, 25% on a quad-core, and so on, while highest_of_any_one_core will report 100%.
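A periodic-sampling sketch, for example to drive a status display (sleep_ms and update_status_display are hypothetical helpers):

for(;;)
{
    int busiest, aggregate;
    get_cpu_utilization( &busiest, &aggregate );
    update_status_display( busiest, aggregate );   // hypothetical UI hook
    sleep_ms( 1000 );                              // hypothetical: sample about once per second
}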
void get_date_time ( DateTime_t *date_time )
Gets the local time, similar to calling time followed by localtime_r but without the intermediate step, and with microsecond accuracy, all without resorting to nonportable methods.
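A usage sketch (the component field names are hypothetical; the actual layout is defined by DateTime_t):

DateTime_t now;
get_date_time( &now );
printf( "%04d-%02d-%02d\n", now.year, now.month, now.day );   // hypothetical field names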
ustime_t system_time_us ( )
Gets the system time in microseconds, guaranteed to be monotonic and accurate to the microsecond, without resorting to nonportable methods.
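A timing sketch (do_work is a hypothetical workload):

ustime_t start = system_time_us();
do_work();                                        // hypothetical workload
ustime_t elapsed_us = system_time_us() - start;   // microseconds spent in do_work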
mstime_t system_time_ms ( )
Gets the system time in milliseconds, guaranteed to be monotonic and accurate to the millisecond, without resorting to nonportable methods.
roughtime_t rough_time_ms ( )
Gets the system time in milliseconds. This function is optimized for speed. The value is accurate only to whatever precision the target OS can provide via a very fast call; the interval per increment should never be over 100 ms, and is more likely 10 ms (Windows) or less. The value is monotonic with respect to other calls to rough_time_ms, but has no relation to system_time_ms.
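A polling-timeout sketch using the rollover-safe comparison described for roughtime_t (condition_met is hypothetical):

roughtime_t deadline = rough_time_ms() + 5000;   // roughly 5 seconds from now
while( rough_time_ms() - deadline < 0 )          // rollover-safe (now < deadline)
{
    if( condition_met() )                        // hypothetical condition
        break;
}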
int32 atomic_add ( volatile int32 *value_pointer, int32 add )
Adds the specified value to the integer pointed to by value_pointer, and returns the new value. The value pointer must be aligned on a 32-bit word boundary. The addition is atomic in the sense that the read/modify/write cycle is synchronized at the hardware level, protecting against thread preemption between the read and the write, and even guaranteeing coherency if two CPU cores simultaneously try to add to the value. The atomic_add function is a synchronization primitive which can be used when the protection of a Mutex_t object is more than what is needed. It is also used by Mutex_t and Event_t to improve their efficiency when there is no contention for the resource.
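A minimal reference-counting sketch built on atomic_add (destroy_object is a hypothetical cleanup routine):

volatile int32 ref_count = 1;                 // one reference held at creation

void retain(void)  { atomic_add( &ref_count, 1 ); }

void release(void)
{
    if( atomic_add( &ref_count, -1 ) == 0 )   // atomic_add returns the new value
        destroy_object();                     // hypothetical cleanup
}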
bool atomic_add_if_greater ( volatile int32 *value_pointer, int32 add_value, int32 if_greater_than )
Adds the specified value to the integer pointed to by value_pointer, but only if the original value was greater than if_greater_than. The value pointer must be aligned on a 32-bit word boundary. The addition is atomic in the sense that the read/modify/write cycle is synchronized at the hardware level, protecting against thread preemption between the read and the write, and even guaranteeing coherency if two CPU cores simultaneously try to add to the value. The atomic_add_if_greater function is a synchronization primitive which can be used when the protection of a Mutex_t object is more than what is needed. It is also used by Event_t to improve its efficiency when the event is already available. Returns true if the addition was performed.
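A counting-semaphore "try acquire" sketch (the pool size of 4 is hypothetical):

volatile int32 slots_available = 4;   // hypothetical pool size

bool try_acquire_slot(void)
{
    // Take a slot only while the count is still positive.
    return atomic_add_if_greater( &slots_available, -1, 0 );
}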
bool atomic_add_if_less ( volatile int32 *value_pointer, int32 add_value, int32 if_less_than )
Adds the specified value to the integer pointed to by value_pointer, but only if the original value was less than if_less_than. The value pointer must be aligned on a 32-bit word boundary. The addition is atomic in the sense that the read/modify/write cycle is synchronized at the hardware level, protecting against thread preemption between the read and the write, and even guaranteeing coherency if two CPU cores simultaneously try to add to the value. The atomic_add_if_less function is a synchronization primitive which can be used when the protection of a Mutex_t object is more than what is needed. Returns true if the addition was performed.
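The complementary bounded-increment sketch (MAX_PENDING is a hypothetical limit):

#define MAX_PENDING 64                // hypothetical capacity limit
volatile int32 pending = 0;

bool try_add_pending(void)
{
    // Increment only while the count is still below the limit.
    return atomic_add_if_less( &pending, 1, MAX_PENDING );
}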
int seeded_rand15 ( )
A rand substitute which automatically seeds the random number generator with a random value, and has a guaranteed range of 0 to 0x7fff.
int32 seeded_rand ( int bits )
A rand substitute which automatically seeds the random number generator with a random value, and has a guaranteed range of 0 to (1<<bits)-1. bits must be between 0 and 32. If bits is 0, seeded_rand will always return 0.
int32 seeded_rand_range ( int32 from_val, int32 to_val )
A rand substitute which automatically seeds the random number generator with a random value, and has a guaranteed range of from_val to to_val.
int64 seeded_rand_range64 ( int64 from_val, int64 to_val )
A rand substitute which automatically seeds the random number generator with a random value, and has a guaranteed range of from_val to to_val.
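A usage sketch of the seeded_rand family (no explicit seeding call is needed):

int   raw  = seeded_rand15();            // uniform in 0..0x7fff
int32 nib  = seeded_rand( 4 );           // uniform in 0..15
int32 die  = seeded_rand_range( 1, 6 );  // uniform in 1..6
int64 wide = seeded_rand_range64( 0, 1000000000000LL );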
int stricmp_utf8 ( stringliteral *a, stringliteral *b )
Replacement for stricmp / strcasecmp (case-insensitive string compare) which is safe with international characters, not just A to Z and a to z as is common. It also avoids the portability problem that the available function name is not well standardized, with stricmp existing on some platforms and strcasecmp on others. Returns a positive number if a comes after b, a negative number if a comes before b, and zero if they match.
int strnicmp_utf8 ( stringliteral *a, stringliteral *b, int len )
Replacement for strnicmp / strncasecmp (case-insensitive, length-limited string compare) which is safe with international characters, not just A to Z and a to z as is common. It also avoids the portability problem that the available function name is not well standardized, with strnicmp existing on some platforms and strncasecmp on others. Returns a positive number if a comes after b, a negative number if a comes before b, and zero if they match.
stringliteral* stristr_utf8 ( stringliteral *haystack, stringliteral *needle )
Replacement for stristr / strcasestr (case-insensitive substring search) which is safe with international characters, not just A to Z and a to z as is common. It also avoids the portability problem that the available function name is not well standardized, with stristr existing on some platforms and strcasestr on others. Searches for needle occurring in haystack, and returns a pointer to the beginning of the substring within haystack if it is found, or NULL if it is not.
stringliteral* strnistr_utf8 ( stringliteral *haystack, int haystack_length, stringliteral *needle, int needle_length )
Replacement for stristr / strcasestr (case-insensitive substring search) which is safe with international characters, not just A to Z and a to z as is common. It also avoids the portability problem that the available function name is not well standardized, with stristr existing on some platforms and strcasestr on others. Searches for needle occurring in haystack, and returns a pointer to the beginning of the substring within haystack if it is found, or NULL if it is not. This version is optimized for when the lengths of the needle and haystack are already known.
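A usage sketch of the case-insensitive UTF-8 helpers (the strings are hypothetical):

stringliteral *name = "Crème Brûlée";
bool same = ( stricmp_utf8( name, "crème brûlée" ) == 0 );   // true despite the case difference
stringliteral *hit = stristr_utf8( name, "BRÛLÉE" );         // points into name, or NULL if absent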
uint8 mask_ls_bits_on8 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the least significant bit, and the remaining bits set to zero.
uint16 mask_ls_bits_on16 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the least significant bit, and the remaining bits set to zero.
uint32 mask_ls_bits_on32 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the least significant bit, and the remaining bits set to zero.
uint64 mask_ls_bits_on64 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the least significant bit, and the remaining bits set to zero.
uint8 mask_ms_bits_on8 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the most significant bit, and the remaining bits set to zero.
uint16 mask_ms_bits_on16 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the most significant bit, and the remaining bits set to zero.
uint32 mask_ms_bits_on32 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the most significant bit, and the remaining bits set to zero.
uint64 mask_ms_bits_on64 ( uint on_bit_count )
Generates a mask value with on_bit_count bits set to one starting with the most significant bit, and the remaining bits set to zero.
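A field-extraction sketch using the mask generators (the packed word and field position are hypothetical):

uint32 packed = 0x000001F8;                              // hypothetical packed word
uint32 field  = (packed >> 3) & mask_ls_bits_on32( 5 );  // bits 3..7 -> 0x1F
uint8  high2  = mask_ms_bits_on8( 2 );                   // 0xC0: top two bits set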
int ls_on_bit_index8 ( uint8 value )
Finds the bit index of the least significant bit which is on. For example ls_on_bit_index8(0x14) returns 2. Returns -1 if no bits are on.
int ls_on_bit_index16 ( uint16 value )
Finds the bit index of the least significant bit which is on. For example ls_on_bit_index16(0x14) returns 2. Returns -1 if no bits are on.
int ls_on_bit_index32 ( uint32 value )
Finds the bit index of the least significant bit which is on. For example ls_on_bit_index32(0x14) returns 2. Returns -1 if no bits are on.
int ls_on_bit_index64 ( uint64 value )
Finds the bit index of the least significant bit which is on. For example ls_on_bit_index64(0x14) returns 2. Returns -1 if no bits are on.
int ms_on_bit_index8 ( uint8 value )
Finds the bit index of the most significant bit which is on. For example ms_on_bit_index8(0x14) returns 4. Returns -1 if no bits are on.
int ms_on_bit_index16 ( uint16 value )
Finds the bit index of the most significant bit which is on. For example ms_on_bit_index16(0x14) returns 4. Returns -1 if no bits are on.
int ms_on_bit_index32 ( uint32 value )
Finds the bit index of the most significant bit which is on. For example ms_on_bit_index32(0x14) returns 4. Returns -1 if no bits are on.
int ms_on_bit_index64 ( uint64 value )
Finds the bit index of the most significant bit which is on. For example ms_on_bit_index64(0x14) returns 4. Returns -1 if no bits are on.
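A sketch using the bit-index helpers, for example to take an integer log2 (the value is hypothetical and must be nonzero for a meaningful result):

uint32 value  = 0x14;                        // 0b10100
int lowest    = ls_on_bit_index32( value );  // 2
int highest   = ms_on_bit_index32( value );  // 4, i.e. floor(log2(value))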
uint8 flip_bit_order8 ( uint8 value )
Flips the order of the bits in value from most significant to least significant bit; for example, flip_bit_order8(0x20) returns 0x04.
uint16 flip_bit_order16 ( uint16 value )
Flips the order of the bits in value from most significant to least significant bit; for example, flip_bit_order16(0x8020) returns 0x0401.
uint32 flip_bit_order32 ( uint32 value )
Flips the order of the bits in value from most significant to least significant bit; for example, flip_bit_order32(0x10020000) returns 0x00004008.
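A reflection sketch of the kind needed by reflected-input CRC algorithms (the input byte is hypothetical):

uint8 message_byte = 0xB5;                            // hypothetical input: 0b10110101
uint8 reflected    = flip_bit_order8( message_byte ); // 0b10101101 = 0xAD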